repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) |
---|---|---|---|---|---|---|---|---|---|---|
siddharthraja/AI | Sampling - metropolis, gibbs/sampling.py | 1 | 16779 | import sys
# from importlib import reload
if('pbnt/combined' not in sys.path):
sys.path.append('pbnt/combined')
from exampleinference import inferenceExample
from random import randint
import numpy as np
from Node import BayesNode
from Graph import BayesNet
from numpy import zeros, float32
import Distribution
from Distribution import DiscreteDistribution, ConditionalDiscreteDistribution
# JunctionTreeEngine (used by calculate_posterior below) lives in pbnt's Inference module
from Inference import JunctionTreeEngine
# Example Problem
#
# There are three frisbee teams who play each other: the Airheads, the Buffoons, and the Clods (A, B and C for short). Each match is between two teams, and each team can either win, lose, or draw in a match. Each team has a fixed but unknown skill level, represented as an integer from 0 to 3. Each match's outcome is probabilistically proportional to the difference in skill level between the teams.
# The problem involves predicting the outcome of the matches, given prior knowledge of previous matches. Rather than exact inference, the network will be sampled using Markov Chain Monte Carlo methods: Metropolis-Hastings (2b) and Gibbs sampling (2c).
#
# Build a Bayes Net to represent the three teams and their influences on the match outcomes. Variable conventions:
#
# | variable name | description|
# |---------|:------:|
# |A| A's skill level|
# |B | B's skill level|
# |C | C's skill level|
# |AvB | the outcome of A vs. B <br> (0 = A wins, 1 = B wins, 2 = tie)|
# |BvC | the outcome of B vs. C <br> (0 = B wins, 1 = C wins, 2 = tie)|
# |CvA | the outcome of C vs. A <br> (0 = C wins, 1 = A wins, 2 = tie)|
#
# Assume that each team has the following prior distribution of skill levels:
#
# |skill level|P(skill level)|
# |----|:----:|
# |0|0.15|
# |1|0.45|
# |2|0.30|
# |3|0.10|
#
# In addition, assume that the differences in skill levels correspond to the following probabilities of winning:
#
# | skill difference <br> (T2 - T1) | T1 wins | T2 wins| Tie |
# |------------|----------|---|:--------:|
# |0|0.10|0.10|0.80|
# |1|0.20|0.60|0.20|
# |2|0.15|0.75|0.10|
# |3|0.05|0.90|0.05|
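# The helper below is an illustrative sketch (not part of the original assignment code).
# It shows how the skill-difference table above expands into the 4x4x3 CPTs that
# get_game_network() fills in by hand, using the convention
# table[T1_skill, T2_skill, :] = [P(T1 wins), P(T2 wins), P(tie)].
def build_outcome_cpt():
    # rows of the skill-difference table, keyed by |T2 - T1|
    diff_rows = {0: [0.10, 0.10, 0.80],
                 1: [0.20, 0.60, 0.20],
                 2: [0.15, 0.75, 0.10],
                 3: [0.05, 0.90, 0.05]}
    cpt = zeros([4, 4, 3], dtype=float32)
    for t1 in range(4):
        for t2 in range(4):
            p_t1, p_t2, p_tie = diff_rows[abs(t2 - t1)]
            if t2 >= t1:
                cpt[t1, t2, :] = [p_t1, p_t2, p_tie]
            else:
                # when T1 is the stronger team, the win probabilities swap
                cpt[t1, t2, :] = [p_t2, p_t1, p_tie]
    return cpt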
def get_game_network():
"""A Bayes Net representation
of the game problem."""
nodes = []
# TODO: fill this out
A_node = BayesNode(0, 4, name='A')
B_node = BayesNode(1, 4, name='B')
C_node = BayesNode(2, 4, name='C')
AvB_node = BayesNode(3, 3, name='A vs B')
BvC_node = BayesNode(4, 3, name='B vs C')
CvA_node = BayesNode(5, 3, name='C vs A')
nodes = []
A_node.add_child(AvB_node)
AvB_node.add_parent(A_node)
B_node.add_child(AvB_node)
AvB_node.add_parent(B_node)
B_node.add_child(BvC_node)
BvC_node.add_parent(B_node)
C_node.add_child(BvC_node)
BvC_node.add_parent(C_node)
C_node.add_child(CvA_node)
CvA_node.add_parent(C_node)
A_node.add_child(CvA_node)
CvA_node.add_parent(A_node)
nodes.append(A_node)
nodes.append(B_node)
nodes.append(C_node)
nodes.append(AvB_node)
nodes.append(BvC_node)
nodes.append(CvA_node)
A_distribution = DiscreteDistribution(A_node)
index = A_distribution.generate_index([],[])
A_distribution[index] = [0.15,0.45,0.3,0.1]
A_node.set_dist(A_distribution)
B_distribution = DiscreteDistribution(B_node)
index = B_distribution.generate_index([],[])
B_distribution[index] = [0.15,0.45,0.3,0.1]
B_node.set_dist(B_distribution)
C_distribution = DiscreteDistribution(C_node)
index = C_distribution.generate_index([],[])
C_distribution[index] = [0.15,0.45,0.3,0.1]
C_node.set_dist(C_distribution)
dist = zeros([A_node.size(), B_node.size(), AvB_node.size()], dtype=float32)
# T2-T1=0
dist[0,0,:] = [0.10,0.10,0.80]
dist[1,1,:] = [0.10,0.10,0.80]
dist[2,2,:] = [0.10,0.10,0.80]
dist[3,3,:] = [0.10,0.10,0.80]
# T2-T1=1
dist[0,1,:] = [0.20,0.60,0.20]
dist[1,2,:] = [0.20,0.60,0.20]
dist[2,3,:] = [0.20,0.60,0.20]
dist[1,0,:] = [0.60,0.20,0.20]
dist[2,1,:] = [0.60,0.20,0.20]
dist[3,2,:] = [0.60,0.20,0.20]
# T2-T1=2
dist[0,2,:] = [0.15,0.75,0.10]
dist[1,3,:] = [0.15,0.75,0.10]
dist[2,0,:] = [0.75,0.15,0.10]
dist[3,1,:] = [0.75,0.15,0.10]
# T2-T1=3
dist[0,3,:] = [0.05,0.90,0.05]
dist[3,0,:] = [0.90,0.05,0.05]
AvB_distribution = ConditionalDiscreteDistribution(nodes=[A_node, B_node, AvB_node], table=dist)
AvB_node.set_dist(AvB_distribution)
# P(BvC|B,C)
dist = zeros([B_node.size(), C_node.size(), BvC_node.size()], dtype=float32)
# T2-T1=0
dist[0,0,:] = [0.10,0.10,0.80]
dist[1,1,:] = [0.10,0.10,0.80]
dist[2,2,:] = [0.10,0.10,0.80]
dist[3,3,:] = [0.10,0.10,0.80]
# T2-T1=1
dist[0,1,:] = [0.20,0.60,0.20]
dist[1,2,:] = [0.20,0.60,0.20]
dist[2,3,:] = [0.20,0.60,0.20]
dist[1,0,:] = [0.60,0.20,0.20]
dist[2,1,:] = [0.60,0.20,0.20]
dist[3,2,:] = [0.60,0.20,0.20]
# T2-T1=2
dist[0,2,:] = [0.15,0.75,0.10]
dist[1,3,:] = [0.15,0.75,0.10]
dist[2,0,:] = [0.75,0.15,0.10]
dist[3,1,:] = [0.75,0.15,0.10]
# T2-T1=3
dist[0,3,:] = [0.05,0.90,0.05]
dist[3,0,:] = [0.90,0.05,0.05]
BvC_distribution = ConditionalDiscreteDistribution(nodes=[B_node, C_node, BvC_node], table=dist)
BvC_node.set_dist(BvC_distribution)
# P(CvA|C,A)
dist = zeros([C_node.size(), A_node.size(), CvA_node.size()], dtype=float32)
# T2-T1=0
dist[0,0,:] = [0.10,0.10,0.80]
dist[1,1,:] = [0.10,0.10,0.80]
dist[2,2,:] = [0.10,0.10,0.80]
dist[3,3,:] = [0.10,0.10,0.80]
# T2-T1=1
dist[0,1,:] = [0.20,0.60,0.20]
dist[1,2,:] = [0.20,0.60,0.20]
dist[2,3,:] = [0.20,0.60,0.20]
dist[1,0,:] = [0.60,0.20,0.20]
dist[2,1,:] = [0.60,0.20,0.20]
dist[3,2,:] = [0.60,0.20,0.20]
# T2-T1=2
dist[0,2,:] = [0.15,0.75,0.10]
dist[1,3,:] = [0.15,0.75,0.10]
dist[2,0,:] = [0.75,0.15,0.10]
dist[3,1,:] = [0.75,0.15,0.10]
# T2-T1=3
dist[0,3,:] = [0.05,0.90,0.05]
dist[3,0,:] = [0.90,0.05,0.05]
CvA_distribution = ConditionalDiscreteDistribution(nodes=[C_node, A_node, CvA_node], table=dist)
CvA_node.set_dist(CvA_distribution)
return BayesNet(nodes)
# Metropolis-Hastings sampling
def get_prob(bayes_net, val):
A = bayes_net.get_node_by_name('A')
B = bayes_net.get_node_by_name('B')
C = bayes_net.get_node_by_name('C')
AB = bayes_net.get_node_by_name('A vs B')
BC = bayes_net.get_node_by_name('B vs C')
CA = bayes_net.get_node_by_name('C vs A')
tab = [A.dist.table, B.dist.table, C.dist.table] #AB.dist.table, BC.dist.table, CA.dist.table]
p1 = (tab[0][val[0]]) * (tab[1][val[1]]) * (tab[2][val[2]])
pab = AB.dist.table[val[1]][val[0]]
pbc = BC.dist.table[val[2]][val[1]]
pca = CA.dist.table[val[0]][val[2]]
probability = p1 * (pab[val[3]]) * (pbc[val[4]]) * (pca[val[5]])
return probability
def MH_sampling(bayes_net, initial_value):
"""A single iteration of the
Metropolis-Hastings algorithm given a
Bayesian network and an initial state
value. Returns the state sampled from
the probability distribution."""
# TODO: finish this function
# AvB(3) => 1-0
# BvC(4) => 2-1
# CvA(5) => 0-2
# curr_tuple = initial_value # [A,B,C, AvB,BvC,CvA]
nodes = list(bayes_net.nodes)
rand_node = randint(0,5)
current = nodes[rand_node]
prob_dist = current.dist.table
sample = []
sample.append(initial_value[0])
sample.append(initial_value[1])
sample.append(initial_value[2])
sample.append(initial_value[3])
sample.append(initial_value[4])
sample.append(initial_value[5])
if rand_node < 3:
r = randint(0,3)
#print alpha
else:
r = randint(0,2)
sample[rand_node] = r
numerator = get_prob(bayes_net, sample)
den = get_prob(bayes_net, initial_value)
alpha = (numerator) / (den)
#print alpha, numerator, den, sample
alpha = min([1, alpha])
x=np.random.uniform(0,1,1)
if x>=alpha:
#print 'not accepted'
sample = initial_value
#print alpha, sample
return sample
# Gibbs sampling
def Gibbs_sampling(bayes_net, initial_value):
"""A single iteration of the
Gibbs sampling algorithm given a
Bayesian network and an initial state
value. Returns the state sampled from
the probability distribution."""
nodes = list(bayes_net.nodes)
rand_node = randint(0,5)
current = nodes[rand_node]
prob_dist = current.dist.table
sample = []
sample.append(initial_value[0])
sample.append(initial_value[1])
sample.append(initial_value[2])
sample.append(initial_value[3])
sample.append(initial_value[4])
sample.append(initial_value[5])
temp = sample
if rand_node < 3:
#r = randint(0,3)
temp[rand_node] = 0
n0 = get_prob(bayes_net, temp)
temp[rand_node] = 1
n1 = get_prob(bayes_net, temp)
temp[rand_node] = 2
n2 = get_prob(bayes_net, temp)
temp[rand_node] = 3
n3 = get_prob(bayes_net, temp)
d = n0+n1+n2+n3
l = [n0/d, n1/d, n2/d, n3/d]
l1 = [l[0], l[0]+l[1], l[0]+l[1]+l[2], l[0]+l[1]+l[2]+l[3]]
#pdf = {n0/d:0, n1/d:1, n2/d:2, n3/d:3} #{0:n0/d, 1:n1/d, 2:n2/d, 3:n3/d}
x=np.random.uniform(0,1,1)
for i in range(0, len(l1)):
if(x<=l1[i]): break;
final_val = i
initial_value[rand_node] = final_val
else:
#r = randint(0,2)
temp[rand_node] = 0
n0 = get_prob(bayes_net, temp)
temp[rand_node] = 1
n1 = get_prob(bayes_net, temp)
temp[rand_node] = 2
n2 = get_prob(bayes_net, temp)
d = n0+n1+n2
l = [n0/d, n1/d, n2/d]
l1 = [l[0], l[0]+l[1], l[0]+l[1]+l[2]]
#pdf = {n0/d:0, n1/d:1, n2/d:2} #{0:n0/d, 1:n1/d, 2:n2/d}
x=np.random.uniform(0,1,1)
for i in range(0, len(l1)):
if(x<=l1[i]): break;
final_val = i
initial_value[rand_node] = final_val
#print rand_node
#print final_val, initial_value
return initial_value
# Comparing sampling methods
def are_tuples_same(a, b):
if len(a) != len(b):
return False
flag = True
for i in range(0, len(a)):
if a[i]!=b[i]:
flag = False
return flag
def calculate_posterior(games_net):
"""Example: Calculating the posterior distribution
of the BvC match given that A won against
B and tied C. Return a list of probabilities
corresponding to win, loss and tie likelihood."""
posterior = [0,0,0]
AB = games_net.get_node_by_name('A vs B')
BC = games_net.get_node_by_name('B vs C')
CA = games_net.get_node_by_name('C vs A')
engine = JunctionTreeEngine(games_net)
engine.evidence[AB] = 0
engine.evidence[CA] = 2
Q = engine.marginal(BC)[0]
index = Q.generate_index([0],range(Q.nDims))
posterior[0] = Q[index]
engine = JunctionTreeEngine(games_net)
engine.evidence[AB] = 0
engine.evidence[CA] = 2
Q = engine.marginal(BC)[0]
index = Q.generate_index([1],range(Q.nDims))
posterior[1] = Q[index]
engine = JunctionTreeEngine(games_net)
engine.evidence[AB] = 0
engine.evidence[CA] = 2
Q = engine.marginal(BC)[0]
index = Q.generate_index([2],range(Q.nDims))
posterior[2] = Q[index]
return posterior
iter_counts = [1e1,1e3,1e5,1e6]
def compare_sampling_initial(bayes_net, posterior):
"""Compare Gibbs and Metropolis-Hastings
sampling by calculating how long it takes
for each method to converge to the
provided posterior."""
# Metropolis
prob = [0,0,0,0,0, 0,0,0,0,0]
count = iterations = 0
converges = False
tuple_list = [[],[],[],[],[],[],[],[],[],[]]
initial_value = [0,0,0,0,0,2]
curr_sample = []
for i in range(0, len(initial_value)):
curr_sample.append(initial_value[i])
print curr_sample
while not converges and count < 150000:
iterations = iterations + 1
        temp = MH_sampling(bayes_net, curr_sample)
if not are_tuples_same(temp, curr_sample):
curr_sample = temp
prob[count%10] = get_prob(bayes_net, curr_sample)
tuple_list[count%10] = curr_sample
count = count + 1
if count >10:
#convg = np.std(posterior)
converges = True
for i in range(1,10):
if (float(abs(prob[i] - prob[i-1]))/prob[i-1]) > 0.0009:
converges = False
if converges:
print 'converging'
MH_convergence = np.mean(prob)
print '\n', count, iterations, '\n', tuple_list, '\n', prob, MH_convergence
Gibbs_convergence = 0.0
return Gibbs_convergence, MH_convergence
iter_counts = [1e1,1e3,1e5,1e6]
def compare_sampling(bayes_net, posterior):
"""Comparing Gibbs and Metropolis-Hastings
sampling by calculating how long it takes
for each method to converge to the
provided posterior."""
# Metropolis
count = 0
convg = 0
initial_value = [0,0,0,0,0,2]
prob = [0,0,0]
new = [0,0,0]
old = [0,0,0]
iteration = 0
curr_sample = []
for i in range(0, len(initial_value)):
curr_sample.append(initial_value[i])
while convg <= 10 and iteration <= 250000:
temp = MH_sampling(bayes_net, curr_sample)
iteration = iteration + 1
if temp[3]!=0 or temp[5]!=2:
continue
#
curr_sample = temp
if count < 1000:
count = count + 1
prob[temp[4]] = prob[temp[4]] + 1
else:
old = [float(prob[0])/count, float(prob[1])/count, float(prob[2])/count]
count = count + 1
prob[temp[4]] = prob[temp[4]] + 1
new = [float(prob[0])/count, float(prob[1])/count, float(prob[2])/count]
if new[0] == 0 or new[1] == 0 or new[2] == 0:
print new, count
x = abs(old[0]-new[0])/float(new[0]) * 100
y = abs(old[1]-new[1])/float(new[1]) * 100
z = abs(old[2]-new[2])/float(new[2]) * 100
#print new
if x<0.10 and y<0.10 and z<0.10:
convg = convg + 1
else:
convg = 0
MH_convergence = count #[float(prob[0])/count, float(prob[1])/count, float(prob[2])/count]
#print 'MH_convergence', MH_convergence, count, iteration
#------------------------------------------------------------------------------------
Gibbs_convergence = gibbs_samp(bayes_net)
return Gibbs_convergence, MH_convergence
def gibbs_samp(bayes_net):
burn = 1000
count = 0
convg = 0
initial_value = [0,0,0,0,0,2]
prob = [0,0,0]
new = [0,0,0]
old = [0,0,0]
iteration = 0
curr_sample = []
for i in range(0, len(initial_value)):
curr_sample.append(initial_value[i])
while convg <= 10 and iteration <= 250000:
temp = Gibbs_sampling(bayes_net, curr_sample)
iteration = iteration + 1
if temp[3]!=0 or temp[5]!=2:
continue
#
curr_sample = temp
if count < burn:
count = count + 1
prob[temp[4]] = prob[temp[4]] + 1
else:
old = [float(prob[0])/count, float(prob[1])/count, float(prob[2])/count]
count = count + 1
prob[temp[4]] = prob[temp[4]] + 1
new = [float(prob[0])/count, float(prob[1])/count, float(prob[2])/count]
if new[0] == 0 or new[1] == 0 or new[2] == 0:
print new, count
x = abs(old[0]-new[0])/float(new[0]) * 100
y = abs(old[1]-new[1])/float(new[1]) * 100
z = abs(old[2]-new[2])/float(new[2]) * 100
#print new
if x<0.10 and y<0.10 and z<0.10:
convg = convg + 1
else:
convg = 0
Gibbs_convergence = count #[float(prob[0])/count, float(prob[1])/count, float(prob[2])/count]
#print 'Gibbs_convergence', Gibbs_convergence, count, iteration
return Gibbs_convergence
if __name__ == "__main__":
game_net = get_game_network()
# arbitrary initial state for the game system
initial_value = [0,0,0,0,0,0]
# print get_prob(game_net, [0,0,0,2,0,0])
sample = MH_sampling(game_net, initial_value)
print 'MH', sample
initial_value = [0,0,2,0,0,1]
sample = Gibbs_sampling(game_net, initial_value)
print 'Gibbs', sample
posterior = calculate_posterior(game_net)
Gibbs_convergence, MH_convergence = compare_sampling(game_net, posterior)
print Gibbs_convergence, MH_convergence | mit | 2,625,154,690,473,991,000 | 31.582524 | 400 | 0.56374 | false |
cyberbikepunk/job-spiders | joby/tests/utilities.py | 1 | 2246 | """ Utility functions and classes for the test suite. """
from codecs import open
from requests import get
from scrapy import Request
from scrapy.http import HtmlResponse
from importlib import import_module
from slugify import slugify
from os.path import join, exists
from joby.settings import TEST_ASSETS_DIR
class WebpageCachingError(Exception):
pass
# noinspection PyUnusedLocal
def make_offline_parser(spider, module_name, base_class_name, url, *loaders):
module = import_module('joby.spiders.' + module_name)
base_class = getattr(module, base_class_name)
# noinspection PyShadowingNames
class OfflineParser(base_class):
def __init__(self, url, *loaders):
self.filepath = join(TEST_ASSETS_DIR, slugify(url) + '.html')
self.base_class = base_class
self.url = url
self.html = self._load_from_cache()
self.request = Request(url=self.url)
self.response = HtmlResponse(self.url, body=self.html, request=self.request)
self.loaders = self._assign_loaders(loaders)
super(OfflineParser, self).__init__(spider, self.response, **self.loaders)
def _assign_loaders(self, loader_info):
loaders = {}
for item_name, item_class, loader_class in loader_info:
loader = loader_class(item=item_class(), response=self.response)
loaders.update({item_name: loader})
return loaders
def _load_from_cache(self):
if not exists(self.filepath):
self._save_to_cache()
with open(self.filepath) as cache:
return cache.read()
def _save_to_cache(self):
response = get(self.url)
if response.status_code != 200:
raise WebpageCachingError('Cannot download %s (%s)' % (self.url, response.status_code))
else:
with open(self.filepath, 'w+', response.encoding) as cache:
cache.write('<!-- ' + response.url + ' -->' + '\n')
cache.write(response.text)
def __repr__(self):
return '<Offline %s (%s)>' % (base_class_name, self.url)
return OfflineParser(url, *loaders)
| mit | -4,277,102,437,244,125,000 | 35.819672 | 103 | 0.607302 | false |
sxjscience/mxnet | tests/python/unittest/test_executor.py | 1 | 6701 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import mxnet as mx
from common import with_seed
from mxnet.test_utils import assert_almost_equal, environment
def check_bind_with_uniform(uf, gf, dim, sf=None, lshape=None, rshape=None):
"""check function consistency with uniform random numbers"""
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
lhs = mx.symbol.Variable('lhs')
rhs = mx.symbol.Variable('rhs')
if sf is not None:
ret = sf(lhs, rhs)
else:
ret = uf(lhs, rhs)
assert ret.list_arguments() == ['lhs', 'rhs']
lshape = shape if lshape is None else lshape
rshape = shape if rshape is None else rshape
lhs_arr = mx.nd.array(np.random.uniform(-1, 1, lshape))
rhs_arr = mx.nd.array(np.random.uniform(-1, 1, rshape))
lhs_grad = mx.nd.empty(lshape)
rhs_grad = mx.nd.empty(rshape)
executor = ret._bind(mx.Context('cpu'),
args=[lhs_arr, rhs_arr],
args_grad=[lhs_grad, rhs_grad])
exec3 = ret._bind(mx.Context('cpu'),
args=[lhs_arr, rhs_arr])
exec4 = ret._bind(mx.Context('cpu'),
args={'rhs': rhs_arr, 'lhs': lhs_arr},
args_grad={'lhs': lhs_grad, 'rhs': rhs_grad})
executor.forward()
exec3.forward()
exec4.forward()
out2 = executor.outputs[0].asnumpy()
out1 = uf(lhs_arr.asnumpy(), rhs_arr.asnumpy())
out3 = exec3.outputs[0].asnumpy()
out4 = exec4.outputs[0].asnumpy()
assert_almost_equal(out1, out2, rtol=1e-5, atol=1e-5)
assert_almost_equal(out1, out3, rtol=1e-5, atol=1e-5)
assert_almost_equal(out1, out4, rtol=1e-5, atol=1e-5)
# test gradient
out_grad = mx.nd.array(np.ones(out2.shape))
lhs_grad2, rhs_grad2 = gf(out_grad.asnumpy(),
lhs_arr.asnumpy(),
rhs_arr.asnumpy())
executor.backward([out_grad])
assert_almost_equal(lhs_grad.asnumpy(), lhs_grad2, rtol=1e-5, atol=1e-5)
assert_almost_equal(rhs_grad.asnumpy(), rhs_grad2, rtol=1e-5, atol=1e-5)
@with_seed()
def test_bind():
for enable_bulking in ['0', '1']:
with environment({'MXNET_EXEC_BULK_EXEC_INFERENCE': enable_bulking,
'MXNET_EXEC_BULK_EXEC_TRAIN': enable_bulking}):
nrepeat = 10
maxdim = 4
for repeat in range(nrepeat):
for dim in range(1, maxdim):
check_bind_with_uniform(lambda x, y: x + y,
lambda g, x, y: (g, g),
dim)
check_bind_with_uniform(lambda x, y: x - y,
lambda g, x, y: (g, -g),
dim)
check_bind_with_uniform(lambda x, y: x * y,
lambda g, x, y: (y * g, x * g),
dim)
check_bind_with_uniform(lambda x, y: x / y,
lambda g, x, y: (g / y, -x * g/ (y**2)),
dim)
check_bind_with_uniform(lambda x, y: np.maximum(x, y),
lambda g, x, y: (g * (x>=y), g * (y>x)),
dim,
sf=mx.symbol.maximum)
check_bind_with_uniform(lambda x, y: np.minimum(x, y),
lambda g, x, y: (g * (x<=y), g * (y<x)),
dim,
sf=mx.symbol.minimum)
# @roywei: Removing fixed seed as flakiness in this test is fixed
# tracked at https://github.com/apache/incubator-mxnet/issues/11686
@with_seed()
def test_dot():
nrepeat = 10
maxdim = 4
for repeat in range(nrepeat):
s =tuple(np.random.randint(1, 200, size=3))
check_bind_with_uniform(lambda x, y: np.dot(x, y),
lambda g, x, y: (np.dot(g, y.T), np.dot(x.T, g)),
2,
lshape=(s[0], s[1]),
rshape=(s[1], s[2]),
sf = mx.symbol.dot)
for repeat in range(nrepeat):
s =tuple(np.random.randint(1, 200, size=1))
check_bind_with_uniform(lambda x, y: np.dot(x, y),
lambda g, x, y: (g * y, g * x),
2,
lshape=(s[0],),
rshape=(s[0],),
sf = mx.symbol.dot)
@with_seed()
def test_reshape():
x = mx.sym.Variable('x')
y = mx.sym.FullyConnected(x, num_hidden=4)
exe = y._simple_bind(mx.cpu(), x=(5,4), grad_req='null')
exe.arg_arrays[0][:] = 1
exe.arg_arrays[1][:] = mx.nd.ones((4,4))
exe.arg_arrays[2][:] = 0
exe.forward(is_train=False)
# test sub exec forward
assert np.all(exe.outputs[0].asnumpy() == 4)
# test shared memory
assert np.all(exe.outputs[0].asnumpy()[:3] == 4)
# test base exec forward
exe.forward(is_train=False)
assert np.all(exe.outputs[0].asnumpy() == 4)
# data ndarray is not shared between exe and new_exe
exe.arg_arrays[0][:] = 0
# weight ndarray is shared between exe and new_exe
assert np.all(exe.arg_arrays[1].asnumpy() == 1)
@with_seed()
def test_cached_op_init():
def check_init(static_alloc, static_shape):
out = mx.sym.zeros((3,3))
flags = [('static_alloc', static_alloc), ('static_shape', static_shape)]
exe = mx.ndarray.CachedOp(out, flags)
z = exe(None, default_ctx=mx.cpu())
assert np.all(z.asnumpy() == 0)
check_init(False, False)
check_init(True, False)
check_init(True, True)
| apache-2.0 | 5,129,683,960,537,776,000 | 39.36747 | 84 | 0.520221 | false |
klebercode/protocolle | protocolle/core/admin.py | 1 | 1442 | # coding: utf-8
from django.contrib import admin
from protocolle.core.models import (TipoDocumento, Carater, Natureza,
Status, TipoInstituicao, Grupo)
class TipoDocumentoAdmin(admin.ModelAdmin):
fields = ['nome']
list_per_page = 15
list_display = ['nome']
search_fields = ['nome']
class TipoInstituicaoAdmin(admin.ModelAdmin):
fields = ['nome']
list_per_page = 15
list_display = ['nome']
search_fields = ['nome']
class GrupoAdmin(admin.ModelAdmin):
fields = ['nome', 'tipo_instituicao']
list_per_page = 15
list_display = ['nome', 'tipo_instituicao']
list_filter = ['tipo_instituicao']
search_fields = ['nome', 'tipo_instituicao__nome']
class CaraterAdmin(admin.ModelAdmin):
fields = ['nome']
list_per_page = 15
list_display = ['nome']
search_fields = ['nome']
class NaturezaAdmin(admin.ModelAdmin):
fields = ['nome']
list_per_page = 15
list_display = ['nome']
search_fields = ['nome']
class StatusAdmin(admin.ModelAdmin):
fields = ['nome']
list_per_page = 15
list_display = ['nome']
search_fields = ['nome']
admin.site.register(TipoDocumento, TipoDocumentoAdmin)
admin.site.register(TipoInstituicao, TipoInstituicaoAdmin)
admin.site.register(Grupo, GrupoAdmin)
admin.site.register(Carater, CaraterAdmin)
admin.site.register(Natureza, NaturezaAdmin)
admin.site.register(Status, StatusAdmin)
| mit | 2,028,175,029,469,497,000 | 24.298246 | 69 | 0.668516 | false |
TACC/DisplayCluster | examples/displaycluster.py | 1 | 2126 | #!/usr/bin/python
# example launch script for DisplayCluster, executed by startdisplaycluster
# this should work for most cases, but can be modified for a particular
# installation if necessary
import os
import xml.etree.ElementTree as ET
import subprocess
import shlex
# get environment variable for the base DisplayCluster directory, set by
# startdisplaycluster
dcPath = None
if 'DISPLAYCLUSTER_DIR' in os.environ:
dcPath = os.environ['DISPLAYCLUSTER_DIR']
else:
print 'could not get DISPLAYCLUSTER_DIR!'
exit(-3)
# get rank from appropriate MPI API environment variable
myRank = None
if 'OMPI_COMM_WORLD_RANK' in os.environ:
myRank = int(os.environ['OMPI_COMM_WORLD_RANK'])
elif 'OMPI_MCA_ns_nds_vpid' in os.environ:
myRank = int(os.environ['OMPI_MCA_ns_nds_vpid'])
elif 'MPIRUN_RANK' in os.environ:
myRank = int(os.environ['MPIRUN_RANK'])
elif 'PMI_ID' in os.environ:
myRank = int(os.environ['PMI_ID'])
else:
print 'could not determine MPI rank!'
exit(-4)
if myRank == 0:
# don't manipulate DISPLAY, just launch
startCommand = dcPath + '/bin/displaycluster'
subprocess.call(shlex.split(startCommand))
else:
# configuration.xml gives the display
display = None
try:
doc = ET.parse(dcPath + '/configuration.xml')
elems = doc.findall('.//process')
if len(elems) < myRank:
print 'could not find process element for rank ' + str(myRank)
exit(-5)
elem = elems[myRank - 1]
display = elem.get('display')
if display != None:
print 'rank ' + str(myRank) + ': setting DISPLAY to ' + display
os.environ['DISPLAY'] = display
else:
print 'rank ' + str(myRank) + ': setting DISPLAY to default :0'
os.environ['DISPLAY'] = ':0'
except:
print 'Error processing configuration.xml. Make sure you have created a configuration.xml and put it in ' + dcPath + '/. An example is provided in the examples/ directory.'
exit(-6)
startCommand = dcPath + '/bin/displaycluster'
subprocess.call(shlex.split(startCommand))
| bsd-2-clause | 8,378,014,200,625,861,000 | 29.371429 | 180 | 0.661806 | false |
llvtt/mongo-orchestration-1 | mongo_orchestration/process.py | 1 | 10692 | #!/usr/bin/python
# coding=utf-8
# Copyright 2012-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import json
import logging
import os
import platform
import shutil
import stat
import socket
import subprocess
import time
import tempfile
try:
from subprocess import DEVNULL
except ImportError:
DEVNULL = open(os.devnull, 'wb')
from bottle import request
from mongo_orchestration.common import DEFAULT_BIND
from mongo_orchestration.compat import reraise
from mongo_orchestration.errors import TimeoutError, RequestError
from mongo_orchestration.singleton import Singleton
logger = logging.getLogger(__name__)
def _host():
"""Get the Host from the most recent HTTP request."""
host_and_port = request.urlparts[1]
try:
host, _ = host_and_port.split(':')
except ValueError:
# No port yet. Host defaults to '127.0.0.1' in bottle.request.
return DEFAULT_BIND
return host or DEFAULT_BIND
class PortPool(Singleton):
__ports = set()
__closed = set()
__id = None
def __init__(self, min_port=1025, max_port=2000, port_sequence=None):
"""
Args:
            min_port - min port number (ignored if 'port_sequence' is not None)
            max_port - max port number (ignored if 'port_sequence' is not None)
            port_sequence - an iterable sequence of port numbers
"""
if not self.__id: # singleton checker
self.__id = id(self)
self.__init_range(min_port, max_port, port_sequence)
def __init_range(self, min_port=1025, max_port=2000, port_sequence=None):
if port_sequence:
self.__ports = set(port_sequence)
else:
self.__ports = set(range(min_port, max_port + 1))
self.__closed = set()
self.refresh()
def __check_port(self, port):
"""check port status
return True if port is free, False else
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind((_host(), port))
return True
except socket.error:
return False
finally:
s.close()
def release_port(self, port):
"""release port"""
if port in self.__closed:
self.__closed.remove(port)
self.__ports.add(port)
def port(self, check=False):
"""return next opened port
Args:
check - check is port realy free
"""
if not self.__ports: # refresh ports if sequence is empty
self.refresh()
try:
port = self.__ports.pop()
if check:
while not self.__check_port(port):
self.release_port(port)
port = self.__ports.pop()
except (IndexError, KeyError):
raise IndexError("Could not find a free port,\nclosed ports: {closed}".format(closed=self.__closed))
self.__closed.add(port)
return port
def refresh(self, only_closed=False):
"""refresh ports status
Args:
only_closed - check status only for closed ports
"""
if only_closed:
opened = filter(self.__check_port, self.__closed)
self.__closed = self.__closed.difference(opened)
self.__ports = self.__ports.union(opened)
else:
ports = self.__closed.union(self.__ports)
self.__ports = set(filter(self.__check_port, ports))
self.__closed = ports.difference(self.__ports)
def change_range(self, min_port=1025, max_port=2000, port_sequence=None):
"""change Pool port range"""
self.__init_range(min_port, max_port, port_sequence)
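# Illustrative usage sketch (not part of the original module). PortPool is a
# Singleton, so every caller shares the same pool:
#   pool = PortPool(min_port=1025, max_port=2000)
#   port = pool.port(check=True)   # get a port that is verified to be free
#   ...                            # start a process bound to `port`
#   pool.release_port(port)        # return the port to the pool when finished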
def wait_for(port_num, timeout):
"""waits while process starts.
Args:
port_num - port number
timeout - specify how long, in seconds, a command can take before times out.
return True if process started, return False if not
"""
logger.debug("wait for {port_num}".format(**locals()))
t_start = time.time()
sleeps = 0.1
while time.time() - t_start < timeout:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((_host(), port_num))
return True
except (IOError, socket.error):
time.sleep(sleeps)
finally:
s.close()
return False
def repair_mongo(name, dbpath):
"""repair mongodb after usafe shutdown"""
cmd = [name, "--dbpath", dbpath, "--repair"]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
timeout = 30
t_start = time.time()
while time.time() - t_start < timeout:
proc.stdout.flush()
line = str(proc.stdout.readline())
if "dbexit: really exiting now" in line:
return
return
def mprocess(name, config_path, port=None, timeout=180, silence_stdout=True):
"""start 'name' process with params from config_path.
Args:
name - process name or path
config_path - path to file where should be stored configuration
port - process's port
        timeout - how long, in seconds, to wait before timing out;
                  if timeout <= 0, don't wait for the process to finish starting
silence_stdout - if True (default), redirect stdout to /dev/null
    return tuple (Popen object, host) if process started; raise OSError or TimeoutError otherwise
"""
logger.debug("mprocess({name}, {config_path}, {port}, {timeout})".format(**locals()))
if not (config_path and isinstance(config_path, str) and os.path.exists(config_path)):
raise OSError("can't find config file {config_path}".format(**locals()))
cfg = read_config(config_path)
cmd = [name, "--config", config_path]
if cfg.get('port', None) is None or port:
port = port or PortPool().port(check=True)
cmd.extend(['--port', str(port)])
host = "{host}:{port}".format(host=_host(), port=port)
try:
logger.debug("execute process: {cmd}".format(**locals()))
proc = subprocess.Popen(
cmd,
stdout=DEVNULL if silence_stdout else None,
stderr=subprocess.STDOUT)
if proc.poll() is not None:
logger.debug("process is not alive")
raise OSError("Process started, but died immediately.")
except (OSError, TypeError) as err:
message = "exception while executing process: {err}".format(err=err)
logger.debug(message)
raise OSError(message)
if timeout > 0 and wait_for(port, timeout):
logger.debug("process '{name}' has started: pid={proc.pid}, host={host}".format(**locals()))
return (proc, host)
elif timeout > 0:
logger.debug("hasn't connected to pid={proc.pid} with host={host} during timeout {timeout} ".format(**locals()))
logger.debug("terminate process with pid={proc.pid}".format(**locals()))
kill_mprocess(proc)
        proc_alive(proc) and time.sleep(3)  # wait until the process has stopped
message = ("Could not connect to process during "
"{timeout} seconds".format(timeout=timeout))
raise TimeoutError(message, errno.ETIMEDOUT)
return (proc, host)
def kill_mprocess(process):
"""kill process
Args:
process - Popen object for process
"""
if process and proc_alive(process):
process.terminate()
process.communicate()
return not proc_alive(process)
def cleanup_mprocess(config_path, cfg):
"""remove all process's stuff
Args:
config_path - process's options file
cfg - process's config
"""
for key in ('keyFile', 'logPath', 'dbpath'):
remove_path(cfg.get(key, None))
isinstance(config_path, str) and os.path.exists(config_path) and remove_path(config_path)
def remove_path(path):
"""remove path from file system
If path is None - do nothing"""
if path is None or not os.path.exists(path):
return
if platform.system() == 'Windows':
# Need to have write permission before deleting the file.
os.chmod(path, stat.S_IWRITE)
try:
if os.path.isdir(path):
shutil.rmtree(path)
elif os.path.isfile(path):
shutil.os.remove(path)
except OSError:
logger.exception("Could not remove path: %s" % path)
def write_config(params, config_path=None):
"""write mongo*'s config file
Args:
params - options wich file contains
config_path - path to the config_file, will create if None
Return config_path
where config_path - path to mongo*'s options file
"""
if config_path is None:
config_path = tempfile.mktemp(prefix="mongo-")
cfg = params.copy()
if 'setParameter' in cfg:
set_parameters = cfg.pop('setParameter')
try:
for key, value in set_parameters.items():
cfg['setParameter = ' + key] = value
except AttributeError:
reraise(RequestError,
'Not a valid value for setParameter: %r '
'Expected "setParameter": {<param name> : value, ...}'
% set_parameters)
# fix boolean value
for key, value in cfg.items():
if isinstance(value, bool):
cfg[key] = json.dumps(value)
with open(config_path, 'w') as fd:
data = '\n'.join('%s=%s' % (key, item) for key, item in cfg.items())
fd.write(data)
return config_path
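# Illustrative sketch (not part of the original module); the option values are
# made up for the example:
#   path = write_config({'dbpath': '/tmp/db27017', 'journal': True,
#                        'setParameter': {'enableTestCommands': 1}})
#   # the resulting file contains mongod-style "key=value" lines, roughly:
#   #   dbpath=/tmp/db27017
#   #   journal=true
#   #   setParameter = enableTestCommands=1
#   read_config(path)   # parses the values back, via json.loads where possible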
def read_config(config_path):
"""read config_path and return options as dictionary"""
result = {}
with open(config_path, 'r') as fd:
for line in fd.readlines():
if '=' in line:
key, value = line.split('=', 1)
try:
result[key] = json.loads(value)
except ValueError:
result[key] = value.rstrip('\n')
return result
def proc_alive(process):
"""Check if process is alive. Return True or False."""
return process.poll() is None if process else False
| apache-2.0 | 3,881,842,002,143,383,600 | 32.835443 | 120 | 0.60073 | false |
hirofumi0810/tensorflow_end2end_speech_recognition | examples/csj/training/train_ctc.py | 1 | 18008 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Train the CTC model (CSJ corpus)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os.path import join, isfile, abspath
import sys
import time
import tensorflow as tf
from setproctitle import setproctitle
import yaml
import shutil
sys.path.append(abspath('../../../'))
from experiments.csj.data.load_dataset_ctc import Dataset
from experiments.csj.metrics.ctc import do_eval_cer
from utils.io.labels.sparsetensor import list2sparsetensor
from utils.training.learning_rate_controller import Controller
from utils.training.plot import plot_loss, plot_ler
from utils.training.multi_gpu import average_gradients
from utils.directory import mkdir_join, mkdir
from utils.parameter import count_total_parameters
from models.ctc.ctc import CTC
def do_train(model, params, gpu_indices):
"""Run training.
Args:
model: the model to train
params (dict): A dictionary of parameters
gpu_indices (list): GPU indices
"""
# Load dataset
train_data = Dataset(
data_type='train', train_data_size=params['train_data_size'],
label_type=params['label_type'],
batch_size=params['batch_size'], max_epoch=params['num_epoch'],
splice=params['splice'],
num_stack=params['num_stack'], num_skip=params['num_skip'],
sort_utt=True, sort_stop_epoch=params['sort_stop_epoch'],
num_gpu=len(gpu_indices))
dev_data = Dataset(
data_type='dev', train_data_size=params['train_data_size'],
label_type=params['label_type'],
batch_size=params['batch_size'], splice=params['splice'],
num_stack=params['num_stack'], num_skip=params['num_skip'],
sort_utt=False, num_gpu=len(gpu_indices))
# Tell TensorFlow that the model will be built into the default graph
with tf.Graph().as_default(), tf.device('/cpu:0'):
# Create a variable to track the global step
global_step = tf.Variable(0, name='global_step', trainable=False)
# Set optimizer
learning_rate_pl = tf.placeholder(tf.float32, name='learning_rate')
optimizer = model._set_optimizer(
params['optimizer'], learning_rate_pl)
# Calculate the gradients for each model tower
total_grads_and_vars, total_losses = [], []
decode_ops, ler_ops = [], []
all_devices = ['/gpu:%d' % i_gpu for i_gpu in range(len(gpu_indices))]
# NOTE: /cpu:0 is prepared for evaluation
with tf.variable_scope(tf.get_variable_scope()):
for i_gpu in range(len(all_devices)):
with tf.device(all_devices[i_gpu]):
with tf.name_scope('tower_gpu%d' % i_gpu) as scope:
# Define placeholders in each tower
model.create_placeholders()
# Calculate the total loss for the current tower of the
# model. This function constructs the entire model but
# shares the variables across all towers.
tower_loss, tower_logits = model.compute_loss(
model.inputs_pl_list[i_gpu],
model.labels_pl_list[i_gpu],
model.inputs_seq_len_pl_list[i_gpu],
model.keep_prob_pl_list[i_gpu],
scope)
tower_loss = tf.expand_dims(tower_loss, axis=0)
total_losses.append(tower_loss)
# Reuse variables for the next tower
tf.get_variable_scope().reuse_variables()
# Calculate the gradients for the batch of data on this
# tower
tower_grads_and_vars = optimizer.compute_gradients(
tower_loss)
# Gradient clipping
tower_grads_and_vars = model._clip_gradients(
tower_grads_and_vars)
# TODO: Optionally add gradient noise
# Keep track of the gradients across all towers
total_grads_and_vars.append(tower_grads_and_vars)
# Add to the graph each operation per tower
decode_op_tower = model.decoder(
tower_logits,
model.inputs_seq_len_pl_list[i_gpu],
beam_width=params['beam_width'])
decode_ops.append(decode_op_tower)
ler_op_tower = model.compute_ler(
decode_op_tower, model.labels_pl_list[i_gpu])
ler_op_tower = tf.expand_dims(ler_op_tower, axis=0)
ler_ops.append(ler_op_tower)
# Aggregate losses, then calculate average loss
total_losses = tf.concat(axis=0, values=total_losses)
loss_op = tf.reduce_mean(total_losses, axis=0)
ler_ops = tf.concat(axis=0, values=ler_ops)
ler_op = tf.reduce_mean(ler_ops, axis=0)
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers
average_grads_and_vars = average_gradients(total_grads_and_vars)
# Apply the gradients to adjust the shared variables.
train_op = optimizer.apply_gradients(average_grads_and_vars,
global_step=global_step)
# Define learning rate controller
lr_controller = Controller(
learning_rate_init=params['learning_rate'],
decay_start_epoch=params['decay_start_epoch'],
decay_rate=params['decay_rate'],
decay_patient_epoch=params['decay_patient_epoch'],
lower_better=True)
# Build the summary tensor based on the TensorFlow collection of
# summaries
summary_train = tf.summary.merge(model.summaries_train)
summary_dev = tf.summary.merge(model.summaries_dev)
# Add the variable initializer operation
init_op = tf.global_variables_initializer()
# Create a saver for writing training checkpoints
saver = tf.train.Saver(max_to_keep=None)
# Count total parameters
parameters_dict, total_parameters = count_total_parameters(
tf.trainable_variables())
for parameter_name in sorted(parameters_dict.keys()):
print("%s %d" % (parameter_name, parameters_dict[parameter_name]))
print("Total %d variables, %s M parameters" %
(len(parameters_dict.keys()),
"{:,}".format(total_parameters / 1000000)))
csv_steps, csv_loss_train, csv_loss_dev = [], [], []
csv_ler_train, csv_ler_dev = [], []
# Create a session for running operation on the graph
# NOTE: Start running operations on the Graph. allow_soft_placement
# must be set to True to build towers on GPU, as some of the ops do not
# have GPU implementations.
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
log_device_placement=False)) as sess:
# Instantiate a SummaryWriter to output summaries and the graph
summary_writer = tf.summary.FileWriter(
model.save_path, sess.graph)
# Initialize parameters
sess.run(init_op)
# Train model
start_time_train = time.time()
start_time_epoch = time.time()
start_time_step = time.time()
cer_dev_best = 1
not_improved_epoch = 0
learning_rate = float(params['learning_rate'])
for step, (data, is_new_epoch) in enumerate(train_data):
# Create feed dictionary for next mini batch (train)
inputs, labels, inputs_seq_len, _ = data
feed_dict_train = {}
for i_gpu in range(len(gpu_indices)):
feed_dict_train[model.inputs_pl_list[i_gpu]
] = inputs[i_gpu]
feed_dict_train[model.labels_pl_list[i_gpu]] = list2sparsetensor(
labels[i_gpu], padded_value=train_data.padded_value)
feed_dict_train[model.inputs_seq_len_pl_list[i_gpu]
] = inputs_seq_len[i_gpu]
feed_dict_train[model.keep_prob_pl_list[i_gpu]
] = 1 - float(params['dropout'])
feed_dict_train[learning_rate_pl] = learning_rate
# Update parameters
sess.run(train_op, feed_dict=feed_dict_train)
if (step + 1) % int(params['print_step'] / len(gpu_indices)) == 0:
# Create feed dictionary for next mini batch (dev)
inputs, labels, inputs_seq_len, _ = dev_data.next()[0]
feed_dict_dev = {}
for i_gpu in range(len(gpu_indices)):
feed_dict_dev[model.inputs_pl_list[i_gpu]
] = inputs[i_gpu]
feed_dict_dev[model.labels_pl_list[i_gpu]] = list2sparsetensor(
labels[i_gpu], padded_value=dev_data.padded_value)
feed_dict_dev[model.inputs_seq_len_pl_list[i_gpu]
] = inputs_seq_len[i_gpu]
feed_dict_dev[model.keep_prob_pl_list[i_gpu]] = 1.0
# Compute loss
loss_train = sess.run(loss_op, feed_dict=feed_dict_train)
loss_dev = sess.run(loss_op, feed_dict=feed_dict_dev)
csv_steps.append(step)
csv_loss_train.append(loss_train)
csv_loss_dev.append(loss_dev)
# Change to evaluation mode
for i_gpu in range(len(gpu_indices)):
feed_dict_train[model.keep_prob_pl_list[i_gpu]] = 1.0
# Compute accuracy & update event files
ler_train, summary_str_train = sess.run(
[ler_op, summary_train], feed_dict=feed_dict_train)
ler_dev, summary_str_dev = sess.run(
[ler_op, summary_dev], feed_dict=feed_dict_dev)
csv_ler_train.append(ler_train)
csv_ler_dev.append(ler_dev)
summary_writer.add_summary(summary_str_train, step + 1)
summary_writer.add_summary(summary_str_dev, step + 1)
summary_writer.flush()
duration_step = time.time() - start_time_step
print("Step %d (epoch: %.3f): loss = %.3f (%.3f) / ler = %.3f (%.3f) / lr = %.5f (%.3f min)" %
(step + 1, train_data.epoch_detail, loss_train, loss_dev, ler_train, ler_dev,
learning_rate, duration_step / 60))
sys.stdout.flush()
start_time_step = time.time()
# Save checkpoint and evaluate model per epoch
if is_new_epoch:
duration_epoch = time.time() - start_time_epoch
print('-----EPOCH:%d (%.3f min)-----' %
(train_data.epoch, duration_epoch / 60))
                    # Save figure of loss & ler
plot_loss(csv_loss_train, csv_loss_dev, csv_steps,
save_path=model.save_path)
plot_ler(csv_ler_train, csv_ler_dev, csv_steps,
label_type=params['label_type'],
save_path=model.save_path)
if train_data.epoch >= params['eval_start_epoch']:
start_time_eval = time.time()
print('=== Dev Data Evaluation ===')
cer_dev_epoch = do_eval_cer(
session=sess,
decode_ops=decode_ops,
model=model,
dataset=dev_data,
label_type=params['label_type'],
train_data_size=params['train_data_size'],
eval_batch_size=1)
print(' CER: %f %%' % (cer_dev_epoch * 100))
if cer_dev_epoch < cer_dev_best:
cer_dev_best = cer_dev_epoch
not_improved_epoch = 0
print('■■■ ↑Best Score (CER)↑ ■■■')
# Save model only (check point)
checkpoint_file = join(
model.save_path, 'model.ckpt')
save_path = saver.save(
sess, checkpoint_file, global_step=train_data.epoch)
print("Model saved in file: %s" % save_path)
else:
not_improved_epoch += 1
duration_eval = time.time() - start_time_eval
print('Evaluation time: %.3f min' %
(duration_eval / 60))
# Early stopping
if not_improved_epoch == params['not_improved_patient_epoch']:
break
# Update learning rate
learning_rate = lr_controller.decay_lr(
learning_rate=learning_rate,
epoch=train_data.epoch,
value=cer_dev_epoch)
start_time_epoch = time.time()
duration_train = time.time() - start_time_train
print('Total time: %.3f hour' % (duration_train / 3600))
# Training was finished correctly
with open(join(model.model_dir, 'complete.txt'), 'w') as f:
f.write('')
def main(config_path, model_save_path, gpu_indices):
# Load a config file (.yml)
with open(config_path, "r") as f:
config = yaml.load(f)
params = config['param']
# Except for a blank label
if params['label_type'] == 'kana':
params['num_classes'] = 146
elif params['label_type'] == 'kana_divide':
params['num_classes'] = 147
elif params['label_type'] == 'kanji':
if params['train_data_size'] == 'train_subset':
params['num_classes'] = 2981
elif params['train_data_size'] == 'train_fullset':
params['num_classes'] = 3385
elif params['label_type'] == 'kanji_divide':
if params['train_data_size'] == 'train_subset':
params['num_classes'] = 2982
elif params['train_data_size'] == 'train_fullset':
params['num_classes'] = 3386
else:
raise TypeError
# Model setting
model = CTC(encoder_type=params['encoder_type'],
input_size=params['input_size'],
splice=params['splice'],
num_stack=params['num_stack'],
num_units=params['num_units'],
num_layers=params['num_layers'],
num_classes=params['num_classes'],
lstm_impl=params['lstm_impl'],
use_peephole=params['use_peephole'],
parameter_init=params['weight_init'],
clip_grad_norm=params['clip_grad_norm'],
clip_activation=params['clip_activation'],
num_proj=params['num_proj'],
weight_decay=params['weight_decay'])
# Set process name
setproctitle(
'tf_csj_' + model.name + '_' + params['train_data_size'] + '_' + params['label_type'])
model.name += '_' + str(params['num_units'])
model.name += '_' + str(params['num_layers'])
model.name += '_' + params['optimizer']
model.name += '_lr' + str(params['learning_rate'])
if params['num_proj'] != 0:
model.name += '_proj' + str(params['num_proj'])
if params['dropout'] != 0:
model.name += '_drop' + str(params['dropout'])
if params['num_stack'] != 1:
model.name += '_stack' + str(params['num_stack'])
if params['weight_decay'] != 0:
model.name += '_wd' + str(params['weight_decay'])
if params['bottleneck_dim'] != 0:
model.name += '_bottle' + str(params['bottleneck_dim'])
if len(gpu_indices) >= 2:
model.name += '_gpu' + str(len(gpu_indices))
# Set save path
model.save_path = mkdir_join(
model_save_path, 'ctc', params['label_type'],
params['train_data_size'], model.name)
# Reset model directory
model_index = 0
new_model_path = model.save_path
while True:
if isfile(join(new_model_path, 'complete.txt')):
            # Training of the first model has been finished
model_index += 1
new_model_path = model.save_path + '_' + str(model_index)
elif isfile(join(new_model_path, 'config.yml')):
            # Training of the first model has not been finished yet
model_index += 1
new_model_path = model.save_path + '_' + str(model_index)
else:
break
model.save_path = mkdir(new_model_path)
# Save config file
shutil.copyfile(config_path, join(model.save_path, 'config.yml'))
sys.stdout = open(join(model.save_path, 'train.log'), 'w')
# TODO(hirofumi): change to logger
do_train(model=model, params=params, gpu_indices=gpu_indices)
if __name__ == '__main__':
args = sys.argv
if len(args) != 3 and len(args) != 4:
raise ValueError
main(config_path=args[1], model_save_path=args[2],
gpu_indices=list(map(int, args[3].split(','))))
| mit | 4,144,911,333,285,178,000 | 43.424691 | 114 | 0.525789 | false |
bigdig/vnpy | vnpy/app/cta_strategy/ctaTemplatePatch/utility.py | 1 | 2618 | # encoding: UTF-8
'''
This file contains the strategy-development template for the CTA engine.
It adds some basic strategy attributes and variables; it does not implement order-placement logic.
'''
from vnpy.app.cta_strategy.base import EngineType
from datetime import MINYEAR
import logging
import numpy as np
from datetime import datetime,time,date,timedelta
import json,time
class DateTimeEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, datetime):
return o.isoformat()
return json.JSONEncoder.default(self, o)
#--------------------------------------------------------------------------
def tradeDictToJSON(trade):
"""交易信息格式化"""
return json.dumps(trade.__dict__,cls=DateTimeEncoder,indent=4,ensure_ascii=False)
def isclose(a, b, ndigits = 10):
return round(a-b, ndigits) == 0
#########################################################################
def timeit(method):
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
if 'log_time' in kw:
name = kw.get('log_name', method.__name__.upper())
kw['log_time'][name] = int((te - ts) * 1000)
else:
print ('%r %2.2f ms' % \
(method.__name__, (te - ts) * 1000))
return result
return timed
# def defaultCache(func):
# from vnpy.trader.app.ctaStrategy.caching import Cache
# func1 = Cache(ttl=60*60,maxsize=1024*1024*128)(func)
# func2 = Cache(ttl=60*60*24,maxsize=1024*1024*128, filepath='./temp/' + func.__name__)(func)
# #@timeit
# def decorator(self, *args, **kwargs):
# if self.cta_engine.engineType == ENGINETYPE_TRADING:
# return func1(self,*args, **kwargs)
# else:
# return func2(self,*args, **kwargs)
# return decorator
#---------------------------------------------------------------
def diffVolume(volumeArray):
#return volumeArray
"""
    Diff the cumulative volume across trading-day boundaries (i.e. convert
    cumulative volume into per-bar volume).
"""
volume = np.diff(volumeArray)
#volume = np.where(volume<0,0,volume)
    volume[volume < 1] = 1
    # bug-fix: compute from the continuous volume series, allowing for the gap
    # introduced at the start of a new trading day
    # update the last difference
if volume[-1] < 0:
volume[-1] = volumeArray[-1]
    mask = volume < 0  # entries below 0 are the first bar of a new trading day
    # replace them with the volume of the trading day's first bar
mask_ori = mask.copy()
mask_ori[1:] = mask[:-1]
mask_ori[0] = False
    # -2: the diff operation reduces the array size by 1
volume[mask] = volumeArray[:-1][mask_ori]
return volume | mit | 9,177,389,331,629,231,000 | 27.792683 | 97 | 0.550847 | false |
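# Illustrative example (not part of the original module): for a cumulative
# volume series that resets at a new trading day, e.g. [100, 150, 160, 20, 45],
# np.diff gives [50, 10, -140, 25]; the clamp above then maps every entry below
# 1 (including the negative day-boundary value) to 1, so diffVolume returns
# [50, 10, 1, 25].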
s3rvac/retdec-python | tests/matchers.py | 1 | 1319 | #
# Project: retdec-python
# Copyright: (c) 2015 by Petr Zemek <[email protected]> and contributors
# License: MIT, see the LICENSE file for more details
#
"""Matchers for tests."""
import abc
class Matcher(metaclass=abc.ABCMeta):
"""A base class of all matchers."""
@abc.abstractmethod
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
return not self == other
def __repr__(self):
name = self.__class__.__name__
attr_list = ', '.join(
'{}={!r}'.format(key, value) for key, value in self.__dict__.items()
)
return '{}({})'.format(name, attr_list)
class Anything(Matcher):
"""A matcher that matches anything."""
def __eq__(self, other):
return True
class AnyDictWith(Matcher):
"""A matcher that matches and ``dict`` with the given keys and values.
The ``dict`` may also have other keys and values, which are not considered
during the matching.
"""
def __init__(self, **kwargs):
self.__dict__ = kwargs
def __eq__(self, other):
if not isinstance(other, dict):
return False
for name, value in self.__dict__.items():
if name not in other or other[name] != value:
return False
return True
| mit | 6,625,008,059,489,529,000 | 23.886792 | 80 | 0.579227 | false |
giacomov/3ML | threeML/utils/bayesian_blocks.py | 1 | 9886 | # Author: Giacomo Vianello ([email protected])
import logging
import sys
import numexpr
import numpy as np
from threeML.io.progress_bar import progress_bar
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("bayesian_blocks")
__all__ = ["bayesian_blocks", "bayesian_blocks_not_unique"]
def bayesian_blocks_not_unique(tt, ttstart, ttstop, p0):
# Verify that the input array is one-dimensional
tt = np.asarray(tt, dtype=float)
assert tt.ndim == 1
# Now create the array of unique times
unique_t = np.unique(tt)
t = tt
tstart = ttstart
tstop = ttstop
# Create initial cell edges (Voronoi tessellation) using the unique time stamps
edges = np.concatenate([[tstart], 0.5 * (unique_t[1:] + unique_t[:-1]), [tstop]])
# The last block length is 0 by definition
block_length = tstop - edges
if np.sum((block_length <= 0)) > 1:
raise RuntimeError(
"Events appears to be out of order! Check for order, or duplicated events."
)
N = unique_t.shape[0]
# arrays to store the best configuration
best = np.zeros(N, dtype=float)
last = np.zeros(N, dtype=int)
# Pre-computed priors (for speed)
# eq. 21 from Scargle 2012
priors = 4 - np.log(73.53 * p0 * np.power(np.arange(1, N + 1), -0.478))
# Count how many events are in each Voronoi cell
x, _ = np.histogram(t, edges)
# Speed tricks: resolve once for all the functions which will be used
# in the loop
cumsum = np.cumsum
log = np.log
argmax = np.argmax
numexpr_evaluate = numexpr.evaluate
arange = np.arange
# Decide the step for reporting progress
incr = max(int(float(N) / 100.0 * 10), 1)
logger.debug("Finding blocks...")
# This is where the computation happens. Following Scargle et al. 2012.
# This loop has been optimized for speed:
# * the expression for the fitness function has been rewritten to
# avoid multiple log computations, and to avoid power computations
# * the use of scipy.weave and numexpr has been evaluated. The latter
# gives a big gain (~40%) if used for the fitness function. No other
# gain is obtained by using it anywhere else
# Set numexpr precision to low (more than enough for us), which is
# faster than high
oldaccuracy = numexpr.set_vml_accuracy_mode("low")
numexpr.set_num_threads(1)
numexpr.set_vml_num_threads(1)
with progress_bar(N) as progress:
for R in range(N):
br = block_length[R + 1]
T_k = block_length[: R + 1] - br
# N_k: number of elements in each block
# This expression has been simplified for the case of
# unbinned events (i.e., one element in each block)
# It was:
N_k = cumsum(x[: R + 1][::-1])[::-1]
# Now it is:
# N_k = arange(R + 1, 0, -1)
# Evaluate fitness function
# This is the slowest part, which I'm speeding up by using
# numexpr. It provides a ~40% gain in execution speed.
fit_vec = numexpr_evaluate(
"""N_k * log(N_k/ T_k) """,
optimization="aggressive",
local_dict={"N_k": N_k, "T_k": T_k},
)
p = priors[R]
A_R = fit_vec - p
A_R[1:] += best[:R]
i_max = argmax(A_R)
last[R] = i_max
best[R] = A_R[i_max]
progress.increase()
numexpr.set_vml_accuracy_mode(oldaccuracy)
logger.debug("Done\n")
# Now find blocks
change_points = np.zeros(N, dtype=int)
i_cp = N
ind = N
while True:
i_cp -= 1
change_points[i_cp] = ind
if ind == 0:
break
ind = last[ind - 1]
change_points = change_points[i_cp:]
finalEdges = edges[change_points]
return np.asarray(finalEdges)
def bayesian_blocks(tt, ttstart, ttstop, p0, bkg_integral_distribution=None):
"""
Divide a series of events characterized by their arrival time in blocks
of perceptibly constant count rate. If the background integral distribution
is given, divide the series in blocks where the difference with respect to
the background is perceptibly constant.
:param tt: arrival times of the events
:param ttstart: the start of the interval
:param ttstop: the stop of the interval
:param p0: the false positive probability. This is used to decide the penalization on the likelihood, so this
parameter affects the number of blocks
    :param bkg_integral_distribution: (default: None) If given, the algorithm accounts for the presence of the background and
finds changes in rate with respect to the background
:return: the np.array containing the edges of the blocks
"""
# Verify that the input array is one-dimensional
tt = np.asarray(tt, dtype=float)
assert tt.ndim == 1
if bkg_integral_distribution is not None:
# Transforming the inhomogeneous Poisson process into an homogeneous one with rate 1,
# by changing the time axis according to the background rate
logger.debug(
"Transforming the inhomogeneous Poisson process to a homogeneous one with rate 1..."
)
t = np.array(bkg_integral_distribution(tt))
logger.debug("done")
# Now compute the start and stop time in the new system
tstart = bkg_integral_distribution(ttstart)
tstop = bkg_integral_distribution(ttstop)
else:
t = tt
tstart = ttstart
tstop = ttstop
# Create initial cell edges (Voronoi tessellation)
edges = np.concatenate([[t[0]], 0.5 * (t[1:] + t[:-1]), [t[-1]]])
# Create the edges also in the original time system
edges_ = np.concatenate([[tt[0]], 0.5 * (tt[1:] + tt[:-1]), [tt[-1]]])
# Create a lookup table to be able to transform back from the transformed system
# to the original one
lookup_table = {key: value for (key, value) in zip(edges, edges_)}
# The last block length is 0 by definition
block_length = tstop - edges
if np.sum((block_length <= 0)) > 1:
raise RuntimeError(
"Events appears to be out of order! Check for order, or duplicated events."
)
N = t.shape[0]
# arrays to store the best configuration
best = np.zeros(N, dtype=float)
last = np.zeros(N, dtype=int)
# eq. 21 from Scargle 2012
prior = 4 - np.log(73.53 * p0 * (N ** -0.478))
logger.debug("Finding blocks...")
# This is where the computation happens. Following Scargle et al. 2012.
# This loop has been optimized for speed:
# * the expression for the fitness function has been rewritten to
# avoid multiple log computations, and to avoid power computations
# * the use of scipy.weave and numexpr has been evaluated. The latter
# gives a big gain (~40%) if used for the fitness function. No other
# gain is obtained by using it anywhere else
# Set numexpr precision to low (more than enough for us), which is
# faster than high
oldaccuracy = numexpr.set_vml_accuracy_mode("low")
numexpr.set_num_threads(1)
numexpr.set_vml_num_threads(1)
# Speed tricks: resolve once for all the functions which will be used
# in the loop
numexpr_evaluate = numexpr.evaluate
numexpr_re_evaluate = numexpr.re_evaluate
# Pre-compute this
aranges = np.arange(N + 1, 0, -1)
for R in range(N):
br = block_length[R + 1]
T_k = (
block_length[: R + 1] - br
) # this looks like it is not used, but it actually is,
# inside the numexpr expression
# N_k: number of elements in each block
# This expression has been simplified for the case of
# unbinned events (i.e., one element in each block)
# It was:
# N_k = cumsum(x[:R + 1][::-1])[::-1]
# Now it is:
N_k = aranges[N - R :]
# where aranges has been pre-computed
# Evaluate fitness function
# This is the slowest part, which I'm speeding up by using
# numexpr. It provides a ~40% gain in execution speed.
# The first time we need to "compile" the expression in numexpr,
# all the other times we can reuse it
if R == 0:
fit_vec = numexpr_evaluate(
"""N_k * log(N_k/ T_k) """,
optimization="aggressive",
local_dict={"N_k": N_k, "T_k": T_k},
)
else:
fit_vec = numexpr_re_evaluate(local_dict={"N_k": N_k, "T_k": T_k})
A_R = fit_vec - prior # type: np.ndarray
A_R[1:] += best[:R]
i_max = A_R.argmax()
last[R] = i_max
best[R] = A_R[i_max]
numexpr.set_vml_accuracy_mode(oldaccuracy)
logger.debug("Done\n")
# Now peel off and find the blocks (see the algorithm in Scargle et al.)
change_points = np.zeros(N, dtype=int)
i_cp = N
ind = N
while True:
i_cp -= 1
change_points[i_cp] = ind
if ind == 0:
break
ind = last[ind - 1]
change_points = change_points[i_cp:]
edg = edges[change_points]
# Transform the found edges back into the original time system
if bkg_integral_distribution is not None:
final_edges = [lookup_table[x] for x in edg]
else:
final_edges = edg
    # Now fix the first and last edge so that they are exactly ttstart and ttstop
final_edges[0] = ttstart
final_edges[-1] = ttstop
return np.asarray(final_edges)
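# Minimal usage sketch for bayesian_blocks (illustrative; it assumes a constant
# background of `bkg_rate` events per unit time, whose integral distribution is
# then simply linear in time):
#     bkg_rate = 5.0
#     edges = bayesian_blocks(tt, 0, 1000, 1e-3,
#                             bkg_integral_distribution=lambda t: bkg_rate * np.asarray(t))
# With bkg_integral_distribution=None the arrival times are used as-is, as in
# the profiling example below.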
# To be run with a profiler
if __name__ == "__main__":
tt = np.random.uniform(0, 1000, int(sys.argv[1]))
tt.sort()
with open("sim.txt", "w+") as f:
for t in tt:
f.write("%s\n" % (t))
res = bayesian_blocks(tt, 0, 1000, 1e-3, None)
print(res)
| bsd-3-clause | 6,241,699,055,371,695,000 | 28.248521 | 124 | 0.606312 | false |
WeakGroup/twitter-rec | interface/website/website/settings.py | 1 | 2100 | """
Django settings for website project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0a$bgpz#bn3hid==*$ee^@v79^6&q0_x$&&i=@(jivwnrv73b^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'website.urls'
WSGI_APPLICATION = 'website.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
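# A PostgreSQL configuration would look like the sketch below (illustrative
# only; NAME, USER and PASSWORD are placeholders, not settings of this project):
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.postgresql_psycopg2',
#         'NAME': 'website',
#         'USER': 'website',
#         'PASSWORD': 'change-me',
#         'HOST': '127.0.0.1',
#         'PORT': '5432',
#     }
# }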
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
| gpl-2.0 | 3,130,661,027,178,072,000 | 22.595506 | 71 | 0.713333 | false |
dmrtsvetkov/flowercoin | qa/rpc-tests/util.py | 1 | 12418 | # Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2014-2015 The Flowercoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(1)
def sync_mempools(rpc_connections):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(1)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "flowercoin.conf"), 'w') as f:
        f.write("regtest=1\n")
        f.write("rpcuser=rt\n")
        f.write("rpcpassword=rt\n")
        f.write("port="+str(p2p_port(n))+"\n")
        f.write("rpcport="+str(rpc_port(n))+"\n")
return datadir
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
flowercoind and flowercoin-cli must be in search path.
"""
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
# Create cache directories, run flowercoinds:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("BITCOIND", "flowercoind"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
subprocess.check_call([ os.getenv("BITCOINCLI", "flowercoin-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:[email protected]:%d"%(rpc_port(i),)
rpcs.append(AuthServiceProxy(url))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 156 seconds apart, starting
# at 1 Jan 2015
block_time = 1420070400
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].setgenerate(True, 1)
block_time += 156
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in flowercoin.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
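# Expected behaviour of _rpchost_to_args (illustrative sketch, not exercised here):
#     _rpchost_to_args(None)              -> []
#     _rpchost_to_args("127.0.0.1")       -> ['-rpcconnect=127.0.0.1']
#     _rpchost_to_args("127.0.0.1:18332") -> ['-rpcconnect=127.0.0.1', '-rpcport=18332']
#     _rpchost_to_args("[::1]:18332")     -> ['-rpcconnect=::1', '-rpcport=18332']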
def start_node(i, dirname, extra_args=None, rpchost=None):
"""
Start a flowercoind and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
args = [ os.getenv("BITCOIND", "flowercoind"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
subprocess.check_call([ os.getenv("BITCOINCLI", "flowercoin-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
proxy = AuthServiceProxy(url)
proxy.url = url # store URL on proxy for info
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
"""
Start multiple flowercoinds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
return [ start_node(i, dirname, extra_args[i], rpchost) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
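# Worked example for make_change (illustrative numbers): with amount_in=10,
# amount_out=2 and fee=0.5, amount is 2.5 and change is 7.5, which exceeds
# 2*amount, so the change is split: 3.75 goes to an extra change address and
# the remaining 3.75 to a second fresh address from from_node.getnewaddress().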
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
    Create & broadcast a zero-priority transaction.
    Returns (txid, hex-encoded-txdata)
    Ensures the transaction is zero-priority by first creating a send-to-self,
    then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
| mit | -610,950,820,509,075,100 | 35.098837 | 110 | 0.631905 | false |
dims/nova | nova/cells/rpcapi.py | 1 | 28170 | # Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of nova-cells RPC API (for talking to the nova-cells service
within a cell).
This is different from communication between child and parent nova-cells
services. That communication is handled by the cells driver via the
messaging module.
"""
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
import nova.conf
from nova import exception
from nova.i18n import _LE
from nova import objects
from nova.objects import base as objects_base
from nova import rpc
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class CellsAPI(object):
'''Cells client-side RPC API
API version history:
* 1.0 - Initial version.
* 1.1 - Adds get_cell_info_for_neighbors() and sync_instances()
* 1.2 - Adds service_get_all(), service_get_by_compute_host(),
and proxy_rpc_to_compute_manager()
* 1.3 - Adds task_log_get_all()
* 1.4 - Adds compute_node_get(), compute_node_get_all(), and
compute_node_stats()
* 1.5 - Adds actions_get(), action_get_by_request_id(), and
action_events_get()
* 1.6 - Adds consoleauth_delete_tokens() and validate_console_port()
... Grizzly supports message version 1.6. So, any changes to existing
            methods in 1.x after that point should be done such that they can
handle the version_cap being set to 1.6.
* 1.7 - Adds service_update()
* 1.8 - Adds build_instances(), deprecates schedule_run_instance()
* 1.9 - Adds get_capacities()
* 1.10 - Adds bdm_update_or_create_at_top(), and bdm_destroy_at_top()
* 1.11 - Adds get_migrations()
* 1.12 - Adds instance_start() and instance_stop()
* 1.13 - Adds cell_create(), cell_update(), cell_delete(), and
cell_get()
* 1.14 - Adds reboot_instance()
* 1.15 - Adds suspend_instance() and resume_instance()
* 1.16 - Adds instance_update_from_api()
* 1.17 - Adds get_host_uptime()
* 1.18 - Adds terminate_instance() and soft_delete_instance()
* 1.19 - Adds pause_instance() and unpause_instance()
* 1.20 - Adds resize_instance() and live_migrate_instance()
* 1.21 - Adds revert_resize() and confirm_resize()
* 1.22 - Adds reset_network()
* 1.23 - Adds inject_network_info()
* 1.24 - Adds backup_instance() and snapshot_instance()
... Havana supports message version 1.24. So, any changes to existing
methods in 1.x after that point should be done such that they can
handle the version_cap being set to 1.24.
* 1.25 - Adds rebuild_instance()
* 1.26 - Adds service_delete()
* 1.27 - Updates instance_delete_everywhere() for instance objects
... Icehouse supports message version 1.27. So, any changes to
existing methods in 1.x after that point should be done such that they
can handle the version_cap being set to 1.27.
* 1.28 - Make bdm_update_or_create_at_top and use bdm objects
* 1.29 - Adds set_admin_password()
... Juno supports message version 1.29. So, any changes to
existing methods in 1.x after that point should be done such that they
can handle the version_cap being set to 1.29.
* 1.30 - Make build_instances() use flavor object
* 1.31 - Add clean_shutdown to stop, resize, rescue, and shelve
* 1.32 - Send objects for instances in build_instances()
* 1.33 - Add clean_shutdown to resize_instance()
* 1.34 - build_instances uses BlockDeviceMapping objects, drops
legacy_bdm argument
... Kilo supports message version 1.34. So, any changes to
existing methods in 1.x after that point should be done such that they
can handle the version_cap being set to 1.34.
* 1.35 - Make instance_update_at_top, instance_destroy_at_top
and instance_info_cache_update_at_top use instance objects
* 1.36 - Added 'delete_type' parameter to terminate_instance()
* 1.37 - Add get_keypair_at_top to fetch keypair from api cell
... Liberty supports message version 1.37. So, any changes to
existing methods in 1.x after that point should be done such that they
can handle the version_cap being set to 1.37.
'''
VERSION_ALIASES = {
'grizzly': '1.6',
'havana': '1.24',
'icehouse': '1.27',
'juno': '1.29',
'kilo': '1.34',
'liberty': '1.37',
}
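    # Illustrative effect of the aliases above: with "cells=juno" under
    # [upgrade_levels] in the configuration, __init__ below resolves the
    # version_cap to '1.29', so can_send_version() makes the methods fall back
    # to 1.29-compatible argument formats; an explicit value such as
    # "cells=1.31" is passed through unchanged by the dict lookup.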
def __init__(self):
super(CellsAPI, self).__init__()
target = messaging.Target(topic=CONF.cells.topic, version='1.0')
version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.cells,
CONF.upgrade_levels.cells)
# NOTE(sbauza): Yes, this is ugly but cells_utils is calling cells.db
# which itself calls cells.rpcapi... You meant import cycling ? Gah.
from nova.cells import utils as cells_utils
serializer = cells_utils.ProxyObjectSerializer()
self.client = rpc.get_client(target,
version_cap=version_cap,
serializer=serializer)
def cast_compute_api_method(self, ctxt, cell_name, method,
*args, **kwargs):
"""Make a cast to a compute API method in a certain cell."""
method_info = {'method': method,
'method_args': args,
'method_kwargs': kwargs}
self.client.cast(ctxt, 'run_compute_api_method',
cell_name=cell_name,
method_info=method_info,
call=False)
def call_compute_api_method(self, ctxt, cell_name, method,
*args, **kwargs):
"""Make a call to a compute API method in a certain cell."""
method_info = {'method': method,
'method_args': args,
'method_kwargs': kwargs}
return self.client.call(ctxt, 'run_compute_api_method',
cell_name=cell_name,
method_info=method_info,
call=True)
def build_instances(self, ctxt, **kwargs):
"""Build instances."""
build_inst_kwargs = kwargs
instances = build_inst_kwargs['instances']
build_inst_kwargs['image'] = jsonutils.to_primitive(
build_inst_kwargs['image'])
version = '1.34'
if self.client.can_send_version('1.34'):
build_inst_kwargs.pop('legacy_bdm', None)
else:
bdm_p = objects_base.obj_to_primitive(
build_inst_kwargs['block_device_mapping'])
build_inst_kwargs['block_device_mapping'] = bdm_p
version = '1.32'
if not self.client.can_send_version('1.32'):
instances_p = [jsonutils.to_primitive(inst) for inst in instances]
build_inst_kwargs['instances'] = instances_p
version = '1.30'
if not self.client.can_send_version('1.30'):
if 'filter_properties' in build_inst_kwargs:
filter_properties = build_inst_kwargs['filter_properties']
flavor = filter_properties['instance_type']
flavor_p = objects_base.obj_to_primitive(flavor)
filter_properties['instance_type'] = flavor_p
version = '1.8'
cctxt = self.client.prepare(version=version)
cctxt.cast(ctxt, 'build_instances',
build_inst_kwargs=build_inst_kwargs)
def instance_update_at_top(self, ctxt, instance):
"""Update instance at API level."""
version = '1.35'
if not self.client.can_send_version('1.35'):
instance = objects_base.obj_to_primitive(instance)
version = '1.34'
cctxt = self.client.prepare(version=version)
cctxt.cast(ctxt, 'instance_update_at_top', instance=instance)
def instance_destroy_at_top(self, ctxt, instance):
"""Destroy instance at API level."""
version = '1.35'
if not self.client.can_send_version('1.35'):
instance = objects_base.obj_to_primitive(instance)
version = '1.34'
cctxt = self.client.prepare(version=version)
cctxt.cast(ctxt, 'instance_destroy_at_top', instance=instance)
def instance_delete_everywhere(self, ctxt, instance, delete_type):
"""Delete instance everywhere. delete_type may be 'soft'
or 'hard'. This is generally only used to resolve races
        when the API cell doesn't know to which cell an instance belongs.
"""
if self.client.can_send_version('1.27'):
version = '1.27'
else:
version = '1.0'
instance = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(version=version)
cctxt.cast(ctxt, 'instance_delete_everywhere', instance=instance,
delete_type=delete_type)
def instance_fault_create_at_top(self, ctxt, instance_fault):
"""Create an instance fault at the top."""
instance_fault_p = jsonutils.to_primitive(instance_fault)
self.client.cast(ctxt, 'instance_fault_create_at_top',
instance_fault=instance_fault_p)
def bw_usage_update_at_top(self, ctxt, uuid, mac, start_period,
bw_in, bw_out, last_ctr_in, last_ctr_out, last_refreshed=None):
"""Broadcast upwards that bw_usage was updated."""
bw_update_info = {'uuid': uuid,
'mac': mac,
'start_period': start_period,
'bw_in': bw_in,
'bw_out': bw_out,
'last_ctr_in': last_ctr_in,
'last_ctr_out': last_ctr_out,
'last_refreshed': last_refreshed}
self.client.cast(ctxt, 'bw_usage_update_at_top',
bw_update_info=bw_update_info)
def instance_info_cache_update_at_top(self, ctxt, instance_info_cache):
"""Broadcast up that an instance's info_cache has changed."""
version = '1.35'
instance = objects.Instance(uuid=instance_info_cache.instance_uuid,
info_cache=instance_info_cache)
if not self.client.can_send_version('1.35'):
instance = objects_base.obj_to_primitive(instance)
version = '1.34'
cctxt = self.client.prepare(version=version)
cctxt.cast(ctxt, 'instance_update_at_top', instance=instance)
def get_cell_info_for_neighbors(self, ctxt):
"""Get information about our neighbor cells from the manager."""
if not CONF.cells.enable:
return []
cctxt = self.client.prepare(version='1.1')
return cctxt.call(ctxt, 'get_cell_info_for_neighbors')
def sync_instances(self, ctxt, project_id=None, updated_since=None,
deleted=False):
"""Ask all cells to sync instance data."""
cctxt = self.client.prepare(version='1.1')
return cctxt.cast(ctxt, 'sync_instances',
project_id=project_id,
updated_since=updated_since,
deleted=deleted)
def service_get_all(self, ctxt, filters=None):
"""Ask all cells for their list of services."""
cctxt = self.client.prepare(version='1.2')
return cctxt.call(ctxt, 'service_get_all', filters=filters)
def service_get_by_compute_host(self, ctxt, host_name):
"""Get the service entry for a host in a particular cell. The
cell name should be encoded within the host_name.
"""
cctxt = self.client.prepare(version='1.2')
return cctxt.call(ctxt, 'service_get_by_compute_host',
host_name=host_name)
def get_host_uptime(self, context, host_name):
"""Gets the host uptime in a particular cell. The cell name should
be encoded within the host_name
"""
cctxt = self.client.prepare(version='1.17')
return cctxt.call(context, 'get_host_uptime', host_name=host_name)
def service_update(self, ctxt, host_name, binary, params_to_update):
"""Used to enable/disable a service. For compute services, setting to
        disabled stops new builds from arriving on that host.
:param host_name: the name of the host machine that the service is
running
:param binary: The name of the executable that the service runs as
:param params_to_update: eg. {'disabled': True}
"""
cctxt = self.client.prepare(version='1.7')
return cctxt.call(ctxt, 'service_update',
host_name=host_name,
binary=binary,
params_to_update=params_to_update)
def service_delete(self, ctxt, cell_service_id):
"""Deletes the specified service."""
cctxt = self.client.prepare(version='1.26')
cctxt.call(ctxt, 'service_delete',
cell_service_id=cell_service_id)
def proxy_rpc_to_manager(self, ctxt, rpc_message, topic, call=False,
timeout=None):
"""Proxy RPC to a compute manager. The host in the topic
should be encoded with the target cell name.
"""
cctxt = self.client.prepare(version='1.2', timeout=timeout)
return cctxt.call(ctxt, 'proxy_rpc_to_manager',
topic=topic,
rpc_message=rpc_message,
call=call,
timeout=timeout)
def task_log_get_all(self, ctxt, task_name, period_beginning,
period_ending, host=None, state=None):
"""Get the task logs from the DB in child cells."""
cctxt = self.client.prepare(version='1.3')
return cctxt.call(ctxt, 'task_log_get_all',
task_name=task_name,
period_beginning=period_beginning,
period_ending=period_ending,
host=host, state=state)
def compute_node_get(self, ctxt, compute_id):
"""Get a compute node by ID in a specific cell."""
cctxt = self.client.prepare(version='1.4')
return cctxt.call(ctxt, 'compute_node_get', compute_id=compute_id)
def compute_node_get_all(self, ctxt, hypervisor_match=None):
"""Return list of compute nodes in all cells, optionally
filtering by hypervisor host.
"""
cctxt = self.client.prepare(version='1.4')
return cctxt.call(ctxt, 'compute_node_get_all',
hypervisor_match=hypervisor_match)
def compute_node_stats(self, ctxt):
"""Return compute node stats from all cells."""
cctxt = self.client.prepare(version='1.4')
return cctxt.call(ctxt, 'compute_node_stats')
def actions_get(self, ctxt, instance):
if not instance['cell_name']:
raise exception.InstanceUnknownCell(instance_uuid=instance['uuid'])
cctxt = self.client.prepare(version='1.5')
return cctxt.call(ctxt, 'actions_get',
cell_name=instance['cell_name'],
instance_uuid=instance['uuid'])
def action_get_by_request_id(self, ctxt, instance, request_id):
if not instance['cell_name']:
raise exception.InstanceUnknownCell(instance_uuid=instance['uuid'])
cctxt = self.client.prepare(version='1.5')
return cctxt.call(ctxt, 'action_get_by_request_id',
cell_name=instance['cell_name'],
instance_uuid=instance['uuid'],
request_id=request_id)
def action_events_get(self, ctxt, instance, action_id):
if not instance['cell_name']:
raise exception.InstanceUnknownCell(instance_uuid=instance['uuid'])
cctxt = self.client.prepare(version='1.5')
return cctxt.call(ctxt, 'action_events_get',
cell_name=instance['cell_name'],
action_id=action_id)
def consoleauth_delete_tokens(self, ctxt, instance_uuid):
"""Delete consoleauth tokens for an instance in API cells."""
cctxt = self.client.prepare(version='1.6')
cctxt.cast(ctxt, 'consoleauth_delete_tokens',
instance_uuid=instance_uuid)
def validate_console_port(self, ctxt, instance_uuid, console_port,
console_type):
"""Validate console port with child cell compute node."""
cctxt = self.client.prepare(version='1.6')
return cctxt.call(ctxt, 'validate_console_port',
instance_uuid=instance_uuid,
console_port=console_port,
console_type=console_type)
def get_capacities(self, ctxt, cell_name=None):
cctxt = self.client.prepare(version='1.9')
return cctxt.call(ctxt, 'get_capacities', cell_name=cell_name)
def bdm_update_or_create_at_top(self, ctxt, bdm, create=None):
"""Create or update a block device mapping in API cells. If
create is True, only try to create. If create is None, try to
update but fall back to create. If create is False, only attempt
to update. This maps to nova-conductor's behavior.
"""
if self.client.can_send_version('1.28'):
version = '1.28'
else:
version = '1.10'
bdm = objects_base.obj_to_primitive(bdm)
cctxt = self.client.prepare(version=version)
try:
cctxt.cast(ctxt, 'bdm_update_or_create_at_top',
bdm=bdm, create=create)
except Exception:
LOG.exception(_LE("Failed to notify cells of BDM update/create."))
def bdm_destroy_at_top(self, ctxt, instance_uuid, device_name=None,
volume_id=None):
"""Broadcast upwards that a block device mapping was destroyed.
One of device_name or volume_id should be specified.
"""
cctxt = self.client.prepare(version='1.10')
try:
cctxt.cast(ctxt, 'bdm_destroy_at_top',
instance_uuid=instance_uuid,
device_name=device_name,
volume_id=volume_id)
except Exception:
LOG.exception(_LE("Failed to notify cells of BDM destroy."))
def get_migrations(self, ctxt, filters):
"""Get all migrations applying the filters."""
cctxt = self.client.prepare(version='1.11')
return cctxt.call(ctxt, 'get_migrations', filters=filters)
def instance_update_from_api(self, ctxt, instance, expected_vm_state,
expected_task_state, admin_state_reset):
"""Update an instance in its cell.
This method takes a new-world instance object.
"""
cctxt = self.client.prepare(version='1.16')
cctxt.cast(ctxt, 'instance_update_from_api',
instance=instance,
expected_vm_state=expected_vm_state,
expected_task_state=expected_task_state,
admin_state_reset=admin_state_reset)
def start_instance(self, ctxt, instance):
"""Start an instance in its cell.
This method takes a new-world instance object.
"""
cctxt = self.client.prepare(version='1.12')
cctxt.cast(ctxt, 'start_instance', instance=instance)
def stop_instance(self, ctxt, instance, do_cast=True, clean_shutdown=True):
"""Stop an instance in its cell.
This method takes a new-world instance object.
"""
msg_args = {'instance': instance,
'do_cast': do_cast}
if self.client.can_send_version('1.31'):
version = '1.31'
msg_args['clean_shutdown'] = clean_shutdown
else:
version = '1.12'
cctxt = self.client.prepare(version=version)
method = do_cast and cctxt.cast or cctxt.call
return method(ctxt, 'stop_instance', **msg_args)
def cell_create(self, ctxt, values):
cctxt = self.client.prepare(version='1.13')
return cctxt.call(ctxt, 'cell_create', values=values)
def cell_update(self, ctxt, cell_name, values):
cctxt = self.client.prepare(version='1.13')
return cctxt.call(ctxt, 'cell_update',
cell_name=cell_name, values=values)
def cell_delete(self, ctxt, cell_name):
cctxt = self.client.prepare(version='1.13')
return cctxt.call(ctxt, 'cell_delete', cell_name=cell_name)
def cell_get(self, ctxt, cell_name):
cctxt = self.client.prepare(version='1.13')
return cctxt.call(ctxt, 'cell_get', cell_name=cell_name)
def reboot_instance(self, ctxt, instance, block_device_info,
reboot_type):
"""Reboot an instance in its cell.
This method takes a new-world instance object.
"""
cctxt = self.client.prepare(version='1.14')
cctxt.cast(ctxt, 'reboot_instance', instance=instance,
reboot_type=reboot_type)
def pause_instance(self, ctxt, instance):
"""Pause an instance in its cell.
This method takes a new-world instance object.
"""
cctxt = self.client.prepare(version='1.19')
cctxt.cast(ctxt, 'pause_instance', instance=instance)
def unpause_instance(self, ctxt, instance):
"""Unpause an instance in its cell.
This method takes a new-world instance object.
"""
cctxt = self.client.prepare(version='1.19')
cctxt.cast(ctxt, 'unpause_instance', instance=instance)
def suspend_instance(self, ctxt, instance):
"""Suspend an instance in its cell.
This method takes a new-world instance object.
"""
cctxt = self.client.prepare(version='1.15')
cctxt.cast(ctxt, 'suspend_instance', instance=instance)
def resume_instance(self, ctxt, instance):
"""Resume an instance in its cell.
This method takes a new-world instance object.
"""
cctxt = self.client.prepare(version='1.15')
cctxt.cast(ctxt, 'resume_instance', instance=instance)
def terminate_instance(self, ctxt, instance, bdms, reservations=None,
delete_type='delete'):
"""Delete an instance in its cell.
This method takes a new-world instance object.
"""
msg_kwargs = {'instance': instance}
if self.client.can_send_version('1.36'):
version = '1.36'
msg_kwargs['delete_type'] = delete_type
else:
version = '1.18'
cctxt = self.client.prepare(version=version)
cctxt.cast(ctxt, 'terminate_instance', **msg_kwargs)
def soft_delete_instance(self, ctxt, instance, reservations=None):
"""Soft-delete an instance in its cell.
This method takes a new-world instance object.
"""
cctxt = self.client.prepare(version='1.18')
cctxt.cast(ctxt, 'soft_delete_instance', instance=instance)
def resize_instance(self, ctxt, instance, extra_instance_updates,
scheduler_hint, flavor, reservations,
clean_shutdown=True):
flavor_p = jsonutils.to_primitive(flavor)
version = '1.33'
msg_args = {'instance': instance,
'flavor': flavor_p,
'extra_instance_updates': extra_instance_updates,
'clean_shutdown': clean_shutdown}
if not self.client.can_send_version(version):
del msg_args['clean_shutdown']
version = '1.20'
cctxt = self.client.prepare(version=version)
cctxt.cast(ctxt, 'resize_instance', **msg_args)
def live_migrate_instance(self, ctxt, instance, host_name,
block_migration, disk_over_commit):
cctxt = self.client.prepare(version='1.20')
cctxt.cast(ctxt, 'live_migrate_instance',
instance=instance,
block_migration=block_migration,
disk_over_commit=disk_over_commit,
host_name=host_name)
def revert_resize(self, ctxt, instance, migration, host,
reservations):
cctxt = self.client.prepare(version='1.21')
cctxt.cast(ctxt, 'revert_resize', instance=instance)
def confirm_resize(self, ctxt, instance, migration, host,
reservations, cast=True):
# NOTE(comstud): This is only used in the API cell where we should
# always cast and ignore the 'cast' kwarg.
# Also, the compute api method normally takes an optional
# 'migration_ref' argument. But this is only used from the manager
# back to the API... which would happen in the child cell.
cctxt = self.client.prepare(version='1.21')
cctxt.cast(ctxt, 'confirm_resize', instance=instance)
def reset_network(self, ctxt, instance):
"""Reset networking for an instance."""
cctxt = self.client.prepare(version='1.22')
cctxt.cast(ctxt, 'reset_network', instance=instance)
def inject_network_info(self, ctxt, instance):
"""Inject networking for an instance."""
cctxt = self.client.prepare(version='1.23')
cctxt.cast(ctxt, 'inject_network_info', instance=instance)
def snapshot_instance(self, ctxt, instance, image_id):
cctxt = self.client.prepare(version='1.24')
cctxt.cast(ctxt, 'snapshot_instance',
instance=instance, image_id=image_id)
def backup_instance(self, ctxt, instance, image_id, backup_type, rotation):
cctxt = self.client.prepare(version='1.24')
cctxt.cast(ctxt, 'backup_instance',
instance=instance,
image_id=image_id,
backup_type=backup_type,
rotation=rotation)
def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
image_ref, orig_image_ref, orig_sys_metadata, bdms,
recreate=False, on_shared_storage=False, host=None,
preserve_ephemeral=False, kwargs=None):
cctxt = self.client.prepare(version='1.25')
cctxt.cast(ctxt, 'rebuild_instance',
instance=instance, image_href=image_ref,
admin_password=new_pass, files_to_inject=injected_files,
preserve_ephemeral=preserve_ephemeral, kwargs=kwargs)
def set_admin_password(self, ctxt, instance, new_pass):
cctxt = self.client.prepare(version='1.29')
cctxt.cast(ctxt, 'set_admin_password', instance=instance,
new_pass=new_pass)
def get_keypair_at_top(self, ctxt, user_id, name):
if not CONF.cells.enable:
return
cctxt = self.client.prepare(version='1.37')
keypair = cctxt.call(ctxt, 'get_keypair_at_top', user_id=user_id,
name=name)
if keypair is None:
raise exception.KeypairNotFound(user_id=user_id,
name=name)
return keypair
| apache-2.0 | -2,072,031,802,108,248,800 | 42.674419 | 79 | 0.589173 | false |
line/line-bot-sdk-python | tests/models/serialize_test_case.py | 1 | 2551 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals, absolute_import
import sys
import unittest
from numbers import Number
from linebot.models import (
Base,
)
from linebot.utils import to_camel_case
PY3 = sys.version_info[0] == 3
class SerializeTestCase(unittest.TestCase):
MESSAGE = 'message'
STICKER = 'sticker'
POSTBACK = 'postback'
CAMERA = 'camera'
CAMERA_ROLL = 'cameraRoll'
DATETIMEPICKER = 'datetimepicker'
URI = 'uri'
LOCATION = 'location'
FLEX = 'flex'
GENDER = "gender"
APP_TYPE = "appType"
AGE = "age"
AREA = "area"
SUBSCRIPTION_PERIOD = "subscriptionPeriod"
SPAN = 'span'
BUBBLE = 'bubble'
CAROUSEL = 'carousel'
BOX = 'box'
BUTTON = 'button'
FILLER = 'filler'
ICON = 'icon'
TEXT = 'text'
IMAGE = 'image'
VIDEO = 'video'
AUDIO = 'audio'
SEPARATOR = 'separator'
IMAGEMAP = 'imagemap'
ACTION = 'action'
TEMPLATE = 'template'
BUTTONS = 'buttons'
CONFIRM = 'confirm'
IMAGE_CAROUSEL = 'image_carousel'
LINEAR_GRADIENT = 'linearGradient'
def serialize_as_dict(self, obj, type=None):
if isinstance(obj, Base):
return obj.as_json_dict()
elif isinstance(obj, dict):
ret = {to_camel_case(k): self.serialize_as_dict(v) for k, v in obj.items()}
if type is not None:
ret['type'] = type
return ret
elif isinstance(obj, list):
return [self.serialize_as_dict(elem) for elem in obj]
else:
if PY3:
self.assertIsInstance(obj, (str, bool, Number))
else:
self.assertIsInstance(obj, (basestring, bool, Number)) # noqa
return obj
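    # Illustrative behaviour of serialize_as_dict (sketch): a plain dict such
    # as {'alt_text': 'hi', 'quick_reply': some_model} comes back with its keys
    # camel-cased ('altText', 'quickReply'), Base-derived values expanded via
    # as_json_dict(), lists serialized element by element and, when the
    # optional type argument is given, a top-level 'type' entry added.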
class ConstError(TypeError):
pass
def __setattr__(self, name, value):
if name in SerializeTestCase.__dict__:
raise self.ConstError("Can't rebind const (%s)" % name)
self.__dict__[name] = value
| apache-2.0 | -5,113,099,517,172,017,000 | 28.321839 | 87 | 0.619757 | false |
Anber/django-extended-messages | extended_messages/api.py | 1 | 2318 | # Slightly modified django.contrib.messages.api
from django.contrib.messages import constants
from django.contrib.messages.api import get_messages, get_level, set_level, MessageFailure
def add_message(request, level, message, extra_tags='', fail_silently=False, sticky=False):
"""
Attempts to add a message to the request using the 'messages' app, falling
back to the user's message_set if MessageMiddleware hasn't been enabled.
"""
if hasattr(request, '_messages'):
return request._messages.add(level, message, extra_tags, sticky=sticky)
if hasattr(request, 'user') and request.user.is_authenticated():
return request.user.message_set.create(message=message)
if not fail_silently:
raise MessageFailure('Without the django.contrib.messages '
'middleware, messages can only be added to '
'authenticated users.')
def debug(request, message, extra_tags='', fail_silently=False, sticky=False):
"""
Adds a message with the ``DEBUG`` level.
"""
add_message(request, constants.DEBUG, message, extra_tags=extra_tags,
fail_silently=fail_silently, sticky=sticky)
def info(request, message, extra_tags='', fail_silently=False, sticky=False):
"""
Adds a message with the ``INFO`` level.
"""
add_message(request, constants.INFO, message, extra_tags=extra_tags,
fail_silently=fail_silently, sticky=sticky)
def success(request, message, extra_tags='', fail_silently=False, sticky=False):
"""
Adds a message with the ``SUCCESS`` level.
"""
add_message(request, constants.SUCCESS, message, extra_tags=extra_tags,
fail_silently=fail_silently, sticky=sticky)
def warning(request, message, extra_tags='', fail_silently=False, sticky=False):
"""
Adds a message with the ``WARNING`` level.
"""
add_message(request, constants.WARNING, message, extra_tags=extra_tags,
fail_silently=fail_silently, sticky=sticky)
def error(request, message, extra_tags='', fail_silently=False, sticky=False):
"""
Adds a message with the ``ERROR`` level.
"""
add_message(request, constants.ERROR, message, extra_tags=extra_tags,
fail_silently=fail_silently, sticky=sticky)
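# Typical usage from a view (illustrative sketch; it assumes the standard
# messages middleware is installed and these helpers are imported from this
# module):
#     success(request, 'Profile saved.', sticky=True)
#     error(request, 'Could not save profile.', fail_silently=True)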
| bsd-3-clause | -7,817,217,899,636,346,000 | 38.965517 | 91 | 0.667386 | false |
alexm98/dotfiles | spacemacs/elpa/live-py-mode-20170928.2107/report_builder.py | 1 | 9236 | import re
import sys
import traceback
class ReportBuilder(object):
def __init__(self, message_limit=None):
self.messages = []
self.assignments = []
self.message_count = 0
self.message_limit = message_limit
self.stack_block = None # (first_line, last_line) numbers, not indexes
self.stack = [] # current call stack
self.history = [] # all stack frames that need to be combined
self.line_widths = {}
self.max_width = None
self.frame_width = 0
def start_block(self, first_line, last_line):
""" Cap all the lines from first_line to last_line inclusive with
pipes if they need it. They don't need it if they are all empty, or
if they all end in pipes. first_line and last_line are line numbers,
not indexes.
Return the maximum width of all the lines.
"""
self._check_line_count(last_line)
line_indexes = range(first_line-1, last_line)
max_width = 0
all_end_in_pipes = True
for line_index in line_indexes:
message = self.messages[line_index]
max_width = max(len(message), max_width)
all_end_in_pipes = all_end_in_pipes and message.endswith('| ')
if max_width and not all_end_in_pipes:
for line_index in line_indexes:
message = self.messages[line_index]
self.messages[line_index] = message.ljust(max_width) + '| '
self._update_frame_width(max_width + 2, line_index+1)
else:
self._increment_message_count()
return max_width
def _update_frame_width(self, new_width, line_number):
if not self.max_width:
# no limit.
return
if new_width > self.frame_width or not self.stack_block:
should_throw = False
if not self.stack_block:
self.line_widths[line_number-1] = new_width
should_throw = new_width > self.max_width
else:
first_line, last_line = self.stack_block
for line_index in range(first_line - 1, last_line):
line_width = (
self.line_widths.get(line_index, 0) +
new_width -
self.frame_width)
if line_width > self.max_width:
should_throw = True
self.line_widths[line_index] = line_width
self.frame_width = new_width
if should_throw:
raise RuntimeError('live coding message limit exceeded')
def start_frame(self, first_line, last_line):
""" Start a new stack frame to support recursive calls.
This allows extra messages to be added to a stack frame after a
recursive call finishes.
:param int first_line: the first line of the function that the frame is
running.
:param int last_line: the last line of the function that the frame is
running.
"""
new_frame = ReportBuilder(self.message_limit)
new_frame.stack_block = (first_line, last_line)
new_frame.line_widths = self.line_widths
new_frame.max_width = self.max_width
self.history.append(new_frame)
return new_frame
def _increment_message_count(self):
if (self.message_limit is not None and
self.message_count >= self.message_limit):
raise RuntimeError('live coding message limit exceeded')
self.message_count += 1
def add_message(self, message, line_number):
""" Add a message to the report on line line_number (1-based). """
if '\n' in message:
message = re.sub(r'\s+', ' ', message)
self._increment_message_count()
self._check_line_count(line_number)
new_width = len(self.messages[line_number - 1]) + len(message)
self._update_frame_width(new_width, line_number)
self.messages[line_number - 1] += message
def add_extra_message(self, message, line_number):
""" Add an extra message to the last frame after the code has finished
running. """
target = self.history[-1] if self.history else self
target.max_width = self.max_width
target.add_message(message, line_number)
def assign(self, name, value, line_number):
""" Convenience method for simple assignments.
Just wraps all the separate steps for a flexible assignment. """
self.start_assignment()
try:
self.set_assignment_value(value)
self.report_assignment('{} = {{!r}}'.format(name),
line_number=line_number)
finally:
self.end_assignment()
return value
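    # Illustrative effect of assign() (sketch): builder.assign('x', 5, 3)
    # appends the text "x = 5 " to line 3 of the report and returns 5, so it
    # can wrap the right-hand side of an assignment in instrumented code.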
def start_assignment(self):
self.assignments.append(AssignmentReport())
def end_assignment(self):
self.assignments.pop()
def set_assignment_value(self, value):
self.assignments[-1].value = value
return value
def add_assignment_index(self, value):
self.assignments[-1].indexes.append(value)
return value
def get_assignment_index(self, index_index):
return self.assignments[-1].indexes[index_index]
def report_assignment(self, format_string, line_number):
assignment = self.assignments[-1]
try:
display = format_string.format(*(assignment.indexes +
[assignment.value]))
except Exception:
display = None
if display is not None and not display.endswith('>'):
self.add_message(display + ' ', line_number)
def exception(self):
etype, value, tb = sys.exc_info()
messages = traceback.format_exception_only(etype, value)
message = messages[-1].strip() + ' '
entries = traceback.extract_tb(tb)
if entries:
_, line_number, _, _ = entries[0]
try:
old_limit, self.message_limit = self.message_limit, None
old_width, self.max_width = self.max_width, None
self.add_message(message, line_number)
finally:
self.message_limit = old_limit
self.max_width = old_width
def return_value(self, value, line_number):
self.add_message('return %s ' % repr(value), line_number)
return value
def yield_value(self, value, line_number):
if isinstance(value, tuple):
display = ', '.join([repr(item) for item in value])
else:
display = repr(value)
self.add_message('yield %s ' % display, line_number)
return value
def yield_from(self, values, line_number):
for value in values:
self.start_block(line_number, line_number)
yield self.yield_value(value, line_number)
def record_call(self,
names,
displays_before,
result,
displays_after,
line_number):
zipped = zip(names, displays_before, displays_after)
for name, display_before, display_after in zipped:
if display_before != display_after:
self.add_message('%s = %s ' % (name, display_after),
line_number)
return result
def record_delete(self, name, target, line_number):
return DeletionTarget(name, target, line_number, self)
def report(self, total_lines=0):
self.max_width = None
self.message_limit = None
for frame in self.history:
first_line, last_line = frame.stack_block
self.start_block(first_line, last_line)
for i in range(len(frame.messages)):
message = frame.messages[i]
if message:
line_number = i+1
self.add_message(message, line_number)
self.history = []
self._check_line_count(total_lines)
return '\n'.join(self.messages)
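    # Minimal end-to-end sketch (illustrative):
    #     builder = ReportBuilder()
    #     builder.assign('x', 5, 1)
    #     builder.add_message('return 5 ', 2)
    #     builder.report()   # -> 'x = 5 \nreturn 5 '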
def _check_line_count(self, line_count):
while len(self.messages) < line_count:
self.messages.append('')
class DeletionTarget(object):
def __init__(self, name, target, line_number, report_builder):
self.name = name
self.target = target
self.line_number = line_number
self.report_builder = report_builder
def __delitem__(self, key):
before = repr(self.target)
del self.target[key]
after = repr(self.target)
if before != after:
self.report_builder.assign(self.name,
self.target,
self.line_number)
def __delattr__(self, key):
before = repr(self.target)
self.target.__delattr__(key)
after = repr(self.target)
if before != after:
self.report_builder.assign(self.name,
self.target,
self.line_number)
class AssignmentReport(object):
def __init__(self):
self.value = None
self.indexes = []
| gpl-3.0 | -5,489,539,064,284,392,000 | 36.392713 | 79 | 0.562906 | false |
neutrons/Licorne-Py | UI-playground/layerplot.py | 1 | 2806 | from __future__ import (absolute_import, division, print_function)
from PyQt5 import QtCore, QtWidgets
import sys
import numpy as np
from layer import Layer, MSLD
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import matplotlib.cm
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
class layerplot(QtWidgets.QWidget):
def __init__(self, *args):
QtWidgets.QWidget.__init__(self, *args)
sample=[Layer(nsld=5),Layer(thickness=2.,nsld=3),Layer(nsld=5),Layer(nsld=4.,thickness=np.inf)]
self.m = PlotCanvas(sample, self)
self.m.move(0,0)
def resizeEvent(self, event):
self.m.setGeometry(self.rect())
class PlotCanvas(FigureCanvas):
def __init__(self, layers, parent=None):
self.fig = Figure()
self.axes = self.fig.add_subplot(111)
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
self.data=layers
self.variable='nsld'
self.plot()
self.fig.canvas.mpl_connect('pick_event', self.onpick)
def onpick(self,event):
ind=event.ind[0]
if ind==len(self.data)-1:
ind='substrate'
print('picked layer {0}'.format(ind))
return True
def plot(self):
layer_thick_array=np.array([l.thickness for l in self.data])
layer_nsld_array =np.array([l.nsld for l in self.data])
depth=np.zeros(len(layer_thick_array))
depth[1:]=layer_thick_array.cumsum()[:-1]
patches=[]
N=len(self.data)
for i in range(N-1):
polygon=Polygon([[depth[i],0.],[depth[i],layer_nsld_array[i]],[depth[i+1],layer_nsld_array[i]],[depth[i+1],0]],True)
patches.append(polygon)
polygon=Polygon([[depth[N-1],0.],[depth[N-1],layer_nsld_array[N-1]],[depth[N-1]+1,layer_nsld_array[N-1]],[depth[N-1]+1,0]],True)
patches.append(polygon)
p = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=0.4, picker=True)
colors = 100*np.random.rand(len(patches))
p.set_array(np.array(colors))
ax = self.figure.add_subplot(111)
ax.add_collection(p)
ax.set_title('NSLD')
ax.set_xlim(np.array([0,depth[-1]])*1.2)
ax.set_ylim(np.array([0,layer_nsld_array.max()])*1.2) #TODO allow negative
ax.set_xlabel('Thickness')
ax.set_ylabel('NSLD')
self.draw()
if __name__=='__main__':
app=QtWidgets.QApplication(sys.argv)
mainForm=layerplot()
mainForm.show()
sys.exit(app.exec_())
| gpl-3.0 | -8,143,628,558,970,020,000 | 35.921053 | 136 | 0.632217 | false |
breunigs/livestreamer | src/livestreamer/plugins/filmon.py | 1 | 4083 | import re
import requests
from livestreamer.compat import urlparse
from livestreamer.exceptions import PluginError, NoStreamsError
from livestreamer.plugin import Plugin
from livestreamer.stream import RTMPStream
from livestreamer.utils import urlget, urlopen, res_json
AJAX_HEADERS = {
"Referer": "http://www.filmon.com",
"X-Requested-With": "XMLHttpRequest",
"User-Agent": "Mozilla/5.0"
}
CHINFO_URL = "http://www.filmon.com/ajax/getChannelInfo"
VODINFO_URL = "http://www.filmon.com/vod/info/{0}"
QUALITY_WEIGHTS = {
"high": 720,
"low": 480
}
SWF_URL = "http://www.filmon.com/tv/modules/FilmOnTV/files/flashapp/filmon/FilmonPlayer.swf"
class Filmon(Plugin):
@classmethod
def can_handle_url(cls, url):
return re.match("^http(s)?://(\w+\.)?filmon.com/(tv|vod).+", url)
@classmethod
def stream_weight(cls, key):
weight = QUALITY_WEIGHTS.get(key)
if weight:
return weight, "filmon"
return Plugin.stream_weight(key)
def _get_rtmp_app(self, rtmp):
parsed = urlparse(rtmp)
if not parsed.scheme.startswith("rtmp"):
return
if parsed.query:
app = "{0}?{1}".format(parsed.path[1:], parsed.query)
else:
app = parsed.path[1:]
return app
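    # Expected behaviour of _get_rtmp_app (illustrative sketch; the URLs are
    # made up):
    #     _get_rtmp_app("rtmp://example.com/live/ch?id=1") -> "live/ch?id=1"
    #     _get_rtmp_app("rtmp://example.com/vodapp")       -> "vodapp"
    #     _get_rtmp_app("http://example.com/page")         -> None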
def _get_streams(self):
if not RTMPStream.is_usable(self.session):
            raise PluginError("rtmpdump is not usable and is required by the Filmon plugin")
self.logger.debug("Fetching stream info")
self.rsession = requests.session()
res = urlget(self.url, session=self.rsession)
match = re.search("movie_id=(\d+)", res.text)
if match:
return self._get_vod_stream(match.group(1))
match = re.search("/channels/(\d+)/extra_big_logo.png", res.text)
if not match:
return
channel_id = match.group(1)
streams = {}
for quality in ("low", "high"):
try:
streams[quality] = self._get_stream(channel_id, quality)
except NoStreamsError:
pass
return streams
def _get_stream(self, channel_id, quality):
params = dict(channel_id=channel_id, quality=quality)
res = urlopen(CHINFO_URL, data=params, headers=AJAX_HEADERS,
session=self.rsession)
json = res_json(res)
if not json:
raise NoStreamsError(self.url)
elif not isinstance(json, list):
raise PluginError("Invalid JSON response")
info = json[0]
rtmp = info.get("serverURL")
playpath = info.get("streamName")
if not (rtmp and playpath):
raise NoStreamsError(self.url)
app = self._get_rtmp_app(rtmp)
if not app:
raise NoStreamsError(self.url)
return RTMPStream(self.session, {
"rtmp": rtmp,
"pageUrl": self.url,
"swfUrl": SWF_URL,
"playpath": playpath,
"app": app,
"live": True
})
def _get_vod_stream(self, movie_id):
res = urlopen(VODINFO_URL.format(movie_id), headers=AJAX_HEADERS,
session=self.rsession)
json = res_json(res)
json = json and json.get("data")
json = json and json.get("streams")
if not json:
raise NoStreamsError(self.url)
streams = {}
for quality in ("low", "high"):
stream = json.get(quality)
if not stream:
continue
rtmp = stream.get("url")
app = self._get_rtmp_app(rtmp)
if not app:
continue
playpath = stream.get("name")
if ".mp4" in playpath:
playpath = "mp4:" + playpath
streams[quality] = RTMPStream(self.session, {
"rtmp": rtmp,
"pageUrl": self.url,
"swfUrl": SWF_URL,
"playpath": playpath,
"app": app,
})
return streams
__plugin__ = Filmon
| bsd-2-clause | 1,767,515,076,056,671,200 | 27.957447 | 92 | 0.557188 | false |
ooici/marine-integrations | mi/dataset/driver/vel3d_l/wfp/test/test_driver.py | 1 | 35134 | """
@package mi.dataset.driver.vel3d_l.wfp.test.test_driver
@file marine-integrations/mi/dataset/driver/vel3d_l/wfp/driver.py
@author Steve Myerson (Raytheon)
@brief Test cases for vel3d_l_wfp driver (for both telemetered and recovered data)
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/dsa/test_driver
$ bin/dsa/test_driver -i [-t testname]
$ bin/dsa/test_driver -q [-t testname]
Files used for testing:
rec_vel3d_l_1.dat - 1 block with 10 FSI records
rec_vel3d_l_2.dat - 2 blocks with 4, 6 FSI records
rec_vel3d_l_4.dat - 4 blocks with 1, 2, 3, 4 FSI records
tel_vel3d_l_1.dat - 1 block with 10 FSI records
tel_vel3d_l_2.dat - 2 blocks with 4, 6 FSI records
tel_vel3d_l_3.dat - 3 blocks with 2, 3, 4 FSI records
tel_vel3d_l_4.dat - 4 blocks with 1, 2, 3, 4 FSI records
"""
__author__ = 'Steve Myerson (Raytheon)'
__license__ = 'Apache 2.0'
import unittest
from nose.plugins.attrib import attr
from mock import Mock
from mi.core.log import get_logger; log = get_logger()
import os
from mi.core.exceptions import \
DatasetParserException, \
RecoverableSampleException, \
SampleException, \
UnexpectedDataException
from mi.idk.dataset.unit_test import DataSetTestCase
from mi.idk.dataset.unit_test import DataSetIntegrationTestCase
from mi.idk.dataset.unit_test import DataSetQualificationTestCase
from mi.idk.exceptions import IDKConfigMissing, IDKException, SampleTimeout
from mi.idk.util import remove_all_files
from mi.dataset.dataset_driver import \
DataSourceConfigKey, \
DataSetDriverConfigKeys, \
DriverParameter
from pyon.agent.agent import ResourceAgentState
from interface.objects import ResourceAgentErrorEvent
from mi.dataset.driver.vel3d_l.wfp.driver import \
Vel3dLWfp, \
DataTypeKey
from mi.dataset.parser.sio_mule_common import StateKey
from mi.dataset.parser.vel3d_l_wfp import \
Vel3dLWfpDataParticleType, \
Vel3dLWfpStateKey, \
Vel3dLWfpInstrumentParticle, \
Vel3dLWfpInstrumentRecoveredParticle, \
Vel3dLWfpMetadataRecoveredParticle, \
Vel3dLWfpSioMuleMetadataParticle
DIR_REC = '/tmp/dsatest_rec'
DIR_TEL = '/tmp/dsatest_tel'
FILE_REC1 = 'A00000001.DAT'
FILE_REC2 = 'A00000002.DAT'
FILE_REC4 = 'A00000004.DAT'
FILE_TEL = 'node58p1.dat'
# Fill in driver details
DataSetTestCase.initialize(
driver_module='mi.dataset.driver.vel3d_l.wfp.driver',
driver_class='Vel3dLWfp',
agent_resource_id = '123xyz',
agent_name = 'Agent007',
agent_packet_config = Vel3dLWfp.stream_config(),
startup_config = {
DataSourceConfigKey.RESOURCE_ID: 'vel3d_l_wfp',
DataSourceConfigKey.HARVESTER:
{
DataTypeKey.VEL3D_L_WFP:
{
DataSetDriverConfigKeys.DIRECTORY: DIR_REC,
DataSetDriverConfigKeys.PATTERN: 'A*.DAT',
DataSetDriverConfigKeys.FREQUENCY: 1,
},
DataTypeKey.VEL3D_L_WFP_SIO_MULE:
{
DataSetDriverConfigKeys.DIRECTORY: DIR_TEL,
DataSetDriverConfigKeys.PATTERN: FILE_TEL,
DataSetDriverConfigKeys.FREQUENCY: 1,
}
},
DataSourceConfigKey.PARSER: {
DataTypeKey.VEL3D_L_WFP: {},
DataTypeKey.VEL3D_L_WFP_SIO_MULE: {}
}
}
)
PARSER_STATE = 'parser_state'
REC_PARTICLES = (Vel3dLWfpInstrumentRecoveredParticle, Vel3dLWfpMetadataRecoveredParticle)
TEL_PARTICLES = (Vel3dLWfpInstrumentParticle, Vel3dLWfpSioMuleMetadataParticle)
# The integration and qualification tests generated here are suggested tests,
# but may not be enough to fully test your driver. Additional tests should be
# written as needed.
###############################################################################
# INTEGRATION TESTS #
# Device specific integration tests are for #
# testing device specific capabilities #
###############################################################################
@attr('INT', group='mi')
class IntegrationTest(DataSetIntegrationTestCase):
def test_get(self):
"""
Test that we can get data from multiple files.
"""
log.info("================ START INTEG TEST GET =====================")
# Start sampling.
self.driver.start_sampling()
# Generated recovered file has 10 instrument
# records and 1 metadata record.
log.info("FILE rec_vel3d_l_1.dat INTEG TEST GET")
self.clear_async_data()
self.create_sample_data_set_dir('rec_vel3d_l_1.dat', DIR_REC, FILE_REC1)
self.assert_data(REC_PARTICLES, 'rec_vel3d_l_1.yml', count=11, timeout=11)
# Generated telemetered file has 1 SIO block with 10 instrument
# records and 1 metadata record.
log.info("FILE tel_vel3d_l_1.dat INTEG TEST GET")
self.clear_async_data()
self.create_sample_data_set_dir('tel_vel3d_l_1.dat', DIR_TEL, FILE_TEL)
self.assert_data(TEL_PARTICLES, 'tel_vel3d_l_1.yml', count=11, timeout=11)
log.info("================ END INTEG TEST GET ======================")
def test_get_any_order(self):
"""
Test that we can get data from files for all harvesters / parsers.
"""
log.info("=========== START INTEG TEST GET ANY ORDER ================")
# Start sampling.
self.driver.start_sampling()
self.clear_async_data()
# Set up the test files.
# 2 Recovered files
# rec_vel3d_l_2 - 4 instrument, 1 metadata, 6 instrument, 1 metadata
# rec_vel3d_l_4 - 1 instrument, 2 instrument, 3 instrument,
# 4 instrument with metadata after each group
# 1 Telemetered file
# tel_vel3d_l_1 - 10 instrument, 1 metadata
log.info("=========== CREATE DATA FILES ================")
self.create_sample_data_set_dir('rec_vel3d_l_4.dat', DIR_REC, FILE_REC4)
self.create_sample_data_set_dir('rec_vel3d_l_2.dat', DIR_REC, FILE_REC2)
self.create_sample_data_set_dir('tel_vel3d_l_1.dat', DIR_TEL, FILE_TEL)
# Read files in the following order:
# Entire recovered data file rec_vel3d_l_2.
# Entire telemetered data file tel_vel3d_l_1.
# Entire recovered data file rec_vel3d_l_4.
log.info("=========== READ RECOVERED DATA FILE #1 ================")
self.assert_data(REC_PARTICLES, 'rec_vel3d_l_2.yml', count=12, timeout=12)
log.info("=========== READ TELEMETERED DATA FILE #1 ================")
self.assert_data(TEL_PARTICLES, 'tel_vel3d_l_1.yml', count=11, timeout=11)
log.info("=========== READ RECOVERED DATA FILE #2 ================")
self.assert_data(REC_PARTICLES, 'rec_vel3d_l_4.yml', count=14, timeout=14)
log.info("=========== END INTEG TEST GET ANY ORDER ================")
def test_non_vel3d_l_sio_block(self):
"""
        Test the ability of the parser to ignore SIO blocks which are not vel3d_l blocks.
"""
log.info("=========== START INTEG NON VEL3D_L SIO BLOCK ================")
# Generated telemetered file has 3 SIO blocks.
# First SIO block is not a VEL3D_L SIO Block.
# Second SIO block has 3 instrument records and 1 metadata record.
# Third SIO block has 4 instrument records and 1 metadata record.
log.info("FILE tel_vel3d_l_3.dat INTEG TEST GET")
self.clear_async_data()
self.create_sample_data_set_dir('tel_vel3d_l_3.dat', DIR_TEL, FILE_TEL)
self.driver.start_sampling()
self.assert_data(TEL_PARTICLES, 'tel_vel3d_l_3.yml', count=9, timeout=9)
log.info("=========== END INTEG NON VEL3D_L SIO BLOCK ================")
def test_sample_exception(self):
"""
Test a case that should produce a sample exception and confirm the
sample exception occurs
"""
log.info("======== START INTEG TEST SAMPLE EXCEPTION FAMILY ==========")
self.clear_async_data()
self.create_sample_data_set_dir('rec_excess.dat', DIR_REC, FILE_REC1)
self.driver.start_sampling()
# an event catches the sample exception - excess data at end of record
self.assert_event('ResourceAgentErrorEvent')
log.info("======== END INTEG TEST SAMPLE EXCEPTION FAMILY ==========")
def test_start_stop_resume(self):
"""
Test the ability to start, stop and restart sampling,
ingesting files in the correct order.
This also tests the condition where the parser is restarted after
some, but not all, particles from a "chunk" get published.
"""
log.info("===== START INTEG TEST STOP START RESUME =====")
self.clear_async_data()
self.create_sample_data_set_dir('tel_vel3d_l_1.dat', DIR_TEL, FILE_TEL)
self.create_sample_data_set_dir('rec_vel3d_l_1.dat', DIR_REC, FILE_REC1)
self.create_sample_data_set_dir('rec_vel3d_l_2.dat', DIR_REC, FILE_REC2)
self.driver.start_sampling()
# Get all the particles from rec_vel3d_l_1.dat.
log.info("===== READ RECOVERED DATA FILE #1 =====")
self.assert_data(REC_PARTICLES,
'rec_vel3d_l_1.yml', count=11, timeout=15)
self.assert_file_ingested(FILE_REC1, DataTypeKey.VEL3D_L_WFP)
# Get 2 instrument particles (of 10) from rec_vel3d_l_2.dat.
# This gets part way through the first block.
log.info("===== READ RECOVERED DATA FILE #2 =====")
self.assert_data(Vel3dLWfpInstrumentRecoveredParticle,
'rec_vel3d_l_2_inst1-2.yml', count=2, timeout=10)
# Stop and then start sampling, resuming from where we left off.
self.driver.stop_sampling()
self.driver.start_sampling()
# Get all particles from tel_vel3d_l_1.dat.
log.info("===== READ TELEMETERED DATA FILE #1 =====")
self.assert_data(TEL_PARTICLES,
'tel_vel3d_l_1.yml', count=11, timeout=15)
# Get the last 8 instrument particles (of 10) from rec_vel3d_l_2.dat.
# This spans the 2 blocks.
log.info("===== READ RECOVERED DATA FILE #2 PART 2 =====")
self.assert_data(Vel3dLWfpInstrumentRecoveredParticle,
'rec_vel3d_l_2_inst3_10.yml', count=8, timeout=10)
# Get the 2 metadata particles from rec_vel3d_l_2.dat
log.info("===== READ RECOVERED DATA FILE #2 METADATA =====")
self.assert_data(Vel3dLWfpMetadataRecoveredParticle,
'rec_vel3d_l_2_metadata.yml', count=2, timeout=10)
self.assert_file_ingested(FILE_REC2, DataTypeKey.VEL3D_L_WFP)
log.info("===== END INTEG TEST STOP START RESUME ======")
def test_stop_midblock(self):
"""
Test the condition where the parser is stopped after some,
        but not all, particles from a given block have been published.
"""
log.info("===== START INTEG TEST STOP MID-BLOCK =====")
# Create file (1 block, 10 instrument particles, 1 metadata particle)
self.clear_async_data()
self.create_sample_data_set_dir('rec_vel3d_l_1.dat', DIR_REC, FILE_REC1)
self.driver.start_sampling()
# Get 1 instrument particle (of the 10 available).
self.assert_data(Vel3dLWfpInstrumentRecoveredParticle,
'rec_vel3d_l_1_inst1.yml', count=1, timeout=10)
# Stop and then start sampling, resuming from where we left off.
self.driver.stop_sampling()
self.driver.start_sampling()
# Get the next 5 instrument particles (of the 10 available).
self.assert_data(Vel3dLWfpInstrumentRecoveredParticle,
'rec_vel3d_l_1_inst2_6.yml', count=5, timeout=10)
# Stop and then start sampling, resuming from where we left off.
self.driver.stop_sampling()
self.driver.start_sampling()
# Get the last 4 instrument particles (of the 10 available)
# as well as the metadata particle.
self.assert_data(REC_PARTICLES,
'rec_vel3d_l_1_inst6_10meta.yml', count=5, timeout=10)
# File should be fully parsed at this point.
self.assert_file_ingested(FILE_REC1, DataTypeKey.VEL3D_L_WFP)
# Part 2 of this test.
# Create file (4 blocks, 1+2+3+4 instrument particles,
# 1 metadata particle per block)
self.create_sample_data_set_dir('rec_vel3d_l_4.dat', DIR_REC, FILE_REC2)
# Get the first 8 instrument particles and 3 metadata particles.
# This will leave us in the middle of the 4th block,
# with 2 of the 4 instrument particles having been retrieved.
self.assert_data(REC_PARTICLES,
'rec_vel3d_l_4_inst1_8_meta1_3.yml', count=11, timeout=20)
# Stop and then start sampling, resuming from where we left off.
self.driver.stop_sampling()
self.driver.start_sampling()
# Get the last 2 instrument particles and the last metadata particle.
self.assert_data(REC_PARTICLES,
'rec_vel3d_l_4_inst9_10_meta4.yml', count=3, timeout=10)
# File should be fully parsed at this point.
self.assert_file_ingested(FILE_REC2, DataTypeKey.VEL3D_L_WFP)
log.info("===== END INTEG TEST STOP MID-BLOCK =====")
def test_stop_resume(self):
"""
Test the ability to stop and restart the process.
"""
log.info("===== START INTEG TEST STOP RESUME =====")
self.clear_async_data()
path_1 = self.create_sample_data_set_dir('rec_vel3d_l_1.dat',
DIR_REC, FILE_REC1)
path_2 = self.create_sample_data_set_dir('rec_vel3d_l_4.dat',
DIR_REC, FILE_REC4)
# Recovered file 1 position set to EOF.
# Recovered file 2 position set to record 9 (start of group of 4 records).
pos_1 = 761
pos_2 = 1155 # 338 + 385 + 432
new_state = {
DataTypeKey.VEL3D_L_WFP:
{FILE_REC1: self.get_file_state(path_1, True, pos_1),
FILE_REC4: self.get_file_state(path_2, False, pos_2)},
DataTypeKey.VEL3D_L_WFP_SIO_MULE:
{}
}
new_state[DataTypeKey.VEL3D_L_WFP][FILE_REC1][PARSER_STATE][Vel3dLWfpStateKey.PARTICLE_NUMBER] = 0
new_state[DataTypeKey.VEL3D_L_WFP][FILE_REC4][PARSER_STATE][Vel3dLWfpStateKey.PARTICLE_NUMBER] = 0
log.info("===== INTEG TEST STOP RESUME SET STATE %s =====", new_state)
self.driver = self._get_driver_object(memento=new_state)
self.driver.start_sampling()
log.info("===== INTEG TEST STOP RESUME READ RECOVERED DATA FILE #2 ========")
self.assert_data(REC_PARTICLES, 'rec_vel3d_l_4_10-14.yml', count=5, timeout=10)
# Read Telemetered file.
self.driver.stop_sampling()
self.create_sample_data_set_dir('tel_vel3d_l_1.dat', DIR_TEL, FILE_TEL)
self.driver.start_sampling()
log.info("===== INTEG TEST STOP RESUME READ TELEMETERED DATA FILE ========")
self.assert_data(TEL_PARTICLES, 'tel_vel3d_l_1_1-4.yml', count=4, timeout=11)
self.driver.stop_sampling()
self.driver.start_sampling()
self.assert_data(TEL_PARTICLES, 'tel_vel3d_l_1_5-11.yml', count=7, timeout=11)
log.info("===== END INTEG TEST STOP RESUME =====")
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for #
# testing device specific capabilities #
###############################################################################
@attr('QUAL', group='mi')
class QualificationTest(DataSetQualificationTestCase):
def test_parser_exception(self):
"""
Test an exception is raised after the driver is started during
record parsing.
"""
log.info("========== START QUAL TEST PARSER EXCEPTION ==========")
self.event_subscribers.clear_events()
self.assert_initialize()
self.create_sample_data_set_dir('rec_excess.dat', DIR_REC, FILE_REC1)
# Verify an event was raised and we are in our retry state.
self.assert_event_received(ResourceAgentErrorEvent, 10)
self.assert_state_change(ResourceAgentState.STREAMING, 10)
log.info("========== END QUAL TEST PARSER EXCEPTION ==========")
def test_publish_path(self):
"""
        Set up an agent/driver/harvester/parser and verify that data is
        published out of the agent
"""
log.info("=========== START QUAL TEST PUBLISH PATH =================")
# Generated telemetered file has 1 SIO block with 10 instrument
# records and 1 metadata record.
log.info("FILE tel_vel3d_l_1.dat QUAL TEST PUBLISH PATH")
self.create_sample_data_set_dir('tel_vel3d_l_1.dat', DIR_TEL, FILE_TEL)
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
self.assert_start_sampling()
try:
# Verify that we get 10 instrument particles from the telemetered data file.
samples = 10
particle = Vel3dLWfpDataParticleType.SIO_INSTRUMENT_PARTICLE
log.info("===== READ %d TELEMETERED INSTRUMENT PARTICLES =====", samples)
result = self.data_subscribers.get_samples(particle, samples, 30)
# Verify that we get 1 metadata particle from the telemetered data file.
samples = 1
particle = Vel3dLWfpDataParticleType.SIO_METADATA_PARTICLE
log.info("===== READ %d TELEMETERED METADATA PARTICLES =====", samples)
meta_result = self.data_subscribers.get_samples(particle, samples, 10)
# Combine the instrument and metadata particles and verify results.
result.extend(meta_result)
self.assert_data_values(result, 'tel_vel3d_l_1.yml')
except Exception as e:
log.error("Telemetered Exception trapped: %s", e)
self.fail("Sample timeout.")
self.assert_stop_sampling()
# Generated recovered file has 10 instrument records and 1 metadata record.
log.info("FILE rec_vel3d_l_1.dat QUAL TEST PUBLISH PATH")
self.create_sample_data_set_dir('rec_vel3d_l_1.dat', DIR_REC, FILE_REC1)
self.assert_start_sampling()
try:
# Verify that we get 10 instrument particles from the recovered data file.
samples = 10
particle = Vel3dLWfpDataParticleType.WFP_INSTRUMENT_PARTICLE
log.info("===== READ %d RECOVERED INSTRUMENT PARTICLES =====", samples)
result = self.data_subscribers.get_samples(particle, samples, 30)
# Verify that we get 1 metadata particle from the recovered data file.
samples = 1
particle = Vel3dLWfpDataParticleType.WFP_METADATA_PARTICLE
log.info("===== READ %d RECOVERED METADATA PARTICLES =====", samples)
meta_result = self.data_subscribers.get_samples(particle, samples, 10)
# Combine the instrument and metadata particles and verify results.
result.extend(meta_result)
self.assert_data_values(result, 'rec_vel3d_l_1.yml')
except Exception as e:
log.error("Recovered Exception trapped: %s", e)
self.fail("Sample timeout.")
log.info("=========== END QUAL TEST PUBLISH PATH =================")
def test_rec_large_import(self):
"""
Test importing a large number of samples at once from the recovered file
"""
log.info("========= START QUAL TEST RECOVERED LARGE IMPORT ============")
# The recovered data file referenced in the IDD has 1 data record
# which contains 14124 instrument records and 1 metadata record.
self.create_sample_data_set_dir('A0000001.DAT', DIR_REC, FILE_REC1)
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
self.assert_start_sampling()
log.info("========== READING RECOVERED PARTICLES ==============")
try:
samples = 14124
particle = Vel3dLWfpDataParticleType.WFP_INSTRUMENT_PARTICLE
log.info("===== READ %d RECOVERED INSTRUMENT PARTICLES =====", samples)
self.data_subscribers.get_samples(particle, samples, 1000)
samples = 1
particle = Vel3dLWfpDataParticleType.WFP_METADATA_PARTICLE
log.info("===== READ %d RECOVERED METADATA PARTICLES =====", samples)
self.data_subscribers.get_samples(particle, samples, 5)
except SampleTimeout as e:
log.error("Exception trapped: %s", e, exc_info=True)
self.fail("Sample timeout.")
log.info("========= END QUAL TEST RECOVERED LARGE IMPORT =============")
def test_shutdown_restart(self):
"""
Test a full stop of the dataset agent, then restart the agent
and confirm it restarts at the correct spot.
"""
log.info("========== START QUAL TEST SHUTDOWN RESTART ===============")
# This Telemetered file has 2 sets of telemetered data.
# First set has 4 instrument records and second set has 6.
# 1 metadata record for each set.
self.create_sample_data_set_dir('tel_vel3d_l_2.dat', DIR_TEL, FILE_TEL)
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
log.info("========== START TELEMETERED SAMPLING ===============")
self.assert_start_sampling()
try:
# Verify that we get 4 instrument particles from the telemetered data file.
samples = 4
log.info("===== READ %d TELEMETERED INSTRUMENT PARTICLES =====", samples)
result = self.data_subscribers.get_samples(
Vel3dLWfpDataParticleType.SIO_INSTRUMENT_PARTICLE,
samples, 10)
# Verify that we get 1 metadata particle from the telemetered data file.
samples = 1
log.info("===== READ %d TELEMETERED METADATA PARTICLES =====", samples)
meta_result = self.data_subscribers.get_samples(
Vel3dLWfpDataParticleType.SIO_METADATA_PARTICLE,
samples, 10)
# Combine the instrument and metadata particles and verify results.
result.extend(meta_result)
self.assert_data_values(result, 'tel_vel3d_l_2_1-5.yml')
log.info("========== STOP TELEMETERED SAMPLING AND AGENT ===============")
self.assert_stop_sampling()
# Stop the agent
self.stop_dataset_agent_client()
# Re-start the agent
self.init_dataset_agent_client()
# Re-initialize
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
# Restart sampling and get the last 6 instrument particles of
# telemetered file and combine with the previous ones we read.
log.info("========== RESTART TELEMETERED ===============")
self.assert_start_sampling()
# Verify that we get 6 instrument particles from the telemetered data file.
samples = 6
log.info("===== READ %d TELEMETERED INSTRUMENT PARTICLES =====", samples)
inst_result = self.data_subscribers.get_samples(
Vel3dLWfpDataParticleType.SIO_INSTRUMENT_PARTICLE,
samples, 10)
result.extend(inst_result)
# Verify that we get 1 metadata particle from the telemetered data file.
samples = 1
log.info("===== READ %d TELEMETERED METADATA PARTICLES =====", samples)
meta_result = self.data_subscribers.get_samples(
Vel3dLWfpDataParticleType.SIO_METADATA_PARTICLE,
samples, 10)
# Combine the instrument and metadata particles and verify results.
result.extend(meta_result)
self.assert_data_values(result, 'tel_vel3d_l_2.yml')
except SampleTimeout as e:
log.error("Telemetered Exception trapped: %s", e, exc_info=True)
self.fail("Telemetered Sample timeout.")
self.assert_stop_sampling()
# This Recovered file has 2 sets of recovered data.
# First set has 4 instrument records and second set has 6.
# 1 metadata record for each set.
self.create_sample_data_set_dir('rec_vel3d_l_2.dat', DIR_REC, FILE_REC2)
log.info("========== START RECOVERED SAMPLING ===============")
self.assert_start_sampling()
try:
# Verify that we get 7 instrument particles from the recovered data file.
log.info("===== READ RECOVERED INSTRUMENT PARTICLES =====")
inst_result = self.data_subscribers.get_samples(
Vel3dLWfpDataParticleType.WFP_INSTRUMENT_PARTICLE, 7, 10)
self.assert_data_values(inst_result, 'rec_vel3d_l_2_inst1-7.yml')
# Verify that we get 1 metadata particle from the recovered data file.
log.info("===== READ RECOVERED METADATA PARTICLES =====")
meta_result = self.data_subscribers.get_samples(
Vel3dLWfpDataParticleType.WFP_METADATA_PARTICLE, 1, 10)
self.assert_data_values(meta_result, 'rec_vel3d_l_2_meta1.yml')
log.info("========== STOP RECOVERED SAMPLING AND AGENT ===============")
self.assert_stop_sampling()
# Stop the agent
self.stop_dataset_agent_client()
# Re-start the agent
self.init_dataset_agent_client()
# Re-initialize
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
# Restart sampling.
log.info("========== RESTART RECOVERED ===============")
self.assert_start_sampling()
# Get the last 3 instrument particles, combine them with the
# first 7, and verify the contents of all 10.
log.info("===== READ RECOVERED INSTRUMENT PARTICLES =====")
result = self.data_subscribers.get_samples(
Vel3dLWfpDataParticleType.WFP_INSTRUMENT_PARTICLE, 3, 10)
inst_result.extend(result)
self.assert_data_values(inst_result, 'rec_vel3d_l_2_inst1-10.yml')
# Verify that we get 1 metadata particle from the recovered data file.
log.info("===== READ RECOVERED METADATA PARTICLES =====")
meta_result = self.data_subscribers.get_samples(
Vel3dLWfpDataParticleType.WFP_METADATA_PARTICLE, 1, 10)
self.assert_data_values(meta_result, 'rec_vel3d_l_2_meta2.yml')
except SampleTimeout as e:
log.error("Recovered Exception trapped: %s", e, exc_info=True)
self.fail("Recovered Sample timeout.")
log.info("========== END QUAL TEST SHUTDOWN RESTART ===============")
def test_stop_start(self):
"""
        Test the agent's ability to start data flowing, stop, then restart
at the correct spot.
"""
log.info("========== START QUAL TEST STOP START ===============")
# This file has 2 sets of recovered data.
# First set has 4 instrument records and second set has 6.
# 1 metadata record for each set.
self.create_sample_data_set_dir('rec_vel3d_l_2.dat', DIR_REC, FILE_REC2)
inst_particle = Vel3dLWfpDataParticleType.WFP_INSTRUMENT_PARTICLE
meta_particle = Vel3dLWfpDataParticleType.WFP_METADATA_PARTICLE
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
log.info("========== START RECOVERED SAMPLING ===============")
self.assert_start_sampling()
try:
# Verify that we get 4 instrument particles from the recovered data file.
samples = 4
log.info("===== READ %d RECOVERED INSTRUMENT PARTICLES =====", samples)
result = self.data_subscribers.get_samples(inst_particle, samples, 10)
# Verify that we get 1 metadata particle from the recovered data file.
samples = 1
log.info("===== READ %d RECOVERED METADATA PARTICLES =====", samples)
meta_result = self.data_subscribers.get_samples(meta_particle, samples, 10)
# Combine the instrument and metadata particles and verify results.
result.extend(meta_result)
self.assert_data_values(result, 'rec_vel3d_l_2_1-5.yml')
log.info("========== STOP RECOVERED SAMPLING ===============")
self.assert_stop_sampling()
# Restart sampling and get the last 6 instrument particles of recovered
# file and combine with the previous ones we read.
log.info("========== RESTART RECOVERED ===============")
self.assert_start_sampling()
samples = 6
log.info("===== READ %d RECOVERED INSTRUMENT PARTICLES (RESTART) =====",
samples)
inst_result = self.data_subscribers.get_samples(inst_particle, samples, 10)
result.extend(inst_result)
# Verify that we get 1 metadata particle from the recovered data file.
samples = 1
log.info("===== READ %d RECOVERED METADATA PARTICLES (RESTART) =====",
samples)
meta_result = self.data_subscribers.get_samples(meta_particle, samples, 10)
# Combine the instrument and metadata particles and verify results.
result.extend(meta_result)
self.assert_data_values(result, 'rec_vel3d_l_2.yml')
except SampleTimeout as e:
log.error("Recovered Exception trapped: %s", e, exc_info=True)
self.fail("Recovered Sample timeout.")
log.info("========== STOP SAMPLING ===============")
self.assert_stop_sampling()
# Now repeat for an SIO file with similar contents.
# This file has 2 sets of telemetered data.
# First set has 4 instrument records and second set has 6.
# 1 metadata record for each set.
self.create_sample_data_set_dir('tel_vel3d_l_2.dat', DIR_TEL, FILE_TEL)
inst_particle = Vel3dLWfpDataParticleType.SIO_INSTRUMENT_PARTICLE
meta_particle = Vel3dLWfpDataParticleType.SIO_METADATA_PARTICLE
log.info("========== START TELEMETERED SAMPLING ===============")
self.assert_start_sampling()
try:
# Verify that we get 4 instrument particles from the telemetered data file.
samples = 4
log.info("===== READ %d TELEMETERED INSTRUMENT PARTICLES =====", samples)
result = self.data_subscribers.get_samples(inst_particle, samples, 10)
# Verify that we get 1 metadata particle from the telemetered data file.
samples = 1
log.info("===== READ %d TELEMETERED METADATA PARTICLES =====", samples)
meta_result = self.data_subscribers.get_samples(meta_particle, samples, 10)
# Combine the instrument and metadata particles and verify results.
result.extend(meta_result)
self.assert_data_values(result, 'tel_vel3d_l_2_1-5.yml')
log.info("========== STOP TELEMETERED SAMPLING ===============")
self.assert_stop_sampling()
# Restart sampling and get the last 6 instrument particles of
# telemetered file and combine with the previous ones we read.
log.info("========== RESTART TELEMETERED ===============")
self.assert_start_sampling()
samples = 6
log.info("===== READ %d TELEMETERED INSTRUMENT PARTICLES (RESTART) =====",
samples)
inst_result = self.data_subscribers.get_samples(inst_particle, samples, 10)
result.extend(inst_result)
# Verify that we get 1 metadata particle from the telemetered data file.
samples = 1
log.info("===== READ %d TELEMETERED METADATA PARTICLES (RESTART) =====",
samples)
meta_result = self.data_subscribers.get_samples(meta_particle, samples, 10)
# Combine the instrument and metadata particles and verify results.
result.extend(meta_result)
self.assert_data_values(result, 'tel_vel3d_l_2.yml')
except SampleTimeout as e:
log.error("Telemetered Exception trapped: %s", e, exc_info=True)
self.fail("Telemetered Sample timeout.")
log.info("========== END QUAL TEST STOP START ===============")
def test_tel_large_import(self):
"""'vel3d_l_wfp_sio_mule'
Test importing a large number of samples at once from the telemetered file
"""
log.info("======= START QUAL TEST TELEMETERED LARGE IMPORT =============")
# The telemetered data file referenced in the IDD contains 454 vel3d_l SIO
# blocks which contain 16374 instrument records and 1 metadata record for
# each SIO block.
self.create_sample_data_set_dir('node58p1.dat', DIR_TEL, FILE_TEL)
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
self.assert_start_sampling()
log.info("========== READING TELEMETERED PARTICLES ==============")
try:
samples = 16374
particle = Vel3dLWfpDataParticleType.SIO_INSTRUMENT_PARTICLE
log.info("===== READ %d TELEMETERED INSTRUMENT PARTICLES =====", samples)
self.data_subscribers.get_samples(particle, samples, 1000)
samples = 454
particle = Vel3dLWfpDataParticleType.SIO_METADATA_PARTICLE
log.info("===== READ %d TELEMETERED METADATA PARTICLES =====", samples)
self.data_subscribers.get_samples(particle, samples, 300)
except SampleTimeout as e:
log.error("Exception trapped: %s", e, exc_info=True)
self.fail("Sample timeout.")
log.info("========= END QUAL TEST TELEMETERED LARGE IMPORT =============")
| bsd-2-clause | 3,705,683,549,403,056,600 | 43.217503 | 106 | 0.584021 | false |
cg31/tensorflow | tensorflow/contrib/layers/python/layers/feature_column.py | 1 | 77352 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This API defines FeatureColumn abstraction.
To distinguish the concept of a feature family from a specific binary feature
within that family, we refer to a feature family like "country" as a feature
column. For example "country:US" is a feature which is in "country" feature
column and has a feature value ("US").
Supported feature types are:
* _SparseColumn: also known as categorical features.
* _RealValuedColumn: also known as continuous features.
Supported transformations on above features are:
* Bucketization: also known as binning.
* Crossing: also known as conjunction or combination.
* Embedding.
Typical usage example:
```python
# Define features and transformations
country = sparse_column_with_keys(column_name="native_country",
keys=["US", "BRA", ...])
country_emb = embedding_column(sparse_id_column=country, dimension=3,
combiner="sum")
occupation = sparse_column_with_hash_bucket(column_name="occupation",
hash_bucket_size=1000)
occupation_emb = embedding_column(sparse_id_column=occupation, dimension=16,
combiner="sum")
occupation_x_country = crossed_column(columns=[occupation, country],
hash_bucket_size=10000)
age = real_valued_column("age")
age_buckets = bucketized_column(
source_column=age,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
my_features = [occupation_emb, age_buckets, country_emb]
# Building model via layers
columns_to_tensor = parse_feature_columns_from_examples(
serialized=my_data,
feature_columns=my_features)
first_layer = input_from_feature_columns(
columns_to_tensors=columns_to_tensor,
feature_columns=my_features)
second_layer = fully_connected(first_layer, ...)
# Building model via tf.learn.estimators
estimator = DNNLinearCombinedClassifier(
linear_feature_columns=my_wide_features,
dnn_feature_columns=my_deep_features,
dnn_hidden_units=[500, 250, 50])
estimator.train(...)
See feature_column_ops_test for more examples.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import math
from tensorflow.contrib.framework.python.framework import deprecation
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.layers.python.ops import bucketization_op
from tensorflow.contrib.layers.python.ops import sparse_feature_cross_op
from tensorflow.contrib.lookup import lookup_ops as contrib_lookup_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import tf_logging as logging
class _LinearEmbeddingLookupArguments(
collections.namedtuple("_LinearEmbeddingLookupArguments",
["input_tensor",
"weight_tensor",
"vocab_size",
"initializer",
"combiner"])):
"""Represents the information needed from a column for embedding lookup.
  Used to compute DNN inputs and weighted sums.
"""
pass
class _DeepEmbeddingLookupArguments(
collections.namedtuple("_DeepEmbeddingLookupArguments",
["input_tensor",
"weight_tensor",
"vocab_size",
"initializer",
"combiner",
"dimension",
"shared_embedding_name",
"hashed"])):
"""Represents the information needed from a column for embedding lookup.
  Used to compute DNN inputs and weighted sums.
"""
pass
class _FeatureColumn(object):
"""Represents a feature column abstraction.
  To distinguish the concept of a feature family from a specific binary feature
  within that family, we refer to a feature family like "country" as a feature
column. For example "country:US" is a feature which is in "country" feature
column and has a feature value ("US").
  This class is abstract. Users should not create instances of it directly.
Following classes (_SparseColumn, _RealValuedColumn, ...) are concrete
instances.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
@deprecation.deprecated(
"2016-09-25",
"Should be private.")
def name(self):
"""Returns the name of column or transformed column."""
pass
@abc.abstractproperty
@deprecation.deprecated(
"2016-09-25",
"Should be private.")
def config(self):
"""Returns configuration of the base feature for `tf.parse_example`."""
pass
@abc.abstractproperty
@deprecation.deprecated(
"2016-09-25",
"Should be private.")
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
pass
@abc.abstractmethod
@deprecation.deprecated(
"2016-09-25",
"Should be private.")
def insert_transformed_feature(self, columns_to_tensors):
"""Apply transformation and inserts it into columns_to_tensors.
Args:
columns_to_tensors: A mapping from feature columns to tensors. 'string'
key means a base feature (not-transformed). It can have _FeatureColumn
as a key too. That means that _FeatureColumn is already transformed.
"""
raise NotImplementedError("Transform is not implemented for {}.".format(
self))
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collection=None,
trainable=True,
output_rank=2):
"""Returns a Tensor as an input to the first layer of neural network."""
raise ValueError("Calling an abstract method.")
def _deep_embedding_lookup_arguments(self, input_tensor):
"""Returns arguments to embedding lookup to build an input layer."""
raise NotImplementedError(
"No deep embedding lookup arguments for column {}.".format(self))
# It is expected that classes implement either wide_embedding_lookup_arguments
# or to_dense_tensor to be used in linear models.
# pylint: disable=unused-argument
def _wide_embedding_lookup_arguments(self, input_tensor):
"""Returns arguments to look up embeddings for this column."""
raise NotImplementedError("Calling an abstract method.")
# pylint: disable=unused-argument
def _to_dense_tensor(self, input_tensor):
"""Returns a dense tensor representing this column's values."""
raise NotImplementedError("Calling an abstract method.")
def _checkpoint_path(self):
"""Returns None, or a (path,tensor_name) to load a checkpoint from."""
return None
def _key_without_properties(self, properties):
"""Helper method for self.key() that omits particular properties."""
fields_values = []
# pylint: disable=protected-access
for i, k in enumerate(self._fields):
if k in properties:
# Excludes a property from the key.
# For instance, exclude `initializer` from the key of EmbeddingColumn
# since we don't support users specifying different initializers for
# the same embedding column. Ditto for `normalizer` and
# RealValuedColumn.
# Special treatment is needed since the default str form of a
# function contains its address, which could introduce non-determinism
# in sorting.
continue
fields_values.append("{}={}".format(k, self[i]))
# pylint: enable=protected-access
# This is effectively the same format as str(self), except with our special
# treatment.
return "{}({})".format(type(self).__name__, ", ".join(fields_values))
# TODO(b/30410315): Support warm starting in all feature columns.
class _SparseColumn(_FeatureColumn,
collections.namedtuple("_SparseColumn",
["column_name", "is_integerized",
"bucket_size", "lookup_config",
"combiner", "dtype"])):
"""Represents a sparse feature column also known as categorical features.
Instances of this class are immutable. A sparse column means features are
  sparse and the dictionary returned by InputBuilder contains a
("column_name", SparseTensor) pair.
One and only one of bucket_size or lookup_config should be set. If
is_integerized is True then bucket_size should be set.
Attributes:
column_name: A string defining sparse column name.
    is_integerized: A bool. If True, the type of the feature is an integer.
      Integerized means we can use the feature itself as an id.
bucket_size: An int that is > 1. The number of buckets.
lookup_config: A _SparseIdLookupConfig defining feature-to-id lookup
configuration
combiner: A string specifying how to reduce if the sparse column is
multivalent. Currently "mean", "sqrtn" and "sum" are supported, with
"sum" the default:
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
dtype: Type of features, such as `tf.string` or `tf.int64`.
Raises:
TypeError: if lookup_config is not a _SparseIdLookupConfig.
ValueError: if above expectations about input fails.
"""
def __new__(cls,
column_name,
is_integerized=False,
bucket_size=None,
lookup_config=None,
combiner="sqrtn",
dtype=dtypes.string):
if is_integerized and bucket_size is None:
raise ValueError("bucket_size must be set if is_integerized is True. "
"column_name: {}".format(column_name))
if is_integerized and not dtype.is_integer:
raise ValueError("dtype must be an integer if is_integerized is True. "
"dtype: {}, column_name: {}.".format(dtype, column_name))
if bucket_size is None and lookup_config is None:
raise ValueError("one of bucket_size or lookup_config must be set. "
"column_name: {}".format(column_name))
if bucket_size is not None and lookup_config:
raise ValueError("one and only one of bucket_size or lookup_config "
"must be set. column_name: {}".format(column_name))
if bucket_size is not None and bucket_size < 2:
raise ValueError("bucket_size must be at least 2. "
"bucket_size: {}, column_name: {}".format(bucket_size,
column_name))
if ((lookup_config) and
(not isinstance(lookup_config, _SparseIdLookupConfig))):
raise TypeError(
"lookup_config must be an instance of _SparseIdLookupConfig. "
"Given one is in type {} for column_name {}".format(
type(lookup_config), column_name))
if (lookup_config and lookup_config.vocabulary_file and
lookup_config.vocab_size is None):
raise ValueError("vocab_size must be defined. "
"column_name: {}".format(column_name))
return super(_SparseColumn, cls).__new__(cls, column_name, is_integerized,
bucket_size, lookup_config,
combiner, dtype)
@property
def name(self):
return self.column_name
@property
def length(self):
"""Returns vocabulary or hash_bucket size."""
if self.bucket_size is not None:
return self.bucket_size
return self.lookup_config.vocab_size + self.lookup_config.num_oov_buckets
@property
def config(self):
return {self.column_name: parsing_ops.VarLenFeature(self.dtype)}
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return "{}".format(self)
def id_tensor(self, input_tensor):
"""Returns the id tensor from the given transformed input_tensor."""
return input_tensor
# pylint: disable=unused-argument
def weight_tensor(self, input_tensor):
"""Returns the weight tensor from the given transformed input_tensor."""
return None
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
raise ValueError(
"SparseColumn is not supported in DNN. "
"Please use embedding_column or one_hot_column. column: {}".format(
self))
def _wide_embedding_lookup_arguments(self, input_tensor):
return _LinearEmbeddingLookupArguments(
input_tensor=self.id_tensor(input_tensor),
weight_tensor=self.weight_tensor(input_tensor),
vocab_size=self.length,
initializer=init_ops.zeros_initializer,
combiner=self.combiner)
def is_compatible(self, other_column):
"""Check compatability of two sparse columns."""
if self.lookup_config and other_column.lookup_config:
return self.lookup_config == other_column.lookup_config
compatible = (self.length == other_column.length and
(self.dtype == other_column.dtype or
(self.dtype.is_integer and other_column.dtype.is_integer)))
if compatible:
logging.warn("Column {} and {} may not have the same vocabulary.".
format(self.name, other_column.name))
return compatible
class _SparseColumnIntegerized(_SparseColumn):
"""See `sparse_column_with_integerized_feature`."""
def __new__(cls, column_name, bucket_size, combiner="sqrtn",
dtype=dtypes.int64):
if not dtype.is_integer:
raise ValueError("dtype must be an integer. "
"dtype: {}, column_name: {}".format(dtype, column_name))
return super(_SparseColumnIntegerized, cls).__new__(
cls,
column_name,
is_integerized=True,
bucket_size=bucket_size,
combiner=combiner,
dtype=dtype)
def insert_transformed_feature(self, columns_to_tensors):
"""Handles sparse column to id conversion."""
sparse_id_values = math_ops.mod(columns_to_tensors[self.name].values,
self.bucket_size,
name="mod")
columns_to_tensors[self] = ops.SparseTensor(
columns_to_tensors[self.name].indices, sparse_id_values,
columns_to_tensors[self.name].shape)
def sparse_column_with_integerized_feature(column_name,
bucket_size,
combiner=None,
dtype=dtypes.int64):
"""Creates an integerized _SparseColumn.
Use this when your features are already pre-integerized into int64 IDs.
output_id = input_feature
Args:
column_name: A string defining sparse column name.
    bucket_size: An int that is > 1. The number of buckets. It should be bigger
      than the maximum feature value. In other words, features in this column
      should be int64s in the range [0, bucket_size).
combiner: A string specifying how to reduce if the sparse column is
multivalent. Currently "mean", "sqrtn" and "sum" are supported, with
"sum" the default:
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
dtype: Type of features. It should be an integer type. Default value is
dtypes.int64.
Returns:
An integerized _SparseColumn definition.
Raises:
ValueError: bucket_size is not greater than 1.
ValueError: dtype is not integer.
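  A minimal usage sketch (the "video_id" feature name and sizes below are
  illustrative only, not part of this API):
  ```python
  # "video_id" values are already int64 ids in the range [0, 100000).
  video = sparse_column_with_integerized_feature(column_name="video_id",
                                                 bucket_size=100000)
  video_emb = embedding_column(sparse_id_column=video, dimension=16)
  ```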
"""
if combiner is None:
logging.warn("The default value of combiner will change from \"sum\" "
"to \"sqrtn\" after 2016/11/01.")
combiner = "sum"
return _SparseColumnIntegerized(
column_name, bucket_size, combiner=combiner, dtype=dtype)
class _SparseColumnHashed(_SparseColumn):
"""See `sparse_column_with_hash_bucket`."""
def __new__(cls,
column_name,
hash_bucket_size,
combiner="sum",
dtype=dtypes.string):
if dtype != dtypes.string and not dtype.is_integer:
raise ValueError("dtype must be string or integer. "
"dtype: {}, column_name: {}".format(dtype, column_name))
return super(_SparseColumnHashed, cls).__new__(
cls,
column_name,
bucket_size=hash_bucket_size,
combiner=combiner,
dtype=dtype)
def insert_transformed_feature(self, columns_to_tensors):
"""Handles sparse column to id conversion."""
sparse_tensor = columns_to_tensors[self.name]
if self.dtype.is_integer:
sparse_values = string_ops.as_string(sparse_tensor.values)
else:
sparse_values = sparse_tensor.values
sparse_id_values = string_ops.string_to_hash_bucket_fast(
sparse_values, self.bucket_size, name="lookup")
columns_to_tensors[self] = ops.SparseTensor(
sparse_tensor.indices, sparse_id_values, sparse_tensor.shape)
def sparse_column_with_hash_bucket(column_name,
hash_bucket_size,
combiner=None,
dtype=dtypes.string):
"""Creates a _SparseColumn with hashed bucket configuration.
Use this when your sparse features are in string or integer format, but you
don't have a vocab file that maps each value to an integer ID.
output_id = Hash(input_feature_string) % bucket_size
Args:
column_name: A string defining sparse column name.
hash_bucket_size: An int that is > 1. The number of buckets.
combiner: A string specifying how to reduce if the sparse column is
multivalent. Currently "mean", "sqrtn" and "sum" are supported, with
"sum" the default:
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
dtype: The type of features. Only string and integer types are supported.
Returns:
A _SparseColumn with hashed bucket configuration
Raises:
    ValueError: hash_bucket_size is not greater than 1.
ValueError: dtype is neither string nor integer.
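  A minimal usage sketch (the "occupation" feature name is illustrative only):
  ```python
  occupation = sparse_column_with_hash_bucket(column_name="occupation",
                                              hash_bucket_size=1000)
  occupation_emb = embedding_column(sparse_id_column=occupation, dimension=16)
  ```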
"""
if combiner is None:
logging.warn("The default value of combiner will change from \"sum\" "
"to \"sqrtn\" after 2016/11/01.")
combiner = "sum"
return _SparseColumnHashed(column_name, hash_bucket_size, combiner, dtype)
class _SparseColumnKeys(_SparseColumn):
"""See `sparse_column_with_keys`."""
def __new__(cls, column_name, keys, default_value=-1, combiner="sum"):
return super(_SparseColumnKeys, cls).__new__(
cls,
column_name,
combiner=combiner,
lookup_config=_SparseIdLookupConfig(
keys=keys, vocab_size=len(keys), default_value=default_value),
dtype=dtypes.string)
def insert_transformed_feature(self, columns_to_tensors):
"""Handles sparse column to id conversion."""
columns_to_tensors[self] = contrib_lookup_ops.string_to_index(
tensor=columns_to_tensors[self.name],
mapping=list(self.lookup_config.keys),
default_value=self.lookup_config.default_value,
name="lookup")
def sparse_column_with_keys(column_name, keys, default_value=-1,
combiner=None):
"""Creates a _SparseColumn with keys.
Look up logic is as follows:
lookup_id = index_of_feature_in_keys if feature in keys else default_value
Args:
column_name: A string defining sparse column name.
keys: a string list defining vocabulary.
default_value: The value to use for out-of-vocabulary feature values.
Default is -1.
combiner: A string specifying how to reduce if the sparse column is
multivalent. Currently "mean", "sqrtn" and "sum" are supported, with
"sum" the default:
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
Returns:
A _SparseColumnKeys with keys configuration.
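  A minimal usage sketch (the "gender" feature name and keys are illustrative
  only, not part of this API):
  ```python
  gender = sparse_column_with_keys(column_name="gender",
                                   keys=["female", "male"])
  # "female" maps to id 0, "male" to id 1, any other value to default_value (-1).
  ```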
"""
if combiner is None:
logging.warn("The default value of combiner will change from \"sum\" "
"to \"sqrtn\" after 2016/11/01.")
combiner = "sum"
return _SparseColumnKeys(
column_name, tuple(keys), default_value=default_value, combiner=combiner)
class _WeightedSparseColumn(_FeatureColumn, collections.namedtuple(
"_WeightedSparseColumn",
["sparse_id_column", "weight_column_name", "dtype"])):
"""See `weighted_sparse_column`."""
def __new__(cls, sparse_id_column, weight_column_name, dtype):
return super(_WeightedSparseColumn, cls).__new__(cls, sparse_id_column,
weight_column_name, dtype)
@property
def name(self):
return "{}_weighted_by_{}".format(self.sparse_id_column.name,
self.weight_column_name)
@property
def length(self):
"""Returns id size."""
return self.sparse_id_column.length
@property
def config(self):
config = _get_feature_config(self.sparse_id_column)
config.update(
{self.weight_column_name: parsing_ops.VarLenFeature(self.dtype)})
return config
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return "{}".format(self)
def insert_transformed_feature(self, columns_to_tensors):
"""Inserts a tuple with the id and weight tensors."""
if self.sparse_id_column not in columns_to_tensors:
self.sparse_id_column.insert_transformed_feature(columns_to_tensors)
columns_to_tensors[self] = tuple([
columns_to_tensors[self.sparse_id_column],
columns_to_tensors[self.weight_column_name]
])
def id_tensor(self, input_tensor):
"""Returns the id tensor from the given transformed input_tensor."""
return input_tensor[0]
def weight_tensor(self, input_tensor):
"""Returns the weight tensor from the given transformed input_tensor."""
return input_tensor[1]
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
raise ValueError(
"WeightedSparseColumn is not supported in DNN. "
"Please use embedding_column or one_hot_column. column: {}".format(
self))
def _wide_embedding_lookup_arguments(self, input_tensor):
return _LinearEmbeddingLookupArguments(
input_tensor=self.id_tensor(input_tensor),
weight_tensor=self.weight_tensor(input_tensor),
vocab_size=self.length,
initializer=init_ops.zeros_initializer,
combiner=self.sparse_id_column.combiner)
def weighted_sparse_column(sparse_id_column,
weight_column_name,
dtype=dtypes.float32):
"""Creates a _SparseColumn by combining sparse_id_column with a weight column.
Args:
sparse_id_column: A `_SparseColumn` which is created by
`sparse_column_with_*` functions.
weight_column_name: A string defining a sparse column name which represents
weight or value of the corresponding sparse id feature.
dtype: Type of weights, such as `tf.float32`
Returns:
A _WeightedSparseColumn composed of two sparse features: one represents id,
the other represents weight (value) of the id feature in that example.
Raises:
ValueError: if dtype is not convertible to float.
An example usage:
```python
words = sparse_column_with_hash_bucket("words", 1000)
tfidf_weighted_words = weighted_sparse_column(words, "tfidf_score")
```
This configuration assumes that input dictionary of model contains the
following two items:
* (key="words", value=word_tensor) where word_tensor is a SparseTensor.
* (key="tfidf_score", value=tfidf_score_tensor) where tfidf_score_tensor
is a SparseTensor.
Following are assumed to be true:
* word_tensor.indices = tfidf_score_tensor.indices
* word_tensor.shape = tfidf_score_tensor.shape
"""
if not (dtype.is_integer or dtype.is_floating):
raise ValueError("dtype is not convertible to float. Given {}".format(
dtype))
return _WeightedSparseColumn(sparse_id_column, weight_column_name, dtype)
class _OneHotColumn(_FeatureColumn,
collections.namedtuple("_OneHotColumn",
["sparse_id_column"])):
"""Represents a one-hot column for use in deep networks.
Args:
sparse_id_column: A _SparseColumn which is created by `sparse_column_with_*`
function.
"""
@property
def name(self):
return "{}_one_hot".format(self.sparse_id_column.name)
@property
def length(self):
"""Returns vocabulary or hash_bucket size."""
return self.sparse_id_column.length
@property
def config(self):
"""Returns the parsing config of the origin column."""
return _get_feature_config(self.sparse_id_column)
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return "{}".format(self)
def insert_transformed_feature(self, columns_to_tensors):
"""Used by the Transformer to prevent double transformations."""
if self.sparse_id_column not in columns_to_tensors:
self.sparse_id_column.insert_transformed_feature(columns_to_tensors)
columns_to_tensors[self] = columns_to_tensors[self.sparse_id_column]
def _to_dnn_input_layer(self,
transformed_input_tensor,
unused_weight_collections=None,
unused_trainable=False,
output_rank=2):
"""Returns a Tensor as an input to the first layer of neural network.
Args:
transformed_input_tensor: A tensor that has undergone the transformations
in `insert_transformed_feature`. Rank should be >= `output_rank`.
unused_weight_collections: Unused. One hot encodings are not variable.
unused_trainable: Unused. One hot encodings are not trainable.
output_rank: the desired rank of the output `Tensor`.
Returns:
A multihot Tensor to be fed into the first layer of neural network.
Raises:
ValueError: When using one_hot_column with weighted_sparse_column.
This is not yet supported.
"""
if (self.sparse_id_column.weight_tensor(transformed_input_tensor) is
not None):
raise ValueError("one_hot_column does not yet support "
"weighted_sparse_column. Column: {}".format(self))
# Reshape ID column to `output_rank`.
sparse_id_column = self.sparse_id_column.id_tensor(transformed_input_tensor)
# pylint: disable=protected-access
sparse_id_column = layers._inner_flatten(sparse_id_column, output_rank)
dense_id_tensor = sparse_ops.sparse_tensor_to_dense(sparse_id_column,
default_value=-1)
# One hot must be float for tf.concat reasons since all other inputs to
# input_layer are float32.
one_hot_id_tensor = array_ops.one_hot(
dense_id_tensor, depth=self.length, on_value=1.0, off_value=0.0)
# Reduce to get a multi-hot per example.
return math_ops.reduce_sum(
one_hot_id_tensor, reduction_indices=[output_rank - 1])
class _EmbeddingColumn(_FeatureColumn, collections.namedtuple(
"_EmbeddingColumn",
["sparse_id_column", "dimension", "combiner", "initializer",
"ckpt_to_load_from", "tensor_name_in_ckpt", "shared_embedding_name",
"shared_vocab_size"])):
"""Represents an embedding column.
Args:
sparse_id_column: A `_SparseColumn` which is created by
`sparse_column_with_*` or `weighted_sparse_column` functions.
dimension: An integer specifying dimension of the embedding.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently "mean", "sqrtn" and "sum" are supported. Each
      of these can be thought of as an example-level normalization on the column:
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean 0.0 and standard deviation
1/sqrt(sparse_id_column.length).
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
shared_embedding_name: (Optional). The common name for shared embedding.
shared_vocab_size: (Optional). The common vocab_size used for shared
embedding space.
Raises:
ValueError: if `initializer` is specified and is not callable. Also,
if only one of `ckpt_to_load_from` and `tensor_name_in_ckpt` is specified.
"""
def __new__(cls,
sparse_id_column,
dimension,
combiner="sqrtn",
initializer=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
shared_embedding_name=None,
shared_vocab_size=None):
if initializer is not None and not callable(initializer):
raise ValueError("initializer must be callable if specified. "
"Embedding of column_name: {}".format(
sparse_id_column.name))
if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
raise ValueError("Must specify both `ckpt_to_load_from` and "
"`tensor_name_in_ckpt` or none of them.")
if initializer is None:
stddev = 1 / math.sqrt(sparse_id_column.length)
# TODO(b/25671353): Better initial value?
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=stddev)
return super(_EmbeddingColumn, cls).__new__(cls, sparse_id_column,
dimension, combiner,
initializer, ckpt_to_load_from,
tensor_name_in_ckpt,
shared_embedding_name,
shared_vocab_size)
@property
def name(self):
if self.shared_embedding_name is None:
return "{}_embedding".format(self.sparse_id_column.name)
else:
return "{}_shared_embedding".format(self.sparse_id_column.name)
@property
def length(self):
"""Returns id size."""
if self.shared_vocab_size is None:
return self.sparse_id_column.length
else:
return self.shared_vocab_size
@property
def config(self):
return _get_feature_config(self.sparse_id_column)
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return self._key_without_properties(["initializer"])
def insert_transformed_feature(self, columns_to_tensors):
if self.sparse_id_column not in columns_to_tensors:
self.sparse_id_column.insert_transformed_feature(columns_to_tensors)
columns_to_tensors[self] = columns_to_tensors[self.sparse_id_column]
def _deep_embedding_lookup_arguments(self, input_tensor):
return _DeepEmbeddingLookupArguments(
input_tensor=self.sparse_id_column.id_tensor(input_tensor),
weight_tensor=self.sparse_id_column.weight_tensor(input_tensor),
vocab_size=self.length,
dimension=self.dimension,
initializer=self.initializer,
combiner=self.combiner,
shared_embedding_name=self.shared_embedding_name,
hashed=False)
def _checkpoint_path(self):
if self.ckpt_to_load_from is not None:
return self.ckpt_to_load_from, self.tensor_name_in_ckpt
return None
# pylint: disable=unused-argument
def _wide_embedding_lookup_arguments(self, input_tensor):
raise ValueError("Column {} is not supported in linear models. "
"Please use sparse_column.".format(self))
def one_hot_column(sparse_id_column):
"""Creates a _OneHotColumn.
Args:
sparse_id_column: A _SparseColumn which is created by
`sparse_column_with_*`
or crossed_column functions. Note that `combiner` defined in
`sparse_id_column` is ignored.
Returns:
An _OneHotColumn.
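  A minimal usage sketch (the "country" feature name and keys are illustrative
  only):
  ```python
  country = sparse_column_with_keys(column_name="country",
                                    keys=["US", "BRA", "MEX"])
  # Use the one-hot encoding as a DNN input; sparse columns themselves are
  # only accepted by linear models.
  country_one_hot = one_hot_column(country)
  ```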
"""
return _OneHotColumn(sparse_id_column)
def embedding_column(sparse_id_column,
dimension,
combiner=None,
initializer=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None):
"""Creates an `_EmbeddingColumn`.
Args:
sparse_id_column: A `_SparseColumn` which is created by for example
`sparse_column_with_*` or crossed_column functions. Note that `combiner`
defined in `sparse_id_column` is ignored.
dimension: An integer specifying dimension of the embedding.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently "mean", "sqrtn" and "sum" are supported. Each
      of these can be considered an example-level normalization on the column:
* "sum": do not normalize
* "mean": do l1 normalization
* "sqrtn": do l2 normalization
For more information: `tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean 0.0 and standard deviation
1/sqrt(sparse_id_column.length).
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
Returns:
An `_EmbeddingColumn`.
"""
if combiner is None:
logging.warn("The default value of combiner will change from \"mean\" "
"to \"sqrtn\" after 2016/11/01.")
combiner = "mean"
return _EmbeddingColumn(sparse_id_column, dimension, combiner, initializer,
ckpt_to_load_from, tensor_name_in_ckpt)
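# Illustrative usage sketch (added commentary, not part of the original source):
# an embedding column wraps a sparse column so it can feed a DNN input layer;
# the column name and sizes below are hypothetical.
#
#   language = sparse_column_with_hash_bucket("language", hash_bucket_size=1000)
#   language_emb = embedding_column(language, dimension=16, combiner="sqrtn")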
def shared_embedding_columns(sparse_id_columns,
dimension,
combiner=None,
shared_embedding_name=None,
initializer=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None):
"""Creates a list of `_EmbeddingColumn` sharing the same embedding.
Args:
sparse_id_columns: An iterable of `_SparseColumn`, such as those created by
`sparse_column_with_*` or crossed_column functions. Note that `combiner`
defined in each sparse_id_column is ignored.
dimension: An integer specifying dimension of the embedding.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently "mean", "sqrtn" and "sum" are supported. Each
      of these can be considered an example-level normalization on the column:
* "sum": do not normalize
* "mean": do l1 normalization
* "sqrtn": do l2 normalization
For more information: `tf.embedding_lookup_sparse`.
shared_embedding_name: (Optional). A string specifying the name of shared
embedding weights. This will be needed if you want to reference the shared
embedding separately from the generated `_EmbeddingColumn`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean 0.0 and standard deviation
1/sqrt(sparse_id_columns[0].length).
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
Returns:
A tuple of `_EmbeddingColumn` with shared embedding space.
Raises:
ValueError: if sparse_id_columns is empty, or its elements are not
compatible with each other.
TypeError: if at least one element of sparse_id_columns is not a
`SparseTensor`.
"""
if combiner is None:
logging.warn("The default value of combiner will change from \"mean\" "
"to \"sqrtn\" after 2016/11/01.")
combiner = "mean"
if len(sparse_id_columns) < 1:
raise ValueError("The input sparse_id_columns should have at least one "
"element.")
for sparse_id_column in sparse_id_columns:
if not isinstance(sparse_id_column, _SparseColumn):
raise TypeError("Elements of sparse_id_columns must be _SparseColumn, but"
"{} is not.".format(sparse_id_column))
if not isinstance(sparse_id_columns, list):
sparse_id_columns = list(sparse_id_columns)
if len(sparse_id_columns) == 1:
return [
_EmbeddingColumn(sparse_id_columns[0], dimension, combiner, initializer,
ckpt_to_load_from, tensor_name_in_ckpt,
shared_embedding_name)]
else:
# check compatibility of sparse_id_columns
compatible = True
for column in sparse_id_columns[1:]:
compatible = compatible and column.is_compatible(sparse_id_columns[0])
if not compatible:
raise ValueError("The input sparse id columns are not compatible.")
# Construct the shared name and size for shared embedding space.
if not shared_embedding_name:
if len(sparse_id_columns) <= 3:
shared_embedding_name = "_".join([column.name
for column in sparse_id_columns])
else:
shared_embedding_name = "_".join([column.name
for column in sparse_id_columns[0:3]])
shared_embedding_name += (
"_plus_{}_others".format(len(sparse_id_columns)-3))
shared_embedding_name += "_shared_embedding"
shared_vocab_size = sparse_id_columns[0].length
embedded_columns = []
for column in sparse_id_columns:
embedded_columns.append(
_EmbeddingColumn(column, dimension, combiner, initializer,
ckpt_to_load_from, tensor_name_in_ckpt,
shared_embedding_name, shared_vocab_size))
return tuple(embedded_columns)
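# Illustrative usage sketch (added commentary, not part of the original source):
# two compatible sparse columns sharing one embedding space, e.g. query and
# document terms drawn from the same hashed vocabulary; names are hypothetical.
#
#   query_terms = sparse_column_with_hash_bucket("query_terms", 10000)
#   doc_terms = sparse_column_with_hash_bucket("doc_terms", 10000)
#   shared_cols = shared_embedding_columns([query_terms, doc_terms], dimension=32)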
class _HashedEmbeddingColumn(collections.namedtuple(
"_HashedEmbeddingColumn", ["column_name", "size", "dimension", "combiner",
"initializer"]), _EmbeddingColumn):
"""See `hashed_embedding_column`."""
def __new__(cls,
column_name,
size,
dimension,
combiner="sqrtn",
initializer=None):
if initializer is not None and not callable(initializer):
raise ValueError("initializer must be callable if specified. "
"column_name: {}".format(column_name))
if initializer is None:
stddev = 0.1
# TODO(b/25671353): Better initial value?
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=stddev)
return super(_HashedEmbeddingColumn, cls).__new__(cls, column_name, size,
dimension, combiner,
initializer)
@property
def name(self):
return "{}_hashed_embedding".format(self.column_name)
@property
def config(self):
return {self.column_name: parsing_ops.VarLenFeature(dtypes.string)}
def insert_transformed_feature(self, columns_to_tensors):
columns_to_tensors[self] = columns_to_tensors[self.column_name]
def _deep_embedding_lookup_arguments(self, input_tensor):
return _DeepEmbeddingLookupArguments(
input_tensor=input_tensor,
weight_tensor=None,
vocab_size=self.size,
initializer=self.initializer,
combiner=self.combiner,
dimension=self.dimension,
shared_embedding_name=None,
hashed=True)
def hashed_embedding_column(column_name,
size,
dimension,
combiner=None,
initializer=None):
"""Creates an embedding column of a sparse feature using parameter hashing.
The i-th embedding component of a value v is found by retrieving an
embedding weight whose index is a fingerprint of the pair (v,i).
Args:
column_name: A string defining sparse column name.
size: An integer specifying the number of parameters in the embedding layer.
dimension: An integer specifying dimension of the embedding.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently "mean", "sqrtn" and "sum" are supported. Each
      of these can be thought of as example-level normalizations on the column:
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For more information: `tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean 0 and standard deviation 0.1.
Returns:
A _HashedEmbeddingColumn.
Raises:
ValueError: if dimension or size is not a positive integer; or if combiner
is not supported.
"""
if combiner is None:
logging.warn("The default value of combiner will change from \"mean\" "
"to \"sqrtn\" after 2016/11/01.")
combiner = "mean"
if (dimension < 1) or (size < 1):
raise ValueError("Dimension and size must be greater than 0. "
"dimension: {}, size: {}, column_name: {}".format(
dimension, size, column_name))
if combiner not in ("mean", "sqrtn", "sum"):
raise ValueError("Combiner must be one of 'mean', 'sqrtn' or 'sum'. "
"combiner: {}, column_name: {}".format(combiner,
column_name))
return _HashedEmbeddingColumn(column_name, size, dimension, combiner,
initializer)
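# Illustrative usage sketch (added commentary, not part of the original source):
# parameter hashing keeps the embedding table at a fixed `size` regardless of the
# vocabulary, which is useful for very large or open vocabularies; the column
# name below is hypothetical.
#
#   url_emb = hashed_embedding_column("url", size=100000, dimension=16)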
def _reshape_real_valued_tensor(input_tensor, output_rank, column_name=None):
"""Reshaping logic for dense, numeric `Tensors`.
Follows the following rules:
1. If `output_rank > input_rank + 1` raise a `ValueError`.
2. If `output_rank == input_rank + 1`, expand `input_tensor` by one
dimension and return
3. If `output_rank == input_rank`, return `input_tensor`.
4. If `output_rank < input_rank`, flatten the inner dimensions of
`input_tensor` and return a `Tensor` with `output_rank`
Args:
input_tensor: a dense `Tensor` to be reshaped.
output_rank: the desired rank of the reshaped `Tensor`.
column_name: (optional) the name of the associated column. Used for error
messages.
Returns:
A `Tensor` with the same entries as `input_tensor` and rank `output_rank`.
Raises:
ValueError: if `output_rank > input_rank + 1`.
"""
input_rank = input_tensor.get_shape().ndims
if input_rank is not None:
if output_rank > input_rank + 1:
error_string = ("Rank of input Tensor ({}) should be the same as "
"output_rank ({}). For example, sequence data should "
"typically be 3 dimensional (rank 3) while non-sequence "
"data is typically 2 dimensional (rank 2).".format(
input_rank, output_rank))
if column_name is not None:
error_string = ("Error while processing column {}.".format(column_name)
+ error_string)
raise ValueError(error_string)
if output_rank == input_rank + 1:
logging.warning(
"Rank of input Tensor ({}) should be the same as output_rank ({}) "
"for column. Will attempt to expand dims. It is highly recommended "
"that you resize your input, as this behavior may change.".format(
input_rank, output_rank))
return array_ops.expand_dims(input_tensor, -1, name="expand_dims")
if output_rank == input_rank:
return input_tensor
# Here, either `input_rank` is unknown or it is greater than `output_rank`.
return layers._inner_flatten(input_tensor, output_rank) # pylint: disable=protected-access
class _RealValuedColumn(_FeatureColumn, collections.namedtuple(
"_RealValuedColumn",
["column_name", "dimension", "default_value", "dtype", "normalizer"])):
"""Represents a real valued feature column also known as continuous features.
Instances of this class are immutable. A real valued column means features are
dense. It means dictionary returned by InputBuilder contains a
("column_name", Tensor) pair. Tensor shape should be (batch_size, 1).
"""
def __new__(cls, column_name, dimension, default_value, dtype, normalizer):
if default_value is not None:
default_value = tuple(default_value)
return super(_RealValuedColumn, cls).__new__(cls, column_name, dimension,
default_value, dtype,
normalizer)
@property
def name(self):
return self.column_name
@property
def config(self):
default_value = self.default_value
if default_value is not None:
default_value = list(default_value)
return {self.column_name: parsing_ops.FixedLenFeature([self.dimension],
self.dtype,
default_value)}
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return self._key_without_properties(["normalizer"])
@property
def normalizer_fn(self):
"""Returns the function used to normalize the column."""
return self.normalizer
def _normalized_input_tensor(self, input_tensor):
"""Returns the input tensor after custom normalization is applied."""
return (self.normalizer(input_tensor) if self.normalizer is not None else
input_tensor)
def insert_transformed_feature(self, columns_to_tensors):
"""Apply transformation and inserts it into columns_to_tensors.
Args:
columns_to_tensors: A mapping from feature columns to tensors. 'string'
key means a base feature (not-transformed). It can have _FeatureColumn
as a key too. That means that _FeatureColumn is already transformed.
"""
# Transform the input tensor according to the normalizer function.
input_tensor = self._normalized_input_tensor(columns_to_tensors[self.name])
columns_to_tensors[self] = math_ops.to_float(input_tensor)
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
if input_tensor.dtype != dtypes.float32:
input_tensor = math_ops.to_float(input_tensor)
return _reshape_real_valued_tensor(input_tensor, output_rank, self.name)
def _to_dense_tensor(self, input_tensor):
return input_tensor
def real_valued_column(column_name,
dimension=1,
default_value=None,
dtype=dtypes.float32,
normalizer=None):
"""Creates a _RealValuedColumn.
Args:
column_name: A string defining real valued column name.
dimension: An integer specifying dimension of the real valued column.
The default is 1. The Tensor representing the _RealValuedColumn
will have the shape of [batch_size, dimension].
default_value: A single value compatible with dtype or a list of values
compatible with dtype which the column takes on during tf.Example parsing
if data is missing. If None, then tf.parse_example will fail if an example
does not contain this column. If a single value is provided, the same
value will be applied as the default value for every dimension. If a
list of values is provided, the length of the list should be equal to the
value of `dimension`.
dtype: defines the type of values. Default value is tf.float32. Must be a
non-quantized, real integer or floating point type.
normalizer: If not None, a function that can be used to normalize the value
of the real valued column after default_value is applied for parsing.
Normalizer function takes the input tensor as its argument, and returns
the output tensor. (e.g. lambda x: (x - 3.0) / 4.2).
Returns:
A _RealValuedColumn.
Raises:
TypeError: if dimension is not an int
ValueError: if dimension is not a positive integer
TypeError: if default_value is a list but its length is not equal to the
value of `dimension`.
TypeError: if default_value is not compatible with dtype.
    ValueError: if dtype is not convertible to tf.float32.
"""
if not isinstance(dimension, int):
raise TypeError("dimension must be an integer. "
"dimension: {}, column_name: {}".format(dimension,
column_name))
if dimension < 1:
raise ValueError("dimension must be greater than 0. "
"dimension: {}, column_name: {}".format(dimension,
column_name))
if not (dtype.is_integer or dtype.is_floating):
raise ValueError("dtype must be convertible to float. "
"dtype: {}, column_name: {}".format(dtype, column_name))
if default_value is None:
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if isinstance(default_value, int):
if dtype.is_integer:
default_value = [default_value for _ in range(dimension)]
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if dtype.is_floating:
default_value = float(default_value)
default_value = [default_value for _ in range(dimension)]
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if isinstance(default_value, float):
if dtype.is_floating and (not dtype.is_integer):
default_value = [default_value for _ in range(dimension)]
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if isinstance(default_value, list):
if len(default_value) != dimension:
raise ValueError(
"The length of default_value must be equal to dimension. "
"default_value: {}, dimension: {}, column_name: {}".format(
default_value, dimension, column_name))
# Check if the values in the list are all integers or are convertible to
# floats.
is_list_all_int = True
is_list_all_float = True
for v in default_value:
if not isinstance(v, int):
is_list_all_int = False
if not (isinstance(v, float) or isinstance(v, int)):
is_list_all_float = False
if is_list_all_int:
if dtype.is_integer:
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
elif dtype.is_floating:
default_value = [float(v) for v in default_value]
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
if is_list_all_float:
if dtype.is_floating and (not dtype.is_integer):
default_value = [float(v) for v in default_value]
return _RealValuedColumn(column_name, dimension, default_value, dtype,
normalizer)
raise TypeError("default_value must be compatible with dtype. "
"default_value: {}, dtype: {}, column_name: {}".format(
default_value, dtype, column_name))
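# Illustrative usage sketch (added commentary, not part of the original source):
# dense numeric features with and without a default value and normalizer; the
# column names below are hypothetical.
#
#   age = real_valued_column("age")
#   ratio = real_valued_column("historical_click_ratio", default_value=0.0,
#                              normalizer=lambda x: x / 100.0)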
class _BucketizedColumn(_FeatureColumn, collections.namedtuple(
"_BucketizedColumn", ["source_column", "boundaries"])):
"""Represents a bucketization transformation also known as binning.
Instances of this class are immutable. Values in `source_column` will be
bucketized based on `boundaries`.
For example, if the inputs are:
boundaries = [0, 10, 100]
source_column = [[-5], [150], [10], [0], [4], [19]]
then the bucketized feature will be:
output = [[0], [3], [2], [1], [1], [2]]
Attributes:
source_column: A _RealValuedColumn defining dense column.
boundaries: A list of floats specifying the boundaries. It has to be sorted.
[a, b, c] defines following buckets: (-inf., a), [a, b), [b, c), [c, inf.)
Raises:
ValueError: if 'boundaries' is empty or not sorted.
"""
def __new__(cls, source_column, boundaries):
if not isinstance(source_column, _RealValuedColumn):
raise TypeError("source_column must be an instance of _RealValuedColumn. "
"source_column: {}".format(source_column))
if not isinstance(boundaries, list) or not boundaries:
raise ValueError("boundaries must be a non-empty list. "
"boundaries: {}".format(boundaries))
# We allow bucket boundaries to be monotonically increasing
    # (i.e. a[i+1] >= a[i]). When two bucket boundaries are the same, we
# de-duplicate.
sanitized_boundaries = []
for i in range(len(boundaries) - 1):
if boundaries[i] == boundaries[i + 1]:
continue
elif boundaries[i] < boundaries[i + 1]:
sanitized_boundaries.append(boundaries[i])
else:
raise ValueError("boundaries must be a sorted list. "
"boundaries: {}".format(boundaries))
sanitized_boundaries.append(boundaries[len(boundaries) - 1])
return super(_BucketizedColumn, cls).__new__(cls, source_column,
tuple(sanitized_boundaries))
@property
def name(self):
return "{}_bucketized".format(self.source_column.name)
@property
def length(self):
"""Returns total number of buckets."""
return len(self.boundaries) + 1
@property
def config(self):
return self.source_column.config
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return "{}".format(self)
def insert_transformed_feature(self, columns_to_tensors):
# Bucketize the source column.
if self.source_column not in columns_to_tensors:
self.source_column.insert_transformed_feature(columns_to_tensors)
columns_to_tensors[self] = bucketization_op.bucketize(
columns_to_tensors[self.source_column],
boundaries=list(self.boundaries),
name="bucketize")
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
if output_rank != 2:
raise ValueError("BucketizedColumn currently only supports output_rank=2")
return array_ops.reshape(
array_ops.one_hot(
math_ops.to_int64(input_tensor),
self.length,
1.,
0.,
name="one_hot"), [-1, self.length * self.source_column.dimension],
name="reshape")
def to_sparse_tensor(self, input_tensor):
"""Creates a SparseTensor from the bucketized Tensor."""
dimension = self.source_column.dimension
batch_size = array_ops.shape(input_tensor, name="shape")[0]
if dimension > 1:
i1 = array_ops.reshape(
array_ops.tile(
array_ops.expand_dims(
math_ops.range(0, batch_size), 1, name="expand_dims"),
[1, dimension],
name="tile"), [-1],
name="rehsape")
i2 = array_ops.tile(
math_ops.range(0, dimension), [batch_size], name="tile")
# Flatten the bucket indices and unique them across dimensions
# E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
bucket_indices = array_ops.reshape(
input_tensor, [-1], name="reshape") + self.length * i2
else:
# Simpler indices when dimension=1
i1 = math_ops.range(0, batch_size)
i2 = array_ops.zeros([batch_size], dtype=dtypes.int32, name="zeros")
bucket_indices = array_ops.reshape(input_tensor, [-1], name="reshape")
indices = math_ops.to_int64(array_ops.transpose(array_ops.pack((i1, i2))))
shape = math_ops.to_int64(array_ops.pack([batch_size, dimension]))
sparse_id_values = ops.SparseTensor(indices, bucket_indices, shape)
return sparse_id_values
def _wide_embedding_lookup_arguments(self, input_tensor):
return _LinearEmbeddingLookupArguments(
input_tensor=self.to_sparse_tensor(input_tensor),
weight_tensor=None,
vocab_size=self.length * self.source_column.dimension,
initializer=init_ops.zeros_initializer,
combiner="sum")
def bucketized_column(source_column, boundaries):
"""Creates a _BucketizedColumn.
Args:
source_column: A _RealValuedColumn defining dense column.
boundaries: A list of floats specifying the boundaries. It has to be sorted.
Returns:
A _BucketizedColumn.
Raises:
ValueError: if 'boundaries' is empty or not sorted.
"""
return _BucketizedColumn(source_column, boundaries)
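# Illustrative usage sketch (added commentary, not part of the original source):
# bucketizing a dense column makes it usable in linear models and crossable with
# other sparse columns; the boundaries below are hypothetical.
#
#   price = real_valued_column("price")
#   price_bucket = bucketized_column(price, boundaries=[10.0, 100.0, 1000.0])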
class _CrossedColumn(_FeatureColumn,
collections.namedtuple("_CrossedColumn",
["columns", "hash_bucket_size",
"hash_key",
"combiner", "ckpt_to_load_from",
"tensor_name_in_ckpt"])):
"""Represents a cross transformation also known as conjuction or combination.
Instances of this class are immutable. It crosses given `columns`. Crossed
column output will be hashed to hash_bucket_size.
  Conceptually, the transformation can be thought of as:
Hash(cartesian product of features in columns) % `hash_bucket_size`
For example, if the columns are
SparseTensor referred by first column: shape = [2, 2]
[0, 0]: "a"
[1, 0]: "b"
[1, 1]: "c"
  SparseTensor referred by second column: shape = [2, 1]
[0, 0]: "d"
[1, 0]: "e"
then crossed feature will look like:
shape = [2, 2]
[0, 0]: Hash64("d", Hash64("a")) % hash_bucket_size
[1, 0]: Hash64("e", Hash64("b")) % hash_bucket_size
[1, 1]: Hash64("e", Hash64("c")) % hash_bucket_size
Attributes:
columns: An iterable of _FeatureColumn. Items can be an instance of
_SparseColumn, _CrossedColumn, or _BucketizedColumn.
hash_bucket_size: An int that is > 1. The number of buckets.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently "mean", "sqrtn" and "sum" are supported. Each
      of these can be thought of as example-level normalizations on the column:
* "sum": do not normalize
* "mean": do l1 normalization
* "sqrtn": do l2 normalization
For more information: `tf.embedding_lookup_sparse`.
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
Raises:
TypeError: if all items in columns are not an instance of _SparseColumn,
_CrossedColumn, or _BucketizedColumn.
ValueError: if hash_bucket_size is not > 1 or len(columns) is not > 1. Also,
if only one of `ckpt_to_load_from` and `tensor_name_in_ckpt` is specified.
"""
@staticmethod
def _is_crossable(column):
return isinstance(column,
(_SparseColumn, _CrossedColumn, _BucketizedColumn))
def __new__(cls,
columns,
hash_bucket_size,
hash_key,
combiner="sqrtn",
ckpt_to_load_from=None,
tensor_name_in_ckpt=None):
for column in columns:
if not _CrossedColumn._is_crossable(column):
raise TypeError("columns must be a set of _SparseColumn, "
"_CrossedColumn, or _BucketizedColumn instances. "
"column: {}".format(column))
if len(columns) < 2:
raise ValueError("columns must contain at least 2 elements. "
"columns: {}".format(columns))
if hash_bucket_size < 2:
raise ValueError("hash_bucket_size must be at least 2. "
"hash_bucket_size: {}".format(hash_bucket_size))
if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
raise ValueError("Must specify both `ckpt_to_load_from` and "
"`tensor_name_in_ckpt` or none of them.")
sorted_columns = sorted(
[column for column in columns], key=lambda column: column.name)
return super(_CrossedColumn, cls).__new__(cls, tuple(sorted_columns),
hash_bucket_size, hash_key,
combiner,
ckpt_to_load_from,
tensor_name_in_ckpt)
@property
def name(self):
sorted_names = sorted([column.name for column in self.columns])
return "_X_".join(sorted_names)
@property
def config(self):
config = {}
for column in self.columns:
config.update(_get_feature_config(column))
return config
@property
def length(self):
"""Returns total number of buckets."""
return self.hash_bucket_size
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return "{}".format(self)
def id_tensor(self, input_tensor):
"""Returns the id tensor from the given transformed input_tensor."""
return input_tensor
# pylint: disable=unused-argument
def weight_tensor(self, input_tensor):
"""Returns the weight tensor from the given transformed input_tensor."""
return None
def insert_transformed_feature(self, columns_to_tensors):
"""Handles cross transformation."""
def _collect_leaf_level_columns(cross):
"""Collects base columns contained in the cross."""
leaf_level_columns = []
for c in cross.columns:
if isinstance(c, _CrossedColumn):
leaf_level_columns.extend(_collect_leaf_level_columns(c))
else:
leaf_level_columns.append(c)
return leaf_level_columns
feature_tensors = []
for c in _collect_leaf_level_columns(self):
if isinstance(c, _SparseColumn):
feature_tensors.append(columns_to_tensors[c.name])
else:
if c not in columns_to_tensors:
c.insert_transformed_feature(columns_to_tensors)
if isinstance(c, _BucketizedColumn):
feature_tensors.append(c.to_sparse_tensor(columns_to_tensors[c]))
else:
feature_tensors.append(columns_to_tensors[c])
columns_to_tensors[self] = sparse_feature_cross_op.sparse_feature_cross(
feature_tensors,
hashed_output=True,
num_buckets=self.hash_bucket_size,
hash_key=self.hash_key,
name="cross")
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
raise ValueError("CrossedColumn is not supported in DNN. "
"Please use embedding_column. column: {}".format(self))
def _checkpoint_path(self):
if self.ckpt_to_load_from is not None:
return self.ckpt_to_load_from, self.tensor_name_in_ckpt
return None
def _wide_embedding_lookup_arguments(self, input_tensor):
return _LinearEmbeddingLookupArguments(
input_tensor=input_tensor,
weight_tensor=None,
vocab_size=self.length,
initializer=init_ops.zeros_initializer,
combiner=self.combiner)
def crossed_column(columns, hash_bucket_size, combiner=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
hash_key=None):
"""Creates a _CrossedColumn.
Args:
columns: An iterable of _FeatureColumn. Items can be an instance of
_SparseColumn, _CrossedColumn, or _BucketizedColumn.
hash_bucket_size: An int that is > 1. The number of buckets.
combiner: A combiner string, supports sum, mean, sqrtn.
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseFeatureCrossOp
(optional).
Returns:
A _CrossedColumn.
Raises:
TypeError: if any item in columns is not an instance of _SparseColumn,
_CrossedColumn, or _BucketizedColumn, or
hash_bucket_size is not an int.
ValueError: if hash_bucket_size is not > 1 or
len(columns) is not > 1.
"""
if combiner is None:
logging.warn("The default value of combiner will change from \"sum\" "
"to \"sqrtn\" after 2016/11/01.")
combiner = "sum"
return _CrossedColumn(
columns,
hash_bucket_size,
hash_key,
combiner=combiner,
ckpt_to_load_from=ckpt_to_load_from,
tensor_name_in_ckpt=tensor_name_in_ckpt)
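# Illustrative usage sketch (added commentary, not part of the original source):
# crossing a sparse column with a bucketized column, as is common in wide/linear
# models; names and bucket sizes are hypothetical.
#
#   country = sparse_column_with_hash_bucket("country", hash_bucket_size=100)
#   country_x_price = crossed_column([country, price_bucket],
#                                    hash_bucket_size=10000)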
class DataFrameColumn(_FeatureColumn,
collections.namedtuple("DataFrameColumn",
["column_name", "series"])):
"""Represents a feature column produced from a `DataFrame`.
Instances of this class are immutable. A `DataFrame` column may be dense or
sparse, and may have any shape, with the constraint that dimension 0 is
batch_size.
Args:
column_name: a name for this column
series: a `Series` to be wrapped, which has already had its base features
substituted with `PredefinedSeries`.
"""
def __new__(cls, column_name, series):
return super(DataFrameColumn, cls).__new__(cls, column_name, series)
@property
def name(self):
return self.column_name
@property
def config(self):
return self.series.required_base_features()
@property
def key(self):
"""Returns a string which will be used as a key when we do sorting."""
return self.name
def insert_transformed_feature(self, columns_to_tensors):
# The cache must already contain mappings from the expected base feature
# names to Tensors.
# Passing columns_to_tensors as the cache here means that multiple outputs
# of the transform will be cached, keyed by the repr of their associated
# TransformedSeries.
# The specific requested output ends up in columns_to_tensors twice: once
# keyed by the TransformedSeries repr, and once keyed by this
# DataFrameColumn instance.
columns_to_tensors[self] = self.series.build(columns_to_tensors)
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
input_tensor,
weight_collections=None,
trainable=True,
output_rank=2):
if input_tensor.dtype != dtypes.float32:
input_tensor = math_ops.to_float(input_tensor)
return _reshape_real_valued_tensor(input_tensor, output_rank, self.name)
def _to_dense_tensor(self, input_tensor):
return self._to_dnn_input_layer(input_tensor)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def _get_feature_config(feature_column):
"""Returns configuration for the base feature defined in feature_column."""
if not isinstance(feature_column, _FeatureColumn):
raise TypeError(
"feature_columns should only contain instances of _FeatureColumn. "
"Given column is {}".format(feature_column))
if isinstance(feature_column, (_SparseColumn, _WeightedSparseColumn,
_EmbeddingColumn, _RealValuedColumn,
_BucketizedColumn, _CrossedColumn,
_OneHotColumn)):
return feature_column.config
raise TypeError("Not supported _FeatureColumn type. "
"Given column is {}".format(feature_column))
def create_feature_spec_for_parsing(feature_columns):
"""Helper that prepares features config from input feature_columns.
The returned feature config can be used as arg 'features' in tf.parse_example.
Typical usage example:
```python
# Define features and transformations
country = sparse_column_with_vocabulary_file("country", VOCAB_FILE)
age = real_valued_column("age")
click_bucket = bucketized_column(real_valued_column("historical_click_ratio"),
boundaries=[i/10. for i in range(10)])
country_x_click = crossed_column([country, click_bucket], 10)
feature_columns = set([age, click_bucket, country_x_click])
batch_examples = tf.parse_example(
serialized_examples,
create_feature_spec_for_parsing(feature_columns))
```
For the above example, create_feature_spec_for_parsing would return the dict:
{"age": parsing_ops.FixedLenFeature([1], dtype=tf.float32),
"historical_click_ratio": parsing_ops.FixedLenFeature([1], dtype=tf.float32),
"country": parsing_ops.VarLenFeature(tf.string)}
Args:
feature_columns: An iterable containing all the feature columns. All items
should be instances of classes derived from _FeatureColumn, unless
feature_columns is a dict -- in which case, this should be true of all
values in the dict.
Returns:
A dict mapping feature keys to FixedLenFeature or VarLenFeature values.
"""
if isinstance(feature_columns, dict):
feature_columns = feature_columns.values()
features_config = {}
for column in feature_columns:
features_config.update(_get_feature_config(column))
return features_config
def _create_sequence_feature_spec_for_parsing(sequence_feature_columns,
allow_missing_by_default=False):
"""Prepares a feature spec for parsing `tf.SequenceExample`s.
Args:
sequence_feature_columns: an iterable containing all the feature columns.
All items should be instances of classes derived from `_FeatureColumn`.
allow_missing_by_default: whether to set `allow_missing=True` by default for
`FixedLenSequenceFeature`s.
Returns:
A dict mapping feature keys to `FixedLenSequenceFeature` or `VarLenFeature`.
"""
feature_spec = create_feature_spec_for_parsing(sequence_feature_columns)
sequence_feature_spec = {}
for key, feature in feature_spec.items():
if isinstance(feature, parsing_ops.VarLenFeature):
sequence_feature = feature
elif isinstance(feature, parsing_ops.FixedLenFeature):
default_is_set = feature.default_value is not None
if default_is_set:
logging.warning(
'Found default value {} for feature "{}". Ignoring this value and '
'setting `allow_missing=True` instead.'.
format(feature.default_value, key))
sequence_feature = parsing_ops.FixedLenSequenceFeature(
shape=feature.shape,
dtype=feature.dtype,
allow_missing=(allow_missing_by_default or default_is_set))
else:
raise TypeError(
"Unsupported feature type: {}".format(type(feature).__name__))
sequence_feature_spec[key] = sequence_feature
return sequence_feature_spec
def make_place_holder_tensors_for_base_features(feature_columns):
"""Returns placeholder tensors for inference.
Args:
feature_columns: An iterable containing all the feature columns. All items
should be instances of classes derived from _FeatureColumn.
Returns:
A dict mapping feature keys to SparseTensors (sparse columns) or
placeholder Tensors (dense columns).
"""
# Get dict mapping features to FixedLenFeature or VarLenFeature values.
dict_for_parse_example = create_feature_spec_for_parsing(feature_columns)
placeholders = {}
for column_name, column_type in dict_for_parse_example.items():
if isinstance(column_type, parsing_ops.VarLenFeature):
# Sparse placeholder for sparse tensors.
placeholders[column_name] = array_ops.sparse_placeholder(
column_type.dtype, name="Placeholder_{}".format(column_name))
else:
# Simple placeholder for dense tensors.
placeholders[column_name] = array_ops.placeholder(
column_type.dtype,
shape=(None, column_type.shape[0]),
name="Placeholder_{}".format(column_name))
return placeholders
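# Illustrative usage sketch (added commentary, not part of the original source):
# building feed placeholders for inference from the same feature columns used at
# training time; the columns referenced below are hypothetical.
#
#   feature_columns = set([age, price_bucket, country_x_price])
#   placeholders = make_place_holder_tensors_for_base_features(feature_columns)
#   # sparse columns map to tf.sparse_placeholder, dense ones to tf.placeholder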
class _SparseIdLookupConfig(
collections.namedtuple("_SparseIdLookupConfig",
["vocabulary_file", "keys", "num_oov_buckets",
"vocab_size", "default_value"])):
"""Defines lookup configuration for a sparse feature.
  An immutable object that defines the lookup table configuration used by
tf.feature_to_id_v2.
Attributes:
vocabulary_file: The vocabulary filename. vocabulary_file cannot be combined
with keys.
keys: A 1-D string iterable that specifies the mapping of strings to
      indices. It means a feature in keys will map to its index in keys.
num_oov_buckets: The number of out-of-vocabulary buckets. If zero all out of
vocabulary features will be ignored.
vocab_size: Number of the elements in the vocabulary.
default_value: The value to use for out-of-vocabulary feature values.
Defaults to -1.
"""
def __new__(cls,
vocabulary_file=None,
keys=None,
num_oov_buckets=0,
vocab_size=None,
default_value=-1):
return super(_SparseIdLookupConfig, cls).__new__(cls, vocabulary_file, keys,
num_oov_buckets,
vocab_size, default_value)
| apache-2.0 | 4,226,114,984,808,948,700 | 39.350548 | 93 | 0.639518 | false |
amplifylitco/asiaq | disco_aws_automation/disco_group.py | 1 | 12582 | """Contains DiscoGroup class that is above all other group classes used"""
import logging
from .base_group import BaseGroup
from .disco_autoscale import DiscoAutoscale
from .disco_elastigroup import DiscoElastigroup
logger = logging.getLogger(__name__)
class DiscoGroup(BaseGroup):
"""Implementation of DiscoGroup regardless of the type of group"""
def __init__(self, environment_name):
"""Implementation of BaseGroup in AWS"""
self.environment_name = environment_name
self.autoscale = DiscoAutoscale(environment_name=self.environment_name)
self.elastigroup = DiscoElastigroup(environment_name=self.environment_name)
super(DiscoGroup, self).__init__()
def get_existing_group(self, hostclass=None, group_name=None, throw_on_two_groups=True):
asg_group = self.autoscale.get_existing_group(
hostclass=hostclass,
group_name=group_name,
throw_on_two_groups=throw_on_two_groups
)
spot_group = self._safe_elastigroup_call(
self.elastigroup.get_existing_group,
default=[],
hostclass=hostclass,
group_name=group_name,
throw_on_two_groups=throw_on_two_groups
)
if asg_group and spot_group:
return sorted([asg_group, spot_group], key=lambda grp: grp['name'], reverse=True)[0]
elif asg_group:
return asg_group
elif spot_group:
return spot_group
else:
logger.info('No group found for hostclass %s and group name %s ', hostclass, group_name)
def _safe_elastigroup_call(self, fun, default=None, *args, **kwargs):
"""
Call a function in DiscoElastiGroup and handle cases when SpotInst usage is disabled
Args:
fun (function): The function to call
default: Default value to return in case SpotInst usage is disabled
"""
if not self.elastigroup.is_spotinst_enabled():
return default
return fun(*args, **kwargs)
def get_existing_groups(self, hostclass=None, group_name=None):
asg_groups = self.autoscale.get_existing_groups()
spot_groups = self._safe_elastigroup_call(self.elastigroup.get_existing_groups, default=[])
return asg_groups + spot_groups
def list_groups(self):
"""Returns list of objects for display purposes for all groups"""
asg_groups = self.autoscale.list_groups()
spot_groups = self._safe_elastigroup_call(self.elastigroup.list_groups, default=[])
groups = asg_groups + spot_groups
groups.sort(key=lambda grp: grp['name'])
return groups
def get_instances(self, hostclass=None, group_name=None):
asg_instances = self.autoscale.get_instances(hostclass=hostclass, group_name=group_name)
spot_instances = self._safe_elastigroup_call(
self.elastigroup.get_instances,
default=[],
hostclass=hostclass,
group_name=group_name
)
return asg_instances + spot_instances
def delete_groups(self, hostclass=None, group_name=None, force=False):
self.autoscale.delete_groups(hostclass=hostclass, group_name=group_name, force=force)
self._safe_elastigroup_call(
self.elastigroup.delete_groups,
hostclass=hostclass,
group_name=group_name,
force=force
)
def scaledown_groups(self, hostclass=None, group_name=None, wait=False, noerror=False):
self.autoscale.scaledown_groups(
hostclass=hostclass,
group_name=group_name,
wait=wait,
noerror=noerror
)
self._safe_elastigroup_call(
self.elastigroup.scaledown_groups,
hostclass=hostclass,
group_name=group_name,
wait=wait,
noerror=noerror
)
def terminate(self, instance_id, decrement_capacity=True):
"""
Terminates an instance using the autoscaling API.
When decrement_capacity is True this allows us to avoid
autoscaling immediately replacing a terminated instance.
"""
# todo check if instance belongs to spotinst or ASG and decrement the correct group
self.autoscale.terminate(instance_id, decrement_capacity)
def delete_all_recurring_group_actions(self, hostclass=None, group_name=None):
"""Deletes all recurring scheduled actions for a hostclass"""
self.autoscale.delete_all_recurring_group_actions(hostclass, group_name)
self._safe_elastigroup_call(
self.elastigroup.delete_all_recurring_group_actions,
hostclass,
group_name
)
def create_recurring_group_action(self, recurrance, min_size=None, desired_capacity=None, max_size=None,
hostclass=None, group_name=None):
"""Creates a recurring scheduled action for a hostclass"""
self._service_call_for_group(
'create_recurring_group_action',
_hostclass=hostclass,
_group_name=group_name,
recurrance=recurrance,
min_size=min_size,
desired_capacity=desired_capacity,
max_size=max_size,
hostclass=hostclass,
group_name=group_name
)
def update_elb(self, elb_names, hostclass=None, group_name=None):
"""Updates an existing autoscaling group to use a different set of load balancers"""
self._service_call_for_group(
'update_elb',
_hostclass=hostclass,
_group_name=group_name,
elb_names=elb_names,
hostclass=hostclass,
group_name=group_name
)
def get_launch_config(self, hostclass=None, group_name=None):
"""Return launch config info for a hostclass, None otherwise"""
return self._service_call_for_group(
'get_launch_config',
_hostclass=hostclass,
_group_name=group_name,
hostclass=hostclass,
group_name=group_name
)
# pylint: disable=R0913, R0914
def create_or_update_group(self, hostclass, desired_size=None, min_size=None, max_size=None,
instance_type=None, load_balancers=None, target_groups=None, subnets=None,
security_groups=None, instance_monitoring=None, ebs_optimized=None,
image_id=None, key_name=None, associate_public_ip_address=None, user_data=None,
tags=None, instance_profile_name=None, block_device_mappings=None,
group_name=None, create_if_exists=False, termination_policies=None,
spotinst=False, spotinst_reserve=None):
"""
Create a new autoscaling group or update an existing one
"""
existing_group = self.get_existing_group(hostclass, group_name)
# if there is an existing group and create_if_exists is false then we will be updating that group
# in that case, use the group type of the existing group instead of the passed in type
if existing_group and not create_if_exists:
existing_spot = existing_group['type'] == 'spot'
if existing_spot != spotinst:
logger.info(
'Running update_group using %s group type because existing group type is %s',
existing_group['type'], existing_group['type']
)
spotinst = existing_spot
return self._service_call(
spotinst, 'create_or_update_group',
hostclass=hostclass,
desired_size=desired_size,
min_size=min_size,
max_size=max_size,
instance_type=instance_type,
load_balancers=load_balancers,
target_groups=target_groups,
subnets=subnets,
security_groups=security_groups,
instance_monitoring=instance_monitoring,
ebs_optimized=ebs_optimized,
image_id=image_id,
key_name=key_name,
associate_public_ip_address=associate_public_ip_address,
user_data=user_data,
tags=tags,
instance_profile_name=instance_profile_name,
block_device_mappings=block_device_mappings,
group_name=group_name,
create_if_exists=create_if_exists,
termination_policies=termination_policies,
spotinst=spotinst,
spotinst_reserve=spotinst_reserve
)
def clean_configs(self):
"""Delete unused Launch Configurations in current environment"""
self.autoscale.clean_configs()
def get_configs(self, names=None):
"""Returns Launch Configurations in current environment"""
return self.autoscale.get_configs(names)
def delete_config(self, config_name):
"""Delete a specific Launch Configuration"""
self.autoscale.delete_config(config_name)
def list_policies(self, group_name=None, policy_types=None, policy_names=None):
"""Returns all autoscaling policies"""
return self.autoscale.list_policies(group_name, policy_types, policy_names)
def create_policy(self, group_name, policy_name, policy_type="SimpleScaling", adjustment_type=None,
min_adjustment_magnitude=None, scaling_adjustment=None, cooldown=600,
metric_aggregation_type=None, step_adjustments=None, estimated_instance_warmup=None):
"""
Creates a new autoscaling policy, or updates an existing one if the autoscaling group name and
policy name already exist. Handles the logic of constructing the correct autoscaling policy request,
because not all parameters are required.
"""
self._service_call_for_group(
'create_policy',
_group_name=group_name,
group_name=group_name,
policy_name=policy_name,
policy_type=policy_type,
adjustment_type=adjustment_type,
min_adjustment_magnitude=min_adjustment_magnitude,
scaling_adjustment=scaling_adjustment,
cooldown=cooldown,
metric_aggregation_type=metric_aggregation_type,
step_adjustments=step_adjustments,
estimated_instance_warmup=estimated_instance_warmup
)
def delete_policy(self, policy_name, group_name):
"""Deletes an autoscaling policy"""
self._service_call_for_group(
'delete_policy',
_group_name=group_name,
policy_name=policy_name,
group_name=group_name
)
def update_snapshot(self, snapshot_id, snapshot_size, hostclass=None, group_name=None):
"""Updates all of a hostclasses existing autoscaling groups to use a different snapshot"""
self._service_call_for_group(
'update_snapshot',
_hostclass=hostclass,
_group_name=group_name,
hostclass=hostclass,
group_name=group_name,
snapshot_id=snapshot_id,
snapshot_size=snapshot_size
)
def _service_call(self, use_spotinst, fun_name, *args, **kwargs):
"""
Make a call to either DiscoAutoscale or DiscoElastigroup
Args:
use_spotinst (bool): Use DiscoElastiGroup if True
fun_name (str): Function name to call for the selected service
"""
fun = getattr(self.elastigroup, fun_name) if use_spotinst else getattr(self.autoscale, fun_name)
return fun(*args, **kwargs)
def _service_call_for_group(self, fun_name, _hostclass=None, _group_name=None, *args,
**kwargs):
"""
Make a call to either DiscoAutoscale or DiscoElastigroup based on the type of group affected
Defaults to using DiscoAutoscale if the group is not found
Args:
fun_name (str): Function to call on the selected service
_hostclass (str): Hostclass name to find group by
_group_name (str): ASG or Elastigroup name to find group by
"""
existing_group = self.get_existing_group(_hostclass, _group_name)
use_spotinst = existing_group and existing_group['type'] == 'spot'
return self._service_call(use_spotinst, fun_name, *args, **kwargs)
| bsd-2-clause | -5,459,240,590,080,118,000 | 41.363636 | 110 | 0.619774 | false |
Myoldmopar/EPLaunchLight | test/test_base.py | 1 | 2718 | import os
import sys
import unittest
import threading
# add the source directory to the path so the unit test framework can find it
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'EPLaunchLite'))
try:
from FileTypes import FileTypes
has_gtk = True
except ImportError as e:
has_gtk = False
from EnergyPlusPath import EnergyPlusPath
from EnergyPlusThread import EnergyPlusThread
@unittest.skipIf(not has_gtk, "Cannot run FileTypes tests without gtk")
class TestFileTypes(unittest.TestCase):
def test_idf_file_type(self):
msg, filters = FileTypes.get_materials(FileTypes.IDF)
self.assertEqual(len(filters), 2) # should return 2: idf and imf
# make sure we have each one, idf and imf
idf_filters = [x for x in filters if 'IDF' in x.get_name()]
        self.assertEqual(len(idf_filters), 1)
imf_filters = [x for x in filters if 'IMF' in x.get_name()]
        self.assertEqual(len(imf_filters), 1)
def test_epw_file_type(self):
msg, filters = FileTypes.get_materials(FileTypes.EPW)
self.assertEqual(len(filters), 1)
epw_filters = [x for x in filters if 'EPW' in x.get_name()]
        self.assertEqual(len(epw_filters), 1)
def test_invalid_file_type(self):
msg, result = FileTypes.get_materials('abcdef')
self.assertIsNone(msg)
self.assertIsNone(result)
class TestEnergyPlusPaths(unittest.TestCase):
def test_proper_path_no_trailing_slash(self):
eight_one = EnergyPlusPath.get_version_number_from_path('/Applications/EnergyPlus-8-1-0')
self.assertEqual(eight_one, '8-1-0')
def test_proper_path_with_trailing_slash(self):
eight_one = EnergyPlusPath.get_version_number_from_path('/Applications/EnergyPlus-8-1-0/')
self.assertEqual(eight_one, '8-1-0')
def test_bad_path_with_enough_tokens(self):
eight_one = EnergyPlusPath.get_version_number_from_path('/usr/local/EnergyPlus-8-1-0')
self.assertIsNone(eight_one)
def test_bad_path_not_enough_tokens(self):
with self.assertRaises(IndexError):
EnergyPlusPath.get_version_number_from_path('/EnergyPlus-8-1-0')
class TestEnergyPlusThread(unittest.TestCase):
def test_construction(self):
paths = ['/dummy/', '/path', '/to_nothing']
obj = EnergyPlusThread(paths[0], paths[1], paths[2], None, None, None, None)
self.assertTrue(isinstance(obj, threading.Thread))
        self.assertEqual(obj.run_script, paths[0])
        self.assertEqual(obj.input_file, paths[1])
        self.assertEqual(obj.weather_file, paths[2])
# allow execution directly as python tests/test_ghx.py
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 739,740,589,794,984,200 | 37.28169 | 98 | 0.679176 | false |
silverfernsys/agentserver | agentserver/utils/log.py | 1 | 1886 | import logging
from ip import get_ip
log_vals = {
'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARNING,
'INFO': logging.INFO,
'DEBUG': logging.DEBUG,
'NOTSET': logging.NOTSET}
class LoggingError(Exception):
def __init__(self, arg):
self.message = 'LoggingError: {0}'.format(arg)
self.arg = arg
class LogFileError(LoggingError):
def __init__(self, arg):
self.message = 'Error opening log file "{0}".'.format(arg)
self.arg = arg
def config_logging(config):
try:
filename = config.arguments.agentserver.log_file
format = '%(asctime)s::%(levelname)s::%(name)s::%(message)s'
level = log_vals.get(config.arguments.agentserver.log_level, logging.DEBUG)
logging.basicConfig(filename=filename,
format=format,
level=level)
except IOError:
raise LogFileError(config.log_file)
def log_kafka(id, origin, name, stats, **kwargs):
logging.getLogger(origin).debug('Flushed {0} stats for agent.id = {1}, '
'process = {2} to Kafka.'
.format(len(stats), id, name))
def log_auth_error(origin, auth_token):
if auth_token:
logging.getLogger(origin.__class__.__name__).error(
'Request with invalid token "{0}" '
'from agentserver.{1}'.format(auth_token, get_ip(origin.request)))
else:
logging.getLogger(origin.__class__.__name__).error(
'Request with missing token '
'from agentserver.{0}'.format(get_ip(origin.request)))
def log_authentication_error(origin, message, username):
logging.getLogger(origin.__class__.__name__).error(
'Authentication: {0} {1} from agentserver.{2}'
.format(message, username, get_ip(origin.request)))
| bsd-3-clause | -6,249,568,894,713,753,000 | 30.433333 | 83 | 0.590668 | false |
unintended/Cohen | misc/media_server_observer.py | 1 | 2131 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
#
# Copyright 2009, Benjamin Kampmann <[email protected]>
# Copyright 2014, Hartmut Goebel <[email protected]>
from twisted.internet import reactor
from coherence.base import Coherence
from coherence.upnp.devices.control_point import ControlPoint
from coherence.upnp.core import DIDLLite
# browse callback
def process_media_server_browse(result, client):
print "browsing root of", client.device.get_friendly_name()
print "result contains", result['NumberReturned'],
print "out of", result['TotalMatches'], "total matches."
elt = DIDLLite.DIDLElement.fromString(result['Result'])
for item in elt.getItems():
if item.upnp_class.startswith("object.container"):
print " container", item.title, "(%s)" % item.id,
print "with", item.childCount, "items."
if item.upnp_class.startswith("object.item"):
print " item", item.title, "(%s)." % item.id
# called for each media server found
def media_server_found(client, udn):
print "Media Server found:", client.device.get_friendly_name()
d = client.content_directory.browse(0,
browse_flag='BrowseDirectChildren',
process_result=False,
backward_compatibility=False)
d.addCallback(process_media_server_browse, client)
# sadly they sometimes get removed as well :(
def media_server_removed(udn):
print "Media Server gone:", udn
def start():
control_point = ControlPoint(Coherence({'logmode': 'warning'}),
auto_client=['MediaServer'])
control_point.connect(media_server_found, 'Coherence.UPnP.ControlPoint.MediaServer.detected')
control_point.connect(media_server_removed, 'Coherence.UPnP.ControlPoint.MediaServer.removed')
# now we should also try to discover the ones that are already there:
for device in control_point.coherence.devices:
print device
if __name__ == "__main__":
reactor.callWhenRunning(start)
reactor.run()
| mit | -1,282,085,470,347,691,800 | 33.370968 | 96 | 0.684186 | false |
erigones/api_ipf | tests.py | 1 | 6737 | from django.test import TestCase
from rest_framework import status
from eszone_ipf.settings import BASE_DIR, API_VERSION_PREFIX
class ConfigFileTestCase(TestCase):
url = '/{}/api_ipf/config/'.format(API_VERSION_PREFIX)
url_act = ''.join([url, 'activate/'])
def test_ipf_form_post(self):
title = 'test_ipf.conf'
test_file = ''.join([BASE_DIR, title])
with open(test_file, 'w+') as f:
f.write('# Test file.')
f.seek(0)
files={'title': (title, ''),
'form': ('ipf', ''),
'directory': (title, f.read())}
response = self.client.post(self.url, files=files)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_ipnat_form_post(self):
title = 'test_ipnat.conf'
test_file = ''.join([BASE_DIR, title])
with open(test_file, 'w+') as f:
f.write('# Test file.')
f.seek(0)
files={'title': (title, ''),
'form': ('ipnat', ''),
'directory': (title, f.read())}
response = self.client.post(self.url, files=files)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_ippool_form_post(self):
title = 'test_ippool.conf'
test_file = ''.join([BASE_DIR, title])
with open(test_file, 'w+') as f:
f.write('# Test file.')
f.seek(0)
files={'title': (title, ''),
'form': ('ippool', ''),
'directory': (title, f.read())}
response = self.client.post(self.url, files=files)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_ipf6_form_post(self):
title = 'test_ipf6.conf'
test_file = ''.join([BASE_DIR, title])
with open(test_file, 'w+') as f:
f.write('# Test file.')
f.seek(0)
files={'title': (title, ''),
'form': ('ipf6', ''),
'directory': (title, f.read())}
response = self.client.post(self.url, files=files)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_bad_form_post(self):
title = 'test_ipf.conf'
test_file = ''.join([BASE_DIR, title])
with open(test_file, 'w+') as f:
f.write('# Test file.')
f.seek(0)
files={'title': (title, ''),
'form': ('wrong', ''),
'directory': (title, f.read())}
response = self.client.post(self.url, files=files)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_missing_arg_post(self):
files={'title': ('wrong', ''),
'form': ('wrong', '')}
response = self.client.post(self.url, files=files)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_get_conf_list(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_conf_file(self):
response = self.client.get(''.join([self.url, 'test_ipf.conf/']))
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_not_existing_conf_file(self):
response = self.client.get(''.join([self.url, 'no_test.conf/']))
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_update_conf_file(self):
title = 'test_ipf.conf'
test_file = ''.join([BASE_DIR, title])
with open(test_file, 'w+') as f:
f.write('# Test file.')
f.seek(0)
files={'title': (title, ''),
'directory': (title, f.read())}
response = self.client.put(''.join([self.url, 'test_ipf.conf/']),
files=files)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_delete_conf_file(self):
response = self.client.delete(''.join([self.url, 'test_ipf.conf/']))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_activate_ipf_form(self):
response = self.client.get(''.join([self.url_act, 'test_ipf.conf/']))
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_activate_ipnat_form(self):
response = self.client.get(''.join([self.url_act, 'test_ipnat.conf/']))
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_activate_ippool_form(self):
response = self.client.get(''.join([self.url_act, 'test_ippool.conf/']))
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_activate_ipf6_form(self):
response = self.client.get(''.join([self.url_act, 'test_ipf6.conf/']))
self.assertEqual(response.status_code, status.HTTP_200_OK)
class LogFileTestCase(TestCase):
url = '/{}/api_ipf/log/'.format(API_VERSION_PREFIX)
title = 'test.log'
def test_post(self):
response = self.client.post(self.url, data={'title': self.title})
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_missing_arg_post(self):
response = self.client.post(self.url, data={})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_get_log_list(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_log_file(self):
response = self.client.get(''.join([self.url, self.title, '/']))
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_not_existing_log_file(self):
response = self.client.get(''.join([self.url, 'no_test.log/']))
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_delete_log_file(self):
response = self.client.delete(''.join([self.url, self.title, '/']))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
class OtherTestCase(TestCase):
url = '/{}/api_ipf/'.format(API_VERSION_PREFIX)
def test_blacklist_update(self):
response = self.client.get(''.join([self.url, 'update/']))
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_allowed_command(self):
response = self.client.get(''.join([self.url, 'command/ipfstat -io/']))
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_not_allowed_command(self):
response = self.client.get(''.join([self.url, 'command/pkill python/']))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) | bsd-3-clause | 5,609,047,805,943,410,000 | 38.869822 | 80 | 0.582604 | false |
jobovy/gaia_tools | scripts/download_dr2.py | 1 | 3753 | # Script to sniff whether DR2 has appeared and grab everything once it appears
import os
import time
from datetime import datetime, timedelta
import pytz
import subprocess
import hashlib
_DR2_URL= 'http://cdn.gea.esac.esa.int/Gaia/gdr2/'
_MD5_FILENAME= 'MD5SUM.txt'
_CEST= pytz.timezone('Europe/Brussels')
_TIME_START_CHECKING= _CEST.localize(datetime(2018,4,25,11,55,0))
_TIME_START_CHECKING_MORE= _CEST.localize(datetime(2018,4,25,11,59,30))
_DT_UNTIL_DOWNLOAD_ALL= 10*60 # seconds
_MAX_DOWNLOAD_TRIES= 50 # don't try to download and fail more than this
_CUT_DIRS= 2
download_tries= 0
_VERBOSE= True
_HOLD_OFF_UNTIL_SOON_BEFORE= True
def dr2_available(dr2_url=_DR2_URL):
try:
cmd= 'curl -s --head %s | head -n 1 | grep "HTTP/1.[01] [23].." >/dev/null' % (dr2_url)
output= os.system(cmd)
return not bool(int(output))
except:
return False
def md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
while True:
    # Only start checking soon before the official release date; sleep until then
if _HOLD_OFF_UNTIL_SOON_BEFORE:
if datetime.now(_CEST) < _TIME_START_CHECKING:
print("Waiting to start checking; current time CEST:",
datetime.now(_CEST))
time.sleep(10)
continue
# Then check whether the data is available
if _VERBOSE: print("DR2 available?",dr2_available(_DR2_URL))
if not dr2_available(_DR2_URL):
if datetime.now(_CEST) < _TIME_START_CHECKING_MORE:
time.sleep(30)
else:
time.sleep(5)
# If nothing seems to appear, switch to just really grabbing all
if datetime.now(_CEST) > (_TIME_START_CHECKING_MORE\
+timedelta(0,_DT_UNTIL_DOWNLOAD_ALL)):
os.chdir('../')
_DR2_URL= 'http://cdn.gea.esac.esa.int/Gaia/'
_CUT_DIRS= 1
continue
# Once it's available, start grabbing it
try:
subprocess.check_call(['wget','-r','-nH','-nc',
'--cut-dirs=%i' % _CUT_DIRS,
'--no-parent',
'--reject="index.html*",vot.gz',
_DR2_URL])
except: # Any issue, just try again 10 seconds later
time.sleep(10)
download_tries+= 1
if download_tries > _MAX_DOWNLOAD_TRIES: break
else: continue
# Once we think we've downloaded it all, go into directories and check MD5
all_dirs= [x[0] for x in os.walk('./')]
all_dirs.append('./') # Just to make sure we check the main dir...
_CONTINUE= False
for dir in all_dirs:
if 'votable' in dir or 'vo' in dir: continue
if _VERBOSE: print("Checking directory %s" % (dir))
if not os.path.exists(os.path.join(dir,_MD5_FILENAME)):
continue
with open(os.path.join(dir,_MD5_FILENAME),'r') as md5file:
for line in md5file:
target_hash,tfile= line.split()
if not os.path.exists(os.path.join(dir,tfile)): continue
current_hash= md5(os.path.join(dir,tfile))
if _VERBOSE:
print("md5sums of file %s:" % tfile)
print("%s" % target_hash)
print("%s" % current_hash)
if target_hash.strip() != current_hash:
print('MD5 does not match for %s' % tfile)
# Remove the file
os.remove(os.path.join(dir,tfile))
_CONTINUE= True
if _CONTINUE: continue
else: break
| mit | -5,076,307,777,478,934,000 | 38.505263 | 95 | 0.563816 | false |
cihologramas/pyueye | ueye/__init__.py | 1 | 1874 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2010, Combustion Ingenieros Ltda.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the Combustión Ingenieros Ltda. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Ricardo Amézquita Orozco <[email protected]>
from ueye import *
from ueyeh import *
| bsd-3-clause | 4,809,739,865,831,521,000 | 54.058824 | 82 | 0.714744 | false |
NickBeeuwsaert/SuperCanvas | doc/bezier_subdivision.py | 1 | 3896 | #!/usr/bin/env python
lerp = lambda s, e, t: tuple(S + (E-S)*t for S, E in zip(s,e))
t = 0.65
cp0, cp1, cp2, cp3 = ((0, 300), (25, 50), (450, 50), (500,300))
M0 = lerp(cp0, cp1, t)
M1 = lerp(cp1, cp2, t)
M2 = lerp(cp2, cp3, t)
M3 = lerp(M0, M1, t)
M4 = lerp(M1, M2, t)
M5 = lerp(M3, M4, t)
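# M0..M5 are the De Casteljau construction points for splitting the cubic
# Bezier cp0..cp3 at parameter t: the left half is (cp0, M0, M3, M5) and the
# right half is (M5, M4, M2, cp3), matching the two sub-curves drawn below.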
print("""<svg width="100%" height="100%" viewBox="-10 -10 520 320" preserveAspectRatio="xMidYMid" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<!-- Generated with {} -->
<defs>
<circle r="4" id="control_point" fill="none" stroke="black"/>
<circle r="2" id="mid_point" fill="black" stroke="none"/>
<style>
text {{
font-size: 8pt;
fill: black;
}}
</style>
</defs>
<text x="0" y="8">t = {t}</text>
<path d="M{cp0[0]},{cp0[1]} C{cp1[0]},{cp1[1]} {cp2[0]},{cp2[1]} {cp3[0]},{cp3[1]}" fill="none" stroke="hsl(196, 100%, 42%)" stroke-width="6"/>
<path d="M{cp0[0]},{cp0[1]} C{M0[0]},{M0[1]} {M3[0]},{M3[1]} {M5[0]},{M5[1]}" fill="none" stroke="red" stroke-width="2"/>
<path d="M{M5[0]},{M5[1]} C{M4[0]},{M4[1]} {M2[0]},{M2[1]} {cp3[0]},{cp3[1]}" fill="none" stroke="green" stroke-width="2"/>
<!-- draw lines -->
<polyline fill="none" stroke="hsl(196, 100%, 42%)" stroke-dasharray="5,5"
points="{cp0[0]},{cp0[1]} {cp1[0]},{cp1[1]} {cp2[0]},{cp2[1]} {cp3[0]},{cp3[1]}"/>
<polyline fill="none" stroke="hsl(196, 100%, 42%)"
points="{M0[0]},{M0[1]} {M1[0]},{M1[1]} {M2[0]},{M2[1]}"/>
<polyline fill="none" stroke="hsl(196, 100%, 42%)"
points="{M3[0]},{M3[1]} {M4[0]},{M4[1]}"/>
<!-- draw in the control points -->
<use x="{cp0[0]}" y="{cp0[1]}" xlink:href="#control_point" />
<use x="{cp1[0]}" y="{cp1[1]}" xlink:href="#control_point" />
<use x="{cp2[0]}" y="{cp2[1]}" xlink:href="#control_point" />
<use x="{cp3[0]}" y="{cp3[1]}" xlink:href="#control_point" />
<!-- draw all the midpoints -->
<use x="{M0[0]}" y="{M0[1]}" xlink:href="#mid_point" />
<use x="{M1[0]}" y="{M1[1]}" xlink:href="#mid_point" />
<use x="{M2[0]}" y="{M2[1]}" xlink:href="#mid_point" />
<use x="{M3[0]}" y="{M3[1]}" xlink:href="#mid_point" />
<use x="{M4[0]}" y="{M4[1]}" xlink:href="#mid_point" />
<use x="{M5[0]}" y="{M5[1]}" xlink:href="#mid_point" />
<!-- draw secondary control points -->
<use x="{M0[0]}" y="{M0[1]}" xlink:href="#control_point" />
<use x="{M5[0]}" y="{M5[1]}" xlink:href="#control_point" />
<use x="{M2[0]}" y="{M2[1]}" xlink:href="#control_point" />
<use x="{M3[0]}" y="{M3[1]}" xlink:href="#control_point" />
<use x="{M4[0]}" y="{M4[1]}" xlink:href="#control_point" />
<text x="{cp0[0]}" y="{cp0[1]}">P<tspan baseline-shift="sub">0,0</tspan><!-- = {cp0} --></text>
<text x="{cp1[0]}" y="{cp1[1]}">P<tspan baseline-shift="sub">0,1</tspan><!-- = {cp1} --></text>
<text x="{cp2[0]}" y="{cp2[1]}">P<tspan baseline-shift="sub">0,2</tspan><!-- = {cp2} --></text>
<text x="{cp3[0]}" y="{cp3[1]}">P<tspan baseline-shift="sub">0,3</tspan><!-- = {cp3} --></text>
<text x="{M0[0]}" y="{M0[1]}" >P<tspan baseline-shift="sub">1,0</tspan><!-- = {M0} --></text>
<text x="{M1[0]}" y="{M1[1]}" >P<tspan baseline-shift="sub">1,1</tspan><!-- = {M1} --></text>
<text x="{M2[0]}" y="{M2[1]}" >P<tspan baseline-shift="sub">1,2</tspan><!-- = {M2} --></text>
<text x="{M3[0]}" y="{M3[1]}" >P<tspan baseline-shift="sub">2,0</tspan><!-- = {M3} --></text>
<text x="{M4[0]}" y="{M4[1]}" >P<tspan baseline-shift="sub">2,1</tspan><!-- = {M4} --></text>
<text x="{M5[0]}" y="{M5[1]}" >P<tspan baseline-shift="sub">3,0</tspan><!-- = {M5} --></text>
</svg>""".format(
__file__,
t = t,
cp0 = cp0,
cp1 = cp1,
cp2 = cp2,
cp3 = cp3,
M0 = M0, M1 = M1, M2 = M2,
M3 = M3, M4 = M4,
M5 = M5
))
| mit | -7,812,529,571,077,760,000 | 43.272727 | 176 | 0.490246 | false |
gameduell/dask | dask/utils.py | 1 | 30745 | from __future__ import absolute_import, division, print_function
import codecs
import functools
import inspect
import io
import math
import os
import re
import shutil
import struct
import sys
import tempfile
from errno import ENOENT
from collections import Iterator
from contextlib import contextmanager
from importlib import import_module
from threading import Lock
import multiprocessing as mp
from . import multiprocessing
import uuid
from weakref import WeakValueDictionary
from .compatibility import (long, getargspec, BZ2File, GzipFile, LZMAFile, PY3,
urlsplit, unicode)
from .core import get_deps
from .context import _globals
system_encoding = sys.getdefaultencoding()
if system_encoding == 'ascii':
system_encoding = 'utf-8'
def deepmap(func, *seqs):
""" Apply function inside nested lists
>>> inc = lambda x: x + 1
>>> deepmap(inc, [[1, 2], [3, 4]])
[[2, 3], [4, 5]]
>>> add = lambda x, y: x + y
>>> deepmap(add, [[1, 2], [3, 4]], [[10, 20], [30, 40]])
[[11, 22], [33, 44]]
"""
if isinstance(seqs[0], (list, Iterator)):
return [deepmap(func, *items) for items in zip(*seqs)]
else:
return func(*seqs)
@contextmanager
def ignoring(*exceptions):
try:
yield
except exceptions:
pass
def import_required(mod_name, error_msg):
"""Attempt to import a required dependency.
Raises a RuntimeError if the requested module is not available.
"""
try:
return import_module(mod_name)
except ImportError:
raise RuntimeError(error_msg)
@contextmanager
def tmpfile(extension='', dir=None):
extension = '.' + extension.lstrip('.')
handle, filename = tempfile.mkstemp(extension, dir=dir)
os.close(handle)
os.remove(filename)
try:
yield filename
finally:
if os.path.exists(filename):
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
with ignoring(OSError):
os.remove(filename)
@contextmanager
def tmpdir(dir=None):
dirname = tempfile.mkdtemp(dir=dir)
try:
yield dirname
finally:
if os.path.exists(dirname):
if os.path.isdir(dirname):
with ignoring(OSError):
shutil.rmtree(dirname)
else:
with ignoring(OSError):
os.remove(dirname)
@contextmanager
def filetext(text, extension='', open=open, mode='w'):
with tmpfile(extension=extension) as filename:
f = open(filename, mode=mode)
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield filename
@contextmanager
def changed_cwd(new_cwd):
old_cwd = os.getcwd()
os.chdir(new_cwd)
try:
yield
finally:
os.chdir(old_cwd)
@contextmanager
def tmp_cwd(dir=None):
with tmpdir(dir) as dirname:
with changed_cwd(dirname):
yield dirname
@contextmanager
def noop_context():
yield
def repr_long_list(seq):
"""
>>> repr_long_list(list(range(100)))
'[0, 1, 2, ..., 98, 99]'
"""
if len(seq) < 8:
return repr(seq)
else:
return repr(seq[:3])[:-1] + ', ..., ' + repr(seq[-2:])[1:]
class IndexCallable(object):
""" Provide getitem syntax for functions
>>> def inc(x):
... return x + 1
>>> I = IndexCallable(inc)
>>> I[3]
4
"""
__slots__ = 'fn',
def __init__(self, fn):
self.fn = fn
def __getitem__(self, key):
return self.fn(key)
@contextmanager
def filetexts(d, open=open, mode='t', use_tmpdir=True):
""" Dumps a number of textfiles to disk
d - dict
a mapping from filename to text like {'a.csv': '1,1\n2,2'}
Since this is meant for use in tests, this context manager will
automatically switch to a temporary current directory, to avoid
race conditions when running tests in parallel.
"""
with (tmp_cwd() if use_tmpdir else noop_context()):
for filename, text in d.items():
f = open(filename, 'w' + mode)
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield list(d)
for filename in d:
if os.path.exists(filename):
with ignoring(OSError):
os.remove(filename)
compressions = {'gz': 'gzip', 'bz2': 'bz2', 'xz': 'xz'}
def infer_compression(filename):
extension = os.path.splitext(filename)[-1].strip('.')
return compressions.get(extension, None)
opens = {'gzip': GzipFile, 'bz2': BZ2File, 'xz': LZMAFile}
def open(filename, mode='rb', compression=None, **kwargs):
if compression == 'infer':
compression = infer_compression(filename)
return opens.get(compression, io.open)(filename, mode, **kwargs)
def get_bom(fn, compression=None):
"""
Get the Byte Order Mark (BOM) if it exists.
"""
boms = set((codecs.BOM_UTF16, codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE))
with open(fn, mode='rb', compression=compression) as f:
f.seek(0)
bom = f.read(2)
f.seek(0)
if bom in boms:
return bom
else:
return b''
def get_bin_linesep(encoding, linesep):
"""
Simply doing `linesep.encode(encoding)` does not always give you
*just* the linesep bytes, for some encodings this prefix's the
linesep bytes with the BOM. This function ensures we just get the
linesep bytes.
"""
if encoding == 'utf-16':
return linesep.encode('utf-16')[2:] # [2:] strips bom
else:
return linesep.encode(encoding)
def textblock(filename, start, end, compression=None, encoding=system_encoding,
linesep=os.linesep, buffersize=4096):
"""Pull out a block of text from a file given start and stop bytes.
This gets data starting/ending from the next linesep delimiter. Each block
consists of bytes in the range [start,end[, i.e. the stop byte is excluded.
If `start` is 0, then `start` corresponds to the true start byte. If
`start` is greater than 0 and does not point to the beginning of a new
line, then `start` is incremented until it corresponds to the start byte of
the next line. If `end` does not point to the beginning of a new line, then
the line that begins before `end` is included in the block although its
last byte exceeds `end`.
Examples
--------
>> with open('myfile.txt', 'wb') as f:
.. f.write('123\n456\n789\nabc')
In the example below, 1 and 10 don't line up with endlines.
>> u''.join(textblock('myfile.txt', 1, 10))
'456\n789\n'
"""
# Make sure `linesep` is not a byte string because
    # `io.TextIOWrapper` in Python versions other than 2.7 dislikes byte
# strings for the `newline` argument.
linesep = str(linesep)
# Get byte representation of the line separator.
bin_linesep = get_bin_linesep(encoding, linesep)
bin_linesep_len = len(bin_linesep)
if buffersize < bin_linesep_len:
error = ('`buffersize` ({0:d}) must be at least as large as the '
'number of line separator bytes ({1:d}).')
raise ValueError(error.format(buffersize, bin_linesep_len))
chunksize = end - start
with open(filename, 'rb', compression) as f:
with io.BufferedReader(f) as fb:
# If `start` does not correspond to the beginning of the file, we
# need to move the file pointer to `start - len(bin_linesep)`,
# search for the position of the next a line separator, and set
# `start` to the position after that line separator.
if start > 0:
# `start` is decremented by `len(bin_linesep)` to detect the
# case where the original `start` value corresponds to the
# beginning of a line.
start = max(0, start - bin_linesep_len)
# Set the file pointer to `start`.
fb.seek(start)
# Number of bytes to shift the file pointer before reading a
# new chunk to make sure that a multi-byte line separator, that
# is split by the chunk reader, is still detected.
shift = 1 - bin_linesep_len
while True:
buf = f.read(buffersize)
if len(buf) < bin_linesep_len:
raise StopIteration
try:
# Find the position of the next line separator and add
# `len(bin_linesep)` which yields the position of the
# first byte of the next line.
start += buf.index(bin_linesep)
start += bin_linesep_len
except ValueError:
# No line separator was found in the current chunk.
# Before reading the next chunk, we move the file
# pointer back `len(bin_linesep) - 1` bytes to make
# sure that a multi-byte line separator, that may have
# been split by the chunk reader, is still detected.
start += len(buf)
start += shift
fb.seek(shift, os.SEEK_CUR)
else:
# We have found the next line separator, so we need to
# set the file pointer to the first byte of the next
# line.
fb.seek(start)
break
with io.TextIOWrapper(fb, encoding, newline=linesep) as fbw:
# Retrieve and yield lines until the file pointer reaches
# `end`.
while start < end:
line = next(fbw)
# We need to encode the line again to get the byte length
# in order to correctly update `start`.
bin_line_len = len(line.encode(encoding))
if chunksize < bin_line_len:
error = ('`chunksize` ({0:d}) is less than the line '
'length ({1:d}). This may cause duplicate '
'processing of this line. It is advised to '
'increase `chunksize`.')
raise IOError(error.format(chunksize, bin_line_len))
yield line
start += bin_line_len
def concrete(seq):
""" Make nested iterators concrete lists
>>> data = [[1, 2], [3, 4]]
>>> seq = iter(map(iter, data))
>>> concrete(seq)
[[1, 2], [3, 4]]
"""
if isinstance(seq, Iterator):
seq = list(seq)
if isinstance(seq, (tuple, list)):
seq = list(map(concrete, seq))
return seq
def skip(func):
pass
def pseudorandom(n, p, random_state=None):
""" Pseudorandom array of integer indexes
>>> pseudorandom(5, [0.5, 0.5], random_state=123)
array([1, 0, 0, 1, 1], dtype=int8)
>>> pseudorandom(10, [0.5, 0.2, 0.2, 0.1], random_state=5)
array([0, 2, 0, 3, 0, 1, 2, 1, 0, 0], dtype=int8)
"""
import numpy as np
p = list(p)
cp = np.cumsum([0] + p)
assert np.allclose(1, cp[-1])
assert len(p) < 256
if not isinstance(random_state, np.random.RandomState):
random_state = np.random.RandomState(random_state)
x = random_state.random_sample(n)
out = np.empty(n, dtype='i1')
for i, (low, high) in enumerate(zip(cp[:-1], cp[1:])):
out[(x >= low) & (x < high)] = i
return out
def random_state_data(n, random_state=None):
"""Return a list of arrays that can initialize
``np.random.RandomState``.
Parameters
----------
n : int
Number of tuples to return.
random_state : int or np.random.RandomState, optional
If an int, is used to seed a new ``RandomState``.
"""
import numpy as np
if not isinstance(random_state, np.random.RandomState):
random_state = np.random.RandomState(random_state)
maxuint32 = np.iinfo(np.uint32).max
return [(random_state.rand(624) * maxuint32).astype('uint32')
for i in range(n)]
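# Illustrative use of random_state_data (a sketch added for clarity; the
# variable names below are hypothetical):
#
#     import numpy as np
#     states = [np.random.RandomState(s) for s in random_state_data(3, 42)]
#     # three independent, reproducibly seeded generators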
def is_integer(i):
"""
>>> is_integer(6)
True
>>> is_integer(42.0)
True
>>> is_integer('abc')
False
"""
import numpy as np
if isinstance(i, (int, long)):
return True
if isinstance(i, float):
return (i).is_integer()
if issubclass(type(i), np.integer):
        return True
else:
return False
def file_size(fn, compression=None):
""" Size of a file on disk
If compressed then return the uncompressed file size
"""
if compression == 'gzip':
with open(fn, 'rb') as f:
f.seek(-4, 2)
result = struct.unpack('I', f.read(4))[0]
elif compression:
# depending on the implementation, this may be inefficient
with open(fn, 'rb', compression) as f:
result = f.seek(0, 2)
else:
result = os.stat(fn).st_size
return result
ONE_ARITY_BUILTINS = set([abs, all, any, bool, bytearray, bytes, callable, chr,
classmethod, complex, dict, dir, enumerate, eval,
float, format, frozenset, hash, hex, id, int, iter,
len, list, max, min, next, oct, open, ord, range,
repr, reversed, round, set, slice, sorted,
staticmethod, str, sum, tuple,
type, vars, zip, memoryview])
if PY3:
ONE_ARITY_BUILTINS.add(ascii) # noqa: F821
MULTI_ARITY_BUILTINS = set([compile, delattr, divmod, filter, getattr, hasattr,
isinstance, issubclass, map, pow, setattr])
def takes_multiple_arguments(func):
""" Does this function take multiple arguments?
>>> def f(x, y): pass
>>> takes_multiple_arguments(f)
True
>>> def f(x): pass
>>> takes_multiple_arguments(f)
False
>>> def f(x, y=None): pass
>>> takes_multiple_arguments(f)
False
>>> def f(*args): pass
>>> takes_multiple_arguments(f)
True
>>> class Thing(object):
... def __init__(self, a): pass
>>> takes_multiple_arguments(Thing)
False
"""
if func in ONE_ARITY_BUILTINS:
return False
elif func in MULTI_ARITY_BUILTINS:
return True
try:
spec = getargspec(func)
except:
return False
try:
is_constructor = spec.args[0] == 'self' and isinstance(func, type)
except:
is_constructor = False
if spec.varargs:
return True
if spec.defaults is None:
return len(spec.args) - is_constructor != 1
return len(spec.args) - len(spec.defaults) - is_constructor > 1
class Dispatch(object):
"""Simple single dispatch."""
def __init__(self):
self._lookup = {}
self._lazy = {}
def register(self, type, func=None):
"""Register dispatch of `func` on arguments of type `type`"""
def wrapper(func):
if isinstance(type, tuple):
for t in type:
self.register(t, func)
else:
self._lookup[type] = func
return func
return wrapper(func) if func is not None else wrapper
def register_lazy(self, toplevel, func=None):
"""
Register a registration function which will be called if the
*toplevel* module (e.g. 'pandas') is ever loaded.
"""
def wrapper(func):
self._lazy[toplevel] = func
return func
return wrapper(func) if func is not None else wrapper
def __call__(self, arg):
# Fast path with direct lookup on type
lk = self._lookup
typ = type(arg)
try:
impl = lk[typ]
except KeyError:
pass
else:
return impl(arg)
# Is a lazy registration function present?
toplevel, _, _ = typ.__module__.partition('.')
try:
register = self._lazy.pop(toplevel)
except KeyError:
pass
else:
register()
return self(arg) # recurse
# Walk the MRO and cache the lookup result
for cls in inspect.getmro(typ)[1:]:
if cls in lk:
lk[typ] = lk[cls]
return lk[cls](arg)
raise TypeError("No dispatch for {0} type".format(typ))
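# Illustrative usage of Dispatch (a sketch added for clarity; ``normalize``
# and the registered functions below are hypothetical, not part of this module):
#
#     normalize = Dispatch()
#
#     @normalize.register(list)
#     def normalize_list(seq):
#         return [normalize(x) for x in seq]
#
#     @normalize.register(object)
#     def normalize_default(obj):
#         return obj
#
#     normalize([1, 2])  # dispatches on type(arg), falling back through the MRO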
def ensure_not_exists(filename):
"""
Ensure that a file does not exist.
"""
try:
os.unlink(filename)
except OSError as e:
if e.errno != ENOENT:
raise
def _skip_doctest(line):
# NumPy docstring contains cursor and comment only example
stripped = line.strip()
if stripped == '>>>' or stripped.startswith('>>> #'):
return stripped
elif '>>>' in stripped:
return line + ' # doctest: +SKIP'
else:
return line
def skip_doctest(doc):
if doc is None:
return ''
return '\n'.join([_skip_doctest(line) for line in doc.split('\n')])
def derived_from(original_klass, version=None, ua_args=[]):
"""Decorator to attach original class's docstring to the wrapped method.
Parameters
----------
original_klass: type
Original class which the method is derived from
version : str
Original package version which supports the wrapped method
ua_args : list
List of keywords which Dask doesn't support. Keywords existing in
original but not in Dask will automatically be added.
"""
def wrapper(method):
method_name = method.__name__
try:
# do not use wraps here, as it hides keyword arguments displayed
# in the doc
original_method = getattr(original_klass, method_name)
doc = original_method.__doc__
if doc is None:
doc = ''
try:
method_args = getargspec(method).args
original_args = getargspec(original_method).args
not_supported = [m for m in original_args if m not in method_args]
except TypeError:
not_supported = []
if len(ua_args) > 0:
not_supported.extend(ua_args)
if len(not_supported) > 0:
                note = ("\n    Notes\n    -----\n"
                        "    Dask doesn't support the following argument(s).\n\n")
args = ''.join([' * {0}\n'.format(a) for a in not_supported])
doc = doc + note + args
doc = skip_doctest(doc)
method.__doc__ = doc
return method
except AttributeError:
module_name = original_klass.__module__.split('.')[0]
@functools.wraps(method)
def wrapped(*args, **kwargs):
msg = "Base package doesn't support '{0}'.".format(method_name)
if version is not None:
msg2 = " Use {0} {1} or later to use this method."
msg += msg2.format(module_name, version)
raise NotImplementedError(msg)
return wrapped
return wrapper
def funcname(func):
"""Get the name of a function."""
# functools.partial
if isinstance(func, functools.partial):
return funcname(func.func)
# methodcaller
if isinstance(func, methodcaller):
return func.method
module_name = getattr(func, '__module__', None) or ''
type_name = getattr(type(func), '__name__', None) or ''
# toolz.curry
if 'toolz' in module_name and 'curry' == type_name:
return func.func_name
# multipledispatch objects
if 'multipledispatch' in module_name and 'Dispatcher' == type_name:
return func.name
# All other callables
try:
name = func.__name__
if name == '<lambda>':
return 'lambda'
return name
except:
return str(func)
def ensure_bytes(s):
""" Turn string or bytes to bytes
>>> ensure_bytes(u'123')
'123'
>>> ensure_bytes('123')
'123'
>>> ensure_bytes(b'123')
'123'
"""
if isinstance(s, bytes):
return s
if hasattr(s, 'encode'):
return s.encode()
msg = "Object %s is neither a bytes object nor has an encode method"
raise TypeError(msg % s)
def ensure_unicode(s):
    """ Turn string or bytes to unicode
>>> ensure_unicode(u'123')
u'123'
>>> ensure_unicode('123')
u'123'
>>> ensure_unicode(b'123')
u'123'
"""
if isinstance(s, unicode):
return s
if hasattr(s, 'decode'):
return s.decode()
    msg = "Object %s is neither a unicode object nor has a decode method"
raise TypeError(msg % s)
def digit(n, k, base):
"""
>>> digit(1234, 0, 10)
4
>>> digit(1234, 1, 10)
3
>>> digit(1234, 2, 10)
2
>>> digit(1234, 3, 10)
1
"""
return n // base**k % base
def insert(tup, loc, val):
"""
>>> insert(('a', 'b', 'c'), 0, 'x')
('x', 'b', 'c')
"""
L = list(tup)
L[loc] = val
return tuple(L)
def build_name_function(max_int):
""" Returns a function that receives a single integer
and returns it as a string padded by enough zero characters
to align with maximum possible integer
>>> name_f = build_name_function(57)
>>> name_f(7)
'07'
>>> name_f(31)
'31'
>>> build_name_function(1000)(42)
'0042'
>>> build_name_function(999)(42)
'042'
>>> build_name_function(0)(0)
'0'
"""
# handle corner cases max_int is 0 or exact power of 10
max_int += 1e-8
pad_length = int(math.ceil(math.log10(max_int)))
def name_function(i):
return str(i).zfill(pad_length)
return name_function
def infer_storage_options(urlpath, inherit_storage_options=None):
""" Infer storage options from URL path and merge it with existing storage
options.
Parameters
----------
urlpath: str or unicode
Either local absolute file path or URL (hdfs://namenode:8020/file.csv)
    inherit_storage_options: dict (optional)
Its contents will get merged with the inferred information from the
given path
Returns
-------
Storage options dict.
Examples
--------
    >>> infer_storage_options('/mnt/datasets/test.csv') # doctest: +SKIP
    {"protocol": "file", "path": "/mnt/datasets/test.csv"}
>>> infer_storage_options(
... 'hdfs://username:pwd@node:123/mnt/datasets/test.csv?q=1',
... inherit_storage_options={'extra': 'value'}) # doctest: +SKIP
{"protocol": "hdfs", "username": "username", "password": "pwd",
"host": "node", "port": 123, "path": "/mnt/datasets/test.csv",
"url_query": "q=1", "extra": "value"}
"""
# Handle Windows paths including disk name in this special case
if re.match(r'^[a-zA-Z]:[\\/]', urlpath):
return {'protocol': 'file',
'path': urlpath}
parsed_path = urlsplit(urlpath)
protocol = parsed_path.scheme or 'file'
path = parsed_path.path
if protocol == 'file':
# Special case parsing file protocol URL on Windows according to:
# https://msdn.microsoft.com/en-us/library/jj710207.aspx
windows_path = re.match(r'^/([a-zA-Z])[:|]([\\/].*)$', path)
if windows_path:
path = '%s:%s' % windows_path.groups()
inferred_storage_options = {
'protocol': protocol,
'path': path,
}
if parsed_path.netloc:
# Parse `hostname` from netloc manually because `parsed_path.hostname`
# lowercases the hostname which is not always desirable (e.g. in S3):
# https://github.com/dask/dask/issues/1417
inferred_storage_options['host'] = parsed_path.netloc.rsplit('@', 1)[-1].rsplit(':', 1)[0]
if parsed_path.port:
inferred_storage_options['port'] = parsed_path.port
if parsed_path.username:
inferred_storage_options['username'] = parsed_path.username
if parsed_path.password:
inferred_storage_options['password'] = parsed_path.password
if parsed_path.query:
inferred_storage_options['url_query'] = parsed_path.query
if parsed_path.fragment:
inferred_storage_options['url_fragment'] = parsed_path.fragment
if inherit_storage_options:
if set(inherit_storage_options) & set(inferred_storage_options):
raise KeyError("storage options (%r) and path url options (%r) "
"collision is detected"
% (inherit_storage_options, inferred_storage_options))
inferred_storage_options.update(inherit_storage_options)
return inferred_storage_options
def dependency_depth(dsk):
import toolz
deps, _ = get_deps(dsk)
@toolz.memoize
def max_depth_by_deps(key):
if not deps[key]:
return 1
d = 1 + max(max_depth_by_deps(dep_key) for dep_key in deps[key])
return d
return max(max_depth_by_deps(dep_key) for dep_key in deps.keys())
def eq_strict(a, b):
"""Returns True if both values have the same type and are equal."""
if type(a) is type(b):
return a == b
return False
def memory_repr(num):
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
def put_lines(buf, lines):
if any(not isinstance(x, unicode) for x in lines):
lines = [unicode(x) for x in lines]
buf.write('\n'.join(lines))
hex_pattern = re.compile('[a-f]+')
def key_split(s):
"""
>>> key_split('x')
u'x'
>>> key_split('x-1')
u'x'
>>> key_split('x-1-2-3')
u'x'
>>> key_split(('x-2', 1))
'x'
>>> key_split("('x-2', 1)")
u'x'
>>> key_split('hello-world-1')
u'hello-world'
>>> key_split(b'hello-world-1')
u'hello-world'
>>> key_split('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split('<module.submodule.myclass object at 0xdaf372')
u'myclass'
>>> key_split(None)
'Other'
>>> key_split('x-abcdefab') # ignores hex
u'x'
"""
if type(s) is bytes:
s = s.decode()
if type(s) is tuple:
s = s[0]
try:
words = s.split('-')
result = words[0].lstrip("'(\"")
for word in words[1:]:
if word.isalpha() and not (len(word) == 8 and
hex_pattern.match(word) is not None):
result += '-' + word
else:
break
if len(result) == 32 and re.match(r'[a-f0-9]{32}', result):
return 'data'
else:
if result[0] == '<':
result = result.strip('<>').split()[0].split('.')[-1]
return result
except Exception:
return 'Other'
_method_cache = {}
class methodcaller(object):
"""Return a callable object that calls the given method on its operand.
Unlike the builtin `methodcaller`, this class is serializable"""
__slots__ = ('method',)
func = property(lambda self: self.method) # For `funcname` to work
def __new__(cls, method):
if method in _method_cache:
return _method_cache[method]
self = object.__new__(cls)
self.method = method
_method_cache[method] = self
return self
def __call__(self, obj, *args, **kwargs):
return getattr(obj, self.method)(*args, **kwargs)
def __reduce__(self):
return (methodcaller, (self.method,))
def __str__(self):
return "<%s: %s>" % (self.__class__.__name__, self.method)
__repr__ = __str__
class MethodCache(object):
"""Attribute access on this object returns a methodcaller for that
attribute.
Examples
--------
>>> a = [1, 3, 3]
>>> M.count(a, 3) == a.count(3)
True
"""
__getattr__ = staticmethod(methodcaller)
__dir__ = lambda self: list(_method_cache)
M = MethodCache()
class SerializableLock(object):
_locks = WeakValueDictionary()
""" A Serializable per-process Lock
This wraps a normal ``threading.Lock`` object and satisfies the same
interface. However, this lock can also be serialized and sent to different
processes. It will not block concurrent operations between processes (for
    this you should look at ``multiprocessing.Lock`` or ``locket.lock_file``)
but will consistently deserialize into the same lock.
So if we make a lock in one process::
lock = SerializableLock()
And then send it over to another process multiple times::
bytes = pickle.dumps(lock)
a = pickle.loads(bytes)
b = pickle.loads(bytes)
Then the deserialized objects will operate as though they were the same
lock, and collide as appropriate.
This is useful for consistently protecting resources on a per-process
level.
The creation of locks is itself not threadsafe.
"""
def __init__(self, token=None):
self.token = token or str(uuid.uuid4())
if self.token in SerializableLock._locks:
self.lock = SerializableLock._locks[self.token]
else:
self.lock = Lock()
SerializableLock._locks[self.token] = self.lock
def acquire(self, *args):
return self.lock.acquire(*args)
def release(self, *args):
return self.lock.release(*args)
def __enter__(self):
self.lock.__enter__()
def __exit__(self, *args):
self.lock.__exit__(*args)
@property
def locked(self):
        return self.lock.locked()
def __getstate__(self):
return self.token
def __setstate__(self, token):
self.__init__(token)
def __str__(self):
return "<%s: %s>" % (self.__class__.__name__, self.token)
__repr__ = __str__
def effective_get(get=None, collection=None):
"""Get the effective get method used in a given situation"""
collection_get = collection._default_get if collection else None
return get or _globals.get('get') or collection_get
def get_scheduler_lock(get=None, collection=None):
"""Get an instance of the appropriate lock for a certain situation based on
scheduler used."""
actual_get = effective_get(get, collection)
if actual_get == multiprocessing.get:
return mp.Manager().Lock()
return SerializableLock()
| bsd-3-clause | 1,220,396,701,005,305,600 | 27.97738 | 98 | 0.565295 | false |
sunlightlabs/tcamp | tcamp/reg/migrations/0005_auto__add_field_ticket_checked_in.py | 1 | 12104 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Ticket.checked_in'
db.add_column(u'reg_ticket', 'checked_in',
self.gf('django.db.models.fields.DateTimeField')(default=None, null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Ticket.checked_in'
db.delete_column(u'reg_ticket', 'checked_in')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'reg.couponcode': {
'Meta': {'object_name': 'CouponCode'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'discount': ('django_extras.db.models.fields.PercentField', [], {'default': '100'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sked.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_tickets': ('django.db.models.fields.IntegerField', [], {})
},
u'reg.sale': {
'Meta': {'object_name': 'Sale'},
'address1': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'address2': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'coupon_code': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['reg.CouponCode']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sked.Event']"}),
'first_name': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'success': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'transaction_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'reg.ticket': {
'Meta': {'object_name': 'Ticket'},
'ambassador_program': ('django.db.models.fields.CharField', [], {'default': "'no'", 'max_length': '12'}),
'attend_day1': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'attend_day2': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'barcode': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '36', 'blank': 'True'}),
'checked_in': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'diet_allergies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'diet_allergies_desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'diet_gluten_free': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'diet_other': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'diet_other_desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'diet_vegan': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'diet_vegetarian': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sked.Event']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'lobby_day': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'sale': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['reg.Sale']", 'null': 'True'}),
'subscribe': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'success': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['reg.TicketType']"}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'reg.tickettype': {
'Meta': {'ordering': "['position']", 'object_name': 'TicketType'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sked.Event']"}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_tickets': ('django.db.models.fields.PositiveIntegerField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'})
},
u'sked.event': {
'Meta': {'ordering': "('-start_date',)", 'object_name': 'Event'},
'_description_rendered': ('django.db.models.fields.TextField', [], {}),
'_overview_rendered': ('django.db.models.fields.TextField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sked_events'", 'null': 'True', 'to': u"orm['auth.User']"}),
'description': ('markupfield.fields.MarkupField', [], {'rendered_field': 'True', 'blank': 'True'}),
'description_markup_type': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '30', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'event'", 'max_length': '64'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'overview': ('markupfield.fields.MarkupField', [], {'rendered_field': 'True', 'blank': 'True'}),
'overview_markup_type': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '30', 'blank': 'True'}),
'registration_is_open': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'registration_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'session_label': ('django.db.models.fields.CharField', [], {'default': "'session'", 'max_length': '64'}),
'session_length': ('timedelta.fields.TimedeltaField', [], {}),
'session_submission_is_open': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'start_date': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['reg'] | bsd-3-clause | 6,451,636,792,326,531,000 | 78.117647 | 187 | 0.549901 | false |
getsentry/raven-aiohttp | tests/test_transport.py | 1 | 4590 | import asyncio
import logging
from unittest import mock
import pytest
from raven_aiohttp import AioHttpTransport
from tests.utils import Logger
pytestmark = pytest.mark.asyncio
@asyncio.coroutine
def test_basic(fake_server, raven_client, wait):
server = yield from fake_server()
client, transport = raven_client(server, AioHttpTransport)
try:
1 / 0
except ZeroDivisionError:
client.captureException()
yield from wait(transport)
assert server.hits[200] == 1
@asyncio.coroutine
def test_no_keepalive(fake_server, raven_client, wait):
transport = AioHttpTransport(keepalive=False)
assert not hasattr(transport, '_client_session')
yield from transport.close()
server = yield from fake_server()
client, transport = raven_client(server, AioHttpTransport)
transport._keepalive = False
session = transport._client_session
def _client_session_factory():
return session
with mock.patch(
'raven_aiohttp.AioHttpTransport._client_session_factory',
side_effect=_client_session_factory,
):
try:
1 / 0
except ZeroDivisionError:
client.captureException()
yield from wait(transport)
assert session.closed
assert server.hits[200] == 1
@asyncio.coroutine
def test_close_timeout(fake_server, raven_client):
server = yield from fake_server()
server.slop_factor = 100
client, transport = raven_client(server, AioHttpTransport)
try:
1 / 0
except ZeroDivisionError:
client.captureException()
yield from transport.close(timeout=0)
assert server.hits[200] == 0
@asyncio.coroutine
def test_rate_limit(fake_server, raven_client, wait):
server = yield from fake_server()
server.side_effect['status'] = 429
with Logger('sentry.errors', level=logging.ERROR) as log:
client, transport = raven_client(server, AioHttpTransport)
try:
1 / 0
except ZeroDivisionError:
client.captureException()
yield from wait(transport)
assert server.hits[429] == 1
msg = 'Sentry responded with an API error: RateLimited(None)'
assert log.msgs[0] == msg
@asyncio.coroutine
def test_rate_limit_retry_after(fake_server, raven_client, wait):
server = yield from fake_server()
server.side_effect['status'] = 429
server.side_effect['headers'] = {'Retry-After': '1'}
with Logger('sentry.errors', level=logging.ERROR) as log:
client, transport = raven_client(server, AioHttpTransport)
try:
1 / 0
except ZeroDivisionError:
client.captureException()
yield from wait(transport)
assert server.hits[429] == 1
msg = 'Sentry responded with an API error: RateLimited(None)'
assert log.msgs[0] == msg
@asyncio.coroutine
def test_status_500(fake_server, raven_client, wait):
server = yield from fake_server()
server.side_effect['status'] = 500
with Logger('sentry.errors', level=logging.ERROR) as log:
client, transport = raven_client(server, AioHttpTransport)
try:
1 / 0
except ZeroDivisionError:
client.captureException()
yield from wait(transport)
assert server.hits[500] == 1
msg = 'Sentry responded with an API error: APIError(None)'
assert log.msgs[0] == msg
@asyncio.coroutine
def test_cancelled_error(fake_server, raven_client, wait):
server = yield from fake_server()
with mock.patch(
'aiohttp.ClientSession.post',
side_effect=asyncio.CancelledError,
):
with Logger('sentry.errors', level=logging.ERROR) as log:
client, transport = raven_client(server, AioHttpTransport)
try:
1 / 0
except ZeroDivisionError:
client.captureException()
with pytest.raises(asyncio.CancelledError):
yield from wait(transport)
assert server.hits[200] == 0
@asyncio.coroutine
def test_async_send_when_closed(fake_server, raven_client):
server = yield from fake_server()
with Logger('sentry.errors', level=logging.ERROR) as log:
client, transport = raven_client(server, AioHttpTransport)
close = transport.close()
try:
1 / 0
except ZeroDivisionError:
client.captureException()
assert server.hits[200] == 0
assert log.msgs[0].startswith(
'Sentry responded with an error: AioHttpTransport is closed')
yield from close
| bsd-3-clause | -4,564,477,106,730,746,000 | 24.081967 | 70 | 0.64902 | false |
asteriogonzalez/pytest-study | pytest_study.py | 1 | 6960 | """
In situations where we are developing an application or library
that will be used to create long computation reports or results,
we want to execute the long process only when all the project tests
are passed.
pytest provides great support for creating test suites,
parallel execution, reports, command line, IDE or CI integration,
and so forth, so the idea is to write this long computation code
in test form, group it into studies and extend pytest with a plugin
that allows us to:
- Ignore these long computation studies and run only the regular tests.
- Sort all the involved tests so the study will be executed only when
all its dependencies have passed.
- Define the studies and dependencies in an easy way.
- Not interfere with normal pytest use.
For more detailed references, please read README.md or
visit https://github.com/asteriogonzalez/pytest-study
"""
from __future__ import print_function
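# Illustrative usage (a sketch based on the marks handled below; the test
# names are hypothetical and not part of this module):
#
#     import pytest
#
#     def test_fast_unit():                      # regular test, always collected
#         assert True
#
#     @pytest.mark.pre(name='curves', order=1)
#     def test_prepare_data():                   # prerequisite of the 'curves' study
#         assert True
#
#     @pytest.mark.study(name='curves', order=5)
#     def test_study_curves():                   # runs after its prerequisites, only
#         assert True                            # with --runstudy=curves (or 'all')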
try:
import wingdbstub
except ImportError:
pass
import re
import pytest
from blessings import Terminal
term = Terminal()
MARKS = ['study', 'pre']  # match 1st occurrence
def parse_args(args, kwargs):
"update kwargs with positional arguments"
positional = ['name', 'order']
kw = {'name': 'default', 'order': 1000}
kw.update(kwargs)
for key in kwargs:
if key in positional:
positional.remove(key)
for i, val in enumerate(args):
kw[positional[i]] = val
return kw
def get_study_name(item):
    "Try to get the name of the study the test belongs to, or '' when it is free"
for mark in MARKS:
marker = item.get_marker(mark)
if marker:
return parse_args(marker.args, marker.kwargs)['name']
return ''
def get_FQN(item):
"Get the Full Qualified Name of a test item"
names = []
for x in item.listchain():
if not isinstance(x, (pytest.Session, pytest.Instance)):
names.append(x.name)
return ':'.join(names)
# ------------------------------------------
# Skip study tests
# ------------------------------------------
def pytest_addoption(parser):
"Add the --runstudy option in command line"
# parser.addoption("--runstudy", action="store_true",
# default=False, help="run studio processes")
parser.addoption("--show_order", action="store_true",
default=False,
help="""show tests and studies order execution
and which are selected for execution.""")
parser.addoption("--runstudy", action="store", type="string",
default='', metavar='all|reg expression',
help="""regular expression for the studies names
('all' runs all).
None is selected by default.""")
def pytest_collection_modifyitems(config, items):
"""Remove all study tests if --runstudy is not selected
and reorder the study dependences to be executed incrementaly
so any failed study test will abort the complete sequence.
- Mark a test with @pytest.mark.study to consider part of a study.
- Mark a test with @pytest.mark.study and named 'test_study_xxxx()'
to be executed at the end when all previous test study functions
are passed.
"""
    # check if study tests must be skipped
run_study = config.getoption("--runstudy")
# 'all' will match all studies, '' will not match anything
run_study = {'': '(?!x)x', 'all': '.*'}.get(run_study, run_study)
# --runstudy given in cli: do not skip study tests and
test_selected = list()
test_skipped = list()
groups = dict()
incremental = pytest.mark.incremental()
def add():
"helper for gathering test info"
marker = item.get_marker(mark)
kwargs = parse_args(marker.args, marker.kwargs)
group_name = kwargs['name']
group = groups.setdefault(group_name, dict())
group.setdefault(mark, list()).append((kwargs, item))
item.add_marker(incremental)
# place every test in regular, prerequisite and studies
# group by name
for item in items:
for mark in set(item.keywords.keys()).intersection(MARKS):
add()
break
else:
test_selected.append(item)
def sort(a, b):
"Sort two items by order priority"
return cmp(a[0]['order'], b[0]['order'])
# use studies precedence to built the global sequence order
mandatory = 'study' # mandatory mark for global sorting: study
studies = list()
for name, info in groups.items():
studies.extend(info.get(mandatory, []))
studies.sort(sort)
def append(tests, where):
"helper to add the test item from info structure"
for test in tests:
test = test[1]
if test not in where:
where.append(test)
# select only the test that are going to be launched
width = 0
regexp = re.compile(run_study, re.I | re.DOTALL)
for study in studies:
group_name = study[0]['name']
width = max(width, len(group_name))
where = test_selected if regexp.search(group_name) else test_skipped
for mark, seq in groups[group_name].items():
if mark == mandatory:
continue
seq.sort(sort)
append(seq, where)
append([study], where)
if config.getoption("--show_order") or config.getoption("--debug"):
fmt = "{0:>3d} [{1:>%s}] {2}" % width
for i, item in enumerate(test_selected + test_skipped):
study = get_study_name(item)
fqn = get_FQN(item)
line = fmt.format(i, study, fqn)
if item in test_selected:
line = term.green('+' + line)
else:
line = term.yellow('-' + line)
print(line)
# we make the --runstudy check at the end to be able to show
    # test order with --show_order or --debug options
# reorder tests by group name and replace items IN-PLACE
if run_study:
items[:] = test_selected
return
skip_test = pytest.mark.skip(reason="need --runstudy option to run")
for item in items:
if set(item.keywords.keys()).intersection(MARKS):
item.add_marker(skip_test)
# ------------------------------------------
# incremental failure chain (from pytest doc)
# ------------------------------------------
def pytest_runtest_makereport(item, call):
"set the last failed test"
if "incremental" in item.keywords:
if call.excinfo is not None:
parent = item.parent
parent._previousfailed = item
def pytest_runtest_setup(item):
"Abort the execution stage if a previous incremental test has failed"
if "incremental" in item.keywords:
previousfailed = getattr(item.parent, "_previousfailed", None)
if previousfailed is not None:
pytest.xfail("previous test failed (%s)" % previousfailed.name)
| mit | -3,178,928,119,728,460,300 | 33.285714 | 76 | 0.614368 | false |
cortext/crawtext | crawtext/database/database.py | 1 | 4822 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__name__ == "database"
import pymongo
import logging
import re
from datetime import datetime
from copy import copy
import os, sys
class Database(object):
'''Database creation'''
def __init__(self, database_name, local=False, debug=False):
try:
addr = os.environ["MONGO-SRV_PORT_27017_TCP_ADDR"]
except KeyError:
addr = "localhost"
try:
port = int(os.environ["MONGO-SRV_PORT_27017_TCP_PORT"])
except KeyError:
port = 27017
uri = addr+":"+str(port)
try:
self.client = pymongo.MongoClient(uri)
except pymongo.errors.ConnectionFailure:
logging.warning("Unable to connect using uri %s" %uri)
try:
self.client = pymongo.MongoClient(addr, port)
except:
sys.exit("ConnectionFailure : Unable to connect to MongoDB with url %s:%s" %(addr,port))
except pymongo.errors.InvalidURI:
try:
self.client = pymongo.MongoClient(addr, port)
except pymongo.errors.ConnectionFailure:
sys.exit("InvalidUri : Unable to connect to MongoDB with url %s:%s" %(addr,port))
self.version = self.client.server_info()['version']
self.t_version = tuple(self.version.split("."))
self.db_name = database_name
self.db = getattr(self.client,database_name)
self.date = datetime.now()
#serverVersion = tuple(connection.server_info()['version'].split('.'))
#requiredVersion = tuple("1.3.3".split("."))
def show_dbs(self):
return self.client.database_names()
def use_db(self, database_name):
return self.client[str(database_name)]
def create_db(self, database_name):
logging.info("Configuring Database %s" %database_name)
self.db = self.client[str(database_name)]
self.create_colls()
return self
def set_coll(self,coll, index = None):
setattr(self, str(coll), self.db[str(coll)])
if index is not None:
self.__dict__[coll].create_index(index, unique=True)
return self.__dict__[coll]
def set_colls(self, colls=[]):
if len(colls) == 0:
self.colls = ["data","queue"]
else:
self.colls = colls
for i in self.colls:
setattr(self, str(i), self.db[str(i)])
return self.colls
def use_coll(self, coll_name):
self.coll = self.db[coll_name]
return self.coll
def create_coll(self, coll_name):
setattr(self, str(coll_name), self.db[str(coll_name)])
self.__dict__[coll_name].create_index("url", unique=True)
#print ("coll : %s has been created in db:%s ") %(self.__dict__[str(coll_name)], self.db_name)
#return self.__dict__[str(coll_name)]
return self.db[str(coll_name)]
def create_colls(self, coll_names=["data", "queue"], index=None):
logging.debug("Configure collections")
self.colls = []
        if len(coll_names) == 0:
            colls = ["data", "queue"]
        else:
            colls = coll_names
for n in colls:
self.colls.append(self.set_coll(n, index))
return self.colls
def show_coll(self):
try:
            print("using current collection %s in DB : %s" % (self.coll_name, self.db_name))
return self.coll_name
except AttributeError:
return False
def show_coll_items(self, coll_name):
return [n for n in self.db[str(coll_name)].find()]
    def create_index(self, key, coll):
        return coll.create_index([(key, pymongo.DESCENDING)])
def drop(self, type, name):
if type == "collection":
return self.drop_coll(name)
elif type == "database":
            return self.drop_db(name)
else:
print("Unknown Type")
return False
def drop_all_dbs(self):
'''remove EVERY SINGLE MONGO DATABASE'''
for n in self.show_dbs():
#if n not in ["projects", "tasks"]:
self.drop_db(n)
def drop_db(self, db=None):
if db is not None:
print("Drop db %s" %db)
return self.client.drop_database(str(db))
def drop_coll(self, coll):
return self.db[str(coll)].drop()
def insert_queue(self, log):
if log["url"] not in self.db.queue.distinct("url"):
self.db.queue.insert_one(log)
return True
def remove_queue(self, log):
self.db.queue.remove({"url":log["url"]})
return True
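def _example_usage():  # pragma: no cover
    """Hedged usage sketch added for illustration; not part of the original API.

    Assumes a reachable MongoDB instance (address/port resolved as in
    Database.__init__) and a disposable database name.
    """
    db = Database("crawtext_demo")
    db.create_db("crawtext_demo")
    db.insert_queue({"url": "http://example.com"})
    print(db.show_coll_items("queue"))
    db.remove_queue({"url": "http://example.com"})
    db.drop_db("crawtext_demo")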
if __name__ == "__main__":
pass
| mit | 3,488,452,641,483,784,000 | 30.51634 | 104 | 0.549564 | false |
ukBaz/python-bluezero | tests/test_gatt.py | 1 | 3714 | """Automated testing of GATT functionality using unittest.mock."""
import sys
import unittest
from unittest.mock import MagicMock
from unittest.mock import patch
import tests.obj_data
from bluezero import constants
adapter_props = tests.obj_data.full_ubits
def mock_get(iface, prop):
if iface == 'org.bluez.Device1':
return tests.obj_data.full_ubits['/org/bluez/hci0/dev_E4_43_33_7E_54_1C'][iface][prop]
else:
return tests.obj_data.full_ubits['/org/bluez/hci0/dev_E4_43_33_7E_54_1C/service002a'][iface][prop]
def mock_set(iface, prop, value):
tests.obj_data.full_ubits['/org/bluez/hci0/dev_E4_43_33_7E_54_1C/service002a'][iface][prop] = value
class TestBluezeroService(unittest.TestCase):
"""Test class to exercise (remote) GATT Service Features."""
def setUp(self):
"""Initialise the class for the tests."""
self.dbus_mock = MagicMock()
self.mainloop_mock = MagicMock()
self.gobject_mock = MagicMock()
modules = {
'dbus': self.dbus_mock,
'dbus.exceptions': self.dbus_mock,
'dbus.mainloop.glib': self.mainloop_mock,
'gi.repository': self.gobject_mock,
}
self.dbus_mock.Interface.return_value.GetManagedObjects.return_value = tests.obj_data.full_ubits
self.dbus_mock.Interface.return_value.Get = mock_get
self.dbus_mock.Interface.return_value.Set = mock_set
self.module_patcher = patch.dict('sys.modules', modules)
self.module_patcher.start()
from bluezero import GATT
self.module_under_test = GATT
self.path = '/org/bluez/hci0/dev_E4_43_33_7E_54_1C/service002a'
self.adapter_path = '/org/bluez/hci0'
self.dev_name = 'BBC micro:bit [zezet]'
self.adapter_addr = '00:00:00:00:5A:AD'
self.device_addr = 'E4:43:33:7E:54:1C'
self.service_uuid = 'e95dd91d-251d-470a-a062-fa1922dfa9a8'
def tearDown(self):
self.module_patcher.stop()
def test_service_uuid(self):
"""Test the service UUID."""
# Invoke the bluez GATT library to access the mock GATT service
test_service = self.module_under_test.Service(self.adapter_addr,
self.device_addr,
self.service_uuid)
# Test for the UUID
self.assertEqual(test_service.UUID, 'e95dd91d-251d-470a-a062-fa1922dfa9a8')
def test_service_device(self):
"""Test the service device path."""
# Invoke the bluez GATT library to access the mock GATT service
test_service = self.module_under_test.Service(self.adapter_addr,
self.device_addr,
self.service_uuid)
# Test for the device path
dev_underscore = self.device_addr.replace(':', '_').upper()
dev_addr = '{0}/dev_{1}'.format(self.adapter_path, dev_underscore)
self.assertEqual(test_service.device, dev_addr)
def test_service_primary(self):
"""Test the service primary flag."""
# Invoke the bluez GATT library to access the mock GATT service
test_service = self.module_under_test.Service(self.adapter_addr,
self.device_addr,
self.service_uuid)
# Test for the UUID
self.assertEqual(test_service.primary, True)
if __name__ == '__main__':
# avoid writing to stderr
unittest.main(testRunner=unittest.TextTestRunner(stream=sys.stdout,
verbosity=2))
| mit | -5,795,354,547,365,733,000 | 39.813187 | 106 | 0.594507 | false |
alexgonzl/TreeMaze | PythonControl/MazeMaster_PyCMDv2.py | 1 | 13920 | #!/usr/bin/python3
###
# Master controller for the TreeMaze. It communicates with the arduino through serial port.
# Receives data through GPIO inputs and serial.
###
import threading
from MazeHeader_PyCMDv2 import *
PythonControlSet = ['T2','T3a','T3b','T3c','T3d','T3e','T3f','T3g','T3h','T3i','T3j',
'T4a','T4b','T4c','T4d', 'T5Ra','T5Rb','T5Rc','T5La','T5Lb','T5Lc']
# Main Threads:
def readArduino(arduinoEv, interruptEv):
global MS
time.sleep(2)
while True:
if not interruptEv.is_set():
# reduce cpu load by reading arduino slower
time.sleep(0.01)
try:
if MS.PythonControlFlag:
if MS.IncongruencyFlag and (time.time()-MS.IncongruencyTimer)>1:
MS.Comm.GetStateVec()
if MS.CueTimerFlag:
if MS.CueTimer>0 and (time.time()-MS.CueTimer>MS.CueDeactivateTime):
MS.deactivate_cue()
MS.CueTimerFlag=False
MS.CueTimer=-1
ardsigs,data = MS.Comm.ReceiveData()
cnt = -1
for sig in ardsigs:
cnt +=1
if sig>0:
if MS.PythonControlFlag:
if sig==2:
try:
if data[cnt][0:2]=="DE":
wellnum = int(data[cnt][2])
MS.Ard_Act_Well_State[wellnum-1]=False
if MS.PythonControlFlag:
MS.DETECT(wellnum)
print("Detection on Well #", wellnum)
elif data[cnt][0:2]=="AW":
wellnum = int(data[cnt][2])-1
MS.Ard_Act_Well_State[wellnum]=True
print("Activated Well #", wellnum+1)
if MS.Act_Well[wellnum]==False:
print('wrong activation')
                                            MS.IncongruencyFlag=True
MS.IncongruencyTimer=time.time()
elif data[cnt][0:2]=="DW":
wellnum = int(data[cnt][2])-1
MS.Ard_Act_Well_State[wellnum]=False
MS.Ard_LED_State[wellnum]=False
print("Deactivated Well #", wellnum+1)
if MS.Act_Well[wellnum]==True:
                                            MS.IncongruencyFlag=True
MS.IncongruencyTimer=time.time()
elif data[cnt][0:2]=="AL":
wellnum = int(data[cnt][2])-1
MS.Ard_LED_State[wellnum]=True
print("LED ON Well #", wellnum+1)
if MS.LED_State[wellnum]==False:
print('wrong led activation')
                                            MS.IncongruencyFlag=True
MS.IncongruencyTimer=time.time()
elif data[cnt][0:2]=="DL":
wellnum = int(data[cnt][2])-1
MS.Ard_LED_State[wellnum]=False
if MS.LED_State[wellnum]==True:
                                            MS.IncongruencyFlag=True
MS.IncongruencyTimer=time.time()
print("LED OFF Well #", wellnum+1)
elif data[cnt][0:2]=="RE":
print("Reward Delivered to ", wellnum+1)
if MS.saveFlag:
logEvent(data[cnt],MS)
except:
print("Error Processing Arduino Event.", sys.exc_info())
elif sig == 4:
try:
#print("Updating arduino states.")
MS.UpdateArdStates(data[cnt])
#print(data[cnt])
MS.InnerStateCheck(int(data[cnt][0]))
except:
print("Error updating states",sys.exc_info())
else:
if MS.Comm.verbose:# no python control
print('e',ardsigs,data)
except: # try to read data
print ("Error Processing Incoming Data", sys.exc_info())
else: # if there is an interrupt
break
def PrintInstructions():
print()
print ("Enter 'Auto', to start automatic goal sequencing.")
print ("Enter 'C#', to queue a cue for the next trial.")
print ("Enter 'S', to check state machine status")
print ("Enter 'N', to start a new trial.")
print ("Enter 'M#', to manually detect a well.")
print ("Enter 'P%', to change switch probability.")
print ("Enter 'Stop', to stop automation of well sequencing.")
print("------------------------------------------------------")
print ("Enter 'a','r' activate / reset all")
print ("Enter 's' to check status")
print ("Enter 'w#','d#', to activate/deactivate a well (e.g 'w1')")
print ("Enter 'p#', to turn on pump (e.g 'p3') ")
print ("Enter 'l#', to toggle LED (e.g 'l1') ")
print ("Enter 'z#=dur' to change pump duration ('z4=20') ")
print ("Enter 'c#','y' to turn on/off cues ('c1')")
print ("Enter 'q' to exit")
def getCmdLineInput(arduinoEv,interruptEv):
global MS
ArdWellInstSet = ['w','d','p','l','z'] # instructions for individual well control
ArdGlobalInstSet = ['a','s','r','y'] # instructions for global changes
time.sleep(1)
while True:
if not interruptEv.is_set():
# wait 1 second for arduino information to come in
#arduinoEv.wait(0.2)
try:
print('To print available commands press ?')
CL_in = input()
if CL_in == '?':
PrintInstructions()
CL_in = input()
else:
pass
if (isinstance(CL_in,str) and len(CL_in)>0):
# Automation
if (CL_in=='Auto'):
if not MS.PythonControlFlag:
try:
while True:
print('')
if MS.Protocol[:3] in ['T3a','T3b']:
cueinput = int(input('Enter cue to enable [5,6]: '))
if cueinput in [5,6]:
MS.Act_Cue = cueinput
break
else:
print("Invalid Cue")
                                    elif MS.Protocol[:3] in ['T4a','T4b']:
cueinput = int(input('Enter cue to enable [1,3]: '))
if cueinput in [1,3]:
MS.Act_Cue = cueinput
break
else:
print("Invalid Cue")
else:
cueinput = 0
break
if cueinput>=0 and cueinput<=9:
MS.Act_Cue = cueinput
MS.START()
except:
print('Unable to start automation. Talk to Alex about it.')
print ("error", sys.exc_info()[0],sys.exc_info()[1],sys.exc_info()[2].tb_lineno)
MS.STOP()
# Automation Specific Commands
if MS.PythonControlFlag:
if (CL_in[0]=='C'):
MS.Queued_Cue = int(CL_in[1])
print("Cue queued for the next trial.")
elif (CL_in=='S'):
print("Auto Control Enabled = ", MS.PythonControlFlag)
MS.STATUS()
print("Arduino Variables Status")
print(MS.Ard_Act_Well_State)
elif (CL_in=='N'):
print("Starting a new trial.")
MS.NEW_TRIAL()
elif (CL_in[0]=='M'):
w = int(CL_in[1])
if w>=1 and w<=6:
MS.DETECT(w)
elif (CL_in[0]=='P'):
pr = int(CL_in[1:])
if pr>=0 and pr<=100:
MS.SwitchProb = float(pr)/100.0
elif (CL_in=='Stop'):
MS.STOP()
# individual instructions
ins = CL_in[0]
# quit instruction
if (ins == 'q'):
print('Terminating Arduino Communication')
MS.STOP()
interruptEv.set()
time.sleep(0.2)
close(MS)
break
# global instructions: a,s,r,y
elif ins in ArdGlobalInstSet:
if ins == 'a':
MS.Comm.ActivateAllWells()
elif ins == 's':
MS.Comm.getArdStatus()
elif ins == 'r':
MS.Comm.Reset()
elif ins == 'y':
MS.Comm.DeActivateCue()
# actions on individual wells
elif ins in ArdWellInstSet:
try:
well = int(CL_in[1])-1 # zero indexing the wells
if well>=0 and well <=5:
if ins=='w' and not MS.PythonControlFlag :
MS.Comm.ActivateWell(well)
elif ins=='d' and not MS.PythonControlFlag :
MS.Comm.DeActivateWell(well)
elif ins=='p':
MS.Comm.DeliverReward(well)
if MS.PythonControlFlag:
MS.rewardDelivered(well)
elif ins=='l':
MS.Comm.ToggleLED(well)
elif ins=='z':
try:
dur = int(CL_in[3:])
if dur>0 and dur<=1000:
MS.Comm.ChangeReward(well,dur)
except:
print('Invalid duration for reward.')
except:
print ("error", sys.exc_info()[0],sys.exc_info()[1],sys.exc_info()[2].tb_lineno)
print('Incorrect Instruction Format, Try again')
pass
# cue control
elif ins=='c' and not MS.PythonControlFlag :
try:
cuenum = int(CL_in[1])
                            if cuenum>=1 and cuenum<=6:
MS.Comm.ActivateCue(cuenum)
else:
print('Invalid Cue Number')
except:
print('Invalid Cue Number')
pass
except:
print ("error", sys.exc_info()[0],sys.exc_info()[1],sys.exc_info()[2].tb_lineno)
#arduinoEv.clear()
else:
break
# Parse Input:
expt, baud, verbose, headFile, datFile, saveFlag, npyfile = ParseArguments()
# Set serial comm with arduino
Comm = ArdComm(baud,verbose=verbose)
# Creat Maze object
if expt in PythonControlSet:
MS = Maze(Comm,protocol=expt,datFile=datFile,headFile=headFile,npyFile=npyfile,saveFlag=saveFlag)
else:
MS = Maze(Comm)
# leave some time
time.sleep(0.2)
# Main
arduinoEv = threading.Event()
interruptEv = threading.Event()
# Declare threads
readArdThr = threading.Thread(target = readArduino, args = (arduinoEv, interruptEv))
cmdLine = threading.Thread(target = getCmdLineInput, args = (arduinoEv,interruptEv))
try:
# Start threads
readArdThr.start()
cmdLine.start()
except KeyboardInterrupt:
print ("Keyboard Interrupt. Arduino Comm closed.")
interruptEv.set()
readArdThr.join()
cmdLine.join()
close(MS)
quit()
except:
print ("error", sys.exc_info()[0])
interruptEv.set()
readArdThr.join()
cmdLine.join()
close(MS)
quit()
| mit | 3,080,016,546,511,445,500 | 43.903226 | 112 | 0.382687 | false |
gecos-team/gecos-firstboot | firstboot/serverconf/ServerConf.py | 1 | 3755 | # -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# This file is part of Guadalinex
#
# This software is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this package; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
__author__ = "Antonio Hernández <[email protected]>"
__copyright__ = "Copyright (C) 2011, Junta de Andalucía <[email protected]>"
__license__ = "GPL-2"
import firstboot.serverconf
from LdapConf import LdapConf
from ChefConf import ChefConf
from ActiveDirectoryConf import ActiveDirectoryConf
from DateSyncConf import DateSyncConf
class ServerConf():
# Version of the configuration JSON file
VERSION = '1.3'
def __init__(self):
self._data = {}
self._data['version'] = ServerConf.VERSION
self._data['organization'] = ''
self._data['notes'] = ''
self._ldap_conf = LdapConf()
self._chef_conf = ChefConf()
self._ad_conf = ActiveDirectoryConf()
self._ntp_conf = DateSyncConf()
def load_data(self, conf):
msg = 'ServerConf: Key "%s" not found in the configuration file.'
try:
v = conf['version']
if v != ServerConf.VERSION:
print 'WARNING: ServerConf and AUTOCONFIG_JSON version mismatch!'
except KeyError as e:
print msg % ('version',)
try:
self.set_organization(conf['organization'])
except KeyError as e:
print msg % ('organization',)
try:
self.set_notes(conf['notes'])
except KeyError as e:
print msg % ('notes',)
try:
self._ldap_conf.load_data(conf['pamldap'])
except KeyError as e:
print msg % ('pamldap',)
try:
self._chef_conf.load_data(conf['chef'])
except KeyError as e:
print msg % ('chef',)
try:
self._ad_conf.load_data(conf['ad'])
except KeyError as e:
print msg % ('ad',)
try:
self._ntp_conf.load_data(conf['ntp'])
except KeyError as e:
print msg % ('ntp',)
def validate(self):
valid = len(self._data['version']) > 0 \
and self._ldap_conf.validate() \
and self._chef_conf.validate() \
and self._ad_conf.validate() \
and self._ntp_conf.validate()
return valid
def get_version(self):
return self._data['version'].encode('utf-8')
def set_version(self, version):
self._data['version'] = version
return self
def get_organization(self):
return self._data['organization'].encode('utf-8')
def set_organization(self, organization):
self._data['organization'] = organization
return self
def get_notes(self):
return self._data['notes'].encode('utf-8')
def set_notes(self, notes):
self._data['notes'] = notes
return self
def get_ad_conf(self):
return self._ad_conf
def get_ldap_conf(self):
return self._ldap_conf
def get_chef_conf(self):
return self._chef_conf
def get_ntp_conf(self):
return self._ntp_conf
| gpl-2.0 | 7,633,553,126,649,568,000 | 30.537815 | 83 | 0.607248 | false |
mwisslead/vfp2py | testbed/conversion.py | 1 | 6505 | # Generated from conversion.g4 by ANTLR 4.8
# encoding: utf-8
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\t")
buf.write("\36\4\2\t\2\4\3\t\3\3\2\7\2\b\n\2\f\2\16\2\13\13\2\3\3")
buf.write("\3\3\7\3\17\n\3\f\3\16\3\22\13\3\3\3\3\3\3\3\7\3\27\n")
buf.write("\3\f\3\16\3\32\13\3\3\3\3\3\3\3\2\2\4\2\4\2\2\2\36\2\t")
buf.write("\3\2\2\2\4\f\3\2\2\2\6\b\5\4\3\2\7\6\3\2\2\2\b\13\3\2")
buf.write("\2\2\t\7\3\2\2\2\t\n\3\2\2\2\n\3\3\2\2\2\13\t\3\2\2\2")
buf.write("\f\20\7\3\2\2\r\17\7\7\2\2\16\r\3\2\2\2\17\22\3\2\2\2")
buf.write("\20\16\3\2\2\2\20\21\3\2\2\2\21\23\3\2\2\2\22\20\3\2\2")
buf.write("\2\23\24\7\6\2\2\24\30\7\4\2\2\25\27\7\t\2\2\26\25\3\2")
buf.write("\2\2\27\32\3\2\2\2\30\26\3\2\2\2\30\31\3\2\2\2\31\33\3")
buf.write("\2\2\2\32\30\3\2\2\2\33\34\7\b\2\2\34\5\3\2\2\2\5\t\20")
buf.write("\30")
return buf.getvalue()
class conversion ( Parser ):
grammarFileName = "conversion.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ ]
symbolicNames = [ "<INVALID>", "FoxStart", "PyStart", "Line", "FoxEnd",
"FoxLine", "PyEnd", "PyLine" ]
RULE_conversionTests = 0
RULE_conversionTest = 1
ruleNames = [ "conversionTests", "conversionTest" ]
EOF = Token.EOF
FoxStart=1
PyStart=2
Line=3
FoxEnd=4
FoxLine=5
PyEnd=6
PyLine=7
def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.8")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
class ConversionTestsContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def conversionTest(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(conversion.ConversionTestContext)
else:
return self.getTypedRuleContext(conversion.ConversionTestContext,i)
def getRuleIndex(self):
return conversion.RULE_conversionTests
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitConversionTests" ):
return visitor.visitConversionTests(self)
else:
return visitor.visitChildren(self)
def conversionTests(self):
localctx = conversion.ConversionTestsContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_conversionTests)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 7
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==conversion.FoxStart:
self.state = 4
self.conversionTest()
self.state = 9
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ConversionTestContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def FoxStart(self):
return self.getToken(conversion.FoxStart, 0)
def FoxEnd(self):
return self.getToken(conversion.FoxEnd, 0)
def PyStart(self):
return self.getToken(conversion.PyStart, 0)
def PyEnd(self):
return self.getToken(conversion.PyEnd, 0)
def FoxLine(self, i:int=None):
if i is None:
return self.getTokens(conversion.FoxLine)
else:
return self.getToken(conversion.FoxLine, i)
def PyLine(self, i:int=None):
if i is None:
return self.getTokens(conversion.PyLine)
else:
return self.getToken(conversion.PyLine, i)
def getRuleIndex(self):
return conversion.RULE_conversionTest
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitConversionTest" ):
return visitor.visitConversionTest(self)
else:
return visitor.visitChildren(self)
def conversionTest(self):
localctx = conversion.ConversionTestContext(self, self._ctx, self.state)
self.enterRule(localctx, 2, self.RULE_conversionTest)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 10
self.match(conversion.FoxStart)
self.state = 14
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==conversion.FoxLine:
self.state = 11
self.match(conversion.FoxLine)
self.state = 16
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 17
self.match(conversion.FoxEnd)
self.state = 18
self.match(conversion.PyStart)
self.state = 22
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==conversion.PyLine:
self.state = 19
self.match(conversion.PyLine)
self.state = 24
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 25
self.match(conversion.PyEnd)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
| mit | 2,564,314,225,505,550,000 | 30.57767 | 103 | 0.569254 | false |
darvelo/panel | widgets/net.py | 1 | 2233 | from PyQt5 import QtWidgets
from PyQt5 import QtCore
import os
import glob
from .chart import Chart
def read_file(path):
with open(path, 'r') as fh:
return fh.read().strip()
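def _example_rx_rate(interface_path='/sys/class/net/eth0', interval=1.0):
    """Hedged sketch (illustration only, not used by the widget below).

    Shows how the sysfs byte counters read via read_file() become a rate,
    mirroring what NetworkUsageProvider.refresh() does each second. The
    interface path and interval are illustrative assumptions.
    """
    import time
    rx_path = os.path.join(interface_path, 'statistics', 'rx_bytes')
    before = int(read_file(rx_path))
    time.sleep(interval)
    after = int(read_file(rx_path))
    return (after - before) / interval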
class NetworkUsageProvider(object):
delay = 1
def __init__(self, main_window):
self.network_enabled = False
try:
self.rx_path = None
self.tx_path = None
for interface in glob.glob('/sys/class/net/*'):
state = read_file(os.path.join(interface, 'operstate'))
if state.lower() == 'up':
self.rx_path = os.path.join(interface, 'statistics', 'rx_bytes')
self.tx_path = os.path.join(interface, 'statistics', 'tx_bytes')
self.network_enabled = True
except:
pass
if self.network_enabled:
self.old_rx_bytes = int(read_file(self.rx_path))
self.old_tx_bytes = int(read_file(self.tx_path))
self.net_in_label = QtWidgets.QLabel()
self.net_out_label = QtWidgets.QLabel()
self.chart = Chart(QtCore.QSize(40, main_window.height()))
for w in [self.net_in_label, self.net_out_label, self.chart]:
main_window[0].right_widget.layout().addWidget(w)
self.chart.repaint()
def refresh(self):
if self.network_enabled:
rx_bytes = int(read_file(self.rx_path))
tx_bytes = int(read_file(self.tx_path))
self.net_in = (rx_bytes - self.old_rx_bytes) / 1
self.net_out = (tx_bytes - self.old_tx_bytes) / 1
self.old_rx_bytes = rx_bytes
self.old_tx_bytes = tx_bytes
def render(self):
if self.network_enabled:
# self.net_in_label.setText('\U0001f847 %.0f KB/s' % (self.net_in / 1024.0))
# self.net_out_label.setText('\U0001f845 %.0f KB/s' % (self.net_out / 1024.0))
self.net_in_label.setText('\U0001f847 %.0f' % (self.net_in / 1024.0))
self.net_out_label.setText('\U0001f845 %.0f' % (self.net_out / 1024.0))
self.chart.addPoint('#00a000', self.net_in)
self.chart.addPoint('#ff0000', self.net_out)
self.chart.repaint()
| mit | -6,281,391,231,829,057,000 | 38.875 | 90 | 0.559337 | false |
CenterForOpenScience/lookit-api | accounts/serializers.py | 1 | 4065 | from rest_framework_json_api import serializers
from accounts.models import Child, DemographicData, User
from api.serializers import (
PatchedHyperlinkedRelatedField,
UuidHyperlinkedModelSerializer,
)
from studies.models import Lab
class LabSerializer(UuidHyperlinkedModelSerializer):
resource_name = "labs"
url = serializers.HyperlinkedIdentityField(
view_name="api:lab-detail", lookup_field="uuid"
)
class Meta:
model = Lab
fields = (
"name",
"institution",
"principal_investigator_name",
"lab_website",
"description",
"approved_to_test",
"url",
"pk",
)
class DemographicDataSerializer(UuidHyperlinkedModelSerializer):
resource_name = "demographics"
country = serializers.CharField(default="")
date_created = serializers.DateTimeField(read_only=True, source="created_at")
url = serializers.HyperlinkedIdentityField(
view_name="api:demographicdata-detail", lookup_field="uuid"
)
class Meta:
model = DemographicData
fields = (
"url",
"number_of_children",
"child_birthdays",
"languages_spoken_at_home",
"number_of_guardians",
"number_of_guardians_explanation",
"race_identification",
"age",
"gender",
"education_level",
"spouse_education_level",
"annual_income",
"former_lookit_annual_income",
"lookit_referrer",
"number_of_books",
"additional_comments",
"country",
"state",
"density",
"extra",
"date_created",
"pk",
)
class BasicUserSerializer(UuidHyperlinkedModelSerializer):
resource_name = "users"
url = serializers.HyperlinkedIdentityField(
view_name="api:user-detail", lookup_field="uuid"
)
demographics = PatchedHyperlinkedRelatedField(
queryset=DemographicData.objects,
many=True,
related_link_view_name="api:user-demographics-list",
related_link_url_kwarg="user_uuid",
related_link_lookup_field="uuid",
)
children = PatchedHyperlinkedRelatedField(
queryset=Child.objects,
many=True,
related_link_view_name="api:user-children-list",
related_link_url_kwarg="user_uuid",
related_link_lookup_field="uuid",
)
class Meta:
model = User
fields = (
"url",
"given_name",
"middle_name",
"family_name",
"nickname",
"identicon",
"is_active",
"is_staff",
"is_researcher",
"demographics",
"children",
"former_lookit_id",
"linked_former_lookit_ids",
"email_next_session",
"email_new_studies",
"email_study_updates",
"email_response_questions",
"date_created",
"pk",
)
class FullUserSerializer(BasicUserSerializer):
class Meta:
model = User
fields = BasicUserSerializer.Meta.fields + ("username",)
class ChildSerializer(UuidHyperlinkedModelSerializer):
lookup_field = "uuid"
url = serializers.HyperlinkedIdentityField(
view_name="api:child-detail", lookup_field="uuid"
)
user = PatchedHyperlinkedRelatedField(
queryset=User.objects,
related_link_view_name="api:user-detail",
related_link_lookup_field="user.uuid",
related_link_url_kwarg="uuid",
)
class Meta:
model = Child
fields = (
"url",
"user",
"given_name",
"birthday",
"gender",
"age_at_birth",
"additional_information",
"language_list",
"condition_list",
"deleted",
"former_lookit_profile_id",
"pk",
)
| apache-2.0 | 6,840,125,021,426,943,000 | 26.466216 | 81 | 0.55449 | false |
bahamas10/Viridian | AmpacheTools/plugins/template.py | 1 | 1597 | #!/usr/bin/env python
### BEGIN LICENSE
# Copyright (C) 2010 Dave Eddy <[email protected]>
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
#
# Template plugin... this plugin prints some information about the current song
# Dave Eddy <[email protected]>
def __init__():
"""Return an instance of the class used by the plugin when __init__() is called"""
return TemplatePlugin()
class TemplatePlugin:
def __init__(self):
"""Called before the plugin is asked to do anything.
title, author, and description must be set for Viridian to read the plugin."""
self.title = "Template Plugin"
self.author = "Dave Eddy <[email protected]>"
self.description = "Prints some information when the song changes"
def on_song_change(self, song_dict):
"""Called when the song changes in Viridian.
A dictionary with all of the songs information is passed in as 'song_dict'"""
for k,v in song_dict.iteritems():
print "song_dict['%s'] = '%s'" % (k,v)
| bsd-3-clause | 957,446,448,431,151,500 | 43.361111 | 86 | 0.692549 | false |
jamestwebber/scipy | scipy/stats/tests/test_binned_statistic.py | 1 | 15125 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_allclose
from pytest import raises as assert_raises
from scipy.stats import (binned_statistic, binned_statistic_2d,
binned_statistic_dd)
from scipy._lib.six import u
from .common_tests import check_named_results
class TestBinnedStatistic(object):
@classmethod
def setup_class(cls):
np.random.seed(9865)
cls.x = np.random.random(100)
cls.y = np.random.random(100)
cls.v = np.random.random(100)
cls.X = np.random.random((100, 3))
cls.w = np.random.random(100)
cls.u = np.random.random(100) + 1e6
def test_1d_count(self):
x = self.x
v = self.v
count1, edges1, bc = binned_statistic(x, v, 'count', bins=10)
count2, edges2 = np.histogram(x, bins=10)
assert_allclose(count1, count2)
assert_allclose(edges1, edges2)
def test_gh5927(self):
# smoke test for gh5927 - binned_statistic was using `is` for string
# comparison
x = self.x
v = self.v
statistics = [u'mean', u'median', u'count', u'sum']
for statistic in statistics:
binned_statistic(x, v, statistic, bins=10)
def test_big_number_std(self):
# tests for numerical stability of std calculation
# see issue gh-10126 for more
x = self.x
u = self.u
stat1, edges1, bc = binned_statistic(x, u, 'std', bins=10)
stat2, edges2, bc = binned_statistic(x, u, np.std, bins=10)
assert_allclose(stat1, stat2)
def test_non_finite_inputs(self):
# if either `values` or `sample` contain np.inf or np.nan throw
# see issue gh-9010 for more
x = self.x
u = self.u
u[0] = np.inf
assert_raises(ValueError, binned_statistic, x, u, 'std', bins=10)
u[0] = np.nan
assert_raises(ValueError, binned_statistic, x, u, 'count', bins=10)
def test_1d_result_attributes(self):
x = self.x
v = self.v
res = binned_statistic(x, v, 'count', bins=10)
attributes = ('statistic', 'bin_edges', 'binnumber')
check_named_results(res, attributes)
def test_1d_sum(self):
x = self.x
v = self.v
sum1, edges1, bc = binned_statistic(x, v, 'sum', bins=10)
sum2, edges2 = np.histogram(x, bins=10, weights=v)
assert_allclose(sum1, sum2)
assert_allclose(edges1, edges2)
def test_1d_mean(self):
x = self.x
v = self.v
stat1, edges1, bc = binned_statistic(x, v, 'mean', bins=10)
stat2, edges2, bc = binned_statistic(x, v, np.mean, bins=10)
assert_allclose(stat1, stat2)
assert_allclose(edges1, edges2)
def test_1d_std(self):
x = self.x
v = self.v
stat1, edges1, bc = binned_statistic(x, v, 'std', bins=10)
stat2, edges2, bc = binned_statistic(x, v, np.std, bins=10)
assert_allclose(stat1, stat2)
assert_allclose(edges1, edges2)
def test_1d_min(self):
x = self.x
v = self.v
stat1, edges1, bc = binned_statistic(x, v, 'min', bins=10)
stat2, edges2, bc = binned_statistic(x, v, np.min, bins=10)
assert_allclose(stat1, stat2)
assert_allclose(edges1, edges2)
def test_1d_max(self):
x = self.x
v = self.v
stat1, edges1, bc = binned_statistic(x, v, 'max', bins=10)
stat2, edges2, bc = binned_statistic(x, v, np.max, bins=10)
assert_allclose(stat1, stat2)
assert_allclose(edges1, edges2)
def test_1d_median(self):
x = self.x
v = self.v
stat1, edges1, bc = binned_statistic(x, v, 'median', bins=10)
stat2, edges2, bc = binned_statistic(x, v, np.median, bins=10)
assert_allclose(stat1, stat2)
assert_allclose(edges1, edges2)
def test_1d_bincode(self):
x = self.x[:20]
v = self.v[:20]
count1, edges1, bc = binned_statistic(x, v, 'count', bins=3)
bc2 = np.array([3, 2, 1, 3, 2, 3, 3, 3, 3, 1, 1, 3, 3, 1, 2, 3, 1,
1, 2, 1])
bcount = [(bc == i).sum() for i in np.unique(bc)]
assert_allclose(bc, bc2)
assert_allclose(bcount, count1)
def test_1d_range_keyword(self):
# Regression test for gh-3063, range can be (min, max) or [(min, max)]
np.random.seed(9865)
x = np.arange(30)
data = np.random.random(30)
mean, bins, _ = binned_statistic(x[:15], data[:15])
mean_range, bins_range, _ = binned_statistic(x, data, range=[(0, 14)])
mean_range2, bins_range2, _ = binned_statistic(x, data, range=(0, 14))
assert_allclose(mean, mean_range)
assert_allclose(bins, bins_range)
assert_allclose(mean, mean_range2)
assert_allclose(bins, bins_range2)
def test_1d_multi_values(self):
x = self.x
v = self.v
w = self.w
stat1v, edges1v, bc1v = binned_statistic(x, v, 'mean', bins=10)
stat1w, edges1w, bc1w = binned_statistic(x, w, 'mean', bins=10)
stat2, edges2, bc2 = binned_statistic(x, [v, w], 'mean', bins=10)
assert_allclose(stat2[0], stat1v)
assert_allclose(stat2[1], stat1w)
assert_allclose(edges1v, edges2)
assert_allclose(bc1v, bc2)
def test_2d_count(self):
x = self.x
y = self.y
v = self.v
count1, binx1, biny1, bc = binned_statistic_2d(
x, y, v, 'count', bins=5)
count2, binx2, biny2 = np.histogram2d(x, y, bins=5)
assert_allclose(count1, count2)
assert_allclose(binx1, binx2)
assert_allclose(biny1, biny2)
def test_2d_result_attributes(self):
x = self.x
y = self.y
v = self.v
res = binned_statistic_2d(x, y, v, 'count', bins=5)
attributes = ('statistic', 'x_edge', 'y_edge', 'binnumber')
check_named_results(res, attributes)
def test_2d_sum(self):
x = self.x
y = self.y
v = self.v
sum1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'sum', bins=5)
sum2, binx2, biny2 = np.histogram2d(x, y, bins=5, weights=v)
assert_allclose(sum1, sum2)
assert_allclose(binx1, binx2)
assert_allclose(biny1, biny2)
def test_2d_mean(self):
x = self.x
y = self.y
v = self.v
stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'mean', bins=5)
stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.mean, bins=5)
assert_allclose(stat1, stat2)
assert_allclose(binx1, binx2)
assert_allclose(biny1, biny2)
def test_2d_mean_unicode(self):
x = self.x
y = self.y
v = self.v
stat1, binx1, biny1, bc = binned_statistic_2d(
x, y, v, u('mean'), bins=5)
stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.mean, bins=5)
assert_allclose(stat1, stat2)
assert_allclose(binx1, binx2)
assert_allclose(biny1, biny2)
def test_2d_std(self):
x = self.x
y = self.y
v = self.v
stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'std', bins=5)
stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.std, bins=5)
assert_allclose(stat1, stat2)
assert_allclose(binx1, binx2)
assert_allclose(biny1, biny2)
def test_2d_min(self):
x = self.x
y = self.y
v = self.v
stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'min', bins=5)
stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.min, bins=5)
assert_allclose(stat1, stat2)
assert_allclose(binx1, binx2)
assert_allclose(biny1, biny2)
def test_2d_max(self):
x = self.x
y = self.y
v = self.v
stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'max', bins=5)
stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.max, bins=5)
assert_allclose(stat1, stat2)
assert_allclose(binx1, binx2)
assert_allclose(biny1, biny2)
def test_2d_median(self):
x = self.x
y = self.y
v = self.v
stat1, binx1, biny1, bc = binned_statistic_2d(
x, y, v, 'median', bins=5)
stat2, binx2, biny2, bc = binned_statistic_2d(
x, y, v, np.median, bins=5)
assert_allclose(stat1, stat2)
assert_allclose(binx1, binx2)
assert_allclose(biny1, biny2)
def test_2d_bincode(self):
x = self.x[:20]
y = self.y[:20]
v = self.v[:20]
count1, binx1, biny1, bc = binned_statistic_2d(
x, y, v, 'count', bins=3)
bc2 = np.array([17, 11, 6, 16, 11, 17, 18, 17, 17, 7, 6, 18, 16,
6, 11, 16, 6, 6, 11, 8])
bcount = [(bc == i).sum() for i in np.unique(bc)]
assert_allclose(bc, bc2)
count1adj = count1[count1.nonzero()]
assert_allclose(bcount, count1adj)
def test_2d_multi_values(self):
x = self.x
y = self.y
v = self.v
w = self.w
stat1v, binx1v, biny1v, bc1v = binned_statistic_2d(
x, y, v, 'mean', bins=8)
stat1w, binx1w, biny1w, bc1w = binned_statistic_2d(
x, y, w, 'mean', bins=8)
stat2, binx2, biny2, bc2 = binned_statistic_2d(
x, y, [v, w], 'mean', bins=8)
assert_allclose(stat2[0], stat1v)
assert_allclose(stat2[1], stat1w)
assert_allclose(binx1v, binx2)
assert_allclose(biny1w, biny2)
assert_allclose(bc1v, bc2)
def test_2d_binnumbers_unraveled(self):
x = self.x
y = self.y
v = self.v
stat, edgesx, bcx = binned_statistic(x, v, 'mean', bins=20)
stat, edgesy, bcy = binned_statistic(y, v, 'mean', bins=10)
stat2, edgesx2, edgesy2, bc2 = binned_statistic_2d(
x, y, v, 'mean', bins=(20, 10), expand_binnumbers=True)
bcx3 = np.searchsorted(edgesx, x, side='right')
bcy3 = np.searchsorted(edgesy, y, side='right')
# `numpy.searchsorted` is non-inclusive on right-edge, compensate
bcx3[x == x.max()] -= 1
bcy3[y == y.max()] -= 1
assert_allclose(bcx, bc2[0])
assert_allclose(bcy, bc2[1])
assert_allclose(bcx3, bc2[0])
assert_allclose(bcy3, bc2[1])
def test_dd_count(self):
X = self.X
v = self.v
count1, edges1, bc = binned_statistic_dd(X, v, 'count', bins=3)
count2, edges2 = np.histogramdd(X, bins=3)
assert_allclose(count1, count2)
assert_allclose(edges1, edges2)
def test_dd_result_attributes(self):
X = self.X
v = self.v
res = binned_statistic_dd(X, v, 'count', bins=3)
attributes = ('statistic', 'bin_edges', 'binnumber')
check_named_results(res, attributes)
def test_dd_sum(self):
X = self.X
v = self.v
sum1, edges1, bc = binned_statistic_dd(X, v, 'sum', bins=3)
sum2, edges2 = np.histogramdd(X, bins=3, weights=v)
assert_allclose(sum1, sum2)
assert_allclose(edges1, edges2)
def test_dd_mean(self):
X = self.X
v = self.v
stat1, edges1, bc = binned_statistic_dd(X, v, 'mean', bins=3)
stat2, edges2, bc = binned_statistic_dd(X, v, np.mean, bins=3)
assert_allclose(stat1, stat2)
assert_allclose(edges1, edges2)
def test_dd_std(self):
X = self.X
v = self.v
stat1, edges1, bc = binned_statistic_dd(X, v, 'std', bins=3)
stat2, edges2, bc = binned_statistic_dd(X, v, np.std, bins=3)
assert_allclose(stat1, stat2)
assert_allclose(edges1, edges2)
def test_dd_min(self):
X = self.X
v = self.v
stat1, edges1, bc = binned_statistic_dd(X, v, 'min', bins=3)
stat2, edges2, bc = binned_statistic_dd(X, v, np.min, bins=3)
assert_allclose(stat1, stat2)
assert_allclose(edges1, edges2)
def test_dd_max(self):
X = self.X
v = self.v
stat1, edges1, bc = binned_statistic_dd(X, v, 'max', bins=3)
stat2, edges2, bc = binned_statistic_dd(X, v, np.max, bins=3)
assert_allclose(stat1, stat2)
assert_allclose(edges1, edges2)
def test_dd_median(self):
X = self.X
v = self.v
stat1, edges1, bc = binned_statistic_dd(X, v, 'median', bins=3)
stat2, edges2, bc = binned_statistic_dd(X, v, np.median, bins=3)
assert_allclose(stat1, stat2)
assert_allclose(edges1, edges2)
def test_dd_bincode(self):
X = self.X[:20]
v = self.v[:20]
count1, edges1, bc = binned_statistic_dd(X, v, 'count', bins=3)
bc2 = np.array([63, 33, 86, 83, 88, 67, 57, 33, 42, 41, 82, 83, 92,
32, 36, 91, 43, 87, 81, 81])
bcount = [(bc == i).sum() for i in np.unique(bc)]
assert_allclose(bc, bc2)
count1adj = count1[count1.nonzero()]
assert_allclose(bcount, count1adj)
def test_dd_multi_values(self):
X = self.X
v = self.v
w = self.w
stat1v, edges1v, bc1v = binned_statistic_dd(X, v, np.std, bins=8)
stat1w, edges1w, bc1w = binned_statistic_dd(X, w, np.std, bins=8)
stat2, edges2, bc2 = binned_statistic_dd(X, [v, w], np.std, bins=8)
assert_allclose(stat2[0], stat1v)
assert_allclose(stat2[1], stat1w)
assert_allclose(edges1v, edges2)
assert_allclose(edges1w, edges2)
assert_allclose(bc1v, bc2)
def test_dd_binnumbers_unraveled(self):
X = self.X
v = self.v
stat, edgesx, bcx = binned_statistic(X[:, 0], v, 'mean', bins=15)
stat, edgesy, bcy = binned_statistic(X[:, 1], v, 'mean', bins=20)
stat, edgesz, bcz = binned_statistic(X[:, 2], v, 'mean', bins=10)
stat2, edges2, bc2 = binned_statistic_dd(
X, v, 'mean', bins=(15, 20, 10), expand_binnumbers=True)
assert_allclose(bcx, bc2[0])
assert_allclose(bcy, bc2[1])
assert_allclose(bcz, bc2[2])
def test_dd_binned_statistic_result(self):
# NOTE: tests the reuse of bin_edges from previous call
x = np.random.random((10000, 3))
v = np.random.random((10000))
bins = np.linspace(0, 1, 10)
bins = (bins, bins, bins)
result = binned_statistic_dd(x, v, 'mean', bins=bins)
stat = result.statistic
result = binned_statistic_dd(x, v, 'mean',
binned_statistic_result=result)
stat2 = result.statistic
assert_allclose(stat, stat2)
def test_dd_zero_dedges(self):
x = np.random.random((10000, 3))
v = np.random.random((10000))
bins = np.linspace(0, 1, 10)
bins = np.append(bins, 1)
bins = (bins, bins, bins)
with assert_raises(ValueError, match='difference is numerically 0'):
binned_statistic_dd(x, v, 'mean', bins=bins)
| bsd-3-clause | -4,939,175,757,485,474,000 | 30.185567 | 79 | 0.559008 | false |
pioneers/PieCentral | shepherd/dawn_server.py | 1 | 2480 | import threading
import json
import time
import queue
import gevent # pylint: disable=import-error
from flask import Flask, render_template # pylint: disable=import-error
from flask_socketio import SocketIO, emit, join_room, leave_room, send # pylint: disable=import-error
from Utils import *
from LCM import *
HOST_URL = "192.168.128.64" # "0.0.0.0"
PORT = 7000
#TODO work on this, new headers and deprecated headers.
app = Flask(__name__)
app.config['SECRET_KEY'] = 'omegalul!'
socketio = SocketIO(app)
master_robots = {ALLIANCE_COLOR.BLUE: 0, ALLIANCE_COLOR.GOLD:0}
@socketio.on('dawn-to-server-alliance-codes')
def ui_to_server_setup_match(alliance_codes):
lcm_send(LCM_TARGETS.SHEPHERD, SHEPHERD_HEADER.CODE_APPLICATION, json.loads(alliance_codes))
def receiver():
events = gevent.queue.Queue()
lcm_start_read(str.encode(LCM_TARGETS.DAWN), events, put_json=True)
while True:
if not events.empty():
event = events.get_nowait()
eventDict = json.loads(event)
print("RECEIVED:", event)
if eventDict["header"] == DAWN_HEADER.ROBOT_STATE:
socketio.emit(DAWN_HEADER.ROBOT_STATE, event)
elif eventDict["header"] == DAWN_HEADER.CODES:
socketio.emit(DAWN_HEADER.CODES, event)
elif eventDict["header"] == DAWN_HEADER.RESET:
master_robots[ALLIANCE_COLOR.BLUE] = 0
master_robots[ALLIANCE_COLOR.GOLD] = 0
elif eventDict["header"] == DAWN_HEADER.MASTER:
master_robots[eventDict["alliance"]] = int(eventDict["team_number"])
# socketio.emit(DAWN_HEADER.MASTER, event)
print(master_robots)
# print({"alliance": ALLIANCE_COLOR.BLUE,
# "team_number": master_robots[ALLIANCE_COLOR.BLUE]})
# print({"alliance": ALLIANCE_COLOR.GOLD,
# "team_number": master_robots[ALLIANCE_COLOR.GOLD]})
socketio.emit(DAWN_HEADER.MASTER, json.dumps(master_robots))
# socketio.emit(DAWN_HEADER.MASTER, json.dumps({"alliance": ALLIANCE_COLOR.BLUE,
# "team_number": master_robots[ALLIANCE_COLOR.BLUE]}))
# socketio.emit(DAWN_HEADER.MASTER, json.dumps({"alliance": ALLIANCE_COLOR.GOLD,
# "team_number": master_robots[ALLIANCE_COLOR.GOLD]}))
socketio.emit(DAWN_HEADER.HEARTBEAT, json.dumps({"heartbeat" : 1}))
socketio.sleep(1)
socketio.start_background_task(receiver)
socketio.run(app, host=HOST_URL, port=PORT)
| apache-2.0 | 4,071,755,422,820,970,500 | 41.758621 | 101 | 0.659677 | false |
brenthuisman/phd_tools | graph.spotprofiles.py | 1 | 7377 | #!/usr/bin/env python
import plot, numpy as np,auger,image,rtplan
from scipy.ndimage.filters import gaussian_filter
###########################################################################################################
smooth_param = 8.5 #20 mm FWHM
volume_offset=-141.59+7.96#spot sources
pg29 = image.image('data/ct/source-ct-29.mhd')
rppg29 = image.image('data/rpct/source-rpct-29.mhd')
pg40 = image.image('data/ct/source-ct-40.mhd')
rppg40 = image.image('data/rpct/source-rpct-40.mhd')
pg61 = image.image('data/ct/source-ct-61.mhd')
rppg61 = image.image('data/rpct/source-rpct-61.mhd')
pg29.toprojection(".x", [0,1,1,1])
rppg29.toprojection(".x", [0,1,1,1])
pg40.toprojection(".x", [0,1,1,1])
rppg40.toprojection(".x", [0,1,1,1])
pg61.toprojection(".x", [0,1,1,1])
rppg61.toprojection(".x", [0,1,1,1])
pgsrc_ct_x = np.linspace(-149,149,150) #bincenters
pgsrc_ct_xhist = np.linspace(-150,150,151) #2mm voxels, endpoints
pgsrc_ct_x = pgsrc_ct_x+(volume_offset-pgsrc_ct_x[0]) #offset for pg source image
pgsrc_ct_xhist = pgsrc_ct_xhist+(volume_offset-pgsrc_ct_xhist[0]) #same
pg29_fo=auger.get_fop(pgsrc_ct_x,pg29.imdata)
rppg29_fo=auger.get_fop(pgsrc_ct_x,rppg29.imdata)
pg40_fo=auger.get_fop(pgsrc_ct_x,pg40.imdata)
rppg40_fo=auger.get_fop(pgsrc_ct_x,rppg40.imdata)
pg61_fo=auger.get_fop(pgsrc_ct_x,pg61.imdata)
rppg61_fo=auger.get_fop(pgsrc_ct_x,rppg61.imdata)
psf_pg29_fo=auger.get_fop(pgsrc_ct_x,gaussian_filter(pg29.imdata, sigma=smooth_param))
psf_rppg29_fo=auger.get_fop(pgsrc_ct_x,gaussian_filter(rppg29.imdata, sigma=smooth_param))
psf_pg40_fo=auger.get_fop(pgsrc_ct_x,gaussian_filter(pg40.imdata, sigma=smooth_param))
psf_rppg40_fo=auger.get_fop(pgsrc_ct_x,gaussian_filter(rppg40.imdata, sigma=smooth_param))
psf_pg61_fo=auger.get_fop(pgsrc_ct_x,gaussian_filter(pg61.imdata, sigma=smooth_param))
psf_rppg61_fo=auger.get_fop(pgsrc_ct_x,gaussian_filter(rppg61.imdata, sigma=smooth_param))
rtplan = rtplan.rtplan(['../doseactortest/data/plan.txt'],norm2nprim=False)#,noproc=True)
MSW=[]
for spot in rtplan.spots:
if spot[0] == 102:#
MSW.append(spot)
#### dose
dose_offset=-142.097+7.96
x = np.linspace(-149.5,149.5,300) #bincenters
xhist = np.linspace(-150,150,301) #1mm voxels, endpoints
x = x+(dose_offset-x[0]) #offset for pg source image
xhist = xhist+(dose_offset-xhist[0]) #same
dose = image.image('../doseactortest/output/new_dosespotid-ct.mhd')
dose = dose.imdata.reshape(dose.imdata.shape[::-1]).squeeze()
rpdose = image.image('../doseactortest/output/new_dosespotid-rpct.mhd')
rpdose = rpdose.imdata.reshape(rpdose.imdata.shape[::-1]).squeeze()
ct29_fo=auger.get_fop(x,dose[29])
rpct29_fo=auger.get_fop(x,rpdose[29])
ct40_fo=auger.get_fop(x,dose[40])
rpct40_fo=auger.get_fop(x,rpdose[40])
ct61_fo=auger.get_fop(x,dose[61])
rpct61_fo=auger.get_fop(x,rpdose[61])
print '###########################################################################################################'
print 'PG FOPS'
print 'pg29', pg29_fo, ', w/psf:', psf_pg29_fo
print 'rppg29', rppg29_fo, ', w/psf:', psf_rppg29_fo
print 'pg40', pg40_fo, ', w/psf:', psf_pg40_fo
print 'rppg40', rppg40_fo, ', w/psf:', psf_rppg40_fo
print 'pg61', pg61_fo, ', w/psf:', psf_pg61_fo
print 'rppg61', rppg61_fo, ', w/psf:', psf_rppg61_fo
print 'FOP shifts'
print '29, ct:', str(rpct29_fo-ct29_fo)[:4], ', pg', str(rppg29_fo-pg29_fo)[:4], ', pg+psf', str(psf_rppg29_fo-psf_pg29_fo)[:4]
print '40, ct:', str(rpct40_fo-ct40_fo)[:4], ', pg', str(rppg40_fo-pg40_fo)[:4], ', pg+psf', str(psf_rppg40_fo-psf_pg40_fo)[:4]
print '61, ct:', str(rpct61_fo-ct61_fo)[:4], ', pg', str(rppg61_fo-pg61_fo)[:4], ', pg+psf', str(psf_rppg61_fo-psf_pg61_fo)[:4]
print '###########################################################################################################'
###########################################################################################################
def yld(profile):
nr=str(profile.imdata.sum()*100.)
return nr[:3]+'\%'
def plotprof(ax,xax,emit,dete,name, **kwargs):
if name == 'CT':
color='steelblue'
elif name == 'RPCT':
color='indianred'
else:
color='black'
ax.step(xax,emit, color=color,lw=1., alpha=1, label=name+', yield: '+yld(emit), where='mid')
#ax1.step(pgsrc_ct_x,dete, color=color,lw=1., alpha=0.5, label=name+' PSF', where='mid')
return ax
###########################################################################################################
f, ((ax4,ax5,ax6),(ax1,ax2,ax3)) = plot.subplots(nrows=2, ncols=3, sharex=True, sharey=False)
ax1.step(pgsrc_ct_x,pg29.imdata, color='steelblue',lw=1., alpha=1, label='CT, yield: '+yld(pg29), where='mid')
ax1.step(pgsrc_ct_x,rppg29.imdata, color='indianred',lw=1., alpha=1, label='RPCT, yield: '+yld(rppg29), where='mid')
ax1.set_title('PG shift: '+str(rppg29_fo-pg29_fo)[:3]+' mm', fontsize=10)
ax1.legend(frameon = False,loc='upper left')
ax1.set_xlim(-80,60)
ax1.set_ylim(0,0.004)
ax1.set_ylabel('Cumulative PG emission per proton')
plot.texax(ax1)
ax2.step(pgsrc_ct_x,pg40.imdata, color='steelblue',lw=1., alpha=1, label='CT, yield: '+yld(pg40), where='mid')
ax2.step(pgsrc_ct_x,rppg40.imdata, color='indianred',lw=1., alpha=1, label='RPCT, yield: '+yld(rppg40), where='mid')
ax2.set_title('PG shift: '+str(rppg40_fo-pg40_fo)[:3]+' mm', fontsize=10)
ax2.legend(frameon = False,loc='upper left')
#ax2.set_xlim(-80,70)
ax2.set_ylim(0,0.004)
ax2.set_xlabel('Position [mm]')
plot.texax(ax2)
ax3.step(pgsrc_ct_x,pg61.imdata, color='steelblue',lw=1., alpha=1, label='CT, yield: '+yld(pg61), where='mid')
ax3.step(pgsrc_ct_x,rppg61.imdata, color='indianred',lw=1., alpha=1, label='RPCT, yield: '+yld(rppg61), where='mid')
ax3.set_title('PG shift: '+str(rppg61_fo-pg61_fo)[:2]+' mm', fontsize=10)
ax3.legend(frameon = False,loc='upper left')
#ax3.set_xlim(-80,70)
ax3.set_ylim(0,0.004)
plot.texax(ax3)
######## TopRow
ax4.step(x,dose[29]/dose[29].max(), color='steelblue',lw=1., alpha=1, label='CT', where='mid')
ax4.step(x,rpdose[29]/rpdose[29].max(), color='indianred',lw=1., alpha=1, label='RPCT', where='mid')
ax4.set_title('Spot A, Shift: '+str(rpct29_fo-ct29_fo)[:3]+' mm\n'+plot.sn(MSW[29][-1],1)+' protons', fontsize=10)
ax4.legend(frameon = False,loc='upper left')
ax4.set_xlim(-80,60)
ax4.set_ylabel('Scaled Dose [a.u.]')
plot.texax(ax4)
ax5.step(x,dose[40]/dose[40].max(), color='steelblue',lw=1., alpha=1, label='CT', where='mid')
ax5.step(x,rpdose[40]/rpdose[40].max(), color='indianred',lw=1., alpha=1, label='RPCT', where='mid')
ax5.set_title('Spot B, Shift: '+str(rpct40_fo-ct40_fo)[:3]+' mm\n'+plot.sn(MSW[40][-1],1)+' protons', fontsize=10)
ax5.legend(frameon = False,loc='upper left')
#ax5.set_xlim(-80,70)
ax5.set_xlabel('Position [mm]')
plot.texax(ax5)
ax6.step(x,dose[61]/dose[61].max(), color='steelblue',lw=1., alpha=1, label='CT', where='mid')
ax6.step(x,rpdose[61]/rpdose[61].max(), color='indianred',lw=1., alpha=1, label='RPCT', where='mid')
ax6.set_title('Spot C, Shift: '+str(rpct61_fo-ct61_fo)[:2]+' mm\n'+plot.sn(MSW[61][-1],1)+' protons', fontsize=10)
ax6.legend(frameon = False,loc='upper left')
#ax6.set_xlim(-80,70)
plot.texax(ax6)
ax4.xaxis.set_visible(False)
ax5.xaxis.set_visible(False)
ax6.xaxis.set_visible(False)
ax5.yaxis.set_visible(False)
ax6.yaxis.set_visible(False)
ax2.yaxis.set_visible(False)
ax3.yaxis.set_visible(False)
f.subplots_adjust(hspace=0.3)
f.savefig('spotprofiles.pdf', bbox_inches='tight')
plot.close('all')
| lgpl-3.0 | 6,576,384,876,859,362,000 | 41.889535 | 127 | 0.6382 | false |
ronekko/chainer | chainer/distributions/bernoulli.py | 1 | 5543 | import chainer
from chainer.backends import cuda
from chainer import distribution
from chainer.functions.activation import sigmoid
from chainer.functions.array import broadcast
from chainer.functions.array import where
from chainer.functions.math import exponential
from chainer.functions.math import logarithm_1p
from chainer import utils
import numpy
import warnings
class BernoulliLogProb(chainer.function_node.FunctionNode):
def forward(self, inputs):
logit, x = inputs
self.retain_inputs((0, 1))
xp = cuda.get_array_module(x)
y = logit * (x - 1) - xp.log(xp.exp(-logit) + 1)
y = utils.force_array(y)
self.invalid = utils.force_array(xp.bitwise_and(x != 0, x != 1))
y[self.invalid] = - xp.inf
# extreme logit
logit_isinf = xp.isinf(logit)
self.to_zero = xp.bitwise_and(
logit_isinf, xp.sign(x-0.5) == xp.sign(logit))
self.to_m_inf = xp.bitwise_and(
logit_isinf, xp.sign(x-0.5) != xp.sign(logit))
y[self.to_zero] = 0.
y[self.to_m_inf] = - xp.inf
return utils.force_array(y, logit.dtype),
def backward(self, indexes, grad_outputs):
gy, = grad_outputs
logit, x = self.get_retained_inputs()
xp = cuda.get_array_module(x)
dlogit = x - 1. / (1. + exponential.exp(-logit))
# extreme logit
nan_dlogit = xp.zeros_like(dlogit.array)
nan_dlogit[self.invalid] = xp.nan
nan_dlogit[self.to_zero] = xp.nan
nan_dlogit[self.to_m_inf] = xp.nan
dlogit += nan_dlogit
return gy * dlogit, None
class ModifiedXLogX(chainer.function_node.FunctionNode):
def __init__(self, logx):
self._logx = logx
def forward(self, inputs):
x, = inputs
self.x_zero = utils.force_array(x == 0)
y = utils.force_array(x * self._logx.array)
y[self.x_zero] = 0.
return y,
def backward(self, indexes, grad_outputs):
if self.x_zero.any():
warnings.warn(
'cannot calculate gradient for zero input.',
RuntimeWarning)
gy, = grad_outputs
dx = (1 + self._logx) * (1 - self.x_zero)
return gy * dx,
def _bernoulli_log_prob(logit, x):
y, = BernoulliLogProb().apply((logit, x))
return y
def _modified_xlogx(x):
x = chainer.as_variable(x)
xp = x.xp
return ModifiedXLogX(exponential.log(
where.where(utils.force_array(x.array > 0),
x, xp.ones_like(x.array)))).apply((x,))[0]
class Bernoulli(distribution.Distribution):
"""Bernoulli Distribution.
The probability mass function of the distribution is expressed as
.. math::
P(x = 1; p) = p \\\\
P(x = 0; p) = 1 - p
Args:
p(:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`): Parameter of distribution representing \
:math:`p`. Either `p` or `logit` (not both) must have a value.
logit(:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`): Parameter of distribution representing \
:math:`\\log\\{p/(1-p)\\}`. Either `p` or `logit` (not both) must \
have a value.
"""
def __init__(self, p=None, logit=None):
super(Bernoulli, self).__init__()
if not (p is None) ^ (logit is None):
raise ValueError(
"Either `p` or `logit` (not both) must have a value.")
with chainer.using_config('enable_backprop', True):
if p is None:
self.logit = chainer.as_variable(logit)
self.p = sigmoid.sigmoid(self.logit)
else:
self.p = chainer.as_variable(p)
self.logit = exponential.log(self.p) \
- logarithm_1p.log1p(-self.p)
@property
def batch_shape(self):
return self.p.shape
@property
def entropy(self):
p = self.p
q = p.dtype.type(1.) - p
return - _modified_xlogx(p) - _modified_xlogx(q)
@property
def event_shape(self):
return ()
@property
def _is_gpu(self):
return isinstance(self.p.array, cuda.ndarray)
def log_prob(self, x):
return _bernoulli_log_prob(self.logit, x)
@property
def mean(self):
return self.p
def prob(self, x):
x = chainer.as_variable(x)
if self._is_gpu:
valid = cuda.cupy.bitwise_or(x.array == 0, x.array == 1)
else:
valid = numpy.bitwise_or(x.array == 0, x.array == 1)
ret = x * broadcast.broadcast_to(self.p, x.shape) \
+ (1 - x) * (1 - broadcast.broadcast_to(self.p, x.shape))
return ret * valid
def sample_n(self, n):
if self._is_gpu:
eps = cuda.cupy.random.binomial(
1, self.p.array, size=(n,)+self.p.shape)
else:
eps = numpy.random.binomial(
1, self.p.array, size=(n,)+self.p.shape)
return chainer.Variable(eps)
@property
def stddev(self):
return (self.p * (1 - self.p)) ** 0.5
@property
def support(self):
return '{0, 1}'
@property
def variance(self):
return self.p * (1 - self.p)
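def _example_usage():
    """Hedged usage sketch (illustration only; not part of Chainer's API).

    Builds a small Bernoulli distribution from probabilities and evaluates a
    few of the quantities defined above on toy data.
    """
    p = numpy.array([0.2, 0.5, 0.9], dtype=numpy.float32)
    dist = Bernoulli(p=p)
    x = numpy.array([0., 1., 1.], dtype=numpy.float32)
    log_p = dist.log_prob(x)   # elementwise log P(x; p)
    draws = dist.sample_n(4)   # Variable with shape (4, 3)
    return log_p, dist.mean, dist.variance, draws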
@distribution.register_kl(Bernoulli, Bernoulli)
def _kl_bernoulli_bernoulli(dist1, dist2):
return (dist1.logit - dist2.logit) * (dist1.p - 1.) \
- exponential.log(exponential.exp(-dist1.logit) + 1) \
+ exponential.log(exponential.exp(-dist2.logit) + 1)
| mit | 6,518,292,464,861,377,000 | 29.125 | 75 | 0.571712 | false |
nuagenetworks/vspk-python | vspk/v5_0/nucontainer.py | 1 | 29058 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUMetadatasFetcher
from .fetchers import NUAlarmsFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUContainerInterfacesFetcher
from .fetchers import NUContainerResyncsFetcher
from .fetchers import NUVRSsFetcher
from .fetchers import NUEventLogsFetcher
from bambou import NURESTObject
class NUContainer(NURESTObject):
""" Represents a Container in the VSD
Notes:
API that can retrieve the containers associated with a domain, zone or subnet for mediation created containers for REST created containers you need to set the additional proxy user header in http request : X-Nuage-ProxyUservalue of the header has to be either :1) enterpriseName@UserName (example : Alcatel Lucent@bob), or 2) external ID of user in VSD, typically is UUID generally decided by the CMS tool in questionUser needs to have CMS privileges to use proxy user header.
"""
__rest_name__ = "container"
__resource_name__ = "containers"
## Constants
CONST_REASON_TYPE_SHUTDOWN_UNKNOWN = "SHUTDOWN_UNKNOWN"
CONST_REASON_TYPE_CRASHED_UNKNOWN = "CRASHED_UNKNOWN"
CONST_REASON_TYPE_PAUSED_IOERROR = "PAUSED_IOERROR"
CONST_STATUS_SHUTDOWN = "SHUTDOWN"
CONST_REASON_TYPE_SHUTDOWN_LAST = "SHUTDOWN_LAST"
CONST_STATUS_DELETE_PENDING = "DELETE_PENDING"
CONST_REASON_TYPE_RUNNING_UNKNOWN = "RUNNING_UNKNOWN"
CONST_STATUS_RUNNING = "RUNNING"
CONST_REASON_TYPE_RUNNING_LAST = "RUNNING_LAST"
CONST_REASON_TYPE_RUNNING_UNPAUSED = "RUNNING_UNPAUSED"
CONST_REASON_TYPE_PAUSED_FROM_SNAPSHOT = "PAUSED_FROM_SNAPSHOT"
CONST_REASON_TYPE_PAUSED_MIGRATION = "PAUSED_MIGRATION"
CONST_REASON_TYPE_RUNNING_BOOTED = "RUNNING_BOOTED"
CONST_REASON_TYPE_UNKNOWN = "UNKNOWN"
CONST_STATUS_UNREACHABLE = "UNREACHABLE"
CONST_STATUS_BLOCKED = "BLOCKED"
CONST_REASON_TYPE_SHUTOFF_DESTROYED = "SHUTOFF_DESTROYED"
CONST_REASON_TYPE_SHUTOFF_FROM_SNAPSHOT = "SHUTOFF_FROM_SNAPSHOT"
CONST_REASON_TYPE_SHUTOFF_UNKNOWN = "SHUTOFF_UNKNOWN"
CONST_STATUS_NOSTATE = "NOSTATE"
CONST_REASON_TYPE_PAUSED_DUMP = "PAUSED_DUMP"
CONST_REASON_TYPE_CRASHED_LAST = "CRASHED_LAST"
CONST_STATUS_CRASHED = "CRASHED"
CONST_REASON_TYPE_PAUSED_LAST = "PAUSED_LAST"
CONST_REASON_TYPE_BLOCKED_LAST = "BLOCKED_LAST"
CONST_REASON_TYPE_SHUTOFF_LAST = "SHUTOFF_LAST"
CONST_STATUS_SHUTOFF = "SHUTOFF"
CONST_REASON_TYPE_SHUTOFF_SHUTDOWN = "SHUTOFF_SHUTDOWN"
CONST_REASON_TYPE_NOSTATE_UNKNOWN = "NOSTATE_UNKNOWN"
CONST_REASON_TYPE_PAUSED_SAVE = "PAUSED_SAVE"
CONST_REASON_TYPE_RUNNING_FROM_SNAPSHOT = "RUNNING_FROM_SNAPSHOT"
CONST_STATUS_UNKNOWN = "UNKNOWN"
CONST_REASON_TYPE_PAUSED_UNKNOWN = "PAUSED_UNKNOWN"
CONST_REASON_TYPE_SHUTOFF_FAILED = "SHUTOFF_FAILED"
CONST_REASON_TYPE_SHUTOFF_SAVED = "SHUTOFF_SAVED"
CONST_REASON_TYPE_SHUTOFF_MIGRATED = "SHUTOFF_MIGRATED"
CONST_STATUS_LAST = "LAST"
CONST_REASON_TYPE_RUNNING_MIGRATED = "RUNNING_MIGRATED"
CONST_REASON_TYPE_RUNNING_SAVE_CANCELED = "RUNNING_SAVE_CANCELED"
CONST_REASON_TYPE_SHUTDOWN_USER = "SHUTDOWN_USER"
CONST_REASON_TYPE_RUNNING_MIGRATION_CANCELED = "RUNNING_MIGRATION_CANCELED"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
CONST_STATUS_PAUSED = "PAUSED"
CONST_STATUS_INIT = "INIT"
CONST_REASON_TYPE_BLOCKED_UNKNOWN = "BLOCKED_UNKNOWN"
CONST_REASON_TYPE_NOSTATE_LAST = "NOSTATE_LAST"
CONST_REASON_TYPE_RUNNING_RESTORED = "RUNNING_RESTORED"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_REASON_TYPE_SHUTOFF_CRASHED = "SHUTOFF_CRASHED"
CONST_REASON_TYPE_PAUSED_USER = "PAUSED_USER"
CONST_DELETE_MODE_TIMER = "TIMER"
CONST_REASON_TYPE_PAUSED_WATCHDOG = "PAUSED_WATCHDOG"
CONST_REASON_TYPE_PAUSED_SHUTTING_DOWN = "PAUSED_SHUTTING_DOWN"
def __init__(self, **kwargs):
""" Initializes a Container instance
Notes:
                You can specify all parameters while calling this method.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> container = NUContainer(id=u'xxxx-xxx-xxx-xxx', name=u'Container')
>>> container = NUContainer(data=my_dict)
"""
super(NUContainer, self).__init__()
# Read/Write Attributes
self._l2_domain_ids = None
self._vrsid = None
self._uuid = None
self._name = None
self._last_updated_by = None
self._reason_type = None
self._delete_expiry = None
self._delete_mode = None
self._resync_info = None
self._site_identifier = None
self._image_id = None
self._image_name = None
self._interfaces = None
self._enterprise_id = None
self._enterprise_name = None
self._entity_scope = None
self._domain_ids = None
self._compute_provisioned = None
self._zone_ids = None
self._orchestration_id = None
self._user_id = None
self._user_name = None
self._status = None
self._subnet_ids = None
self._external_id = None
self._hypervisor_ip = None
self.expose_attribute(local_name="l2_domain_ids", remote_name="l2DomainIDs", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="vrsid", remote_name="VRSID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="uuid", remote_name="UUID", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="reason_type", remote_name="reasonType", attribute_type=str, is_required=False, is_unique=False, choices=[u'BLOCKED_LAST', u'BLOCKED_UNKNOWN', u'CRASHED_LAST', u'CRASHED_UNKNOWN', u'NOSTATE_LAST', u'NOSTATE_UNKNOWN', u'PAUSED_DUMP', u'PAUSED_FROM_SNAPSHOT', u'PAUSED_IOERROR', u'PAUSED_LAST', u'PAUSED_MIGRATION', u'PAUSED_SAVE', u'PAUSED_SHUTTING_DOWN', u'PAUSED_UNKNOWN', u'PAUSED_USER', u'PAUSED_WATCHDOG', u'RUNNING_BOOTED', u'RUNNING_FROM_SNAPSHOT', u'RUNNING_LAST', u'RUNNING_MIGRATED', u'RUNNING_MIGRATION_CANCELED', u'RUNNING_RESTORED', u'RUNNING_SAVE_CANCELED', u'RUNNING_UNKNOWN', u'RUNNING_UNPAUSED', u'SHUTDOWN_LAST', u'SHUTDOWN_UNKNOWN', u'SHUTDOWN_USER', u'SHUTOFF_CRASHED', u'SHUTOFF_DESTROYED', u'SHUTOFF_FAILED', u'SHUTOFF_FROM_SNAPSHOT', u'SHUTOFF_LAST', u'SHUTOFF_MIGRATED', u'SHUTOFF_SAVED', u'SHUTOFF_SHUTDOWN', u'SHUTOFF_UNKNOWN', u'UNKNOWN'])
self.expose_attribute(local_name="delete_expiry", remote_name="deleteExpiry", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="delete_mode", remote_name="deleteMode", attribute_type=str, is_required=False, is_unique=False, choices=[u'TIMER'])
self.expose_attribute(local_name="resync_info", remote_name="resyncInfo", attribute_type=dict, is_required=False, is_unique=False)
self.expose_attribute(local_name="site_identifier", remote_name="siteIdentifier", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="image_id", remote_name="imageID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="image_name", remote_name="imageName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="interfaces", remote_name="interfaces", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="enterprise_id", remote_name="enterpriseID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="enterprise_name", remote_name="enterpriseName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="domain_ids", remote_name="domainIDs", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="compute_provisioned", remote_name="computeProvisioned", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="zone_ids", remote_name="zoneIDs", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="orchestration_id", remote_name="orchestrationID", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="user_id", remote_name="userID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="user_name", remote_name="userName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="status", remote_name="status", attribute_type=str, is_required=False, is_unique=False, choices=[u'BLOCKED', u'CRASHED', u'DELETE_PENDING', u'INIT', u'LAST', u'NOSTATE', u'PAUSED', u'RUNNING', u'SHUTDOWN', u'SHUTOFF', u'UNKNOWN', u'UNREACHABLE'])
self.expose_attribute(local_name="subnet_ids", remote_name="subnetIDs", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
self.expose_attribute(local_name="hypervisor_ip", remote_name="hypervisorIP", attribute_type=str, is_required=False, is_unique=False)
# Fetchers
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.alarms = NUAlarmsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.container_interfaces = NUContainerInterfacesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.container_resyncs = NUContainerResyncsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.vrss = NUVRSsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.event_logs = NUEventLogsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
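    # Illustrative note (sketch, not generated code): once a container has been
    # fetched through an authenticated bambou/vspk session, child objects are
    # retrieved through the fetchers declared above, e.g.
    #     interfaces = container.container_interfaces.get()
    # This assumes the standard bambou fetcher API (get()/fetch()); session
    # handling is omitted here.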
# Properties
@property
def l2_domain_ids(self):
""" Get l2_domain_ids value.
Notes:
Array of IDs of the l2 domain that the container is connected to
This attribute is named `l2DomainIDs` in VSD API.
"""
return self._l2_domain_ids
@l2_domain_ids.setter
def l2_domain_ids(self, value):
""" Set l2_domain_ids value.
Notes:
Array of IDs of the l2 domain that the container is connected to
This attribute is named `l2DomainIDs` in VSD API.
"""
self._l2_domain_ids = value
@property
def vrsid(self):
""" Get vrsid value.
Notes:
Id of the VRS that this container is attached to.
This attribute is named `VRSID` in VSD API.
"""
return self._vrsid
@vrsid.setter
def vrsid(self, value):
""" Set vrsid value.
Notes:
Id of the VRS that this container is attached to.
This attribute is named `VRSID` in VSD API.
"""
self._vrsid = value
@property
def uuid(self):
""" Get uuid value.
Notes:
UUID of the container
This attribute is named `UUID` in VSD API.
"""
return self._uuid
@uuid.setter
def uuid(self, value):
""" Set uuid value.
Notes:
UUID of the container
This attribute is named `UUID` in VSD API.
"""
self._uuid = value
@property
def name(self):
""" Get name value.
Notes:
Name of the container
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
Name of the container
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def reason_type(self):
""" Get reason_type value.
Notes:
Reason of the event associated with the container.
This attribute is named `reasonType` in VSD API.
"""
return self._reason_type
@reason_type.setter
def reason_type(self, value):
""" Set reason_type value.
Notes:
Reason of the event associated with the container.
This attribute is named `reasonType` in VSD API.
"""
self._reason_type = value
@property
def delete_expiry(self):
""" Get delete_expiry value.
Notes:
                Reflects the container deletion expiry timer in seconds. deleteMode needs to be a non-null value for deleteExpiry to take effect. CMS-created containers will always have deleteMode set to TIMER
This attribute is named `deleteExpiry` in VSD API.
"""
return self._delete_expiry
@delete_expiry.setter
def delete_expiry(self, value):
""" Set delete_expiry value.
Notes:
                Reflects the container deletion expiry timer in seconds. deleteMode needs to be a non-null value for deleteExpiry to take effect. CMS-created containers will always have deleteMode set to TIMER
This attribute is named `deleteExpiry` in VSD API.
"""
self._delete_expiry = value
@property
def delete_mode(self):
""" Get delete_mode value.
Notes:
                Reflects the mode of container deletion. Possible values are TIMER.
This attribute is named `deleteMode` in VSD API.
"""
return self._delete_mode
@delete_mode.setter
def delete_mode(self, value):
""" Set delete_mode value.
Notes:
                Reflects the mode of container deletion. Possible values are TIMER.
This attribute is named `deleteMode` in VSD API.
"""
self._delete_mode = value
@property
def resync_info(self):
""" Get resync_info value.
Notes:
Information of the status of the resync operation of a container
This attribute is named `resyncInfo` in VSD API.
"""
return self._resync_info
@resync_info.setter
def resync_info(self, value):
""" Set resync_info value.
Notes:
Information of the status of the resync operation of a container
This attribute is named `resyncInfo` in VSD API.
"""
self._resync_info = value
@property
def site_identifier(self):
""" Get site_identifier value.
Notes:
This property specifies the site the container belongs to, for Geo-redundancy.
This attribute is named `siteIdentifier` in VSD API.
"""
return self._site_identifier
@site_identifier.setter
def site_identifier(self, value):
""" Set site_identifier value.
Notes:
This property specifies the site the container belongs to, for Geo-redundancy.
This attribute is named `siteIdentifier` in VSD API.
"""
self._site_identifier = value
@property
def image_id(self):
""" Get image_id value.
Notes:
Id of the container image
This attribute is named `imageID` in VSD API.
"""
return self._image_id
@image_id.setter
def image_id(self, value):
""" Set image_id value.
Notes:
Id of the container image
This attribute is named `imageID` in VSD API.
"""
self._image_id = value
@property
def image_name(self):
""" Get image_name value.
Notes:
Name of the container image
This attribute is named `imageName` in VSD API.
"""
return self._image_name
@image_name.setter
def image_name(self, value):
""" Set image_name value.
Notes:
Name of the container image
This attribute is named `imageName` in VSD API.
"""
self._image_name = value
@property
def interfaces(self):
""" Get interfaces value.
Notes:
List of container interfaces associated with the container
"""
return self._interfaces
@interfaces.setter
def interfaces(self, value):
""" Set interfaces value.
Notes:
List of container interfaces associated with the container
"""
self._interfaces = value
@property
def enterprise_id(self):
""" Get enterprise_id value.
Notes:
ID of the enterprise that this container belongs to
This attribute is named `enterpriseID` in VSD API.
"""
return self._enterprise_id
@enterprise_id.setter
def enterprise_id(self, value):
""" Set enterprise_id value.
Notes:
ID of the enterprise that this container belongs to
This attribute is named `enterpriseID` in VSD API.
"""
self._enterprise_id = value
@property
def enterprise_name(self):
""" Get enterprise_name value.
Notes:
Name of the enterprise that this container belongs to
This attribute is named `enterpriseName` in VSD API.
"""
return self._enterprise_name
@enterprise_name.setter
def enterprise_name(self, value):
""" Set enterprise_name value.
Notes:
Name of the enterprise that this container belongs to
This attribute is named `enterpriseName` in VSD API.
"""
self._enterprise_name = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def domain_ids(self):
""" Get domain_ids value.
Notes:
Array of IDs of the domain that the container is connected to
This attribute is named `domainIDs` in VSD API.
"""
return self._domain_ids
@domain_ids.setter
def domain_ids(self, value):
""" Set domain_ids value.
Notes:
Array of IDs of the domain that the container is connected to
This attribute is named `domainIDs` in VSD API.
"""
self._domain_ids = value
@property
def compute_provisioned(self):
""" Get compute_provisioned value.
Notes:
Compute Provisioned
This attribute is named `computeProvisioned` in VSD API.
"""
return self._compute_provisioned
@compute_provisioned.setter
def compute_provisioned(self, value):
""" Set compute_provisioned value.
Notes:
Compute Provisioned
This attribute is named `computeProvisioned` in VSD API.
"""
self._compute_provisioned = value
@property
def zone_ids(self):
""" Get zone_ids value.
Notes:
Array of IDs of the zone that this container is attached to
This attribute is named `zoneIDs` in VSD API.
"""
return self._zone_ids
@zone_ids.setter
def zone_ids(self, value):
""" Set zone_ids value.
Notes:
Array of IDs of the zone that this container is attached to
This attribute is named `zoneIDs` in VSD API.
"""
self._zone_ids = value
@property
def orchestration_id(self):
""" Get orchestration_id value.
Notes:
Orchestration ID
This attribute is named `orchestrationID` in VSD API.
"""
return self._orchestration_id
@orchestration_id.setter
def orchestration_id(self, value):
""" Set orchestration_id value.
Notes:
Orchestration ID
This attribute is named `orchestrationID` in VSD API.
"""
self._orchestration_id = value
@property
def user_id(self):
""" Get user_id value.
Notes:
ID of the user that created this container
This attribute is named `userID` in VSD API.
"""
return self._user_id
@user_id.setter
def user_id(self, value):
""" Set user_id value.
Notes:
ID of the user that created this container
This attribute is named `userID` in VSD API.
"""
self._user_id = value
@property
def user_name(self):
""" Get user_name value.
Notes:
Username of the user that created this container
This attribute is named `userName` in VSD API.
"""
return self._user_name
@user_name.setter
def user_name(self, value):
""" Set user_name value.
Notes:
Username of the user that created this container
This attribute is named `userName` in VSD API.
"""
self._user_name = value
@property
def status(self):
""" Get status value.
Notes:
Status of the container.
"""
return self._status
@status.setter
def status(self, value):
""" Set status value.
Notes:
Status of the container.
"""
self._status = value
@property
def subnet_ids(self):
""" Get subnet_ids value.
Notes:
Array of IDs of the subnets that the container is connected to
This attribute is named `subnetIDs` in VSD API.
"""
return self._subnet_ids
@subnet_ids.setter
def subnet_ids(self, value):
""" Set subnet_ids value.
Notes:
Array of IDs of the subnets that the container is connected to
This attribute is named `subnetIDs` in VSD API.
"""
self._subnet_ids = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
@property
def hypervisor_ip(self):
""" Get hypervisor_ip value.
Notes:
IP address of the hypervisor that this container is currently running in
This attribute is named `hypervisorIP` in VSD API.
"""
return self._hypervisor_ip
@hypervisor_ip.setter
def hypervisor_ip(self, value):
""" Set hypervisor_ip value.
Notes:
IP address of the hypervisor that this container is currently running in
This attribute is named `hypervisorIP` in VSD API.
"""
self._hypervisor_ip = value
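
# --- Illustrative usage sketch (not part of the generated VSD model) --------
# The snippet below only exercises the constructor patterns documented in the
# class docstring above: keyword arguments and the special `data` dictionary.
# All attribute values are made-up placeholders and no VSD connection is
# needed; the assumption that `data` keys follow the remote (VSD API) names
# comes from the expose_attribute() declarations in __init__.
if __name__ == "__main__":
    container = NUContainer(name=u'web-frontend',
                            uuid=u'0d2c1547-0000-0000-0000-000000000000')
    container_from_dict = NUContainer(data={u'name': u'web-frontend',
                                            u'UUID': u'0d2c1547-0000-0000-0000-000000000000',
                                            u'status': u'RUNNING'})
    print("%s %s" % (container.name, container_from_dict.status))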
| bsd-3-clause | -6,679,579,384,259,088,000 | 29.081781 | 906 | 0.576055 | false |
sunze/py_flask | venv/lib/python3.4/site-packages/alembic/environment.py | 1 | 32950 | from .operations import Operations
from .migration import MigrationContext
from . import util
class EnvironmentContext(object):
"""Represent the state made available to an ``env.py`` script.
:class:`.EnvironmentContext` is normally instantiated
by the commands present in the :mod:`alembic.command`
module. From within an ``env.py`` script, the current
:class:`.EnvironmentContext` is available via the
``alembic.context`` datamember.
:class:`.EnvironmentContext` is also a Python context
manager, that is, is intended to be used using the
``with:`` statement. A typical use of :class:`.EnvironmentContext`::
from alembic.config import Config
from alembic.script import ScriptDirectory
config = Config()
config.set_main_option("script_location", "myapp:migrations")
script = ScriptDirectory.from_config(config)
def my_function(rev, context):
'''do something with revision "rev", which
will be the current database revision,
and "context", which is the MigrationContext
that the env.py will create'''
with EnvironmentContext(
config,
script,
fn = my_function,
as_sql = False,
starting_rev = 'base',
destination_rev = 'head',
tag = "sometag"
):
script.run_env()
The above script will invoke the ``env.py`` script
within the migration environment. If and when ``env.py``
calls :meth:`.MigrationContext.run_migrations`, the
``my_function()`` function above will be called
by the :class:`.MigrationContext`, given the context
itself as well as the current revision in the database.
.. note::
For most API usages other than full blown
invocation of migration scripts, the :class:`.MigrationContext`
and :class:`.ScriptDirectory` objects can be created and
used directly. The :class:`.EnvironmentContext` object
is *only* needed when you need to actually invoke the
``env.py`` module present in the migration environment.
"""
_migration_context = None
config = None
"""An instance of :class:`.Config` representing the
configuration file contents as well as other variables
set programmatically within it."""
script = None
"""An instance of :class:`.ScriptDirectory` which provides
programmatic access to version files within the ``versions/``
directory.
"""
def __init__(self, config, script, **kw):
"""Construct a new :class:`.EnvironmentContext`.
:param config: a :class:`.Config` instance.
:param script: a :class:`.ScriptDirectory` instance.
:param \**kw: keyword options that will be ultimately
passed along to the :class:`.MigrationContext` when
:meth:`.EnvironmentContext.configure` is called.
"""
self.config = config
self.script = script
self.context_opts = kw
def __enter__(self):
"""Establish a context which provides a
:class:`.EnvironmentContext` object to
env.py scripts.
The :class:`.EnvironmentContext` will
be made available as ``from alembic import context``.
"""
from .context import _install_proxy
_install_proxy(self)
return self
def __exit__(self, *arg, **kw):
from . import context, op
context._remove_proxy()
op._remove_proxy()
def is_offline_mode(self):
"""Return True if the current migrations environment
is running in "offline mode".
This is ``True`` or ``False`` depending
        on the ``--sql`` flag passed.
This function does not require that the :class:`.MigrationContext`
has been configured.
"""
return self.context_opts.get('as_sql', False)
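    # Illustrative (sketch of the stock ``env.py`` dispatch, not used by this
    # class itself): a migration environment typically branches on this flag::
    #
    #     from alembic import context
    #
    #     if context.is_offline_mode():
    #         run_migrations_offline()   # render SQL to a script
    #     else:
    #         run_migrations_online()    # execute against a live connection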
def is_transactional_ddl(self):
"""Return True if the context is configured to expect a
transactional DDL capable backend.
This defaults to the type of database in use, and
can be overridden by the ``transactional_ddl`` argument
to :meth:`.configure`
This function requires that a :class:`.MigrationContext`
has first been made available via :meth:`.configure`.
"""
return self.get_context().impl.transactional_ddl
def requires_connection(self):
return not self.is_offline_mode()
def get_head_revision(self):
"""Return the hex identifier of the 'head' script revision.
If the script directory has multiple heads, this
method raises a :class:`.CommandError`;
:meth:`.EnvironmentContext.get_head_revisions` should be preferred.
This function does not require that the :class:`.MigrationContext`
has been configured.
.. seealso:: :meth:`.EnvironmentContext.get_head_revisions`
"""
return self.script.as_revision_number("head")
def get_head_revisions(self):
"""Return the hex identifier of the 'heads' script revision(s).
This returns a tuple containing the version number of all
heads in the script directory.
This function does not require that the :class:`.MigrationContext`
has been configured.
.. versionadded:: 0.7.0
"""
return self.script.as_revision_number("heads")
def get_starting_revision_argument(self):
"""Return the 'starting revision' argument,
if the revision was passed using ``start:end``.
This is only meaningful in "offline" mode.
Returns ``None`` if no value is available
or was configured.
This function does not require that the :class:`.MigrationContext`
has been configured.
"""
if self._migration_context is not None:
return self.script.as_revision_number(
self.get_context()._start_from_rev)
elif 'starting_rev' in self.context_opts:
return self.script.as_revision_number(
self.context_opts['starting_rev'])
else:
# this should raise only in the case that a command
# is being run where the "starting rev" is never applicable;
# this is to catch scripts which rely upon this in
# non-sql mode or similar
raise util.CommandError(
"No starting revision argument is available.")
def get_revision_argument(self):
"""Get the 'destination' revision argument.
This is typically the argument passed to the
``upgrade`` or ``downgrade`` command.
If it was specified as ``head``, the actual
version number is returned; if specified
as ``base``, ``None`` is returned.
This function does not require that the :class:`.MigrationContext`
has been configured.
"""
return self.script.as_revision_number(
self.context_opts['destination_rev'])
def get_tag_argument(self):
"""Return the value passed for the ``--tag`` argument, if any.
The ``--tag`` argument is not used directly by Alembic,
but is available for custom ``env.py`` configurations that
wish to use it; particularly for offline generation scripts
that wish to generate tagged filenames.
This function does not require that the :class:`.MigrationContext`
has been configured.
.. seealso::
:meth:`.EnvironmentContext.get_x_argument` - a newer and more
open ended system of extending ``env.py`` scripts via the command
line.
"""
return self.context_opts.get('tag', None)
def get_x_argument(self, as_dictionary=False):
"""Return the value(s) passed for the ``-x`` argument, if any.
The ``-x`` argument is an open ended flag that allows any user-defined
value or values to be passed on the command line, then available
here for consumption by a custom ``env.py`` script.
The return value is a list, returned directly from the ``argparse``
structure. If ``as_dictionary=True`` is passed, the ``x`` arguments
are parsed using ``key=value`` format into a dictionary that is
then returned.
For example, to support passing a database URL on the command line,
the standard ``env.py`` script can be modified like this::
cmd_line_url = context.get_x_argument(
as_dictionary=True).get('dbname')
if cmd_line_url:
engine = create_engine(cmd_line_url)
else:
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
This then takes effect by running the ``alembic`` script as::
alembic -x dbname=postgresql://user:pass@host/dbname upgrade head
This function does not require that the :class:`.MigrationContext`
has been configured.
.. versionadded:: 0.6.0
.. seealso::
:meth:`.EnvironmentContext.get_tag_argument`
:attr:`.Config.cmd_opts`
"""
if self.config.cmd_opts is not None:
value = self.config.cmd_opts.x or []
else:
value = []
if as_dictionary:
value = dict(
arg.split('=', 1) for arg in value
)
return value
def configure(self,
connection=None,
url=None,
dialect_name=None,
transactional_ddl=None,
transaction_per_migration=False,
output_buffer=None,
starting_rev=None,
tag=None,
template_args=None,
render_as_batch=False,
target_metadata=None,
include_symbol=None,
include_object=None,
include_schemas=False,
compare_type=False,
compare_server_default=False,
render_item=None,
literal_binds=False,
upgrade_token="upgrades",
downgrade_token="downgrades",
alembic_module_prefix="op.",
sqlalchemy_module_prefix="sa.",
user_module_prefix=None,
**kw
):
"""Configure a :class:`.MigrationContext` within this
:class:`.EnvironmentContext` which will provide database
connectivity and other configuration to a series of
migration scripts.
Many methods on :class:`.EnvironmentContext` require that
this method has been called in order to function, as they
ultimately need to have database access or at least access
to the dialect in use. Those which do are documented as such.
The important thing needed by :meth:`.configure` is a
means to determine what kind of database dialect is in use.
An actual connection to that database is needed only if
the :class:`.MigrationContext` is to be used in
"online" mode.
If the :meth:`.is_offline_mode` function returns ``True``,
then no connection is needed here. Otherwise, the
``connection`` parameter should be present as an
instance of :class:`sqlalchemy.engine.Connection`.
This function is typically called from the ``env.py``
script within a migration environment. It can be called
multiple times for an invocation. The most recent
:class:`~sqlalchemy.engine.Connection`
for which it was called is the one that will be operated upon
by the next call to :meth:`.run_migrations`.
General parameters:
:param connection: a :class:`~sqlalchemy.engine.Connection`
to use
for SQL execution in "online" mode. When present, is also
used to determine the type of dialect in use.
:param url: a string database url, or a
:class:`sqlalchemy.engine.url.URL` object.
The type of dialect to be used will be derived from this if
``connection`` is not passed.
:param dialect_name: string name of a dialect, such as
"postgresql", "mssql", etc.
The type of dialect to be used will be derived from this if
``connection`` and ``url`` are not passed.
:param transactional_ddl: Force the usage of "transactional"
DDL on or off;
this otherwise defaults to whether or not the dialect in
use supports it.
:param transaction_per_migration: if True, nest each migration script
in a transaction rather than the full series of migrations to
run.
.. versionadded:: 0.6.5
:param output_buffer: a file-like object that will be used
for textual output
when the ``--sql`` option is used to generate SQL scripts.
Defaults to
``sys.stdout`` if not passed here and also not present on
the :class:`.Config`
object. The value here overrides that of the :class:`.Config`
object.
:param output_encoding: when using ``--sql`` to generate SQL
scripts, apply this encoding to the string output.
:param literal_binds: when using ``--sql`` to generate SQL
scripts, pass through the ``literal_binds`` flag to the compiler
so that any literal values that would ordinarily be bound
parameters are converted to plain strings.
.. warning:: Dialects can typically only handle simple datatypes
like strings and numbers for auto-literal generation. Datatypes
like dates, intervals, and others may still require manual
formatting, typically using :meth:`.Operations.inline_literal`.
.. note:: the ``literal_binds`` flag is ignored on SQLAlchemy
versions prior to 0.8 where this feature is not supported.
.. versionadded:: 0.7.6
.. seealso::
:meth:`.Operations.inline_literal`
:param starting_rev: Override the "starting revision" argument
when using ``--sql`` mode.
:param tag: a string tag for usage by custom ``env.py`` scripts.
Set via the ``--tag`` option, can be overridden here.
:param template_args: dictionary of template arguments which
will be added to the template argument environment when
running the "revision" command. Note that the script environment
is only run within the "revision" command if the --autogenerate
option is used, or if the option "revision_environment=true"
is present in the alembic.ini file.
:param version_table: The name of the Alembic version table.
The default is ``'alembic_version'``.
:param version_table_schema: Optional schema to place version
table within.
Parameters specific to the autogenerate feature, when
``alembic revision`` is run with the ``--autogenerate`` feature:
:param target_metadata: a :class:`sqlalchemy.schema.MetaData`
object that
will be consulted during autogeneration. The tables present
will be compared against
what is locally available on the target
:class:`~sqlalchemy.engine.Connection`
to produce candidate upgrade/downgrade operations.
:param compare_type: Indicates type comparison behavior during
an autogenerate
operation. Defaults to ``False`` which disables type
comparison. Set to
``True`` to turn on default type comparison, which has varied
accuracy depending on backend. See :ref:`compare_types`
for an example as well as information on other type
comparison options.
.. seealso::
:ref:`compare_types`
:paramref:`.EnvironmentContext.configure.compare_server_default`
:param compare_server_default: Indicates server default comparison
behavior during
an autogenerate operation. Defaults to ``False`` which disables
server default
comparison. Set to ``True`` to turn on server default comparison,
which has
varied accuracy depending on backend.
To customize server default comparison behavior, a callable may
be specified
which can filter server default comparisons during an
autogenerate operation.
         The format of this
callable is::
def my_compare_server_default(context, inspected_column,
metadata_column, inspected_default, metadata_default,
rendered_metadata_default):
# return True if the defaults are different,
# False if not, or None to allow the default implementation
# to compare these defaults
return None
context.configure(
# ...
compare_server_default = my_compare_server_default
)
``inspected_column`` is a dictionary structure as returned by
:meth:`sqlalchemy.engine.reflection.Inspector.get_columns`, whereas
``metadata_column`` is a :class:`sqlalchemy.schema.Column` from
the local model environment.
A return value of ``None`` indicates to allow default server default
comparison
to proceed. Note that some backends such as Postgresql actually
execute
the two defaults on the database side to compare for equivalence.
.. seealso::
:paramref:`.EnvironmentContext.configure.compare_type`
:param include_object: A callable function which is given
the chance to return ``True`` or ``False`` for any object,
indicating if the given object should be considered in the
autogenerate sweep.
The function accepts the following positional arguments:
* ``object``: a :class:`~sqlalchemy.schema.SchemaItem` object such
as a :class:`~sqlalchemy.schema.Table`,
:class:`~sqlalchemy.schema.Column`,
:class:`~sqlalchemy.schema.Index`
:class:`~sqlalchemy.schema.UniqueConstraint`,
or :class:`~sqlalchemy.schema.ForeignKeyConstraint` object
* ``name``: the name of the object. This is typically available
via ``object.name``.
* ``type``: a string describing the type of object; currently
``"table"``, ``"column"``, ``"index"``, ``"unique_constraint"``,
or ``"foreign_key_constraint"``
.. versionadded:: 0.7.0 Support for indexes and unique constraints
within the
:paramref:`~.EnvironmentContext.configure.include_object` hook.
.. versionadded:: 0.7.1 Support for foreign keys within the
:paramref:`~.EnvironmentContext.configure.include_object` hook.
* ``reflected``: ``True`` if the given object was produced based on
table reflection, ``False`` if it's from a local :class:`.MetaData`
object.
* ``compare_to``: the object being compared against, if available,
else ``None``.
E.g.::
def include_object(object, name, type_, reflected, compare_to):
if (type_ == "column" and
not reflected and
object.info.get("skip_autogenerate", False)):
return False
else:
return True
context.configure(
# ...
include_object = include_object
)
:paramref:`.EnvironmentContext.configure.include_object` can also
be used to filter on specific schemas to include or omit, when
the :paramref:`.EnvironmentContext.configure.include_schemas`
flag is set to ``True``. The :attr:`.Table.schema` attribute
on each :class:`.Table` object reflected will indicate the name of the
schema from which the :class:`.Table` originates.
.. versionadded:: 0.6.0
.. seealso::
:paramref:`.EnvironmentContext.configure.include_schemas`
:param include_symbol: A callable function which, given a table name
and schema name (may be ``None``), returns ``True`` or ``False``,
indicating if the given table should be considered in the
autogenerate sweep.
.. deprecated:: 0.6.0
:paramref:`.EnvironmentContext.configure.include_symbol`
            is superseded by the more generic
:paramref:`.EnvironmentContext.configure.include_object`
parameter.
E.g.::
def include_symbol(tablename, schema):
return tablename not in ("skip_table_one", "skip_table_two")
context.configure(
# ...
include_symbol = include_symbol
)
.. seealso::
:paramref:`.EnvironmentContext.configure.include_schemas`
:paramref:`.EnvironmentContext.configure.include_object`
:param render_as_batch: if True, commands which alter elements
within a table will be placed under a ``with batch_alter_table():``
directive, so that batch migrations will take place.
.. versionadded:: 0.7.0
.. seealso::
:ref:`batch_migrations`
:param include_schemas: If True, autogenerate will scan across
all schemas located by the SQLAlchemy
:meth:`~sqlalchemy.engine.reflection.Inspector.get_schema_names`
method, and include all differences in tables found across all
those schemas. When using this option, you may want to also
use the :paramref:`.EnvironmentContext.configure.include_object`
option to specify a callable which
can filter the tables/schemas that get included.
.. seealso::
:paramref:`.EnvironmentContext.configure.include_object`
:param render_item: Callable that can be used to override how
any schema item, i.e. column, constraint, type,
etc., is rendered for autogenerate. The callable receives a
string describing the type of object, the object, and
the autogen context. If it returns False, the
default rendering method will be used. If it returns None,
the item will not be rendered in the context of a Table
construct, that is, can be used to skip columns or constraints
within op.create_table()::
def my_render_column(type_, col, autogen_context):
if type_ == "column" and isinstance(col, MySpecialCol):
return repr(col)
else:
return False
context.configure(
# ...
render_item = my_render_column
)
Available values for the type string include: ``"column"``,
``"primary_key"``, ``"foreign_key"``, ``"unique"``, ``"check"``,
``"type"``, ``"server_default"``.
.. seealso::
:ref:`autogen_render_types`
:param upgrade_token: When autogenerate completes, the text of the
candidate upgrade operations will be present in this template
variable when ``script.py.mako`` is rendered. Defaults to
``upgrades``.
:param downgrade_token: When autogenerate completes, the text of the
candidate downgrade operations will be present in this
template variable when ``script.py.mako`` is rendered. Defaults to
``downgrades``.
:param alembic_module_prefix: When autogenerate refers to Alembic
:mod:`alembic.operations` constructs, this prefix will be used
(i.e. ``op.create_table``) Defaults to "``op.``".
Can be ``None`` to indicate no prefix.
:param sqlalchemy_module_prefix: When autogenerate refers to
SQLAlchemy
:class:`~sqlalchemy.schema.Column` or type classes, this prefix
will be used
(i.e. ``sa.Column("somename", sa.Integer)``) Defaults to "``sa.``".
Can be ``None`` to indicate no prefix.
Note that when dialect-specific types are rendered, autogenerate
will render them using the dialect module name, i.e. ``mssql.BIT()``,
``postgresql.UUID()``.
:param user_module_prefix: When autogenerate refers to a SQLAlchemy
type (e.g. :class:`.TypeEngine`) where the module name is not
under the ``sqlalchemy`` namespace, this prefix will be used
within autogenerate. If left at its default of
``None``, the ``__module__`` attribute of the type is used to
render the import module. It's a good practice to set this
and to have all custom types be available from a fixed module space,
in order to future-proof migration files against reorganizations
in modules.
.. versionchanged:: 0.7.0
:paramref:`.EnvironmentContext.configure.user_module_prefix`
no longer defaults to the value of
:paramref:`.EnvironmentContext.configure.sqlalchemy_module_prefix`
when left at ``None``; the ``__module__`` attribute is now used.
.. versionadded:: 0.6.3 added
:paramref:`.EnvironmentContext.configure.user_module_prefix`
.. seealso::
:ref:`autogen_module_prefix`
Parameters specific to individual backends:
:param mssql_batch_separator: The "batch separator" which will
be placed between each statement when generating offline SQL Server
migrations. Defaults to ``GO``. Note this is in addition to the
customary semicolon ``;`` at the end of each statement; SQL Server
considers the "batch separator" to denote the end of an
individual statement execution, and cannot group certain
dependent operations in one step.
:param oracle_batch_separator: The "batch separator" which will
be placed between each statement when generating offline
Oracle migrations. Defaults to ``/``. Oracle doesn't add a
semicolon between statements like most other backends.
"""
opts = self.context_opts
if transactional_ddl is not None:
opts["transactional_ddl"] = transactional_ddl
if output_buffer is not None:
opts["output_buffer"] = output_buffer
elif self.config.output_buffer is not None:
opts["output_buffer"] = self.config.output_buffer
if starting_rev:
opts['starting_rev'] = starting_rev
if tag:
opts['tag'] = tag
if template_args and 'template_args' in opts:
opts['template_args'].update(template_args)
opts["transaction_per_migration"] = transaction_per_migration
opts['target_metadata'] = target_metadata
opts['include_symbol'] = include_symbol
opts['include_object'] = include_object
opts['include_schemas'] = include_schemas
opts['render_as_batch'] = render_as_batch
opts['upgrade_token'] = upgrade_token
opts['downgrade_token'] = downgrade_token
opts['sqlalchemy_module_prefix'] = sqlalchemy_module_prefix
opts['alembic_module_prefix'] = alembic_module_prefix
opts['user_module_prefix'] = user_module_prefix
opts['literal_binds'] = literal_binds
if render_item is not None:
opts['render_item'] = render_item
if compare_type is not None:
opts['compare_type'] = compare_type
if compare_server_default is not None:
opts['compare_server_default'] = compare_server_default
opts['script'] = self.script
opts.update(kw)
self._migration_context = MigrationContext.configure(
connection=connection,
url=url,
dialect_name=dialect_name,
environment_context=self,
opts=opts
)
def run_migrations(self, **kw):
"""Run migrations as determined by the current command line
configuration
as well as versioning information present (or not) in the current
database connection (if one is present).
The function accepts optional ``**kw`` arguments. If these are
passed, they are sent directly to the ``upgrade()`` and
``downgrade()``
functions within each target revision file. By modifying the
``script.py.mako`` file so that the ``upgrade()`` and ``downgrade()``
functions accept arguments, parameters can be passed here so that
contextual information, usually information to identify a particular
database in use, can be passed from a custom ``env.py`` script
to the migration functions.
This function requires that a :class:`.MigrationContext` has
first been made available via :meth:`.configure`.
"""
with Operations.context(self._migration_context):
self.get_context().run_migrations(**kw)
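    # Illustrative (sketch): if ``script.py.mako`` is edited so that revision
    # files accept a parameter, e.g. ``def upgrade(engine_name): ...``, an
    # ``env.py`` can forward it with
    #     context.run_migrations(engine_name="engine1")
    # The parameter name here is only an example.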
def execute(self, sql, execution_options=None):
"""Execute the given SQL using the current change context.
The behavior of :meth:`.execute` is the same
as that of :meth:`.Operations.execute`. Please see that
function's documentation for full detail including
caveats and limitations.
This function requires that a :class:`.MigrationContext` has
first been made available via :meth:`.configure`.
"""
self.get_context().execute(sql,
execution_options=execution_options)
def static_output(self, text):
"""Emit text directly to the "offline" SQL stream.
Typically this is for emitting comments that
start with --. The statement is not treated
as a SQL execution, no ; or batch separator
is added, etc.
"""
self.get_context().impl.static_output(text)
def begin_transaction(self):
"""Return a context manager that will
enclose an operation within a "transaction",
as defined by the environment's offline
and transactional DDL settings.
e.g.::
with context.begin_transaction():
context.run_migrations()
:meth:`.begin_transaction` is intended to
"do the right thing" regardless of
calling context:
* If :meth:`.is_transactional_ddl` is ``False``,
returns a "do nothing" context manager
which otherwise produces no transactional
state or directives.
* If :meth:`.is_offline_mode` is ``True``,
returns a context manager that will
invoke the :meth:`.DefaultImpl.emit_begin`
and :meth:`.DefaultImpl.emit_commit`
methods, which will produce the string
directives ``BEGIN`` and ``COMMIT`` on
the output stream, as rendered by the
target backend (e.g. SQL Server would
emit ``BEGIN TRANSACTION``).
* Otherwise, calls :meth:`sqlalchemy.engine.Connection.begin`
on the current online connection, which
returns a :class:`sqlalchemy.engine.Transaction`
object. This object demarcates a real
transaction and is itself a context manager,
which will roll back if an exception
is raised.
Note that a custom ``env.py`` script which
has more specific transactional needs can of course
manipulate the :class:`~sqlalchemy.engine.Connection`
directly to produce transactional state in "online"
mode.
"""
return self.get_context().begin_transaction()
def get_context(self):
"""Return the current :class:`.MigrationContext` object.
If :meth:`.EnvironmentContext.configure` has not been
called yet, raises an exception.
"""
if self._migration_context is None:
raise Exception("No context has been configured yet.")
return self._migration_context
def get_bind(self):
"""Return the current 'bind'.
In "online" mode, this is the
:class:`sqlalchemy.engine.Connection` currently being used
to emit SQL to the database.
This function requires that a :class:`.MigrationContext`
has first been made available via :meth:`.configure`.
"""
return self.get_context().bind
def get_impl(self):
return self.get_context().impl
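
# --- Illustrative sketch of the ``env.py`` side -----------------------------
# Inside a migration environment, ``from alembic import context`` exposes the
# active EnvironmentContext. A minimal online-mode body then looks like this
# (``connectable`` and ``target_metadata`` are project-specific and assumed to
# exist):
#
#     with connectable.connect() as connection:
#         context.configure(connection=connection,
#                           target_metadata=target_metadata)
#         with context.begin_transaction():
#             context.run_migrations()
#
# while an offline (``--sql``) run only needs a URL:
#
#     context.configure(url=db_url, target_metadata=target_metadata,
#                       literal_binds=True)
#     with context.begin_transaction():
#         context.run_migrations()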
| mit | 2,376,002,507,410,158,600 | 38.319809 | 79 | 0.615781 | false |
pli3/pli3-openpli3 | bitbake/lib/bb/fetch2/git.py | 1 | 12793 | # ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'Fetch' git implementation
git fetcher support the SRC_URI with format of:
SRC_URI = "git://some.host/somepath;OptionA=xxx;OptionB=xxx;..."
Supported SRC_URI options are:
- branch
The git branch to retrieve from. The default is "master"
   This option also supports fetching multiple branches; branches
   are separated by commas. In the multiple-branch case, the name option
   must have the same number of names to match the branches, which is
   used to specify the SRCREV for each branch
e.g:
SRC_URI="git://some.host/somepath;branch=branchX,branchY;name=nameX,nameY"
SRCREV_nameX = "xxxxxxxxxxxxxxxxxxxx"
SRCREV_nameY = "YYYYYYYYYYYYYYYYYYYY"
- tag
The git tag to retrieve. The default is "master"
- protocol
The method to use to access the repository. Common options are "git",
"http", "file" and "rsync". The default is "git"
- rebaseable
rebaseable indicates that the upstream git repo may rebase in the future,
   and the current revision may disappear from the upstream repo. This option
   reminds the fetcher to preserve the local cache carefully for future use.
The default value is "0", set rebaseable=1 for rebaseable git repo
- nocheckout
   Don't check out source code when unpacking. Set this option for recipes
   that have their own routine to check out code.
The default is "0", set nocheckout=1 if needed.
- bareclone
Create a bare clone of the source code and don't checkout the source code
   when unpacking. Set this option for recipes that have their own routine to
   check out code and tracking branch requirements.
The default is "0", set bareclone=1 if needed.
"""
#Copyright (C) 2005 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import bb
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import runfetchcmd
from bb.fetch2 import logger
class Git(FetchMethod):
"""Class to fetch a module or modules from git repositories"""
def init(self, d):
#
# Only enable _sortable revision if the key is set
#
if d.getVar("BB_GIT_CLONE_FOR_SRCREV", True):
self._sortable_buildindex = self._sortable_buildindex_disabled
def supports(self, url, ud, d):
"""
Check to see if a given url can be fetched with git.
"""
return ud.type in ['git']
def urldata_init(self, ud, d):
"""
init git specific variable within url data
so that the git method like latest_revision() can work
"""
if 'protocol' in ud.parm:
ud.proto = ud.parm['protocol']
elif not ud.host:
ud.proto = 'file'
else:
ud.proto = "git"
if not ud.proto in ('git', 'file', 'ssh', 'http', 'https', 'rsync'):
raise bb.fetch2.ParameterError("Invalid protocol type", ud.url)
ud.nocheckout = ud.parm.get("nocheckout","0") == "1"
ud.rebaseable = ud.parm.get("rebaseable","0") == "1"
# bareclone implies nocheckout
ud.bareclone = ud.parm.get("bareclone","0") == "1"
if ud.bareclone:
ud.nocheckout = 1
branches = ud.parm.get("branch", "master").split(',')
if len(branches) != len(ud.names):
raise bb.fetch2.ParameterError("The number of name and branch parameters is not balanced", ud.url)
ud.branches = {}
for name in ud.names:
branch = branches[ud.names.index(name)]
ud.branches[name] = branch
ud.basecmd = data.getVar("FETCHCMD_git", d, True) or "git"
ud.write_tarballs = ((data.getVar("BB_GENERATE_MIRROR_TARBALLS", d, True) or "0") != "0") or ud.rebaseable
ud.setup_revisons(d)
for name in ud.names:
# Ensure anything that doesn't look like a sha256 checksum/revision is translated into one
if not ud.revisions[name] or len(ud.revisions[name]) != 40 or (False in [c in "abcdef0123456789" for c in ud.revisions[name]]):
ud.branches[name] = ud.revisions[name]
ud.revisions[name] = self.latest_revision(ud.url, ud, d, name)
gitsrcname = '%s%s' % (ud.host.replace(':','.'), ud.path.replace('/', '.'))
# for rebaseable git repo, it is necessary to keep mirror tar ball
# per revision, so that even the revision disappears from the
# upstream repo in the future, the mirror will remain intact and still
# contains the revision
if ud.rebaseable:
for name in ud.names:
gitsrcname = gitsrcname + '_' + ud.revisions[name]
ud.mirrortarball = 'git2_%s.tar.gz' % (gitsrcname)
ud.fullmirror = os.path.join(data.getVar("DL_DIR", d, True), ud.mirrortarball)
ud.clonedir = os.path.join(data.expand('${GITDIR}', d), gitsrcname)
ud.localfile = ud.clonedir
def localpath(self, url, ud, d):
return ud.clonedir
def need_update(self, u, ud, d):
if not os.path.exists(ud.clonedir):
return True
os.chdir(ud.clonedir)
for name in ud.names:
if not self._contains_ref(ud.revisions[name], d):
return True
if ud.write_tarballs and not os.path.exists(ud.fullmirror):
return True
return False
def try_premirror(self, u, ud, d):
# If we don't do this, updating an existing checkout with only premirrors
# is not possible
if d.getVar("BB_FETCH_PREMIRRORONLY", True) is not None:
return True
if os.path.exists(ud.clonedir):
return False
return True
def download(self, loc, ud, d):
"""Fetch url"""
if ud.user:
username = ud.user + '@'
else:
username = ""
ud.repochanged = not os.path.exists(ud.fullmirror)
# If the checkout doesn't exist and the mirror tarball does, extract it
if not os.path.exists(ud.clonedir) and os.path.exists(ud.fullmirror):
bb.utils.mkdirhier(ud.clonedir)
os.chdir(ud.clonedir)
runfetchcmd("tar -xzf %s" % (ud.fullmirror), d)
repourl = "%s://%s%s%s" % (ud.proto, username, ud.host, ud.path)
# If the repo still doesn't exist, fallback to cloning it
if not os.path.exists(ud.clonedir):
clone_cmd = "%s clone --bare --mirror %s %s" % (ud.basecmd, repourl, ud.clonedir)
bb.fetch2.check_network_access(d, clone_cmd)
runfetchcmd(clone_cmd, d)
os.chdir(ud.clonedir)
# Update the checkout if needed
needupdate = False
for name in ud.names:
if not self._contains_ref(ud.revisions[name], d):
needupdate = True
if needupdate:
try:
runfetchcmd("%s remote prune origin" % ud.basecmd, d)
runfetchcmd("%s remote rm origin" % ud.basecmd, d)
except bb.fetch2.FetchError:
logger.debug(1, "No Origin")
runfetchcmd("%s remote add --mirror=fetch origin %s" % (ud.basecmd, repourl), d)
fetch_cmd = "%s fetch -f --prune %s refs/*:refs/*" % (ud.basecmd, repourl)
bb.fetch2.check_network_access(d, fetch_cmd, ud.url)
runfetchcmd(fetch_cmd, d)
runfetchcmd("%s prune-packed" % ud.basecmd, d)
runfetchcmd("%s pack-redundant --all | xargs -r rm" % ud.basecmd, d)
ud.repochanged = True
def build_mirror_data(self, url, ud, d):
# Generate a mirror tarball if needed
if ud.write_tarballs and (ud.repochanged or not os.path.exists(ud.fullmirror)):
os.chdir(ud.clonedir)
logger.info("Creating tarball of git repository")
runfetchcmd("tar -czf %s %s" % (ud.fullmirror, os.path.join(".") ), d)
runfetchcmd("touch %s.done" % (ud.fullmirror), d)
def unpack(self, ud, destdir, d):
""" unpack the downloaded src to destdir"""
subdir = ud.parm.get("subpath", "")
if subdir != "":
readpathspec = ":%s" % (subdir)
def_destsuffix = "%s/" % os.path.basename(subdir)
else:
readpathspec = ""
def_destsuffix = "git/"
destsuffix = ud.parm.get("destsuffix", def_destsuffix)
destdir = os.path.join(destdir, destsuffix)
if os.path.exists(destdir):
bb.utils.prunedir(destdir)
cloneflags = "-s -n"
if ud.bareclone:
cloneflags += " --mirror"
runfetchcmd("git clone %s %s/ %s" % (cloneflags, ud.clonedir, destdir), d)
if not ud.nocheckout:
os.chdir(destdir)
if subdir != "":
runfetchcmd("%s read-tree %s%s" % (ud.basecmd, ud.revisions[ud.names[0]], readpathspec), d)
runfetchcmd("%s checkout-index -q -f -a" % ud.basecmd, d)
else:
runfetchcmd("%s checkout %s" % (ud.basecmd, ud.revisions[ud.names[0]]), d)
return True
def clean(self, ud, d):
""" clean the git directory """
bb.utils.remove(ud.localpath, True)
bb.utils.remove(ud.fullmirror)
def supports_srcrev(self):
return True
def _contains_ref(self, tag, d):
basecmd = data.getVar("FETCHCMD_git", d, True) or "git"
cmd = "%s log --pretty=oneline -n 1 %s -- 2> /dev/null | wc -l" % (basecmd, tag)
output = runfetchcmd(cmd, d, quiet=True)
if len(output.split()) > 1:
            raise bb.fetch2.FetchError("The command '%s' gave output with more than 1 line unexpectedly, output: '%s'" % (cmd, output))
return output.split()[0] != "0"
def _revision_key(self, url, ud, d, name):
"""
Return a unique key for the url
"""
return "git:" + ud.host + ud.path.replace('/', '.') + ud.branches[name]
def _latest_revision(self, url, ud, d, name):
"""
Compute the HEAD revision for the url
"""
if ud.user:
username = ud.user + '@'
else:
username = ""
basecmd = data.getVar("FETCHCMD_git", d, True) or "git"
cmd = "%s ls-remote %s://%s%s%s %s" % \
(basecmd, ud.proto, username, ud.host, ud.path, ud.branches[name])
bb.fetch2.check_network_access(d, cmd)
output = runfetchcmd(cmd, d, True)
if not output:
raise bb.fetch2.FetchError("The command %s gave empty output unexpectedly" % cmd, url)
return output.split()[0]
def _build_revision(self, url, ud, d, name):
return ud.revisions[name]
def _sortable_buildindex_disabled(self, url, ud, d, rev):
"""
Return a suitable buildindex for the revision specified. This is done by counting revisions
using "git rev-list" which may or may not work in different circumstances.
"""
cwd = os.getcwd()
# Check if we have the rev already
if not os.path.exists(ud.clonedir):
logger.debug(1, "GIT repository for %s does not exist in %s. \
Downloading.", url, ud.clonedir)
self.download(None, ud, d)
if not os.path.exists(ud.clonedir):
logger.error("GIT repository for %s does not exist in %s after \
download. Cannot get sortable buildnumber, using \
old value", url, ud.clonedir)
return None
os.chdir(ud.clonedir)
if not self._contains_ref(rev, d):
self.download(None, ud, d)
output = runfetchcmd("%s rev-list %s -- 2> /dev/null | wc -l" % (ud.basecmd, rev), d, quiet=True)
os.chdir(cwd)
buildindex = "%s" % output.split()[0]
logger.debug(1, "GIT repository for %s in %s is returning %s revisions in rev-list before %s", url, ud.clonedir, buildindex, rev)
return buildindex
def checkstatus(self, uri, ud, d):
fetchcmd = "%s ls-remote %s" % (ud.basecmd, uri)
try:
runfetchcmd(fetchcmd, d, quiet=True)
return True
except FetchError:
return False
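
# --- Illustrative sketch (not part of the fetcher) ---------------------------
# Shows how the mirror tarball name is derived in urldata_init() above. Pure
# string handling, no BitBake environment required; the host, path and
# revision values are made up.
if __name__ == "__main__":
    host, path = "git.example.com", "/myproject.git"
    revisions = {"default": "0123456789abcdef0123456789abcdef01234567"}
    rebaseable = True
    gitsrcname = '%s%s' % (host.replace(':', '.'), path.replace('/', '.'))
    if rebaseable:
        # rebaseable repos keep one mirror tarball per revision
        for name in revisions:
            gitsrcname = gitsrcname + '_' + revisions[name]
    print('git2_%s.tar.gz' % gitsrcname)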
| gpl-2.0 | 7,850,380,181,073,448,000 | 37.649547 | 140 | 0.59853 | false |
adybbroe/pygac | pygac/calibration.py | 1 | 27049 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014-2015, 2019 Pytroll Developers
# Author(s):
# Martin Raspaud <[email protected]>
# Abhay Devasthale <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Calibration coefficients and generic calibration functions
"""
from __future__ import division
import numpy as np
coeffs = {
'metopb': {'ah': np.array([0.166, 0.183, 0.201]),
'al': np.array([0.055, 0.061, 0.029]),
'bh': np.array([2.019, 1.476, 1.478]),
'bl': np.array([2.019, 1.476, 1.478]),
'ch': np.array([-0.201, -0.137, -0.033]),
'cl': np.array([-0.201, -0.137, -0.033]),
'c_dark': np.array([39.70, 40.00, 40.30]),
'c_s': np.array([501.12, 500.82, 501.32]),
'l_date': 2012.77,
'd': np.array([[0, 0, 0, 0, 0], # reset prt
[276.6194, 0.050919, 1.471E-06, 0.0, 0.0],
[276.6511, 0.050892, 1.489E-06, 0.0, 0.0],
[276.6597, 0.050845, 1.521E-06, 0.0, 0.0],
[276.3685, 0.050992, 1.482E-06, 0.0, 0.0]]),
'n_s': np.array([0.0, -4.98, -3.40]),
'c_wn': np.array([2664.3384, 933.71521, 839.72764]),
'a': np.array([1.7711318, 0.51860807, 0.40059787]),
'b': np.array([1.0 / 1.0029931,
1.0 / 1.0013778,
1.0 / 1.0011702]),
'b0': np.array([0.0, 5.44, 3.84]),
'b1': np.array([1 - 0.0, 1 - 0.10152, 1 - 0.06249]),
'b2': np.array([0.0, 0.00046964, 0.00025239]),
},
'metopa': {'ah': np.array([0.169, 0.199, 0.213]),
'al': np.array([0.056, 0.066, 0.030]),
'bh': np.array([0.609, 0.980, -0.016]),
'bl': np.array([0.609, 0.980, -0.016]),
'ch': np.array([-0.029, -0.016, -0.033]),
'cl': np.array([-0.029, -0.016, -0.033]),
'c_dark': np.array([40.43, 39.75, 41.8]),
'c_s': np.array([501.0, 500.0, 502.0]),
'l_date': 2006.7995,
'd': np.array([[0, 0, 0, 0, 0], # reset prt
[276.6194, 0.050919, 1.471E-06, 0.0, 0.0],
[276.6511, 0.050892, 1.489E-06, 0.0, 0.0],
[276.6597, 0.050845, 1.521E-06, 0.0, 0.0],
[276.3685, 0.050992, 1.482E-06, 0.0, 0.0]]),
'n_s': np.array([0.0, -4.98, -3.40]),
'c_wn': np.array([2687.0392, 927.27630, 837.80762]),
'a': np.array([2.0653147, 0.56503332, 0.38472766]),
'b': np.array([1.0 / 1.0034418,
1.0 / 1.0015090,
1.0 / 1.0011264]),
'b0': np.array([0.0, 5.44, 3.84]),
'b1': np.array([1 - 0.0, 1 - 0.10152, 1 - 0.06249]),
'b2': np.array([0.0, 0.00046964, 0.00025239]),
},
'tirosn': {'ah': np.array([0.115, 0.133, 0.1]),
'al': np.array([0.115, 0.133, 0.1]),
'bh': np.array([5.110, 0.717, 0.0]),
'bl': np.array([5.110, 0.717, 0.0]),
'ch': np.array([0.0, 0.0, 0.0]),
'cl': np.array([0.0, 0.0, 0.0]),
'c_dark': np.array([39.44, 39.40, 37.51]),
'l_date': 1978.783,
'd': np.array([[0, 0, 0, 0, 0], # reset prt
[277.659, 0.051275, 1.363e-06, 0, 0],
[276.659, 0.051275, 1.363e-06, 0, 0],
[276.659, 0.051275, 1.363e-06, 0, 0],
[276.659, 0.051275, 1.363e-06, 0, 0]]),
'n_s': np.array([-0.0039, -8.130, -8.130]),
'c_wn': np.array([2655.7409, 913.0537, 913.0537]),
'a': np.array([1.6485446, 0.53135445, 0.53135445]),
'b': np.array([1.0 / 1.0020894,
1.0 / 1.0014343,
1.0 / 1.0014343]),
'b1': np.array([1.0 - 0.015, 1.0 - 0.131942, 1.0 - 0.131942]),
'b2': np.array([0.011, 0.000673193, 0.000673193]),
'b0': np.array([0.00195, 6.13, 6.13]),
},
'noaa6': {'ah': np.array([0.133, 0.128, 0.10]),
'al': np.array([0.133, 0.128, 0.10]),
'bh': np.array([0.900, 0.699, 0.0]),
'bl': np.array([0.900, 0.699, 0.0]),
'ch': np.array([0.0, 0.0, 0.0]),
'cl': np.array([0.0, 0.0, 0.0]),
'c_dark': np.array([39.44, 39.40, 37.51]),
'l_date': 1979.490,
'd': np.array([[0, 0, 0, 0, 0], # reset prt
[277.659, 0.051275, 1.363e-06, 0, 0],
[276.659, 0.051275, 1.363e-06, 0, 0],
[276.659, 0.051275, 1.363e-06, 0, 0],
[276.659, 0.051275, 1.363e-06, 0, 0]]),
'n_s': np.array([0.0, -3.26, -3.26]),
'c_wn': np.array([2671.5433, 913.46088, 913.46088]),
'a': np.array([1.76671100, 0.50395970, 0.50395970]),
'b': np.array([1.0 / 1.0024428,
1.0 / 1.0013592,
1.0 / 1.0013592]),
'b1': np.array([1.0, 1.0 - 0.03964, 1.0 - 0.03964]),
'b2': np.array([0.0, 0.00016925, 0.00016925]),
'b0': np.array([0.0, 2.24, 2.24]),
},
'noaa7': {'ah': np.array([0.115, 0.127, 0.10]),
'al': np.array([0.115, 0.127, 0.10]),
'bh': np.array([3.792, 2.685, 0.0]),
'bl': np.array([3.972, 2.685, 0.0]),
'ch': np.array([-0.269, -0.101, 0.0]),
'cl': np.array([-0.269, -0.101, 0.0]),
'c_dark': np.array([36.0, 37.0, 39.0]),
'l_date': 1981.4764,
'd': np.array([[0, 0, 0, 0, 0], # reset prt
[277.099, 5.048E-2, 2.823E-6, 0, 0],
[276.734, 5.069E-2, 2.493E-6, 0, 0],
[276.876, 5.148E-2, 1.040E-6, 0, 0],
[276.160, 5.128E-2, 1.414E-6, 0, 0]]),
'n_s': np.array([0.0, -5.16, -4.28]),
'c_wn': np.array([2684.5233, 928.23757, 841.52137]),
'a': np.array([1.94882690, 0.52807997, 0.40557027]),
'b': np.array([1.0 / 1.0029260,
1.0 / 1.0014039,
1.0 / 1.0011789]),
'b1': np.array([1.0, 0.89783, 0.93683]),
'b2': np.array([0.0, 0.0004819, 0.0002425]),
'b0': np.array([0.0, 5.25, 3.93]),
},
'noaa8': {'ah': np.array([0.119, 0.136, 0.10]),
'al': np.array([0.119, 0.136, 0.10]),
'bh': np.array([6.065, 7.248, 0.0]),
'bl': np.array([6.065, 7.248, 0.0]),
'ch': np.array([0.0, 0.0, 0.0]),
'cl': np.array([0.0, 0.0, 0.0]),
'c_dark': np.array([39.44, 39.40, 37.51]),
'l_date': 1983.241,
'd': np.array([[0, 0, 0, 0, 0], # reset prt
[276.659, 0.051275, 1.363e-06, 0, 0],
[276.659, 0.051275, 1.363e-06, 0, 0],
[276.659, 0.051275, 1.363e-06, 0, 0],
[276.659, 0.051275, 1.363e-06, 0, 0]]),
'n_s': np.array([0.0, -3.26, -3.26]),
'c_wn': np.array([2651.3776, 915.30330, 915.30330]),
'a': np.array([1.76641050, 0.50017997, 0.50017997]),
'b': np.array([1.0 / 1.0024260,
1.0 / 1.0013460,
1.0 / 1.0013460]),
'b1': np.array([1.0, 1.0 - 0.03964, 1.0 - 0.03964]),
'b2': np.array([0.0, 0.00016925, 0.00016925]),
'b0': np.array([0.0, 2.24, 2.24]),
},
'noaa9': {'ah': np.array([0.108, 0.122, 0.10]),
'al': np.array([0.108, 0.122, 0.10]),
'bh': np.array([4.255, 0.310, 0.0]),
'bl': np.array([4.255, 0.310, 0.0]),
'ch': np.array([0.640, 0.642, 0.0]),
'cl': np.array([0.640, 0.642, 0.0]),
'c_dark': np.array([38.0, 40.0, 38.0]),
'l_date': 1984.9480,
'd': np.array([[0, 0, 0, 0, 0], # reset prt
[277.018000, 0.051280, 0.0, 0, 0],
[276.750000, 0.051280, 0.0, 0, 0],
[276.862000, 0.051280, 0.0, 0, 0],
[276.546000, 0.051280, 0.0, 0, 0]]),
'n_s': np.array([0.0, -5.530, -3.06]),
'c_wn': np.array([2690.0451, 930.50230, 845.75000]),
'a': np.array([1.8832662, 0.5115335, 0.3882150]),
'b': np.array([1.0 / 1.0028978,
1.0 / 1.0013570,
1.0 / 1.0011210]),
'b1': np.array([1.0, 0.88643, 0.95311]),
'b2': np.array([0.0, 0.0006033, 0.0002198]),
'b0': np.array([0.0, 5.24, 2.42]),
},
'noaa10': {'ah': np.array([0.111, 0.137, 0.10]),
'al': np.array([0.111, 0.137, 0.10]),
'bh': np.array([6.087, 0.119, 0.0]),
'bl': np.array([6.087, 0.119, 0.0]),
'ch': np.array([-1.039, 0.123, 0.0]),
'cl': np.array([-1.039, 0.123, 0.0]),
'c_dark': np.array([39.44, 39.40, 37.51]),
'l_date': 1986.712,
'd': np.array([[0, 0, 0, 0, 0], # reset prt
[276.659, 0.051275, 1.363e-06, 0, 0],
[276.659, 0.051275, 1.363e-06, 0, 0],
[276.659, 0.051275, 1.363e-06, 0, 0],
[276.659, 0.051275, 1.363e-06, 0, 0]]),
'n_s': np.array([0.0, -7.27, -7.29]),
'c_wn': np.array([2672.6164, 910.49626, 910.49626]),
'a': np.array([1.7986926, 0.45707063, 0.45707063]),
'b': np.array([1.0 / 1.0026326,
1.0 / 1.0012272,
1.0 / 1.0012272]),
'b1': np.array([1.0, 1.0 - 0.1157, 1.0 - 0.1157]),
'b2': np.array([0.0, 0.0005885, 0.0005882]),
'b0': np.array([0.0, 5.76, 5.76]),
},
'noaa11': {'ah': np.array([0.110, 0.118, 0.0]),
'al': np.array([0.110, 0.118, 0.0]),
'bh': np.array([0.632, -0.037, 0.0]),
'bl': np.array([0.632, -0.037, 0.0]),
'ch': np.array([-0.044, 0.072, 0.0]),
'cl': np.array([-0.044, 0.072, 0.0]),
'c_dark': np.array([40.0, 40.0, 40.0]),
'l_date': 1988.7310,
'd': np.array([[0, 0, 0, 0, 0], # reset prt
[276.597, 0.051275, 1.363e-06, 0, 0],
[276.597, 0.051275, 1.363e-06, 0, 0],
[276.597, 0.051275, 1.363e-06, 0, 0],
[276.597, 0.051275, 1.363e-06, 0, 0]]),
'n_s': np.array([0.0, -8.055, -3.51]),
'c_wn': np.array([2680.05, 927.462, 840.746]),
'a': np.array([1.738973, 0.321199, 0.048652]),
'b': np.array([1.0 / 1.003354,
1.0 / 1.001213,
1.0 / 1.000664]),
'b1': np.array([1.0, 0.84120, 0.94598]),
'b2': np.array([0.0, 0.0008739, 0.0002504]),
'b0': np.array([0.0, 7.21, 2.92]),
},
'noaa12': {'ah': np.array([0.121, 0.148, 0.10]),
'al': np.array([0.121, 0.148, 0.10]),
'bh': np.array([2.032, 1.323, 0.0]),
'bl': np.array([2.032, 1.323, 0.0]),
'ch': np.array([-0.032, -0.008, 0.0]),
'cl': np.array([-0.032, -0.008, 0.0]),
'c_dark': np.array([41.0, 40.0, 40.0]),
'l_date': 1991.3669,
'd': np.array([[0, 0, 0, 0, 0], # reset prt
[276.597, 0.051275, 1.363e-06, 0, 0],
[276.597, 0.051275, 1.363e-06, 0, 0],
[276.597, 0.051275, 1.363e-06, 0, 0],
[276.597, 0.051275, 1.363e-06, 0, 0]]),
'n_s': np.array([0.0, -5.510, -2.51]),
'c_wn': np.array([2651.7708, 922.36261, 838.02678]),
'a': np.array([1.90527390, 0.63404209, 0.41086587]),
'b': np.array([1.0 / 1.0030100,
1.0 / 1.0017076,
1.0 / 1.0012010]),
'b1': np.array([1.0, 0.88929, 0.96299]),
'b2': np.array([0.0, 0.0005968, 0.0001775]),
'b0': np.array([0.0, 5.11, 1.91]),
},
'noaa14': {'ah': np.array([0.121, 0.152, 0.10]),
'al': np.array([0.121, 0.152, 0.10]),
'bh': np.array([3.555, 0.254, 0.0]),
'bl': np.array([3.555, 0.254, 0.0]),
'ch': np.array([-0.339, 0.201, 0.0]),
'cl': np.array([-0.339, 0.201, 0.0]),
'c_dark': np.array([41.0, 41.0, 39.0]),
'l_date': 1994.9966,
'd': np.array([[0, 0, 0, 0, 0], # reset prt
[276.597, 0.051275, 1.363e-06, 0, 0],
[276.597, 0.051275, 1.363e-06, 0, 0],
[276.597, 0.051275, 1.363e-06, 0, 0],
[276.597, 0.051275, 1.363e-06, 0, 0]]),
'n_s': np.array([0.0069, -4.05, -2.29]),
'c_wn': np.array([2654.25, 928.349, 833.040]),
'a': np.array([1.885330, 0.308384, 0.022171]),
'b': np.array([1.0 / 1.003839, 1.0 / 1.001443, 1.0 / 1.000538]),
'b1': np.array([1.00359, 0.92378, 0.96194]),
'b2': np.array([0.0, 0.0003822, 0.0001742]),
'b0': np.array([-0.0031, 3.72, 2.00]),
},
'noaa15': {'ah': np.array([0.179, 0.206, 0.175]),
'al': np.array([0.060, 0.069, 0.025]),
'bh': np.array([-0.069, 0.339, 0.0]),
'bl': np.array([-0.069, 0.339, 0.0]),
'ch': np.array([0.002, -0.010, 0.0]),
'cl': np.array([0.002, -0.010, 0.0]),
'c_dark': np.array([39.0, 40.0, 39.0]),
'c_s': np.array([500.0, 500.0, 500.0]),
'l_date': 1998.3641,
'd': np.array([[0, 0, 0, 0, 0], # reset prt
[276.60157, 0.051045, 1.36328E-06, 0.0, 0.0],
[276.62531, 0.050909, 1.47266E-06, 0.0, 0.0],
[276.67413, 0.050907, 1.47656E-06, 0.0, 0.0],
[276.59258, 0.050966, 1.47656E-06, 0.0, 0.0]]),
'n_s': np.array([0.0, -4.50, -3.61]),
'c_wn': np.array([2695.9743, 925.4075, 839.8979]),
'a': np.array([1.624481, 0.338243, 0.304856]),
'b': np.array([1.0 / 1.001989,
1.0 / 1.001283,
1.0 / 1.000977]),
'b0': np.array([0.0, 4.76, 3.83]),
'b1': np.array([1 - 0.0, 1 - 0.0932, 1 - 0.0659]),
'b2': np.array([0.0, 0.0004524, 0.0002811]),
},
'noaa16': {'ah': np.array([0.165, 0.179, 0.187]),
'al': np.array([0.055, 0.060, 0.027]),
'bh': np.array([0.839, 0.786, 0.290]),
'bl': np.array([0.839, 0.786, 0.290]),
'ch': np.array([-0.051, -0.031, -0.294]),
'cl': np.array([-0.051, -0.031, -0.294]),
'c_dark': np.array([39.3, 38.9, 38.4]),
'c_s': np.array([498.96, 500.17, 499.43]),
'l_date': 2000.7228,
'd': np.array([[0, 0, 0, 0, 0], # reset prt
[276.355, 5.562E-02, -1.590E-05,
2.486E-08, -1.199E-11],
[276.142, 5.605E-02, -1.707E-05,
2.595E-08, -1.224E-11],
[275.996, 5.486E-02, -1.223E-05,
1.862E-08, -0.853E-11],
[276.132, 5.494E-02, -1.344E-05,
2.112E-08, -1.001E-11]]),
'n_s': np.array([0.0, -2.467, -2.009]),
'c_wn': np.array([2681.2540, 922.34790, 834.61814]),
'a': np.array([1.6774586, 0.55636216, 0.41430789]),
'b': np.array([1.0 / 1.0017316,
1.0 / 1.0014921,
1.0 / 1.0012166]),
'b0': np.array([0.0, 2.96, 2.25]),
'b1': np.array([1 - 0.0, 1 - 0.05411, 1 - 0.03665]),
'b2': np.array([0.0, 0.00024532, 0.00014854]),
},
'noaa17': {'ah': np.array([0.172, 0.210, 0.209]),
'al': np.array([0.057, 0.070, 0.030]),
'bh': np.array([1.007, 1.474, 2.787]),
'bl': np.array([1.007, 1.474, 2.787]),
'ch': np.array([-0.044, -0.118, -0.292]),
'cl': np.array([-0.044, -0.118, -0.292]),
'c_dark': np.array([39.99, 39.09, 42.09]),
'c_s': np.array([501.12, 500.73, 501.37]),
'l_date': 2002.47912,
'd': np.array([[0, 0, 0, 0, 0], # reset prt
[276.628, 0.05098, 1.371E-06, 0.0, 0.0],
[276.538, 0.05098, 1.371E-06, 0.0, 0.0],
[276.761, 0.05097, 1.369E-06, 0.0, 0.0],
[276.660, 0.05100, 1.348E-06, 0.0, 0.0]]),
'n_s': np.array([0.0, -8.55, -3.97]),
'c_wn': np.array([2669.1414, 928.29959, 840.20289]),
'a': np.array([1.70002941, 0.56634758, 0.37264803]),
'b': np.array([1.0 / 1.0026724,
1.0 / 1.0015205,
1.0 / 1.0010841]),
'b0': np.array([0.0, 8.22, 4.31]),
'b1': np.array([1 - 0.0, 1 - 0.15795, 1 - 0.07318]),
'b2': np.array([0.0, 0.00075579, 0.00030976]),
},
'noaa18': {'ah': np.array([0.171, 0.192, 0.175]),
'al': np.array([0.057, 0.064, 0.025]),
'bh': np.array([0.603, 0.632, 0.0]),
'bl': np.array([0.603, 0.632, 0.0]),
'ch': np.array([0.0, 0.045, 0.0]),
'cl': np.array([0.0, 0.045, 0.0]),
'c_dark': np.array([39.44, 39.40, 37.51]),
'c_s': np.array([500.54, 500.40, 500.56]),
'l_date': 2005.3833,
'd': np.array([[0, 0, 0, 0, 0], # reset prt
[276.601, 0.05090, 1.657E-06, 0.0, 0.0],
[276.683, 0.05101, 1.482E-06, 0.0, 0.0],
[276.565, 0.05117, 1.313E-06, 0.0, 0.0],
[276.615, 0.05103, 1.484E-06, 0.0, 0.0]]),
'n_s': np.array([0.0, -5.53, -2.22]),
'c_wn': np.array([2660.6468, 928.73452, 834.08306]),
'a': np.array([1.7222650, 0.54696239, 0.39938376]),
'b': np.array([1.0 / 1.0028633,
1.0 / 1.0014581,
1.0 / 1.0011724]),
'b0': np.array([0.0, 5.82, 2.67]),
'b1': np.array([1 - 0.0, 1 - 0.11069, 1 - 0.04360]),
'b2': np.array([0.0, 0.00052337, 0.00017715]),
},
'noaa19': {'ah': np.array([0.162, 0.183, 0.175]),
'al': np.array([0.054, 0.061, 0.025]),
'bh': np.array([0.626, 0.950, 0.0]),
'bl': np.array([0.626, 0.950, 0.0]),
'ch': np.array([-0.044, -0.039, 0.0]),
'cl': np.array([-0.044, -0.039, 0.0]),
'c_dark': np.array([38.8, 39.00, 39.4]),
'c_s': np.array([496.43, 500.37, 496.11]),
'l_date': 2009.096,
'd': np.array([[0, 0, 0, 0, 0], # reset prt
[276.6067, 0.051111, 1.405783e-06, 0, 0],
[276.6119, 0.051090, 1.496037e-06, 0, 0],
[276.6311, 0.051033, 1.496990e-06, 0, 0],
[276.6268, 0.051058, 1.493110e-06, 0, 0]]),
'n_s': np.array([0.0, -5.49, -3.39]),
'c_wn': np.array([2670.2425, 927.92374, 831.28619]),
'a': np.array([1.6863857, 0.39419031, 0.26364620]),
'b': np.array([1.0 / 1.0025955,
1.0 / 1.0013299,
1.0 / 1.0009546]),
'b0': np.array([0.0, 5.70, 3.58]),
'b1': np.array([1 - 0.0, 1 - 0.11187, 1 - 0.05991]),
'b2': np.array([0.0, 0.00054668, 0.00024985])}}
class Calibrator(object):
def __init__(self, spacecraft):
self.ah = None
self.al = None
self.bh = None
self.bl = None
self.ch = None
self.cl = None
self.c_s = None
self.c_dark = None
self.l_date = None
self.d = None
self.n_s = None
self.c_wn = None
self.a = None
self.b = None
self.b0 = None
self.b1 = None
self.b2 = None
self.__dict__.update(coeffs[spacecraft])
def calibrate_solar(counts, chan, year, jday, spacecraft, corr=1):
"""Do the solar calibration and return reflectance (between 0 and 100)."""
cal = Calibrator(spacecraft)
t = (year + jday / 365.0) - cal.l_date
stl = (cal.al[chan] * (100.0 + cal.bl[chan] * t
+ cal.cl[chan] * t * t)) / 100.0
sth = (cal.ah[chan] * (100.0 + cal.bh[chan] * t
+ cal.ch[chan] * t * t)) / 100.0
if cal.c_s is not None:
return np.where(counts <= cal.c_s[chan],
(counts - cal.c_dark[chan]) * stl * corr,
((cal.c_s[chan] - cal.c_dark[chan]) * stl
+ (counts - cal.c_s[chan]) * sth) * corr)
else:
return (counts - cal.c_dark[chan]) * stl * corr
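# The helper below is an illustrative sketch added for clarity and is not part of
# the original pygac module: it shows one plausible way calibrate_solar might be
# called for AVHRR channel 1 of NOAA-19. The count values, date and channel index
# are assumptions made purely for demonstration.
def _example_calibrate_solar():
    counts = np.array([[410.0, 520.0],
                       [640.0, 980.0]])
    # chan=0 is AVHRR channel 1; the result is reflectance in percent (0-100).
    return calibrate_solar(counts, chan=0, year=2012, jday=150,
                           spacecraft='noaa19')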
def calibrate_thermal(counts, prt, ict, space, line_numbers, channel, spacecraft):
"""Do the thermal calibration and return brightness temperatures (K)."""
cal = Calibrator(spacecraft)
chan = channel - 3
lines, columns = counts.shape[:2]
offset = 0
for i, prt_val in enumerate(prt):
if prt_val < 50:
offset = i
break
iprt = (line_numbers - line_numbers[0] + 5 - offset) % 5
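    # PRT counts below 50 are treated as missing/reset readings; each of the four
    # PRT sequences is gap-filled below by linear interpolation over valid values.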
ifix = np.where(np.logical_and(iprt == 1, prt < 50))
if len(ifix[0]):
inofix = np.where(np.logical_and(iprt == 1, prt > 50))
prt[ifix] = np.interp(ifix[0], inofix[0], prt[inofix])
ifix = np.where(np.logical_and(iprt == 2, prt < 50))
if len(ifix[0]):
inofix = np.where(np.logical_and(iprt == 2, prt > 50))
prt[ifix] = np.interp(ifix[0], inofix[0], prt[inofix])
ifix = np.where(np.logical_and(iprt == 3, prt < 50))
if len(ifix[0]):
inofix = np.where(np.logical_and(iprt == 3, prt > 50))
prt[ifix] = np.interp(ifix[0], inofix[0], prt[inofix])
ifix = np.where(np.logical_and(iprt == 4, prt < 50))
if len(ifix[0]):
inofix = np.where(np.logical_and(iprt == 4, prt > 50))
prt[ifix] = np.interp(ifix[0], inofix[0], prt[inofix])
tprt = (cal.d[iprt, 0] + prt *
(cal.d[iprt, 1] + prt *
(cal.d[iprt, 2] + prt *
(cal.d[iprt, 3] + prt *
(cal.d[iprt, 4])))))
zeros = iprt == 0
nonzeros = np.logical_not(zeros)
tprt[zeros] = np.interp((zeros).nonzero()[0],
(nonzeros).nonzero()[0],
tprt[nonzeros])
if channel == 3:
zeros = ict < 100
nonzeros = np.logical_not(zeros)
ict[zeros] = np.interp((zeros).nonzero()[0],
(nonzeros).nonzero()[0],
ict[nonzeros])
zeros = space < 100
nonzeros = np.logical_not(zeros)
space[zeros] = np.interp((zeros).nonzero()[0],
(nonzeros).nonzero()[0],
space[nonzeros])
# convolving and smoothing PRT, ICT and SPACE values
if lines > 51:
wlength = 51
else:
wlength = 3
weighting_function = np.ones(wlength, dtype=float) / wlength
tprt_convolved = np.convolve(tprt, weighting_function, 'same')
ict_convolved = np.convolve(ict, weighting_function, 'same')
space_convolved = np.convolve(space, weighting_function, 'same')
# take care of the beginning and end
tprt_convolved[0:(wlength - 1) // 2] = tprt_convolved[(wlength - 1) // 2]
ict_convolved[0:(wlength - 1) // 2] = ict_convolved[(wlength - 1) // 2]
space_convolved[0:(wlength - 1) // 2] = space_convolved[(wlength - 1) // 2]
tprt_convolved[-(wlength - 1) // 2:] = tprt_convolved[-((wlength + 1) // 2)]
ict_convolved[-(wlength - 1) // 2:] = ict_convolved[-((wlength + 1) // 2)]
space_convolved[-(wlength - 1) // 2:] = \
space_convolved[-((wlength + 1) // 2)]
new_tprt = np.transpose(np.tile(tprt_convolved, (columns, 1)))
new_ict = np.transpose(np.tile(ict_convolved, (columns, 1)))
new_space = np.transpose(np.tile(space_convolved, (columns, 1)))
# calibrating thermal channel
tBB = new_tprt
tsBB = cal.a[chan] + cal.b[chan] * tBB
nBB_num = (1.1910427 * 0.000010) * cal.c_wn[chan] ** 3
nBB = nBB_num / (np.exp((1.4387752 * cal.c_wn[chan]) / tsBB) - 1.0)
Nlin = (cal.n_s[chan] +
(((nBB - cal.n_s[chan])
* (new_space - counts.astype(float)))
/ (new_space - new_ict)))
Ncor = cal.b0[chan] + Nlin * (cal.b1[chan] + cal.b2[chan] * Nlin)
Ne = Ncor
tsE = ((1.4387752 * cal.c_wn[chan])
/ np.log(1.0 + nBB_num / Ne))
bt = (tsE - cal.a[chan]) / cal.b[chan]
if chan == 0:
bt = np.where((counts - new_space) >= 0, 0.0, bt)
return bt
| gpl-3.0 | 7,217,521,221,545,139,000 | 47.47491 | 82 | 0.409553 | false |
sgabe/Enumerator | enumerator/lib/services/ssl.py | 1 | 2170 | #!/usr/bin/env python
"""
The SSL module performs ssl-related
enumeration tasks.
@author: Gabor Seljan (gabor<at>seljan.hu)
@version: 1.0
"""
import sys
from ..config import Config
from ..process_manager import ProcessManager
from ..generic_service import GenericService
class SslEnumeration(GenericService, ProcessManager):
SERVICE_DEFINITION = 'service:ssl/http,ssl/smtp,ssl/imap,ssl/pop3,https,smtps,imaps,pop3s'
PROCESSES = [{
'command': 'nmap -sV -Pn -p %(port)s %(scan_mode)s \
--script=ssl-ccs-injection,ssl-cert,ssl-date,ssl-enum-ciphers,ssl-google-cert-catalog,ssl-heartbleed,ssl-known-key,ssl-poodle,sslv2 \
-oN %(output_dir)s/%(host)s-ssl-%(port)s-standard.txt %(host)s',
'normal': '-T4',
'stealth': '-T2',
}, {
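        # testssl's ANSI-coloured output is piped through aha (Ansi HTML Adapter)
        # so it lands as a readable HTML report in the output directory.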
'command': 'testssl %(host)s:%(port)s | aha > %(output_dir)s/%(host)s-ssl-%(port)s-testssl.html',
'normal': '',
'stealth': '',
}]
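    # The command strings above are templates; scan() fills the %(host)s, %(port)s,
    # %(output_dir)s and %(scan_mode)s placeholders via the params dict it passes
    # to start_processes().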
def scan(self, directory, service_parameters):
"""Iterates over PROCESSES and builds
the specific parameters required for
command line execution of each process.
@param directory: Directory path where
final command output will go.
@param service_parameters: Dictionary with
key:value pairs of service-related data.
"""
ip = service_parameters.get('ip')
port = service_parameters.get('port')
config = Config().ssl
print '[+] enumerating SSL service on host %s port %s' % (ip, port)
for process in self.PROCESSES:
self.start_processes(process.get('command'), params={
'host': ip,
'port': port,
'output_dir': directory,
'scan_mode': process.get(config['mode']),
}, display_exception=False)
if __name__ == '__main__':
"""For testing purposes, this
module can be executed as a script.
Use the following syntax from the root
directory of enumerator:
python -m lib.services.ssl <ip> <port> <output directory>
"""
ssl = SslEnumeration()
ssl.scan(sys.argv[3], dict(ip=sys.argv[1], port=sys.argv[2]))
| mit | -6,150,778,650,805,814,000 | 33.444444 | 145 | 0.614747 | false |
Azure/azure-sdk-for-python | sdk/tables/azure-data-tables/azure/data/tables/_generated/operations/_service_operations.py | 1 | 13459 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ServiceOperations(object):
"""ServiceOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.data.tables.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def set_properties(
self,
table_service_properties, # type: "_models.TableServiceProperties"
timeout=None, # type: Optional[int]
request_id_parameter=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Sets properties for an account's Table service endpoint, including properties for Analytics and
CORS (Cross-Origin Resource Sharing) rules.
:param table_service_properties: The Table Service properties.
:type table_service_properties: ~azure.data.tables.models.TableServiceProperties
:param timeout: The timeout parameter is expressed in seconds.
:type timeout: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when analytics logging is enabled.
:type request_id_parameter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
restype = "service"
comp = "properties"
content_type = kwargs.pop("content_type", "application/xml")
accept = "application/xml"
# Construct URL
url = self.set_properties.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(table_service_properties, 'TableServiceProperties', is_xml=True)
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.TableServiceError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
if cls:
return cls(pipeline_response, None, response_headers)
set_properties.metadata = {'url': '/'} # type: ignore
def get_properties(
self,
timeout=None, # type: Optional[int]
request_id_parameter=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.TableServiceProperties"
"""Gets the properties of an account's Table service, including properties for Analytics and CORS
(Cross-Origin Resource Sharing) rules.
:param timeout: The timeout parameter is expressed in seconds.
:type timeout: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when analytics logging is enabled.
:type request_id_parameter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TableServiceProperties, or the result of cls(response)
:rtype: ~azure.data.tables.models.TableServiceProperties
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TableServiceProperties"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
restype = "service"
comp = "properties"
accept = "application/xml"
# Construct URL
url = self.get_properties.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.TableServiceError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
deserialized = self._deserialize('TableServiceProperties', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_properties.metadata = {'url': '/'} # type: ignore
def get_statistics(
self,
timeout=None, # type: Optional[int]
request_id_parameter=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.TableServiceStats"
"""Retrieves statistics related to replication for the Table service. It is only available on the
secondary location endpoint when read-access geo-redundant replication is enabled for the
account.
:param timeout: The timeout parameter is expressed in seconds.
:type timeout: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when analytics logging is enabled.
:type request_id_parameter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TableServiceStats, or the result of cls(response)
:rtype: ~azure.data.tables.models.TableServiceStats
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TableServiceStats"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
restype = "service"
comp = "stats"
accept = "application/xml"
# Construct URL
url = self.get_statistics.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.TableServiceError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
deserialized = self._deserialize('TableServiceStats', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_statistics.metadata = {'url': '/'} # type: ignore
| mit | -7,503,523,874,916,741,000 | 49.788679 | 133 | 0.653838 | false |
Ghalko/waterbutler | waterbutler/providers/s3/provider.py | 1 | 10178 | import os
import asyncio
import hashlib
from urllib import parse
import xmltodict
from boto.s3.connection import S3Connection
from boto.s3.connection import OrdinaryCallingFormat
from boto.s3.connection import SubdomainCallingFormat
from waterbutler.core import streams
from waterbutler.core import provider
from waterbutler.core import exceptions
from waterbutler.core.path import WaterButlerPath
from waterbutler.providers.s3 import settings
from waterbutler.providers.s3.metadata import S3Revision
from waterbutler.providers.s3.metadata import S3FileMetadata
from waterbutler.providers.s3.metadata import S3FolderMetadata
from waterbutler.providers.s3.metadata import S3FolderKeyMetadata
from waterbutler.providers.s3.metadata import S3FileMetadataHeaders
class S3Provider(provider.BaseProvider):
"""Provider for the Amazon's S3
"""
NAME = 's3'
def __init__(self, auth, credentials, settings):
"""
.. note::
Neither `S3Connection#__init__` nor `S3Connection#get_bucket`
sends a request.
:param dict auth: Not used
:param dict credentials: Dict containing `access_key` and `secret_key`
:param dict settings: Dict containing `bucket`
"""
super().__init__(auth, credentials, settings)
# If a bucket has capital letters in the name
# ordinary calling format MUST be used
if settings['bucket'] != settings['bucket'].lower():
calling_format = OrdinaryCallingFormat()
else:
# if a bucket is out of the us Subdomain calling format MUST be used
calling_format = SubdomainCallingFormat()
self.connection = S3Connection(credentials['access_key'],
credentials['secret_key'], calling_format=calling_format)
self.bucket = self.connection.get_bucket(settings['bucket'], validate=False)
@asyncio.coroutine
def validate_path(self, path, **kwargs):
return WaterButlerPath(path)
def can_intra_copy(self, dest_provider, path=None):
return type(self) == type(dest_provider) and not getattr(path, 'is_dir', False)
def can_intra_move(self, dest_provider, path=None):
return type(self) == type(dest_provider) and not getattr(path, 'is_dir', False)
@asyncio.coroutine
def intra_copy(self, dest_provider, source_path, dest_path):
"""Copy key from one S3 bucket to another. The credentials specified in
`dest_provider` must have read access to `source.bucket`.
"""
exists = yield from dest_provider.exists(dest_path)
dest_key = dest_provider.bucket.new_key(dest_path.path)
        # ensure no leading slash when joining paths
source_path = '/' + os.path.join(self.settings['bucket'], source_path.path)
headers = {'x-amz-copy-source': parse.quote(source_path)}
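        # S3 performs the copy server-side: a PUT on the destination key carrying
        # an x-amz-copy-source header never streams the object through this host.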
url = dest_key.generate_url(
settings.TEMP_URL_SECS,
'PUT',
headers=headers,
)
yield from self.make_request(
'PUT', url,
headers=headers,
expects=(200, ),
throws=exceptions.IntraCopyError,
)
return (yield from dest_provider.metadata(dest_path)), not exists
@asyncio.coroutine
def download(self, path, accept_url=False, version=None, range=None, **kwargs):
"""Returns a ResponseWrapper (Stream) for the specified path
        raises DownloadError if the status from S3 is not 200
:param str path: Path to the key you want to download
:param dict \*\*kwargs: Additional arguments that are ignored
:rtype: :class:`waterbutler.core.streams.ResponseStreamReader`
:raises: :class:`waterbutler.core.exceptions.DownloadError`
"""
if not path.is_file:
raise exceptions.DownloadError('No file specified for download', code=400)
if not version or version.lower() == 'latest':
query_parameters = None
else:
query_parameters = {'versionId': version}
if kwargs.get('displayName'):
response_headers = {'response-content-disposition': 'attachment; filename*=UTF-8\'\'{}'.format(parse.quote(kwargs['displayName']))}
else:
response_headers = {'response-content-disposition': 'attachment'}
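        # Asking S3 for response-content-disposition makes the signed GET reply
        # with a Content-Disposition header, so browsers save rather than display.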
url = self.bucket.new_key(
path.path
).generate_url(
settings.TEMP_URL_SECS,
query_parameters=query_parameters,
response_headers=response_headers
)
if accept_url:
return url
resp = yield from self.make_request(
'GET',
url,
range=range,
expects=(200, 206),
throws=exceptions.DownloadError,
)
return streams.ResponseStreamReader(resp)
@asyncio.coroutine
def upload(self, stream, path, conflict='replace', **kwargs):
"""Uploads the given stream to S3
:param waterbutler.core.streams.RequestWrapper stream: The stream to put to S3
:param str path: The full path of the key to upload to/into
:rtype: dict, bool
"""
path, exists = yield from self.handle_name_conflict(path, conflict=conflict)
stream.add_writer('md5', streams.HashStreamWriter(hashlib.md5))
resp = yield from self.make_request(
'PUT',
self.bucket.new_key(path.path).generate_url(settings.TEMP_URL_SECS, 'PUT'),
data=stream,
headers={'Content-Length': str(stream.size)},
expects=(200, 201, ),
throws=exceptions.UploadError,
)
# md5 is returned as ETag header as long as server side encryption is not used.
# TODO: nice assertion error goes here
assert resp.headers['ETag'].replace('"', '') == stream.writers['md5'].hexdigest
return (yield from self.metadata(path, **kwargs)), not exists
@asyncio.coroutine
def delete(self, path, **kwargs):
"""Deletes the key at the specified path
:param str path: The path of the key to delete
"""
yield from self.make_request(
'DELETE',
self.bucket.new_key(path.path).generate_url(settings.TEMP_URL_SECS, 'DELETE'),
expects=(200, 204, ),
throws=exceptions.DeleteError,
)
@asyncio.coroutine
def revisions(self, path, **kwargs):
"""Get past versions of the requested key
:param str path: The path to a key
        :rtype: list
"""
url = self.bucket.generate_url(settings.TEMP_URL_SECS, 'GET', query_parameters={'versions': ''})
resp = yield from self.make_request(
'GET',
url,
params={'prefix': path.path, 'delimiter': '/'},
expects=(200, ),
throws=exceptions.MetadataError,
)
content = yield from resp.read_and_close()
versions = xmltodict.parse(content)['ListVersionsResult'].get('Version') or []
if isinstance(versions, dict):
versions = [versions]
return [
S3Revision(item)
for item in versions
if item['Key'] == path.path
]
@asyncio.coroutine
def metadata(self, path, **kwargs):
"""Get Metadata about the requested file or folder
:param WaterButlerPath path: The path to a key or folder
:rtype: dict or list
"""
if path.is_dir:
return (yield from self._metadata_folder(path))
return (yield from self._metadata_file(path))
@asyncio.coroutine
def create_folder(self, path, **kwargs):
"""
:param str path: The path to create a folder at
"""
WaterButlerPath.validate_folder(path)
if (yield from self.exists(path)):
raise exceptions.FolderNamingConflict(str(path))
yield from self.make_request(
'PUT',
self.bucket.new_key(path.path).generate_url(settings.TEMP_URL_SECS, 'PUT'),
expects=(200, 201),
throws=exceptions.CreateFolderError
)
return S3FolderMetadata({'Prefix': path.path})
@asyncio.coroutine
def _metadata_file(self, path):
resp = yield from self.make_request(
'HEAD',
self.bucket.new_key(path.path).generate_url(settings.TEMP_URL_SECS, 'HEAD'),
expects=(200, ),
throws=exceptions.MetadataError,
)
return S3FileMetadataHeaders(path.path, resp.headers)
@asyncio.coroutine
def _metadata_folder(self, path):
resp = yield from self.make_request(
'GET',
self.bucket.generate_url(settings.TEMP_URL_SECS, 'GET'),
params={'prefix': path.path, 'delimiter': '/'},
expects=(200, ),
throws=exceptions.MetadataError,
)
contents = yield from resp.read_and_close()
parsed = xmltodict.parse(contents, strip_whitespace=False)['ListBucketResult']
contents = parsed.get('Contents', [])
prefixes = parsed.get('CommonPrefixes', [])
if not contents and not prefixes and not path.is_root:
# If contents and prefixes are empty then this "folder"
# must exist as a key with a / at the end of the name
# if the path is root there is no need to test if it exists
yield from self.make_request(
'HEAD',
self.bucket.new_key(path.path).generate_url(settings.TEMP_URL_SECS, 'HEAD'),
expects=(200, ),
throws=exceptions.MetadataError,
)
if isinstance(contents, dict):
contents = [contents]
if isinstance(prefixes, dict):
prefixes = [prefixes]
items = [
S3FolderMetadata(item)
for item in prefixes
]
for content in contents:
if content['Key'] == path.path:
continue
if content['Key'].endswith('/'):
items.append(S3FolderKeyMetadata(content))
else:
items.append(S3FileMetadata(content))
return items
| apache-2.0 | -1,311,005,674,001,923,000 | 34.096552 | 143 | 0.609452 | false |
sckott/pytaxize | pytaxize/ids/ids.py | 1 | 4823 | import warnings
import sys
import itertools
from ..col import search
from pytaxize.ncbi import ncbi
from pytaxize.itis import terms
from .gbif_helpers import gbif_query_for_single_name, process_gbif_response
from .format_helpers import _make_id
from .eol_helpers import (
eol_search_query_for_single_name,
process_eol_search_response,
process_list_of_taxa_details,
eol_taxa_query,
)
class NoResultException(Exception):
pass
class Ids(object):
"""
ids: A class for taxonomic identifiers
Usage::
from pytaxize import Ids
x = Ids('Poa annua')
x
x.name
x.ncbi()
x.ids
x.db_ids
# more than one result
x = Ids(name="Echinacea")
x.ncbi()
x.ids
x.ids["Echinacea"]
# more than one name supplied
x = Ids(name=['Helianthus annuus', 'Poa annua', 'Echinacea'])
x
x.ncbi()
x
x.ids
x.ids["Helianthus annuus"]
x.ids["Poa annua"]
x.ids["Echinacea"]
# extract just ids
out = x.extract_ids()
out["Echinacea"]
# ITIS
x = Ids("Helianthus annuus")
x.itis(type="scientific")
x.extract_ids()
# EOL
z = Ids("Helianthus annuus")
z.eol()
z.extract_ids()
z.ids
"""
def __init__(self, name):
if isinstance(name, str):
name = [name]
self.name = name
self.ids = {}
self.db_ids = None
def __repr__(self):
x = """<%s>\n""" % type(self).__name__
y = """ names: %s""" % ",".join(self.name[:10])
# z = """ ids: %s""" % ",".join(self.extract_ids())
return x + y
def ncbi(self):
out = []
for i in range(len(self.name)):
fname = self.name[i]
res = ncbi.search(sci_com=fname)
if len(res[fname]) == 0:
warnings.warn("No results for taxon '" + fname + "'")
result = [_make_id(None, fname, None, "ncbi")]
else:
id = [x["TaxId"] for x in res[fname]]
if len(id) == 1:
z = res[fname][0]
result = [_make_id(id[0], fname, z["Rank"], "ncbi")]
if len(id) > 1:
result = [
_make_id(w["TaxId"], w["ScientificName"], w["Rank"], "ncbi")
for w in res[fname]
]
out.append(result)
self.db_ids = "ncbi"
self.ids = dict(zip(self.name, out))
# FIXME: ITIS doesn't give back ranks, ideally need ranks
def itis(self, type="scientific"):
out = []
for i in range(len(self.name)):
fname = self.name[i]
            res = terms(x=fname, what=type)
if len(res) == 0:
warnings.warn("No results for taxon '" + fname + "'")
result = [_make_id(None, fname, None, "itis")]
else:
id = [x["tsn"] for x in res]
if len(id) == 1:
z = res[0]
# rank_taken = z["Rank"]
result = [_make_id(id[0], fname, "species", "itis")]
if len(id) > 1:
result = [
_make_id(w["tsn"], w["scientificName"], "species", "itis")
for w in res
]
out.append(result)
self.db_ids = "itis"
self.ids = dict(zip(self.name, out))
def gbif(self, rank="species"):
self.db_ids = "gbif"
response = map(lambda x: gbif_query_for_single_name(x, rank), self.name)
self.ids = dict(
zip(
self.name, list(map(lambda x: process_gbif_response(x, rank), response))
)
)
def eol(self):
self.db_ids = "eol"
response = zip(self.name, map(eol_search_query_for_single_name, self.name))
pageIds_per_species = list(map(process_eol_search_response, response))
taxa_dicts_per_species = map(eol_taxa_query, pageIds_per_species)
taxa_dicts_per_species = list(
map(lambda x: list(itertools.chain(*x)), taxa_dicts_per_species)
)
self.ids = dict(
zip(
self.name,
list(map(process_list_of_taxa_details, taxa_dicts_per_species)),
)
)
def db(self, db, **kwargs):
if db == "ncbi":
self.ncbi()
elif db == "itis":
self.itis(**kwargs)
else:
raise Exception("'db' must be either ncbi or itis")
def extract_ids(self):
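        # Collapse the stored result dicts to bare identifiers while keeping the
        # name -> [ids] mapping shape produced by the lookup methods above.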
x = self.ids
if len(x) > 0:
x = {k: [w.get("id", None) for w in v] for (k, v) in x.items()}
return x
| mit | 7,925,472,593,893,624,000 | 28.588957 | 88 | 0.477296 | false |
stephenliu1989/msmbuilder | msmbuilder/tests/test_msm_uncertainty.py | 1 | 3263 | from __future__ import print_function
import numpy as np
from scipy.linalg import eigvals
from msmbuilder.cluster import NDGrid
from msmbuilder.example_datasets import load_doublewell
from msmbuilder.msm import MarkovStateModel, ContinuousTimeMSM
from msmbuilder.msm.core import _solve_msm_eigensystem
def test_eigenvalue_partials():
# Verify that the partial derivatives of the ith eigenvalue of the
# transition matrix with respect to the entries of the transition matrix
    # are given by the outer product of the left and right eigenvectors
# corresponding to that eigenvalue.
# \frac{\partial \lambda_k}{\partial T_{ij}} = U_{i,k} V_{j,k}
X = load_doublewell(random_state=0)['trajectories']
Y = NDGrid(n_bins_per_feature=10).fit_transform(X)
model = MarkovStateModel(verbose=False).fit(Y)
n = model.n_states_
u, lv, rv = _solve_msm_eigensystem(model.transmat_, n)
# first, compute forward difference numerical derivatives
h = 1e-7
dLambda_dP_numeric = np.zeros((n, n, n))
for i in range(n):
for j in range(n):
# perturb the (i,j) entry of transmat
H = np.zeros((n, n))
H[i, j] = h
u_perturbed = sorted(np.real(eigvals(model.transmat_ + H)),
reverse=True)
            # compute the forward difference approx. derivative of each
# of the eigenvalues
for k in range(n):
# sort the eigenvalues of the perturbed matrix in descending
# order, to be consistent w/ _solve_msm_eigensystem
dLambda_dP_numeric[k, i, j] = (u_perturbed[k] - u[k]) / h
for k in range(n):
analytic = np.outer(lv[:, k], rv[:, k])
np.testing.assert_almost_equal(dLambda_dP_numeric[k],
analytic, decimal=5)
def test_doublewell():
X = load_doublewell(random_state=0)['trajectories']
for i in range(3):
Y = NDGrid(n_bins_per_feature=10).fit_transform([X[i]])
model1 = MarkovStateModel(verbose=False).fit(Y)
model2 = ContinuousTimeMSM().fit(Y)
print('MSM uncertainty timescales:')
print(model1.uncertainty_timescales())
print('ContinuousTimeMSM uncertainty timescales:')
print(model2.uncertainty_timescales())
print()
def test_countsmat():
model = MarkovStateModel(verbose=False)
C = np.array([
[4380, 153, 15, 2, 0, 0],
[211, 4788, 1, 0, 0, 0],
[169, 1, 4604, 226, 0, 0],
[3, 13, 158, 4823, 3, 0],
[0, 0, 0, 4, 4978, 18],
[7, 5, 0, 0, 62, 4926]], dtype=float)
C = C + (1.0 / 6.0)
model.n_states_ = C.shape[0]
model.countsmat_ = C
model.transmat_, model.populations_ = model._fit_mle(C)
n_trials = 5000
random = np.random.RandomState(0)
all_timescales = np.zeros((n_trials, model.n_states_ - 1))
all_eigenvalues = np.zeros((n_trials, model.n_states_))
for i in range(n_trials):
T = np.vstack([random.dirichlet(C[i]) for i in range(C.shape[0])])
u = _solve_msm_eigensystem(T, k=6)[0]
u = np.real(u) # quiet warning. Don't know if this is legit
all_eigenvalues[i] = u
all_timescales[i] = -1 / np.log(u[1:])
| lgpl-2.1 | 6,347,031,682,103,972,000 | 36.505747 | 76 | 0.604658 | false |
madformuse/client | src/client/_clientwindow.py | 1 | 60723 | from functools import partial
from client.player import Player
from client.updater import fetchClientUpdate
from config import Settings
import fa
from fa.factions import Factions
'''
Created on Dec 1, 2011
@author: thygrrr
'''
from PyQt4 import QtCore, QtGui, QtNetwork, QtWebKit
from PyQt4.QtCore import QDataStream
from types import IntType, FloatType, ListType, DictType
from client import ClientState, GAME_PORT_DEFAULT, LOBBY_HOST, \
LOBBY_PORT, LOCAL_REPLAY_PORT
import logging
logger = logging.getLogger(__name__)
import util
import secondaryServer
import json
import sys
import replays
import time
import os
import random
import notificatation_system as ns
FormClass, BaseClass = util.loadUiType("client/client.ui")
class mousePosition(object):
def __init__(self, parent):
self.parent = parent
self.onLeftEdge = False
self.onRightEdge = False
self.onTopEdge = False
self.onBottomEdge = False
self.cursorShapeChange = False
self.warning_buttons = dict()
def computeMousePosition(self, pos):
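        # Treat an 8-pixel band along each side of the frameless window as a
        # resize edge; the corner flags below are conjunctions of these edges.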
self.onLeftEdge = pos.x() < 8
self.onRightEdge = pos.x() > self.parent.size().width() - 8
self.onTopEdge = pos.y() < 8
self.onBottomEdge = pos.y() > self.parent.size().height() - 8
self.onTopLeftEdge = self.onTopEdge and self.onLeftEdge
self.onBottomLeftEdge = self.onBottomEdge and self.onLeftEdge
self.onTopRightEdge = self.onTopEdge and self.onRightEdge
self.onBottomRightEdge = self.onBottomEdge and self.onRightEdge
self.onEdges = self.onLeftEdge or self.onRightEdge or self.onTopEdge or self.onBottomEdge
def resetToFalse(self):
self.onLeftEdge = False
self.onRightEdge = False
self.onTopEdge = False
self.onBottomEdge = False
self.cursorShapeChange = False
def isOnEdge(self):
return self.onEdges
class ClientWindow(FormClass, BaseClass):
'''
This is the main lobby client that manages the FAF-related connection and data,
in particular players, games, ranking, etc.
Its UI also houses all the other UIs for the sub-modules.
'''
topWidget = QtGui.QWidget()
#These signals are emitted when the client is connected or disconnected from FAF
connected = QtCore.pyqtSignal()
disconnected = QtCore.pyqtSignal()
    #This signal is emitted when the client is done resizing
doneresize = QtCore.pyqtSignal()
#These signals notify connected modules of game state changes (i.e. reasons why FA is launched)
viewingReplay = QtCore.pyqtSignal(QtCore.QUrl)
#Game state controls
gameEnter = QtCore.pyqtSignal()
gameExit = QtCore.pyqtSignal()
#These signals propagate important client state changes to other modules
statsInfo = QtCore.pyqtSignal(dict)
tourneyTypesInfo = QtCore.pyqtSignal(dict)
tutorialsInfo = QtCore.pyqtSignal(dict)
tourneyInfo = QtCore.pyqtSignal(dict)
modInfo = QtCore.pyqtSignal(dict)
gameInfo = QtCore.pyqtSignal(dict)
modVaultInfo = QtCore.pyqtSignal(dict)
coopInfo = QtCore.pyqtSignal(dict)
avatarList = QtCore.pyqtSignal(list)
playerAvatarList = QtCore.pyqtSignal(dict)
usersUpdated = QtCore.pyqtSignal(list)
localBroadcast = QtCore.pyqtSignal(str, str)
autoJoin = QtCore.pyqtSignal(list)
channelsUpdated = QtCore.pyqtSignal(list)
replayVault = QtCore.pyqtSignal(dict)
coopLeaderBoard = QtCore.pyqtSignal(dict)
#These signals are emitted whenever a certain tab is activated
showReplays = QtCore.pyqtSignal()
showMaps = QtCore.pyqtSignal()
showGames = QtCore.pyqtSignal()
showTourneys = QtCore.pyqtSignal()
showLadder = QtCore.pyqtSignal()
showChat = QtCore.pyqtSignal()
showMods = QtCore.pyqtSignal()
showCoop = QtCore.pyqtSignal()
joinGameFromURL = QtCore.pyqtSignal(str)
matchmakerInfo = QtCore.pyqtSignal(dict)
def __init__(self, *args, **kwargs):
BaseClass.__init__(self, *args, **kwargs)
logger.debug("Client instantiating")
# Hook to Qt's application management system
QtGui.QApplication.instance().aboutToQuit.connect(self.cleanup)
#Init and wire the TCP Network socket to communicate with faforever.com
# This is the evil stream API.
self.socket = QtNetwork.QTcpSocket()
self.socket.readyRead.connect(self.readFromServer)
self.socket.disconnected.connect(self.disconnectedFromServer)
self.socket.error.connect(self.socketError)
self.blockSize = 0
self.useUPnP = False
self.uniqueId = None
self.sendFile = False
self.progress = QtGui.QProgressDialog()
self.progress.setMinimum(0)
self.progress.setMaximum(0)
#Tray icon
self.tray = QtGui.QSystemTrayIcon()
self.tray.setIcon(util.icon("client/tray_icon.png"))
self.tray.show()
self.state = ClientState.NONE
self.session = None
#Timer for resize events
self.resizeTimer = QtCore.QTimer(self)
self.resizeTimer.timeout.connect(self.resized)
self.preferedSize = 0
#Process used to run Forged Alliance (managed in module fa)
fa.instance.started.connect(self.startedFA)
fa.instance.finished.connect(self.finishedFA)
fa.instance.error.connect(self.errorFA)
self.gameInfo.connect(fa.instance.processGameInfo)
#Local Replay Server (and relay)
self.replayServer = fa.replayserver.ReplayServer(self)
#Local Relay Server
self.relayServer = fa.relayserver.RelayServer(self)
#Local proxy servers
self.proxyServer = fa.proxies.proxies(self)
#stat server
self.statsServer = secondaryServer.SecondaryServer("Statistic", 11002, self)
#create user interface (main window) and load theme
self.setupUi(self)
self.setStyleSheet(util.readstylesheet("client/client.css"))
self.windowsTitleLabel = QtGui.QLabel(self)
self.windowsTitleLabel.setText("FA Forever " + util.VERSION_STRING)
self.windowsTitleLabel.setProperty("titleLabel", True)
self.setWindowTitle("FA Forever " + util.VERSION_STRING)
# Frameless
self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowMinimizeButtonHint)
self.rubberBand = QtGui.QRubberBand(QtGui.QRubberBand.Rectangle)
self.mousePosition = mousePosition(self)
self.installEventFilter(self)
self.minimize = QtGui.QToolButton(self)
self.minimize.setIcon(util.icon("client/minimize-button.png"))
self.maximize = QtGui.QToolButton(self)
self.maximize.setIcon(util.icon("client/maximize-button.png"))
close = QtGui.QToolButton(self)
close.setIcon(util.icon("client/close-button.png"))
self.minimize.setMinimumHeight(10)
close.setMinimumHeight(10)
self.maximize.setMinimumHeight(10)
close.setIconSize(QtCore.QSize(22, 22))
self.minimize.setIconSize(QtCore.QSize(22, 22))
self.maximize.setIconSize(QtCore.QSize(22, 22))
close.setProperty("windowControlBtn", True)
self.maximize.setProperty("windowControlBtn", True)
self.minimize.setProperty("windowControlBtn", True)
self.menu = self.menuBar()
self.topLayout.addWidget(self.menu)
self.topLayout.addWidget(self.windowsTitleLabel)
self.topLayout.addWidget(self.minimize)
self.topLayout.addWidget(self.maximize)
self.topLayout.addWidget(close)
self.topLayout.insertStretch(1, 500)
self.topLayout.setSpacing(0)
self.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
self.maxNormal = False
close.clicked.connect(self.close)
self.minimize.clicked.connect(self.showSmall)
self.maximize.clicked.connect(self.showMaxRestore)
self.moving = False
self.dragging = False
self.draggingHover = False
self.offset = None
self.curSize = None
sizeGrip = QtGui.QSizeGrip(self)
self.mainGridLayout.addWidget(sizeGrip, 2, 2)
#Wire all important signals
self.mainTabs.currentChanged.connect(self.mainTabChanged)
self.topTabs.currentChanged.connect(self.vaultTabChanged)
#Verrry important step!
self.loadSettingsPrelogin()
self.players = {} # Players known to the client, contains the player_info messages sent by the server
self.urls = {}
# Handy reference to the Player object representing the logged-in user.
self.me = None
# names of the client's friends
self.friends = set()
# names of the client's foes
self.foes = set()
self.clanlist = set() # members of clients clan
self.power = 0 # current user power
self.id = 0
self.coloredNicknames = False
#Initialize the Menu Bar according to settings etc.
self.initMenus()
#Load the icons for the tabs
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.whatNewTab), util.icon("client/feed.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.chatTab), util.icon("client/chat.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.gamesTab), util.icon("client/games.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.coopTab), util.icon("client/coop.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.vaultsTab), util.icon("client/mods.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.ladderTab), util.icon("client/ladder.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.tourneyTab), util.icon("client/tourney.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.livestreamTab), util.icon("client/twitch.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.replaysTab), util.icon("client/replays.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.tutorialsTab), util.icon("client/tutorials.png"))
QtWebKit.QWebSettings.globalSettings().setAttribute(QtWebKit.QWebSettings.PluginsEnabled, True)
#for moderator
self.modMenu = None
def eventFilter(self, obj, event):
if (event.type() == QtCore.QEvent.HoverMove):
self.draggingHover = self.dragging
if self.dragging:
self.resizeWidget(self.mapToGlobal(event.pos()))
else:
if self.maxNormal == False:
self.mousePosition.computeMousePosition(event.pos())
else:
self.mousePosition.resetToFalse()
self.updateCursorShape(event.pos())
return False
def updateCursorShape(self, pos):
if self.mousePosition.onTopLeftEdge or self.mousePosition.onBottomRightEdge:
self.mousePosition.cursorShapeChange = True
self.setCursor(QtCore.Qt.SizeFDiagCursor)
elif self.mousePosition.onTopRightEdge or self.mousePosition.onBottomLeftEdge:
self.setCursor(QtCore.Qt.SizeBDiagCursor)
self.mousePosition.cursorShapeChange = True
elif self.mousePosition.onLeftEdge or self.mousePosition.onRightEdge:
self.setCursor(QtCore.Qt.SizeHorCursor)
self.mousePosition.cursorShapeChange = True
elif self.mousePosition.onTopEdge or self.mousePosition.onBottomEdge:
self.setCursor(QtCore.Qt.SizeVerCursor)
self.mousePosition.cursorShapeChange = True
else:
if self.mousePosition.cursorShapeChange == True:
self.unsetCursor()
self.mousePosition.cursorShapeChange = False
def showSmall(self):
self.showMinimized()
def showMaxRestore(self):
if(self.maxNormal):
self.maxNormal = False
if self.curSize:
self.setGeometry(self.curSize)
else:
self.maxNormal = True
self.curSize = self.geometry()
self.setGeometry(QtGui.QDesktopWidget().availableGeometry(self))
def mouseDoubleClickEvent(self, event):
self.showMaxRestore()
def mouseReleaseEvent(self, event):
self.dragging = False
self.moving = False
if self.rubberBand.isVisible():
self.maxNormal = True
self.curSize = self.geometry()
self.setGeometry(self.rubberBand.geometry())
self.rubberBand.hide()
#self.showMaxRestore()
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.LeftButton:
if self.mousePosition.isOnEdge() and self.maxNormal == False:
self.dragging = True
return
else :
self.dragging = False
self.moving = True
self.offset = event.pos()
def mouseMoveEvent(self, event):
if self.dragging and self.draggingHover == False:
self.resizeWidget(event.globalPos())
elif self.moving and self.offset != None:
desktop = QtGui.QDesktopWidget().availableGeometry(self)
if event.globalPos().y() == 0:
self.rubberBand.setGeometry(desktop)
self.rubberBand.show()
elif event.globalPos().x() == 0:
desktop.setRight(desktop.right() / 2.0)
self.rubberBand.setGeometry(desktop)
self.rubberBand.show()
elif event.globalPos().x() == desktop.right():
desktop.setRight(desktop.right() / 2.0)
desktop.moveLeft(desktop.right())
self.rubberBand.setGeometry(desktop)
self.rubberBand.show()
else:
self.rubberBand.hide()
if self.maxNormal == True:
self.showMaxRestore()
self.move(event.globalPos() - self.offset)
def resizeWidget(self, globalMousePos):
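        # Recompute the window geometry from the edge(s) currently being dragged and
        # the global mouse position, clamping the result to the minimum width/height.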
if globalMousePos.y() == 0:
self.rubberBand.setGeometry(QtGui.QDesktopWidget().availableGeometry(self))
self.rubberBand.show()
else:
self.rubberBand.hide()
origRect = self.frameGeometry()
left, top, right, bottom = origRect.getCoords()
minWidth = self.minimumWidth()
minHeight = self.minimumHeight()
if self.mousePosition.onTopLeftEdge:
left = globalMousePos.x()
top = globalMousePos.y()
elif self.mousePosition.onBottomLeftEdge:
            left = globalMousePos.x()
            bottom = globalMousePos.y()
elif self.mousePosition.onTopRightEdge:
right = globalMousePos.x()
top = globalMousePos.y()
elif self.mousePosition.onBottomRightEdge:
right = globalMousePos.x()
bottom = globalMousePos.y()
elif self.mousePosition.onLeftEdge:
left = globalMousePos.x()
elif self.mousePosition.onRightEdge:
right = globalMousePos.x()
elif self.mousePosition.onTopEdge:
top = globalMousePos.y()
elif self.mousePosition.onBottomEdge:
bottom = globalMousePos.y()
newRect = QtCore.QRect(QtCore.QPoint(left, top), QtCore.QPoint(right, bottom))
if newRect.isValid():
if minWidth > newRect.width():
if left != origRect.left() :
newRect.setLeft(origRect.left())
else:
newRect.setRight(origRect.right())
if minHeight > newRect.height() :
if top != origRect.top():
newRect.setTop(origRect.top())
else:
newRect.setBottom(origRect.bottom())
self.setGeometry(newRect)
def setup(self):
import chat
import tourneys
import stats
import vault
import games
import tutorials
import modvault
import coop
from chat._avatarWidget import avatarWidget
# Initialize chat
self.chat = chat.Lobby(self)
#build main window with the now active client
self.ladder = stats.Stats(self)
self.games = games.Games(self)
self.tourneys = tourneys.Tourneys(self)
self.vault = vault.MapVault(self)
self.modvault = modvault.ModVault(self)
self.replays = replays.Replays(self)
self.tutorials = tutorials.Tutorials(self)
self.Coop = coop.Coop(self)
self.notificationSystem = ns.NotificationSystem(self)
# set menu states
self.actionNsEnabled.setChecked(self.notificationSystem.settings.enabled)
# Other windows
self.avatarAdmin = self.avatarSelection = avatarWidget(self, None)
# warning setup
self.warning = QtGui.QHBoxLayout()
self.warnPlayer = QtGui.QLabel(self)
self.warnPlayer.setText("A player of your skill level is currently searching for a 1v1 game. Click a faction to join them! ")
self.warnPlayer.setAlignment(QtCore.Qt.AlignHCenter)
self.warnPlayer.setAlignment(QtCore.Qt.AlignVCenter)
self.warnPlayer.setProperty("warning", True)
self.warning.addStretch()
def add_warning_button(faction):
button = QtGui.QToolButton(self)
button.setMaximumSize(25, 25)
button.setIcon(util.icon("games/automatch/%s.png" % faction.to_name()))
button.clicked.connect(self.games.join_ladder_listeners[faction])
self.warning.addWidget(button)
return button
self.warning_buttons = {faction: add_warning_button(faction) for faction in Factions}
self.warning.addStretch()
self.mainGridLayout.addLayout(self.warning, 2, 0)
self.warningHide()
def warningHide(self):
'''
hide the warning bar for matchmaker
'''
self.warnPlayer.hide()
for i in self.warning_buttons.values():
i.hide()
def warningShow(self):
'''
show the warning bar for matchmaker
'''
self.warnPlayer.show()
for i in self.warning_buttons.values():
i.show()
@QtCore.pyqtSlot()
def cleanup(self):
'''
Perform cleanup before the UI closes
'''
self.state = ClientState.SHUTDOWN
self.progress.setWindowTitle("FAF is shutting down")
self.progress.setMinimum(0)
self.progress.setMaximum(0)
self.progress.setValue(0)
self.progress.setCancelButton(None)
self.progress.show()
#Important: If a game is running, offer to terminate it gently
self.progress.setLabelText("Closing ForgedAllianceForever.exe")
if fa.instance.running():
fa.instance.close()
#Terminate Lobby Server connection
if self.socket.state() == QtNetwork.QTcpSocket.ConnectedState:
self.progress.setLabelText("Closing main connection.")
self.socket.disconnectFromHost()
# Clear UPnP Mappings...
if self.useUPnP:
self.progress.setLabelText("Removing UPnP port mappings")
fa.upnp.removePortMappings()
#Terminate local ReplayServer
if self.replayServer:
self.progress.setLabelText("Terminating local replay server")
self.replayServer.close()
self.replayServer = None
#Terminate local ReplayServer
if self.relayServer:
self.progress.setLabelText("Terminating local relay server")
self.relayServer.close()
self.relayServer = None
#Clean up Chat
if self.chat:
self.progress.setLabelText("Disconnecting from IRC")
self.chat.disconnect()
self.chat = None
# Get rid of the Tray icon
if self.tray:
self.progress.setLabelText("Removing System Tray icon")
self.tray.deleteLater()
self.tray = None
#Terminate UI
if self.isVisible():
self.progress.setLabelText("Closing main window")
self.close()
self.progress.close()
def closeEvent(self, event):
logger.info("Close Event for Application Main Window")
self.saveWindow()
if fa.instance.running():
if QtGui.QMessageBox.question(self, "Are you sure?", "Seems like you still have Forged Alliance running!<br/><b>Close anyway?</b>", QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) == QtGui.QMessageBox.No:
event.ignore()
return
return QtGui.QMainWindow.closeEvent(self, event)
def resizeEvent(self, size):
self.resizeTimer.start(400)
def resized(self):
self.resizeTimer.stop()
self.doneresize.emit()
def initMenus(self):
self.actionLinkMumble.triggered.connect(partial(self.open_url, Settings.get("MUMBLE_URL").format(login=self.login)))
self.actionLink_account_to_Steam.triggered.connect(partial(self.open_url, Settings.get("STEAMLINK_URL")))
self.actionLinkWebsite.triggered.connect(partial(self.open_url, Settings.get("WEBSITE_URL")))
self.actionLinkWiki.triggered.connect(partial(self.open_url, Settings.get("WIKI_URL")))
self.actionLinkForums.triggered.connect(partial(self.open_url, Settings.get("FORUMS_URL")))
self.actionLinkUnitDB.triggered.connect(partial(self.open_url, Settings.get("UNITDB_URL")))
self.actionNsSettings.triggered.connect(lambda : self.notificationSystem.on_showSettings())
self.actionNsEnabled.triggered.connect(lambda enabled : self.notificationSystem.setNotificationEnabled(enabled))
self.actionWiki.triggered.connect(partial(self.open_url, Settings.get("WIKI_URL")))
self.actionReportBug.triggered.connect(partial(self.open_url, Settings.get("TICKET_URL")))
self.actionShowLogs.triggered.connect(self.linkShowLogs)
self.actionTechSupport.triggered.connect(partial(self.open_url, Settings.get("SUPPORT_URL")))
self.actionAbout.triggered.connect(self.linkAbout)
self.actionClearCache.triggered.connect(self.clearCache)
self.actionClearSettings.triggered.connect(self.clearSettings)
self.actionClearGameFiles.triggered.connect(self.clearGameFiles)
self.actionSetGamePath.triggered.connect(self.switchPath)
self.actionSetGamePort.triggered.connect(self.switchPort)
self.actionSetMumbleOptions.triggered.connect(self.setMumbleOptions)
#Toggle-Options
self.actionSetAutoLogin.triggered.connect(self.updateOptions)
self.actionSetSoundEffects.triggered.connect(self.updateOptions)
self.actionSetOpenGames.triggered.connect(self.updateOptions)
self.actionSetJoinsParts.triggered.connect(self.updateOptions)
self.actionSetLiveReplays.triggered.connect(self.updateOptions)
self.actionSaveGamelogs.triggered.connect(self.updateOptions)
self.actionColoredNicknames.triggered.connect(self.updateOptions)
self.actionActivateMumbleSwitching.triggered.connect(self.saveMumbleSwitching)
#Init themes as actions.
themes = util.listThemes()
for theme in themes:
action = self.menuTheme.addAction(str(theme))
action.triggered.connect(self.switchTheme)
action.theme = theme
action.setCheckable(True)
if util.getTheme() == theme:
action.setChecked(True)
# Nice helper for the developers
self.menuTheme.addSeparator()
self.menuTheme.addAction("Reload Stylesheet", lambda: self.setStyleSheet(util.readstylesheet("client/client.css")))
@QtCore.pyqtSlot()
def updateOptions(self):
self.autologin = self.actionSetAutoLogin.isChecked()
self.soundeffects = self.actionSetSoundEffects.isChecked()
self.opengames = self.actionSetOpenGames.isChecked()
self.joinsparts = self.actionSetJoinsParts.isChecked()
self.livereplays = self.actionSetLiveReplays.isChecked()
self.gamelogs = self.actionSaveGamelogs.isChecked()
self.coloredNicknames = self.actionColoredNicknames.isChecked()
self.saveChat()
self.saveCredentials()
@QtCore.pyqtSlot()
def switchTheme(self):
util.setTheme(self.sender().theme, True)
@QtCore.pyqtSlot()
def switchPath(self):
fa.wizards.Wizard(self).exec_()
@QtCore.pyqtSlot()
def switchPort(self):
import loginwizards
loginwizards.gameSettingsWizard(self).exec_()
@QtCore.pyqtSlot()
def setMumbleOptions(self):
import loginwizards
loginwizards.mumbleOptionsWizard(self).exec_()
@QtCore.pyqtSlot()
def clearSettings(self):
result = QtGui.QMessageBox.question(None, "Clear Settings", "Are you sure you wish to clear all settings, login info, etc. used by this program?", QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if (result == QtGui.QMessageBox.Yes):
util.settings.clear()
util.settings.sync()
QtGui.QMessageBox.information(None, "Restart Needed", "FAF will quit now.")
QtGui.QApplication.quit()
@QtCore.pyqtSlot()
def clearGameFiles(self):
util.clearDirectory(util.BIN_DIR)
util.clearDirectory(util.GAMEDATA_DIR)
@QtCore.pyqtSlot()
def clearCache(self):
changed = util.clearDirectory(util.CACHE_DIR)
if changed:
QtGui.QMessageBox.information(None, "Restart Needed", "FAF will quit now.")
QtGui.QApplication.quit()
@QtCore.pyqtSlot()
def open_url(self, url):
QtGui.QDesktopServices.openUrl(url)
@QtCore.pyqtSlot()
def linkShowLogs(self):
util.showInExplorer(util.LOG_DIR)
@QtCore.pyqtSlot()
def linkAbout(self):
dialog = util.loadUi("client/about.ui")
dialog.exec_()
def saveCredentials(self):
util.settings.beginGroup("user")
util.settings.setValue("user/remember", self.remember) #always remember to remember
if self.remember:
util.settings.setValue("user/login", self.login)
util.settings.setValue("user/password", self.password)
util.settings.setValue("user/autologin", self.autologin) #only autologin if remembering
else:
util.settings.setValue("user/login", None)
util.settings.setValue("user/password", None)
util.settings.setValue("user/autologin", False)
util.settings.endGroup()
util.settings.sync()
def clearAutologin(self):
self.autologin = False
self.actionSetAutoLogin.setChecked(False)
util.settings.beginGroup("user")
util.settings.setValue("user/autologin", False)
util.settings.endGroup()
util.settings.sync()
def saveWindow(self):
util.settings.beginGroup("window")
util.settings.setValue("geometry", self.saveGeometry())
util.settings.endGroup()
util.settings.beginGroup("ForgedAlliance")
util.settings.setValue("app/falogs", self.gamelogs)
util.settings.endGroup()
def savePort(self):
util.settings.beginGroup("ForgedAlliance")
util.settings.setValue("app/gameport", self.gamePort)
util.settings.setValue("app/upnp", self.useUPnP)
util.settings.endGroup()
util.settings.sync()
def saveMumble(self):
util.settings.beginGroup("Mumble")
util.settings.setValue("app/mumble", self.enableMumble)
util.settings.endGroup()
util.settings.sync()
@QtCore.pyqtSlot()
def saveMumbleSwitching(self):
self.activateMumbleSwitching = self.actionActivateMumbleSwitching.isChecked()
util.settings.beginGroup("Mumble")
util.settings.setValue("app/activateMumbleSwitching", self.activateMumbleSwitching)
util.settings.endGroup()
util.settings.sync()
def saveChat(self):
util.settings.beginGroup("chat")
util.settings.setValue("soundeffects", self.soundeffects)
util.settings.setValue("livereplays", self.livereplays)
util.settings.setValue("opengames", self.opengames)
util.settings.setValue("joinsparts", self.joinsparts)
util.settings.setValue("coloredNicknames", self.coloredNicknames)
util.settings.endGroup()
def loadSettingsPrelogin(self):
util.settings.beginGroup("user")
self.login = util.settings.value("user/login")
self.password = util.settings.value("user/password")
self.remember = (util.settings.value("user/remember") == "true")
# This is the new way we do things.
self.autologin = (util.settings.value("user/autologin") == "true")
self.actionSetAutoLogin.setChecked(self.autologin)
util.settings.endGroup()
def loadSettings(self):
#Load settings
util.settings.beginGroup("window")
geometry = util.settings.value("geometry", None)
if geometry:
self.restoreGeometry(geometry)
util.settings.endGroup()
util.settings.beginGroup("ForgedAlliance")
self.gamePort = int(util.settings.value("app/gameport", GAME_PORT_DEFAULT))
self.useUPnP = (util.settings.value("app/upnp", "true") == "true")
self.gamelogs = (util.settings.value("app/falogs", "false") == "true")
self.actionSaveGamelogs.setChecked(self.gamelogs)
util.settings.endGroup()
util.settings.beginGroup("Mumble")
if util.settings.value("app/mumble", "firsttime") == "firsttime":
# The user has never configured mumble before. Be a little intrusive and ask him if he wants to use it.
if QtGui.QMessageBox.question(self, "Enable Voice Connector?", "FA Forever can connect with <a href=\"http://mumble.sourceforge.net/\">Mumble</a> to support the automatic setup of voice connections between you and your team mates. Would you like to enable this feature? You can change the setting at any time by going to options -> settings -> Voice", QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) == QtGui.QMessageBox.Yes:
util.settings.setValue("app/mumble", "true")
else:
util.settings.setValue("app/mumble", "false")
if util.settings.value("app/activateMumbleSwitching", "firsttime") == "firsttime":
util.settings.setValue("app/activateMumbleSwitching", "true")
self.enableMumble = (util.settings.value("app/mumble", "false") == "true")
self.activateMumbleSwitching = (util.settings.value("app/activateMumbleSwitching", "false") == "true")
util.settings.endGroup()
self.actionActivateMumbleSwitching.setChecked(self.activateMumbleSwitching)
self.loadChat()
def loadChat(self):
try:
util.settings.beginGroup("chat")
self.soundeffects = (util.settings.value("soundeffects", "true") == "true")
self.opengames = (util.settings.value("opengames", "true") == "true")
self.joinsparts = (util.settings.value("joinsparts", "false") == "true")
self.livereplays = (util.settings.value("livereplays", "true") == "true")
self.coloredNicknames = (util.settings.value("coloredNicknames", "false") == "true")
util.settings.endGroup()
self.actionColoredNicknames.setChecked(self.coloredNicknames)
self.actionSetSoundEffects.setChecked(self.soundeffects)
self.actionSetLiveReplays.setChecked(self.livereplays)
self.actionSetOpenGames.setChecked(self.opengames)
self.actionSetJoinsParts.setChecked(self.joinsparts)
except:
pass
def doConnect(self):
if not self.replayServer.doListen(LOCAL_REPLAY_PORT):
return False
if not self.relayServer.doListen():
return False
self.progress.setCancelButtonText("Cancel")
self.progress.setWindowFlags(QtCore.Qt.CustomizeWindowHint | QtCore.Qt.WindowTitleHint)
self.progress.setAutoClose(False)
self.progress.setAutoReset(False)
self.progress.setModal(1)
self.progress.setWindowTitle("Connecting...")
self.progress.setLabelText("Establishing connection to {}:{}".format(LOBBY_HOST, LOBBY_PORT))
self.progress.show()
# Begin connecting.
self.socket.setSocketOption(QtNetwork.QTcpSocket.KeepAliveOption, 1)
self.socket.connectToHost(LOBBY_HOST, LOBBY_PORT)
while (self.socket.state() != QtNetwork.QAbstractSocket.ConnectedState) and self.progress.isVisible():
QtGui.QApplication.processEvents()
self.state = ClientState.NONE
self.localIP = str(self.socket.localAddress().toString())
# #Perform Version Check first
if not self.socket.state() == QtNetwork.QAbstractSocket.ConnectedState:
self.progress.close() # in case it was still showing...
# We either cancelled or had a TCP error, meaning the connection failed..
if self.progress.wasCanceled():
logger.warn("doConnect() aborted by user.")
else:
logger.error("doConnect() failed with clientstate " + str(self.state) + ", socket errorstring: " + self.socket.errorString())
return False
else:
return True
def reconnect(self):
''' try to reconnect to the server'''
self.socket.disconnected.disconnect(self.disconnectedFromServer)
self.socket.disconnectFromHost()
self.socket.disconnected.connect(self.disconnectedFromServer)
self.progress.setCancelButtonText("Cancel")
self.progress.setWindowFlags(QtCore.Qt.CustomizeWindowHint | QtCore.Qt.WindowTitleHint)
self.progress.setAutoClose(False)
self.progress.setAutoReset(False)
self.progress.setModal(1)
self.progress.setWindowTitle("Re-connecting...")
self.progress.setLabelText("Re-establishing connection ...")
self.progress.show()
# Begin connecting.
self.socket.setSocketOption(QtNetwork.QTcpSocket.KeepAliveOption, 1)
self.socket.connectToHost(LOBBY_HOST, LOBBY_PORT)
while (self.socket.state() != QtNetwork.QAbstractSocket.ConnectedState) and self.progress.isVisible():
QtGui.QApplication.processEvents()
self.state = ClientState.NONE
self.localIP = str(self.socket.localAddress().toString())
# #Perform Version Check first
if not self.socket.state() == QtNetwork.QAbstractSocket.ConnectedState:
self.progress.close() # in case it was still showing...
# We either cancelled or had a TCP error, meaning the connection failed..
if self.progress.wasCanceled():
logger.warn("doConnect() aborted by user.")
else:
logger.error("doConnect() failed with clientstate " + str(self.state) + ", socket errorstring: " + self.socket.errorString())
return False
else:
self.send(dict(command="hello", version=0, login=self.login, password=self.password, unique_id=self.uniqueId, local_ip=self.localIP, session=self.session))
return True
def waitSession(self):
self.progress.setLabelText("Setting up Session...")
self.send(dict(command="ask_session"))
start = time.time()
while self.session == None and self.progress.isVisible() :
QtGui.QApplication.processEvents()
if time.time() - start > 15 :
break
if not self.session :
if self.progress.wasCanceled():
logger.warn("waitSession() aborted by user.")
else :
logger.error("waitSession() failed with clientstate " + str(self.state) + ", socket errorstring: " + self.socket.errorString())
QtGui.QMessageBox.critical(self, "Notice from Server", "Unable to get a session : <br> Server under maintenance.<br><br>Please retry in some minutes.")
return False
self.uniqueId = util.uniqueID(self.login, self.session)
self.loadSettings()
#
        # Voice connector (this isn't supposed to be here, but the settings need to be loaded before we can determine whether to hook in the mumbleConnector)
#
if self.enableMumble:
self.progress.setLabelText("Setting up Mumble...")
import mumbleconnector
self.mumbleConnector = mumbleconnector.MumbleConnector(self)
return True
def doLogin(self):
#Determine if a login wizard needs to be displayed and do so
if not self.autologin or not self.password or not self.login:
import loginwizards
if not loginwizards.LoginWizard(self).exec_():
                return False
self.progress.setLabelText("Logging in...")
self.progress.reset()
self.progress.show()
self.login = self.login.strip()
logger.info("Attempting to login as: " + str(self.login))
self.state = ClientState.NONE
if not self.uniqueId :
QtGui.QMessageBox.warning(QtGui.QApplication.activeWindow(), "Unable to login", "It seems that you miss some important DLL.<br>Please install :<br><a href =\"http://www.microsoft.com/download/en/confirmation.aspx?id=8328\">http://www.microsoft.com/download/en/confirmation.aspx?id=8328</a> and <a href = \"http://www.microsoft.com/en-us/download/details.aspx?id=17851\">http://www.microsoft.com/en-us/download/details.aspx?id=17851</a><br><br>You probably have to restart your computer after installing them.<br><br>Please visit this link in case of problems : <a href=\"http://forums.faforever.com/forums/viewforum.php?f=3\">http://forums.faforever.com/forums/viewforum.php?f=3</a>", QtGui.QMessageBox.Close)
return False
else:
self.send(dict(command="hello", version=0, login=self.login, password=self.password, unique_id=self.uniqueId, local_ip=self.localIP))
while (not self.state) and self.progress.isVisible():
QtGui.QApplication.processEvents()
if self.progress.wasCanceled():
logger.warn("Login aborted by user.")
return False
self.progress.close()
if self.state == ClientState.OUTDATED :
logger.warn("Client is OUTDATED.")
elif self.state == ClientState.ACCEPTED:
logger.info("Login accepted.")
# update what's new page
self.whatNewsView.setUrl(QtCore.QUrl("http://www.faforever.com/?page_id=114&username={user}&pwdhash={pwdhash}".format(user=self.login, pwdhash=self.password)))
# live streams
self.LivestreamWebView.setUrl(QtCore.QUrl("http://www.faforever.com/?page_id=974"))
util.crash.CRASH_REPORT_USER = self.login
if self.useUPnP:
fa.upnp.createPortMapping(self.localIP, self.gamePort, "UDP")
#success: save login data (if requested) and carry on
self.actionSetAutoLogin.setChecked(self.autologin)
self.updateOptions()
self.progress.close()
self.connected.emit()
return True
elif self.state == ClientState.REJECTED:
logger.warning("Login rejected.")
            # Seems that there is a bug in a key ...
util.settings.beginGroup("window")
util.settings.remove("geometry")
util.settings.endGroup()
self.clearAutologin()
return self.doLogin() #Just try to login again, slightly hackish but I can get away with it here, I guess.
else:
# A more profound error has occurred (cancellation or disconnection)
return False
def isFriend(self, name):
'''
Convenience function for other modules to inquire about a user's friendliness.
'''
return name in self.friends
def isFoe(self, name):
'''
Convenience function for other modules to inquire about a user's foeliness.
'''
return name in self.foes
def isPlayer(self, name):
'''
Convenience function for other modules to inquire about a user's civilian status.
'''
return name in self.players or name == self.login
#Color table used by the following method
# CAVEAT: This will break if the theme is loaded after the client package is imported
colors = json.loads(util.readfile("client/colors.json"))
randomcolors = json.loads(util.readfile("client/randomcolors.json"))
def getUserColor(self, name):
'''
Returns a user's color depending on their status with relation to the FAF client
'''
if name == self.login:
return self.getColor("self")
elif name in self.friends:
return self.getColor("friend")
elif name in self.foes:
return self.getColor("foe")
elif name in self.clanlist:
return self.getColor("clan")
else:
if self.coloredNicknames:
return self.getRandomColor(name)
if name in self.players:
return self.getColor("player")
return self.getColor("default")
def getRandomColor(self, name):
'''Generate a random color from a name'''
random.seed(name)
return random.choice(self.randomcolors)
def getColor(self, name):
if name in self.colors:
return self.colors[name]
else:
return self.colors["default"]
@QtCore.pyqtSlot()
def startedFA(self):
'''
Slot hooked up to fa.instance when the process has launched.
It will notify other modules through the signal gameEnter().
'''
logger.info("FA has launched in an attached process.")
self.gameEnter.emit()
@QtCore.pyqtSlot(int)
def finishedFA(self, exit_code):
'''
Slot hooked up to fa.instance when the process has ended.
It will notify other modules through the signal gameExit().
'''
if not exit_code:
logger.info("FA has finished with exit code: " + str(exit_code))
else:
logger.warn("FA has finished with exit code: " + str(exit_code))
self.gameExit.emit()
@QtCore.pyqtSlot(int)
def errorFA(self, error_code):
'''
Slot hooked up to fa.instance when the process has failed to start.
'''
if error_code == 0:
logger.error("FA has failed to start")
QtGui.QMessageBox.critical(self, "Error from FA", "FA has failed to start.")
elif error_code == 1:
logger.error("FA has crashed or killed after starting")
else:
text = "FA has failed to start with error code: " + str(error_code)
logger.error(text)
QtGui.QMessageBox.critical(self, "Error from FA", text)
self.gameExit.emit()
@QtCore.pyqtSlot(int)
def mainTabChanged(self, index):
'''
The main visible tab (module) of the client's UI has changed.
In this case, other modules may want to load some data or cease
particularly CPU-intensive interactive functionality.
LATER: This can be rewritten as a simple Signal that each module can then individually connect to.
'''
new_tab = self.mainTabs.widget(index)
if new_tab is self.gamesTab:
self.showGames.emit()
if new_tab is self.chatTab:
self.showChat.emit()
if new_tab is self.replaysTab:
self.showReplays.emit()
if new_tab is self.ladderTab:
self.showLadder.emit()
if new_tab is self.tourneyTab:
self.showTourneys.emit()
if new_tab is self.coopTab:
self.showCoop.emit()
@QtCore.pyqtSlot(int)
def vaultTabChanged(self, index):
new_tab = self.topTabs.widget(index)
if new_tab is self.mapsTab:
self.showMaps.emit()
if new_tab is self.modsTab:
self.showMods.emit()
def joinGameFromURL(self, url):
'''
Tries to join the game at the given URL
'''
logger.debug("joinGameFromURL: " + url.toString())
if fa.instance.available():
add_mods = []
try:
modstr = url.queryItemValue("mods")
add_mods = json.loads(modstr) # should be a list
except:
logger.info("Couldn't load urlquery value 'mods'")
if fa.check.game(self):
if fa.check.check(url.queryItemValue("mod"), url.queryItemValue("map"), sim_mods=add_mods):
self.send(dict(command="game_join", uid=int(url.queryItemValue("uid")), gameport=self.gamePort))
def writeToServer(self, action, *args, **kw):
'''
Writes data to the deprecated stream API. Do not use.
'''
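        # Wire format (legacy stream API): a uint32 length prefix (patched in at the
        # end), then the action, login and session as QStrings, followed by each
        # argument serialised according to its Python type.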
logger.debug("Client: " + action)
block = QtCore.QByteArray()
out = QtCore.QDataStream(block, QtCore.QIODevice.ReadWrite)
out.setVersion(QtCore.QDataStream.Qt_4_2)
out.writeUInt32(0)
out.writeQString(action)
out.writeQString(self.login or "")
out.writeQString(self.session or "")
for arg in args :
if type(arg) is IntType:
out.writeInt(arg)
elif isinstance(arg, basestring):
out.writeQString(arg)
elif type(arg) is FloatType:
out.writeFloat(arg)
elif type(arg) is ListType:
out.writeQVariantList(arg)
elif type(arg) is DictType:
out.writeQString(json.dumps(arg))
elif type(arg) is QtCore.QFile :
arg.open(QtCore.QIODevice.ReadOnly)
fileDatas = QtCore.QByteArray(arg.readAll())
out.writeInt(fileDatas.size())
out.writeRawData(fileDatas)
# This may take a while. We display the progress bar so the user get a feedback
self.sendFile = True
self.progress.setLabelText("Sending file to server")
self.progress.setCancelButton(None)
self.progress.setWindowFlags(QtCore.Qt.CustomizeWindowHint | QtCore.Qt.WindowTitleHint)
self.progress.setAutoClose(True)
self.progress.setMinimum(0)
self.progress.setMaximum(100)
self.progress.setModal(1)
self.progress.setWindowTitle("Uploading in progress")
self.progress.show()
arg.close()
else:
logger.warn("Uninterpreted Data Type: " + str(type(arg)) + " sent as str: " + str(arg))
out.writeQString(str(arg))
out.device().seek(0)
out.writeUInt32(block.size() - 4)
self.bytesToSend = block.size() - 4
self.socket.write(block)
def serverTimeout(self):
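        # On the first timeout, probe the server with a PING; on a subsequent
        # timeout, abort the socket.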
if self.timeout == 0:
logger.info("Connection timeout - Checking if server is alive.")
self.writeToServer("PING")
self.timeout = self.timeout + 1
else:
self.socket.abort()
@QtCore.pyqtSlot()
def readFromServer(self):
ins = QtCore.QDataStream(self.socket)
ins.setVersion(QtCore.QDataStream.Qt_4_2)
while ins.atEnd() == False :
if self.blockSize == 0:
if self.socket.bytesAvailable() < 4:
return
self.blockSize = ins.readUInt32()
if self.socket.bytesAvailable() < self.blockSize:
return
action = ins.readQString()
logger.info("Server: '%s'" % action)
if action == "PING":
self.writeToServer("PONG")
self.blockSize = 0
return
try:
self.dispatch(json.loads(action))
except:
logger.error("Error dispatching JSON: " + action, exc_info=sys.exc_info())
self.blockSize = 0
@QtCore.pyqtSlot()
def disconnectedFromServer(self):
logger.warn("Disconnected from lobby server.")
if self.state == ClientState.ACCEPTED:
QtGui.QMessageBox.warning(QtGui.QApplication.activeWindow(), "Disconnected from FAF", "The lobby lost the connection to the FAF server.<br/><b>You might still be able to chat.<br/>To play, try reconnecting a little later!</b>", QtGui.QMessageBox.Close)
#Clear the online users lists
oldplayers = self.players.keys()
self.players = {}
self.urls = {}
self.usersUpdated.emit(oldplayers)
self.disconnected.emit()
self.mainTabs.setCurrentIndex(0)
for i in range(2, self.mainTabs.count()):
self.mainTabs.setTabEnabled(i, False)
self.mainTabs.setTabText(i, "offline")
self.state = ClientState.DROPPED
@QtCore.pyqtSlot(QtNetwork.QAbstractSocket.SocketError)
def socketError(self, error):
logger.error("TCP Socket Error: " + self.socket.errorString())
if self.state > ClientState.NONE: # Positive client states deserve user notification.
QtGui.QMessageBox.critical(None, "TCP Error", "A TCP Connection Error has occurred:<br/><br/><b>" + self.socket.errorString() + "</b>", QtGui.QMessageBox.Close)
self.progress.cancel()
@QtCore.pyqtSlot()
def forwardLocalBroadcast(self, source, message):
self.localBroadcast.emit(source, message)
def manage_power(self):
        '''Update the interface according to the power of the user'''
if self.power >= 1 :
if self.modMenu == None :
self.modMenu = self.menu.addMenu("Administration")
actionAvatar = QtGui.QAction("Avatar manager", self.modMenu)
actionAvatar.triggered.connect(self.avatarManager)
self.modMenu.addAction(actionAvatar)
def requestAvatars(self, personal):
if personal :
self.send(dict(command="avatar", action="list_avatar"))
else :
self.send(dict(command="admin", action="requestavatars"))
def joinChannel(self, username, channel):
'''Join users to a channel'''
self.send(dict(command="admin", action="join_channel", user_ids=[self.players[username].id], channel=channel))
def closeFA(self, username):
        '''Close FA remotely'''
self.send(dict(command="admin", action="closeFA", user_id=self.players[username].id))
def closeLobby(self, username):
        '''Close the lobby remotely'''
self.send(dict(command="admin", action="closelobby", user_id=self.players[username].id))
def addFriend(self, friend_name):
'''Adding a new friend by user'''
self.friends.add(friend_name)
self.send(dict(command="social_add", friend=self.players[friend_name].id))
self.usersUpdated.emit([friend_name])
def addFoe(self, foe_name):
'''Adding a new foe by user'''
self.foes.add(foe_name)
self.send(dict(command="social_add", foe=self.players[foe_name].id))
self.usersUpdated.emit([foe_name])
def remFriend(self, friend_name):
'''Removal of a friend by user'''
self.friends.remove(friend_name)
self.send(dict(command="social_remove", friend=self.players[friend_name].id))
self.usersUpdated.emit([friend_name])
def remFoe(self, foe_name):
'''Removal of a foe by user'''
self.foes.remove(foe_name)
self.send(dict(command="social_remove", foe=self.players[foe_name].id))
self.usersUpdated.emit([foe_name])
#
# JSON Protocol v2 Implementation below here
#
def send(self, message):
data = json.dumps(message)
logger.info("Outgoing JSON Message: " + data)
self.writeToServer(data)
def dispatch(self, message):
'''
A fairly pythonic way to process received strings as JSON messages.
'''
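        # For example, a message such as {"command": "welcome", ...} is routed to
        # self.handle_welcome(message); unknown commands raise ValueError below.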
if "command" in message:
cmd = "handle_" + message['command']
if hasattr(self, cmd):
getattr(self, cmd)(message)
else:
logger.error("Unknown JSON command: %s" % message['command'])
raise ValueError
else:
logger.debug("No command in message.")
def handle_stats(self, message):
self.statsInfo.emit(message)
def handle_session(self, message):
self.session = str(message["session"])
def handle_update(self, message):
        # Mysterious voodoo nonsense.
        # Fix a problem with Qt.
util.settings.beginGroup("window")
util.settings.remove("geometry")
util.settings.endGroup()
logger.warn("Server says that Updating is needed.")
self.progress.close()
self.state = ClientState.OUTDATED
fetchClientUpdate(message["update"])
def handle_welcome(self, message):
self.id = message["id"]
self.login = message["login"]
logger.debug("Login success")
self.state = ClientState.ACCEPTED
def handle_registration_response(self, message):
if message["result"] == "SUCCESS":
self.state = ClientState.CREATED
return
self.state = ClientState.REJECTED
self.handle_notice({"style": "notice", "text": message["error"]})
def handle_game_launch(self, message):
logger.info("Handling game_launch via JSON " + str(message))
silent = False
if 'args' in message:
arguments = message['args']
else:
arguments = []
        # Do some special things depending on the reason for the game launch.
rank = False
# HACK: Ideally, this comes from the server, too. LATER: search_ranked message
if message["featured_mod"] == "ladder1v1":
arguments.append('/' + self.games.race)
#Player 1v1 rating
arguments.append('/mean')
arguments.append(str(self.players[self.login]["ladder_rating_mean"]))
arguments.append('/deviation')
arguments.append(str(self.players[self.login]["ladder_rating_deviation"]))
# Launch the auto lobby
self.relayServer.init_mode = 1
else :
#Player global rating
arguments.append('/mean')
arguments.append(str(self.players[self.login]["rating_mean"]))
arguments.append('/deviation')
arguments.append(str(self.players[self.login]["rating_deviation"]))
if self.me.country is not None:
arguments.append('/country ')
arguments.append(self.me.country)
# Launch the normal lobby
self.relayServer.init_mode = 0
if self.me.clan is not None:
arguments.append('/clan')
arguments.append(self.me.clan)
# Ensure we have the map
if "mapname" in message:
fa.check.map(message['mapname'], force=True, silent=silent)
if "sim_mods" in message:
fa.mods.checkMods(message['sim_mods'])
# Writing a file for options
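        # The result is a small Lua snippet, e.g. Options = { '1', '0' }, written
        # to options.lua in the cache directory.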
if "options" in message:
filename = os.path.join(util.CACHE_DIR, "options.lua")
options = QtCore.QFile(filename)
options.open(QtCore.QIODevice.WriteOnly | QtCore.QIODevice.Text)
numOpt = 0
options.write("Options = { ")
lenopt = len(message['options'])
for option in message['options'] :
if option == True :
options.write("'1'")
else :
options.write("'0'")
numOpt = numOpt + 1
if lenopt != numOpt :
options.write(", ")
options.write(" }")
options.close()
#Experimental UPnP Mapper - mappings are removed on app exit
if self.useUPnP:
fa.upnp.createPortMapping(self.localIP, self.gamePort, "UDP")
        info = dict(uid=message['uid'], recorder=self.login, featured_mod=message['featured_mod'], game_time=time.time())
        fa.run(info, self.relayServer.serverPort(), arguments)
def handle_coop_info(self, message):
self.coopInfo.emit(message)
def handle_tournament_types_info(self, message):
self.tourneyTypesInfo.emit(message)
def handle_tournament_info(self, message):
self.tourneyInfo.emit(message)
def handle_tutorials_info(self, message):
self.tutorialsInfo.emit(message)
def handle_mod_info(self, message):
self.modInfo.emit(message)
def handle_game_info(self, message):
self.gameInfo.emit(message)
def handle_modvault_list_info(self, message):
modList = message["modList"]
for mod in modList:
self.handle_modvault_info(mod)
def handle_modvault_info(self, message):
self.modVaultInfo.emit(message)
def handle_replay_vault(self, message):
self.replayVault.emit(message)
def handle_coop_leaderboard(self, message):
self.coopLeaderBoard.emit(message)
def handle_matchmaker_info(self, message):
if "action" in message:
self.matchmakerInfo.emit(message)
elif "potential" in message:
if message["potential"] :
self.warningShow()
else:
self.warningHide()
def handle_avatar(self, message):
if "avatarlist" in message :
self.avatarList.emit(message["avatarlist"])
def handle_admin(self, message):
if "avatarlist" in message :
self.avatarList.emit(message["avatarlist"])
elif "player_avatar_list" in message :
self.playerAvatarList.emit(message)
def handle_social(self, message):
if "friends" in message:
self.friends = set(message["friends"])
self.usersUpdated.emit(self.players.keys())
if "foes" in message:
self.foes = set(message["foes"])
self.usersUpdated.emit(self.players.keys())
if "channels" in message:
# Add a delay to the notification system (insane cargo cult)
self.notificationSystem.disabledStartup = False
self.channelsUpdated.emit(message["channels"])
if "autojoin" in message:
self.autoJoin.emit(message["autojoin"])
if "power" in message:
self.power = message["power"]
self.manage_power()
def handle_player_info(self, message):
players = message["players"]
        # First, find yourself. Things get easier once "me" is assigned.
for player in players:
if player["login"] == self.login:
self.me = Player(player)
for player in players:
name = player["login"]
new_player = Player(player)
self.players[name] = new_player
self.usersUpdated.emit([name])
if new_player.clan == self.me.clan:
self.clanlist.add(name)
def avatarManager(self):
self.requestAvatars(0)
self.avatarSelection.show()
def handle_notice(self, message):
if "text" in message:
if message["style"] == "error" :
if self.state != ClientState.NONE :
QtGui.QMessageBox.critical(self, "Error from Server", message["text"])
else :
QtGui.QMessageBox.critical(self, "Login Failed", message["text"])
self.state = ClientState.REJECTED
elif message["style"] == "warning":
QtGui.QMessageBox.warning(self, "Warning from Server", message["text"])
elif message["style"] == "scores":
self.tray.showMessage("Scores", message["text"], QtGui.QSystemTrayIcon.Information, 3500)
self.localBroadcast.emit("Scores", message["text"])
else:
QtGui.QMessageBox.information(self, "Notice from Server", message["text"])
if message["style"] == "kill":
logger.info("Server has killed your Forged Alliance Process.")
fa.instance.kill()
if message["style"] == "kick":
logger.info("Server has kicked you from the Lobby.")
self.cleanup()
| gpl-3.0 | -2,850,407,173,679,299,600 | 36.39101 | 721 | 0.630008 | false |
myemma/EmmaPython | emma/model/message.py | 1 | 2194 | """Audience mailing message models"""
from emma import exceptions as ex
from emma.model import BaseApiModel
class Message(BaseApiModel):
"""
Encapsulates operations for a :class:`Message`
:param mailing: The Mailing which owns this Message
:type mailing: :class:`Mailing`
:param member_id: The Member ID to whom this Message was sent
:type member_id: :class:`int`
:param raw: The raw values of this :class:`Message`
:type raw: :class:`dict`
Usage::
>>> from emma.model.account import Account
>>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
>>> mlng = acct.mailings[123]
>>> mlng.messages[12]
<Message>
"""
def __init__(self, mailing, member_id=None, raw=None):
self.mailing = mailing
self.member_id = member_id
super(Message, self).__init__(raw)
def forward(self, emails=None, note=None):
"""
Forward a previous message to additional recipients. If these recipients
are not already in the audience, they will be added with a status of
FORWARDED.
:param emails: The emails to receive this forward
:type emails: :class:`list` of :class:`str`
:param note: A note to be sent with this forward
:type note: :class:`str`
:rtype: :class:`int`
Usage::
>>> from emma.model.account import Account
>>> acct = Account(1234, "08192a3b4c5d6e7f", "f7e6d5c4b3a29180")
>>> mssg = acct.mailings[123].messages[12]
>>> mssg.forward(["[email protected]", "[email protected]"])
124
"""
if 'mailing_id' not in self.mailing:
raise ex.NoMailingIdError()
if not self.member_id:
raise ex.NoMemberIdError()
if not emails:
return None
path = "/forwards/%s/%s" % (self.mailing['mailing_id'], self.member_id)
data = {'recipient_emails': emails}
if note:
data['note'] = note
result = self.mailing.account.adapter.post(path, data)
if not result:
raise ex.MailingForwardError()
return result['mailing_id']
| mit | -8,210,768,294,507,816,000 | 31.264706 | 80 | 0.596627 | false |
QQuick/Transcrypt | transcrypt/development/automated_tests/transcrypt/dictionaries/__init__.py | 1 | 3596 | from org.transcrypt.stubs.browser import __pragma__
__pragma__ ('iconv')
# Dictionaries are translated to JavaScript objects,
# to achieve JSON-compatible syntax for JavaScript library object initialisation.
# Keys that may denote a number are interpreted as such in Transcrypt.
# All other keys are interpreted as strings.
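# Illustrative example (not part of the original tests): in a dict like {'1': 'one', 'x': 2},
# the key '1' may be addressed as either 1 or '1' after translation, while 'x' remains a string key.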
def run (autoTester):
tel = {'guido': 4127, 'jack': 4098}
autoTester.check (len (tel))
tel ['sape'] = 4139
autoTester.check (tel)
autoTester.check (tel ['jack'])
del tel ['sape']
tel ['irv'] = 4127
autoTester.check (tel)
autoTester.check (sorted (list (tel.keys ())), False)
autoTester.check (sorted (tel.keys ()))
autoTester.check ('guido' in tel)
autoTester.check ('jack' not in tel)
autoTester.check (dict ([('guido', 4127), ('jack', 4098), ('sape', 4139)]))
autoTester.check (
autoTester.expectException( lambda: dict(1) )
)
autoTester.check (
autoTester.expectException( lambda: dict(134.34) )
)
autoTester.check (
autoTester.expectException( lambda: dict('asdf') )
)
autoTester.check (
autoTester.expectException( lambda: dict(['1234', 1]) )
)
autoTester.check( dict ([]))
autoTester.check (dict ({}))
autoTester.check (dict ({'asdf': 1, 'qwer': 2}) )
# check dict copy, Issue # 221
b = {'a' : 2.01, 'b': -3.3}
d = dict (b)
autoTester.check (d)
b = {'a' : 2, 'b': [1,2,3]}
d = dict (b)
autoTester.check (d)
b = {'a' : None, 'b': set([1,2,3])}
d = dict (b)
autoTester.check (d)
b = {'a' : {'c': 2}, 'b': (1,2)}
d = dict (b)
autoTester.check (d)
autoTester.check (d['a']['c'])
autoTester.check (d.get('a').get('c'))
autoTester.check (b.get('a').get('c'))
d['a']['c'] = 3
autoTester.check (d.get('a').get('c'))
autoTester.check (b.get('a').get('c'))
knights = {'robin': 'the brave', 'gallahad': 'the pure'}
for k, v in sorted (knights.items ()):
autoTester.check (k, v)
if 'gallahad' in knights:
autoTester.check ('gallahad is a knight')
for k in sorted (knights):
autoTester.check (k)
knight = {'rudolph': 'the righteous'}
for k in knight: # Autotest automatic conversion with one knight, since sort order of dict undefined
autoTester.check (k)
tel = {'guido': 123}
tel.update({'edsger': 42})
autoTester.check (tel.setdefault ('linus', 456))
autoTester.check (tel ['linus'])
autoTester.check (tel.setdefault ('guido', 789))
autoTester.check (tel.pop ('guido', 1))
autoTester.check (tel.pop ('guido', 1))
autoTester.check (tel.pop ('edsger', 2))
autoTester.check (tel.pop ('foo', 'bar'))
autoTester.check (tel.pop ('foo', None))
# Check compound keys (issue 281)
d = {}
d ['a'] = 3777
d [(1, 2)] = 4777
autoTester.check (d ['a'], d [(1, 2)])
__pragma__ ('opov')
d = {}
d ['a'] = 3777
d [(1, 2)] = 4777
autoTester.check (d ['a'], d [(1, 2)])
__pragma__ ('noopov')
# Check exceptions
knights = {'robin': 'the brave', 'gallahad': 'the pure'}
autoTester.check (
autoTester.expectException ( lambda: knights.pop("batman") )
)
autoTester.check (
autoTester.expectException ( lambda: knights.pop("batman", None) )
)
autoTester.check (
autoTester.expectException ( lambda: knights.pop("batman", "the gullible") )
)
| apache-2.0 | -3,353,078,020,510,493,000 | 29.269565 | 107 | 0.561179 | false |
dovf/kitty | tests/test_model_low_level_fields.py | 1 | 36496 | # -*- coding: utf-8 -*-
# Copyright (C) 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
#
# This file is part of Kitty.
#
# Kitty is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Kitty is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kitty. If not, see <http://www.gnu.org/licenses/>.
'''
Tests for low level fields:
'''
from common import metaTest, BaseTestCase
from bitstring import Bits
import types
import struct
from kitty.model import String, Delimiter, RandomBits, RandomBytes, Dynamic, Static, Group, Float
from kitty.model import BitField, UInt8, UInt16, UInt32, UInt64, SInt8, SInt16, SInt32, SInt64
from kitty.core import KittyException
import os
class ValueTestCase(BaseTestCase):
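    # Shared test logic for all value fields below; __meta__ = True together with the
    # @metaTest decorator (from common) presumably keeps these generic tests from
    # running on this base class directly, so only the concrete subclasses execute them.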
__meta__ = True
default_value = None
default_value_rendered = None
def setUp(self, cls=None):
super(ValueTestCase, self).setUp(cls)
self.default_value = self.__class__.default_value
self.default_value_rendered = self.__class__.default_value_rendered
self.rendered_type = self.get_rendered_type()
self.uut_name = 'uut'
def get_rendered_type(self):
return Bits
def get_default_field(self, fuzzable=True):
return self.cls(value=self.default_value, fuzzable=fuzzable, name=self.uut_name)
def bits_to_value(self, bits):
'''
default behavior: take the bytes
'''
return bits.bytes
def _get_all_mutations(self, field, reset=True):
res = []
while field.mutate():
res.append(field.render())
if reset:
field.reset()
return res
def _base_check(self, field):
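        # Sanity-check helper: the reported num_mutations must match the number of
        # rendered mutations and all mutations must be unique; the check is repeated
        # to confirm the behaviour is stable across a reset.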
num_mutations = field.num_mutations()
mutations = self._get_all_mutations(field)
self.assertEqual(num_mutations, len(mutations))
self.assertEqual(len(mutations), len(set(mutations)))
mutations = self._get_all_mutations(field)
self.assertEqual(num_mutations, len(mutations))
self.assertEqual(len(mutations), len(set(mutations)))
@metaTest
def testDummyToDo(self):
self.assertEqual(len(self.todo), 0)
@metaTest
def testDefaultValue(self):
field = self.get_default_field()
res = field.render()
self.assertEqual(self.default_value_rendered, res)
field.mutate()
field.reset()
res = field.render()
self.assertEqual(self.default_value_rendered, res)
@metaTest
def testMutateAllDifferent(self):
field = self.get_default_field()
mutations = self._get_all_mutations(field)
self.assertEqual(len(set(mutations)), len(mutations))
@metaTest
def testNotFuzzable(self):
field = self.get_default_field(fuzzable=False)
num_mutations = field.num_mutations()
self.assertEqual(num_mutations, 0)
rendered = field.render()
as_val = self.bits_to_value(rendered)
self.assertAlmostEqual(as_val, self.default_value, places=5)
mutated = field.mutate()
self.assertFalse(mutated)
rendered = field.render()
as_val = self.bits_to_value(rendered)
self.assertAlmostEqual(as_val, self.default_value, places=5)
field.reset()
mutated = field.mutate()
self.assertFalse(mutated)
rendered = field.render()
as_val = self.bits_to_value(rendered)
self.assertAlmostEqual(as_val, self.default_value, places=5)
@metaTest
def testNumMutations(self):
field = self.get_default_field()
num_mutations = field.num_mutations()
self._check_mutation_count(field, num_mutations)
@metaTest
def testSameResultWhenSameParams(self):
field1 = self.get_default_field()
field2 = self.get_default_field()
res1 = self._get_all_mutations(field1)
res2 = self._get_all_mutations(field2)
self.assertListEqual(res1, res2)
@metaTest
def testSameResultAfterReset(self):
field = self.get_default_field()
res1 = self._get_all_mutations(field)
res2 = self._get_all_mutations(field)
self.assertListEqual(res1, res2)
@metaTest
def testSkipZero(self):
field = self.get_default_field(fuzzable=True)
num_mutations = field.num_mutations()
to_skip = 0
expected_skipped = min(to_skip, num_mutations)
expected_mutated = num_mutations - expected_skipped
self._check_skip(field, to_skip, expected_skipped, expected_mutated)
@metaTest
def testSkipOne(self):
field = self.get_default_field(fuzzable=True)
num_mutations = field.num_mutations()
to_skip = 1
expected_skipped = min(to_skip, num_mutations)
expected_mutated = num_mutations - expected_skipped
self._check_skip(field, to_skip, expected_skipped, expected_mutated)
@metaTest
def testSkipHalf(self):
field = self.get_default_field(fuzzable=True)
num_mutations = field.num_mutations()
to_skip = num_mutations / 2
expected_skipped = min(to_skip, num_mutations)
expected_mutated = num_mutations - expected_skipped
self._check_skip(field, to_skip, expected_skipped, expected_mutated)
@metaTest
def testSkipExact(self):
field = self.get_default_field(fuzzable=True)
num_mutations = field.num_mutations()
to_skip = num_mutations
expected_skipped = min(to_skip, num_mutations)
expected_mutated = num_mutations - expected_skipped
self._check_skip(field, to_skip, expected_skipped, expected_mutated)
@metaTest
def testSkipTooMuch(self):
field = self.get_default_field(fuzzable=True)
num_mutations = field.num_mutations()
to_skip = num_mutations + 1
expected_skipped = min(to_skip, num_mutations)
expected_mutated = num_mutations - expected_skipped
self._check_skip(field, to_skip, expected_skipped, expected_mutated)
@metaTest
def testReturnTypeRenderFuzzable(self):
field = self.get_default_field(fuzzable=True)
self.assertIsInstance(field.render(), self.rendered_type)
field.mutate()
self.assertIsInstance(field.render(), self.rendered_type)
field.reset()
self.assertIsInstance(field.render(), self.rendered_type)
@metaTest
def testReturnTypeGetRenderedFuzzable(self):
field = self.get_default_field(fuzzable=True)
self.assertIsInstance(field.render(), self.rendered_type)
field.mutate()
self.assertIsInstance(field.render(), self.rendered_type)
field.reset()
self.assertIsInstance(field.render(), self.rendered_type)
@metaTest
def testReturnTypeMutateFuzzable(self):
field = self.get_default_field(fuzzable=True)
self.assertIsInstance(field.mutate(), types.BooleanType)
field.reset()
self.assertIsInstance(field.mutate(), types.BooleanType)
@metaTest
def testReturnTypeRenderNotFuzzable(self):
field = self.get_default_field(fuzzable=False)
self.assertIsInstance(field.render(), self.rendered_type)
field.mutate()
self.assertIsInstance(field.render(), self.rendered_type)
field.reset()
self.assertIsInstance(field.render(), self.rendered_type)
@metaTest
def testReturnTypeGetRenderedNotFuzzable(self):
field = self.get_default_field(fuzzable=False)
self.assertIsInstance(field.render(), self.rendered_type)
field.mutate()
self.assertIsInstance(field.render(), self.rendered_type)
field.reset()
self.assertIsInstance(field.render(), self.rendered_type)
@metaTest
def testReturnTypeMutateNotFuzzable(self):
field = self.get_default_field(fuzzable=False)
self.assertIsInstance(field.mutate(), types.BooleanType)
field.reset()
self.assertIsInstance(field.mutate(), types.BooleanType)
@metaTest
def testHashTheSameForTwoSimilarObjects(self):
field1 = self.get_default_field()
field2 = self.get_default_field()
self.assertEqual(field1.hash(), field2.hash())
@metaTest
def testHashTheSameAfterReset(self):
field = self.get_default_field()
hash_after_creation = field.hash()
field.mutate()
hash_after_mutate = field.hash()
self.assertEqual(hash_after_creation, hash_after_mutate)
field.reset()
hash_after_reset = field.hash()
self.assertEqual(hash_after_creation, hash_after_reset)
while field.mutate():
hash_after_mutate_all = field.hash()
self.assertEqual(hash_after_creation, hash_after_mutate_all)
field.render()
hash_after_render_all = field.hash()
self.assertEqual(hash_after_creation, hash_after_render_all)
@metaTest
def testGetRenderedFields(self):
field = self.get_default_field()
field_list = [field]
self.assertEqual(field.get_rendered_fields(), field_list)
while field.mutate():
if len(field.render()):
self.assertEqual(field.get_rendered_fields(), field_list)
else:
self.assertEqual(field.get_rendered_fields(), [])
@metaTest
def testInvalidFieldNameRaisesException(self):
with self.assertRaises(KittyException):
self.uut_name = 'invalid/name'
self.get_default_field()
def _check_skip(self, field, to_skip, expected_skipped, expected_mutated):
# print('_check_skip(%s, %s, %s, %s)' % (field, to_skip, expected_skipped, expected_mutated))
skipped = field.skip(to_skip)
self.assertEqual(expected_skipped, skipped)
mutated = 0
while field.mutate():
mutated += 1
self.assertEqual(expected_mutated, mutated)
field.reset()
skipped = field.skip(to_skip)
self.assertEqual(expected_skipped, skipped)
mutated = 0
while field.mutate():
mutated += 1
self.assertEqual(expected_mutated, mutated)
def _check_mutation_count(self, field, expected_num_mutations):
num_mutations = field.num_mutations()
self.assertEqual(num_mutations, expected_num_mutations)
mutation_count = 0
while field.mutate():
mutation_count += 1
self.assertEqual(mutation_count, expected_num_mutations)
class StringTests(ValueTestCase):
__meta__ = False
default_value = 'kitty'
default_value_rendered = Bits(bytes=default_value)
def setUp(self, cls=String):
super(StringTests, self).setUp(cls)
def testMaxSizeNumMutations(self):
max_size = 35
nm_field = self.cls(value=self.default_value)
        expected_mutation_count = 0
        while nm_field.mutate():
            res = nm_field.render().bytes
            if len(res) <= max_size:
                expected_mutation_count += 1
        field = self.cls(value='kitty', max_size=max_size)
        num_mutations = field.num_mutations()
        self.assertEqual(expected_mutation_count, num_mutations)
        self._check_mutation_count(field, expected_mutation_count)
def testMaxSizeMutations(self):
max_size = 35
max_size_in_bits = max_size * 8
nm_field = self.cls(value=self.default_value)
all_mutations = self._get_all_mutations(nm_field)
field = self.cls(value=self.default_value, max_size=max_size)
mutations = self._get_all_mutations(field)
for mutation in all_mutations:
if len(mutation) > max_size_in_bits:
self.assertNotIn(mutation, mutations)
else:
self.assertIn(mutation, mutations)
def _testStringsFromFile(self):
values = [
'It was the summer of 95 (so what!)',
'In the backyard, shaving the old plies',
'Feeling so strong (strong!), something went wrong (wrong!)',
'Straight into my finger, what a stinger, it was so long',
'I still remember that day, like the day that I said that I swear',
'"I\'ll never hurt myself again", but it seems that I\'m deemed to be wrong',
'To be wrong, to be wrong',
'Gotta keep holding on...they always played a slow song.',
]
filename = './kitty_strings.txt'
with open(filename, 'wb') as f:
f.write('\n'.join(values))
uut = String(name=self.uut_name, value='streetlight')
all_mutations = self.get_all_mutations(uut)
for value in values:
self.assertIn(Bits(bytes=value), all_mutations)
os.remove(filename)
class DelimiterTests(StringTests):
__meta__ = False
default_value = 'kitty'
default_value_rendered = Bits(bytes=default_value)
def setUp(self, cls=Delimiter):
super(DelimiterTests, self).setUp(cls)
class DynamicTests(ValueTestCase):
__meta__ = False
default_value = 'kitty'
default_value_rendered = Bits(bytes=default_value)
def setUp(self, cls=Dynamic):
super(DynamicTests, self).setUp(cls)
self.key_exists = 'exists'
self.value_exists = 'value'
self.key_not_exist = 'not exist'
self.default_session_data = {
self.key_exists: self.value_exists
}
def get_default_field(self, fuzzable=True):
return self.cls(key='my_key', default_value=self.default_value, length=len(self.default_value), fuzzable=fuzzable, name=self.uut_name)
def testSessionDataNotFuzzable(self):
field = self.cls(key=self.key_exists, default_value=self.default_value)
self.assertEqual(self.default_value_rendered, field.render())
field.set_session_data(self.default_session_data)
self.assertEqual(Bits(bytes=self.value_exists), field.render())
self.assertEqual(Bits(bytes=self.value_exists), field.render())
def testSessionDataNotFuzzableAfterReset(self):
field = self.cls(key=self.key_exists, default_value=self.default_value)
self.assertEqual(self.default_value_rendered, field.render())
field.set_session_data(self.default_session_data)
self.assertEqual(Bits(bytes=self.value_exists), field.render())
field.reset()
self.assertEqual(self.default_value_rendered, field.render())
def testSessionDataNotFuzzableDataChangeKeyExists(self):
field = self.cls(key=self.key_exists, default_value=self.default_value)
self.assertEqual(self.default_value_rendered, field.render())
field.set_session_data(self.default_session_data)
self.assertEqual(Bits(bytes=self.value_exists), field.render())
new_val = 'new value'
field.set_session_data({self.key_exists: new_val})
self.assertEqual(Bits(bytes=new_val), field.render())
def testSessionDataNotFuzzableDataChangeKeyNotExist(self):
field = self.cls(key=self.key_exists, default_value=self.default_value)
self.assertEqual(self.default_value_rendered, field.render())
field.set_session_data(self.default_session_data)
self.assertEqual(Bits(bytes=self.value_exists), field.render())
new_val = 'new value'
field.set_session_data({self.key_not_exist: new_val})
self.assertEqual(Bits(bytes=self.value_exists), field.render())
def testSessionDataFuzzableAfterReset(self):
field = self.cls(key=self.key_exists, default_value=self.default_value, length=len(self.default_value), fuzzable=True)
self.assertEqual(self.default_value_rendered, field.render())
field.set_session_data(self.default_session_data)
self.assertEqual(Bits(bytes=self.value_exists), field.render())
field.reset()
self.assertEqual(self.default_value_rendered, field.render())
def testSessionDataFuzzableDataChangeKeyExists(self):
field = self.cls(key=self.key_exists, default_value=self.default_value, length=len(self.default_value), fuzzable=True)
self.assertEqual(self.default_value_rendered, field.render())
field.set_session_data(self.default_session_data)
self.assertEqual(Bits(bytes=self.value_exists), field.render())
new_val = 'new value'
field.set_session_data({self.key_exists: new_val})
self.assertEqual(Bits(bytes=new_val), field.render())
def testSessionDataFuzzableDataChangeKeyNotExist(self):
field = self.cls(key=self.key_exists, default_value=self.default_value, length=len(self.default_value), fuzzable=True)
self.assertEqual(self.default_value_rendered, field.render())
field.set_session_data(self.default_session_data)
self.assertEqual(Bits(bytes=self.value_exists), field.render())
new_val = 'new value'
field.set_session_data({self.key_not_exist: new_val})
self.assertEqual(Bits(bytes=self.value_exists), field.render())
class RandomBitsTests(ValueTestCase):
__meta__ = False
default_value = 'kitty'
default_unused_bits = 3
default_value_rendered = Bits(bytes=default_value)[:-3]
def setUp(self, cls=RandomBits):
super(RandomBitsTests, self).setUp(cls)
def get_default_field(self, fuzzable=True):
return self.cls(value=self.default_value, min_length=5, max_length=10, unused_bits=self.default_unused_bits, fuzzable=fuzzable, name=self.uut_name)
def testNotFuzzable(self):
field = self.get_default_field(fuzzable=False)
num_mutations = field.num_mutations()
self.assertEqual(num_mutations, 0)
rendered = field.render()
self.assertEqual(rendered, self.default_value_rendered)
mutated = field.mutate()
self.assertFalse(mutated)
rendered = field.render()
self.assertEqual(rendered, self.default_value_rendered)
field.reset()
mutated = field.mutate()
self.assertFalse(mutated)
rendered = field.render()
self.assertEqual(rendered, self.default_value_rendered)
def testNoStepNumMutations(self):
param_num_mutations = 100
field = self.cls(value=self.default_value, min_length=10, max_length=20, unused_bits=3, num_mutations=param_num_mutations)
self._check_mutation_count(field, param_num_mutations)
field.reset()
self._check_mutation_count(field, param_num_mutations)
def testNoStepSizes(self):
min_length = 10
max_length = 100
field = self.cls(value=self.default_value, min_length=min_length, max_length=max_length, unused_bits=self.default_unused_bits)
while field.mutate():
rendered = field.render()
self.assertGreaterEqual(len(rendered), min_length)
self.assertLessEqual(len(rendered), max_length)
def testNoStepMinNegative(self):
with self.assertRaises(KittyException):
self.cls(value=self.default_value, min_length=-1, max_length=4)
def testNoStepMaxNegative(self):
with self.assertRaises(KittyException):
self.cls(value=self.default_value, min_length=-2, max_length=-1)
def testNoStepMaxIs0(self):
with self.assertRaises(KittyException):
self.cls(value=self.default_value, min_length=0, max_length=0)
def testNoStepMinBiggerThanMax(self):
with self.assertRaises(KittyException):
self.cls(value=self.default_value, min_length=5, max_length=4)
def testNoStepRandomness(self):
min_length = 10
max_length = 100
field = self.cls(value=self.default_value, min_length=min_length, max_length=max_length, unused_bits=self.default_unused_bits)
mutations = self._get_all_mutations(field)
self.assertNotEqual(len(set(mutations)), 1)
def testSeedNotTheSame(self):
min_length = 10
max_length = 100
field1 = self.cls(value=self.default_value, seed=11111, min_length=min_length, max_length=max_length, unused_bits=self.default_unused_bits)
field2 = self.cls(value=self.default_value, seed=22222, min_length=min_length, max_length=max_length, unused_bits=self.default_unused_bits)
res1 = self._get_all_mutations(field1)
res2 = self._get_all_mutations(field2)
self.assertNotEqual(res1, res2)
def testStepNumMutations(self):
min_length = 10
max_length = 100
step = 3
expected_num_mutations = (max_length - min_length) / step
field = self.cls(value=self.default_value, min_length=min_length, max_length=max_length, unused_bits=7, step=step)
self._check_mutation_count(field, expected_num_mutations)
field.reset()
self._check_mutation_count(field, expected_num_mutations)
def testStepSizes(self):
min_length = 10
max_length = 100
step = 3
field = self.cls(value=self.default_value, min_length=min_length, max_length=max_length, unused_bits=self.default_unused_bits, step=step)
expected_length = min_length
while field.mutate():
rendered = field.render()
self.assertEqual(len(rendered), expected_length)
expected_length += step
def testStepMinNegative(self):
with self.assertRaises(KittyException):
self.cls(value=self.default_value, min_length=-1, max_length=4, step=1)
def testStepMaxNegative(self):
with self.assertRaises(KittyException):
self.cls(value=self.default_value, min_length=-2, max_length=-1, step=1)
def testStepMaxIs0(self):
with self.assertRaises(KittyException):
self.cls(value=self.default_value, min_length=0, max_length=0, step=1)
def testStepMinBiggerThanMax(self):
with self.assertRaises(KittyException):
self.cls(value=self.default_value, min_length=5, max_length=4, step=1)
def testStepNegative(self):
with self.assertRaises(KittyException):
self.cls(value=self.default_value, min_length=1, max_length=5, step=-1)
def testStepRandomness(self):
min_length = 10
max_length = 100
step = 5
field = self.cls(value=self.default_value, min_length=min_length, max_length=max_length, unused_bits=self.default_unused_bits, step=step)
mutations = self._get_all_mutations(field)
self.assertNotEqual(len(set(mutations)), 1)
class RandomBytesTests(ValueTestCase):
__meta__ = False
default_value = 'kitty'
default_value_rendered = Bits(bytes=default_value)
def setUp(self, cls=RandomBytes):
super(RandomBytesTests, self).setUp(cls)
def get_default_field(self, fuzzable=True):
return self.cls(value=self.default_value, min_length=5, max_length=10, fuzzable=fuzzable, name=self.uut_name)
def testNoStepNumMutations(self):
param_num_mutations = 100
field = RandomBytes(value=self.default_value, min_length=10, max_length=20, num_mutations=param_num_mutations)
self._check_mutation_count(field, param_num_mutations)
field.reset()
self._check_mutation_count(field, param_num_mutations)
def testNoStepSizes(self):
min_length = 10
max_length = 100
field = RandomBytes(value=self.default_value, min_length=min_length, max_length=max_length)
while field.mutate():
rendered = field.render().bytes
self.assertGreaterEqual(len(rendered), min_length)
self.assertLessEqual(len(rendered), max_length)
def testNoStepMinNegative(self):
with self.assertRaises(KittyException):
RandomBytes(value=self.default_value, min_length=-1, max_length=4)
def testNoStepMaxNegative(self):
with self.assertRaises(KittyException):
RandomBytes(value=self.default_value, min_length=-2, max_length=-1)
def testNoStepMaxIs0(self):
with self.assertRaises(KittyException):
RandomBytes(value=self.default_value, min_length=0, max_length=0)
def testNoStepMinBiggerThanMax(self):
with self.assertRaises(KittyException):
RandomBytes(value=self.default_value, min_length=5, max_length=4)
def testNoStepRandomness(self):
min_length = 10
max_length = 100
field = RandomBytes(value=self.default_value, min_length=min_length, max_length=max_length)
mutations = self._get_all_mutations(field)
self.assertNotEqual(len(set(mutations)), 1)
def testSeedNotTheSame(self):
min_length = 10
max_length = 100
field1 = RandomBytes(value=self.default_value, seed=11111, min_length=min_length, max_length=max_length)
field2 = RandomBytes(value=self.default_value, seed=22222, min_length=min_length, max_length=max_length)
res1 = self._get_all_mutations(field1)
res2 = self._get_all_mutations(field2)
self.assertNotEqual(res1, res2)
def testStepNumMutations(self):
min_length = 10
max_length = 100
step = 3
expected_num_mutations = (max_length - min_length) / step
field = RandomBytes(value=self.default_value, min_length=min_length, max_length=max_length, step=step)
self._check_mutation_count(field, expected_num_mutations)
field.reset()
self._check_mutation_count(field, expected_num_mutations)
def testStepSizes(self):
min_length = 10
max_length = 100
step = 3
field = RandomBytes(value=self.default_value, min_length=min_length, max_length=max_length, step=step)
expected_length = min_length
while field.mutate():
rendered = field.render().bytes
self.assertEqual(len(rendered), expected_length)
expected_length += step
def testStepMinNegative(self):
with self.assertRaises(KittyException):
RandomBytes(value=self.default_value, min_length=-1, max_length=4, step=1)
def testStepMaxNegative(self):
with self.assertRaises(KittyException):
RandomBytes(value=self.default_value, min_length=-2, max_length=-1, step=1)
def testStepMaxIs0(self):
with self.assertRaises(KittyException):
RandomBytes(value=self.default_value, min_length=0, max_length=0, step=1)
def testStepMinBiggerThanMax(self):
with self.assertRaises(KittyException):
RandomBytes(value=self.default_value, min_length=5, max_length=4, step=1)
def testStepNegative(self):
with self.assertRaises(KittyException):
RandomBytes(value=self.default_value, min_length=1, max_length=5, step=-1)
def testStepRandomness(self):
min_length = 10
max_length = 100
step = 5
field = RandomBytes(value=self.default_value, min_length=min_length, max_length=max_length, step=step)
mutations = self._get_all_mutations(field)
self.assertNotEqual(len(set(mutations)), 1)
class StaticTests(ValueTestCase):
__meta__ = False
default_value = 'kitty'
default_value_rendered = Bits(bytes=default_value)
def setUp(self, cls=Static):
super(StaticTests, self).setUp(cls)
def testNumMutations0(self):
field = Static(value=self.default_value)
num_mutations = field.num_mutations()
self.assertEqual(num_mutations, 0)
self._check_mutation_count(field, num_mutations)
field.reset()
self._check_mutation_count(field, num_mutations)
def get_default_field(self, fuzzable=True):
return Static(value=self.default_value, name=self.uut_name)
class GroupTests(ValueTestCase):
__meta__ = False
default_value = 'group 1'
default_value_rendered = Bits(bytes=default_value)
default_values = [default_value, 'group 2', 'group 3', 'group 4', 'group 5']
def setUp(self, cls=Group):
super(GroupTests, self).setUp(cls)
self.default_values = self.__class__.default_values
def get_default_field(self, fuzzable=True):
return self.cls(values=self.default_values, fuzzable=fuzzable, name=self.uut_name)
def testMutations(self):
field = self.get_default_field()
mutations = self._get_all_mutations(field)
self.assertListEqual([Bits(bytes=x) for x in self.default_values], mutations)
mutations = self._get_all_mutations(field)
self.assertListEqual([Bits(bytes=x) for x in self.default_values], mutations)
class FloatTests(ValueTestCase):
__meta__ = False
default_value = 15.3
default_value_rendered = Bits(bytes=struct.pack('>f', default_value))
def setUp(self):
super(FloatTests, self).setUp(Float)
def bits_to_value(self, bits):
return struct.unpack('>f', bits.tobytes())[0]
class BitFieldTests(ValueTestCase):
__meta__ = False
default_value = 500
default_length = 15
default_value_rendered = Bits('uint:%d=%d' % (default_length, default_value))
def setUp(self, cls=BitField):
super(BitFieldTests, self).setUp(cls)
self.default_length = self.__class__.default_length
def get_rendered_type(self):
return Bits
def get_default_field(self, fuzzable=True):
return self.cls(value=self.default_value, length=self.default_length, fuzzable=fuzzable, name=self.uut_name)
def bits_to_value(self, bits):
'''
BitField returns a tuple, so just give the value...
'''
return bits.uint
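# Illustrative note (not from the original suite): for the default field above,
# render() yields something like Bits('uint:15=500'), and bits_to_value() just
# extracts .uint (500) so the base-class checks can compare plain integers.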
def testLengthNegative(self):
with self.assertRaises(KittyException):
BitField(value=self.default_value, length=-1)
def testLengthZero(self):
with self.assertRaises(KittyException):
BitField(value=self.default_value, length=0)
def testLengthVerySmall(self):
self._base_check(BitField(value=1, length=1))
def testLengthTooSmallForValueSigned(self):
with self.assertRaises(KittyException):
BitField(value=64, length=7, signed=True)
def testLengthTooSmallForValueUnsigned(self):
with self.assertRaises(KittyException):
BitField(value=64, length=6, signed=False)
def testLengthTooSmallForMaxValue(self):
with self.assertRaises(KittyException):
BitField(value=10, length=5, signed=True, max_value=17)
def testLengthVeryLarge(self):
# length=1 here looked like a leftover copy of the "very small" case above; use a genuinely large field
field = BitField(value=1, length=129)
self._base_check(field)
def testLengthNonByteAlignedUnsigned(self):
signed = False
self._base_check(BitField(value=10, length=7, signed=signed))
self._base_check(BitField(value=10, length=14, signed=signed))
self._base_check(BitField(value=10, length=15, signed=signed))
self._base_check(BitField(value=10, length=16, signed=signed))
self._base_check(BitField(value=10, length=58, signed=signed))
self._base_check(BitField(value=10, length=111, signed=signed))
def testLengthNonByteAlignedSigned(self):
signed = True
self._base_check(BitField(value=10, length=7, signed=signed))
self._base_check(BitField(value=10, length=14, signed=signed))
self._base_check(BitField(value=10, length=15, signed=signed))
self._base_check(BitField(value=10, length=16, signed=signed))
self._base_check(BitField(value=10, length=58, signed=signed))
self._base_check(BitField(value=10, length=111, signed=signed))
def testValueNegative(self):
self._base_check(BitField(value=-50, length=7, signed=True))
def _testIntsFromFile(self):
values = [
'0xffffffff',
'-345345',
'123',
'0',
'333',
'56'
]
filename = './kitty_integers.txt'
with open(filename, 'wb') as f:
f.write('\n'.join(values))
self._base_check(BitField(name=self.uut_name, value=1, length=12))
os.remove(filename)
class AlignedBitTests(ValueTestCase):
#
# Ugly? Yes, but this way we avoid errors when they are not needed...
#
__meta__ = True
default_value = 500
default_length = 16
default_value_rendered = Bits('uint:%d=%d' % (default_length, default_value))
def setUp(self, cls=None):
super(AlignedBitTests, self).setUp(cls)
self.default_length = self.__class__.default_length
def get_rendered_type(self):
return Bits
def get_default_field(self, fuzzable=True):
return self.cls(value=self.default_value, fuzzable=fuzzable, name=self.uut_name)
def bits_to_value(self, bits):
return bits.uint
@metaTest
def testMaxValue(self):
max_value = self.default_value + 10
field = self.cls(value=self.default_value, max_value=max_value)
mutations = self._get_all_mutations(field)
for mutation in mutations:
self.assertGreaterEqual(max_value, self.bits_to_value(mutation))
@metaTest
def testMinValue(self):
min_value = self.default_value - 10
field = self.cls(value=self.default_value, min_value=min_value)
mutations = self._get_all_mutations(field)
for mutation in mutations:
self.assertLessEqual(min_value, self.bits_to_value(mutation))
@metaTest
def testMinMaxValue(self):
min_value = self.default_value - 10
max_value = self.default_value + 10
field = self.cls(value=self.default_value, min_value=min_value, max_value=max_value)
mutations = self._get_all_mutations(field)
for mutation in mutations:
self.assertLessEqual(min_value, self.bits_to_value(mutation))
self.assertGreaterEqual(max_value, self.bits_to_value(mutation))
class SignedAlignedBitTests(AlignedBitTests):
__meta__ = True
def bits_to_value(self, bits):
return bits.int
class SInt8Tests(SignedAlignedBitTests):
__meta__ = False
default_value = 50
default_length = 8
default_value_rendered = Bits('int:%d=%d' % (default_length, default_value))
def setUp(self, cls=SInt8):
super(SInt8Tests, self).setUp(cls)
class SInt16Tests(SignedAlignedBitTests):
__meta__ = False
default_value = 0x1000
default_length = 16
default_value_rendered = Bits('int:%d=%d' % (default_length, default_value))
def setUp(self, cls=SInt16):
super(SInt16Tests, self).setUp(cls)
class SInt32Tests(SignedAlignedBitTests):
__meta__ = False
default_value = 0x12345678
default_length = 32
default_value_rendered = Bits('int:%d=%d' % (default_length, default_value))
def setUp(self, cls=SInt32):
super(SInt32Tests, self).setUp(cls)
class SInt64Tests(SignedAlignedBitTests):
__meta__ = False
default_value = 0x1122334455667788
default_length = 64
default_value_rendered = Bits('int:%d=%d' % (default_length, default_value))
def setUp(self, cls=SInt64):
super(SInt64Tests, self).setUp(cls)
class UnsignedAlignedBitTests(AlignedBitTests):
__meta__ = True
def bits_to_value(self, bits):
return bits.uint
class UInt8Tests(UnsignedAlignedBitTests):
__meta__ = False
default_value = 50
default_length = 8
default_value_rendered = Bits('uint:%d=%d' % (default_length, default_value))
def setUp(self, cls=UInt8):
super(UInt8Tests, self).setUp(cls)
class UInt16Tests(UnsignedAlignedBitTests):
__meta__ = False
default_value = 0x1000
default_length = 16
default_value_rendered = Bits('uint:%d=%d' % (default_length, default_value))
def setUp(self, cls=UInt16):
super(UInt16Tests, self).setUp(cls)
class UInt32Tests(UnsignedAlignedBitTests):
__meta__ = False
default_value = 0x12345678
default_length = 32
default_value_rendered = Bits('uint:%d=%d' % (default_length, default_value))
def setUp(self, cls=UInt32):
super(UInt32Tests, self).setUp(cls)
class UInt64Tests(UnsignedAlignedBitTests):
__meta__ = False
default_value = 0x1122334455667788
default_length = 64
default_value_rendered = Bits('uint:%d=%d' % (default_length, default_value))
def setUp(self, cls=UInt64):
super(UInt64Tests, self).setUp(cls)
| gpl-2.0 | -7,294,158,809,536,484,000 | 36.702479 | 155 | 0.657552 | false |
localprojects/Civil-Debate-Wall-Kiosk | dev_tools/embed_generator/embedgen.py | 1 | 1047 | # Little app to generate AS3 image embed code.
# The image will be available from an eponymous static variable, without the file type suffix.
# If it's fed a directory, it will create embed code for every image file in the directory
import os, sys
imageExtensions = ['.jpg', '.png', '.gif']
def printEmbed(filename):
shortname = filename[0:-4]
getname = shortname[0].capitalize() + shortname[1:]
buffer = '[Embed(source = \'/assets/graphics/' + filename + '\')] private static const ' + shortname + 'Class:Class;\n'
buffer += 'public static function get' + getname + '():Bitmap { return new ' + shortname + 'Class() as Bitmap; };\n'
buffer += 'public static const ' + shortname + ':Bitmap = get' + getname + '();\n'
return buffer
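# Example output (illustrative only, for a hypothetical file named 'logo.png'):
# [Embed(source = '/assets/graphics/logo.png')] private static const logoClass:Class;
# public static function getLogo():Bitmap { return new logoClass() as Bitmap; };
# public static const logo:Bitmap = getLogo();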
if (len(sys.argv) > 1):
input = sys.argv[1]
try:
dirList = os.listdir(input)
for file in dirList:
if not os.path.isdir(file):
if os.path.splitext(file)[1] in imageExtensions:
print printEmbed(file)
except:
print printEmbed(input)
else:
print "Need a filename" | gpl-2.0 | -2,185,355,945,077,706,500 | 32.806452 | 122 | 0.659981 | false |
jsevilleja/letsencrypt | letsencrypt.py | 1 | 1902 | #!/usr/bin/env python
import argparse
import logging
import os
import certificate
import acme
import parser
def main():
log = logging.getLogger(__name__)
log.addHandler(logging.StreamHandler())
log.setLevel(logging.INFO)
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("--path", required=True, help="Path where certificate files are/will be stored")
arg_parser.add_argument("--domain", required=True, help="Domain used")
arg_parser.add_argument("--acme-dir", required=True, help="path to the .well-known/acme-challenge/ directory")
args = arg_parser.parse_args()
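# Illustrative invocation (paths and domain are made up, not from the original script):
# python letsencrypt.py --path /etc/myssl --domain example.com --acme-dir /var/www/.well-known/acme-challenge/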
if not os.path.isfile(os.path.join(args.path, "letsencrypt.key")):
key = certificate.createKeyPair()
with open(os.path.join(args.path, "letsencrypt.key"), "wt") as fich:
fich.write(certificate.dumpKey(key))
if not os.path.isfile(os.path.join(args.path, args.domain)+".key"):
key = certificate.createKeyPair()
request = certificate.createCertRequest(key, args.domain)
with open(os.path.join(args.path, args.domain) + ".key", "wt") as fich:
fich.write(certificate.dumpKey(key))
with open(os.path.join(args.path, args.domain) + ".csr", "wt") as fich:
fich.write(certificate.dumpCsr(request))
key = parser.importRSA(os.path.join(args.path, "letsencrypt.key"))
csr = parser.importCSR(os.path.join(args.path, args.domain)+".csr")
acme.register_account(key, log)
challenge = acme.get_challenge(key, args.domain, log)
key_auth = acme.token2file(key, challenge['token'], args.acme_dir)
acme.challenge_done(key, challenge['uri'], key_auth)
acme.wait_verification(challenge['uri'])
result = acme.get_certificate(key, csr)
with open(os.path.join(args.path, args.domain)+".crt", "w") as fich:
fich.write(result)
if __name__ == "__main__":
main()
| gpl-2.0 | 6,048,918,412,103,494,000 | 34.222222 | 114 | 0.666141 | false |
edilio/tobeawebproperty | haweb/apps/core/migrations/0004_auto_20141130_1107.py | 1 | 1265 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import localflavor.us.models
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20141130_1044'),
]
operations = [
migrations.AlterModelOptions(
name='city',
options={'verbose_name_plural': 'Cities'},
),
migrations.AddField(
model_name='tenant',
name='cell_phone',
field=localflavor.us.models.PhoneNumberField(max_length=20, null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='tenant',
name='home_phone',
field=localflavor.us.models.PhoneNumberField(max_length=20, null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='tenant',
name='work_phone',
field=localflavor.us.models.PhoneNumberField(max_length=20, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='zipcode',
name='zip_code',
field=models.CharField(max_length=10),
),
]
| mit | 1,210,994,129,774,920,700 | 29.119048 | 95 | 0.56996 | false |
op3/hdtv | hdtv/plugins/fitlist.py | 1 | 11488 | # -*- coding: utf-8 -*-
# HDTV - A ROOT-based spectrum analysis software
# Copyright (C) 2006-2009 The HDTV development team (see file AUTHORS)
#
# This file is part of HDTV.
#
# HDTV is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# HDTV is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with HDTV; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
Write and Read Fitlist saved in xml format
"""
import os
import glob
import hdtv.cmdline
import hdtv.options
import hdtv.fitxml
import hdtv.ui
import hdtv.util
class FitlistManager(object):
def __init__(self, spectra):
hdtv.ui.debug("Loaded fitlist plugin")
self.spectra = spectra
self.xml = hdtv.fitxml.FitXml(spectra)
self.list = dict()
self.tv = FitlistHDTVInterface(self)
def WriteXML(self, sid, fname=None):
name = self.spectra.dict[sid].name
# remember absolute pathname for later use
fname = os.path.abspath(fname)
self.list[name] = fname
with hdtv.util.open_compressed(fname, mode="wb") as f:
self.xml.WriteFitlist(f, sid)
def ReadXML(
self, sid, fname, calibrate=False, refit=False, interactive=True, associate=True
):
spec = self.spectra.dict[sid]
# remember absolute pathname for later use
fname = os.path.abspath(fname)
if associate:
self.list[spec.name] = fname
else:
self.list.pop(spec.name, None)
with hdtv.util.open_compressed(fname, mode="rb") as f:
self.xml.ReadFitlist(
f,
sid,
calibrate=calibrate,
refit=refit,
interactive=interactive,
fname=fname,
)
def WriteList(self, fname):
lines = list()
listpath = os.path.abspath(fname)
for (spec, xml) in self.list.items():
# create relativ path name
common = os.path.commonprefix([listpath, xml])
xml = xml.replace(common, "")
xml = xml.strip("/")
lines.append(spec + ": " + xml)
text = "\n".join(lines)
with open(fname, "w") as f:
f.write(text)
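# Illustrative file format (example names only, not from the original source):
# WriteList() above emits, and ReadList() below parses, one "spectrum: fitlist" pair
# per line, with the xml path stored relative to the list file itself, e.g.
# 60Co.spc: fits/60Co.xfl
# 152Eu.spc: fits/152Eu.xfl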
def ReadList(self, fname):
with open(fname, "r") as f:
dirname = os.path.dirname(fname)
for linenum, l in enumerate(f):
# Remove comments and whitespace; ignore empty lines
l = l.split("#", 1)[0].strip()
if l == "":
continue
try:
(k, v) = l.split(":", 1)
name = k.strip()
xmlfile = v.strip()
# create valid path from relative pathnames
xmlfile = os.path.join(dirname, xmlfile)
if not os.path.exists(xmlfile):
hdtv.ui.warning("No such file %s" % xmlfile)
continue
sid = None
for ID in self.spectra.ids:
if self.spectra.dict[ID].name == name:
sid = ID
break
if sid is not None:
self.ReadXML(sid, xmlfile)
else:
hdtv.ui.warning("Spectrum %s is not loaded. " % name)
except ValueError:
hdtv.ui.warning(
"Could not parse line %d of file %s: ignored."
% (linenum + 1, fname)
)
class FitlistHDTVInterface(object):
def __init__(self, FitlistIf):
self.FitlistIf = FitlistIf
self.spectra = FitlistIf.spectra
prog = "fit write"
description = "write fits to xml file"
parser = hdtv.cmdline.HDTVOptionParser(prog=prog, description=description)
parser.add_argument(
"-s",
"--spectrum",
action="store",
default="active",
help="for which the fits should be saved (default=active)",
)
parser.add_argument(
"-F",
"--force",
action="store_true",
default=False,
help="overwrite existing files without asking",
)
parser.add_argument(
"filename",
nargs="?",
default=None,
help="""may contain %%s, %%d, %%02d (or other python
format specifier) as placeholder for spectrum id""",
)
hdtv.cmdline.AddCommand(prog, self.FitWrite, fileargs=True, parser=parser)
prog = "fit read"
description = "read fits from xml file"
parser = hdtv.cmdline.HDTVOptionParser(prog=prog, description=description)
parser.add_argument(
"-s",
"--spectrum",
action="store",
default="active",
help="spectra to which the fits should be added (default=active)",
)
parser.add_argument(
"-r",
"--refit",
action="store_true",
default=False,
help="Force refitting during load",
)
parser.add_argument(
"-c",
"--calibrate",
action="store_true",
default=False,
help="Apply the stored calibration to the loaded spectrum",
)
parser.add_argument(
"-n",
"--no-associate",
action="store_true",
default=False,
help="""Do not remeber the filename of the fitlist file,
i.e. `fit write` will not write to the original fitlist file,
but create a new one according to the name of the spectrum.
Useful for reusing fits from a different spectrum.""",
)
parser.add_argument(
"filename",
nargs="+",
help="""may contain %%s, %%d, %%02d (or other python
format specifier) as placeholder for spectrum id""",
)
hdtv.cmdline.AddCommand(prog, self.FitRead, fileargs=True, parser=parser)
prog = "fit getlists"
description = "reads fitlists according to the list saved in a file"
parser = hdtv.cmdline.HDTVOptionParser(prog=prog, description=description)
parser.add_argument("filename", default=None)
hdtv.cmdline.AddCommand(prog, self.FitGetlists, fileargs=True, parser=parser)
prog = "fit savelists"
description = (
"saves a list of spectrum names and corresponding fitlist files to file"
)
parser = hdtv.cmdline.HDTVOptionParser(prog=prog, description=description)
parser.add_argument(
"-F",
"--force",
action="store_true",
default=False,
help="overwrite existing files without asking",
)
parser.add_argument("filename", metavar="output-file", default=None)
hdtv.cmdline.AddCommand(prog, self.FitSavelists, fileargs=True, parser=parser)
def FitWrite(self, args):
"""
Saving a fitlist as xml
"""
# TODO: this needs urgent cleanup and testing, especially for the saving
# of fitlists from multiple spectra, but I'm really in a hurry now. Sorry. Ralf
# get spectrum
sids = hdtv.util.ID.ParseIds(args.spectrum, __main__.spectra)
if len(sids) == 0:
raise hdtv.cmdline.HDTVCommandError("There is no active spectrum")
if len(sids) > 1:
# TODO: Check if placeholder character is present in filename and
# warn if not
pass
# raise hdtv.cmdline.HDTVCommandError("Can only save fitlist of one spectrum")
for sid in sids:
# sid = sids[0]
# get filename
if args.filename is None:
name = self.spectra.dict[sid].name
try:
fname = self.FitlistIf.list[name]
except KeyError:
(base, ext) = os.path.splitext(name)
fname = base + "." + hdtv.options.Get("fit.list.default_extension")
else:
fname = os.path.expanduser(args.filename)
# Try to replace placeholder "%s" in filename with specid
try:
fname = fname % sid
except TypeError: # No placeholder found
# TODO: do something sensible here... Luckily hdtv will not
# overwrite spectra without asking...
pass
hdtv.ui.msg("Saving fits of spectrum %d to %s" % (sid, fname))
if hdtv.util.user_save_file(fname, args.force):
self.FitlistIf.WriteXML(sid, fname)
def FitRead(self, args):
"""
reading a fitlist from xml
"""
fnames = dict() # Filenames for each spectrum ID
sids = hdtv.util.ID.ParseIds(args.spectrum, __main__.spectra)
if len(sids) == 0:
raise hdtv.cmdline.HDTVCommandError("There is no active spectrum")
# Build list of files to load for each spectrum
for sid in sids:
fnames[sid] = list() # Filenames for this spectrum ID
for fname_raw in args.filename:
try:
# Try to replace format placeholder (e.g. %s) with spectrum
# ID
fname = fname_raw % sid
except TypeError: # No placeholder found
fname = fname_raw
fname = os.path.expanduser(fname)
more = glob.glob(fname)
if len(more) == 0:
hdtv.ui.warning("No such file %s" % fname)
fnames[sid].extend(more)
# Load files
for sid in sids:
for fname in fnames[sid]:
hdtv.ui.msg("Reading fitlist %s to spectrum %s" % (fname, sid))
self.FitlistIf.ReadXML(
sid,
fname,
calibrate=args.calibrate,
refit=args.refit,
associate=(not args.no_associate),
)
def FitSavelists(self, args):
if hdtv.util.user_save_file(args.filename, args.force):
self.FitlistIf.WriteList(args.filename)
def FitGetlists(self, args):
fname = glob.glob(os.path.expanduser(args.filename))
if len(fname) > 1:
raise hdtv.cmdline.HDTVCommandError("More than 1 files match the pattern")
fname = fname[0]
if not os.path.exists(fname):
raise hdtv.cmdline.HDTVCommandError("No such file %s" % fname)
self.FitlistIf.ReadList(fname)
hdtv.options.RegisterOption(
"fit.list.default_extension", hdtv.options.Option(default="xfl")
)
import __main__
fitxml = FitlistManager(__main__.spectra)
hdtv.cmdline.RegisterInteractive("fitxml", fitxml)
| gpl-2.0 | 5,580,118,264,738,536,000 | 35.938907 | 97 | 0.544829 | false |
vkris/pyzookeeper | setup.py | 1 | 1265 | #!/usr/bin/env python
from setuptools import setup, find_packages
import os, re
PKG='pyzookeeper'
VERSIONFILE = os.path.join('pyzookeeper', '_version.py')
verstr = "unknown"
try:
verstrline = open(VERSIONFILE, "rt").read()
except EnvironmentError:
pass # Okay, there is no version file.
else:
MVSRE = r"^manual_verstr *= *['\"]([^'\"]*)['\"]"
mo = re.search(MVSRE, verstrline, re.M)
if mo:
mverstr = mo.group(1)
else:
print "unable to find version in %s" % (VERSIONFILE,)
raise RuntimeError("if %s.py exists, it must be well-formed" % (VERSIONFILE,))
AVSRE = r"^auto_build_num *= *['\"]([^'\"]*)['\"]"
mo = re.search(AVSRE, verstrline, re.M)
if mo:
averstr = mo.group(1)
else:
averstr = ''
verstr = '.'.join([mverstr, averstr])
setup(name=PKG,
version=verstr,
description="library for accessing zookeeper",
author="Vivek Krishna",
author_email="[email protected]",
url="http://github.com/vkris/pyzookeeper/",
packages = ['pyzookeeper'],
install_requires = ['zc-zookeeper-static'],
license = "GS License",
keywords="zookeeper",
zip_safe = True,
test_suite="tests")
# tests_require=['coverage', 'mock'])
| mit | -3,459,132,697,945,531,000 | 29.853659 | 86 | 0.600791 | false |
habitam/habitam-core | habitam/ui/license_filter.py | 1 | 2330 | '''
This file is part of Habitam.
Habitam is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Habitam is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with Habitam. If not, see
<http://www.gnu.org/licenses/>.
Created on Apr 12, 2013
@author: Stefan Guna
'''
from datetime import date
from django.http.response import Http404
from django.shortcuts import redirect
from habitam.licensing.models import Administrator, License
def _license_valid(user):
l = user.administrator.license
return date.today() < l.valid_until
def building_accessible(request, building, enforce_admin=False):
try:
l = request.user.administrator.license
if l.buildings.filter(pk=building.id):
return
if enforce_admin:
raise Http404
except Administrator.DoesNotExist:
if enforce_admin:
raise Http404
if not building.owned_apartments(request.user.email):
raise Http404
def entity_accessible(request, entity_cls, entity, enforce_admin=False):
try:
l = request.user.administrator.license
qs = getattr(l, entity_cls.LicenseMeta.license_collection)()
if qs.filter(pk=entity.id):
return
if enforce_admin:
raise Http404
except Administrator.DoesNotExist:
if enforce_admin:
raise Http404
l = getattr(License, entity_cls.LicenseMeta.license_accessor)(entity)
buildings = l.available_buildings()
for building in buildings:
if building.owned_apartments(request.user.email):
return
raise Http404
class LicenseFilter(object):
def process_request(self, request):
try:
if _license_valid(request.user):
return None
except:
if request.method == 'GET':
return None
return redirect('license_expired')
| agpl-3.0 | 4,562,693,984,723,469,300 | 29.671053 | 73 | 0.682403 | false |
goblinhack/MundusMeus | python/biome_land.py | 1 | 10123 | import random
import mm
import thing
import biome_land_do
import tp
import game
def biome_build(self, seed=0):
while True:
random.seed(self.biome_seed)
self.biome_seed += 1
self.biome_seed *= self.biome_seed
self.biome = biome_land_do.Biome(chunk=self)
if not self.biome.generate_failed:
break
if False:
self.biome.dump()
def biome_populate(self):
c = self
m = c.biome
#
# Get the feel of the chunk for weather effects
#
snow_count = 0
grass_count = 0
water_count = 0
poor_quality_ground_count = 0
for y in range(0, mm.CHUNK_HEIGHT):
for x in range(0, mm.CHUNK_WIDTH):
if m.is_snow_at(x, y):
snow_count += 1
if m.is_ice_at(x, y):
snow_count += 1
if m.is_grass_at(x, y):
grass_count += 1
if m.is_water_at(x, y):
water_count += 1
if m.is_dirt_at(x, y):
poor_quality_ground_count += 1
if m.is_gravel_at(x, y):
poor_quality_ground_count += 1
threshold = mm.CHUNK_WIDTH * mm.CHUNK_HEIGHT
c.is_snowy = False
if snow_count > threshold / 4:
c.is_snowy = True
c.is_grassy = False
if grass_count > threshold / 2:
c.is_grassy = True
if poor_quality_ground_count > threshold / 2:
c.is_grassy = True
c.is_watery = False
if water_count > threshold / 2:
c.is_watery = True
is_poor_soil = False
if grass_count < threshold / 2:
is_poor_soil = True
if c.is_snowy:
grass_str = "grass_snow"
dirt_str = "dirt_snow"
sand_str = "sand_snow"
else:
grass_str = "grass"
dirt_str = "dirt"
sand_str = "sand"
for y in range(0, mm.CHUNK_HEIGHT):
for x in range(0, mm.CHUNK_WIDTH):
tx = x + self.base_x
ty = y + self.base_y
grass = False
if m.is_grass_at(x, y):
grass = True
dirt = False
if m.is_dirt_at(x, y):
if random.randint(0, 10000) < 2:
t = thing.Thing(chunk=c, x=tx, y=ty, tp_name="torch1")
t.push()
dirt = True
sand = False
if m.is_sand_at(x, y):
sand = True
gravel = False
if m.is_gravel_at(x, y):
gravel = True
snow = False
if m.is_snow_at(x, y):
snow = True
ice = False
if m.is_ice_at(x, y):
ice = True
road = False
if m.is_road_at(x, y):
road = True
pushed = False  # becomes True once a terrain/feature tile has been pushed for this cell
if m.is_dungeon_at(x, y):
if c.is_snowy:
r = tp.get_random_dungeon_snow()
t = thing.Thing(chunk=c, x=tx, y=ty, tp_name=r.name)
t.push()
pushed = True
else:
r = tp.get_random_dungeon()
t = thing.Thing(chunk=c, x=tx, y=ty, tp_name=r.name)
t.push()
pushed = True
if m.is_tree_at(x, y):
if c.is_snowy:
r = tp.get_random_tree_snow()
t = thing.Thing(chunk=c, x=tx, y=ty, tp_name=r.name)
t.push()
pushed = True
elif is_poor_soil:
r = tp.get_random_tree_conifer()
t = thing.Thing(chunk=c, x=tx, y=ty, tp_name=r.name)
t.push()
pushed = True
else:
for i in range(1, random.randint(1, 10)):
r = tp.get_random_tree()
t = thing.Thing(chunk=c, x=tx, y=ty,
tp_name=r.name)
t.push()
pushed = True
if grass:
t = thing.Thing(chunk=c, x=tx, y=ty, tp_name=grass_str + "")
t.push()
pushed = True
if not c.is_snowy:
if m.is_water_at(x - 1, y) or \
m.is_water_at(x + 1, y) or \
m.is_water_at(x, y - 1) or \
m.is_water_at(x, y + 1):
r = tp.get_random_marsh_plant()
for i in range(1, random.randint(1, 5)):
t = thing.Thing(chunk=c, x=tx, y=ty,
tp_name=r.name)
t.push()
if random.randint(0, 1000) < 10:
r = tp.get_random_plant()
t = thing.Thing(chunk=c, x=tx, y=ty,
tp_name=r.name)
t.push()
else:
if random.randint(0, 100) < 10:
r = tp.get_random_plant()
if random.randint(0, 100) < 10:
for i in range(1, random.randint(1, 15)):
t = thing.Thing(chunk=c, x=tx, y=ty,
tp_name=r.name)
t.push()
else:
for i in range(1, random.randint(1, 5)):
t = thing.Thing(chunk=c, x=tx, y=ty,
tp_name=r.name)
t.push()
if dirt:
t = thing.Thing(chunk=c, x=tx, y=ty, tp_name=dirt_str + "")
t.push()
pushed = True
if random.randint(0, 200) < 5:
r = tp.get_random_plant()
for i in range(1, random.randint(1, 5)):
t = thing.Thing(chunk=c, x=tx, y=ty,
tp_name=r.name)
t.push()
if sand:
t = thing.Thing(chunk=c, x=tx, y=ty, tp_name=sand_str + "")
t.push()
pushed = True
if not grass:
if random.randint(0, 1000) < 5:
r = tp.get_random_small_rock()
t = thing.Thing(chunk=c, x=tx, y=ty,
tp_name=r.name)
t.push()
if gravel:
if c.is_snowy:
t = thing.Thing(chunk=c, x=tx, y=ty, tp_name="gravel_snow")
t.push()
pushed = True
else:
t = thing.Thing(chunk=c, x=tx, y=ty, tp_name="gravel")
t.push()
pushed = True
if random.randint(0, 1000) < 50:
r = tp.get_random_small_rock()
t = thing.Thing(chunk=c, x=tx, y=ty, tp_name=r.name)
t.push()
pushed = True
if road:
if c.is_snowy:
t = thing.Thing(chunk=c, x=tx, y=ty, tp_name="road_snow")
t.push()
pushed = True
else:
t = thing.Thing(chunk=c, x=tx, y=ty, tp_name="road")
t.push()
pushed = True
if ice:
t = thing.Thing(chunk=c, x=tx, y=ty, tp_name="ice")
t.push()
pushed = True
if snow:
t = thing.Thing(chunk=c, x=tx, y=ty, tp_name="snow")
t.push()
pushed = True
if random.randint(0, 1000) < 50:
r = tp.get_random_snow_mound()
t = thing.Thing(chunk=c, x=tx, y=ty, tp_name=r.name)
t.push()
if m.is_dungeon_way_up_at(x, y):
if game.g.player is None:
#
# Create the player on the active central chunk
#
if self.cx == 1 and self.cy == 1:
t = thing.Thing(chunk=c, x=tx, y=ty,
tp_name="player1")
t.push()
pushed = True
game.g.player = t
if m.is_rock_at(x, y):
if c.is_snowy:
t = thing.Thing(chunk=c, x=tx, y=ty,
tp_name="landrock_snow")
t.push()
pushed = True
else:
t = thing.Thing(chunk=c, x=tx, y=ty,
tp_name="landrock")
t.push()
pushed = True
if m.is_water_at(x, y):
water = "water2"
put_treasure = False
#
# Underground water
#
t = thing.Thing(chunk=c, x=tx, y=ty, tp_name=water)
t.push()
pushed = True
if put_treasure:
toughness = 1
r = tp.get_random_minable_treasure(toughness=toughness)
t = thing.Thing(chunk=c, x=tx, y=ty, tp_name=r.name)
t.push()
if m.is_treasure_at(x, y):
toughness = c.where.z
r = tp.get_random_treasure(toughness=1)
t = thing.Thing(chunk=c, x=tx, y=ty, tp_name=r.name)
t.push()
pushed = True
if not pushed:
if c.is_snowy:
t = thing.Thing(chunk=c, x=tx, y=ty, tp_name="gravel_snow")
t.push()
else:
t = thing.Thing(chunk=c, x=tx, y=ty, tp_name="gravel")
t.push()
| lgpl-3.0 | -2,133,161,764,213,903,600 | 31.03481 | 79 | 0.378149 | false |
hyperkitty/kittystore | kittystore/storm/schema/patch_8.py | 1 | 1769 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from .utils import get_db_type
SQL = {
"sqlite": ["""
CREATE TABLE "category" (
id INTEGER NOT NULL,
name VARCHAR(255) NOT NULL,
PRIMARY KEY (id)
);""",
'ALTER TABLE "thread" ADD COLUMN category_id INTEGER;',
'CREATE UNIQUE INDEX "ix_category_name" ON "category" (name);',
],
"postgres": ["""
CREATE TABLE "category" (
id INTEGER NOT NULL,
name VARCHAR(255) NOT NULL,
PRIMARY KEY (id)
);""", """
CREATE SEQUENCE category_id_seq
START WITH 1
INCREMENT BY 1
NO MAXVALUE
NO MINVALUE
CACHE 1
;""",
"ALTER SEQUENCE category_id_seq OWNED BY category.id;",
"ALTER TABLE ONLY category ALTER COLUMN id SET DEFAULT nextval('category_id_seq'::regclass);",
'ALTER TABLE "thread" ADD COLUMN category_id INTEGER;',
'ALTER TABLE "thread" ADD FOREIGN KEY (category_id) REFERENCES category(id);',
'CREATE UNIQUE INDEX "ix_category_name" ON "category" (name);',
],
"mysql": ["""
CREATE TABLE `category` (
id INTEGER NOT NULL AUTO_INCREMENT,
name VARCHAR(255) NOT NULL,
PRIMARY KEY (id)
);""",
'ALTER TABLE `thread` ADD COLUMN category_id INTEGER;',
'ALTER TABLE `thread` ADD FOREIGN KEY (category_id) REFERENCES category(id);',
'CREATE UNIQUE INDEX `ix_category_name` ON `category` (name);',
],
}
def apply(store):
"""Add the category table"""
dbtype = get_db_type(store)
for statement in SQL[dbtype]:
store.execute(statement)
store.commit()
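# Illustrative usage (assumed, not part of the original module): the schema
# upgrader is expected to call apply(store) with an open Storm store, and
# get_db_type(store) picks the matching SQL variant from the dict above.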
| gpl-3.0 | -6,043,101,372,787,932,000 | 31.163636 | 102 | 0.553985 | false |
zooyalove/pyfax | include/models/DynamicConfig.py | 1 | 2651 | import pymongo
from . import MongoDB
class DynamicConfig(MongoDB):
"""DynamicConfig class
"""
def __init__(self):
super(DynamicConfig, self).__init__()
self._collection = self._db['dynconfs']
self.dynconf_id = None
self.device = None
self.callid = None
def close(self):
self.dynconf_id = None
self.device = None
self.callid = None
def get_dynconf_id(self):
"""get_dynconf_id() -> ObjectId"""
return self.dynconf_id
def get_device(self):
"""get_device() -> string"""
return self.device
def get_callid(self):
"""get_callid() -> string"""
return self.callid
def lookup(self, device, callid):
"""lookup(device, callid) -> Boolean"""
docs = self._collection.find({"callid": callid})
if docs:
for row in docs:
if not row['device'] or row['device'] == device:
return True
else:
return False
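# Illustrative usage (device and caller-id values are made up):
# dc = DynamicConfig()
# if dc.lookup('ttyIAX0', '5551234'):
#     ...a dynamic configuration rule matches this device/callid pair...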
def list_rules(self):
"""list_rules() -> Cursor"""
return self._collection.find({}, projection={'_id': True, 'device': True, 'callid': True}, sort=[('callid', pymongo.ASCENDING)])
def remove(self, dc_id):
res = self._collection.delete_one({'_id': dc_id})
if res.deleted_count > 0:
return True
return False
def create(self, device, callid):
self.callid = callid
self.device = device
rule = {'$and': [{'callid': callid}, {'device': device}]}
if self._collection.find_one(rule):
self.set_error("Rule exists")
return False
result = self._collection.insert_one({'callid': callid, 'device': device})
if result:
self.dynconf_id = result.inserted_id
return True
self.set_error("Rule not created")
return False
def load_rule(self, dc_id):
if not dc_id:
self.set_error("DynConf not selected")
return False
data = self._collection.find_one({'_id': dc_id})
if data:
self.dynconf_id = data['_id']
self.device = data['device']
self.callid = data['callid']
return True
self.set_error("Rule" + dc_id + " doesn't exist")
return False
def save_rule(self, device, callid):
if not self.dynconf_id:
self.set_error("DynConf not loaded")
return False
self.device = device
self.callid = callid
return self._collection.update_one({'_id': self.dynconf_id}, {'$set': {'device': device, 'callid': callid}})
| mit | 5,169,615,109,688,537,000 | 26.905263 | 134 | 0.540174 | false |
hahaps/openstack-project-generator | template/<project_name>/cmd/manage.py | 1 | 13477 | #!/usr/bin/env python
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Interactive shell based on Django:
#
# Copyright (c) 2005, the Lawrence Journal-World
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Django nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
CLI interface for <project_name> management.
"""
from __future__ import print_function
import os
import sys
from oslo_config import cfg
from oslo_db.sqlalchemy import migration
from oslo_log import log as logging
from <project_name> import i18n
i18n.enable_lazy()
# Need to register global_opts
from <project_name>.common import config # noqa
from <project_name> import context
from <project_name> import db
from <project_name>.db import migration as db_migration
from <project_name>.db.sqlalchemy import api as db_api
from <project_name>.i18n import _
from <project_name> import objects
from <project_name> import version
CONF = cfg.CONF
# Decorators for actions
def args(*args, **kwargs):
def _decorator(func):
func.__dict__.setdefault('args', []).insert(0, (args, kwargs))
return func
return _decorator
class ShellCommands(object):
def bpython(self):
"""Runs a bpython shell.
Falls back to Ipython/python shell if unavailable
"""
self.run('bpython')
def ipython(self):
"""Runs an Ipython shell.
Falls back to Python shell if unavailable
"""
self.run('ipython')
def python(self):
"""Runs a python shell.
Falls back to Python shell if unavailable
"""
self.run('python')
@args('--shell', dest="shell",
metavar='<bpython|ipython|python>',
help='Python shell')
def run(self, shell=None):
"""Runs a Python interactive interpreter."""
if not shell:
shell = 'bpython'
if shell == 'bpython':
try:
import bpython
bpython.embed()
except ImportError:
shell = 'ipython'
if shell == 'ipython':
try:
from IPython import embed
embed()
except ImportError:
try:
# Ipython < 0.11
# Explicitly pass an empty list as arguments, because
# otherwise IPython would use sys.argv from this script.
import IPython
shell = IPython.Shell.IPShell(argv=[])
shell.mainloop()
except ImportError:
# no IPython module
shell = 'python'
if shell == 'python':
import code
try:
# Try activating rlcompleter, because it's handy.
import readline
except ImportError:
pass
else:
# We don't have to wrap the following import in a 'try',
# because we already know 'readline' was imported successfully.
import rlcompleter # noqa
readline.parse_and_bind("tab:complete")
code.interact()
@args('--path', required=True, help='Script path')
def script(self, path):
"""Runs the script from the specified path with flags set properly."""
exec(compile(open(path).read(), path, 'exec'), locals(), globals())
def _db_error(caught_exception):
print('%s' % caught_exception)
print(_("The above error may show that the database has not "
"been created.\nPlease create a database using "
"'<project_name>-manage db sync' before running this command."))
exit(1)
class DbCommands(object):
"""Class for managing the database."""
def __init__(self):
pass
@args('version', nargs='?', default=None,
help='Database version')
def sync(self, version=None):
"""Sync the database up to the most recent version."""
return db_migration.db_sync(version)
def version(self):
"""Print the current database version."""
print(migration.db_version(db_api.get_engine(),
db_migration.MIGRATE_REPO_PATH,
db_migration.INIT_VERSION))
@args('age_in_days', type=int,
help='Purge deleted rows older than age in days')
def purge(self, age_in_days):
"""Purge deleted rows older than a given age from <project_name> tables."""
age_in_days = int(age_in_days)
if age_in_days <= 0:
print(_("Must supply a positive, non-zero value for age"))
exit(1)
ctxt = context.get_admin_context()
db.purge_deleted_rows(ctxt, age_in_days)
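# Illustrative CLI call (assumed from the CATEGORIES mapping below):
# <project_name>-manage db purge 30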
class VersionCommands(object):
"""Class for exposing the codebase version."""
def __init__(self):
pass
def list(self):
print(version.version_string())
def __call__(self):
self.list()
class ConfigCommands(object):
"""Class for exposing the flags defined by flag_file(s)."""
def __init__(self):
pass
@args('param', nargs='?', default=None,
help='Configuration parameter to display (default: %(default)s)')
def list(self, param=None):
"""List parameters configured for <project_name>.
Lists all parameters configured for <project_name> unless an optional argument
is specified. If the parameter is specified we only print the
requested parameter. If the parameter is not found an appropriate
error is produced by .get*().
"""
param = param and param.strip()
if param:
print('%s = %s' % (param, CONF.get(param)))
else:
for key, value in CONF.items():
print('%s = %s' % (key, value))
class GetLogCommands(object):
"""Get logging information."""
def errors(self):
"""Get all of the errors from the log files."""
error_found = 0
if CONF.log_dir:
logs = [x for x in os.listdir(CONF.log_dir) if x.endswith('.log')]
for file in logs:
log_file = os.path.join(CONF.log_dir, file)
lines = [line.strip() for line in open(log_file, "r")]
lines.reverse()
print_name = 0
for index, line in enumerate(lines):
if line.find(" ERROR ") > 0:
error_found += 1
if print_name == 0:
print(log_file + ":-")
print_name = 1
print(_("Line %(dis)d : %(line)s") %
{'dis': len(lines) - index, 'line': line})
if error_found == 0:
print(_("No errors in logfiles!"))
@args('num_entries', nargs='?', type=int, default=10,
help='Number of entries to list (default: %(default)d)')
def syslog(self, num_entries=10):
"""Get <num_entries> of the <project_name> syslog events."""
entries = int(num_entries)
count = 0
log_file = ''
if os.path.exists('/var/log/syslog'):
log_file = '/var/log/syslog'
elif os.path.exists('/var/log/messages'):
log_file = '/var/log/messages'
else:
print(_("Unable to find system log file!"))
sys.exit(1)
lines = [line.strip() for line in open(log_file, "r")]
lines.reverse()
print(_("Last %s <project_name> syslog entries:-") % (entries))
for line in lines:
if line.find("<project_name>") > 0:
count += 1
print(_("%s") % (line))
if count == entries:
break
if count == 0:
print(_("No <project_name> entries in syslog!"))
CATEGORIES = {
'config': ConfigCommands,
'db': DbCommands,
'logs': GetLogCommands,
'shell': ShellCommands,
'version': VersionCommands
}
def methods_of(obj):
"""Return non-private methods from an object.
Get all callable methods of an object that don't start with underscore
:return: a list of tuples of the form (method_name, method)
"""
result = []
for i in dir(obj):
if callable(getattr(obj, i)) and not i.startswith('_'):
result.append((i, getattr(obj, i)))
return result
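# Illustrative example (not in the original file): methods_of(VersionCommands())
# returns [('list', <bound method list>)], which add_command_parsers() below
# turns into the "version list" subcommand.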
def add_command_parsers(subparsers):
for category in CATEGORIES:
command_object = CATEGORIES[category]()
parser = subparsers.add_parser(category)
parser.set_defaults(command_object=command_object)
category_subparsers = parser.add_subparsers(dest='action')
for (action, action_fn) in methods_of(command_object):
parser = category_subparsers.add_parser(action)
action_kwargs = []
for args, kwargs in getattr(action_fn, 'args', []):
parser.add_argument(*args, **kwargs)
parser.set_defaults(action_fn=action_fn)
parser.set_defaults(action_kwargs=action_kwargs)
category_opt = cfg.SubCommandOpt('category',
title='Command categories',
handler=add_command_parsers)
def get_arg_string(args):
arg = None
if args[0] == '-':
# (Note)zhiteng: args starts with FLAGS.oparser.prefix_chars
# is optional args. Notice that cfg module takes care of
# actual ArgParser so prefix_chars is always '-'.
if args[1] == '-':
# This is long optional arg
arg = args[2:]
else:
arg = args[1:]
else:
arg = args
return arg
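# For example (illustrative): get_arg_string('--age_in_days') -> 'age_in_days',
# get_arg_string('-s') -> 's', and a positional name such as 'version' is returned
# unchanged so fetch_func_args() below can look it up on CONF.category.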
def fetch_func_args(func):
fn_args = []
for args, kwargs in getattr(func, 'args', []):
arg = get_arg_string(args[0])
fn_args.append(getattr(CONF.category, arg))
return fn_args
def main():
"""Parse options and call the appropriate class/method."""
objects.register_all()
CONF.register_cli_opt(category_opt)
script_name = sys.argv[0]
if len(sys.argv) < 2:
print(_("\nOpenStack <Project_name> version: %(version)s\n") %
{'version': version.version_string()})
print(script_name + " category action [<args>]")
print(_("Available categories:"))
for category in CATEGORIES:
print(_("\t%s") % category)
sys.exit(2)
try:
CONF(sys.argv[1:], project='<project_name>',
version=version.version_string())
logging.setup(CONF, "<project_name>")
except cfg.ConfigDirNotFoundError as details:
print(_("Invalid directory: %s") % details)
sys.exit(2)
except cfg.ConfigFilesNotFoundError:
cfgfile = CONF.config_file[-1] if CONF.config_file else None
if cfgfile and not os.access(cfgfile, os.R_OK):
st = os.stat(cfgfile)
print(_("Could not read %s. Re-running with sudo") % cfgfile)
try:
os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv)
except Exception:
print(_('sudo failed, continuing as if nothing happened'))
print(_('Please re-run <project_name>-manage as root.'))
sys.exit(2)
fn = CONF.category.action_fn
fn_args = fetch_func_args(fn)
fn(*fn_args)
| apache-2.0 | -5,231,390,574,427,992,000 | 32.776942 | 86 | 0.592936 | false |
onshape-public/onshape-clients | python/onshape_client/oas/models/bt_default_unit_info.py | 1 | 4695 | # coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
class BTDefaultUnitInfo(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"key": (str,), # noqa: E501
"value": (str,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"key": "key", # noqa: E501
"value": "value", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""bt_default_unit_info.BTDefaultUnitInfo - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
key (str): [optional] # noqa: E501
value (str): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
| mit | 1,394,035,064,264,031,000 | 31.157534 | 79 | 0.570394 | false |
frqnck/apix-docs | conf.py | 1 | 8291 | # -*- coding: utf-8 -*-
#
# APIx documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 12 13:45:25 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#extensions = ['sensio.sphinx.php', 'sensio.sphinx.refinclude', 'sensio.sphinx.configurationblock', 'sensio.sphinx.phpcode']
extensions = ['sphinxcontrib.phpdomain']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'APIx Manual & Documentation'
copyright = u'2014, Franck Cassedanne'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3.6'
# The full version, including alpha/beta/rc tags.
release = '0.3.6'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '*_OFF']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'APIxdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'APIx.tex', u'APIx Documentation',
u'Franck Cassedanne', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'apix', u'APIx Documentation',
[u'Franck Cassedanne'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'APIx', u'APIx Documentation',
u'Franck Cassedanne', 'APIx', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# enable highlighting for PHP code not between ``<?php ... ?>`` by default
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True)
# use PHP as the primary domain
primary_domain = 'php'
# The default language to highlight source code in.
highlight_language = 'php'
| bsd-3-clause | 6,634,593,186,116,264,000 | 31.2607 | 124 | 0.705946 | false |
d-e-s-o/copyright | copyright/src/deso/copyright/range.py | 1 | 3676 | # range.py
#/***************************************************************************
# * Copyright (C) 2015 Daniel Mueller ([email protected]) *
# * *
# * This program is free software: you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation, either version 3 of the License, or *
# * (at your option) any later version. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU General Public License for more details. *
# * *
# * You should have received a copy of the GNU General Public License *
# * along with this program. If not, see <http://www.gnu.org/licenses/>. *
# ***************************************************************************/
"""Functionality for handling ranges of years."""
from collections import (
namedtuple,
)
# The character separating two years in a range.
YEAR_SEPARATOR = "-"
class Range(namedtuple("Range", ["first", "last"])):
"""A class representing a time span between (but including) to years.
A range is a tuple (first, last) of two years that mark a time span. Single
years are represented as a tuple with the first year equal to the last
year. Being a tuple, a Range is immutable.
"""
def __new__(cls, first, last):
"""Create a new instance of Range."""
if first > last:
error = "First year ({first}) is greater than second year ({last})"
error = error.format(first=first, last=last)
raise ValueError(error)
return tuple.__new__(cls, (first, last))
def __str__(self):
"""Convert a range into a string."""
if self.first == self.last:
return "%d" % self.first
else:
return "%d%s%d" % (self.first, YEAR_SEPARATOR, self.last)
def __contains__(self, year):
"""Check whether a year is contained in a range."""
    return self.first <= year <= self.last
def __compare__(self, range_):
"""Compare two ranges."""
return self.first - range_.first or self.last - range_.last
def extendedBy(self, year):
"""Check whether a year extends a given range.
Note that for simplicity only upper extension is considered, that is, a
year smaller than the given range is never considered to extend it. By
only working on a sorted list of years this property can be satisfied
trivially.
"""
return year == self.last + 1
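  # Illustrative examples for extendedBy() (not part of the original module):
  #   Range(2010, 2012).extendedBy(2013)  -> True   (directly follows the range)
  #   Range(2010, 2012).extendedBy(2012)  -> False  (already contained)
  #   Range(2010, 2012).extendedBy(2009)  -> False  (lower extensions are ignored)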
@staticmethod
def parse(string):
"""Parse a range from a string."""
try:
# A range of years can be represented in two ways. If it is a single
# year, we can convert it into an integer directly.
year = int(string)
return Range(year, year)
except ValueError:
# If this cast did not work we must have gotten a "true" range, e.g.,
# 2010-2012.
try:
# We might not have enough elements in the list, too many, or we might
# again fail to convert them to integers. In any case a ValueError is
# raised.
first, last = list(map(int, string.split(YEAR_SEPARATOR)))
except ValueError:
raise ValueError("Not a valid range: \"%s\"" % string)
return Range(first, last)
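  # Illustrative examples for parse() (not part of the original module):
  #   Range.parse("2014")       -> Range(first=2014, last=2014)
  #   Range.parse("2010-2012")  -> Range(first=2010, last=2012)
  #   Range.parse("2012-2010")  -> raises ValueError (first year greater than last)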
| gpl-3.0 | -8,904,273,902,667,896,000 | 37.291667 | 79 | 0.564744 | false |
rev112/pyope | tests/test_stat.py | 1 | 1903 | import itertools
import random
import pytest
from pyope.hgd import HGD
from pyope.ope import ValueRange
from pyope.stat import sample_uniform
def test_uniform():
# Short ranges
value = 10
unit_range = ValueRange(value, value)
assert sample_uniform(unit_range, []) == value
short_range = ValueRange(value, value + 1)
assert sample_uniform(short_range, [0]) == value
assert sample_uniform(short_range, [1]) == value + 1
assert sample_uniform(short_range, [0, 0, 1, 0, 'llama']) == value, "More bits yield no effect"
with pytest.raises(Exception):
sample_uniform(short_range, [])
# Medium ranges
start_range = 20
end_range = start_range + 15
range1 = ValueRange(start_range, start_range + 15)
assert sample_uniform(range1, [0, 0, 0, 0]) == start_range
assert sample_uniform(range1, [0, 0, 0, 1]) == start_range + 1
assert sample_uniform(range1, [1, 1, 1, 1]) == end_range
# Test with a generator object
assert sample_uniform(range1, itertools.repeat(0, 10)) == start_range
# Negative range
start_range = -32
end_range = -17
range = ValueRange(start_range, end_range)
assert sample_uniform(range, [0] * 5) == start_range
assert sample_uniform(range, [1] * 5) == end_range
# Mixed range
start_range = -32
end_range = 31
range = ValueRange(start_range, end_range)
assert sample_uniform(range, [0] * 6) == start_range
assert sample_uniform(range, [1] * 6) == end_range
def test_hypergeometric():
# Infinite random coins
coins = (x for x in iter(lambda: random.randrange(2), 2))
# Small values
assert HGD.rhyper(5, 0, 5, coins) == 0
assert HGD.rhyper(6, 6, 0, coins) == 6
# Large values
assert HGD.rhyper(2**32, 0, 2**32, coins) == 0
assert HGD.rhyper(2**64, 2**64, 0, coins) == 2**64
assert HGD.rhyper(2**32, 2, 2**32 - 2, coins) == 2
| mit | 4,097,747,294,033,899,500 | 31.254237 | 99 | 0.636889 | false |
jsafrane/openlmi-storage | test/test_create_vg.py | 1 | 12056 | #!/usr/bin/python
# -*- Coding:utf-8 -*-
#
# Copyright (C) 2012 Red Hat, Inc. All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Jan Safranek <[email protected]>
from test_base import StorageTestBase, short_tests_only
import unittest
import pywbem
MEGABYTE = 1024 * 1024
class TestCreateVG(StorageTestBase):
"""
Test LMI_StorageConfigurationService.CreateOrModifyVG
(create only).
"""
VG_CLASS = "LMI_VGStoragePool"
STYLE_EMBR = 4100
STYLE_MBR = 2
STYLE_GPT = 3
PARTITION_CLASS = "LMI_GenericDiskPartition"
def setUp(self):
""" Find storage service. """
super(TestCreateVG, self).setUp()
self.service = self.wbemconnection.EnumerateInstanceNames(
"LMI_StorageConfigurationService")[0]
self.part_service = self.wbemconnection.EnumerateInstanceNames(
"LMI_DiskPartitionConfigurationService")[0]
self.capabilities = self.wbemconnection.EnumerateInstanceNames(
"LMI_VGStorageCapabilities")[0]
def _get_disk_size(self, devicename):
""" Return size of given device, in bytes."""
disk = self.wbemconnection.GetInstance(devicename)
return disk['NumberOfBlocks'] * disk['BlockSize']
def test_create_1pv(self):
""" Test CreateOrModifyVG with one PV."""
(ret, outparams) = self.wbemconnection.InvokeMethod(
"CreateOrModifyVG",
self.service,
InExtents=self.partition_names[:1],
ElementName='tstName')
self.assertEqual(ret, 0)
self.assertEqual(len(outparams), 2)
self.assertAlmostEqual(
outparams['size'],
self._get_disk_size(self.partition_names[0]),
delta=4 * MEGABYTE)
vgname = outparams['pool']
vg = self.wbemconnection.GetInstance(vgname)
self.assertEqual(vg['TotalManagedSpace'], outparams['size'])
self.assertEqual(vg['PoolID'], 'tstName')
self.assertEqual(vg['ElementName'], 'tstName')
self.assertNotEqual(vg['UUID'], '')
self.assertNotEqual(vg['UUID'], None)
self.assertEqual(vg['ExtentSize'], 4 * MEGABYTE)
self.assertEqual(
vg['ExtentSize'] * vg['TotalExtents'],
vg['TotalManagedSpace'])
self.assertEqual(
vg['ExtentSize'] * vg['RemainingExtents'],
vg['RemainingManagedSpace'])
self.wbemconnection.DeleteInstance(vgname)
@unittest.skipIf(short_tests_only(), "Running short tests only.")
def test_create_10pv(self):
""" Test CreateOrModifyVG with 10 PVs."""
(ret, outparams) = self.wbemconnection.InvokeMethod(
"CreateOrModifyVG",
self.service,
InExtents=self.partition_names[:10])
self.assertEqual(ret, 0)
self.assertEqual(len(outparams), 2)
self.assertAlmostEqual(
outparams['size'],
self._get_disk_size(self.partition_names[0]) * 10,
delta=30 * MEGABYTE)
vg = outparams['pool']
self.wbemconnection.DeleteInstance(vg)
@unittest.skipIf(short_tests_only(), "Running short tests only.")
def test_create_10vg(self):
""" Test CreateOrModifyVG with 10 VGs."""
vgs = []
for part in self.partition_names[:10]:
(ret, outparams) = self.wbemconnection.InvokeMethod(
"CreateOrModifyVG",
self.service,
InExtents=[part])
self.assertEqual(ret, 0)
self.assertEqual(len(outparams), 2)
vg = outparams['pool']
vgs.append(vg)
for vg in vgs:
self.wbemconnection.DeleteInstance(vg)
def test_create_unknown_setting(self):
""" Test CreateOrModifyVG with non-existing setting."""
goal = pywbem.CIMInstanceName(
classname=" LMI_VGStorageSetting",
keybindings={
'InstanceID' : 'LMI:LMI_VGStorageSetting:not-existing'
})
self.assertRaises(pywbem.CIMError, self.wbemconnection.InvokeMethod,
"CreateOrModifyVG",
self.service,
InExtents=self.partition_names[:1],
Goal=goal
)
def test_create_wrong_setting_class(self):
""" Test CreateOrModifyVG with non-existing setting."""
goal = pywbem.CIMInstanceName(
classname=" LMI_LVStorageSetting",
keybindings={
'InstanceID' : 'LMI:LMI_LVStorageSetting:not-existing'
})
self.assertRaises(pywbem.CIMError, self.wbemconnection.InvokeMethod,
"CreateOrModifyVG",
self.service,
InExtents=self.partition_names[:1],
Goal=goal
)
def _create_setting(self):
""" Create a VGStorageSetting and return CIMInstance of it."""
(retval, outparams) = self.wbemconnection.InvokeMethod(
"CreateSetting",
self.capabilities)
self.assertEqual(retval, 0)
self.assertEqual(len(outparams), 1)
setting_name = outparams['newsetting']
setting = self.wbemconnection.GetInstance(setting_name)
return setting
def _delete_setting(self, setting_name):
""" Delete given setting. """
self.wbemconnection.DeleteInstance(setting_name)
def test_create_default_setting(self):
"""
Test CreateOrModifyVG with default setting from
VGStroageCapabilities.CreateSetting.
"""
goal = self._create_setting()
(ret, outparams) = self.wbemconnection.InvokeMethod(
"CreateOrModifyVG",
self.service,
InExtents=self.partition_names[:1],
Goal=goal.path
)
self.assertEqual(ret, 0)
self.assertEqual(len(outparams), 2)
self.assertAlmostEqual(
outparams['size'],
self._get_disk_size(self.partition_names[0]),
delta=4 * MEGABYTE)
vgname = outparams['pool']
vg = self.wbemconnection.GetInstance(vgname)
self.assertEqual(vg['TotalManagedSpace'], outparams['size'])
self.assertNotEqual(vg['ElementName'], '')
self.assertNotEqual(vg['ElementName'], None)
self.assertNotEqual(vg['UUID'], '')
self.assertNotEqual(vg['UUID'], None)
self.assertEqual(vg['ExtentSize'], 4 * MEGABYTE)
self.assertEqual(
vg['ExtentSize'] * vg['TotalExtents'],
vg['TotalManagedSpace'])
self.assertEqual(
vg['ExtentSize'] * vg['RemainingExtents'],
vg['RemainingManagedSpace'])
# check it has a setting associated
settings = self.wbemconnection.Associators(
vgname,
AssocClass="LMI_VGElementSettingData")
self.assertEqual(len(settings), 1)
setting = settings[0]
self.assertEqual(setting['ExtentSize'], goal['ExtentSize'])
self.assertEqual(setting['DataRedundancyGoal'], goal['DataRedundancyGoal'])
self.assertLessEqual(setting['DataRedundancyMax'], goal['DataRedundancyMax'])
self.assertGreaterEqual(setting['DataRedundancyMin'], goal['DataRedundancyMin'])
self.assertEqual(setting['ExtentStripeLength'], goal['ExtentStripeLength'])
self.assertLessEqual(setting['ExtentStripeLengthMax'], goal['ExtentStripeLengthMax'])
self.assertGreaterEqual(setting['ExtentStripeLengthMin'], goal['ExtentStripeLengthMin'])
self.assertEqual(setting['NoSinglePointOfFailure'], goal['NoSinglePointOfFailure'])
self.assertEqual(setting['PackageRedundancyGoal'], goal['PackageRedundancyGoal'])
self.assertLessEqual(setting['PackageRedundancyMax'], goal['PackageRedundancyMax'])
self.assertGreaterEqual(setting['PackageRedundancyMin'], goal['PackageRedundancyMin'])
self.wbemconnection.DeleteInstance(vgname)
self._delete_setting(goal.path)
def test_create_setting_1m(self):
"""
        Test CreateOrModifyVG with 1MiB ExtentSize.
"""
goal = self._create_setting()
goal['ExtentSize'] = pywbem.Uint64(MEGABYTE)
self.wbemconnection.ModifyInstance(goal)
(ret, outparams) = self.wbemconnection.InvokeMethod(
"CreateOrModifyVG",
self.service,
InExtents=self.partition_names[:1],
Goal=goal.path
)
self.assertEqual(ret, 0)
self.assertEqual(len(outparams), 2)
self.assertAlmostEqual(
outparams['size'],
self._get_disk_size(self.partition_names[0]),
delta=4 * MEGABYTE)
vgname = outparams['pool']
vg = self.wbemconnection.GetInstance(vgname)
self.assertEqual(vg['TotalManagedSpace'], outparams['size'])
self.assertNotEqual(vg['ElementName'], '')
self.assertNotEqual(vg['ElementName'], None)
self.assertNotEqual(vg['UUID'], '')
self.assertNotEqual(vg['UUID'], None)
self.assertEqual(vg['ExtentSize'], MEGABYTE)
self.assertEqual(
vg['ExtentSize'] * vg['TotalExtents'],
vg['TotalManagedSpace'])
self.assertEqual(
vg['ExtentSize'] * vg['RemainingExtents'],
vg['RemainingManagedSpace'])
# check it has a setting associated
settings = self.wbemconnection.Associators(
vgname,
AssocClass="LMI_VGElementSettingData")
self.assertEqual(len(settings), 1)
setting = settings[0]
self.assertEqual(setting['ExtentSize'], goal['ExtentSize'])
self.assertEqual(setting['DataRedundancyGoal'], goal['DataRedundancyGoal'])
self.assertLessEqual(setting['DataRedundancyMax'], goal['DataRedundancyMax'])
self.assertGreaterEqual(setting['DataRedundancyMin'], goal['DataRedundancyMin'])
self.assertEqual(setting['ExtentStripeLength'], goal['ExtentStripeLength'])
self.assertLessEqual(setting['ExtentStripeLengthMax'], goal['ExtentStripeLengthMax'])
self.assertGreaterEqual(setting['ExtentStripeLengthMin'], goal['ExtentStripeLengthMin'])
self.assertEqual(setting['NoSinglePointOfFailure'], goal['NoSinglePointOfFailure'])
self.assertEqual(setting['PackageRedundancyGoal'], goal['PackageRedundancyGoal'])
self.assertLessEqual(setting['PackageRedundancyMax'], goal['PackageRedundancyMax'])
self.assertGreaterEqual(setting['PackageRedundancyMin'], goal['PackageRedundancyMin'])
self.wbemconnection.DeleteInstance(vgname)
self._delete_setting(goal.path)
def test_create_setting_64k(self):
"""
Test CreateOrModifyVG with 64k ExtentSize.
"""
goal = self._create_setting()
goal['ExtentSize'] = pywbem.Uint64(64 * 1024)
self.assertRaises(pywbem.CIMError, self.wbemconnection.ModifyInstance,
goal)
self._delete_setting(goal.path)
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 | -4,422,137,888,049,898,500 | 40.861111 | 96 | 0.615544 | false |
macwis/simplehr | candidates/migrations/0019_auto_20160525_0858.py | 1 | 1799 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-25 08:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('candidates', '0018_auto_20160524_1224'),
]
operations = [
migrations.CreateModel(
name='Questionnaire',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.CharField(blank=True, default=uuid.uuid4, max_length=100, unique=True)),
('start_time', models.IntegerField(default=None, null=True)),
],
),
migrations.RemoveField(
model_name='questionanswer',
name='candidate',
),
migrations.AlterField(
model_name='question',
name='active',
field=models.BooleanField(default=True),
),
migrations.AlterField(
model_name='questionchoice',
name='active',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='questionnaire',
name='answers',
field=models.ManyToManyField(to='candidates.QuestionAnswer'),
),
migrations.AddField(
model_name='questionnaire',
name='candidate',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='questionnaire', to='candidates.Candidate'),
),
migrations.AddField(
model_name='questionnaire',
name='questions',
field=models.ManyToManyField(to='candidates.Question'),
),
]
| gpl-3.0 | 6,831,259,579,656,450,000 | 32.314815 | 138 | 0.580322 | false |
vgrem/Office365-REST-Python-Client | tests/sharepoint/test_recycle_bin.py | 1 | 2039 | from random import randint
from tests.sharepoint.sharepoint_case import SPTestCase
from office365.sharepoint.files.file import File
from office365.sharepoint.recyclebin.recycleBinItemCollection import RecycleBinItemCollection
class TestSharePointRecycleBin(SPTestCase):
target_file = None # type: File
@classmethod
def setUpClass(cls):
super(TestSharePointRecycleBin, cls).setUpClass()
file_name = "Sample{0}.txt".format(str(randint(0, 10000)))
target_file = cls.client.web.default_document_library().root_folder \
.upload_file(file_name, "--some content goes here--").execute_query()
cls.target_file = target_file
@classmethod
def tearDownClass(cls):
pass
def test1_recycle_file(self):
result = self.__class__.target_file.recycle().execute_query()
self.assertIsNotNone(result.value)
def test2_find_removed_file(self):
file_name = self.__class__.target_file.name
items = self.client.site.recycle_bin.filter("LeafName eq '{0}'".format(file_name)).get().execute_query()
self.assertGreater(len(items), 0)
def test3_restore_file(self):
items = self.client.web.recycle_bin.get().execute_query()
self.assertGreater(len(items), 0)
items[0].restore().execute_query()
items_after = self.client.web.recycle_bin.get().execute_query()
self.assertEqual(len(items_after), len(items)-1)
def test4_get_site_recycle_bin_items(self):
items = self.client.site.get_recycle_bin_items().execute_query()
self.assertIsInstance(items, RecycleBinItemCollection)
def test5_get_web_recycle_bin_items(self):
items = self.client.web.get_recycle_bin_items().execute_query()
self.assertIsInstance(items, RecycleBinItemCollection)
def test6_clear_recycle_bin(self):
self.client.site.recycle_bin.delete_all().execute_query()
items_after = self.client.site.recycle_bin.get().execute_query()
self.assertEqual(len(items_after), 0)
| mit | 8,758,574,779,512,650,000 | 38.980392 | 112 | 0.687102 | false |
wcong/ants | ants/crawl/spidermanager.py | 1 | 1832 | """
SpiderManager is the class which locates and manages all website-specific
spiders
"""
from zope.interface import implements
from ants import signals
from ants.utils.interfaces import ISpiderManager
from ants.utils.misc import walk_modules
from ants.utils.spider import iter_spider_classes
class SpiderManager(object):
implements(ISpiderManager)
def __init__(self, spider_modules):
self.spider_modules = spider_modules
self._spiders = {}
for name in self.spider_modules:
for module in walk_modules(name):
self._load_spiders(module)
def _load_spiders(self, module):
for spcls in iter_spider_classes(module):
self._spiders[spcls.name] = spcls
@classmethod
def from_settings(cls, settings):
return cls(settings.getlist('SPIDER_MODULES'))
@classmethod
def from_crawler(cls, crawler):
sm = cls.from_settings(crawler.settings)
sm.crawler = crawler
crawler.signals.connect(sm.close_spider, signals.spider_closed)
return sm
def create(self, spider_name, **spider_kwargs):
try:
spcls = self._spiders[spider_name]
except KeyError:
raise KeyError("Spider not found: %s" % spider_name)
if hasattr(self, 'crawler') and hasattr(spcls, 'from_crawler'):
return spcls.from_crawler(self.crawler, **spider_kwargs)
else:
return spcls(**spider_kwargs)
def find_by_request(self, request):
return [name for name, cls in self._spiders.iteritems()
if cls.handles_request(request)]
def list(self):
return self._spiders.keys()
def close_spider(self, spider, reason):
closed = getattr(spider, 'closed', None)
if callable(closed):
return closed(reason)
| bsd-3-clause | 8,412,298,830,859,857,000 | 29.533333 | 73 | 0.645742 | false |
userzimmermann/robotframework-python3 | utest/running/test_imports.py | 1 | 2843 | import unittest
from six.moves import StringIO
from robot.running import TestSuite
from robot.utils.asserts import assert_equals, assert_raises_with_msg
def run(suite, **config):
result = suite.run(output=None, log=None, report=None,
stdout=StringIO(), stderr=StringIO(), **config)
return result.suite
def assert_suite(suite, name, status, message='', tests=1):
assert_equals(suite.name, name)
assert_equals(suite.status, status)
assert_equals(suite.message, message)
assert_equals(len(suite.tests), tests)
def assert_test(test, name, status, tags=(), msg=''):
assert_equals(test.name, name)
assert_equals(test.status, status)
assert_equals(test.message, msg)
assert_equals(tuple(test.tags), tags)
class TestImports(unittest.TestCase):
def test_imports(self):
suite = TestSuite(name='Suite')
suite.imports.create('Library', 'OperatingSystem')
suite.tests.create(name='Test').keywords.create('Directory Should Exist',
args=['.'])
result = run(suite)
assert_suite(result, 'Suite', 'PASS')
assert_test(result.tests[0], 'Test', 'PASS')
def test_library_imports(self):
suite = TestSuite(name='Suite')
suite.imports.library('OperatingSystem')
suite.tests.create(name='Test').keywords.create('Directory Should Exist',
args=['.'])
result = run(suite)
assert_suite(result, 'Suite', 'PASS')
assert_test(result.tests[0], 'Test', 'PASS')
def test_resource_imports(self):
suite = TestSuite(name='Suite')
suite.imports.resource('test_resource.txt')
suite.tests.create(name='Test').keywords.create('My Test Keyword')
assert_equals(suite.tests[0].keywords[0].name, 'My Test Keyword')
result = run(suite)
assert_suite(result, 'Suite', 'PASS')
assert_test(result.tests[0], 'Test', 'PASS')
def test_variable_imports(self):
suite = TestSuite(name='Suite')
suite.imports.variables('variables_file.py')
suite.tests.create(name='Test').keywords.create(
'Should Be Equal As Strings',
['${MY_VARIABLE}', 'An example string']
)
result = run(suite)
assert_suite(result, 'Suite', 'PASS')
assert_test(result.tests[0], 'Test', 'PASS')
def test_invalid_import_type(self):
assert_raises_with_msg(ValueError,
"Invalid import type 'InvalidType'. Should be "
"one of 'Library', 'Resource' or 'Variables'.",
TestSuite().imports.create,
'InvalidType', 'Name')
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -1,786,881,284,123,601,000 | 36.906667 | 81 | 0.588111 | false |
foone/7gen | code/decompress3dmm.py | 1 | 1907 | #!/usr/env python
#BMDLVIEW: Views Microsoft 3D Movie Maker models (BMDLs)
#Copyright (C) 2004-2015 Foone Turing
#
#This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from ctypes import *
import os
class Decompressor(object):
def __init__(self, exe_file):
decomp_proxy_dll = os.path.join(os.path.dirname(os.path.abspath(__file__)),"DecompProxy.dll")
decompdll = cdll.LoadLibrary(decomp_proxy_dll)
DLLInit=getattr(decompdll,'DP_Init')
DLLInit.argtypes=[c_char_p]
DLLInit.restype=c_void_p
self.DLLShutdown=getattr(decompdll,'DP_Shutdown')
self.DLLShutdown.argtypes=[c_void_p]
self.GetSize=GetSize=getattr(decompdll,'DP_GetSize')
GetSize.argtypes=[c_char_p,c_int]
self.DLLDecompress=DLLDecompress=getattr(decompdll,'DP_DecompressSmart')
DLLDecompress.argtypes=[c_void_p,c_char_p,c_int,c_char_p]
ret = self.ctx = DLLInit(exe_file)
if not ret:
raise OSError("Failed to initialize decompression")
	def shutdown(self):
		self.DLLShutdown(self.ctx)
def decompress(self, compressed_string):
length=self.GetSize(compressed_string,len(compressed_string))
if length<=0:
return None
outbuffer=c_buffer(length)
if not self.DLLDecompress(self.ctx, compressed_string,len(compressed_string),outbuffer):
return None
else:
return str(outbuffer.raw)
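# Illustrative usage (example only, not part of the original module; the
# executable path and 'compressed_data' are hypothetical placeholders):
#   d = Decompressor('path/to/3dmovie.exe')
#   raw = d.decompress(compressed_data)
#   d.shutdown()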
| gpl-2.0 | 6,400,433,870,015,829,000 | 43.348837 | 240 | 0.760881 | false |
exiahuang/SalesforceXyTools | xlsxwriter/workbook.py | 1 | 54782 | ###############################################################################
#
# Workbook - A class for writing the Excel XLSX Workbook file.
#
# Copyright 2013-2016, John McNamara, [email protected]
#
# Standard packages.
import sys
import re
import os
import operator
from warnings import warn
from datetime import datetime
from zipfile import ZipFile, ZIP_DEFLATED
from struct import unpack
from .compatibility import int_types, num_types, str_types, force_unicode
# Package imports.
from . import xmlwriter
from .worksheet import Worksheet
from .chartsheet import Chartsheet
from .sharedstrings import SharedStringTable
from .format import Format
from .packager import Packager
from .utility import xl_cell_to_rowcol
from .chart_area import ChartArea
from .chart_bar import ChartBar
from .chart_column import ChartColumn
from .chart_doughnut import ChartDoughnut
from .chart_line import ChartLine
from .chart_pie import ChartPie
from .chart_radar import ChartRadar
from .chart_scatter import ChartScatter
from .chart_stock import ChartStock
class Workbook(xmlwriter.XMLwriter):
"""
A class for writing the Excel XLSX Workbook file.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self, filename=None, options={}):
"""
Constructor.
"""
super(Workbook, self).__init__()
self.filename = filename
self.tmpdir = options.get('tmpdir', None)
self.date_1904 = options.get('date_1904', False)
self.strings_to_numbers = options.get('strings_to_numbers', False)
self.strings_to_formulas = options.get('strings_to_formulas', True)
self.strings_to_urls = options.get('strings_to_urls', True)
self.nan_inf_to_errors = options.get('nan_inf_to_errors', False)
self.default_date_format = options.get('default_date_format', None)
self.optimization = options.get('constant_memory', False)
self.in_memory = options.get('in_memory', False)
self.excel2003_style = options.get('excel2003_style', False)
self.remove_timezone = options.get('remove_timezone', False)
self.default_format_properties = \
options.get('default_format_properties', {})
self.worksheet_meta = WorksheetMeta()
self.selected = 0
self.fileclosed = 0
self.filehandle = None
self.internal_fh = 0
self.sheet_name = 'Sheet'
self.chart_name = 'Chart'
self.sheetname_count = 0
self.chartname_count = 0
self.worksheets_objs = []
self.charts = []
self.drawings = []
self.sheetnames = {}
self.formats = []
self.xf_formats = []
self.xf_format_indices = {}
self.dxf_formats = []
self.dxf_format_indices = {}
self.palette = []
self.font_count = 0
self.num_format_count = 0
self.defined_names = []
self.named_ranges = []
self.custom_colors = []
self.doc_properties = {}
self.custom_properties = []
self.createtime = datetime.utcnow()
self.num_vml_files = 0
self.num_comment_files = 0
self.x_window = 240
self.y_window = 15
self.window_width = 16095
self.window_height = 9660
self.tab_ratio = 500
self.str_table = SharedStringTable()
self.vba_project = None
self.vba_is_stream = False
self.vba_codename = None
self.image_types = {}
self.images = []
self.border_count = 0
self.fill_count = 0
self.drawing_count = 0
self.calc_mode = "auto"
self.calc_on_load = True
self.allow_zip64 = False
self.calc_id = 124519
# We can't do 'constant_memory' mode while doing 'in_memory' mode.
if self.in_memory:
self.optimization = False
# Add the default cell format.
if self.excel2003_style:
self.add_format({'xf_index': 0, 'font_family': 0})
else:
self.add_format({'xf_index': 0})
# Add a default URL format.
self.default_url_format = self.add_format({'color': 'blue',
'underline': 1})
# Add the default date format.
if self.default_date_format is not None:
self.default_date_format = \
self.add_format({'num_format': self.default_date_format})
def __del__(self):
"""Close file in destructor if it hasn't been closed explicitly."""
try:
if not self.fileclosed:
self.close()
except:
raise Exception("Exception caught in workbook destructor. "
"Explicit close() may be required for workbook.")
def __enter__(self):
"""Return self object to use with "with" statement."""
return self
def __exit__(self, type, value, traceback):
"""Close workbook when exiting "with" statement."""
self.close()
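    # Illustrative usage of the context-manager support above (example only,
    # not part of the original source):
    #
    #     with Workbook('hello.xlsx') as workbook:
    #         workbook.add_worksheet().write('A1', 'Hello world')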
def add_worksheet(self, name=None):
"""
Add a new worksheet to the Excel workbook.
Args:
name: The worksheet name. Defaults to 'Sheet1', etc.
Returns:
Reference to a worksheet object.
"""
return self._add_sheet(name, is_chartsheet=False)
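    # Illustrative usage of add_worksheet() (example only, not part of the
    # original source); the name argument is optional:
    #
    #     worksheet1 = workbook.add_worksheet()        # Defaults to 'Sheet1'
    #     worksheet2 = workbook.add_worksheet('Data')  # Explicit sheet name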
def add_chartsheet(self, name=None):
"""
Add a new chartsheet to the Excel workbook.
Args:
name: The chartsheet name. Defaults to 'Sheet1', etc.
Returns:
Reference to a chartsheet object.
"""
return self._add_sheet(name, is_chartsheet=True)
def add_format(self, properties={}):
"""
Add a new Format to the Excel Workbook.
Args:
properties: The format properties.
Returns:
Reference to a Format object.
"""
format_properties = self.default_format_properties.copy()
if self.excel2003_style:
format_properties = {'font_name': 'Arial', 'font_size': 10,
'theme': 1 * -1}
format_properties.update(properties)
xf_format = Format(format_properties,
self.xf_format_indices,
self.dxf_format_indices)
# Store the format reference.
self.formats.append(xf_format)
return xf_format
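    # Illustrative usage of add_format() (example only, not part of the
    # original source); the keys are standard Format properties:
    #
    #     bold = workbook.add_format({'bold': True})
    #     money = workbook.add_format({'num_format': '$#,##0.00'})
    #     worksheet.write('A1', 1234.56, money)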
def add_chart(self, options):
"""
Create a chart object.
Args:
options: The chart type and subtype options.
Returns:
Reference to a Chart object.
"""
# Type must be specified so we can create the required chart instance.
chart_type = options.get('type')
if chart_type is None:
warn("Chart type must be defined in add_chart()")
return
if chart_type == 'area':
chart = ChartArea(options)
elif chart_type == 'bar':
chart = ChartBar(options)
elif chart_type == 'column':
chart = ChartColumn(options)
elif chart_type == 'doughnut':
chart = ChartDoughnut(options)
elif chart_type == 'line':
chart = ChartLine(options)
elif chart_type == 'pie':
chart = ChartPie(options)
elif chart_type == 'radar':
chart = ChartRadar(options)
elif chart_type == 'scatter':
chart = ChartScatter(options)
elif chart_type == 'stock':
chart = ChartStock(options)
else:
warn("Unknown chart type '%s' in add_chart()" % chart_type)
return
# Set the embedded chart name if present.
if 'name' in options:
chart.chart_name = options['name']
chart.embedded = True
chart.date_1904 = self.date_1904
self.charts.append(chart)
return chart
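    # Illustrative usage of add_chart() (example only, not part of the
    # original source):
    #
    #     chart = workbook.add_chart({'type': 'column'})
    #     chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
    #     worksheet.insert_chart('C1', chart)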
def add_vba_project(self, vba_project, is_stream=False):
"""
Add a vbaProject binary to the Excel workbook.
Args:
vba_project: The vbaProject binary file name.
is_stream: vba_project is an in memory byte stream.
Returns:
Nothing.
"""
if not is_stream and not os.path.exists(vba_project):
warn("VBA project binary file '%s' not found."
% force_unicode(vba_project))
return -1
self.vba_project = vba_project
self.vba_is_stream = is_stream
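    # Illustrative usage of add_vba_project() (example only, not part of the
    # original source; './vbaProject.bin' is a hypothetical path). Workbooks
    # with VBA projects should normally be saved with an .xlsm extension:
    #
    #     workbook.add_vba_project('./vbaProject.bin')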
def close(self):
"""
Call finalization code and close file.
Args:
None.
Returns:
Nothing.
"""
if not self.fileclosed:
self.fileclosed = 1
self._store_workbook()
def set_size(self, width, height):
"""
Set the size of a workbook window.
Args:
width: Width of the window in pixels.
height: Height of the window in pixels.
Returns:
Nothing.
"""
# Convert the width/height to twips at 96 dpi.
if width:
self.window_width = int(width * 1440 / 96)
else:
self.window_width = 16095
if height:
self.window_height = int(height * 1440 / 96)
else:
self.window_height = 9660
def set_properties(self, properties):
"""
Set the document properties such as Title, Author etc.
Args:
properties: Dictionary of document properties.
Returns:
Nothing.
"""
self.doc_properties = properties
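    # Illustrative usage of set_properties() (example only, not part of the
    # original source); recognized keys include 'title', 'subject', 'author',
    # 'manager', 'company', 'category', 'keywords', 'comments' and 'status':
    #
    #     workbook.set_properties({'title': 'Monthly report',
    #                              'author': 'Jane Doe'})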
def set_custom_property(self, name, value, property_type=None):
"""
Set a custom document property.
Args:
name: The name of the custom property.
value: The value of the custom property.
property_type: The type of the custom property. Optional.
Returns:
Nothing.
"""
if name is None or value is None:
warn("The name and value parameters must be non-None in "
"set_custom_property()")
return -1
if property_type is None:
# Determine the property type from the Python type.
if isinstance(value, bool):
property_type = 'bool'
elif isinstance(value, datetime):
property_type = 'date'
elif isinstance(value, int_types):
property_type = 'number_int'
elif isinstance(value, num_types):
property_type = 'number'
else:
property_type = 'text'
if property_type == 'date':
value = value.strftime("%Y-%m-%dT%H:%M:%SZ")
if property_type == 'text' and len(value) > 255:
warn("Length of 'value' parameter exceeds Excel's limit of 255 "
"characters in set_custom_property(): '%s'" %
force_unicode(value))
if len(name) > 255:
warn("Length of 'name' parameter exceeds Excel's limit of 255 "
"characters in set_custom_property(): '%s'" %
force_unicode(name))
self.custom_properties.append((name, value, property_type))
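    # Illustrative usage of set_custom_property() (example only, not part of
    # the original source); the property type is inferred when not given:
    #
    #     workbook.set_custom_property('Checked by', 'Admin')      # text
    #     workbook.set_custom_property('Reference number', 12345)  # number_int
    #     workbook.set_custom_property('Is draft', True)           # bool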
def set_calc_mode(self, mode, calc_id=None):
"""
Set the Excel calculation mode for the workbook.
Args:
mode: String containing one of:
* manual
* auto_except_tables
* auto
Returns:
Nothing.
"""
self.calc_mode = mode
if mode == 'manual':
self.calc_on_load = False
elif mode == 'auto_except_tables':
self.calc_mode = 'autoNoTable'
# Leave undocumented for now. Rarely required.
if calc_id:
self.calc_id = calc_id
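    # Illustrative usage of set_calc_mode() (example only, not part of the
    # original source):
    #
    #     workbook.set_calc_mode('manual')              # Recalculate on demand
    #     workbook.set_calc_mode('auto_except_tables')  # Skip table recalcs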
def define_name(self, name, formula):
# Create a defined name in Excel. We handle global/workbook level
# names and local/worksheet names.
"""
Create a defined name in the workbook.
Args:
name: The defined name.
formula: The cell or range that the defined name refers to.
Returns:
Nothing.
"""
sheet_index = None
sheetname = ''
# Remove the = sign from the formula if it exists.
if formula.startswith('='):
formula = formula.lstrip('=')
# Local defined names are formatted like "Sheet1!name".
sheet_parts = re.compile(r'^(.*)!(.*)$')
match = sheet_parts.match(name)
if match:
sheetname = match.group(1)
name = match.group(2)
sheet_index = self._get_sheet_index(sheetname)
# Warn if the sheet index wasn't found.
if sheet_index is None:
warn("Unknown sheet name '%s' in defined_name()"
% force_unicode(sheetname))
return -1
else:
# Use -1 to indicate global names.
sheet_index = -1
# Warn if the defined name contains invalid chars as defined by Excel.
if (not re.match(r'^[\w\\][\w\\.]*$', name, re.UNICODE)
or re.match(r'^\d', name)):
warn("Invalid Excel characters in defined_name(): '%s'"
% force_unicode(name))
return -1
# Warn if the defined name looks like a cell name.
if re.match(r'^[a-zA-Z][a-zA-Z]?[a-dA-D]?[0-9]+$', name):
warn("Name looks like a cell name in defined_name(): '%s'"
% force_unicode(name))
return -1
# Warn if the name looks like a R1C1 cell reference.
if (re.match(r'^[rcRC]$', name)
or re.match(r'^[rcRC]\d+[rcRC]\d+$', name)):
warn("Invalid name '%s' like a RC cell ref in defined_name()"
% force_unicode(name))
return -1
self.defined_names.append([name, sheet_index, formula, False])
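    # Illustrative usage of define_name() (example only, not part of the
    # original source); the second form creates a sheet-local name:
    #
    #     workbook.define_name('Exchange_rate', '=0.96')
    #     workbook.define_name('Sheet1!Sales', '=Sheet1!$G$1:$G$10')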
def worksheets(self):
"""
Return a list of the worksheet objects in the workbook.
Args:
None.
Returns:
A list of worksheet objects.
"""
return self.worksheets_objs
def get_worksheet_by_name(self, name):
"""
Return a worksheet object in the workbook using the sheetname.
Args:
name: The name of the worksheet.
Returns:
A worksheet object or None.
"""
return self.sheetnames.get(name)
def use_zip64(self):
"""
Allow ZIP64 extensions when writing xlsx file zip container.
Args:
None.
Returns:
Nothing.
"""
self.allow_zip64 = True
def set_vba_name(self, name=None):
"""
Set the VBA name for the workbook. By default the workbook is referred
to as ThisWorkbook in VBA.
Args:
name: The VBA name for the workbook.
Returns:
Nothing.
"""
if name is not None:
self.vba_codename = name
else:
self.vba_codename = 'ThisWorkbook'
###########################################################################
#
# Private API.
#
###########################################################################
def _assemble_xml_file(self):
# Assemble and write the XML file.
# Prepare format object for passing to Style.pm.
self._prepare_format_properties()
# Write the XML declaration.
self._xml_declaration()
# Write the workbook element.
self._write_workbook()
# Write the fileVersion element.
self._write_file_version()
# Write the workbookPr element.
self._write_workbook_pr()
# Write the bookViews element.
self._write_book_views()
# Write the sheets element.
self._write_sheets()
# Write the workbook defined names.
self._write_defined_names()
# Write the calcPr element.
self._write_calc_pr()
# Close the workbook tag.
self._xml_end_tag('workbook')
# Close the file.
self._xml_close()
def _store_workbook(self):
# Assemble worksheets into a workbook.
packager = Packager()
# Add a default worksheet if non have been added.
if not self.worksheets():
self.add_worksheet()
# Ensure that at least one worksheet has been selected.
if self.worksheet_meta.activesheet == 0:
self.worksheets_objs[0].selected = 1
self.worksheets_objs[0].hidden = 0
# Set the active sheet.
for sheet in self.worksheets():
if sheet.index == self.worksheet_meta.activesheet:
sheet.active = 1
# Convert the SST strings data structure.
self._prepare_sst_string_data()
# Prepare the worksheet VML elements such as comments and buttons.
self._prepare_vml()
# Set the defined names for the worksheets such as Print Titles.
self._prepare_defined_names()
# Prepare the drawings, charts and images.
self._prepare_drawings()
# Add cached data to charts.
self._add_chart_data()
# Prepare the worksheet tables.
self._prepare_tables()
# Package the workbook.
packager._add_workbook(self)
packager._set_tmpdir(self.tmpdir)
packager._set_in_memory(self.in_memory)
xml_files = packager._create_package()
# Free up the Packager object.
packager = None
xlsx_file = ZipFile(self.filename, "w", compression=ZIP_DEFLATED,
allowZip64=self.allow_zip64)
# Add XML sub-files to the Zip file with their Excel filename.
for os_filename, xml_filename, is_binary in xml_files:
if self.in_memory:
# The files are in-memory StringIOs.
if is_binary:
xlsx_file.writestr(xml_filename, os_filename.getvalue())
else:
xlsx_file.writestr(xml_filename,
os_filename.getvalue().encode('utf-8'))
else:
# The files are tempfiles.
xlsx_file.write(os_filename, xml_filename)
os.remove(os_filename)
xlsx_file.close()
def _add_sheet(self, name, is_chartsheet):
# Utility for shared code in add_worksheet() and add_chartsheet().
sheet_index = len(self.worksheets_objs)
name = self._check_sheetname(name, is_chartsheet)
# Initialization data to pass to the worksheet.
init_data = {
'name': name,
'index': sheet_index,
'str_table': self.str_table,
'worksheet_meta': self.worksheet_meta,
'optimization': self.optimization,
'tmpdir': self.tmpdir,
'date_1904': self.date_1904,
'strings_to_numbers': self.strings_to_numbers,
'strings_to_formulas': self.strings_to_formulas,
'strings_to_urls': self.strings_to_urls,
'nan_inf_to_errors': self.nan_inf_to_errors,
'default_date_format': self.default_date_format,
'default_url_format': self.default_url_format,
'excel2003_style': self.excel2003_style,
'remove_timezone': self.remove_timezone,
}
if is_chartsheet:
worksheet = Chartsheet()
else:
worksheet = Worksheet()
worksheet._initialize(init_data)
self.worksheets_objs.append(worksheet)
self.sheetnames[name] = worksheet
return worksheet
def _check_sheetname(self, sheetname, is_chartsheet=False):
# Check for valid worksheet names. We check the length, if it contains
# any invalid chars and if the sheetname is unique in the workbook.
invalid_char = re.compile(r'[\[\]:*?/\\]')
# Increment the Sheet/Chart number used for default sheet names below.
if is_chartsheet:
self.chartname_count += 1
else:
self.sheetname_count += 1
# Supply default Sheet/Chart sheetname if none has been defined.
if sheetname is None:
if is_chartsheet:
sheetname = self.chart_name + str(self.chartname_count)
else:
sheetname = self.sheet_name + str(self.sheetname_count)
# Check that sheet sheetname is <= 31. Excel limit.
if len(sheetname) > 31:
raise Exception("Excel worksheet name '%s' must be <= 31 chars." %
sheetname)
# Check that sheetname doesn't contain any invalid characters
if invalid_char.search(sheetname):
raise Exception(
"Invalid Excel character '[]:*?/\\' in sheetname '%s'" %
sheetname)
# Check that the worksheet name doesn't already exist since this is a
# fatal Excel error. The check must be case insensitive like Excel.
for worksheet in self.worksheets():
if sheetname.lower() == worksheet.name.lower():
raise Exception(
"Sheetname '%s', with case ignored, is already in use." %
sheetname)
return sheetname
def _prepare_format_properties(self):
# Prepare all Format properties prior to passing them to styles.py.
# Separate format objects into XF and DXF formats.
self._prepare_formats()
# Set the font index for the format objects.
self._prepare_fonts()
# Set the number format index for the format objects.
self._prepare_num_formats()
# Set the border index for the format objects.
self._prepare_borders()
# Set the fill index for the format objects.
self._prepare_fills()
def _prepare_formats(self):
# Iterate through the XF Format objects and separate them into
# XF and DXF formats. The XF and DF formats then need to be sorted
# back into index order rather than creation order.
xf_formats = []
dxf_formats = []
# Sort into XF and DXF formats.
for xf_format in self.formats:
if xf_format.xf_index is not None:
xf_formats.append(xf_format)
if xf_format.dxf_index is not None:
dxf_formats.append(xf_format)
# Pre-extend the format lists.
self.xf_formats = [None] * len(xf_formats)
self.dxf_formats = [None] * len(dxf_formats)
# Rearrange formats into index order.
for xf_format in xf_formats:
index = xf_format.xf_index
self.xf_formats[index] = xf_format
for dxf_format in dxf_formats:
index = dxf_format.dxf_index
self.dxf_formats[index] = dxf_format
def _set_default_xf_indices(self):
# Set the default index for each format. Only used for testing.
formats = list(self.formats)
# Delete the default url format.
del formats[1]
# Skip the default date format if set.
if self.default_date_format is not None:
del formats[1]
# Set the remaining formats.
for xf_format in formats:
xf_format._get_xf_index()
def _prepare_fonts(self):
# Iterate through the XF Format objects and give them an index to
# non-default font elements.
fonts = {}
index = 0
for xf_format in self.xf_formats:
key = xf_format._get_font_key()
if key in fonts:
# Font has already been used.
xf_format.font_index = fonts[key]
xf_format.has_font = 0
else:
# This is a new font.
fonts[key] = index
xf_format.font_index = index
xf_format.has_font = 1
index += 1
self.font_count = index
# For DXF formats we only need to check if the properties have changed.
for xf_format in self.dxf_formats:
# The only font properties that can change for a DXF format are:
# color, bold, italic, underline and strikethrough.
if (xf_format.font_color or xf_format.bold or xf_format.italic
or xf_format.underline or xf_format.font_strikeout):
xf_format.has_dxf_font = 1
def _prepare_num_formats(self):
# User defined records in Excel start from index 0xA4.
num_formats = {}
index = 164
num_format_count = 0
for xf_format in (self.xf_formats + self.dxf_formats):
num_format = xf_format.num_format
# Check if num_format is an index to a built-in number format.
if not isinstance(num_format, str_types):
xf_format.num_format_index = int(num_format)
continue
if num_format in num_formats:
# Number xf_format has already been used.
xf_format.num_format_index = num_formats[num_format]
else:
# Add a new number xf_format.
num_formats[num_format] = index
xf_format.num_format_index = index
index += 1
# Only increase font count for XF formats (not DXF formats).
if xf_format.xf_index:
num_format_count += 1
self.num_format_count = num_format_count
def _prepare_borders(self):
# Iterate through the XF Format objects and give them an index to
# non-default border elements.
borders = {}
index = 0
for xf_format in self.xf_formats:
key = xf_format._get_border_key()
if key in borders:
# Border has already been used.
xf_format.border_index = borders[key]
xf_format.has_border = 0
else:
# This is a new border.
borders[key] = index
xf_format.border_index = index
xf_format.has_border = 1
index += 1
self.border_count = index
# For DXF formats we only need to check if the properties have changed.
has_border = re.compile(r'[^0:]')
for xf_format in self.dxf_formats:
key = xf_format._get_border_key()
if has_border.search(key):
xf_format.has_dxf_border = 1
def _prepare_fills(self):
# Iterate through the XF Format objects and give them an index to
# non-default fill elements.
# The user defined fill properties start from 2 since there are 2
# default fills: patternType="none" and patternType="gray125".
fills = {}
index = 2 # Start from 2. See above.
# Add the default fills.
fills['0:0:0'] = 0
fills['17:0:0'] = 1
# Store the DXF colors separately since them may be reversed below.
for xf_format in self.dxf_formats:
if xf_format.pattern or xf_format.bg_color or xf_format.fg_color:
xf_format.has_dxf_fill = 1
xf_format.dxf_bg_color = xf_format.bg_color
xf_format.dxf_fg_color = xf_format.fg_color
for xf_format in self.xf_formats:
# The following logical statements jointly take care of special
# cases in relation to cell colors and patterns:
# 1. For a solid fill (_pattern == 1) Excel reverses the role of
# foreground and background colors, and
# 2. If the user specifies a foreground or background color
# without a pattern they probably wanted a solid fill, so we fill
# in the defaults.
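            # For example (illustrative, not in the original source): a format
            # created with only 'bg_color' set and no pattern is rewritten
            # below into a solid fill (pattern 1) whose foreground color is
            # that color, which matches what Excel writes for a plain fill.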
if (xf_format.pattern == 1 and xf_format.bg_color != 0
and xf_format.fg_color != 0):
tmp = xf_format.fg_color
xf_format.fg_color = xf_format.bg_color
xf_format.bg_color = tmp
if (xf_format.pattern <= 1 and xf_format.bg_color != 0
and xf_format.fg_color == 0):
xf_format.fg_color = xf_format.bg_color
xf_format.bg_color = 0
xf_format.pattern = 1
if (xf_format.pattern <= 1 and xf_format.bg_color == 0
and xf_format.fg_color != 0):
xf_format.bg_color = 0
xf_format.pattern = 1
key = xf_format._get_fill_key()
if key in fills:
# Fill has already been used.
xf_format.fill_index = fills[key]
xf_format.has_fill = 0
else:
# This is a new fill.
fills[key] = index
xf_format.fill_index = index
xf_format.has_fill = 1
index += 1
self.fill_count = index
def _prepare_defined_names(self):
# Iterate through the worksheets and store any defined names in
# addition to any user defined names. Stores the defined names
# for the Workbook.xml and the named ranges for App.xml.
defined_names = self.defined_names
for sheet in self.worksheets():
# Check for Print Area settings.
if sheet.autofilter_area:
hidden = 1
sheet_range = sheet.autofilter_area
# Store the defined names.
defined_names.append(['_xlnm._FilterDatabase',
sheet.index, sheet_range, hidden])
# Check for Print Area settings.
if sheet.print_area_range:
hidden = 0
sheet_range = sheet.print_area_range
# Store the defined names.
defined_names.append(['_xlnm.Print_Area',
sheet.index, sheet_range, hidden])
# Check for repeat rows/cols referred to as Print Titles.
if sheet.repeat_col_range or sheet.repeat_row_range:
hidden = 0
sheet_range = ''
if sheet.repeat_col_range and sheet.repeat_row_range:
sheet_range = (sheet.repeat_col_range + ',' +
sheet.repeat_row_range)
else:
sheet_range = (sheet.repeat_col_range +
sheet.repeat_row_range)
# Store the defined names.
defined_names.append(['_xlnm.Print_Titles',
sheet.index, sheet_range, hidden])
defined_names = self._sort_defined_names(defined_names)
self.defined_names = defined_names
self.named_ranges = self._extract_named_ranges(defined_names)
def _sort_defined_names(self, names):
# Sort the list of list of internal and user defined names in
# the same order as used by Excel.
        # Add a normalized name string to each list for sorting.
for name_list in names:
(defined_name, _, sheet_name, _) = name_list
# Normalize the defined name by removing any leading '_xmln.'
# from internal names and lowercasing the string.
defined_name = defined_name.replace('_xlnm.', '').lower()
# Normalize the sheetname by removing the leading quote and
# lowercasing the string.
sheet_name = sheet_name.lstrip("'").lower()
name_list.append(defined_name + "::" + sheet_name)
# Sort based on the normalized key.
names.sort(key=operator.itemgetter(4))
# Remove the extra key used for sorting.
for name_list in names:
name_list.pop()
return names
def _prepare_drawings(self):
# Iterate through the worksheets and set up chart and image drawings.
chart_ref_id = 0
image_ref_id = 0
drawing_id = 0
x_dpi = 96
y_dpi = 96
for sheet in self.worksheets():
chart_count = len(sheet.charts)
image_count = len(sheet.images)
shape_count = len(sheet.shapes)
header_image_count = len(sheet.header_images)
footer_image_count = len(sheet.footer_images)
has_drawing = False
if not (chart_count or image_count or shape_count
or header_image_count or footer_image_count):
continue
            # Don't increase the drawing_id for header/footer images.
if chart_count or image_count or shape_count:
drawing_id += 1
has_drawing = True
# Prepare the worksheet charts.
for index in range(chart_count):
chart_ref_id += 1
sheet._prepare_chart(index, chart_ref_id, drawing_id)
# Prepare the worksheet images.
for index in range(image_count):
filename = sheet.images[index][2]
image_data = sheet.images[index][10]
(image_type, width, height, name, x_dpi, y_dpi) = \
self._get_image_properties(filename, image_data)
image_ref_id += 1
sheet._prepare_image(index, image_ref_id, drawing_id, width,
height, name, image_type, x_dpi, y_dpi)
# Prepare the worksheet shapes.
for index in range(shape_count):
sheet._prepare_shape(index, drawing_id)
# Prepare the header images.
for index in range(header_image_count):
filename = sheet.header_images[index][0]
image_data = sheet.header_images[index][1]
position = sheet.header_images[index][2]
(image_type, width, height, name, x_dpi, y_dpi) = \
self._get_image_properties(filename, image_data)
image_ref_id += 1
sheet._prepare_header_image(image_ref_id, width, height,
name, image_type, position,
x_dpi, y_dpi)
# Prepare the footer images.
for index in range(footer_image_count):
filename = sheet.footer_images[index][0]
image_data = sheet.footer_images[index][1]
position = sheet.footer_images[index][2]
(image_type, width, height, name, x_dpi, y_dpi) = \
self._get_image_properties(filename, image_data)
image_ref_id += 1
sheet._prepare_header_image(image_ref_id, width, height,
name, image_type, position,
x_dpi, y_dpi)
if has_drawing:
drawing = sheet.drawing
self.drawings.append(drawing)
# Remove charts that were created but not inserted into worksheets.
for chart in self.charts[:]:
if chart.id == -1:
self.charts.remove(chart)
        # Sort the workbook chart references into the order that they were
# written to the worksheets above.
self.charts = sorted(self.charts, key=lambda chart: chart.id)
self.drawing_count = drawing_id
def _get_image_properties(self, filename, image_data):
# Extract dimension information from the image file.
height = 0
width = 0
x_dpi = 96
y_dpi = 96
if not image_data:
# Open the image file and read in the data.
fh = open(filename, "rb")
data = fh.read()
else:
# Read the image data from the user supplied byte stream.
data = image_data.getvalue()
# Get the image filename without the path.
image_name = os.path.basename(filename)
# Look for some common image file markers.
marker1 = (unpack('3s', data[1:4]))[0]
marker2 = (unpack('>H', data[:2]))[0]
marker3 = (unpack('2s', data[:2]))[0]
if sys.version_info < (2, 6, 0):
# Python 2.5/Jython.
png_marker = 'PNG'
bmp_marker = 'BM'
else:
# Eval the binary literals for Python 2.5/Jython compatibility.
png_marker = eval("b'PNG'")
bmp_marker = eval("b'BM'")
if marker1 == png_marker:
self.image_types['png'] = 1
(image_type, width, height, x_dpi, y_dpi) = self._process_png(data)
elif marker2 == 0xFFD8:
self.image_types['jpeg'] = 1
(image_type, width, height, x_dpi, y_dpi) = self._process_jpg(data)
elif marker3 == bmp_marker:
self.image_types['bmp'] = 1
(image_type, width, height) = self._process_bmp(data)
else:
raise Exception("%s: Unknown or unsupported image file format."
% filename)
# Check that we found the required data.
if not height or not width:
raise Exception("%s: no size data found in image file." % filename)
# Store image data to copy it into file container.
self.images.append([filename, image_type, image_data])
if not image_data:
fh.close()
# Set a default dpi for images with 0 dpi.
if x_dpi == 0:
x_dpi = 96
if y_dpi == 0:
y_dpi = 96
return image_type, width, height, image_name, x_dpi, y_dpi
def _process_png(self, data):
# Extract width and height information from a PNG file.
offset = 8
data_length = len(data)
end_marker = False
width = 0
height = 0
x_dpi = 96
y_dpi = 96
# Look for numbers rather than strings for Python 2.6/3 compatibility.
marker_ihdr = 0x49484452 # IHDR
marker_phys = 0x70485973 # pHYs
marker_iend = 0X49454E44 # IEND
# Search through the image data to read the height and width in the
# IHDR element. Also read the DPI in the pHYs element.
while not end_marker and offset < data_length:
length = (unpack('>I', data[offset + 0:offset + 4]))[0]
marker = (unpack('>I', data[offset + 4:offset + 8]))[0]
# Read the image dimensions.
if marker == marker_ihdr:
width = (unpack('>I', data[offset + 8:offset + 12]))[0]
height = (unpack('>I', data[offset + 12:offset + 16]))[0]
# Read the image DPI.
if marker == marker_phys:
x_density = (unpack('>I', data[offset + 8:offset + 12]))[0]
y_density = (unpack('>I', data[offset + 12:offset + 16]))[0]
units = (unpack('b', data[offset + 16:offset + 17]))[0]
if units == 1:
x_dpi = x_density * 0.0254
y_dpi = y_density * 0.0254
if marker == marker_iend:
end_marker = True
continue
offset = offset + length + 12
return 'png', width, height, x_dpi, y_dpi
def _process_jpg(self, data):
# Extract width and height information from a JPEG file.
offset = 2
data_length = len(data)
end_marker = False
width = 0
height = 0
x_dpi = 96
y_dpi = 96
# Search through the image data to read the height and width in the
# 0xFFC0/C2 element. Also read the DPI in the 0xFFE0 element.
while not end_marker and offset < data_length:
marker = (unpack('>H', data[offset + 0:offset + 2]))[0]
length = (unpack('>H', data[offset + 2:offset + 4]))[0]
# Read the image dimensions.
if marker == 0xFFC0 or marker == 0xFFC2:
height = (unpack('>H', data[offset + 5:offset + 7]))[0]
width = (unpack('>H', data[offset + 7:offset + 9]))[0]
# Read the image DPI.
if marker == 0xFFE0:
units = (unpack('b', data[offset + 11:offset + 12]))[0]
x_density = (unpack('>H', data[offset + 12:offset + 14]))[0]
y_density = (unpack('>H', data[offset + 14:offset + 16]))[0]
if units == 1:
x_dpi = x_density
y_dpi = y_density
if units == 2:
x_dpi = x_density * 2.54
y_dpi = y_density * 2.54
# Workaround for incorrect dpi.
if x_dpi == 1:
x_dpi = 96
if y_dpi == 1:
y_dpi = 96
if marker == 0xFFDA:
end_marker = True
continue
offset = offset + length + 2
return 'jpeg', width, height, x_dpi, y_dpi
def _process_bmp(self, data):
# Extract width and height information from a BMP file.
width = (unpack('<L', data[18:22]))[0]
height = (unpack('<L', data[22:26]))[0]
return 'bmp', width, height
def _extract_named_ranges(self, defined_names):
# Extract the named ranges from the sorted list of defined names.
# These are used in the App.xml file.
named_ranges = []
for defined_name in defined_names:
name = defined_name[0]
index = defined_name[1]
sheet_range = defined_name[2]
# Skip autoFilter ranges.
if name == '_xlnm._FilterDatabase':
continue
# We are only interested in defined names with ranges.
if '!' in sheet_range:
sheet_name, _ = sheet_range.split('!', 1)
# Match Print_Area and Print_Titles xlnm types.
if name.startswith('_xlnm.'):
xlnm_type = name.replace('_xlnm.', '')
name = sheet_name + '!' + xlnm_type
elif index != -1:
name = sheet_name + '!' + name
named_ranges.append(name)
return named_ranges
def _get_sheet_index(self, sheetname):
# Convert a sheet name to its index. Return None otherwise.
sheetname = sheetname.strip("'")
if sheetname in self.sheetnames:
return self.sheetnames[sheetname].index
else:
return None
def _prepare_vml(self):
# Iterate through the worksheets and set up the VML objects.
comment_id = 0
vml_drawing_id = 0
vml_data_id = 1
vml_header_id = 0
vml_shape_id = 1024
vml_files = 0
comment_files = 0
has_button = False
for sheet in self.worksheets():
if not sheet.has_vml and not sheet.has_header_vml:
continue
vml_files += 1
if sheet.has_vml:
if sheet.has_comments:
comment_files += 1
comment_id += 1
vml_drawing_id += 1
count = sheet._prepare_vml_objects(vml_data_id,
vml_shape_id,
vml_drawing_id,
comment_id)
# Each VML should start with a shape id incremented by 1024.
vml_data_id += 1 * int((1024 + count) / 1024)
vml_shape_id += 1024 * int((1024 + count) / 1024)
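                # Added worked example of the increments above: with count = 500
                # comment shapes, (1024 + 500) // 1024 == 1, so vml_data_id grows
                # by 1 and vml_shape_id by 1024; with count = 1500 they grow by 2
                # and 2048 respectively.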
if sheet.has_header_vml:
vml_header_id += 1
vml_drawing_id += 1
sheet._prepare_header_vml_objects(vml_header_id,
vml_drawing_id)
self.num_vml_files = vml_files
self.num_comment_files = comment_files
if len(sheet.buttons_list):
has_button = True
# Set the sheet vba_codename if it has a button and the
# workbook has a vbaProject binary.
if self.vba_project and sheet.vba_codename is None:
sheet.set_vba_name()
# Add a font format for cell comments.
if comment_files > 0:
xf = self.add_format({'font_name': 'Tahoma', 'font_size': 8,
'color_indexed': 81, 'font_only': True})
xf._get_xf_index()
# Set the workbook vba_codename if one of the sheets has a button and
# the workbook has a vbaProject binary.
if has_button and self.vba_project and self.vba_codename is None:
self.set_vba_name()
def _prepare_tables(self):
# Set the table ids for the worksheet tables.
table_id = 0
seen = {}
for sheet in self.worksheets():
table_count = len(sheet.tables)
if not table_count:
continue
sheet._prepare_tables(table_id + 1, seen)
table_id += table_count
def _add_chart_data(self):
# Add "cached" data to charts to provide the numCache and strCache
# data for series and title/axis ranges.
worksheets = {}
seen_ranges = {}
charts = []
# Map worksheet names to worksheet objects.
for worksheet in self.worksheets():
worksheets[worksheet.name] = worksheet
# Build a list of the worksheet charts including any combined charts.
for chart in self.charts:
charts.append(chart)
if chart.combined:
charts.append(chart.combined)
for chart in charts:
for c_range in chart.formula_ids.keys():
r_id = chart.formula_ids[c_range]
# Skip if the series has user defined data.
if chart.formula_data[r_id] is not None:
if (c_range not in seen_ranges
or seen_ranges[c_range] is None):
data = chart.formula_data[r_id]
seen_ranges[c_range] = data
continue
# Check to see if the data is already cached locally.
if c_range in seen_ranges:
chart.formula_data[r_id] = seen_ranges[c_range]
continue
# Convert the range formula to a sheet name and cell range.
(sheetname, cells) = self._get_chart_range(c_range)
# Skip if we couldn't parse the formula.
if sheetname is None:
continue
# Handle non-contiguous ranges like:
# (Sheet1!$A$1:$A$2,Sheet1!$A$4:$A$5).
# We don't try to parse them. We just return an empty list.
if sheetname.startswith('('):
chart.formula_data[r_id] = []
seen_ranges[c_range] = []
continue
# Warn if the name is unknown since it indicates a user error
# in a chart series formula.
if sheetname not in worksheets:
warn("Unknown worksheet reference '%s' in range "
"'%s' passed to add_series()"
% (force_unicode(sheetname), force_unicode(c_range)))
chart.formula_data[r_id] = []
seen_ranges[c_range] = []
continue
# Find the worksheet object based on the sheet name.
worksheet = worksheets[sheetname]
# Get the data from the worksheet table.
data = worksheet._get_range_data(*cells)
# TODO. Handle SST string ids if required.
# Add the data to the chart.
chart.formula_data[r_id] = data
# Store range data locally to avoid lookup if seen again.
seen_ranges[c_range] = data
def _get_chart_range(self, c_range):
# Convert a range formula such as Sheet1!$B$1:$B$5 into a sheet name
# and cell range such as ( 'Sheet1', 0, 1, 4, 1 ).
# Split the range formula into sheetname and cells at the last '!'.
pos = c_range.rfind('!')
if pos > 0:
sheetname = c_range[:pos]
cells = c_range[pos + 1:]
else:
return None, None
# Split the cell range into 2 cells or else use single cell for both.
if cells.find(':') > 0:
(cell_1, cell_2) = cells.split(':', 1)
else:
(cell_1, cell_2) = (cells, cells)
# Remove leading/trailing quotes and convert escaped quotes to single.
sheetname = sheetname.strip("'")
sheetname = sheetname.replace("''", "'")
try:
# Get the row, col values from the Excel ranges. We do this in a
# try block for ranges that can't be parsed such as defined names.
(row_start, col_start) = xl_cell_to_rowcol(cell_1)
(row_end, col_end) = xl_cell_to_rowcol(cell_2)
except:
return None, None
# We only handle 1D ranges.
if row_start != row_end and col_start != col_end:
return None, None
return sheetname, [row_start, col_start, row_end, col_end]
def _prepare_sst_string_data(self):
# Convert the SST string data from a dict to a list.
self.str_table._sort_string_data()
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_workbook(self):
# Write <workbook> element.
schema = 'http://schemas.openxmlformats.org'
xmlns = schema + '/spreadsheetml/2006/main'
xmlns_r = schema + '/officeDocument/2006/relationships'
attributes = [
('xmlns', xmlns),
('xmlns:r', xmlns_r),
]
self._xml_start_tag('workbook', attributes)
def _write_file_version(self):
# Write the <fileVersion> element.
app_name = 'xl'
last_edited = 4
lowest_edited = 4
rup_build = 4505
attributes = [
('appName', app_name),
('lastEdited', last_edited),
('lowestEdited', lowest_edited),
('rupBuild', rup_build),
]
if self.vba_project:
attributes.append(
('codeName', '{37E998C4-C9E5-D4B9-71C8-EB1FF731991C}'))
self._xml_empty_tag('fileVersion', attributes)
def _write_workbook_pr(self):
# Write <workbookPr> element.
default_theme_version = 124226
attributes = []
if self.vba_codename:
attributes.append(('codeName', self.vba_codename))
if self.date_1904:
attributes.append(('date1904', 1))
attributes.append(('defaultThemeVersion', default_theme_version))
self._xml_empty_tag('workbookPr', attributes)
def _write_book_views(self):
# Write <bookViews> element.
self._xml_start_tag('bookViews')
self._write_workbook_view()
self._xml_end_tag('bookViews')
def _write_workbook_view(self):
# Write <workbookView> element.
attributes = [
('xWindow', self.x_window),
('yWindow', self.y_window),
('windowWidth', self.window_width),
('windowHeight', self.window_height),
]
# Store the tabRatio attribute when it isn't the default.
if self.tab_ratio != 500:
attributes.append(('tabRatio', self.tab_ratio))
# Store the firstSheet attribute when it isn't the default.
if self.worksheet_meta.firstsheet > 0:
firstsheet = self.worksheet_meta.firstsheet + 1
attributes.append(('firstSheet', firstsheet))
# Store the activeTab attribute when it isn't the first sheet.
if self.worksheet_meta.activesheet > 0:
attributes.append(('activeTab', self.worksheet_meta.activesheet))
self._xml_empty_tag('workbookView', attributes)
def _write_sheets(self):
# Write <sheets> element.
self._xml_start_tag('sheets')
id_num = 1
for worksheet in self.worksheets():
self._write_sheet(worksheet.name, id_num, worksheet.hidden)
id_num += 1
self._xml_end_tag('sheets')
def _write_sheet(self, name, sheet_id, hidden):
# Write <sheet> element.
attributes = [
('name', name),
('sheetId', sheet_id),
]
if hidden:
attributes.append(('state', 'hidden'))
attributes.append(('r:id', 'rId' + str(sheet_id)))
self._xml_empty_tag('sheet', attributes)
def _write_calc_pr(self):
# Write the <calcPr> element.
attributes = [('calcId', self.calc_id)]
if self.calc_mode == 'manual':
attributes.append(('calcMode', self.calc_mode))
attributes.append(('calcOnSave', "0"))
elif self.calc_mode == 'autoNoTable':
attributes.append(('calcMode', self.calc_mode))
if self.calc_on_load:
attributes.append(('fullCalcOnLoad', '1'))
self._xml_empty_tag('calcPr', attributes)
def _write_defined_names(self):
# Write the <definedNames> element.
if not self.defined_names:
return
self._xml_start_tag('definedNames')
for defined_name in self.defined_names:
self._write_defined_name(defined_name)
self._xml_end_tag('definedNames')
def _write_defined_name(self, defined_name):
# Write the <definedName> element.
name = defined_name[0]
sheet_id = defined_name[1]
sheet_range = defined_name[2]
hidden = defined_name[3]
attributes = [('name', name)]
if sheet_id != -1:
attributes.append(('localSheetId', sheet_id))
if hidden:
attributes.append(('hidden', 1))
self._xml_data_element('definedName', sheet_range, attributes)
# A metadata class to share data between worksheets.
class WorksheetMeta(object):
"""
A class to track worksheets data such as the active sheet and the
first sheet.
"""
def __init__(self):
self.activesheet = 0
self.firstsheet = 0
| apache-2.0 | -5,126,902,016,835,148,000 | 32.48533 | 79 | 0.534902 | false |
ric2b/Vivaldi-browser | chromium/third_party/blink/renderer/bindings/scripts/build_web_idl_database.py | 1 | 1272 | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Builds Web IDL database.
Web IDL database is a Python object that supports a variety of accessors to
IDL definitions such as IDL interface and IDL attribute.
"""
import optparse
import sys
import web_idl
def parse_options():
parser = optparse.OptionParser()
parser.add_option('--output', type='string',
help="filepath of the resulting database")
options, args = parser.parse_args()
if options.output is None:
parser.error("Specify a filepath of the database with --output.")
if not args:
parser.error("No argument specified.")
return options, args
def main():
options, filepaths = parse_options()
was_error_reported = [False]
def report_error(message):
was_error_reported[0] = True
sys.stderr.writelines([message, "\n"])
database = web_idl.build_database(filepaths=filepaths,
report_error=report_error)
if was_error_reported[0]:
sys.exit("Aborted due to error.")
database.write_to_file(options.output)
if __name__ == '__main__':
main()
| bsd-3-clause | -5,031,196,945,547,410,000 | 23.461538 | 75 | 0.65173 | false |
jiadaizhao/LeetCode | 0601-0700/0639-Decode Ways II/0639-Decode Ways II.py | 1 | 2404 | class Solution:
def numDecodings(self, s: str) -> int:
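        # Explanatory notes added to this otherwise uncommented solution:
        # dp[i] = number of ways to decode the first i characters, mod 1e9+7.
        # One-char step: '*' stands for 1-9 (9 * dp[i-1]); any other non-zero
        # digit contributes dp[i-1].
        # Two-char step: '**' covers 11-19 and 21-26 (15 codes), '*d' covers
        # both '1d' and '2d' when d <= 6 but only '1d' when d >= 7, '1*' covers
        # 9 codes, '2*' covers 6, and a literal '1'/'2' followed by a valid
        # digit adds dp[i-2] once.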
MOD = 1000000007
dp = [0] * (1 + len(s))
dp[0] = 1
for i in range(1, len(dp)):
if s[i - 1] == '*':
dp[i] = dp[i - 1]*9 % MOD
elif s[i - 1] != '0':
dp[i] = dp[i - 1]
if i >= 2:
if s[i - 2] == '*':
if s[i - 1] == '*':
dp[i] = (dp[i] + dp[i - 2]*15) % MOD
elif '0' <= s[i - 1] <= '6':
dp[i] = (dp[i] + dp[i - 2]*2) % MOD
else:
dp[i] = (dp[i] + dp[i - 2]) % MOD
elif s[i - 2] == '1':
if s[i - 1] == '*':
dp[i] = (dp[i] + dp[i - 2]*9) % MOD
else:
dp[i] = (dp[i] + dp[i - 2]) % MOD
elif s[i - 2] == '2':
if s[i - 1] == '*':
dp[i] = (dp[i] + dp[i - 2]*6) % MOD
elif '0' <= s[i - 1] <= '6':
dp[i] = (dp[i] + dp[i - 2]) % MOD
return dp[-1]
# Optimize to O(1) space
class Solution2:
def numDecodings(self, s: str) -> int:
MOD = 1000000007
dp0 = 1
if s[0] == '*':
dp1 = 9
elif s[0] != '0':
dp1 = 1
else:
dp1 = 0
for i in range(2, len(s) + 1):
if s[i - 1] == '*':
dp2 = dp1 * 9 % MOD
elif s[i - 1] != '0':
dp2 = dp1
else:
dp2 = 0
if s[i - 2] == '*':
if s[i - 1] == '*':
dp2 = (dp2 + dp0 * 15) % MOD
elif '0' <= s[i - 1] <= '6':
dp2 = (dp2 + dp0 * 2) % MOD
else:
dp2 = (dp2 + dp0) % MOD
elif s[i - 2] == '1':
if s[i - 1] == '*':
dp2 = (dp2 + dp0 * 9) % MOD
else:
dp2 = (dp2 + dp0) % MOD
elif s[i - 2] == '2':
if s[i - 1] == '*':
dp2 = (dp2 + dp0 * 6) % MOD
elif '0' <= s[i - 1] <= '6':
dp2 = (dp2 + dp0) % MOD
dp0 = dp1
dp1 = dp2
return dp1
| mit | -7,319,157,839,342,609,000 | 32.388889 | 60 | 0.253744 | false |
h-hirokawa/swampdragon | swampdragon/pubsub_providers/channel_utils.py | 1 | 3543 | from ..model_tools import string_to_list, get_property
from .channel_filters import filter_options, in_compare, term_match_check
try:
from urllib.parse import quote_plus, unquote_plus
except ImportError:
from urllib import quote_plus, unquote_plus
def make_safe(val):
"""
    Make strings in filters safe,
    e.g. 'foo bar' becomes 'foo+bar'
"""
if not isinstance(val, str):
return val
return quote_plus(val)
def remove_channel_filter(channel):
"""
Remove filters from channel strings
i.e foo_contains becomes foo
"""
if '__' not in channel:
return channel
chan, channel_filter = channel.rsplit('__', 1)
if filter_options.get(channel_filter):
return chan
return channel
def get_channel_filter(channel):
if '__' not in channel:
return filter_options['eq']
chan, channel_filter_name = channel.rsplit('__', 1)
channel_filter = filter_options.get(channel_filter_name)
if not channel_filter:
return filter_options['eq']
return channel_filter
def get_property_and_value_from_channel(channel):
"""
    Get a list of (property, value) tuples parsed from a channel string,
    e.g. foo|bar__name__contains:baz returns [('bar__name__contains', 'baz')]
"""
    filters = [f for f in str(channel).split('|')[1:] if f]  # use a list so the emptiness check works on Python 3
if not filters:
return None
properties = []
for channel_filter, val in [tuple(f.split(':', 1)) for f in filters]:
filter_option = filter_options.get(channel_filter.split('__')[-1])
if filter_option == in_compare:
val = string_to_list(val)
properties.append((channel_filter, val))
return properties
def channel_match_check(channel, data):
terms = filter(None, channel.split('|')[1:])
option = None
for term in terms:
        key, val = term.split(':', 1)
if '__' in key and key.split('__')[-1] in filter_options.keys():
option = key.rsplit('__', 1)[-1]
if key not in data:
return False
if not term_match_check(data[key], val, option):
return False
return True
def properties_match_channel_by_object(obj, channel_properties):
result = True
for prop, val in channel_properties:
if not has_val(obj, prop, val) and not has_related_value(obj, prop, val):
return False
return result
def properties_match_channel_by_dict(dict, channel_properties):
result = True
for prop, val in channel_properties:
if prop not in dict:
return False
val_type = type(val)
if not val_type(dict[prop]) == val:
return False
return result
def get_value(obj, prop):
data = {}
val = get_property(obj, prop)
if val:
data[prop] = val
return data
def has_val(obj, prop, val):
obj_val = get_property(obj, remove_channel_filter(prop))
if not obj_val:
return False
channel_filter = get_channel_filter(prop)
return channel_filter(val, obj_val)
def has_related_value(obj, field, channel_val):
if '__' not in field:
filter_by_val = channel_val
property_name = field
else:
property_name, filter_by_val = field.split('__', 1)
attr = getattr(obj, property_name)
if hasattr(attr, 'all'):
return getattr(obj, property_name).filter(**{filter_by_val: channel_val}).exists()
else:
filter_query = {'pk': obj.pk}
filter_query[field] = channel_val
return obj.__class__.objects.filter(**filter_query).exists()
| bsd-3-clause | 7,975,938,618,541,718,000 | 28.040984 | 90 | 0.619249 | false |
mcardacci/python_smtp_emailer | app/tests/test_format_view.py | 1 | 1297 | import unittest
from app import Format
class FormatViewMethods(unittest.TestCase):
def setUp(self):
self.formatter=Format({u'ABC084-S': {u'type': u'Windows 7 Professional', u'installDate': u'2011-08-31 11:07:00.000'}, u'GMC088-POS1': {u'type': u'Windows 7 Professional', u'installDate': u'2011-08-31 11:07:00.000'}})
def tearDown(self):
self.formatter=None
def test_object_has_content_named_attribute(self):
self.assertEqual(hasattr(self.formatter, 'content'), True, 'CONTENT FIELD EXISTS')
def test_object_content_is_a_dictionary(self):
self.assertEqual(type(self.formatter.content), dict, 'CONTENT IS NOT A DICTIONARY')
def test_os_report_function_responds_correctly_when_content_is_empty(self):
self.formatter.content={}
self.assertEqual(self.formatter.os_report(), "No machines have been added, No Operating Systems have been changed.", "DIDN'T RESPOND WITH WARNING STRING.")
def test_os_report_function_responds_correctly_when_content_is_populated(self):
os_report=self.formatter.os_report()
self.assertEqual(type(os_report), unicode, "DIDN'T RESPOND WITH CORRECT TYPE (str)")
		self.assertNotEqual(os_report, "No machines have been added, No Operating Systems have been changed.", "DIDN'T RESPOND WITH CORRECT STRING.")
if __name__=='__main__':
unittest.main() | mit | 5,349,898,510,400,005,000 | 42.266667 | 218 | 0.744025 | false |
konstantinKim/vd-backend | app/facilities/models.py | 1 | 4354 | from marshmallow_jsonapi import Schema, fields
from marshmallow import validate
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.exc import SQLAlchemyError
from app.helper.helper import Calc
db = SQLAlchemy(session_options={"autoflush": False})
class CRUD():
def add(self, resource):
db.session.add(resource)
return db.session.commit()
def update(self):
return db.session.commit()
def delete(self, resource):
db.session.delete(resource)
return db.session.commit()
class FacilitiesMaterials(db.Model):
__tablename__ = 'facilities_materials'
MATERIAL_ID = db.Column(db.Integer, primary_key=True)
FACILITY_ID = db.Column(db.Integer, db.ForeignKey('facilities.FACILITY_ID'))
class Facilities(db.Model, CRUD):
__tablename__ = 'facilities'
FACILITY_ID = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(250), unique=True, nullable=False)
street = db.Column(db.String(250))
zipcode = db.Column(db.String(250))
facility_materials = db.relationship(FacilitiesMaterials, backref="facility", lazy='joined')
class FacilitiesSchema(Schema):
not_blank = validate.Length(min=1, error='Field cannot be blank')
id = fields.Integer()
FACILITY_ID = fields.Integer(primary_key=True)
name = fields.String(validate=not_blank)
street = fields.String()
zipcode = fields.String()
#self links
def get_top_level_links(self, data, many):
if many:
self_link = "/facilities/"
else:
self_link = "/facilities/{}".format(data['attributes']['FACILITY_ID'])
return {'self': self_link}
class Meta:
type_ = 'facilities'
class RecyclersSearch():
def __init__(self, **kwargs):
self.LAT = 0
self.LON = 0
self.distance = 0
self.name = ""
self.city = ""
self.county = ""
self.state = ""
self.street = ""
self.phone = ""
self.url = ""
self.zipcode = ""
def find(MATERIAL_ID, zipcode, radius):
query = db.engine.execute("SELECT * FROM zipcodes WHERE zipcode >= '"+ zipcode +"' ORDER BY zipcode ASC LIMIT 1")
zipcode = query.fetchone()
select_clause = ", zipcodes.LAT, zipcodes.LON , SQRT( (69.1 * (zipcodes.LAT - "+str(zipcode['LAT'])+")) * (69.1 * (zipcodes.LAT - "+str(zipcode['LAT'])+")) + (53.0 * (zipcodes.LON - "+str(zipcode['LON'])+")) * (53.0 * (zipcodes.LON - "+str(zipcode['LON'])+")) ) AS distance , facilities_materials.conversion_rate, facilities_materials.conversion_leed "
join_clause_zipcodes = ", zipcodes"
join_clause = " INNER JOIN facilities_materials ON facilities_materials.FACILITY_ID=facilities.FACILITY_ID "
where_clause_zipcodes = " AND zipcodes.zipcode=facilities.zipcode AND facilities_materials.MATERIAL_ID=" + str(MATERIAL_ID) + " AND SQRT( (69.1 * (zipcodes.LAT - "+str(zipcode['LAT'])+")) * (69.1 * (zipcodes.LAT - "+str(zipcode['LAT'])+")) + (53.0 * (zipcodes.LON - "+str(zipcode['LON'])+")) * (53.0 * (zipcodes.LON - "+str(zipcode['LON'])+")) ) <= " + str(radius)
query = db.engine.execute("SELECT DISTINCT(facilities.FACILITY_ID), facilities.*, counties.name AS county, counties.state, cities.name AS city, \
facilities.FACILITY_ID AS CONTEXT_ID "+ select_clause + " FROM cities, facilities \
INNER JOIN counties ON facilities.COUNTY_ID=counties.COUNTY_ID " + join_clause + " " + join_clause_zipcodes + " \
WHERE facilities.CITY_ID=cities.CITY_ID AND facilities.enabled='true' " + where_clause_zipcodes + " ORDER BY distance ASC " )
data = query.fetchall()
result = []
for f in data:
rs = RecyclersSearch()
rs.LAT = str(f.LAT)
rs.LON = str(f.LON)
rs.name = f.name
rs.city = f.city
rs.county = f.county
rs.state = f.state
rs.distance = Calc.myRound(f.distance)
rs.street = f.street
rs.phone = f.phone
rs.url = f.url
rs.zipcode = f.zipcode
result.append(rs.__dict__)
return(result)
| mit | 5,645,028,438,968,486,000 | 38.944954 | 376 | 0.590032 | false |
Josef-Friedrich/audiorename | audiorename/audiofile.py | 1 | 12041 | """This module contains all functionality on the level of a single audio file.
"""
from __future__ import print_function
from audiorename.meta import Meta, dict_diff
from phrydy.utils import as_string
from tmep import Functions
from tmep import Template
import errno
import os
import phrydy
import re
import shutil
class AudioFile(object):
"""
:param path: The path string of the audio file.
:param string file_type: Either “source” or “target”.
:param string prefix: The path prefix of the audio file, for example the
base folder of your music collection. Used to shorten the path strings
in the progress messaging.
:param job: The `job` object.
:type job: audiorename.job.Job
"""
def __init__(self, path=None, file_type='source', prefix=None, job=None):
self.__path = path
self.type = file_type
self.job = job
self.__prefix = prefix
self.shorten_symbol = '[…]'
if not self.job:
shell_friendly = True
else:
shell_friendly = self.job.shell_friendly
if self.exists:
try:
self.meta = Meta(self.abspath, shell_friendly)
except phrydy.mediafile.UnreadableFileError:
self.meta = False
@property
def abspath(self):
return os.path.abspath(self.__path)
@property
def prefix(self):
if self.__prefix and len(self.__prefix) > 1:
if self.__prefix[-1] != os.path.sep:
return self.__prefix + os.path.sep
else:
return self.__prefix
@property
def exists(self):
return os.path.exists(self.abspath)
@property
def extension(self):
return self.abspath.split('.')[-1].lower()
@property
def short(self):
if self.prefix:
short = self.abspath.replace(self.prefix, '')
else:
short = os.path.basename(self.abspath)
return self.shorten_symbol + short
@property
def filename(self):
return os.path.basename(self.abspath)
@property
def dir_and_file(self):
path_segments = self.abspath.split(os.path.sep)
return os.path.sep.join(path_segments[-2:])
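# Hypothetical usage sketch (added; the path is made up): for an AudioFile built
# from '/music/artist/track.flac' with prefix='/music', extension is 'flac',
# filename is 'track.flac' and short renders as '[…]artist/track.flac', which is
# the form used in the progress messaging.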
class MBTrackListing(object):
def __init__(self):
self.counter = 0
def format_audiofile(self, album, title, length):
self.counter += 1
m, s = divmod(length, 60)
mmss = '{:d}:{:02d}'.format(int(m), int(s))
output = '{:d}. {:s}: {:s} ({:s})'.format(self.counter, album,
title, mmss)
output = output.replace('Op.', 'op.')
return output.replace('- ', '')
mb_track_listing = MBTrackListing()
def get_target(target, extensions):
"""Get the path of a existing audio file target. Search for audio files
with different extensions.
"""
target = os.path.splitext(target)[0]
for extension in extensions:
audio_file = target + '.' + extension
if os.path.exists(audio_file):
return audio_file
def best_format(source, target, job):
"""
:param source: The metadata object of the source file.
:type source: audiorename.meta.Meta
:param target: The metadata object of the target file.
:type target: audiorename.meta.Meta
:return: Either the string `source` or the string `target`
:param job: The `job` object.
:type job: audiorename.job.Job
:rtype: string
"""
def get_highest(dictionary):
for key, value in sorted(dictionary.items()):
out = value
return out
if source.format == target.format:
bitrates = {}
bitrates[source.bitrate] = 'source'
bitrates[target.bitrate] = 'target'
best = get_highest(bitrates)
job.msg.best_format(best, 'bitrate', source, target)
return best
else:
# All types:
#
# 'aac'
# 'aiff'
# 'alac': Apple Lossless Audio Codec (losless)
# 'ape'
# 'asf'
# 'dsf'
# 'flac'
# 'mp3'
# 'mpc'
# 'ogg'
# 'opus'
# 'wv': WavPack (losless)
ranking = {
'flac': 10,
'alac': 9,
'aac': 8,
'mp3': 5,
'ogg': 2,
'wma': 1,
}
types = {}
types[ranking[source.type]] = 'source'
types[ranking[target.type]] = 'target'
best = get_highest(types)
job.msg.best_format(best, 'type', source, target)
return best
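# Rough illustration (added, not from the original): for a FLAC source and an
# MP3 target of the same track the ranking above gives flac=10 vs mp3=5, so
# best_format() reports 'source'; when both files share a format, the one with
# the higher bitrate wins instead.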
def process_target_path(meta, format_string, shell_friendly=True):
"""
:param dict meta: The to a dictionary converted attributes of a
meta object :class:`audiorename.meta.Meta`.
:param string format_string:
:param boolean shell_friendly:
"""
template = Template(as_string(format_string))
functions = Functions(meta)
target = template.substitute(meta, functions.functions())
if isinstance(target, str):
if shell_friendly:
target = Functions.tmpl_asciify(target)
target = Functions.tmpl_delchars(target, '().,!"\'’')
target = Functions.tmpl_replchars(target, '-', ' ')
            # asciify generates new characters which must be sanitized, e.g.:
# ¿ -> ?
target = Functions.tmpl_delchars(target, ':*?"<>|\\~&{}')
target = Functions.tmpl_deldupchars(target)
return re.sub(r'\.$', '', target)
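# Minimal usage sketch (added; the field names are illustrative and the
# $placeholder syntax is assumed from tmep):
#
#     process_target_path({'artist': 'Björk', 'title': "It's Oh So Quiet"},
#                         '$artist/$title')
#
# would come back ASCII-only and shell friendly, roughly 'Bjork/Its-Oh-So-Quiet',
# after the asciify/delchars/replchars/deldupchars passes above.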
class Action(object):
"""
:param job: The `job` object.
:type job: audiorename.job.Job
"""
def __init__(self, job):
self.job = job
self.dry_run = job.dry_run
def count(self, counter_name):
self.job.stats.counter.count(counter_name)
def cleanup(self, audio_file):
if self.job.rename.cleanup == 'backup':
self.backup(audio_file)
elif self.job.rename.cleanup == 'delete':
self.delete(audio_file)
def backup(self, audio_file):
backup_file = AudioFile(
os.path.join(
self.job.rename.backup_folder,
os.path.basename(audio_file.abspath)
), file_type='target'
)
self.job.msg.action_two_path('Backup', audio_file, backup_file)
self.count('backup')
if not self.dry_run:
self.create_dir(backup_file)
shutil.move(audio_file.abspath, backup_file.abspath)
def copy(self, source, target):
self.job.msg.action_two_path('Copy', source, target)
self.count('copy')
if not self.dry_run:
self.create_dir(target)
shutil.copy2(source.abspath, target.abspath)
def create_dir(self, audio_file):
path = os.path.dirname(audio_file.abspath)
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def delete(self, audio_file):
self.job.msg.action_one_path('Delete', audio_file)
self.count('delete')
if not self.dry_run:
os.remove(audio_file.abspath)
def move(self, source, target):
self.job.msg.action_two_path('Move', source, target)
self.count('move')
if not self.dry_run:
self.create_dir(target)
shutil.move(source.abspath, target.abspath)
def metadata(self, audio_file, enrich=False, remap=False):
pre = audio_file.meta.export_dict(sanitize=False)
def single_action(audio_file, method_name, message):
pre = audio_file.meta.export_dict(sanitize=False)
method = getattr(audio_file.meta, method_name)
method()
post = audio_file.meta.export_dict(sanitize=False)
diff = dict_diff(pre, post)
if diff:
self.count(method_name)
self.job.msg.output(message)
for change in diff:
self.job.msg.diff(change[0], change[1], change[2])
if enrich:
single_action(audio_file, 'enrich_metadata', 'Enrich metadata')
if remap:
single_action(audio_file, 'remap_classical', 'Remap classical')
post = audio_file.meta.export_dict(sanitize=False)
diff = dict_diff(pre, post)
if not self.dry_run and diff:
audio_file.meta.save()
def do_job_on_audiofile(source, job=None):
"""
:param job: The `job` object.
:type job: audiorename.job.Job
"""
def count(key):
job.stats.counter.count(key)
skip = False
action = Action(job)
source = AudioFile(source, prefix=os.getcwd(), file_type='source', job=job)
if not job.output.mb_track_listing:
job.msg.next_file(source)
if not source.meta:
skip = True
##
# Skips
##
if skip:
job.msg.status(u'Broken file', status='error')
count('broken_file')
return
##
# Output only
##
if job.output.mb_track_listing:
print(mb_track_listing.format_audiofile(source.meta.album,
source.meta.title,
source.meta.length))
return
if job.output.debug:
phrydy.doc.print_debug(
source.abspath,
Meta,
Meta.fields,
job.output.color,
)
return
if job.field_skip and (not hasattr(source.meta,
job.field_skip) or not getattr(source.meta, job.field_skip)):
job.msg.status(u'No field', status='error')
count('no_field')
return
##
# Metadata actions
##
if job.metadata_actions.remap_classical or \
job.metadata_actions.enrich_metadata:
action.metadata(
source,
job.metadata_actions.enrich_metadata,
job.metadata_actions.remap_classical
)
##
# Rename action
##
if job.rename.move != 'no_rename':
if source.meta.ar_combined_soundtrack:
format_string = job.format.soundtrack
elif source.meta.comp:
format_string = job.format.compilation
else:
format_string = job.format.default
meta_dict = source.meta.export_dict()
desired_target_path = process_target_path(meta_dict, format_string,
job.shell_friendly)
desired_target_path = os.path.join(
job.target,
desired_target_path + '.' + source.extension
)
desired_target = AudioFile(desired_target_path, prefix=job.target,
file_type='target', job=job)
# Do nothing
if source.abspath == desired_target.abspath:
job.msg.status('Renamed', status='ok')
count('renamed')
return
# Search existing target
target = False
target_path = get_target(desired_target.abspath, job.filter.extension)
if target_path:
target = AudioFile(target_path, prefix=job.target,
file_type='target', job=job)
        # Both files exist
if target:
best = best_format(source.meta, target.meta, job)
if job.rename.cleanup:
# delete source
if not job.rename.best_format or \
(job.rename.best_format and best == 'target'):
action.cleanup(source)
# delete target
if job.rename.best_format and best == 'source':
action.cleanup(target)
# Unset target object to trigger copy or move actions.
target = None
if target:
job.msg.status('Exists', status='error')
# copy
elif job.rename.move == 'copy':
action.copy(source, desired_target)
# move
elif job.rename.move == 'move':
action.move(source, desired_target)
| mit | 1,818,951,839,829,924,400 | 28.123487 | 79 | 0.559528 | false |
alogg/dolfin | test/unit/mesh/python/MeshIterator.py | 1 | 4235 | "Unit tests for MeshIterator and subclasses"
# Copyright (C) 2006-2011 Anders Logg
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2006-08-08
# Last changed: 2011-08-21
import unittest
import numpy
from dolfin import *
class MeshIterator(unittest.TestCase):
def test_vertex_iterators(self):
"Iterate over vertices"
mesh = UnitCubeMesh(5, 5, 5)
# Test connectivity
cons = [(i, mesh.topology()(0,i)) for i in xrange(4)]
# Test writability
for i, con in cons:
def assign(con, i):
con(i)[0] = 1
self.assertRaises(StandardError, assign, con, i)
n = 0
for i, v in enumerate(vertices(mesh)):
n += 1
for j, con in cons:
self.assertTrue(numpy.all(con(i) == v.entities(j)))
self.assertEqual(n, mesh.num_vertices())
# Check coordinate assignment
# FIXME: Outcomment to hopefully please Mac-buildbot
#end_point = numpy.array([v.x(0), v.x(1), v.x(2)])
#mesh.coordinates()[:] += 2
#self.assertEqual(end_point[0] + 2, mesh.coordinates()[-1,0])
#self.assertEqual(end_point[1] + 2, mesh.coordinates()[-1,1])
#self.assertEqual(end_point[2] + 2, mesh.coordinates()[-1,2])
def test_edge_iterators(self):
"Iterate over edges"
mesh = UnitCubeMesh(5, 5, 5)
# Test connectivity
cons = [(i, mesh.topology()(1,i)) for i in xrange(4)]
# Test writability
for i, con in cons:
def assign(con, i):
con(i)[0] = 1
self.assertRaises(StandardError, assign, con, i)
n = 0
for i, e in enumerate(edges(mesh)):
n += 1
for j, con in cons:
self.assertTrue(numpy.all(con(i) == e.entities(j)))
self.assertEqual(n, mesh.num_edges())
def test_face_iterator(self):
"Iterate over faces"
mesh = UnitCubeMesh(5, 5, 5)
# Test connectivity
cons = [(i, mesh.topology()(2,i)) for i in xrange(4)]
# Test writability
for i, con in cons:
def assign(con, i):
con(i)[0] = 1
self.assertRaises(StandardError, assign, con, i)
n = 0
for i, f in enumerate(faces(mesh)):
n += 1
for j, con in cons:
self.assertTrue(numpy.all(con(i) == f.entities(j)))
self.assertEqual(n, mesh.num_faces())
def test_facet_iterators(self):
"Iterate over facets"
mesh = UnitCubeMesh(5, 5, 5)
n = 0
for f in facets(mesh):
n += 1
self.assertEqual(n, mesh.num_facets())
def test_cell_iterators(self):
"Iterate over cells"
mesh = UnitCubeMesh(5, 5, 5)
# Test connectivity
cons = [(i, mesh.topology()(3,i)) for i in xrange(4)]
# Test writability
for i, con in cons:
def assign(con, i):
con(i)[0] = 1
self.assertRaises(StandardError, assign, con, i)
n = 0
for i, c in enumerate(cells(mesh)):
n += 1
for j, con in cons:
self.assertTrue(numpy.all(con(i) == c.entities(j)))
self.assertEqual(n, mesh.num_cells())
def test_mixed_iterators(self):
"Iterate over vertices of cells"
mesh = UnitCubeMesh(5, 5, 5)
n = 0
for c in cells(mesh):
for v in vertices(c):
n += 1
self.assertEqual(n, 4*mesh.num_cells())
if __name__ == "__main__":
unittest.main()
| gpl-3.0 | -2,187,166,778,391,369,200 | 28.615385 | 77 | 0.565525 | false |
mbeaver502/CyberLawBot | engine.py | 1 | 16014 | """
Description:
This program implements a simple Twitter bot that tweets information about bills in Congress
that are (in)directly related to cyber issues. This bot uses a MySQL database backend to
keep track of bills, both posted and unposted (i.e., tweeted and yet to be tweeted, respectively).
For this initial proof of concept, bill data are scraped from the official US Government
Publishing Office website. For future versions, it would probably be better to connect to a
less cumbersome endpoint like ProPublica.
Module:
This module implements the driver functionality for the bot. This is the main entrypoint.
Libraries:
This program makes use of the following libraries:
lxml
Stephan Richter / Infrae
BSD License
http://lxml.de/
xmltodict
Martin Blech & contribs.
MIT License
https://github.com/martinblech/xmltodict
python-twitter
Mike Taylor ('bear') & contribs.
Apache License 2.0
https://github.com/bear/python-twitter
requests
Kenneth Reitz
Apache License 2.0
http://docs.python-requests.org/en/master
MySQL Connector
Oracle & affiliates
Misc. License
https://dev.mysql.com/doc/connector-python/en/
License:
Copyright 2017 J. Michael Beaver
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
References:
https://www.gpo.gov/fdsys/bulkdata/BILLSTATUS/resources/BILLSTATUS-XML_User-Guide-v1.pdf
https://github.com/usgpo/bill-status/blob/master/BILLSTATUS-XML_User_User-Guide.md
https://projects.propublica.org/api-docs/congress-api/endpoints/
https://github.com/bear/python-twitter
https://github.com/martinblech/xmltodict
http://docs.python-requests.org/en/master
https://dev.mysql.com/doc/connector-python/en/
http://lxml.de/
https://www.python.org/dev/peps/pep-0249
https://is.gd/apishorteningreference.php
https://www.pantz.org/software/mysql/mysqlcommands.html
https://bitbucket.org/ned/coveragepy/commits/f8e9d62f1412
https://www.govtrack.us/api/v2/role
https://choosealicense.com/licenses/apache-2.0/
http://www.mysqltutorial.org/getting-started-mysql-python-connector/
"""
from bill import Bill
from bill_db import BillDB
from collections import OrderedDict
from lxml import html
import constants
import json
import logging as log
import re
import requests
import sys
import time
import twitter
import xmltodict
#-------------------------------------------------------------------------------------------------------------------------------------------------
log.basicConfig(filename='cyber_law_bot.log',
level=log.INFO,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M')
REGEX = re.compile(r'(?P<position>Sen.|Rep.) (?P<last_name>\w+), (?P<first_name>\w+) (?P<initial>\w+.)*')
REGEX2 = re.compile(r'(?P<position>Sen.|Rep.) (?P<last_name>\w+) (?P<last_name2>\w+), (?P<first_name>\w+) (?P<initial>\w+.)*')
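# Added note on the patterns above: a sponsor string such as "Sen. Doe, John A."
# yields position='Sen.', last_name='Doe', first_name='John', initial='A.';
# two-part last names fall through to REGEX2, e.g. "Rep. De Luca, Maria J."
# gives last_name='De' and last_name2='Luca'. (Names here are made up.)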
#-------------------------------------------------------------------------------------------------------------------------------------------------
def get_bill_urls(data):
"""Scrapes bill URLs from USGPO page.
Args:
data: HTML data downloaded from USGPO page.
Returns:
A list of URLs, one per bill (on GPO's servers).
"""
tree = html.fromstring(data)
bills = tree.xpath('//div[@class="bulkdata"]/table[@class="styles3"]/tr/td//a/@href')
bills = bills[1:-1] # Strip out 'Parent Directory' at the beginning and the .zip archive at the end
return [str(constants.ROOT_URL + bill) for bill in bills]
#-------------------------------------------------------------------------------------------------------------------------------------------------
def get_bill(session, bill):
"""Attempts to download a bill's XML data from GPO's server.
Args:
session: A requests session object.
bill: The corresponding URL string for the bill.
Returns:
A Bill object with relevant information from the XML, or
None upon failure or if bad args.
Raises:
BaseException: Something horribly wrong happened when downloading.
Generally the exception results from a timeout, which is rare.
"""
if session and bill:
try:
r = session.get(bill, timeout=5)
if r.status_code == requests.codes.ok:
return Bill(xmltodict.parse(r.text))
except BaseException as ex:
log.warning('Error downloading ' + bill)
raise ex
return None
#-------------------------------------------------------------------------------------------------------------------------------------------------
def is_relevant(bill):
"""Determines if a given bill meets set criteria for relevance.
Args:
bill: A Bill object.
Returns:
A boolean value. True => relevant, False => irrelevant or bad arg.
"""
if bill:
title = (bill.get_bill_title()).lower()
summary = bill.get_bill_summary()
if summary:
summary = summary.lower()
else:
summary = ''
subjects = list()
if bill.bill_subjects:
items = bill.bill_subjects['item']
for item in items:
if isinstance(item, OrderedDict):
subjects.append(item['name'].lower())
else:
subjects.append(items[item].lower())
"""
We use ultra-lazy, ultra-greedy keyword matching here. We basically want to find
any instance of a given KEYWORD within a bill's title, summary, or specified
legislative subjects (as determined by Congress). This laziness can result in some
interesting 'false positives', but these can generally be controlled by setting
conservative/thoughtful KEYWORDS.
"""
for keyword in constants.KEYWORDS:
if (keyword in title) or (keyword in summary) or (keyword in subjects):
return True
return False
#-------------------------------------------------------------------------------------------------------------------------------------------------
def download_bills():
"""Downloads bills from GPO's servers.
Returns:
A list of relevant bills, which could be empty.
References:
Timing from http://stackoverflow.com/a/27780763.
"""
session = requests.Session()
relevant_bills = list()
start = time.time()
for url in constants.URL_LIST:
log.info('Connecting {}'.format(url))
try:
r = session.get(url, timeout=5)
except requests.exceptions.RequestException as ex:
log.warning(ex)
continue # No point in doing anything else if there was a connection error
if r.status_code != requests.codes.ok:
r.raise_for_status()
else:
bills = get_bill_urls(r.content)
num_bills = len(bills)
bill_idx = 0
for bill in bills:
bill_idx += 1
sys.stdout.write('Processing bill %d / %d \r' % (bill_idx, num_bills))
sys.stdout.flush()
b = get_bill(session, bill)
if is_relevant(b):
relevant_bills.append(b)
end = time.time()
hours, rem = divmod(end-start, 3600)
minutes, seconds = divmod(rem, 60)
log.info('Elapsed {:0>2}:{:0>2}:{:05.2f}'.format(int(hours), int(minutes), seconds))
log.info('Found {} relevant bills!'.format(len(relevant_bills)))
return relevant_bills
#-------------------------------------------------------------------------------------------------------------------------------------------------
def get_name(name):
"""Gets a sponsor's name using regex.
Args:
name: String with a name to be extracted.
Returns:
A dict containing the name's parts (see REGEX and REGEX2).
None if something went wrong.
"""
m = re.match(REGEX, name)
if m:
return m.groupdict()
else:
m = re.match(REGEX2, name)
if m:
return m.groupdict()
return None
#-------------------------------------------------------------------------------------------------------------------------------------------------
def build_tweet(row):
"""Constructs a tweet string to be sent to Twitter API.
Args:
row: Row of data from the database table (tuple).
Returns:
A tweet as a string. None if bad arg. A tweet string has
the following general format:
Bill <type>. <number>: ["<title>"] ([sponsor,] <introduction date>) | <short URL>
The sponsor really is the only thing that can be omitted (e.g., bad Twitter handle).
In exceptionally rare cases, the title may be omitted due to length.
"""
if not row or not isinstance(row, tuple):
return None
bill_type = row[1] + '.'
bill_number = str(row[2])
name = get_name(row[3])
title = row[4]
short_url = row[6]
intro_date = row[7]
"""
We're going to try really hard to give attribution by including the
sponsor's Twitter handle in the tweet (e.g., @JohnDoe). To do that,
we're going to reconstruct the sponsor's name as Doe, John and try
to lazy match with the CONGRESS_TWITTER dict constant. If we can't
do that, we'll try to give their position and their last name (e.g.,
Sen. Doe). If we're too lazy or no match exists, we just give an
empty string as the name.
"""
if name:
lname = ''
if 'last_name2' in name: # Some people have two last names.
lname = name['last_name'] + ' ' + name['last_name2']
else:
lname = name['last_name']
temp = lname + ', ' + name['first_name']
if temp in constants.CONGRESS_TWITTER:
twitter_handle = constants.CONGRESS_TWITTER[temp]
elif name['position']:
twitter_handle = name['position'] + ' ' + lname
else:
twitter_handle = lname
else:
twitter_handle = ''
tweet_start = 'Bill ' + bill_type + ' ' + bill_number + ': '
if twitter_handle:
tweet_end = ' ({0}, {1}) '.format(twitter_handle, intro_date)
else:
tweet_end = ' ({0}) '.format(intro_date)
tweet_end += '| ' + short_url
"""
Since the bill title is the only thing we can really control in terms of length
and since bill titles tend to be rather long, we're going to truncate as necessary
to meet the TWEET_MAX_LENGTH bound. If the tweet is already too long, then we skip
the title altogether. But that really shouldn't be an issue. (Famous last words.)
"""
tw_title = ''
tw_len = len(tweet_start + tweet_end)
if tw_len < constants.TWEET_MAX_LENGTH:
diff = constants.TWEET_MAX_LENGTH - tw_len - 2 # -2 to account for quotation marks ("")
if len(title) > diff:
tw_title = '"{}..."'.format(title[:diff-3]) # -3 to account for ellipsis (...)
else:
tw_title = '"{}"'.format(title)
return str(tweet_start + tw_title + tweet_end)
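# Hypothetical example of a finished tweet (handle, date and short URL made up):
#
#   Bill S. 1234: "Cybersecurity Information Sharing..." (@SenJaneDoe, 2017-01-15) | https://is.gd/abc123
#
# The title is the only part that gets truncated; everything else is kept so the
# whole string stays within TWEET_MAX_LENGTH.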
#-------------------------------------------------------------------------------------------------------------------------------------------------
def main():
"""Main program driver.
Raises:
Exception: Critical failure in the beginning. Immediately exit.
Elsewhere, the exception is reported and execution continues.
BaseException: Critical failure in the beginning. Immediately exit.
KeyboardInterrupt: Used to terminate the infinite loop.
"""
log.info('*** START LOG ***')
try:
db = BillDB()
except Exception as e:
log.critical(e)
log.info('*** END LOG ***')
exit(-1)
try:
api = twitter.Api(consumer_key='',
consumer_secret='',
access_token_key='',
access_token_secret='')
except BaseException as e:
log.critical(e)
log.info('*** END LOG ***')
exit(-1)
# This downloading should happen only once every 24 hours so GPO's servers don't get slammed.
try:
relevant_bills = download_bills()
for bill in relevant_bills:
info = bill.bill_to_dict()
if not db.row_exists(info):
db.insert_row(info)
except Exception as e:
log.warning(e)
"""
This is probably worthy of criminal prosecution.
This program is meant to function as a kind of quasi-daemon with minimal
human interaction. If execution makes it to this infinite loop, we want
to perform two basic functions:
1) Try to shorten as many URL as possible, as necessary.
2) Try to generate and post a tweet about a previously unposted bill.
We sleep for one hour (3600 seconds) between each iteration, mostly
so is.gd doesn't get mad at us. But it also keeps Twitter happy by not
spamming them with hundreds of tweets in the span of a few seconds.
If something bad happens during URL shortening (i.e., an exception),
we bail out and immediately kill the program. Not a graceful recovery by
any stretch of the imagination, but that should be a rare happenstance.
We also exit the infinite loop after SLEEP_LIMIT (+1) hours. That's meant to
correspond roughly to the standard (American) work day. Ideally, change this
to a sleep(X hours [until 9am]) and reset times_slept = 0 to act as a
quasi-daemon, or kill the process and use a cron job to relaunch the
process every (week)day at 9am.
"""
try:
times_slept = 0
log.info('Table size: {} rows'.format(db.get_table_size())) # Merely for diagnostic purposes.
while True:
rows = db.rows_to_shorten()
try:
for row in rows:
db.gen_short_url(row)
except Exception as e:
log.critical(e)
raise KeyboardInterrupt
if api.VerifyCredentials():
row = db.get_row_to_post()
tw = build_tweet(row)
if tw:
status = api.PostUpdate(tw)
if status:
log.info('Posted new tweet @ {}'.format(time.asctime(time.localtime(time.time()))))
log.info('\t{}'.format(tw))
_row = list(row[:-1]) # I apologize for this, but you can blame Python's tuples.
_row.append(True)
_row = tuple(_row)
new = db.tuple_to_dict(_row)
db.update_row(new['id'], new)
log.info('Sleeping for one hour...')
time.sleep(3600)
times_slept += 1
if times_slept == constants.SLEEP_LIMIT:
raise KeyboardInterrupt
except KeyboardInterrupt:
pass
finally:
db.close()
log.info('*** END LOG ***')
#-------------------------------------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
main()
| apache-2.0 | 3,652,089,476,477,341,700 | 36.503513 | 146 | 0.554078 | false |
lakewik/storj-gui-client | UI/qt_interfaces/file_mirrors_ui_new.py | 1 | 11722 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'file_mirrors_list_new.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_FileMirrorsList(object):
def setupUi(self, FileMirrorsList):
FileMirrorsList.setObjectName(_fromUtf8("FileMirrorsList"))
FileMirrorsList.resize(1241, 491)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
FileMirrorsList.setPalette(palette)
FileMirrorsList.setAutoFillBackground(False)
FileMirrorsList.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.label_6 = QtGui.QLabel(FileMirrorsList)
self.label_6.setGeometry(QtCore.QRect(10, 10, 101, 16))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Lato"))
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.label_6.setFont(font)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.file_name = QtGui.QLabel(FileMirrorsList)
self.file_name.setGeometry(QtCore.QRect(110, 10, 511, 16))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Lato"))
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.file_name.setFont(font)
self.file_name.setStyleSheet(_fromUtf8("QLabel{\n"
"color: #2683ff;\n"
"}\n"
""))
self.file_name.setObjectName(_fromUtf8("file_name"))
self.label_11 = QtGui.QLabel(FileMirrorsList)
self.label_11.setGeometry(QtCore.QRect(10, 30, 81, 16))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Lato"))
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.label_11.setFont(font)
self.label_11.setObjectName(_fromUtf8("label_11"))
self.file_id = QtGui.QLabel(FileMirrorsList)
self.file_id.setGeometry(QtCore.QRect(110, 30, 511, 16))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Lato"))
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.file_id.setFont(font)
self.file_id.setStyleSheet(_fromUtf8("QLabel{\n"
"color: #2683ff;\n"
"}\n"
""))
self.file_id.setObjectName(_fromUtf8("file_id"))
self.available_mirrors_tree = QtGui.QTreeView(FileMirrorsList)
self.available_mirrors_tree.setGeometry(QtCore.QRect(620, 90, 611, 351))
self.available_mirrors_tree.setObjectName(_fromUtf8("available_mirrors_tree"))
self.established_mirrors_tree = QtGui.QTreeView(FileMirrorsList)
self.established_mirrors_tree.setGeometry(QtCore.QRect(10, 90, 601, 351))
self.established_mirrors_tree.setObjectName(_fromUtf8("established_mirrors_tree"))
self.established_mirrors_count = QtGui.QLabel(FileMirrorsList)
self.established_mirrors_count.setGeometry(QtCore.QRect(160, 50, 301, 41))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Lato"))
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.established_mirrors_count.setFont(font)
self.established_mirrors_count.setObjectName(_fromUtf8("established_mirrors_count"))
self.available_mirrors_count = QtGui.QLabel(FileMirrorsList)
self.available_mirrors_count.setGeometry(QtCore.QRect(820, 50, 241, 41))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Lato"))
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.available_mirrors_count.setFont(font)
self.available_mirrors_count.setObjectName(_fromUtf8("available_mirrors_count"))
self.mirror_details_bt = QtGui.QPushButton(FileMirrorsList)
self.mirror_details_bt.setGeometry(QtCore.QRect(110, 450, 271, 31))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Lato"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.mirror_details_bt.setFont(font)
self.mirror_details_bt.setStyleSheet(_fromUtf8("QPushButton {\n"
" background-color: #2683ff;\n"
" border: 1px solid #2683ff;\n"
" color: #fff;\n"
" border-radius: 7px;\n"
"}\n"
"QPushButton:hover{\n"
" background-color: #2274e2;\n"
" border-color: #2274e2;\n"
"}\n"
"QPushButton:active{\n"
" background-color: #4393ff;\n"
" border-color: #4393ff;\n"
"}"))
self.mirror_details_bt.setObjectName(_fromUtf8("mirror_details_bt"))
self.mirror_details_bt_2 = QtGui.QPushButton(FileMirrorsList)
self.mirror_details_bt_2.setEnabled(True)
self.mirror_details_bt_2.setGeometry(QtCore.QRect(910, 450, 291, 31))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Lato"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.mirror_details_bt_2.setFont(font)
self.mirror_details_bt_2.setStyleSheet(_fromUtf8("QPushButton {\n"
" background-color: #2683ff;\n"
" border: 1px solid #2683ff;\n"
" color: #fff;\n"
" border-radius: 7px;\n"
"}\n"
"QPushButton:hover{\n"
" background-color: #2274e2;\n"
" border-color: #2274e2;\n"
"}\n"
"QPushButton:active{\n"
" background-color: #4393ff;\n"
" border-color: #4393ff;\n"
"}"))
self.mirror_details_bt_2.setObjectName(_fromUtf8("mirror_details_bt_2"))
self.quit_bt = QtGui.QPushButton(FileMirrorsList)
self.quit_bt.setGeometry(QtCore.QRect(10, 450, 91, 31))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Lato"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.quit_bt.setFont(font)
self.quit_bt.setStyleSheet(_fromUtf8("QPushButton {\n"
" background-color: #CC0000;\n"
" border: 1px solid #CC0000;\n"
" color: #fff;\n"
" border-radius: 7px;\n"
"}\n"
"QPushButton:hover{\n"
" background-color: #bb0a0a;\n"
" border-color: #bb0a0a;\n"
"}\n"
"QPushButton:active {\n"
" background-color: #ce0e0e;\n"
" border-color: #ce0e0e;\n"
"}"))
self.quit_bt.setObjectName(_fromUtf8("quit_bt"))
self.loading_img = QtGui.QLabel(FileMirrorsList)
self.loading_img.setEnabled(True)
self.loading_img.setGeometry(QtCore.QRect(590, 10, 71, 61))
self.loading_img.setText(_fromUtf8(""))
self.loading_img.setScaledContents(True)
self.loading_img.setObjectName(_fromUtf8("loading_img"))
self.contract_details_bt = QtGui.QPushButton(FileMirrorsList)
self.contract_details_bt.setGeometry(QtCore.QRect(390, 450, 221, 31))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Lato"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.contract_details_bt.setFont(font)
self.contract_details_bt.setStyleSheet(_fromUtf8("QPushButton:hover{\n"
" background-color: #83bf20;\n"
" border-color: #83bf20;\n"
"}\n"
"QPushButton:active {\n"
" background-color: #93cc36;\n"
" border-color: #93cc36;\n"
"}\n"
"QPushButton{\n"
" background-color: #88c425;\n"
" border: 1px solid #88c425;\n"
" color: #fff;\n"
" border-radius: 7px;\n"
"}"))
self.contract_details_bt.setObjectName(_fromUtf8("contract_details_bt"))
self.contract_details_bt_2 = QtGui.QPushButton(FileMirrorsList)
self.contract_details_bt_2.setGeometry(QtCore.QRect(680, 450, 221, 31))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Lato"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.contract_details_bt_2.setFont(font)
self.contract_details_bt_2.setStyleSheet(_fromUtf8("QPushButton:hover{\n"
" background-color: #83bf20;\n"
" border-color: #83bf20;\n"
"}\n"
"QPushButton:active {\n"
" background-color: #93cc36;\n"
" border-color: #93cc36;\n"
"}\n"
"QPushButton{\n"
" background-color: #88c425;\n"
" border: 1px solid #88c425;\n"
" color: #fff;\n"
" border-radius: 7px;\n"
"}"))
self.contract_details_bt_2.setObjectName(_fromUtf8("contract_details_bt_2"))
self.retranslateUi(FileMirrorsList)
QtCore.QMetaObject.connectSlotsByName(FileMirrorsList)
FileMirrorsList.setTabOrder(self.established_mirrors_tree, self.available_mirrors_tree)
FileMirrorsList.setTabOrder(self.available_mirrors_tree, self.quit_bt)
FileMirrorsList.setTabOrder(self.quit_bt, self.mirror_details_bt)
FileMirrorsList.setTabOrder(self.mirror_details_bt, self.mirror_details_bt_2)
def retranslateUi(self, FileMirrorsList):
FileMirrorsList.setWindowTitle(_translate("FileMirrorsList", "File mirrors list - Storj GUI", None))
self.label_6.setText(_translate("FileMirrorsList", "<html><head/><body><p><span style=\" color:#555555;\">FILE NAME:</span></p></body></html>", None))
self.file_name.setText(_translate("FileMirrorsList", "<html><head/><body><p><span style=\" color:#2683ff;\">N/A</span></p></body></html>", None))
self.label_11.setText(_translate("FileMirrorsList", "<html><head/><body><p><span style=\" color:#555555;\">FILE ID:</span></p></body></html>", None))
self.file_id.setText(_translate("FileMirrorsList", "<html><head/><body><p><span style=\" color:#2683ff;\">2000.00MB</span></p></body></html>", None))
self.established_mirrors_count.setText(_translate("FileMirrorsList", "<html><head/><body><p align=\"center\"><span style=\" color:#555555;\">ESTABLISHED (XXXX)</span></p></body></html>", None))
self.available_mirrors_count.setText(_translate("FileMirrorsList", "<html><head/><body><p align=\"center\"><span style=\" color:#555555;\">AVAILABLE (XXXX)</span></p></body></html>", None))
self.mirror_details_bt.setText(_translate("FileMirrorsList", "MORE MIRROR DETAILS", None))
self.mirror_details_bt_2.setText(_translate("FileMirrorsList", "MORE MIRROR DETAILS", None))
self.quit_bt.setText(_translate("FileMirrorsList", "CLOSE", None))
self.contract_details_bt.setText(_translate("FileMirrorsList", "CONTRACT DETAILS", None))
self.contract_details_bt_2.setText(_translate("FileMirrorsList", "CONTRACT DETAILS", None))
import resources_rc
| mit | 2,790,875,658,646,326,300 | 43.570342 | 201 | 0.663197 | false |
icemoon1987/python_toolkit | mail_office365.py | 1 | 2127 | #!/usr/bin/env python
#coding: utf-8
import smtplib
import email
import mimetypes
import json
from email.MIMEMultipart import MIMEMultipart
from email.mime.text import MIMEText
mail_host = ""
mail_user = ""
mail_pwd = ""
mail_postfix = ""
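# NOTE (editor's assumption, not part of the original file): these settings
# must be filled in before use. For Office 365, the SMTP host is typically
# "smtp.office365.com", reached with STARTTLS on port 587 as done below.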
def sendmail(to_list,subject,content):
# translation
me = mail_user+"<"+mail_user+"@"+mail_postfix+">"
msg = MIMEMultipart('related')
msg['Subject'] = email.Header.Header(subject,'utf-8')
msg['From'] = mail_user
msg['To'] = ";".join(to_list)
msg.preamble = 'This is a multi-part message in MIME format.'
msgAlternative = MIMEMultipart('alternative')
msgText = MIMEText(content, 'plain', 'utf-8')
msgAlternative.attach(msgText)
msg.attach(msgAlternative)
try:
        # Office 365 uses STARTTLS on port 587, so open a plain SMTP session
        # and upgrade it; SMTP_SSL (implicit TLS) is for port 465 and would
        # fail when combined with starttls().
        s = smtplib.SMTP()
        s.connect(mail_host, 587)
        s.starttls()
s.login(mail_user,mail_pwd)
s.sendmail(mail_user, to_list, msg.as_string())
s.quit()
except Exception,e:
print e
return False
return True
def sendhtmlmail(to_list,subject,content):
# translation
me = mail_user+"<"+mail_user+"@"+mail_postfix+">"
msg = MIMEMultipart('related')
msg['Subject'] = email.Header.Header(subject,'utf-8')
msg['From'] = mail_user
msg['To'] = ";".join(to_list)
msg.preamble = 'This is a multi-part message in MIME format.'
msgAlternative = MIMEMultipart('alternative')
msgText = MIMEText(content, 'html', 'utf-8')
msgAlternative.attach(msgText)
msg.attach(msgAlternative)
try:
        s = smtplib.SMTP()
s.connect(mail_host, 587)
s.starttls()
s.login(mail_user,mail_pwd)
s.sendmail(mail_user, to_list, msg.as_string())
s.quit()
except Exception,e:
print e
return False
return True
if __name__ == '__main__':
detail = """
    A quick test email to check that sending works.
"""
    if sendhtmlmail(["[email protected]"], "Test email", detail):
print "Success!"
else:
print "Fail!"
| gpl-3.0 | 2,149,991,689,210,147,800 | 24.961538 | 65 | 0.588683 | false |
derek-dalle/olab | matrix.py | 1 | 4107 | """
Provide sane Matlab-like functions
"""
# Import NumPy because we're building on some of its functions.
import numpy as _np
# Function to give length regardless of the damn type.
def numel(x):
"""
Give number of elements in a variable without being ridiculously dependent
on the type. Ever wanted to know how big a variable is but don't know
beforehand if it's a list or NumPy array or maybe just a number? TOO BAD!
THIS IS PYTHON AND WE DON'T LIKE FLEXIBLE THINGS. Or you can just use this
function.
:Call:
>>> n = numel(x)
:Inputs:
*x*: :class:`numpy.ndarray` or :class:`list` or :class:`float`, ...
This can hopefully be any variable, but is really intended for
lists, NumPy arrays, and scalars.
:Outputs:
*n*: :class:`int`
Number of elements in *x*. If *x* is an array, this gives the
total number in all dimensions, i.e. ``x.size``
:Examples:
This is supposed to just work without providing a mountain of
in-your-face Python caveats
>>> numel(3)
1
>>> numel([1, ['2a', '2b'], 3])
3
>>> numel(np.array([1, 2, 3]))
3
>>> numel(np.array([[1, 2, 3], [4, 5, 6]])
6
"""
# Versions:
# 2014.05.31 @ddalle : First version
# Check the input type.
if type(x) is _np.ndarray:
# NumPy arrays store the thing we're looking for.
return x.size
elif hasattr(x, '__len__'):
# For anything else that has len(x), use that.
return len(x)
else:
# This is arguable; anything else has numel(x) == 1.
return 1
# Function to concatenate arrays horizontally
def horzcat(*x):
"""
Concatenate arrays horizontally.
:Call:
>>> X = horzcat(x1, x2, ... )
>>> X = horzcat(x1, x2, x3, ... )
:Inputs:
*x1*: :class:`numpy.array`
An array or list
*x2*: :class:`numpy.array`
An array with the same dimensions as *x1* except possibly the number
of rows
:Outputs:
*X*: :class:`numpy.array`
Concatenated array, like ``[x1, x2]`` in Matlab
"""
# Versions:
# 2014.06.01 @ddalle : First version
# Convert to arrays if necessary.
    # Use a list rather than a generator so newer NumPy versions accept it as a sequence.
    X = [_np.array(xi) for xi in x]
# For now, just use the built-in function (which has unusual syntax).
return _np.hstack(X)
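# Minimal usage sketch (values are illustrative, not part of the original module):
#     horzcat([1, 2], [3, 4]) -> array([1, 2, 3, 4])
# For 2-D inputs the arrays are joined column-wise, like Matlab's [A, B].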
# Function to concatenate arrays vertically
def vertcat(*x):
"""
Concatenate arrays vertically.
:Call:
>>> X = vertcat(x1, x2, ... )
>>> X = vertcat(x1, x2, x3, ... )
:Inputs:
*x1*: :class:`numpy.array`
An array or list
*x2*: :class:`numpy.array`
An array with the same dimensions as *x1* except possibly the number
of rows
:Outputs:
*X*: :class:`numpy.array`
Concatenated array, like ``[x1; x2]`` in Matlab
"""
# Versions:
# 2014.06.01 @ddalle : First version
# Convert to arrays if necessary.
    # Use a list rather than a generator so newer NumPy versions accept it as a sequence.
    X = [_np.array(xi) for xi in x]
# For now, just use the built-in function (which has unusual syntax).
return _np.vstack(X)
# Convert to single array.
def tovector(x):
"""
Convert to a one-dimensional NumPy array. This has some of the same
characteristics as columnization in Matlab. That is, it's similar to `x(:)`
in that language.
:Call:
>>> v = tovector(x)
:Inputs:
*x*: :class:`numpy.array`
Any *n*-dimensional array object
:Outputs:
        *v*: :class:`numpy.array`
            One-dimensional array containing the elements of *x*
"""
# Versions:
# 2014.06.01 @ddalle : First version
# Check if it's an array.
if type(x) is _np.ndarray:
# Use it.
X = x
else:
# Convert it.
X = _np.array(x)
# Use the built-in method that I can never remember.
return X.flatten()
| lgpl-2.1 | 3,550,653,919,833,609,700 | 26.945578 | 80 | 0.538593 | false |
jajgarcia/DoFits | dofit.py | 1 | 3988 | #!/usr/bin/python
#
# dofit.py (Based on fitxsp.py)
#
# Load data and perform a model fit using PyXspec
#
# Requires: xspec
# Make sure to set:
# export VERSIONER_PYTHON_PREFER_32_BIT=yes
# (only for heasoft earlier than 6.16)
#
import sys
from xspec import *
from optparse import OptionParser
import os,os.path
import glob
from astropy.io import fits as pyfits
#import pyfits
#
# ------------------------------------------------------------------------------
#
# MAIN PROGRAM
#
#
#
version='0.1a'
date='- Tue Jul 21 11:16:23 EDT 2015 -'
author='Javier Garcia (Harvard-Smithsonian CfA)'
#
ul=[]
ul.append("usage: %prog [options] PREFIX")
ul.append("")
ul.append("Get total counts in different bands for a given observation")
ul.append("PREFIX can be a single PHA file or a group (e.g. *.pha)")
usage=""
for u in ul: usage+=u+'\n'
parser=OptionParser(usage=usage)
parser.add_option("-v","--version",action="store_true",dest="version",default=False,help="show version number")
(options,args)=parser.parse_args()
if options.version:
print 'dofit.py version:',version,date
print 'Author:',author
sys.exit()
if len(args) == 0:
parser.print_help()
sys.exit(0)
#-----
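# Example invocation (the file pattern is illustrative):
#     python dofit.py 'rebinned*.pha'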
# No chatter
#Xset.chatter = 0
# Query
Fit.query = 'yes'
# Load local models
#AllModels.lmod("relxill")
# Get current directory
currpath = os.getcwd()
# Observations path
obspath='/Users/javier/crab-hexte/rebinned-clean-observations/'
# Change dir to observations path
os.chdir(obspath)
# List of spectrum files
files=glob.glob(args[0])
#----- LOOP OVER OBSERVATIONS ---#
for specfile in files:
# Change dir to observations path
os.chdir(obspath)
# Check if specfile exist
if not os.path.isfile(specfile):
print 'Warning: spectrum file',specfile,'does not exist!'
print 'Skiping...'
else: # Here I need to discriminate between pcu, mjd, etc...
# Load data
s1 = Spectrum(specfile);
# Go back to the working directory
os.chdir(currpath)
# Exposure time
et = s1.exposure
# Ignore/notice data
s1.ignore("0.-20.,250.-**")
# Define the Model
m1 = Model("const*tbabs*pow")
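        # In XSPEC a negative fit delta freezes a parameter: the constant factor
        # and tbabs nH below are held fixed, while the power-law index and norm
        # (parameters 3 and 4) are left free to vary in the fit.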
m1(1).values = "1. -1"
m1(2).values = "0.34 -1"
m1(3).values = "2.0 1"
m1(4).values = "10. 1"
# Fit
Fit.renorm()
Fit.perform()
# Create and open a log file
logFile = Xset.openLog('complete/fit-'+specfile+'.log')
logFile = Xset.log
# Calculate Errors
Fit.error("maximum 20. 2.706 3 4")
# Close currently opened log file.
Xset.closeLog()
# Equivalent to show all
logFile = Xset.openLog('complete/fit-'+specfile+'.txt')
logFile = Xset.log
s1.show()
m1.show()
Fit.show()
Xset.closeLog()
# Get total flux
AllModels.calcFlux("20. 250.")
fx = s1.flux
outfile='complete/fit-'+specfile+'.fluxes'
f = open(outfile, 'w')
f.write('# Mean Fluxes: Total ---\n')
f.write(str(fx[0])+'\n')
f.close()
# Get residuals for all channels
s1.notice("**")
Plot.xAxis = "keV"
Plot("residuals")
xvals = Plot.x() # Center bin energy (keV)
yvals = Plot.y() # Residuals: Data-Model (counts/sec/keV)
xErrs = Plot.xErr() # Half of bin width (keV)
yErrs = Plot.yErr() # Sigma: Error bar (counts/sec/keV)
outfile='complete/fit-'+specfile+'.res'
f = open(outfile, 'w')
for i in range(len(xvals)):
f.write(str(xvals[i])+' '+str(xErrs[i])+' '+str("%1.8f" %yvals[i])+' '+str(yErrs[i])+'\n')
f.close()
outfile='complete/fit-'+specfile+'.error'
f = open(outfile, 'w')
f.write('Param Value Low_lim Up_lim\n')
f.write(str(m1(3).values[0])+' '+str(m1(3).error[0])+' '+str(m1(3).error[1])+'\n')
f.write(str(m1(4).values[0])+' '+str(m1(4).error[0])+' '+str(m1(4).error[1])+'\n')
f.close()
# Unload data
AllData -= s1
# Unload the model
AllModels.clear()
# Output
#
#
sys.exit()
# ------------------------------------------------------------------------------
| gpl-2.0 | -6,353,604,172,897,491,000 | 22.052023 | 111 | 0.599549 | false |
iRapha/140_MD | app/client.py | 1 | 2653 | # Credit: https://github.com/pabluk/twitter-application-only-auth/blob/master/application_only_auth/client.py
import base64
import json
import sys
try:
# For Python 3.0 and later
from urllib.request import urlopen, Request
from urllib.error import HTTPError
except ImportError:
# Fall back to Python 2's urllib2
from urllib2 import urlopen, Request, HTTPError
API_ENDPOINT = 'https://api.twitter.com'
API_VERSION = '1.1'
REQUEST_TOKEN_URL = '%s/oauth2/token' % API_ENDPOINT
REQUEST_RATE_LIMIT = '%s/%s/application/rate_limit_status.json' % \
(API_ENDPOINT, API_VERSION)
class ClientException(Exception):
pass
class Client(object):
"""This class implements the Twitter's Application-only authentication."""
def __init__(self, consumer_key, consumer_secret):
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.access_token = ''
def request(self, url):
"""Send an authenticated request to the Twitter API."""
if not self.access_token:
self.access_token = self._get_access_token()
request = Request(url)
request.add_header('Authorization', 'Bearer %s' % self.access_token)
try:
response = urlopen(request)
except HTTPError:
raise ClientException
raw_data = response.read().decode('utf-8')
data = json.loads(raw_data)
return data
def rate_limit_status(self, resource=''):
"""Returns a dict of rate limits by resource."""
response = self.request(REQUEST_RATE_LIMIT)
if resource:
resource_family = resource.split('/')[1]
return response['resources'][resource_family][resource]
return response
def _get_access_token(self):
"""Obtain a bearer token."""
bearer_token = '%s:%s' % (self.consumer_key, self.consumer_secret)
encoded_bearer_token = base64.b64encode(bearer_token.encode('ascii'))
request = Request(REQUEST_TOKEN_URL)
request.add_header('Content-Type',
'application/x-www-form-urlencoded;charset=UTF-8')
request.add_header('Authorization',
'Basic %s' % encoded_bearer_token.decode('utf-8'))
request_data = 'grant_type=client_credentials'.encode('ascii')
if sys.version_info < (3,4):
request.add_data(request_data)
else:
request.data = request_data
response = urlopen(request)
raw_data = response.read().decode('utf-8')
data = json.loads(raw_data)
return data['access_token']
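# Example usage (the keys are placeholders, not real credentials):
#     client = Client("CONSUMER_KEY", "CONSUMER_SECRET")
#     data = client.request("%s/%s/users/show.json?screen_name=twitterapi" % (API_ENDPOINT, API_VERSION))
#     limits = client.rate_limit_status("/users/show/:id")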
| gpl-3.0 | 7,204,466,494,864,592,000 | 32.582278 | 109 | 0.62533 | false |
cuttlefishh/emp | code/01-metadata/metadata_template_generator.py | 1 | 4911 | #!/usr/bin/env python
import click
import pandas as pd
import re
# Hard-coded variables
investigation_type = 'metagenome'
# Function: return dataframe of environmental package-specific metadata items
# A single environmental package (soil) or list can be provided (soil,water).
def show_items_of_env_pkg(df_env_pkg, list_of_env_pkg):
"""Return dataframe of environmental package-specific metadata items"""
df_items = df_env_pkg[df_env_pkg['Environmental package'].isin(list_of_env_pkg)]
return df_items
# Function: return dataframe of metadata template
def create_template_for_env_pkg(df_QiitaEBI, df_MIMS, df_env_pkg, list_of_env_pkg, number_of_samples, sample_prefix):
"""Return dataframe of metadata template"""
# get headers/requirement/example of Qiita-EBI/MIMS/env_pkg columns
pkg_items = show_items_of_env_pkg(df_env_pkg, list_of_env_pkg)
headers_env_pkg = pkg_items['Structured comment name'].values
require_env_pkg = pkg_items['Requirement']
example_env_pkg = pkg_items['Value syntax']
headers_all = list(df_QiitaEBI.iloc[0]) + list(df_MIMS.iloc[0]) + list(headers_env_pkg)
require_all = pd.concat([df_QiitaEBI.iloc[1], df_MIMS.iloc[1], require_env_pkg])
example_all = pd.concat([df_QiitaEBI.iloc[2], df_MIMS.iloc[2], example_env_pkg])
# populate template dataframe
df_template = pd.DataFrame(columns=headers_all, dtype=object)
df_template.loc['Requirement'] = require_all.values
df_template.loc['Format'] = example_all.values
string_of_env_pkg = re.sub(r'\W', '.', '.'.join(list_of_env_pkg))
for i in range(0, number_of_samples):
df_template.loc[i+1] = ['' for x in range(len(df_template.columns))]
df_template.loc[i+1]['sample_name'] = '%s.%s.%s' % (sample_prefix, string_of_env_pkg, i+1)
df_template.loc[i+1]['investigation_type'] = investigation_type
df_template.loc[i+1]['env_package'] = ' or '.join(list_of_env_pkg)
return df_template
@click.command()
@click.option('--qiita_ebi_mims_path', required=True, type=click.Path(resolve_path=True, readable=True, exists=True), help='Excel file with Qiita/EBI and MIMS required fields. Example: Qiita_EBI_MIMS_v1.xlsx')
@click.option('--migs_mims_path', required=True, type=click.Path(resolve_path=True, readable=True, exists=True), help='Excel file with MIxS standards. Example: MIGS_MIMS_v4.xls')
@click.option('--list_of_env_pkg', required=True, type=click.STRING, help="One (recommended) or more (separated by commas) environmental package. Choose from: air, built environment, host-associated, human-associated, human-skin, human-oral, human-gut, human-vaginal, microbial mat/biofilm, misc environment, plant-associated, sediment, soil, wastewater/sludge, water")
@click.option('--number_of_samples', required=True, type=click.INT, help='Number of samples (per environmental package) to create rows for in the template')
@click.option('--sample_prefix', required=True, type=click.STRING, help='Prefix string to prepend to sample numbers in row indexes. Example: Metcalf40 (EMP500 PI and study number)')
# Main function: generate metadata template and readme csv files
def generate_metadata_template(qiita_ebi_mims_path, migs_mims_path, list_of_env_pkg, number_of_samples, sample_prefix):
"""Generate metadata template and readme csv files"""
# Qiita/EBI/MIMS Excel file to DataFrames
df_QiitaEBI = pd.read_excel(qiita_ebi_mims_path, sheetname='QiitaEBI', header=None)
df_MIMS = pd.read_excel(qiita_ebi_mims_path, sheetname='MIMS', header=None)
list_of_env_pkg = list_of_env_pkg.split(",")
# MIGS/MIMS Excel file to DataFrames
df_README = pd.read_excel(migs_mims_path, sheetname='README', header=None)
df_MIGS_MIMS = pd.read_excel(migs_mims_path, sheetname='MIGS_MIMS', header=0, index_col=0)
df_env_pkg = pd.read_excel(migs_mims_path, sheetname='environmental_packages', header=0)
# generate template file
df_template = create_template_for_env_pkg(df_QiitaEBI, df_MIMS, df_env_pkg, list_of_env_pkg, number_of_samples, sample_prefix)
string_of_env_pkg = re.sub(r'\W', '_', '_'.join(list_of_env_pkg))
df_template.to_csv('%s_%s_%s_samples.csv' % (sample_prefix, string_of_env_pkg, number_of_samples), index_label='index')
# generate info file
df_MIMS_select = df_MIGS_MIMS[df_MIGS_MIMS.Section.isin(['investigation', 'environment', 'migs/mims/mimarks extension'])]
df_MIMS_select.to_csv('README_MIMS_metadata.csv')
df_env_pkg_select = show_items_of_env_pkg(df_env_pkg, list_of_env_pkg)
del df_env_pkg_select['Environmental package']
df_env_pkg_select.set_index('Structured comment name', inplace=True)
string_of_env_pkg = re.sub(r'\W', '_', '_'.join(list_of_env_pkg))
df_env_pkg_select.to_csv('README_%s_metadata.csv' % string_of_env_pkg)
# Execute main function
if __name__ == '__main__':
generate_metadata_template()
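# Example invocation (file names and values are illustrative only):
#     python metadata_template_generator.py \
#         --qiita_ebi_mims_path Qiita_EBI_MIMS_v1.xlsx \
#         --migs_mims_path MIGS_MIMS_v4.xls \
#         --list_of_env_pkg soil --number_of_samples 40 --sample_prefix Metcalf40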
| bsd-3-clause | 1,272,224,902,334,078,200 | 61.164557 | 369 | 0.707188 | false |
cpcloud/numba | numba/tests/cache_usecases.py | 1 | 3450 | """
This file will be copied to a temporary directory in order to
exercise caching compiled Numba functions.
See test_dispatcher.py.
"""
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from numba import jit, generated_jit, types
from numba.tests.ctypes_usecases import c_sin
from numba.tests.support import TestCase, captured_stderr
@jit(cache=True, nopython=True)
def simple_usecase(x):
return x
def simple_usecase_caller(x):
return simple_usecase(x)
@jit(cache=True, nopython=True)
def add_usecase(x, y):
return x + y + Z
@jit(cache=True, forceobj=True)
def add_objmode_usecase(x, y):
object()
return x + y + Z
@jit(nopython=True)
def add_nocache_usecase(x, y):
return x + y + Z
@generated_jit(cache=True, nopython=True)
def generated_usecase(x, y):
if isinstance(x, types.Complex):
def impl(x, y):
return x + y
else:
def impl(x, y):
return x - y
return impl
@jit(cache=True, nopython=True)
def inner(x, y):
return x + y + Z
@jit(cache=True, nopython=True)
def outer(x, y):
return inner(-y, x)
@jit(cache=False, nopython=True)
def outer_uncached(x, y):
return inner(-y, x)
@jit(cache=True, forceobj=True)
def looplifted(n):
object()
res = 0
for i in range(n):
res = res + i
return res
@jit(cache=True, nopython=True)
def use_c_sin(x):
return c_sin(x)
@jit(cache=True, nopython=True)
def ambiguous_function(x):
return x + 2
renamed_function1 = ambiguous_function
@jit(cache=True, nopython=True)
def ambiguous_function(x):
return x + 6
renamed_function2 = ambiguous_function
def make_closure(x):
@jit(cache=True, nopython=True)
def closure(y):
return x + y
return closure
closure1 = make_closure(3)
closure2 = make_closure(5)
biggie = np.arange(10**6)
@jit(cache=True, nopython=True)
def use_big_array():
return biggie
Z = 1
# Exercise returning a record instance. This used to hardcode the dtype
# pointer's value in the bitcode.
packed_record_type = np.dtype([('a', np.int8), ('b', np.float64)])
aligned_record_type = np.dtype([('a', np.int8), ('b', np.float64)], align=True)
packed_arr = np.empty(2, dtype=packed_record_type)
for i in range(packed_arr.size):
packed_arr[i]['a'] = i + 1
packed_arr[i]['b'] = i + 42.5
aligned_arr = np.array(packed_arr, dtype=aligned_record_type)
@jit(cache=True, nopython=True)
def record_return(ary, i):
return ary[i]
class _TestModule(TestCase):
"""
Tests for functionality of this module's functions.
Note this does not define any "test_*" method, instead check_module()
should be called by hand.
"""
def check_module(self, mod):
self.assertPreciseEqual(mod.add_usecase(2, 3), 6)
self.assertPreciseEqual(mod.add_objmode_usecase(2, 3), 6)
self.assertPreciseEqual(mod.outer_uncached(3, 2), 2)
self.assertPreciseEqual(mod.outer(3, 2), 2)
self.assertPreciseEqual(mod.generated_usecase(3, 2), 1)
packed_rec = mod.record_return(mod.packed_arr, 1)
self.assertPreciseEqual(tuple(packed_rec), (2, 43.5))
aligned_rec = mod.record_return(mod.aligned_arr, 1)
self.assertPreciseEqual(tuple(aligned_rec), (2, 43.5))
# For 2.x
def runTest(self):
raise NotImplementedError
def self_test():
mod = sys.modules[__name__]
_TestModule().check_module(mod)
| bsd-2-clause | -3,698,239,837,539,475,500 | 20.835443 | 79 | 0.658551 | false |
torchbox/django-mailer-2 | django_mailer/engine.py | 1 | 8307 | """
The "engine room" of django mailer.
Methods here actually handle the sending of queued messages.
"""
from django.utils.encoding import smart_str
from django_mailer import constants, models, settings
from lockfile import FileLock, AlreadyLocked, LockTimeout
from socket import error as SocketError
import logging
import smtplib
import tempfile
import time
import os
if constants.EMAIL_BACKEND_SUPPORT:
from django.core.mail import get_connection
else:
from django.core.mail import SMTPConnection as get_connection
LOCK_PATH = settings.LOCK_PATH or os.path.join(tempfile.gettempdir(),
'send_mail')
logger = logging.getLogger('django_mailer.engine')
def _message_queue(block_size):
"""
A generator which iterates queued messages in blocks so that new
prioritised messages can be inserted during iteration of a large number of
queued messages.
To avoid an infinite loop, yielded messages *must* be deleted or deferred.
"""
def get_block():
queue = models.QueuedMessage.objects.non_deferred().select_related()
if block_size:
queue = queue[:block_size]
return queue
queue = get_block()
while queue:
for message in queue:
yield message
queue = get_block()
def send_all(block_size=500, backend=None):
"""
Send all non-deferred messages in the queue.
A lock file is used to ensure that this process can not be started again
while it is already running.
The ``block_size`` argument allows for queued messages to be iterated in
blocks, allowing new prioritised messages to be inserted during iteration
of a large number of queued messages.
"""
lock = FileLock(LOCK_PATH)
logger.debug("Acquiring lock...")
try:
        # lockfile has a bug dealing with a negative LOCK_WAIT_TIMEOUT (which
        # is the default if it's not provided) on systems which use a
        # LinkFileLock, so ensure that it is never a negative number.
lock.acquire(settings.LOCK_WAIT_TIMEOUT or 0)
#lock.acquire(settings.LOCK_WAIT_TIMEOUT)
except AlreadyLocked:
logger.debug("Lock already in place. Exiting.")
return
except LockTimeout:
logger.debug("Waiting for the lock timed out. Exiting.")
return
logger.debug("Lock acquired.")
start_time = time.time()
sent = deferred = skipped = 0
try:
if constants.EMAIL_BACKEND_SUPPORT:
connection = get_connection(backend=backend)
else:
connection = get_connection()
blacklist = models.Blacklist.objects.values_list('email', flat=True)
connection.open()
for message in _message_queue(block_size):
result = send_queued_message(message, smtp_connection=connection,
blacklist=blacklist)
if result == constants.RESULT_SENT:
sent += 1
elif result == constants.RESULT_FAILED:
deferred += 1
elif result == constants.RESULT_SKIPPED:
skipped += 1
connection.close()
finally:
logger.debug("Releasing lock...")
lock.release()
logger.debug("Lock released.")
logger.debug("")
if sent or deferred or skipped:
log = logger.warning
else:
log = logger.info
log("%s sent, %s deferred, %s skipped." % (sent, deferred, skipped))
logger.debug("Completed in %.2f seconds." % (time.time() - start_time))
def send_loop(empty_queue_sleep=None):
"""
Loop indefinitely, checking queue at intervals and sending and queued
messages.
The interval (in seconds) can be provided as the ``empty_queue_sleep``
argument. The default is attempted to be retrieved from the
``MAILER_EMPTY_QUEUE_SLEEP`` setting (or if not set, 30s is used).
"""
empty_queue_sleep = empty_queue_sleep or settings.EMPTY_QUEUE_SLEEP
while True:
while not models.QueuedMessage.objects.all():
logger.debug("Sleeping for %s seconds before checking queue "
"again." % empty_queue_sleep)
time.sleep(empty_queue_sleep)
send_all()
def send_queued_message(queued_message, smtp_connection=None, blacklist=None,
log=True):
"""
Send a queued message, returning a response code as to the action taken.
The response codes can be found in ``django_mailer.constants``. The
response will be either ``RESULT_SKIPPED`` for a blacklisted email,
``RESULT_FAILED`` for a deferred message or ``RESULT_SENT`` for a
successful sent message.
To allow optimizations if multiple messages are to be sent, an SMTP
connection can be provided and a list of blacklisted email addresses.
Otherwise an SMTP connection will be opened to send this message and the
email recipient address checked against the ``Blacklist`` table.
If the message recipient is blacklisted, the message will be removed from
the queue without being sent. Otherwise, the message is attempted to be
sent with an SMTP failure resulting in the message being flagged as
deferred so it can be tried again later.
By default, a log is created as to the action. Either way, the original
message is not deleted.
"""
message = queued_message.message
if smtp_connection is None:
smtp_connection = get_connection()
opened_connection = False
if blacklist is None:
blacklisted = models.Blacklist.objects.filter(email=message.to_address)
else:
blacklisted = message.to_address in blacklist
log_message = ''
if blacklisted:
logger.info("Not sending to blacklisted email: %s" %
message.to_address.encode("utf-8"))
queued_message.delete()
result = constants.RESULT_SKIPPED
else:
try:
logger.info("Sending message to %s: %s" %
(message.to_address.encode("utf-8"),
message.subject.encode("utf-8")))
opened_connection = smtp_connection.open()
smtp_connection.connection.sendmail(message.from_address,
[message.to_address],
smart_str(message.encoded_message))
queued_message.delete()
result = constants.RESULT_SENT
except (SocketError, smtplib.SMTPSenderRefused,
smtplib.SMTPRecipientsRefused,
smtplib.SMTPAuthenticationError), err:
queued_message.defer()
logger.warning("Message to %s deferred due to failure: %s" %
(message.to_address.encode("utf-8"), err))
log_message = unicode(err)
result = constants.RESULT_FAILED
if log:
models.Log.objects.create(message=message, result=result,
log_message=log_message)
if opened_connection:
smtp_connection.close()
return result
def send_message(email_message, smtp_connection=None):
"""
Send an EmailMessage, returning a response code as to the action taken.
The response codes can be found in ``django_mailer.constants``. The
response will be either ``RESULT_FAILED`` for a failed send or
``RESULT_SENT`` for a successfully sent message.
To allow optimizations if multiple messages are to be sent, an SMTP
connection can be provided. Otherwise an SMTP connection will be opened
to send this message.
This function does not perform any logging or queueing.
"""
if smtp_connection is None:
smtp_connection = get_connection()
opened_connection = False
try:
opened_connection = smtp_connection.open()
smtp_connection.connection.sendmail(email_message.from_email,
email_message.recipients(),
email_message.message().as_string())
result = constants.RESULT_SENT
except (SocketError, smtplib.SMTPSenderRefused,
smtplib.SMTPRecipientsRefused,
smtplib.SMTPAuthenticationError):
result = constants.RESULT_FAILED
if opened_connection:
smtp_connection.close()
return result
| mit | 5,162,136,227,422,725,000 | 34.806034 | 83 | 0.642952 | false |
luci/luci-py | client/third_party/google/auth/aws.py | 4 | 27889 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AWS Credentials and AWS Signature V4 Request Signer.
This module provides credentials to access Google Cloud resources from Amazon
Web Services (AWS) workloads. These credentials are recommended over the
use of service account credentials in AWS as they do not involve the management
of long-live service account private keys.
AWS Credentials are initialized using external_account arguments which are
typically loaded from the external credentials JSON file.
Unlike other Credentials that can be initialized with a list of explicit
arguments, secrets or credentials, external account clients use the
environment and hints/guidelines provided by the external_account JSON
file to retrieve credentials and exchange them for Google access tokens.
This module also provides a basic implementation of the
`AWS Signature Version 4`_ request signing algorithm.
AWS Credentials use serialized signed requests to the
`AWS STS GetCallerIdentity`_ API that can be exchanged for Google access tokens
via the GCP STS endpoint.
.. _AWS Signature Version 4: https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html
.. _AWS STS GetCallerIdentity: https://docs.aws.amazon.com/STS/latest/APIReference/API_GetCallerIdentity.html
"""
import hashlib
import hmac
import io
import json
import os
import re
from six.moves import http_client
from six.moves import urllib
from google.auth import _helpers
from google.auth import environment_vars
from google.auth import exceptions
from google.auth import external_account
# AWS Signature Version 4 signing algorithm identifier.
_AWS_ALGORITHM = "AWS4-HMAC-SHA256"
# The termination string for the AWS credential scope value as defined in
# https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html
_AWS_REQUEST_TYPE = "aws4_request"
# The AWS authorization header name for the security session token if available.
_AWS_SECURITY_TOKEN_HEADER = "x-amz-security-token"
# The AWS authorization header name for the auto-generated date.
_AWS_DATE_HEADER = "x-amz-date"
class RequestSigner(object):
"""Implements an AWS request signer based on the AWS Signature Version 4 signing
process.
https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html
"""
def __init__(self, region_name):
"""Instantiates an AWS request signer used to compute authenticated signed
requests to AWS APIs based on the AWS Signature Version 4 signing process.
Args:
region_name (str): The AWS region to use.
"""
self._region_name = region_name
def get_request_options(
self,
aws_security_credentials,
url,
method,
request_payload="",
additional_headers={},
):
"""Generates the signed request for the provided HTTP request for calling
an AWS API. This follows the steps described at:
https://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html
Args:
aws_security_credentials (Mapping[str, str]): A dictionary containing
the AWS security credentials.
url (str): The AWS service URL containing the canonical URI and
query string.
method (str): The HTTP method used to call this API.
request_payload (Optional[str]): The optional request payload if
available.
additional_headers (Optional[Mapping[str, str]]): The optional
additional headers needed for the requested AWS API.
Returns:
Mapping[str, str]: The AWS signed request dictionary object.
"""
# Get AWS credentials.
access_key = aws_security_credentials.get("access_key_id")
secret_key = aws_security_credentials.get("secret_access_key")
security_token = aws_security_credentials.get("security_token")
additional_headers = additional_headers or {}
uri = urllib.parse.urlparse(url)
# Validate provided URL.
if not uri.hostname or uri.scheme != "https":
raise ValueError("Invalid AWS service URL")
header_map = _generate_authentication_header_map(
host=uri.hostname,
canonical_uri=os.path.normpath(uri.path or "/"),
canonical_querystring=_get_canonical_querystring(uri.query),
method=method,
region=self._region_name,
access_key=access_key,
secret_key=secret_key,
security_token=security_token,
request_payload=request_payload,
additional_headers=additional_headers,
)
headers = {
"Authorization": header_map.get("authorization_header"),
"host": uri.hostname,
}
# Add x-amz-date if available.
if "amz_date" in header_map:
headers[_AWS_DATE_HEADER] = header_map.get("amz_date")
# Append additional optional headers, eg. X-Amz-Target, Content-Type, etc.
for key in additional_headers:
headers[key] = additional_headers[key]
# Add session token if available.
if security_token is not None:
headers[_AWS_SECURITY_TOKEN_HEADER] = security_token
signed_request = {"url": url, "method": method, "headers": headers}
if request_payload:
signed_request["data"] = request_payload
return signed_request
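# Minimal sketch of how RequestSigner is used (credential values are
# placeholders, not real secrets):
#     signer = RequestSigner("us-east-2")
#     options = signer.get_request_options(
#         {"access_key_id": "AKIA...", "secret_access_key": "...", "security_token": None},
#         "https://sts.us-east-2.amazonaws.com/?Action=GetCallerIdentity&Version=2011-06-15",
#         "POST")
# The returned dict carries the signed "Authorization" and "x-amz-date" headers.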
def _get_canonical_querystring(query):
"""Generates the canonical query string given a raw query string.
Logic is based on
https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
Args:
query (str): The raw query string.
Returns:
str: The canonical query string.
"""
# Parse raw query string.
querystring = urllib.parse.parse_qs(query)
querystring_encoded_map = {}
for key in querystring:
quote_key = urllib.parse.quote(key, safe="-_.~")
# URI encode key.
querystring_encoded_map[quote_key] = []
for item in querystring[key]:
# For each key, URI encode all values for that key.
querystring_encoded_map[quote_key].append(
urllib.parse.quote(item, safe="-_.~")
)
# Sort values for each key.
querystring_encoded_map[quote_key].sort()
# Sort keys.
sorted_keys = list(querystring_encoded_map.keys())
sorted_keys.sort()
# Reconstruct the query string. Preserve keys with multiple values.
querystring_encoded_pairs = []
for key in sorted_keys:
for item in querystring_encoded_map[key]:
querystring_encoded_pairs.append("{}={}".format(key, item))
return "&".join(querystring_encoded_pairs)
def _sign(key, msg):
"""Creates the HMAC-SHA256 hash of the provided message using the provided
key.
Args:
key (str): The HMAC-SHA256 key to use.
msg (str): The message to hash.
Returns:
str: The computed hash bytes.
"""
return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
def _get_signing_key(key, date_stamp, region_name, service_name):
"""Calculates the signing key used to calculate the signature for
AWS Signature Version 4 based on:
https://docs.aws.amazon.com/general/latest/gr/sigv4-calculate-signature.html
Args:
key (str): The AWS secret access key.
date_stamp (str): The '%Y%m%d' date format.
region_name (str): The AWS region.
service_name (str): The AWS service name, eg. sts.
Returns:
str: The signing key bytes.
"""
k_date = _sign(("AWS4" + key).encode("utf-8"), date_stamp)
k_region = _sign(k_date, region_name)
k_service = _sign(k_region, service_name)
k_signing = _sign(k_service, "aws4_request")
return k_signing
def _generate_authentication_header_map(
host,
canonical_uri,
canonical_querystring,
method,
region,
access_key,
secret_key,
security_token,
request_payload="",
additional_headers={},
):
"""Generates the authentication header map needed for generating the AWS
Signature Version 4 signed request.
Args:
host (str): The AWS service URL hostname.
canonical_uri (str): The AWS service URL path name.
canonical_querystring (str): The AWS service URL query string.
method (str): The HTTP method used to call this API.
region (str): The AWS region.
access_key (str): The AWS access key ID.
secret_key (str): The AWS secret access key.
security_token (Optional[str]): The AWS security session token. This is
available for temporary sessions.
request_payload (Optional[str]): The optional request payload if
available.
additional_headers (Optional[Mapping[str, str]]): The optional
additional headers needed for the requested AWS API.
Returns:
Mapping[str, str]: The AWS authentication header dictionary object.
This contains the x-amz-date and authorization header information.
"""
# iam.amazonaws.com host => iam service.
# sts.us-east-2.amazonaws.com host => sts service.
service_name = host.split(".")[0]
current_time = _helpers.utcnow()
amz_date = current_time.strftime("%Y%m%dT%H%M%SZ")
date_stamp = current_time.strftime("%Y%m%d")
# Change all additional headers to be lower case.
full_headers = {}
for key in additional_headers:
full_headers[key.lower()] = additional_headers[key]
# Add AWS session token if available.
if security_token is not None:
full_headers[_AWS_SECURITY_TOKEN_HEADER] = security_token
# Required headers
full_headers["host"] = host
# Do not use generated x-amz-date if the date header is provided.
# Previously the date was not fixed with x-amz- and could be provided
# manually.
# https://github.com/boto/botocore/blob/879f8440a4e9ace5d3cf145ce8b3d5e5ffb892ef/tests/unit/auth/aws4_testsuite/get-header-value-trim.req
if "date" not in full_headers:
full_headers[_AWS_DATE_HEADER] = amz_date
# Header keys need to be sorted alphabetically.
canonical_headers = ""
header_keys = list(full_headers.keys())
header_keys.sort()
for key in header_keys:
canonical_headers = "{}{}:{}\n".format(
canonical_headers, key, full_headers[key]
)
signed_headers = ";".join(header_keys)
payload_hash = hashlib.sha256((request_payload or "").encode("utf-8")).hexdigest()
# https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
canonical_request = "{}\n{}\n{}\n{}\n{}\n{}".format(
method,
canonical_uri,
canonical_querystring,
canonical_headers,
signed_headers,
payload_hash,
)
credential_scope = "{}/{}/{}/{}".format(
date_stamp, region, service_name, _AWS_REQUEST_TYPE
)
# https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html
string_to_sign = "{}\n{}\n{}\n{}".format(
_AWS_ALGORITHM,
amz_date,
credential_scope,
hashlib.sha256(canonical_request.encode("utf-8")).hexdigest(),
)
# https://docs.aws.amazon.com/general/latest/gr/sigv4-calculate-signature.html
signing_key = _get_signing_key(secret_key, date_stamp, region, service_name)
signature = hmac.new(
signing_key, string_to_sign.encode("utf-8"), hashlib.sha256
).hexdigest()
# https://docs.aws.amazon.com/general/latest/gr/sigv4-add-signature-to-request.html
authorization_header = "{} Credential={}/{}, SignedHeaders={}, Signature={}".format(
_AWS_ALGORITHM, access_key, credential_scope, signed_headers, signature
)
authentication_header = {"authorization_header": authorization_header}
# Do not use generated x-amz-date if the date header is provided.
if "date" not in full_headers:
authentication_header["amz_date"] = amz_date
return authentication_header
class Credentials(external_account.Credentials):
"""AWS external account credentials.
This is used to exchange serialized AWS signature v4 signed requests to
AWS STS GetCallerIdentity service for Google access tokens.
"""
def __init__(
self,
audience,
subject_token_type,
token_url,
credential_source=None,
service_account_impersonation_url=None,
client_id=None,
client_secret=None,
quota_project_id=None,
scopes=None,
default_scopes=None,
):
"""Instantiates an AWS workload external account credentials object.
Args:
audience (str): The STS audience field.
subject_token_type (str): The subject token type.
token_url (str): The STS endpoint URL.
credential_source (Mapping): The credential source dictionary used
to provide instructions on how to retrieve external credential
to be exchanged for Google access tokens.
service_account_impersonation_url (Optional[str]): The optional
service account impersonation getAccessToken URL.
client_id (Optional[str]): The optional client ID.
client_secret (Optional[str]): The optional client secret.
quota_project_id (Optional[str]): The optional quota project ID.
scopes (Optional[Sequence[str]]): Optional scopes to request during
the authorization grant.
default_scopes (Optional[Sequence[str]]): Default scopes passed by a
Google client library. Use 'scopes' for user-defined scopes.
Raises:
google.auth.exceptions.RefreshError: If an error is encountered during
access token retrieval logic.
ValueError: For invalid parameters.
.. note:: Typically one of the helper constructors
:meth:`from_file` or
:meth:`from_info` are used instead of calling the constructor directly.
"""
super(Credentials, self).__init__(
audience=audience,
subject_token_type=subject_token_type,
token_url=token_url,
credential_source=credential_source,
service_account_impersonation_url=service_account_impersonation_url,
client_id=client_id,
client_secret=client_secret,
quota_project_id=quota_project_id,
scopes=scopes,
default_scopes=default_scopes,
)
credential_source = credential_source or {}
self._environment_id = credential_source.get("environment_id") or ""
self._region_url = credential_source.get("region_url")
self._security_credentials_url = credential_source.get("url")
self._cred_verification_url = credential_source.get(
"regional_cred_verification_url"
)
self._region = None
self._request_signer = None
self._target_resource = audience
# Get the environment ID. Currently, only one version supported (v1).
matches = re.match(r"^(aws)([\d]+)$", self._environment_id)
if matches:
env_id, env_version = matches.groups()
else:
env_id, env_version = (None, None)
if env_id != "aws" or self._cred_verification_url is None:
raise ValueError("No valid AWS 'credential_source' provided")
elif int(env_version or "") != 1:
raise ValueError(
"aws version '{}' is not supported in the current build.".format(
env_version
)
)
def retrieve_subject_token(self, request):
"""Retrieves the subject token using the credential_source object.
The subject token is a serialized `AWS GetCallerIdentity signed request`_.
The logic is summarized as:
Retrieve the AWS region from the AWS_REGION environment variable or from
the AWS metadata server availability-zone if not found in the
environment variable.
Check AWS credentials in environment variables. If not found, retrieve
from the AWS metadata server security-credentials endpoint.
When retrieving AWS credentials from the metadata server
security-credentials endpoint, the AWS role needs to be determined by
calling the security-credentials endpoint without any argument. Then the
credentials can be retrieved via: security-credentials/role_name
Generate the signed request to AWS STS GetCallerIdentity action.
Inject x-goog-cloud-target-resource into header and serialize the
signed request. This will be the subject-token to pass to GCP STS.
.. _AWS GetCallerIdentity signed request:
https://cloud.google.com/iam/docs/access-resources-aws#exchange-token
Args:
request (google.auth.transport.Request): A callable used to make
HTTP requests.
Returns:
str: The retrieved subject token.
"""
# Initialize the request signer if not yet initialized after determining
# the current AWS region.
if self._request_signer is None:
self._region = self._get_region(request, self._region_url)
self._request_signer = RequestSigner(self._region)
# Retrieve the AWS security credentials needed to generate the signed
# request.
aws_security_credentials = self._get_security_credentials(request)
# Generate the signed request to AWS STS GetCallerIdentity API.
# Use the required regional endpoint. Otherwise, the request will fail.
request_options = self._request_signer.get_request_options(
aws_security_credentials,
self._cred_verification_url.replace("{region}", self._region),
"POST",
)
# The GCP STS endpoint expects the headers to be formatted as:
# [
# {key: 'x-amz-date', value: '...'},
# {key: 'Authorization', value: '...'},
# ...
# ]
# And then serialized as:
# quote(json.dumps({
# url: '...',
# method: 'POST',
# headers: [{key: 'x-amz-date', value: '...'}, ...]
# }))
request_headers = request_options.get("headers")
# The full, canonical resource name of the workload identity pool
# provider, with or without the HTTPS prefix.
# Including this header as part of the signature is recommended to
# ensure data integrity.
request_headers["x-goog-cloud-target-resource"] = self._target_resource
# Serialize AWS signed request.
# Keeping inner keys in sorted order makes testing easier for Python
# versions <=3.5 as the stringified JSON string would have a predictable
# key order.
aws_signed_req = {}
aws_signed_req["url"] = request_options.get("url")
aws_signed_req["method"] = request_options.get("method")
aws_signed_req["headers"] = []
# Reformat header to GCP STS expected format.
for key in sorted(request_headers.keys()):
aws_signed_req["headers"].append(
{"key": key, "value": request_headers[key]}
)
return urllib.parse.quote(
json.dumps(aws_signed_req, separators=(",", ":"), sort_keys=True)
)
def _get_region(self, request, url):
"""Retrieves the current AWS region from either the AWS_REGION
environment variable or from the AWS metadata server.
Args:
request (google.auth.transport.Request): A callable used to make
HTTP requests.
url (str): The AWS metadata server region URL.
Returns:
str: The current AWS region.
Raises:
google.auth.exceptions.RefreshError: If an error occurs while
retrieving the AWS region.
"""
# The AWS metadata server is not available in some AWS environments
# such as AWS lambda. Instead, it is available via environment
# variable.
env_aws_region = os.environ.get(environment_vars.AWS_REGION)
if env_aws_region is not None:
return env_aws_region
if not self._region_url:
raise exceptions.RefreshError("Unable to determine AWS region")
response = request(url=self._region_url, method="GET")
# Support both string and bytes type response.data.
response_body = (
response.data.decode("utf-8")
if hasattr(response.data, "decode")
else response.data
)
if response.status != 200:
raise exceptions.RefreshError(
"Unable to retrieve AWS region", response_body
)
# This endpoint will return the region in format: us-east-2b.
# Only the us-east-2 part should be used.
return response_body[:-1]
def _get_security_credentials(self, request):
"""Retrieves the AWS security credentials required for signing AWS
requests from either the AWS security credentials environment variables
or from the AWS metadata server.
Args:
request (google.auth.transport.Request): A callable used to make
HTTP requests.
Returns:
Mapping[str, str]: The AWS security credentials dictionary object.
Raises:
google.auth.exceptions.RefreshError: If an error occurs while
retrieving the AWS security credentials.
"""
# Check environment variables for permanent credentials first.
# https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html
env_aws_access_key_id = os.environ.get(environment_vars.AWS_ACCESS_KEY_ID)
env_aws_secret_access_key = os.environ.get(
environment_vars.AWS_SECRET_ACCESS_KEY
)
# This is normally not available for permanent credentials.
env_aws_session_token = os.environ.get(environment_vars.AWS_SESSION_TOKEN)
if env_aws_access_key_id and env_aws_secret_access_key:
return {
"access_key_id": env_aws_access_key_id,
"secret_access_key": env_aws_secret_access_key,
"security_token": env_aws_session_token,
}
# Get role name.
role_name = self._get_metadata_role_name(request)
# Get security credentials.
credentials = self._get_metadata_security_credentials(request, role_name)
return {
"access_key_id": credentials.get("AccessKeyId"),
"secret_access_key": credentials.get("SecretAccessKey"),
"security_token": credentials.get("Token"),
}
def _get_metadata_security_credentials(self, request, role_name):
"""Retrieves the AWS security credentials required for signing AWS
requests from the AWS metadata server.
Args:
request (google.auth.transport.Request): A callable used to make
HTTP requests.
role_name (str): The AWS role name required by the AWS metadata
server security_credentials endpoint in order to return the
credentials.
Returns:
Mapping[str, str]: The AWS metadata server security credentials
response.
Raises:
google.auth.exceptions.RefreshError: If an error occurs while
retrieving the AWS security credentials.
"""
headers = {"Content-Type": "application/json"}
response = request(
url="{}/{}".format(self._security_credentials_url, role_name),
method="GET",
headers=headers,
)
# support both string and bytes type response.data
response_body = (
response.data.decode("utf-8")
if hasattr(response.data, "decode")
else response.data
)
if response.status != http_client.OK:
raise exceptions.RefreshError(
"Unable to retrieve AWS security credentials", response_body
)
credentials_response = json.loads(response_body)
return credentials_response
def _get_metadata_role_name(self, request):
"""Retrieves the AWS role currently attached to the current AWS
workload by querying the AWS metadata server. This is needed for the
AWS metadata server security credentials endpoint in order to retrieve
the AWS security credentials needed to sign requests to AWS APIs.
Args:
request (google.auth.transport.Request): A callable used to make
HTTP requests.
Returns:
str: The AWS role name.
Raises:
google.auth.exceptions.RefreshError: If an error occurs while
retrieving the AWS role name.
"""
if self._security_credentials_url is None:
raise exceptions.RefreshError(
"Unable to determine the AWS metadata server security credentials endpoint"
)
response = request(url=self._security_credentials_url, method="GET")
# support both string and bytes type response.data
response_body = (
response.data.decode("utf-8")
if hasattr(response.data, "decode")
else response.data
)
if response.status != http_client.OK:
raise exceptions.RefreshError(
"Unable to retrieve AWS role name", response_body
)
return response_body
@classmethod
def from_info(cls, info, **kwargs):
"""Creates an AWS Credentials instance from parsed external account info.
Args:
info (Mapping[str, str]): The AWS external account info in Google
format.
kwargs: Additional arguments to pass to the constructor.
Returns:
google.auth.aws.Credentials: The constructed credentials.
Raises:
ValueError: For invalid parameters.
"""
return cls(
audience=info.get("audience"),
subject_token_type=info.get("subject_token_type"),
token_url=info.get("token_url"),
service_account_impersonation_url=info.get(
"service_account_impersonation_url"
),
client_id=info.get("client_id"),
client_secret=info.get("client_secret"),
credential_source=info.get("credential_source"),
quota_project_id=info.get("quota_project_id"),
**kwargs
)
@classmethod
def from_file(cls, filename, **kwargs):
"""Creates an AWS Credentials instance from an external account json file.
Args:
filename (str): The path to the AWS external account json file.
kwargs: Additional arguments to pass to the constructor.
Returns:
google.auth.aws.Credentials: The constructed credentials.
"""
with io.open(filename, "r", encoding="utf-8") as json_file:
data = json.load(json_file)
return cls.from_info(data, **kwargs)
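# Usage sketch (added; not part of the library). The config file name is an
# assumption; refresh() performs the STS token exchange and populates
# credentials.token with a Google access token.
#
#   from google.auth.transport import requests as transport_requests
#
#   credentials = Credentials.from_file("aws-external-account.json")
#   credentials.refresh(transport_requests.Request())
#   print(credentials.token)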
| apache-2.0 | 2,968,763,667,629,845,500 | 38.060224 | 141 | 0.635663 | false |
chrrrles/c3px | app/models/address.py | 1 | 1464 | # Copyright (c) 2013 - The C3PX authors.
#
# This file is part of C3PX.
#
# C3PX is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# C3PX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with C3PX. If not, see <http://www.gnu.org/licenses/>.
from base import *
class AddressModel(BaseModel):
street1 = StringType (
required=True,
serialized_name = "Street Line 1",
max_length = 60 )
street2 = StringType (
serialized_name = "Street Line 2",
max_length = 60 )
city = StringType (
required=True,
serialized_name = "City",
max_length = 60)
state = StringType (
required=True,
serialized_name = "State",
max_length="2")
country = StringType (
required=True,
default="US",
serialized_name = "Country Code",
max_length = 2 )
zipcode = StringType(
required=True,
serialized_name = "Postal Code",
max_length = 12 )
geopoint = GeoPointType(
required=False,
serialized_name = "Geolocation" )
| agpl-3.0 | -1,253,687,468,705,276,000 | 25.142857 | 71 | 0.679645 | false |
stganser/polyite | src/polyite/fitness/scikit_learn/init.py | 1 | 2788 | from enum import Enum
import csv
import math
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
class LearningAlgorithms(Enum):
cart = 0,
random_forest = 1
class RandomForestsConfig:
def __init__(self, n_tree, max_features):
self.n_tree = n_tree
self.max_features = max_features
def learn(feature_fields, learning_sets_file_names, rescale, min_samples_leaf,
learning_algorithm, random_forest_config):
features_learn = []
classes_learn = []
for path in learning_sets_file_names:
(curr_features, curr_classes) = load_data(path, feature_fields, rescale)
features_learn = features_learn + curr_features
classes_learn = classes_learn + curr_classes
if learning_algorithm is LearningAlgorithms.cart:
clf = tree.DecisionTreeClassifier(min_samples_leaf=min_samples_leaf, criterion="gini")
else:
clf = RandomForestClassifier(n_estimators=random_forest_config.n_tree,
max_features=random_forest_config.max_features,
criterion="gini", bootstrap=True,
min_samples_leaf=min_samples_leaf)
clf = clf.fit(features_learn, classes_learn)
return clf
def rescale_feature_values(features, n_features):
maxima = list(map(lambda col: max(map(lambda row: row[col], features)), range(0, n_features)))
minima = list(map(lambda col: min(map(lambda row: row[col], features)), range(0, n_features)))
return list(map(
lambda row: list(map(lambda col: (row[col] - minima[col]) / (maxima[col] - minima[col]), range(0, n_features))),
features))
# result is sorted, starting from smallest speedup to largest
def load_data(csv_file_name, feature_fields, rescale):
result = []
with open(csv_file_name, newline='') as csvFile:
content = csv.reader(csvFile, delimiter="\t", quotechar="\"")
head = next(content)
fields = {}
for i in range(0, len(head)):
fields[head[i]] = i
for row in content:
curr_result = []
if row[fields["class"]] != "-":
curr_feature_vals = []
for field in feature_fields:
curr_feature_vals.append(float(row[fields[field]]))
curr_result = curr_result + [float(row[fields["class"]])]
curr_result = curr_result + curr_feature_vals
result.append(curr_result)
result = sorted(result, key=lambda r : r[1])
features = list(map(lambda row : row[1:] , result))
classes = list(map(lambda row : row[0] , result))
if rescale:
return rescale_feature_values(features, len(feature_fields)), classes
return features, classes
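# Illustrative usage sketch (added, not part of the module). The file names,
# feature column names and hyper-parameters are assumptions; they only show
# how learn() is wired together.
def example_learn():
    feature_fields = ["feature1", "feature2"]  # column headers in the TSV files
    learning_sets = ["learning_set1.tsv"]      # tab-separated, with a "class" column
    config = RandomForestsConfig(n_tree=100, max_features="sqrt")
    return learn(feature_fields, learning_sets, rescale=True, min_samples_leaf=5,
                 learning_algorithm=LearningAlgorithms.random_forest,
                 random_forest_config=config)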
| mit | -2,570,103,128,079,093,000 | 38.267606 | 120 | 0.61944 | false |
butozerca/fireplace | fireplace/cards/data/buffs.py | 1 | 9369 | from fireplace.enums import GameTag
# Buff helper
def buff(atk=0, health=0):
ret = {}
if atk:
ret[GameTag.ATK] = atk
if health:
ret[GameTag.HEALTH] = health
return ret
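# (Added example) buff(atk=2, health=1) evaluates to
# { GameTag.ATK: 2, GameTag.HEALTH: 1 }; zero-valued arguments are simply
# omitted, so buff(atk=1) == { GameTag.ATK: 1 }.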
###
# Game/Brawl set
#
TB_007e = {
GameTag.ATTACK_HEALTH_SWAP: True,
}
TB_Pilot1 = {
GameTag.DEATHRATTLE: True,
}
###
# Classic set
#
##
# Druid
# Claws
CS2_017o = buff(atk=1)
# Soul of the Forest
EX1_158e = {
GameTag.DEATHRATTLE: True,
}
# Rooted (Ancient of War)
EX1_178ae = {
GameTag.HEALTH: 5,
GameTag.TAUNT: True,
}
# Uproot (Ancient of War)
EX1_178be = buff(atk=5)
# Claw
CS2_005o = buff(atk=2)
# Mark of the Wild
CS2_009e = {
GameTag.ATK: 2,
GameTag.HEALTH: 2,
GameTag.TAUNT: True,
}
# Savage Roar
CS2_011o = buff(atk=2)
# Mark of Nature (Attack)
EX1_155ae = buff(atk=4)
# Mark of Nature (Health)
EX1_155be = {
GameTag.HEALTH: 4,
GameTag.TAUNT: True,
}
# Leader of the Pack (Power of the Wild)
EX1_160be = buff(+1, +1)
# Bite
EX1_570e = buff(atk=4)
# Demigod's Favor (Cenarius)
EX1_573ae = buff(+2, +2)
##
# Hunter
# Master's Presence (Houndmaster)
DS1_070o = {
GameTag.ATK: 2,
GameTag.HEALTH: 2,
GameTag.TAUNT: True,
}
# Furious Howl (Timber Wolf)
DS1_175o = buff(atk=1)
# Charge (Tundra Rhino)
DS1_178e = {
GameTag.CHARGE: True,
}
# Well Fed (Scavenging Hyena)
EX1_531e = buff(+2, +1)
# Bestial Wrath
EX1_549o = {
GameTag.ATK: 2,
GameTag.CANT_BE_DAMAGED: True,
}
# Trapped (Freezing Trap)
EX1_611e = {
GameTag.COST: 2,
}
# Eye in the Sky (Leokk)
NEW1_033o = buff(atk=1)
# Upgraded (Eaglehorn Bow)
EX1_536e = buff(health=1)
##
# Mage
# Raw Power! (Ethereal Arcanist)
EX1_274e = buff(+2, +2)
# Mana Gorged (Mana Wyrm)
NEW1_012o = buff(atk=1)
##
# Paladin
# Blessing of Might
CS2_087e = buff(atk=3)
# Blessing of Kings
CS2_092e = buff(+4, +4)
# Justice Served (Sword of Justice)
EX1_366e = buff(+1, +1)
##
# Priest
# Warded (Lightwarden)
EX1_001e = buff(atk=2)
# Infusion (Temple Enforcer)
EX1_623e = buff(health=3)
# Power Word: Shield
CS2_004e = buff(health=2)
##
# Rogue
# VanCleef's Vengeance (Edwin VanCleef)
EX1_613e = buff(+2, +2)
# Cold Blood (+2)
CS2_073e = buff(atk=2)
# Cold Blood (+4)
CS2_073e2 = buff(atk=4)
# Deadly Poison
CS2_074e = buff(atk=2)
# Conceal
EX1_128e = {
GameTag.STEALTH: True,
}
##
# Shaman
# Overloading (Unbound Elemental)
EX1_258e = buff(+1, +1)
# Flametongue (Flametongue Totem)
EX1_565o = buff(atk=2)
# Ancestral Spirit
CS2_038e = {
GameTag.DEATHRATTLE: True,
}
# Ancestral Infusion (Ancestral Healing)
CS2_041e = {
GameTag.TAUNT: True,
}
# Rockbiter Weapon
CS2_045e = buff(atk=3)
# Bloodlust
CS2_046e = buff(atk=3)
# Far Sight
CS2_053e = {
GameTag.COST: -3,
}
# Totemic Might
EX1_244e = buff(health=2)
##
# Warlock
# Blood Pact (Blood Imp)
CS2_059o = buff(health=1)
# Power Overwhelming
EX1_316e = buff(+4, +4)
# Demonfire
EX1_596e = buff(+2, +2)
##
# Warrior
# Charge (Warsong Commander)
EX1_084e = {
GameTag.CHARGE: True,
}
# Berserk (Frothing Berserker)
EX1_604o = buff(atk=1)
# Whipped Into Shape (Cruel Taskmaster)
EX1_603e = buff(atk=2)
# Charge
CS2_103e2 = {
GameTag.ATK: 2,
GameTag.CHARGE: True,
}
# Rampage
CS2_104e = buff(+3, +3)
# Heroic Strike
CS2_105e = buff(atk=4)
# Upgraded (Upgrade!)
EX1_409e = buff(+1, +1)
# Inner Rage
EX1_607e = buff(atk=2)
# Commanding Shout
NEW1_036e = {
GameTag.HEALTH_MINIMUM: 1,
}
##
# Neutral common
# Enhanced (Raid Leader)
CS2_122e = buff(atk=1)
# Might of Stormwind (Stormwind Champion)
CS2_222o = buff(+1, +1)
# Frostwolf Banner (Frostwolf Warlord)
CS2_226e = buff(+1, +1)
# Berserking (Gurubashi Berserker)
EX1_399e = buff(atk=3)
# Sharp! (Spiteful Smith)
CS2_221e = buff(atk=2)
# 'Inspired' (Abusive Seargent)
CS2_188o = buff(atk=2)
# Cleric's Blessing (Shattered Sun Cleric)
EX1_019e = buff(+1, +1)
# Tempered (Dark Iron Dwarf)
EX1_046e = buff(atk=2)
# Strength of the Pack (Dire Wolf Alpha)
EX1_162o = buff(atk=1)
# Mlarggragllabl! (Grimscale Oracle)
EX1_508o = buff(atk=1)
# Cannibalize (Flesheating Ghoul)
tt_004o = buff(atk=1)
##
# Neutral rare
# Elune's Grace (Young Priestess)
EX1_004e = buff(health=1)
# Hour of Twilight (Twilight Drake)
EX1_043e = buff(health=1)
# Level Up! (Questing Adventurer)
EX1_044e = buff(+1, +1)
# Empowered (Mana Addict)
EX1_055o = buff(atk=2)
# Experiments! (Crazed Alchemist)
EX1_059e = {
GameTag.ATTACK_HEALTH_SWAP: True,
}
# Keeping Secrets (Secretkeeper)
EX1_080o = buff(+1, +1)
# Hand of Argus (Defender of Argus)
EX1_093e = {
GameTag.ATK: 1,
GameTag.HEALTH: 1,
GameTag.TAUNT: True,
}
# Mrghlglhal (Coldlight Seer)
EX1_103e = buff(health=2)
# Blarghghl (Murloc Tidecaller)
EX1_509e = buff(atk=1)
# Equipped (Master Swordsmith)
NEW1_037e = buff(atk=1)
##
# Neutral epic
# Shadows of M'uru (Blood Knight)
EX1_590e = buff(+3, +3)
# Mrgglaargl! (Murloc Warleader)
EX1_507e = buff(+2, +1)
# Full Belly (Hungry Crab)
NEW1_017e = buff(+2, +2)
# Yarrr! (Southsea Captain)
NEW1_027e = buff(+1, +1)
##
# Neutral legendary
# Bananas (King Mukla)
EX1_014te = buff(+1, +1)
# Greenskin's Command (Captain Greenskin)
NEW1_024o = buff(+1, +1)
# Growth (Gruul)
NEW1_038o = buff(+1, +1)
# Emboldened! (Emboldener 3000)
Mekka3e = buff(+1, +1)
##
# Curse of Naxxramas set
#
# Consume (Shade of Naxxramas)
FP1_005e = buff(+1, +1)
# Vengeance (Avenge)
FP1_020e = buff(+3, +2)
# Power of the Ziggurat (Dark Cultist)
FP1_023e = buff(health=3)
# Darkness Calls (Undertaker)
FP1_028e = buff(atk=1)
##
# Naxxramas Adventure
# Fungal Growth (Spore)
NAX6_03te = buff(atk=8)
# Mark of the Horsemen
NAX9_07e = buff(+1, +1)
# Mutating Injection
NAX11_04e = {
GameTag.ATK: 4,
GameTag.HEALTH: 4,
GameTag.TAUNT: True,
}
# Extra Teeth (Jaws)
NAX12_03e = buff(atk=2)
# Enrage
NAX12_04e = buff(atk=6)
# Polarity (Polarity Shift)
NAX13_02e = {
GameTag.ATTACK_HEALTH_SWAP: True,
}
# Supercharge
NAX13_03e = buff(health=2)
##
# Goblins vs. Gnomes set
#
##
# Druid
# Attack Mode (Anodized Robo Cub)
GVG_030ae = buff(atk=1)
# Tank Mode (Anodized Robo Cub)
GVG_030be = buff(health=1)
# Dark Wispers
GVG_041c = {
GameTag.ATK: 5,
GameTag.HEALTH: 5,
GameTag.TAUNT: True,
}
##
# Hunter
# The King (King of Beasts)
GVG_046e = buff(atk=1)
# Metal Teeth (Metaltooth Leaper)
GVG_048e = buff(atk=2)
# Glaivezooka
GVG_043e = buff(atk=1)
##
# Paladin
# Well Equipped (Quartermaster)
GVG_060e = buff(+2, +2)
# Retribution (Bolvar Fordragon)
GVG_063a = buff(atk=1)
# Seal of Light
GVG_057a = buff(atk=2)
##
# Priest
# Repairs! (Upgraded Repair Bot)
GVG_069a = buff(health=4)
# Velen's Chosen
GVG_010b = {
GameTag.ATK: 2,
GameTag.HEALTH: 4,
GameTag.SPELLPOWER: 1,
}
# Shrink Ray (Shrinkmeister)
GVG_011a = buff(atk=-2)
##
# Rogue
# Tinker's Sharpsword Oil
GVG_022a = buff(atk=3) # Weapon
GVG_022b = buff(atk=3) # Minion
# Extra Sharp (Goblin Auto-Barber)
GVG_023a = buff(atk=1)
# Ironed Out (Iron Sensei)
GVG_027e = buff(+2, +2)
##
# Shaman
# Powered (Powermace)
GVG_036e = buff(+2, +2)
##
# Warlock
# Demonheart
GVG_019e = buff(+5, +5)
# Grasp of Mal'Ganis (Mal'Ganis)
GVG_021e = buff(+2, +2)
# Brow Furrow (Floating Watcher)
GVG_100e = buff(+2, +2)
##
# Warrior
# Armor Plated (Siege Engine)
GVG_086e = buff(atk=1)
##
# Neutral common
# Metabolized Magic (Stonesplinter Trogg)
GVG_067a = buff(atk=1)
# Metabolized Magic (Burly Rockjaw Trogg)
GVG_068a = buff(atk=2)
# Pistons (Micro Machine)
GVG_076a = buff(atk=1)
# Might of Tinkertown (Tinkertown Technician)
GVG_102e = buff(+1, +1)
##
# Neutral rare
# Screwy Jank (Screwjank Clunker)
GVG_055e = buff(+2, +2)
# Pure (Lil' Exorcist)
GVG_101e = buff(+1, +1)
##
# Neutral epic
# HERE, TAKE BUFF. (Hobgoblin)
GVG_104a = buff(+2, +2)
# Junked Up (Junkbot)
GVG_106e = buff(+2, +2)
##
# Spare parts
# Armor Plating
PART_001e = buff(health=1)
# Cloaked (Finicky Cloakfield)
PART_004e = {
GameTag.STEALTH: True,
}
# Switched (Reversing Switch)
PART_006a = {
GameTag.ATTACK_HEALTH_SWAP: True,
}
# Whirling Blades
PART_007e = buff(atk=1)
###
# Blackrock Mountain set
#
# Dragon's Might (Unused)
BRM_003e = {
GameTag.COST: -3,
}
# Twilight Endurance (Twilight Whelp)
BRM_004e = buff(health=2)
# On Fire! (Fireguard Destroyer)
BRM_012e = buff(atk=1)
# Power Rager (Core Rager)
BRM_014e = buff(+3, +3)
# Unchained! (Dragon Consort)
BRM_018e = {
GameTag.COST: -3,
}
# Draconic Power (Dragonkin Sorcerer)
BRM_020e = buff(+1, +1)
# Large Talons (Drakonid Crusher)
BRM_024e = buff(+3, +3)
# Imperial Favor (Emperor Thaurissan)
BRM_028e = {
GameTag.COST: -1,
}
# Dragon Blood (Blackwing Technician)
BRM_033e = buff(+1, +1)
##
# Blackrock Adventure
# Incubation (The Rookery)
BRMA10_3e = buff(health=1)
# Blind With Rage (Razorgore's Claws)
BRMA10_6e = buff(atk=1)
# Potion of Might (The Alchemist)
BRMA15_2He = buff(+2, +2)
# Sonic Breath
BRMA16_3e = buff(atk=3)
# I hear you... (Dragonteeth)
BRMA16_5e = buff(atk=1)
##
# Blackrock Brawl
# I Hear You... (Atramedes)
BRMC_86e = buff(atk=2)
# Dragonlust (Razorgore)
BRMC_98e = buff(atk=3)
###
# Tutorial set
#
# Might of Mukla (Unused)
TU4c_008e = buff(atk=8)
# Legacy of the Emperor
TU4f_004o = buff(+2, +2)
# Bananas
TU4c_006e = buff(+1, +1)
# Transcendence
TU4f_006o = {
GameTag.CANT_BE_ATTACKED: True,
GameTag.CANT_BE_TARGETED_BY_OPPONENTS: True,
}
###
# Debug set
#
# Weapon Buff Enchant
XXX_054e = buff(+100, +100)
# 1000 Stats Enchant
XXX_055e = buff(+1000, +1000)
| agpl-3.0 | 5,784,976,088,777,001,000 | 12.859467 | 45 | 0.659088 | false |
skluth/sjmanalysis | test.py | 1 | 38623 |
from ROOT import TFile, gROOT, gPad, TVectorD, TObject
from ROOT import TGraphErrors, TH1D, TLegend, TCanvas, TLatex
TGraphErrors.__init__._creates= False
TH1D.__init__._creates= False
TLegend.__init__._creates= False
TCanvas.__init__._creates= False
TLatex.__init__._creates= False
gROOT.LoadMacro( "libAnalysisDict.so" )
from ROOT import Analysis, TH1DAnalysisObject, TGEAnalysisObject
from array import array
import numpy as np
# Read numbers columnwise from ascii txt files into arrays indexed by column number:
def ascii2arrays( filename ):
lines= [ line.rstrip( '\n' ) for line in open( filename ) ]
arrays= dict()
for line in lines:
tokens= line.split()
for itoken in range( len( tokens ) ):
if not itoken in arrays:
arrays[itoken]= array( "d" )
arrays[itoken].append( float( tokens[itoken] ) )
return arrays
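# Illustrative helper (added, not part of the original analysis): ascii2arrays
# returns a dict keyed by column index, so arrays[0] is the first column of
# the file. The default file name is only an assumption.
def exampleAscii2arrays( filename="mtford-thrust91.txt" ):
    arrays= ascii2arrays( filename )
    for icol in sorted( arrays.keys() ):
        print icol, len( arrays[icol] )
    return arrays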
# Factory method to create AnalysisObject instances
def getAnalysisObjectFromFile( tfile, obs, analysis ):
ao= None
key= obs+" "+analysis.getTag()+";1"
obj= tfile.Get( key )
if not obj:
raise RuntimeError( "getAnalysisObjectFromFile: AnalysisObject with key "+key+" not in file "+tfile.GetName() )
if obj.ClassName() == "TH1D":
errobj= tfile.Get( "errm "+obs+" "+analysis.getTag() )
if errobj:
ao= TH1DAnalysisObject( obj, errobj )
else:
ao= TH1DAnalysisObject( obj )
elif obj.ClassName() == "TGraphErrors":
ao= TGEAnalysisObject( obj )
else:
raise RuntimeError( "getAnalysisObjectFromFile: can't handle class name"+obj.ClassName() )
return ao
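# Usage sketch (added): the factory needs the observable name and an Analysis
# whose tag matches a key in the ROOT file, e.g. (file name assumed)
#   f= TFile( "sjm91_all.root" )
#   ao= getAnalysisObjectFromFile( f, "thrust", Analysis( "data mt stand none none none py bbb" ) )
#   print ao.getNEvents()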
# Interface for analyses
class AnalysisObservable:
def __init__( self, name ):
self.obs= name
self.aostand=None
self.points=None
self.values=None
self.sterrs=None
self.syerrs=None
self.variationsDelta=None
self.events= dict()
self.rawEvents= dict()
return
def setupStandardAnalysis( self, standardAnalysis, tfile ):
self.aostand= getAnalysisObjectFromFile( tfile, self.obs, standardAnalysis )
self.points= array( "d", self.aostand.getPoints() )
self.values= array( "d", self.aostand.getValues() )
self.sterrs= array( "d", self.aostand.getErrors() )
self.events["stand"]= self.aostand.getNEvents()
return
def subtractVariations( self, analysisVariations, tfile ):
self.variationsDelta= dict()
for key in analysisVariations.keys():
ao= getAnalysisObjectFromFile( tfile, self.obs, analysisVariations[key] )
variationData= array( "d", ao.getValues() )
self.variationsDelta[key]= np.subtract( variationData, self.values )
self.events[key]= ao.getNEvents()
return
def calcSystSumSq( self, keys ):
self.syerrs= 0.0
for key in keys:
self.syerrs+= np.square( self.variationsDelta[key] )
self.syerrs= np.sqrt( self.syerrs )
return
def printResults( self, width=7, precision=3, pointwidth=4, pointprec=2, opt="?" ):
print "Results for", self.obs
print self.aostand.getPointLabel( pointwidth ),
fmt= "{:>"+str(width)+"}"
for key in [ "val", "stat", "sys" ]:
print fmt.format( key ),
if "d" in opt:
for key in sorted( self.variationsDelta.keys() ):
print fmt.format( key ),
print
if "m" in opt:
sterrs= self.aostand.getErrors( "m" )
else:
sterrs= self.sterrs
fmt="{:"+str(width)+"."+str(precision)+"f}"
for i in range( len( self.values ) ):
if self.obs.find( "EEC" ) >= 0 and i < len( self.values )-1:
rad2grad= 180.0/3.14159
leftedge= self.points[i]*rad2grad
rightedge= self.points[i+1]*rad2grad
print "{0:3.0f} {1:3.0f} ".format( leftedge, rightedge ),
else:
print self.aostand.getPointStr( i, pointwidth, pointprec ),
print fmt.format( self.values[i] ),
print fmt.format( sterrs[i] ),
print fmt.format( self.syerrs[i] ),
if "d" in opt:
for key in sorted( self.variationsDelta.keys() ):
print fmt.format( self.variationsDelta[key][i] ),
print
return
def printErrors( self, width=7, precision=4 ):
from math import sqrt
errorMatrix= self.aostand.getErrorMatrix()
fmt="{:"+str(width)+"."+str(precision)+"f}"
for i in range( len( self.sterrs )-1 ):
binw= self.points[i+1]-self.points[i]
diagError= sqrt( errorMatrix(i,i) )/binw
print fmt.format( self.sterrs[i] ), fmt.format( diagError )
return
def plot( self, plotoptions, opt="?" ):
vx= array( "d", self.aostand.getPointsCenter() )
values= self.values
sterrs= self.sterrs
if "m" in opt:
print "AnalysisObservable::plot: use errors from error matrix"
sterrs= array( "d", self.aostand.getErrors( "m" ) )
syerrs= self.syerrs
npoints= len(vx)
if "xshift" in plotoptions:
for i in range(npoints):
vx[i]+= plotoptions["xshift"]
vex= array( "d", npoints*[0.0] )
tgest= TGraphErrors( npoints, vx, values, vex, sterrs )
toterrs= np.sqrt( np.add( np.square( sterrs ), np.square( syerrs ) ) )
tgesy= TGraphErrors( npoints, vx, values, vex, toterrs )
tgesy.SetMarkerStyle( plotoptions["markerStyle"] )
tgesy.SetMarkerSize( plotoptions["markerSize"] )
drawas= plotoptions["drawas"] if "drawas" in plotoptions else "p"
tgesy.SetName( self.obs )
if "fillcolor" in plotoptions:
tgesy.SetFillColor(plotoptions["fillcolor"])
tgest.SetFillColor(plotoptions["fillcolor"])
if "s" in opt:
tgesy.Draw( "psame" )
else:
if "title" in plotoptions:
tgesy.SetTitle( plotoptions["title"] )
else:
tgesy.SetTitle( self.obs )
tgesy.SetMinimum( plotoptions["ymin"] )
tgesy.SetMaximum( plotoptions["ymax"] )
xaxis= tgesy.GetXaxis()
xaxis.SetLimits( plotoptions["xmin"], plotoptions["xmax"] )
if "xlabel" in plotoptions:
xaxis.SetTitle( plotoptions["xlabel"] )
if "ylabel" in plotoptions:
tgesy.GetYaxis().SetTitle( plotoptions["ylabel"] )
tgesy.Draw( "a"+drawas )
optlogx= plotoptions["logx"] if "logx" in plotoptions else 0
gPad.SetLogx( optlogx )
optlogy= plotoptions["logy"] if "logy" in plotoptions else 0
gPad.SetLogy( optlogy )
tgest.Draw( "same"+drawas )
return tgest, tgesy
def maxAbsErrorSq( self, errorKey1, errorKey2 ):
return np.square( np.maximum( np.absolute( self.variationsDelta[errorKey1] ),
np.absolute( self.variationsDelta[errorKey2] ) ) )
def printEvents( self ):
for key in sorted( self.events.keys() ):
print key, self.events[key]
return
def printRawEvents( self ):
for key in sorted( self.rawEvents.keys() ):
print key, self.rawEvents[key]
return
def readRawEvents( self, standardAnalysis, analysisVariations, tfile, srclist=[] ):
allAnalyses= analysisVariations.copy()
allAnalyses["stand"]= standardAnalysis
for source in [ "data", "py" ]+srclist:
for key in allAnalyses.keys():
analysis= allAnalyses[key]
rawAnalysis= Analysis( source, analysis.getReco(), analysis.getCuts() )
ao= getAnalysisObjectFromFile( tfile, self.obs, rawAnalysis )
self.rawEvents[rawAnalysis.getTag()]= ao.getNEvents()
hwRawAnalysis= Analysis( "hw", "mt", "stand" )
ao= getAnalysisObjectFromFile( tfile, self.obs, hwRawAnalysis )
self.rawEvents[hwRawAnalysis.getTag()]= ao.getNEvents()
return
# LEP1 Analysis:
class LEP1AnalysisObservable( AnalysisObservable ):
def __init__( self, obs ):
AnalysisObservable.__init__( self, obs )
return
def setupFromFile( self, tfile, unf="bbb" ):
standardAnalysis= Analysis( "data mt stand none none none py " + unf )
analysisVariations= {
"tc": Analysis( "data tc stand none none none py " + unf ),
"costt07": Analysis( "data mt costt07 none none none py " + unf ),
"hw": Analysis( "data mt stand none none none hw " + unf ) }
self.setupStandardAnalysis( standardAnalysis, tfile )
self.subtractVariations( analysisVariations, tfile )
self.calcSystSumSq( analysisVariations.keys() )
self.readRawEvents( standardAnalysis, analysisVariations, tfile )
return
# LEP1.5 Analysis:
class LEP15AnalysisObservable( AnalysisObservable ):
def __init__( self, obs ):
AnalysisObservable.__init__( self, obs )
return
def setupFromFile( self, tfile, unf="bbb" ):
standardAnalysis= Analysis( "data mt stand none none none py " + unf )
analysisVariations= {
"tc": Analysis( "data tc stand none none none py " + unf ),
"costt07": Analysis( "data mt costt07 none none none py " + unf ),
"hw": Analysis( "data mt stand none none none hw " + unf ),
"sprold": Analysis( "data mt sprold none none none py " + unf ) }
self.setupStandardAnalysis( standardAnalysis, tfile )
self.subtractVariations( analysisVariations, tfile )
self.calcSystSumSq( analysisVariations.keys() )
self.readRawEvents( standardAnalysis, analysisVariations, tfile )
return
def clone( self, values, sterrs, variationsDelta ):
aocloned= LEP15AnalysisObservable( self.obs )
aocloned.aostand= self.aostand
aocloned.points= self.points
aocloned.values= values
aocloned.sterrs= sterrs
aocloned.variationsDelta= variationsDelta
aocloned.calcSystSumSq( variationsDelta.keys() )
return aocloned
# LEP2 Analysis
class LEP2AnalysisObservable( AnalysisObservable ):
def __init__( self, obs ):
AnalysisObservable.__init__( self, obs )
return
def setupFromFile( self, tfile, unf="bbb" ):
standardAnalysis= Analysis( "data mt stand none none llqq:qqqq:eeqq py " + unf )
self.setupStandardAnalysis( standardAnalysis, tfile )
analysisVariations= {
"tc": Analysis( "data tc stand none none llqq:qqqq:eeqq py " + unf ),
"costt07": Analysis( "data mt costt07 none none llqq:qqqq:eeqq py " + unf ),
"sprold": Analysis( "data mt sprold none none llqq:qqqq:eeqq py " + unf ),
"hw": Analysis( "data mt stand none none llqq:qqqq:eeqq hw " + unf ),
"wqqlnhi": Analysis( "data mt wqqlnhi none none llqq:qqqq:eeqq py " + unf ),
"wqqlnlo": Analysis( "data mt wqqlnlo none none llqq:qqqq:eeqq py " + unf ),
"wqqqqhi": Analysis( "data mt wqqqqhi none none llqq:qqqq:eeqq py " + unf ),
"wqqqqlo": Analysis( "data mt wqqqqlo none none llqq:qqqq:eeqq py " + unf ),
"bkghi": Analysis( "data mt stand none none llqq:qqqq:eeqq:hi py " + unf ),
"bkglo": Analysis( "data mt stand none none llqq:qqqq:eeqq:lo py " + unf ) }
self.subtractVariations( analysisVariations, tfile )
self.calcSyst()
self.readRawEvents( standardAnalysis, analysisVariations, tfile,
[ "llqq", "qqqq", "eeqq" ] )
return
def calcSyst( self ):
self.calcSystSumSq( [ "tc", "costt07", "hw", "sprold" ] )
syerrbkg= self.maxAbsErrorSq( "wqqlnhi", "wqqlnlo" )
syerrbkg+= self.maxAbsErrorSq( "wqqqqhi", "wqqqqlo" )
syerrbkg+= self.maxAbsErrorSq( "bkghi", "bkglo" )
self.syerrs= np.sqrt( np.square( self.syerrs ) + syerrbkg )
return
def clone( self, values, sterrs, variationsDelta ):
aocloned= LEP2AnalysisObservable( self.obs )
aocloned.aostand= self.aostand
aocloned.points= self.points
aocloned.values= values
aocloned.sterrs= sterrs
aocloned.variationsDelta= variationsDelta
aocloned.calcSyst()
return aocloned
# Factory method to create AnalysisObservable objects:
def createAnalysisObservable( tfile, obs="thrust", unf="bbb" ):
filename= tfile.GetName()
ao= None
print "createAnalysisObservable: create for", obs, "from", filename,
if "sjm91" in filename:
print "LEP1AnalysisObservable"
ao= LEP1AnalysisObservable( obs )
elif( "sjm130" in filename or "sjm136" in filename ):
print "LEP15AnalysisObservable"
ao= LEP15AnalysisObservable( obs )
elif( "sjm161" in filename or "sjm172" in filename or "sjm183" in filename or
"sjm189" in filename or "sjm192" in filename or "sjm196" in filename or
"sjm200" in filename or "sjm202" in filename or "sjm205" in filename or
"sjm207" in filename ):
print "LEP2AnalysisObservable"
ao= LEP2AnalysisObservable( obs )
else:
print "no matching AnalysisObservable"
ao.setupFromFile( tfile, unf )
return ao
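# Illustrative helper (added): mirrors the call pattern used by the compare*
# functions below. The default file name is an assumption.
def exampleAnalysisObservable( filename="sjm91_all.root", obs="thrust" ):
    f= TFile( filename )
    ao= createAnalysisObservable( f, obs )
    ao.printResults()
    ao.printEvents()
    return ao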
# Error weighted average of results of input observables:
def combineAnalysisObservables( aobs ):
firstao= aobs[0]
for ao in aobs:
if ao.obs != firstao.obs:
raise ValueError( "Observables don't match: "+firstao.obs+" "+ao.obs )
wgts= dict()
nvalues= len(firstao.values)
sumwgts= array( "d", nvalues*[ 0.0 ] )
for ao in aobs:
wgts[ao]= np.divide( 1.0, np.square( ao.sterrs ) )
sumwgts= np.add( wgts[ao], sumwgts )
values= array( "d", nvalues*[ 0.0 ] )
for ao in aobs:
values= np.add( np.multiply( ao.values, wgts[ao] ), values )
values= np.divide( values, sumwgts )
sterrs= np.divide( 1.0, np.sqrt( sumwgts ) )
variationsDelta= dict()
for key in firstao.variationsDelta.keys():
deltas= array( "d", nvalues*[ 0.0 ] )
for ao in aobs:
deltas= np.add( np.multiply( ao.variationsDelta[key], wgts[ao] ), deltas )
variationsDelta[key]= np.divide( deltas, sumwgts )
aocombined= firstao.clone( values, sterrs, variationsDelta )
return aocombined
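# (Added note) combineAnalysisObservables implements the usual inverse-variance
# weighted mean: w_i= 1/sterr_i^2, value= sum_i( w_i*value_i )/sum_i( w_i ) and
# sterr= 1/sqrt( sum_i( w_i ) ); systematic variations are averaged with the
# same weights before the clone recomputes its systematic errors.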
# Create combined observable from file list:
def createCombineAnalysisObservables( filenames, obs="thrust" ):
if len(filenames) == 1:
f= TFile( filenames[0] )
aocomb= createAnalysisObservable( f, obs )
else:
print "createCombineAnalysisObservables: combine from",
aobs= list()
for filename in filenames:
print filename,
print
for filename in filenames:
f= TFile( filename )
ao= createAnalysisObservable( f, obs )
aobs.append( ao )
aocomb= combineAnalysisObservables( aobs )
return aocomb
# Extract ecm from file name:
def ecmFromFilename( filename ):
ecm= ""
for character in filename:
if character.isdigit():
ecm= ecm + character
return ecm
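# (Added example) e.g. ecmFromFilename( "sjm189.root" ) returns "189".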
# Plot all groomed observables at combined ecms into pdf:
def plotAllGroomedAveraged():
canv= TCanvas( "canv", "All groomed shapes", 1200, 800 )
canv.Divide( 3, 2 )
observables= [ "grthrust" , "grcpar" ]
filenameslists= [ [ "sjm91_all.root" ],
[ "sjm130.root", "sjm136.root" ],
[ "sjm161.root", "sjm172.root", "sjm183.root", "sjm189.root" ],
[ "sjm192.root", "sjm196.root", "sjm200.root","sjm202.root", "sjm205.root", "sjm207.root" ] ]
ecms= [ "91", "133", "177", "197" ]
for obs in observables:
iecm= 0
for filenames in filenameslists:
postfix=""
if filenames == filenameslists[0] and obs == observables[0]:
postfix= "("
elif filenames == filenameslists[-1] and obs == observables[-1]:
postfix= ")"
ecm= ecms[iecm]
plotGroomed( obs, filenames, ecm, logy=1, canv=canv )
title= "Title: "+obs+" "+ecm+" GeV"
print title
canv.Print( "plots_averaged.pdf"+postfix, title )
iecm= iecm+1
return
# Plot all groomed observables into pdf:
def plotAllGroomed():
filenames= [ "sjm91_all.root",
"sjm130.root",
"sjm136.root",
"sjm161.root",
"sjm172.root",
"sjm183.root",
"sjm189.root",
"sjm192.root",
"sjm196.root",
"sjm200.root",
"sjm202.root",
"sjm205.root",
"sjm207.root" ]
canv= TCanvas( "canv", "All groomed shapes", 1200, 800 )
canv.Divide( 3, 2 )
observables= [ "grthrust" , "grcpar" ]
for obs in observables:
for filename in filenames:
postfix=""
if filename == filenames[0] and obs == observables[0]:
postfix= "("
elif filename == filenames[-1] and obs == observables[-1]:
postfix= ")"
ecm= ecmFromFilename( filename )
plotGroomed( obs, [ filename ], ecm, logy=1, canv=canv )
title= "Title: "+obs+" "+ecm+" GeV"
print title
canv.Print( "plots.pdf"+postfix, title )
return
# Plot groomed observables:
def plotGroomed( obs="grthrust", filenames=[ "sjm136_test.root" ], ecm="136", logy=1, canv=None ):
thplotoptions= { "xmin": 0.0, "xmax": 0.5, "ymin": 0.005, "ymax": 50.0, "markerStyle": 20, "markerSize": 0.5, "title": "groomed Thrust", "xlabel": "1-T_{gr}", "ylabel": "1/\sigma d\sigma/d(1-T_{gr})", "logy":logy }
cpplotoptions= { "xmin": 0.0, "xmax": 1.0, "ymin": 0.03, "ymax": 30.0, "markerStyle": 20, "markerSize": 0.5, "title": "groomed C-parameter", "xlabel": "C_{gr}", "ylabel": "1/\sigma d\sigma/d(C_{gr})", "logy":logy }
plotopts= { "grthrust": thplotoptions, "grcpar": cpplotoptions }
if canv == None:
canv= TCanvas( "canv", obs+" "+ecm, 1200, 800 )
icanv= 0
for beta in [ "0.0", "1.0" ]:
for zcut in [ "0.05", "0.10", "0.15" ]:
icanv= icanv+1
canv.cd( icanv )
gPad.SetLeftMargin( 0.15 )
gPad.SetRightMargin( 0.025 )
key= obs + "_" + beta + "_" + zcut
print key
aogr= createCombineAnalysisObservables( filenames, key )
aogr.plot( plotopts[obs] )
tl= TLegend( 0.4, 0.8, 0.85, 0.85 )
tl.SetTextSize( 0.05 )
tl.SetBorderSize( 0 )
tl.AddEntry( key, "OPAL "+ecm+" GeV", "ep" )
tl.Draw( "same" )
txt= TLatex( 0.6, 0.7, "#beta="+beta+ " z_{cut}="+zcut )
txt.SetNDC( True )
txt.SetTextSize( 0.035 )
txt.Draw()
return
# Check jet rates add up to one:
def checkJetrates( filename="sjm91_all_test.root", obs="durhamycut" ):
f= TFile( filename )
valuesmap= dict()
for rate in [ "R2", "R3", "R4", "R5", "R6" ]:
ao= createAnalysisObservable( f, obs+rate )
valuesmap[rate]= ao.values
valuessum= valuesmap["R2"]
for rate in [ "R3", "R4", "R5", "R6" ]:
valuessum= np.add( valuessum, valuesmap[rate] )
print valuessum
return
# Compare y23 to M.T. Ford:
def compareY23ds():
from ROOT import TCanvas
canv= TCanvas( "canv", "y_{23}(D) comparison 91 - 189", 1000, 1200 )
canv.Divide( 2, 3 )
canv.cd( 1 )
compareY23d( "sjm91_all.root" )
canv.cd( 2 )
compareY23d( "sjm133.root" )
canv.cd( 3 )
compareY23d( "sjm161.root" )
canv.cd( 4 )
compareY23d( "sjm172.root" )
canv.cd( 5 )
compareY23d( "sjm183.root" )
canv.cd( 6 )
compareY23d( "sjm189.root" )
canv2= TCanvas( "canv2", "y_{23}(D) comparison 192 - 207", 1000, 1200 )
canv2.Divide( 2, 3 )
canv2.cd( 1 )
compareY23d( "sjm192.root" )
canv2.cd( 2 )
compareY23d( "sjm196.root" )
canv2.cd( 3 )
compareY23d( "sjm200.root" )
canv2.cd( 4 )
compareY23d( "sjm202.root" )
canv2.cd( 5 )
compareY23d( "sjm205.root" )
canv2.cd( 6 )
compareY23d( "sjm207.root" )
return
def compareY23d( filename="sjm91_all.root", mtffilename=None, opt="m" ):
if mtffilename == None:
ecm= ecmFromFilename( filename )
mtffilename= "mtford-y23d"+ecm+".txt"
arrays= ascii2arrays( mtffilename )
mtfordpointsl= arrays[0]
mtfordpointsr= arrays[1]
mtfordpoints= np.divide( np.add( arrays[0], arrays[1] ), 2.0 )
mtfordvalues= arrays[2]
mtfordsterrs= arrays[3]
mtfordsyerrs= arrays[4]
mtforderrs= np.sqrt( np.add( np.square( mtfordsterrs ), np.square( mtfordsyerrs ) ) )
if filename=="sjm133.root":
f1= TFile( "sjm130.root" )
ao1= createAnalysisObservable( f1, "durhamymerge23" )
f2= TFile( "sjm136.root" )
ao2= createAnalysisObservable( f2, "durhamymerge23" )
ao= combineAnalysisObservables( [ ao1, ao2 ] )
else:
f= TFile( filename )
ao= createAnalysisObservable( f, "durhamymerge23" )
npoints= len( mtfordpoints )
vex= array( "d", npoints*[0.0] )
tgest= TGraphErrors( npoints, mtfordpoints, mtfordvalues, vex, mtfordsterrs )
tgetot= TGraphErrors( npoints, mtfordpoints, mtfordvalues, vex, mtforderrs )
plotoptions= { "xmin": 0.0003, "xmax": 0.5, "ymin": 0.5, "ymax": 500.0, "markerStyle": 20,
"markerSize": 0.75, "title": "Durham y23 "+filename, "xlabel": "y_{23}",
"ylabel": "1/\sigma d\sigma/dy_{23}", "logx":1, "logy":1 }
ao.plot( plotoptions, opt )
tgetot.SetMarkerStyle( 24 )
tgetot.SetMarkerSize( 1.25 )
tgetot.SetName( "mtford" )
tgetot.Draw( "psame" )
tgest.Draw( "psame" )
tl= TLegend( 0.7, 0.9, 0.7, 0.9 )
tl.AddEntry( "mtford", "M.T. Ford thesis", "ep" )
tl.AddEntry( "durhamymerge23", "sjmanalysis", "ep" )
tl.Draw()
return
# Compare thrust to M.T. Ford:
def compareThrusts():
from ROOT import TCanvas
canv= TCanvas( "canv", "Thrust comparison to M.T. Ford", 1000, 1200 )
canv.Divide( 2, 3 )
canv.cd( 1 )
compareThrust( "sjm91_all.root" )
canv.cd( 2 )
compareThrust( "sjm133.root" )
canv.cd( 3 )
compareThrust( "sjm161.root" )
canv.cd( 4 )
compareThrust( "sjm172.root" )
canv.cd( 5 )
compareThrust( "sjm183.root" )
canv.cd( 6 )
compareThrust( "sjm189.root" )
canv.Print( "thrustplots.pdf(", "Title: 91 - 189 GeV" )
canv.cd( 1 )
compareThrust( "sjm192.root" )
canv.cd( 2 )
compareThrust( "sjm196.root" )
canv.cd( 3 )
compareThrust( "sjm200.root" )
canv.cd( 4 )
compareThrust( "sjm202.root" )
canv.cd( 5 )
compareThrust( "sjm205.root" )
canv.cd( 6 )
compareThrust( "sjm207.root" )
canv.Print( "thrustplots.pdf)", "Title: 192 - 207 GeV" )
return
def compareThrust( filename="sjm91_all.root", mtffilename=None ):
if mtffilename == None:
ecm= ecmFromFilename( filename )
mtffilename= "mtford-thrust"+ecm+".txt"
arrays= ascii2arrays( mtffilename )
mtfordvalues= arrays[2]
mtfordsterrs= arrays[3]
mtfordsyerrs= arrays[4]
mtforderrs= np.sqrt( np.add( np.square( mtfordsterrs ), np.square( mtfordsyerrs ) ) )
if filename=="sjm133.root":
# f1= TFile( "sjm130.root" )
# aothrust1= createAnalysisObservable( f1, "thrust" )
# f2= TFile( "sjm136.root" )
# aothrust2= createAnalysisObservable( f2, "thrust" )
# aothrust= combineAnalysisObservables( [ aothrust1, aothrust2 ] )
aothrust= createCombineAnalysisObservables( ( "sjm130.root", "sjm136.root" ), "lepthrust" )
else:
f= TFile( filename )
aothrust= createAnalysisObservable( f, "lepthrust" )
vx= array( "d", aothrust.aostand.getPointsCenter() )
npoints= len(vx)-1
vex= array( "d", npoints*[0.0] )
tgethrustst= TGraphErrors( npoints, vx, mtfordvalues, vex, mtfordsterrs )
tgethrusttot= TGraphErrors( npoints, vx, mtfordvalues, vex, mtforderrs )
plotoptions= { "xmin": 0.0, "xmax": 0.5, "ymin": 0.2, "ymax": 30, "markerStyle": 20,
"markerSize": 0.8, "title": "Thrust "+filename, "logy": 1,
"xlabel": "1-T", "ylabel": "1/\sigma d\sigma/d(1-T)" }
aothrust.plot( plotoptions )
tgethrusttot.SetMarkerStyle( 24 )
tgethrusttot.SetMarkerSize( 1.25 )
tgethrusttot.SetName( "mtford" )
tgethrusttot.Draw( "psame" )
tgethrustst.Draw( "psame" )
tl= TLegend( 0.6, 0.75, 0.85, 0.9 )
tl.AddEntry( "mtford", "M.T. Ford thesis", "ep" )
tl.AddEntry( "thrust", "sjmanalysis", "ep" )
tl.Draw()
return
# Compare PCONE OPAL results for given variant and jetrate:
def comparePxcone( filename="sjm91_all.root", optKind="emin", optRate="R2" ):
pr097vals= dict()
pr097st= dict()
pr097sy= dict()
arrays= ascii2arrays( "pr097-pxcone"+optKind+".txt" )
pr097pts= arrays[0]
pr097vals["R2"]= arrays[1]
pr097st["R2"]= arrays[2]
pr097sy["R2"]= arrays[3]
pr097vals["R3"]= arrays[4]
pr097st["R3"]= arrays[5]
pr097sy["R3"]= arrays[6]
pr097vals["R4"]= arrays[7]
pr097st["R4"]= arrays[8]
pr097sy["R4"]= arrays[9]
npr097pts= len( pr097pts )
vexpr097= array( "d", npr097pts*[0.0] )
pr097vals= np.divide( pr097vals[optRate], 100.0 )
pr097st= np.divide( pr097st[optRate], 100.0 )
pr097sy= np.divide( pr097sy[optRate], 100.0 )
pr097tot= np.sqrt( np.add( np.square( pr097st ), np.square( pr097sy ) ) )
pr408vals= dict()
pr408st= dict()
pr408sy= dict()
arrays= ascii2arrays( "pr408-pxcone"+optKind+"91.txt" )
pr408pts= arrays[0]
pr408vals["R2"]= arrays[1]
pr408st["R2"]= arrays[2]
pr408sy["R2"]= arrays[3]
pr408vals["R3"]= arrays[4]
pr408st["R3"]= arrays[5]
pr408sy["R3"]= arrays[6]
pr408vals["R4"]= arrays[7]
pr408st["R4"]= arrays[8]
pr408sy["R4"]= arrays[9]
npr408pts= len( pr408pts )
vexpr408= array( "d", npr408pts*[0.0] )
pr408vals= pr408vals[optRate]
pr408st= np.divide( pr408st[optRate], 100.0 )
pr408sy= np.divide( pr408sy[optRate] , 100.0 )
pr408tot= np.sqrt( np.add( np.square( pr408st ), np.square( pr408sy ) ) )
f= TFile( filename )
aopxcone= createAnalysisObservable( f, "pxcone"+optKind+optRate )
xmax= { "R": 1.7, "emin": 27.0 }
ymax= { "R2": 1.1, "R3": 0.35, "R4": 0.18 }
xlabel= { "R": "R [rad.]", "emin": "E_{min} [GeV]" }
ylabel= { "R2": "2-jet rate", "R3": "3-jet rate", "R4": "4-jet rate" }
plotoptions= { "xmin": 0.0, "xmax": xmax[optKind], "ymin": 0.0, "ymax": ymax[optRate],
"markerStyle": 20, "markerSize": 0.8,
"xlabel": xlabel[optKind], "ylabel": ylabel[optRate],
"title": "Cone "+optKind+" "+filename }
aopxcone.plot( plotoptions )
xshift= { "R": 0.02, "emin": 0.2 }
pr097pts= np.add( pr097pts, -xshift[optKind] )
tgepr097= TGraphErrors( npr097pts, pr097pts, pr097vals, vexpr097, pr097tot )
tgepr097.SetMarkerStyle( 24 )
tgepr097.SetMarkerSize( 1.0 )
tgepr097.SetName( "pr097" )
tgepr097.Draw( "psame" )
pr408pts= np.add( pr408pts, xshift[optKind] )
tgepr408= TGraphErrors( npr408pts, pr408pts, pr408vals, vexpr408, pr408tot )
tgepr408.SetMarkerStyle( 29 )
tgepr408.SetMarkerSize( 1.0 )
tgepr408.SetName( "pr408" )
tgepr408.Draw( "psame" )
tl= TLegend( 0.7, 0.5, 0.9, 0.7 )
tl.AddEntry( "pr097", "OPAL PR097", "ep" )
tl.AddEntry( "pr408", "OPAL PR408", "ep" )
tl.AddEntry( "pxcone"+optKind+optRate, filename, "ep" )
tl.Draw()
return
# Compare OPAL PXCONE results:
def comparePxcones( filename="sjm91_all.root" ):
from ROOT import TCanvas
canv= TCanvas( "canv", "PXCONE comparison", 1000, 1200 )
canv.Divide(2,3)
canv.cd(1)
comparePxcone( filename, "R", "R2" )
canv.cd(2)
comparePxcone( filename, "emin", "R2" )
canv.cd(3)
comparePxcone( filename, "R", "R3" )
canv.cd(4)
comparePxcone( filename, "emin", "R3" )
canv.cd(5)
comparePxcone( filename, "R", "R4" )
canv.cd(6)
comparePxcone( filename, "emin", "R4" )
canv.SaveAs( "comparePxcones.pdf" )
return
# Compare antikt, siscone and PXCONE jets in same plot
def compareConejets( filename="sjm91_all.root", optKind="R", optR="R3" ):
f= TFile( filename )
algantikt= "antikt"+optKind
algsiscone= "siscone"+optKind
algpxcone= "pxcone"+optKind+"2"
aktao= createAnalysisObservable( f, algantikt+optR )
ymax= { "R2":1.0, "R3":0.5, "R4":0.3, "R5":0.3, "R6":0.3 }
xmax= { "R":1.0, "emin":0.15 }
plotoptions= { "xmin": 0.0, "xmax": xmax[optKind], "ymin": 0.0, "ymax": ymax[optR],
"markerStyle": 20, "markerSize": 0.8,
"title": "Cone "+optKind+" "+optR+" "+filename }
akttgest, akttgesy= aktao.plot( plotoptions )
sisao= createAnalysisObservable( f, algsiscone+optR )
plotoptions["markerStyle"]= 21
plotoptions["xshift"]= xmax[optKind]/100.0
sistgest, sistgesy= sisao.plot( plotoptions, "s" )
pxao= createAnalysisObservable( f, algpxcone+optR )
plotoptions["markerStyle"]= 22
plotoptions["xshift"]= -xmax[optKind]/100.0
pxtgest, pxtgesy= pxao.plot( plotoptions, "s" )
l= TLegend( 0.7, 0.7, 0.9, 0.9 )
l.AddEntry( algantikt+optR, "anti-k_t "+optR, "ep" )
l.AddEntry( algsiscone+optR, "SISCone "+optR, "ep" )
l.AddEntry( algpxcone+optR, "PXCONE "+optR, "ep" )
l.Draw()
return
# Compare Andrii's Durham jet rates
def compareAllDurhamjetrates():
from ROOT import TCanvas
canv= TCanvas( "canv", "Durham jetrates comparison", 1000, 1200 )
canv.Divide(2,3)
canv.cd( 1 )
compareDurhamjetrates( "sjm91_all.root",
"/home/iwsatlas1/skluth/Downloads/JRTMC/share/NEW/data.dat",
"donkers-durhamjets91.txt" )
canv.cd( 2 )
compareDurhamjetrates( "sjm130.root",
"/home/iwsatlas1/skluth/Downloads/JRTMC/share/NEW2/data.dat",
None )
canv.cd( 3 )
compareDurhamjetrates( "sjm136.root",
"/home/iwsatlas1/skluth/Downloads/JRTMC/share/NEW3/data.dat",
None )
canv.cd( 4 )
compareDurhamjetrates( "sjm161.root",
"/home/iwsatlas1/skluth/Downloads/JRTMC/share/NEW4/data.dat",
"donkers-durhamjets161.txt" )
canv.cd( 5 )
compareDurhamjetrates( "sjm189.root",
"/home/iwsatlas1/skluth/Downloads/JRTMC/share/NEW7/data.dat",
"donkers-durhamjets189.txt" )
canv.cd( 6 )
compareDurhamjetrates( "sjm192.root",
"/home/iwsatlas1/skluth/Downloads/JRTMC/share/NEW8/data.dat",
"donkers-durhamjets192.txt" )
return
def compareDurhamjetrates( filename="sjm91_all.root",
datafilename="/home/iwsatlas1/skluth/Downloads/JRTMC/share/NEW/data.dat",
donkersfilename="donkers-durhamjets91.txt" ):
f= TFile( filename )
R2ao= createAnalysisObservable( f, "durhamycutfjR2" )
R3ao= createAnalysisObservable( f, "durhamycutfjR3" )
plotoptions= { "xmin": 0.0005, "xmax": 0.5, "ymin": 0.0, "ymax": 1.05, "markerStyle": 20,
"markerSize": 0.75, "title": "Durham R2 and R3 "+filename,
"xlabel": "y_{cut}", "ylabel": "Jet rates", "logx": 1 }
R2tgest, R2tgesy= R2ao.plot( plotoptions )
plotoptions["markerStyle"]= 21
R3tgest, R3tgesy= R3ao.plot( plotoptions, "s" )
arrays= ascii2arrays( datafilename )
ycutpoints= arrays[0]
R2values= np.divide( arrays[1], 100.0 )
R2sterrs= np.divide( arrays[2], 100.0 )
R2syerrs= np.divide( arrays[3], 100.0 )
R3values= np.divide( arrays[4], 100.0 )
R3sterrs= np.divide( arrays[5], 100.0 )
R3syerrs= np.divide( arrays[6], 100.0 )
R2errs= np.sqrt( np.add( np.square( R2sterrs ), np.square( R2syerrs ) ) )
R3errs= np.sqrt( np.add( np.square( R3sterrs ), np.square( R3syerrs ) ) )
n= len(ycutpoints)
xerrs= array( "d", n*[0.0] )
R2datatge= TGraphErrors( n, ycutpoints, R2values, xerrs, R2errs )
R2datatge.SetMarkerStyle( 24 )
R2datatge.SetMarkerSize( 0.75 )
R2datatge.SetName( "R2datatge" )
R2datatge.Draw( "psame" )
R3datatge= TGraphErrors( n, ycutpoints, R3values, xerrs, R3errs )
R3datatge.SetMarkerStyle( 25 )
R3datatge.SetMarkerSize( 0.75 )
R3datatge.SetName( "R3datatge" )
R3datatge.Draw( "psame" )
legend= TLegend( 0.6, 0.6, 0.9, 0.9 )
R2tgesy.SetName( "R2tgesy" )
legend.AddEntry( "R2tgesy", "OPAL R2", "pe" )
R3tgesy.SetName( "R3tgesy" )
legend.AddEntry( "R3tgesy", "OPAL R3", "pe" )
legend.AddEntry( "R2datatge", "Andrii R2", "pe" )
legend.AddEntry( "R3datatge", "Andrii R3", "pe" )
if donkersfilename:
dkarrays= ascii2arrays( donkersfilename )
dkycutpoints= np.power( 10.0, dkarrays[0] )
dkR2values= dkarrays[1]
dkR2sterrs= np.divide( dkarrays[2], 100.0 )
dkR2syerrs= np.divide( dkarrays[3], 100.0 )
dkR3values= dkarrays[4]
dkR3sterrs= np.divide( dkarrays[5], 100.0 )
dkR3syerrs= np.divide( dkarrays[6], 100.0 )
dkR2errs= np.sqrt( np.add( np.square( dkR2sterrs ), np.square( dkR2syerrs ) ) )
dkR3errs= np.sqrt( np.add( np.square( dkR3sterrs ), np.square( dkR3syerrs ) ) )
dkn= len( dkycutpoints )
dkxerrs= array( "d", dkn*[0.0] )
dkR2datatge= TGraphErrors( dkn, dkycutpoints, dkR2values, dkxerrs, dkR2errs )
dkR2datatge.SetMarkerStyle( 26 )
dkR2datatge.SetMarkerSize( 0.75 )
dkR2datatge.SetName( "dkR2datatge" )
dkR2datatge.Draw( "psame" )
dkR3datatge= TGraphErrors( dkn, dkycutpoints, dkR3values, dkxerrs, dkR3errs )
dkR3datatge.SetMarkerStyle( 27 )
dkR3datatge.SetMarkerSize( 0.75 )
        dkR3datatge.SetName( "dkR3datatge" )
dkR3datatge.Draw( "psame" )
legend.AddEntry( "dkR2datatge", "Donkers R2", "pe" )
legend.AddEntry( "dkR3datatge", "Donkers R3", "pe" )
legend.Draw()
return
# Compare EEC from various sources with own measurements
def compareEEC( filename="sjm91_all.root", datafilename="../EECMC/share/OPAL/data.dat" ):
f= TFile( filename )
ao= createAnalysisObservable( f, "EEC" )
tokens= datafilename.split( "/" )
exp= tokens[3]
plotoptions= { "xmin": 0.0, "xmax": 3.14159, "ymin": 0.05, "ymax": 5.0, "markerStyle": 20,
"markerSize": 0.5, "drawas": "3", "fillcolor": 6, "title": "EEC "+exp,
"xlabel": "\chi\ [rad.]", "ylabel": "1/\sigma d\Sigma/d\chi", "logy": 1 }
tgest, tgesy= ao.plot( plotoptions )
lines= [ line.rstrip( '\n' ) for line in open( datafilename ) ]
n= len( lines )
points= TVectorD( n )
values= TVectorD( n )
errors= TVectorD( n )
perrs= TVectorD(n)
grad2rad= 3.14159/180.0
for i in range( n ):
line= (lines[i]).split()
points[i]= float(line[0])*grad2rad
values[i]= float(line[3])
errors[i]= float(line[4])
perrs[i]= 0.0
datatge= TGraphErrors( points, values, perrs, errors )
datatge.SetMarkerStyle( 20 )
datatge.SetMarkerSize( 0.5 )
datatge.Draw( "psame" )
legend= TLegend( 0.2, 0.7, 0.5, 0.85 )
    datatge.SetName( "datatge" )
tgesy.SetName( "tgesy" )
legend.AddEntry( "datatge", exp+" data", "pe" )
legend.AddEntry( "tgesy", "OPAL "+filename, "f" )
legend.Draw()
return
def compareEECs( filename="sjm91_all.root" ):
from ROOT import TCanvas
canv= TCanvas( "canv", "EEC comparison", 1000, 1200 )
canv.Divide(2,3)
canv.cd(1)
compareEEC( filename, datafilename="../EECMC/share/OPAL/data.dat" )
canv.cd(2)
compareEEC( filename, datafilename="../EECMC/share/OPAL2/data.dat" )
canv.cd(3)
compareEEC( filename, datafilename="../EECMC/share/OPAL3/data.dat" )
canv.cd(4)
compareEEC( filename, datafilename="../EECMC/share/DELPHI/data.dat" )
canv.cd(5)
compareEEC( filename, datafilename="../EECMC/share/SLD/data.dat" )
canv.cd(6)
compareEEC( filename, datafilename="../EECMC/share/L3/data.dat" )
canv.SaveAs( "compareEECs.pdf" )
return
def testMigrationMatrix( obs="thrust", filename="sjm91_all.root" ):
hdetstr= obs+" py mt stand"
hhstr= obs+" py hadron stand"
hhnrstr= obs+" py hadron none nonrad"
mstr= "migr "+obs+" py mt stand hadron"
f= TFile( filename )
hdet= f.Get( hdetstr )
hdet.Print()
m= f.Get( mstr )
m.Print()
hh= f.Get( hhstr )
hh.Print()
hhnr= f.Get( hhnrstr )
hhnr.Print()
nbin= hdet.GetNbinsX()
import numpy as np
valuesd= np.array( nbin*[0.0] )
valuesh= np.array( nbin*[0.0] )
valueshnr= np.array( nbin*[0.0] )
cacc= np.array( nbin*[0.0] )
R= np.array( np.zeros( (nbin,nbin) ) )
for i in range( nbin ):
valuesd[i]= hdet.GetBinContent( i+1 )*hdet.GetEntries()*hdet.GetBinWidth( i+1 )
valuesh[i]= hh.GetBinContent( i+1 )*hh.GetEntries()*hh.GetBinWidth( i+1 )
valueshnr[i]= hhnr.GetBinContent( i+1 )*hhnr.GetEntries()*hhnr.GetBinWidth( i+1 )
if valuesh[i] != 0.0:
cacc[i]= valueshnr[i]/valuesh[i]
else:
cacc[i]= 0.0
for j in range( nbin ):
R[j,i]= m.GetBinContent( i+1, j+1 )
width, precision= 7, 3
fmt= "{:"+str(width)+"."+str(precision)+"f}"
for i in range( nbin ):
print fmt.format( valueshnr[i] ),
print fmt.format( valuesh[i] ),
for j in range( nbin ):
print fmt.format( R[i,j] ),
print
print " ",
for i in range( nbin ):
print fmt.format( valuesd[i] ),
print
for i in range( nbin ):
sumcol= sum( R[:,i] )
if sumcol != 0.0:
R[:,i]/= sumcol
C= np.diag( cacc )
CR= np.dot( C, R )
valuesc= np.dot( CR, valuesd )
print valueshnr
print valuesc
return
| gpl-2.0 | -873,827,653,957,365,500 | 39.442932 | 218 | 0.590037 | false |
gem/oq-hazardlib | openquake/hazardlib/tests/geo/surface/base_surface_test.py | 1 | 17539 | # The Hazard Library
# Copyright (C) 2012-2017 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import numpy
from openquake.hazardlib.geo.point import Point
from openquake.hazardlib.geo.line import Line
from openquake.hazardlib.geo.mesh import Mesh, RectangularMesh
from openquake.hazardlib.geo.surface.simple_fault import SimpleFaultSurface
from openquake.hazardlib.geo.surface.base import BaseQuadrilateralSurface
from openquake.hazardlib.tests.geo.surface import _planar_test_data
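# Test scaffolding: DummySurface wraps a list of coordinate rows, e.g.
# [[(lon, lat, depth), ...], ...], into a RectangularMesh so the distance
# methods of BaseQuadrilateralSurface can be exercised directly.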
class DummySurface(BaseQuadrilateralSurface):
def __init__(self, coordinates_list):
self.coordinates_list = coordinates_list
super(DummySurface, self).__init__()
def _create_mesh(self):
points = [[Point(*coordinates) for coordinates in row]
for row in self.coordinates_list]
return RectangularMesh.from_points_list(points)
def get_strike(self):
top_row = self.get_mesh()[0:2]
self.dip, self.strike = top_row.get_mean_inclination_and_azimuth()
return self.strike
def get_dip(self):
raise NotImplementedError()
def get_width(self):
raise NotImplementedError()
class GetMinDistanceTestCase(unittest.TestCase):
def test_1(self):
surface = DummySurface(_planar_test_data.TEST_7_RUPTURE_6_MESH)
sites = Mesh.from_points_list([Point(0, 0)])
self.assertAlmostEqual(8.01185807319,
surface.get_min_distance(sites)[0])
def test_2(self):
surface = DummySurface(_planar_test_data.TEST_7_RUPTURE_6_MESH)
sites = Mesh.from_points_list([Point(-0.25, 0.25)])
self.assertAlmostEqual(40.1213468,
surface.get_min_distance(sites)[0],
places=4)
def test_3(self):
surface = DummySurface(_planar_test_data.TEST_7_RUPTURE_2_MESH)
sites = Mesh.from_points_list([Point(0, 0)])
self.assertAlmostEqual(7.01186304977,
surface.get_min_distance(sites)[0])
def test_4(self):
surface = DummySurface(_planar_test_data.TEST_7_RUPTURE_2_MESH)
sites = Mesh.from_points_list([Point(-0.3, 0.4)])
self.assertAlmostEqual(55.6159556,
surface.get_min_distance(sites)[0],
places=4)
def test_several_sites(self):
surface = DummySurface(_planar_test_data.TEST_7_RUPTURE_2_MESH)
sites = Mesh.from_points_list([Point(0, 0), Point(-0.3, 0.4)])
dists = surface.get_min_distance(sites)
expected_dists = [7.01186304977, 55.6159556]
self.assertTrue(numpy.allclose(dists, expected_dists))
class GetJoynerBooreDistanceTestCase(unittest.TestCase):
def test_point_inside(self):
corners = [[(-0.1, -0.1, 1), (0.1, -0.1, 1)],
[(-0.1, 0.1, 2), (0.1, 0.1, 2)]]
surface = DummySurface(corners)
sites = Mesh.from_points_list([Point(0, 0), Point(0, 0, 20),
Point(0.01, 0.03)])
dists = surface.get_joyner_boore_distance(sites)
expected_dists = [0] * 3
self.assertTrue(numpy.allclose(dists, expected_dists))
def test_point_on_the_border(self):
corners = [[(0.1, -0.1, 1), (-0.1, -0.1, 1)],
[(0.1, 0.1, 2), (-0.1, 0.1, 2)]]
surface = DummySurface(corners)
sites = Mesh.from_points_list([Point(-0.1, 0.04), Point(0.1, 0.03)])
dists = surface.get_joyner_boore_distance(sites)
expected_dists = [0] * 2
self.assertTrue(numpy.allclose(dists, expected_dists, atol=1e-4))
def test_point_outside(self):
corners = [[(0.1, -0.1, 1), (-0.1, -0.1, 1)],
[(0.1, 0.1, 2), (-0.1, 0.1, 2)]]
surface = DummySurface(corners)
sites = Mesh.from_points_list([Point(-0.2, -0.2), Point(1, 1, 1),
Point(4, 5), Point(8, 10.4),
Point(0.05, 0.15, 10)])
dists = surface.get_joyner_boore_distance(sites)
expected_dists = [
Point(-0.2, -0.2).distance(Point(-0.1, -0.1)),
Point(1, 1).distance(Point(0.1, 0.1)),
Point(4, 5).distance(Point(0.1, 0.1)),
Point(8, 10.4).distance(Point(0.1, 0.1)),
Point(0.05, 0.15).distance(Point(0.05, 0.1))
]
self.assertTrue(numpy.allclose(dists, expected_dists, atol=0.2))
class GetRY0DistanceTestCase(unittest.TestCase):
def _test_rectangular_surface(self):
corners = [[(0, 0, 8), (-0.05, 0, 8), (-0.1, 0, 8)],
[(0, 0, 9), (-0.05, 0, 9), (-0.1, 0, 9)]]
surface = DummySurface(corners)
return surface
def test1_site_on_the_edges(self):
surface = self._test_rectangular_surface()
sites = Mesh.from_points_list([Point(0.0, 0.05), Point(0.0, -0.05)])
dists = surface.get_ry0_distance(sites)
expected_dists = [0.0]
self.assertTrue(numpy.allclose(dists, expected_dists))
def test2_sites_at_one_degree_distance(self):
surface = self._test_rectangular_surface()
sites = Mesh.from_points_list([Point(+1.0, 0.0), Point(+1.0, -1.0),
Point(+1.0, 1.0), Point(-1.1, +0.0),
Point(-1.1, 1.0), Point(-1.1, -1.0)])
dists = surface.get_ry0_distance(sites)
expected_dists = [111.19505230826488, 111.177990689, 111.177990689,
111.19505230826488, 111.177990689, 111.177990689]
self.assertTrue(numpy.allclose(dists, expected_dists))
class GetRXDistanceTestCase(unittest.TestCase):
def _test1to7surface(self):
corners = [[(0, 0, 8), (-0.05, 0, 8), (-0.1, 0, 8)],
[(0, 0, 9), (-0.05, 0, 9), (-0.1, 0, 9)]]
surface = DummySurface(corners)
return surface
def test1_site_on_the_hanging_wall(self):
surface = self._test1to7surface()
sites = Mesh.from_points_list([Point(0.05, 0.05), Point(40.0, 0.05)])
dists = surface.get_rx_distance(sites)
expected_dists = [5.559752615413244] * 2
self.assertTrue(numpy.allclose(dists, expected_dists))
def test2_site_on_the_foot_wall(self):
surface = self._test1to7surface()
sites = Mesh.from_points_list([Point(0.05, -0.05), Point(-140, -0.05)])
dists = surface.get_rx_distance(sites)
expected_dists = [-5.559752615413244] * 2
self.assertTrue(numpy.allclose(dists, expected_dists))
def test3_site_on_centroid(self):
surface = self._test1to7surface()
sites = Mesh.from_points_list([Point(0.05, 0)])
self.assertAlmostEqual(surface.get_rx_distance(sites)[0], 0)
def test4_site_along_strike(self):
surface = self._test1to7surface()
sites = Mesh.from_points_list([Point(0.2, 0), Point(67.6, 0),
Point(90.33, 0)])
dists = surface.get_rx_distance(sites)
expected_dists = [0] * 3
self.assertTrue(numpy.allclose(dists, expected_dists))
def test5_site_opposite_to_strike_direction(self):
surface = self._test1to7surface()
sites = Mesh.from_points_list([Point(-0.2, 0), Point(-67.6, 0),
Point(-90.33, 0)])
dists = surface.get_rx_distance(sites)
expected_dists = [0] * 3
self.assertTrue(numpy.allclose(dists, expected_dists))
def test6_one_degree_distance(self):
surface = self._test1to7surface()
sites = Mesh.from_points_list([Point(0.05, -1), Point(20, 1)])
dists = surface.get_rx_distance(sites)
expected_dists = [-111.19505230826488, +111.19505230826488]
self.assertTrue(numpy.allclose(dists, expected_dists))
def test7_ten_degrees_distance(self):
surface = self._test1to7surface()
sites = Mesh.from_points_list([Point(0, -10), Point(-15, 10)])
dists = surface.get_rx_distance(sites)
expected_dists = [-1111.9505230826488, +1111.9505230826488]
self.assertTrue(numpy.allclose(dists, expected_dists))
def test8_strike_of_255_degrees(self):
corners = [[(0.05, 0.05, 8), (-0.05, -0.05, 8)],
[(0.05, 0.05, 9), (-0.05, -0.05, 9)]]
surface = DummySurface(corners)
sites = Mesh.from_points_list([Point(0.05, 0)])
self.assertAlmostEqual(surface.get_rx_distance(sites)[0],
-3.9313415355436705, places=4)
def test9_non_planar_surface_case1(self):
# vertical, non-planar surface made of two segments, both of 40 km
# length. The first segment has an azimuth of 80 degrees while the
# second has an azimuth of 30 degrees. The surface presents therefore
# a kink pointing towards south-east
corners = [
[(0., 0., 0.), (0.354264, 0.062466, 0), (0.534131, 0.373999, 0)],
[(0., 0., 10.), (0.354264, 0.062466, 10), (0.534131, 0.373999, 10)]
]
surface = DummySurface(corners)
# distances are tested on 4 points. The first two are on the hanging-
# wall and foot-wall of the first segment (10 km distance), while
# the third and fourth are on the hanging-wall and foot-wall of the
# second segment (20 km distance)
sites = Mesh.from_points_list([
Point(0.192748, -0.057333), Point(0.161515, 0.119799),
Point(0.599964, 0.128300), Point(0.288427, 0.308164)
])
numpy.testing.assert_allclose(
surface.get_rx_distance(sites),
[10., -10., 20., -20], rtol=1e-5
)
def test10_non_planar_surface_case2(self):
# vertical, non-planar surface made of two segments, both of 40 km
# length. The first segment has an azimuth of 30 degrees while the
# second has an azimuth of 80 degrees. The surface presents therefore
# a kink pointing towards north-west
corners = [
[(0., 0., 0.), (0.179866, 0.311534, 0), (0.534137, 0.373994, 0)],
[(0., 0., 10.), (0.179866, 0.311534, 10), (0.534137, 0.373994, 10)]
]
surface = DummySurface(corners)
# distances are tested on 4 points. The first two are on the hanging-
# wall and foot-wall of the first segment (10 km distance), while
# the third and fourth are on the hanging-wall and foot-wall of the
# second segment (20 km distance)
sites = Mesh.from_points_list([
Point(0.167816, 0.110801), Point(0.012048, 0.200733),
Point(0.388234, 0.165633), Point(0.325767, 0.519897)
])
numpy.testing.assert_allclose(
surface.get_rx_distance(sites),
[10., -10., 20., -20], rtol=1e-5
)
def test11_non_planar_surface_case3(self):
# same geometry as 'test10_non_planar_surface_case2' but with reversed
# strike (edges specified in the opposite direction)
corners = [
[(0.534137, 0.373994, 0), (0.179866, 0.311534, 0), (0., 0., 0.)],
[(0.534137, 0.373994, 10), (0.179866, 0.311534, 10), (0., 0., 10.)]
]
surface = DummySurface(corners)
# distances are tested on 4 points. The first two are on the foot-
# wall and hanging-wall of the first segment (10 km distance), while
# the third and fourth are on the foot-wall and hanging-wall of the
# second segment (20 km distance)
sites = Mesh.from_points_list([
Point(0.167816, 0.110801), Point(0.012048, 0.200733),
Point(0.388234, 0.165633), Point(0.325767, 0.519897)
])
# distances remain the same, but signs are reversed
numpy.testing.assert_allclose(
surface.get_rx_distance(sites),
[-10., 10., -20., 20], rtol=1e-5
)
class GetTopEdgeDepthTestCase(unittest.TestCase):
def test_with_depth(self):
corners = [[(-0.5, -0.5, 3.3), (0.5, 0.5, 3.5)],
[(-0.5, -0.5, 9.3), (0.5, 0.5, 9.8)]]
surface = DummySurface(corners)
self.assertAlmostEqual(surface.get_top_edge_depth(), 3.3)
def test_with_depth_topo(self):
corners = [[(-0.5, -0.5, -3.3), (0.5, 0.5, -3.5)],
[(-0.5, -0.5, 9.3), (0.5, 0.5, 9.8)]]
surface = DummySurface(corners)
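        # negative depths represent points above sea level (topography); the
        # top edge depth is the shallowest value along the top row, i.e. -3.5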
self.assertAlmostEqual(surface.get_top_edge_depth(), -3.5)
def test_one_row_no_depth(self):
corners = [[(-0.5, -0.5), (0.5, 0.5)]]
surface = DummySurface(corners)
self.assertAlmostEqual(surface.get_top_edge_depth(), 0)
class GetAreaTestCase(unittest.TestCase):
def test_get_area(self):
corners = [[(0.0, 0.0, 0.0), (0.0, 0.089932, 0.0)],
[(0.0, 0.0, 10.0), (0.0, 0.089932, 10.0)]]
surface = DummySurface(corners)
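        # 0.089932 degrees of latitude is roughly 10 km, and the vertical
        # surface extends 10 km down dip, giving an area of about 100 km^2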
self.assertAlmostEqual(100.0, surface.get_area(), places=0)
class GetResampledTopEdge(unittest.TestCase):
def test_get_resampled_top_edge(self):
upper_seismogenic_depth = 0.
lower_seismogenic_depth = 40.
dip = 90.
mesh_spacing = 10.
fault_trace = Line([Point(0.0, 0.0), Point(0.5, 0.5), Point(1.0, 1.0)])
whole_fault_surface = SimpleFaultSurface.from_fault_data(
fault_trace, upper_seismogenic_depth,
lower_seismogenic_depth, dip, mesh_spacing
)
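        # the three trace vertices are collinear, so the resampled top edge
        # is expected to collapse to the two end points of the trace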
ref = Line([Point(0., 0.), Point(1.0, 1.0)])
result = whole_fault_surface.get_resampled_top_edge()
for ref_point, result_point in zip(ref.points, result.points):
self.assertAlmostEqual(ref_point.longitude,
result_point.longitude, delta=0.1)
self.assertAlmostEqual(ref_point.latitude,
result_point.latitude, delta=0.1)
self.assertAlmostEqual(ref_point.depth,
result_point.depth, delta=0.1)
def test_get_resampled_top_edge_non_planar(self):
upper_seismogenic_depth = 0.
lower_seismogenic_depth = 40.
dip = 90.
mesh_spacing = 10.
fault_trace = Line([Point(0.0, 0.0), Point(0.5, 0.5), Point(1.5, 1.0)])
whole_fault_surface = SimpleFaultSurface.from_fault_data(
fault_trace, upper_seismogenic_depth,
lower_seismogenic_depth, dip, mesh_spacing
)
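        # the trace bends at (0.5, 0.5), so the resampled top edge is
        # expected to preserve that intermediate vertex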
ref = Line([Point(0., 0.), Point(0.5, 0.5), Point(1.5, 1.0)])
result = whole_fault_surface.get_resampled_top_edge()
for ref_point, result_point in zip(ref.points, result.points):
self.assertAlmostEqual(ref_point.longitude,
result_point.longitude, delta=0.1)
self.assertAlmostEqual(ref_point.latitude,
result_point.latitude, delta=0.1)
self.assertAlmostEqual(ref_point.depth,
result_point.depth, delta=0.1)
class GetBoundingBoxTestCase(unittest.TestCase):
def test_get_bounding_box(self):
corners = [[(0.0, 0.0, 0.0), (0.1, 0.2, 0.0)],
[(0.05, -0.3, 10.0), (0.3, 0.05, 10.0)]]
surface = DummySurface(corners)
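        # the bounding box is returned as (west, east, north, south) and must
        # enclose the extreme longitudes and latitudes of all corner points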
west, east, north, south = surface.get_bounding_box()
self.assertEqual(0.0, west)
self.assertEqual(0.3, east)
self.assertEqual(0.2, north)
self.assertEqual(-0.3, south)
class GetMiddlePointTestCase(unittest.TestCase):
def test_get_middle_point(self):
corners = [[(0.0, 0.0, 0.0), (0.0, 0.089932, 0.0)],
[(0.0, 0.0, 10.0), (0.0, 0.089932, 10.0)]]
surface = DummySurface(corners)
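        # the middle point is expected halfway along strike (latitude
        # ~0.044966, half of 0.089932 degrees) and halfway down dip (5 km)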
self.assertTrue(
Point(0.0, 0.044966, 5.0) == surface.get_middle_point()
)
class GetAzimuthTestCase(unittest.TestCase):
def test_01(self):
corners = [[(0.0, 0.0, 0.0), (0.0, 0.1, 0.0)],
[(0.0, 0.0, 10.0), (0.0, 0.1, 10.0)]]
surface = DummySurface(corners)
mesh = Mesh.from_points_list([Point(0.0, 0.2),
Point(0.1, 0.05),
Point(0.0, -0.2)])
azimuths = surface.get_azimuth(mesh)
expected = numpy.array([0, 90, 180])
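        # wrap azimuths greater than 180 degrees into the (-180, 180] range
        # so that a value just below 360 compares against the expected 0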
azimuths[azimuths > 180] = azimuths[azimuths > 180] - 360
numpy.testing.assert_almost_equal(expected, azimuths, 1)
def test_02(self):
corners = [[(-1.0, 0.0, 0.0), (1.0, 0.0, 0.0)],
[(-1.0, 0.0, 10.0), (1.0, 0.0, 10.0)]]
surface = DummySurface(corners)
mesh = Mesh.from_points_list([Point(0.0, 0.2),
Point(0.0, -0.2),
Point(-0.1, 0.1)])
azimuths = surface.get_azimuth(mesh)
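        # the expected values are consistent with azimuths measured relative
        # to the surface strike (90 degrees here): a site due north of the
        # surface maps to 270, due south to 90, and to the north-west to 225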
expected = numpy.array([270., 90., 225.])
numpy.testing.assert_almost_equal(expected, azimuths, 2)
| agpl-3.0 | -7,765,860,766,505,043,000 | 42.199507 | 79 | 0.575688 | false |