repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
lucidmotifs/auto-aoc | .venv/lib/python3.5/site-packages/pylint/test/functional/unused_argument.py | 1 | 1305 | # pylint: disable=missing-docstring,too-few-public-methods
def test_unused(first, second, _not_used): # [unused-argument, unused-argument]
pass
def test_prefixed_with_ignored(first, ignored_second):
first()
def test_prefixed_with_unused(first, unused_second):
first()
# for Sub.inherited, only the warning for "aay" is desired.
# The warnings for "aab" and "aac" are most likely false positives though,
# because there could be another subclass that overrides the same method and does
# use the arguments (eg Sub2)
class Base(object):
"parent"
def inherited(self, aaa, aab, aac):
"abstract method"
raise NotImplementedError
class Sub(Base):
"child 1"
def inherited(self, aaa, aab, aac):
"overridden method, though don't use every argument"
return aaa
def newmethod(self, aax, aay): # [unused-argument]
"another method, warning for aay desired"
return self, aax
class Sub2(Base):
"child 1"
def inherited(self, aaa, aab, aac):
"overridden method, use every argument"
return aaa + aab + aac
def metadata_from_dict(key):
"""
Should not raise unused-argument message because key is
used inside comprehension dict
"""
return {key: str(value) for key, value in key.items()}
| mit | 3,123,444,638,351,331,300 | 26.1875 | 81 | 0.67433 | false |
kobejean/tensorflow | tensorflow/contrib/quantize/python/quantize_graph_test.py | 1 | 13460 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for the quantize_graph graph rewriting API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.quantize.python import quantize_graph
from tensorflow.python import training
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import template
from tensorflow.python.platform import googletest
class QuantizeGraphTest(test_util.TensorFlowTestCase):
# We have a lot of other tests that test the details of the rewrite; here we
# just test the specific features of the quantize_graph API.
def _RunTestOverAllRewrites(self, test_fn):
rewrite_fns = [
quantize_graph.create_training_graph,
quantize_graph.create_eval_graph,
quantize_graph.experimental_create_training_graph,
quantize_graph.experimental_create_eval_graph,
]
for fn in rewrite_fns:
test_fn(fn)
def _RunTestOverTrainingRewrites(self, test_fn):
rewrite_fns = [
quantize_graph.create_training_graph,
quantize_graph.experimental_create_training_graph,
]
for fn in rewrite_fns:
test_fn(fn)
def _RunTestOverEvalRewrites(self, test_fn):
rewrite_fns = [
quantize_graph.create_eval_graph,
quantize_graph.experimental_create_eval_graph,
]
for fn in rewrite_fns:
test_fn(fn)
def _RunTestOverExperimentalRewrites(self, test_fn):
rewrite_fns = [
quantize_graph.experimental_create_training_graph,
quantize_graph.experimental_create_eval_graph,
]
for fn in rewrite_fns:
test_fn(fn)
def _RunTestOverExperimentalRewritesWithScope(self, test_fn, scope):
def with_absent_scope(fn):
def fn_with_absent_scope(*args):
fn(*args, scope=scope)
return fn_with_absent_scope
rewrite_fns = [
with_absent_scope(
quantize_graph.experimental_create_training_graph),
with_absent_scope(
quantize_graph.experimental_create_eval_graph),
]
for fn in rewrite_fns:
test_fn(fn)
def testRewrite(self):
self._RunTestOverAllRewrites(self._TestRewrite)
def _TestRewrite(self, rewrite_fn):
graph = ops.Graph()
with graph.as_default():
self._ConvLayer()
orig_variable_names = set(
[v.name for v in graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
rewrite_fn(graph)
q_variables = graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
# Ensure that variables were added.
self.assertTrue(len(orig_variable_names) < len(q_variables))
def testDefaultGraph(self):
self._RunTestOverAllRewrites(self._TestDefaultGraph)
def _TestDefaultGraph(self, rewrite_fn):
# Tests that the default graph is correctly used when no args are provided
# to rewrite_fn.
with ops.Graph().as_default() as g:
self._ConvLayer()
orig_variable_names = set(
[v.name for v in g.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
rewrite_fn()
q_variables = g.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
# Ensure that variables were added.
self.assertTrue(len(orig_variable_names) < len(q_variables))
def testWithPostActivationBypass(self):
self._RunTestOverAllRewrites(self._TestWithPostActivationBypass)
def _TestWithPostActivationBypass(self, rewrite_fn):
# Tests that the post-activation bypass add is quantized when the conv layer
# is built with post_activation_bypass=True.
with ops.Graph().as_default() as g:
self._ConvLayer(post_activation_bypass=True, scope='scope1')
rewrite_fn()
op_names = [op.name for op in g.get_operations()]
self.assertTrue(any(
'scope1/post_activation_bypass_quant/' in name for name in op_names))
def testQuantDelay(self):
self._RunTestOverTrainingRewrites(self._TestQuantDelay)
def _TestQuantDelay(self, rewrite_fn):
with ops.Graph().as_default() as g:
self._ConvLayer()
quant_delay = 100
rewrite_fn(quant_delay=quant_delay)
quant_delay_found = False
for op in g.get_operations():
# Check to see if the quant_delay is correctly set.
if 'activate_quant' in op.name and op.type == 'Const':
quant_delay_found = True
const_value = str(op.get_attr('value'))
self.assertTrue(('int64_val: %i' % quant_delay) in const_value)
self.assertTrue(quant_delay_found)
def testTrainingOpsCheck(self):
self._RunTestOverTrainingRewrites(self._TestTrainingOpsCheck)
def _TestTrainingOpsCheck(self, rewrite_fn):
with ops.Graph().as_default():
output = self._ConvLayer()
output_scalar = math_ops.reduce_sum(output)
loss = math_ops.square(output_scalar - 1)
opt = training.gradient_descent.GradientDescentOptimizer(0.0001)
opt.minimize(loss)
with self.assertRaisesRegexp(ValueError, 'Training op found in graph'):
rewrite_fn()
def testWeightBits(self):
self._RunTestOverExperimentalRewrites(self._TestWeightBits)
def _TestWeightBits(self, rewrite_fn):
with ops.Graph().as_default() as g:
self._ConvLayer()
weight_bits = 4
rewrite_fn(weight_bits=weight_bits)
weights_quant_found = False
for op in g.get_operations():
# Check to see if FakeQuant operations for weights have the right bits
# set.
if 'weights_quant' in op.name and op.type == 'FakeQuantWithMinMaxVars':
weights_quant_found = True
self.assertEqual(op.get_attr('num_bits'), weight_bits)
self.assertTrue(weights_quant_found)
def testActivationBits(self):
self._RunTestOverExperimentalRewrites(self._TestActivationBits)
def _TestActivationBits(self, rewrite_fn):
with ops.Graph().as_default() as g:
self._ConvLayer()
activation_bits = 4
rewrite_fn(activation_bits=activation_bits)
act_quant_found = False
for op in g.get_operations():
# Check to see if FakeQuant operations for activations have the right bits
# set.
act_quant_names = ['act_quant', 'conv_quant', 'add_quant']
if any(s in op.name
for s in act_quant_names) and op.type == 'FakeQuantWithMinMaxVars':
act_quant_found = True
self.assertEqual(op.get_attr('num_bits'), activation_bits)
self.assertTrue(act_quant_found)
def testTrainingQuantization(self):
self._RunTestOverTrainingRewrites(self._TestTrainingQuantization)
def _TestTrainingQuantization(self, rewrite_fn):
with ops.Graph().as_default() as g:
self._ConvLayer()
rewrite_fn()
# Ensure that FakeQuant and variable update nodes were found.
quant_found = False
assign_min_last_found = False
assign_min_ema_found = False
assign_max_last_found = False
assign_max_ema_found = False
for op in g.get_operations():
# Check that FakeQuant operations were added.
if op.type == 'FakeQuantWithMinMaxVars':
quant_found = True
# Check that update operations for the added min max variables exist in
# the graph.
if 'AssignMinLast' in op.name:
assign_min_last_found = True
elif 'AssignMinEma' in op.name:
assign_min_ema_found = True
elif 'AssignMaxLast' in op.name:
assign_max_last_found = True
elif 'AssignMaxEma' in op.name:
assign_max_ema_found = True
self.assertTrue(assign_min_last_found)
self.assertTrue(assign_min_ema_found)
self.assertTrue(assign_max_last_found)
self.assertTrue(assign_max_ema_found)
self.assertTrue(quant_found)
def testEvalQuantization(self):
self._RunTestOverEvalRewrites(self._TestEvalQuantization)
def _TestEvalQuantization(self, rewrite_fn):
with ops.Graph().as_default() as g:
self._ConvLayer()
rewrite_fn()
# Ensure that FakeQuant nodes were found and that no variable update nodes exist.
quant_found = False
for op in g.get_operations():
# Check that FakeQuant operations were added.
if op.type == 'FakeQuantWithMinMaxVars':
quant_found = True
# Check that update operations for the added min max variables don't
# exist in the graph.
update_names = [
'AssignMinLast', 'AssignMinEma', 'AssignMaxLast', 'AssignMaxEma'
]
self.assertFalse(any(s in op.name for s in update_names))
self.assertTrue(quant_found)
def testIdempotent(self):
self._RunTestOverAllRewrites(self._TestIdempotent)
def _TestIdempotent(self, rewrite_fn):
with ops.Graph().as_default() as g:
self._ConvLayer()
rewrite_fn()
graph_def_before = str(g.as_graph_def())
# Ensuring that calling the rewrite again doesn't add more nodes.
rewrite_fn()
graph_def_after = str(g.as_graph_def())
self.assertEqual(graph_def_before, graph_def_after)
def testRewriteWithScope(self):
self._RunTestOverExperimentalRewritesWithScope(
self._TestRewriteWithScope, 'scope1')
def _TestRewriteWithScope(self, rewrite_fn):
graph = ops.Graph()
with graph.as_default():
scope1_output = self._ConvLayer(scope='scope1')
self._ConvLayer(input_tensor=scope1_output, scope='scope2')
rewrite_fn(graph)
op_names = [op.name for op in graph.get_operations()]
# The weights and activations of scope1 are quantized, but those of scope2 are not.
self.assertTrue(
any('scope1/Conv/act_quant' in name for name in op_names))
self.assertTrue(
any('scope1/Conv/weights_quant' in name for name in op_names))
self.assertFalse(
any('scope2/Conv/act_quant' in name for name in op_names))
self.assertFalse(
any('scope2/Conv/weights_quant' in name for name in op_names))
def testRewriteWithNonMatchingScope(self):
self._RunTestOverExperimentalRewritesWithScope(
self._TestRewriteWithNonMatchingScope, 'NonExistingScope')
def _TestRewriteWithNonMatchingScope(self, rewrite_fn):
graph = ops.Graph()
with graph.as_default():
self._ConvLayer()
op_names_before_rewrite = set([op.name for op in graph.get_operations()])
rewrite_fn(graph)
op_names_after_rewrite = set([op.name for op in graph.get_operations()])
# No ops should be inserted or removed.
self.assertEqual(op_names_before_rewrite, op_names_after_rewrite)
def testWithSharedWeights(self):
self._RunTestOverAllRewrites(self._TestWithSharedWeights)
self._RunTestOverTrainingRewrites(self._TestRewriteWithSharedWeights)
def _TestRewriteWithSharedWeights(self, rewrite_fn, quant_delay=1):
self._TestWithSharedWeights(rewrite_fn, quant_delay)
def _TestWithSharedWeights(self, rewrite_fn, quant_delay=None):
with ops.Graph().as_default() as g:
conv = template.make_template('shared_weights_conv', self._ConvLayer)
conv()
conv()
if quant_delay is None:
rewrite_fn()
else:
rewrite_fn(quant_delay=quant_delay)
conv_ops = [op for op in g.get_operations() if op.type == 'Conv2D']
weights_quants = [
op for op in g.get_operations()
if 'weights_quant' in op.name and op.type == 'FakeQuantWithMinMaxVars'
]
# Check that the shared weights variable is not quantized multiple times
self.assertTrue(len(weights_quants) == 1)
weights_quant_tensor = weights_quants[0].outputs[0]
if quant_delay:
delayed_weights_quants = [
op for op in g.get_operations()
if 'weights_quant' in op.name and op.type == 'Merge'
]
self.assertTrue(len(delayed_weights_quants) == 1)
weights_quant_tensor = delayed_weights_quants[0].outputs[0]
# Check that the Conv2D operations get the quantized weights
self.assertTrue(all(weights_quant_tensor in op.inputs for op in conv_ops))
def _ConvLayer(
self, input_tensor=None, scope='test', pre_activation_bypass=False,
post_activation_bypass=False):
"""Add a basic convolution layer to the default graph."""
batch_size, height, width, depth = 5, 128, 128, 3
if input_tensor is None:
input_tensor = array_ops.zeros((batch_size, height, width, depth))
weight_init = init_ops.truncated_normal_initializer
with ops.name_scope(scope):
output = layers.conv2d(
input_tensor,
depth, [5, 5],
padding='SAME',
weights_initializer=weight_init(0.09),
activation_fn=None)
if pre_activation_bypass:
output += input_tensor
output = nn_ops.relu6(output)
if post_activation_bypass:
output += input_tensor
return output
if __name__ == '__main__':
googletest.main()
| apache-2.0 | -4,282,144,178,274,135,000 | 35.378378 | 80 | 0.683507 | false |
patrickrall/CircuitSimulator | libcirc/innerprod.py | 1 | 5589 | #
# file: innerprod.py
# Methods for evaluating inner products of P|H^t> where
# P is a given projector.
#
import numpy as np
from multiprocessing import Pool, cpu_count
from libcirc.stabilizer.stabilizer import StabilizerState
from libcirc.stateprep import prepH, prepL
# Median of means calculation can be done via Chebychev and Chernoff bounds.
# http://www.cs.utexas.edu/~ecprice/courses/randomized/notes/lec5.pdf
# If sampledProjector has error worse than e with probability p,
# then multiSampledProjector has error worse than e with probability
# less than delta = exp(-2m(1/2 - p)^2) where m is the number of bins.
#
# Since L is proportional to 1/p, best total number of samples m*L is minimized
# at the minimum of m/p proportional to p^(-1) * (1/2 - p)^(-2) which is p = 1/6.
# Thus for best results pick L = 6 * (2^t - 1)/(2^t + 1) * (1/e^2)
# and m = 4.5 log(1 / delta). This is only better than mean-only sampling
# for failure probabilities smaller than 0.0076 which is the solution to
# the equation 1/p = 27 ln (1/p).
#
# In the large t limit, can achieve e = 0.1 with 99.9% chance with 600 samples,
# and 32 bins. This would need 100 000 samples for mean-only mode, over 5x more.
def multiSampledProjector(P, L, norm, bins=32, samples=600, procs=1):
means = []
for i in range(bins):
means.append(sampledProjector(P, L, norm, samples=samples, procs=procs))
return np.median(means)
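# A worked instance of the bounds above (illustrative note added for clarity,
# not part of the original module): in the large-t limit the prefactor
# (2^t - 1)/(2^t + 1) approaches 1, so a target error e = 0.1 with failure
# probability delta = 1e-3 gives L = 6/e^2 = 600 samples per bin and
# m = 4.5*ln(1/delta) ~= 31.1 bins, which matches the samples=600, bins=32
# defaults of multiSampledProjector.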
# If number of samples is L, then this distribution has mean || P |H^t> ||^2
# and standard deviation sqrt((2^t-1)/(2^t+1)) * || P |H^t> ||^2 * (1/sqrt(L))
#
# On its own, this method guarantees output error e * || P |H^t> ||^2
# with probability (1 - p) if L = (2^t - 1)/(2^t + 1) * (1/p) * (1/e^2)
#
# In the large t limit, can achieve e = 0.1 with 95% chance with 2000 samples.
def sampledProjector(P, L, norm, samples=2000, procs=1):
(phases, xs, zs) = P
# empty projector
if len(phases) == 0:
return np.abs(norm)**2
# clifford circuit
if len(xs[0]) == 0:
lookup = {0: 1, 2: -1}
gens = [1] # include identity
for phase in phases: gens.append(lookup[phase])
# calculate sum of all permutations of generators
return sum(gens)/len(gens) * np.abs(norm)**2
if procs is None:
try:
procs = cpu_count()
except NotImplementedError:
procs = 1
seeds = np.random.random_integers(0, 2**32-1, samples)
queries = [(P, L, seed) for seed in seeds]
if procs > 1:
pool = Pool(procs)
out = sum(pool.map(singleProjectorSample, queries))/samples
pool.close()
return out
else:
return sum(map(singleProjectorSample, queries))/samples
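# Note on the seeding scheme above: each query tuple carries its own RNG seed,
# and singleProjectorSample re-seeds NumPy before sampling. A plausible reason
# (an assumption, not stated in the original) is that Pool workers forked from
# the parent process would otherwise inherit identical NumPy RNG state and
# could return correlated samples.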
# Evaluate 2^t * || <theta| P |H^t> ||^2 for a random theta.
# This distribution has mean || P |H^t> ||^2
# and standard deviation sqrt((2^t-1)/(2^t+1)) * || P |H^t> ||^2
def singleProjectorSample(args):
(P, L, seed) = args # unpack arguments (easier for parallel code)
(phases, xs, zs) = P
t = len(xs[0])
# init seed
np.random.seed(seed)
# sample random theta
theta = StabilizerState.randomStabilizerState(t)
# project random state to P
projfactor = 1
for g in range(len(phases)):
res = theta.measurePauli(phases[g], zs[g], xs[g])
projfactor *= res
if res == 0: return 0 # theta annihilated by P
total = 0
if L is None: # exact decomposition
size = int(np.ceil(t/2))
for i in range(0, 2**size):
phi = prepH(i, t)
total += StabilizerState.innerProduct(theta, phi)
else: # approximate decomposition
size = len(L)
for i in range(0, 2**size):
phi = prepL(i, t, L)
total += StabilizerState.innerProduct(theta, phi)
return 2**t * np.abs(projfactor*total)**2
# calculate projector exactly
def exactProjector(P, L, norm, procs=1):
(phases, xs, zs) = P
# empty projector
if len(phases) == 0:
return np.abs(norm)**2
t = len(xs[0])
# clifford circuit
if t == 0:
lookup = {0: 1, 2: -1}
generators = [1] # include identity
for phase in phases: generators.append(lookup[phase])
# calculate sum of all permutations of generators
return sum(generators)/len(generators)
if L is None: size = int(np.ceil(t/2))
else: size = len(L)
if procs is None:
try:
procs = cpu_count()
except NotImplementedError:
procs = 1
queries = [(P, L, l) for l in range(0, 2**(size-1) * (2**size + 1))]
if procs > 1:
pool = Pool(procs)
total = sum(pool.map(exactProjectorWork, queries))
pool.close()
else:
total = sum(map(exactProjectorWork, queries))
return np.abs(total)
def exactProjectorWork(args):
(P, L, l) = args
(phases, xs, zs) = P
t = len(xs[0])
if L is None: size = int(np.ceil(t/2))
else: size = len(L)
chi = 2**size
i = 0
while l >= chi - i:
l -= chi - i
i += 1
j = l + i
if L is None: theta = prepH(i, t)
else: theta = prepL(i, t, L)
projfactor = 1
for g in range(len(phases)):
res, status = theta.measurePauli(phases[g], zs[g], xs[g], give_status=True)
projfactor *= res
if res == 0: return 0 # theta annihilated by P
if L is None: phi = prepH(j, t)
else: phi = prepL(j, t, L)
inner = StabilizerState.innerProduct(theta, phi)
if i == j:
return inner * projfactor
else:
return 2 * np.real(inner) * projfactor
| mit | -7,175,787,578,145,242,000 | 29.210811 | 83 | 0.602254 | false |
salsita/flask-mime-encoders | lib/json.py | 1 | 1747 | """Flask MIME JSON encoder and decoder."""
__all__ = 'JsonMimeEncoder'.split()
from . import MimeEncoders
from flask import json, request, Response
from functools import wraps
class JsonMimeEncoder(MimeEncoders.base):
"""Flask MIME JSON encoder and decoder."""
name = 'json'
mimetype = 'application/json'
JSONEncoder = json.JSONEncoder
JSONDecoder = json.JSONDecoder
@classmethod
def autoencoded(cls, view):
@wraps(view)
def decorated_view(**uri_params):
response = view(**uri_params)
if not isinstance(response, (Response, basestring)):
response = cls.make_response(response)
return response
return decorated_view
@classmethod
def autodecoded(cls, view):
@wraps(view)
def decorated_view(**uri_params):
uri_params.update(cls.get_request_data())
return view(**uri_params)
return decorated_view
@classmethod
def make_response(cls, data, **options):
options['mimetype'] = cls.mimetype
return Response(cls.dumps(data), **options)
@classmethod
def get_request_data(cls, request=request, **options):
if request.json:
return request.json
elif request.data:
return cls.loads(request.data, **options)
else:
return {}
@classmethod
def loads(_cls, s, **kwargs):
if 'cls' not in kwargs:
kwargs['cls'] = _cls.JSONDecoder
return json.loads(s, **kwargs)
@classmethod
def dumps(_cls, obj, **kwargs):
if 'cls' not in kwargs:
kwargs['cls'] = _cls.JSONEncoder
return json.dumps(obj, **kwargs)
MimeEncoders.json = JsonMimeEncoder
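# Illustrative usage sketch (assumes a Flask app and an '/items' route defined
# elsewhere; neither exists in this module):
#
#     @app.route('/items', methods=['POST'])
#     @MimeEncoders.json.autodecoded
#     @MimeEncoders.json.autoencoded
#     def create_item(**fields):
#         return {'created': fields}
#
# autodecoded() merges the decoded JSON request body into the view's keyword
# arguments via get_request_data(), and autoencoded() wraps any non-Response
# return value in an application/json response through make_response().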
| mit | -5,810,382,172,931,417,000 | 26.730159 | 64 | 0.608472 | false |
Unode/ete | ete3/parser/newick.py | 1 | 20069 | # #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: [email protected]
#
#
# #END_LICENSE#############################################################
from __future__ import absolute_import
from __future__ import print_function
import re
import os
import six
from six.moves import map
__all__ = ["read_newick", "write_newick", "print_supported_formats"]
ITERABLE_TYPES = set([list, set, tuple, frozenset])
# Regular expressions used for reading newick format
_ILEGAL_NEWICK_CHARS = ":;(),\[\]\t\n\r="
_NON_PRINTABLE_CHARS_RE = "[\x00-\x1f]+"
_NHX_RE = "\[&&NHX:[^\]]*\]"
_FLOAT_RE = "\s*[+-]?\d+\.?\d*(?:[eE][-+]?\d+)?\s*"
#_FLOAT_RE = "[+-]?\d+\.?\d*"
#_NAME_RE = "[^():,;\[\]]+"
_NAME_RE = "[^():,;]+?"
# thanks to: http://stackoverflow.com/a/29452781/1006828
_QUOTED_TEXT_RE = r"""((?=["'])(?:"[^"\\]*(?:\\[\s\S][^"\\]*)*"|'[^'\\]*(?:\\[\s\S][^'\\]*)*'))"""
#_QUOTED_TEXT_RE = r"""["'](?:(?<=")[^"\\]*(?s:\\.[^"\\]*)*"|(?<=')[^'\\]*(?s:\\.[^'\\]*)*')""]"]"""
#_QUOTED_TEXT_RE = r"""(?=["'])(?:"[^"\\]*(?:\\[\s\S][^"\\]*)*"|'[^'\\]*(?:\\[\s\S][^'\\]*)*')]"]")"]"""
_QUOTED_TEXT_PREFIX='ete3_quotref_'
DEFAULT_DIST = 1.0
DEFAULT_NAME = ''
DEFAULT_SUPPORT = 1.0
FLOAT_FORMATTER = "%0.6g"
#DIST_FORMATTER = ":"+FLOAT_FORMATTER
NAME_FORMATTER = "%s"
def set_float_format(formatter):
''' Set the conversion format used to represent float distances and support
values in the newick representation of trees.
For example, use set_float_format('%0.32f') to specify 32 decimal numbers
when exporting node distances and bootstrap values.
Scientific notation (%e) or any other custom format is allowed. The
formatter string should not contain any character that may break newick
structure (i.e.: ":;,()")
'''
global FLOAT_FORMATTER
FLOAT_FORMATTER = formatter
#DIST_FORMATTER = ":"+FLOAT_FORMATTER
# Allowed formats. This table is used to read and write newick using
# different convenctions. You can also add your own formats in an easy way.
#
#
# FORMAT: [[LeafAttr1, LeafAttr1Type, Strict?], [LeafAttr2, LeafAttr2Type, Strict?],\
# [InternalAttr1, InternalAttr1Type, Strict?], [InternalAttr2, InternalAttr2Type, Strict?]]
#
# Attributes are placed in the newick as follows:
#
# .... ,LeafAttr1:LeafAttr2)InternalAttr1:InternalAttr2 ...
#
#
# /-A
# -NoName--|
# | /-B
# \C-------|
# | /-D
# \E-------|
# \-G
#
# Format 0 = (A:0.350596,(B:0.728431,(D:0.609498,G:0.125729)1.000000:0.642905)1.000000:0.567737);
# Format 1 = (A:0.350596,(B:0.728431,(D:0.609498,G:0.125729)E:0.642905)C:0.567737);
# Format 2 = (A:0.350596,(B:0.728431,(D:0.609498,G:0.125729)1.000000:0.642905)1.000000:0.567737);
# Format 3 = (A:0.350596,(B:0.728431,(D:0.609498,G:0.125729)E:0.642905)C:0.567737);
# Format 4 = (A:0.350596,(B:0.728431,(D:0.609498,G:0.125729)));
# Format 5 = (A:0.350596,(B:0.728431,(D:0.609498,G:0.125729):0.642905):0.567737);
# Format 6 = (A:0.350596,(B:0.728431,(D:0.609498,G:0.125729)E)C);
# Format 7 = (A,(B,(D,G)E)C);
# Format 8 = (A,(B,(D,G)));
# Format 9 = (,(,(,)));
NW_FORMAT = {
0: [['name', str, True], ["dist", float, True], ['support', float, True], ["dist", float, True]], # Flexible with support
1: [['name', str, True], ["dist", float, True], ['name', str, True], ["dist", float, True]], # Flexible with internal node names
2: [['name', str, False], ["dist", float, False], ['support', float, False], ["dist", float, False]],# Strict with support values
3: [['name', str, False], ["dist", float, False], ['name', str, False], ["dist", float, False]], # Strict with internal node names
4: [['name', str, False], ["dist", float, False], [None, None, False], [None, None, False]],
5: [['name', str, False], ["dist", float, False], [None, None, False], ["dist", float, False]],
6: [['name', str, False], [None, None, False], [None, None, False], ["dist", float, False]],
7: [['name', str, False], ["dist", float, False], ["name", str, False], [None, None, False]],
8: [['name', str, False], [None, None, False], ["name", str, False], [None, None, False]],
9: [['name', str, False], [None, None, False], [None, None, False], [None, None, False]], # Only topology with node names
100: [[None, None, False], [None, None, False], [None, None, False], [None, None, False]] # Only Topology
}
def format_node(node, node_type, format, dist_formatter=None,
support_formatter=None, name_formatter=None,
quoted_names=False):
if dist_formatter is None: dist_formatter = FLOAT_FORMATTER
if support_formatter is None: support_formatter = FLOAT_FORMATTER
if name_formatter is None: name_formatter = NAME_FORMATTER
if node_type == "leaf":
container1 = NW_FORMAT[format][0][0] # name
container2 = NW_FORMAT[format][1][0] # dists
converterFn1 = NW_FORMAT[format][0][1]
converterFn2 = NW_FORMAT[format][1][1]
flexible1 = NW_FORMAT[format][0][2]
else:
container1 = NW_FORMAT[format][2][0] #support/name
container2 = NW_FORMAT[format][3][0] #dist
converterFn1 = NW_FORMAT[format][2][1]
converterFn2 = NW_FORMAT[format][3][1]
flexible1 = NW_FORMAT[format][2][2]
if converterFn1 == str:
try:
if not quoted_names:
FIRST_PART = re.sub("["+_ILEGAL_NEWICK_CHARS+"]", "_", \
str(getattr(node, container1)))
else:
FIRST_PART = str(getattr(node, container1))
if not FIRST_PART and container1 == 'name' and not flexible1:
FIRST_PART = "NoName"
except (AttributeError, TypeError):
FIRST_PART = "?"
FIRST_PART = name_formatter %FIRST_PART
if quoted_names:
#FIRST_PART = '"%s"' %FIRST_PART.decode('string_escape').replace('"', '\\"')
FIRST_PART = '"%s"' %FIRST_PART
elif converterFn1 is None:
FIRST_PART = ""
else:
try:
FIRST_PART = support_formatter %(converterFn2(getattr(node, container1)))
except (ValueError, TypeError):
FIRST_PART = "?"
if converterFn2 == str:
try:
SECOND_PART = ":"+re.sub("["+_ILEGAL_NEWICK_CHARS+"]", "_", \
str(getattr(node, container2)))
except (ValueError, TypeError):
SECOND_PART = ":?"
elif converterFn2 is None:
SECOND_PART = ""
else:
try:
#SECOND_PART = ":%0.6f" %(converterFn2(getattr(node, container2)))
SECOND_PART = ":%s" %(dist_formatter %(converterFn2(getattr(node, container2))))
except (ValueError, TypeError):
SECOND_PART = ":?"
return "%s%s" %(FIRST_PART, SECOND_PART)
def print_supported_formats():
from ..coretype.tree import TreeNode
t = TreeNode()
t.populate(4, "ABCDEFGHI")
print(t)
for f in NW_FORMAT:
print("Format", f,"=", write_newick(t, features=None, format=f))
class NewickError(Exception):
"""Exception class designed for NewickIO errors."""
def __init__(self, value):
if value is None:
value = ''
value += "\nYou may want to check other newick loading flags like 'format' or 'quoted_node_names'."
Exception.__init__(self, value)
def read_newick(newick, root_node=None, format=0, quoted_names=False):
""" Reads a newick tree from either a string or a file, and returns
an ETE tree structure.
A previously existent node object can be passed as the root of the
tree, which means that all its new children will belong to the same
class as the root(This allows to work with custom TreeNode
objects).
You can also take advantage from this behaviour to concatenate
several tree structures.
"""
if root_node is None:
from ..coretype.tree import TreeNode
root_node = TreeNode()
if isinstance(newick, six.string_types):
if os.path.exists(newick):
if newick.endswith('.gz'):
import gzip
nw = gzip.open(newick).read()
else:
nw = open(newick, 'rU').read()
else:
nw = newick
matcher = compile_matchers(formatcode=format)
nw = nw.strip()
if not nw.startswith('(') and nw.endswith(';'):
#return _read_node_data(nw[:-1], root_node, "single", matcher, format)
return _read_newick_from_string(nw, root_node, matcher, format, quoted_names)
elif not nw.startswith('(') or not nw.endswith(';'):
raise NewickError('Unexisting tree file or Malformed newick tree structure.')
else:
return _read_newick_from_string(nw, root_node, matcher, format, quoted_names)
else:
raise NewickError("'newick' argument must be either a filename or a newick string.")
def _read_newick_from_string(nw, root_node, matcher, formatcode, quoted_names):
""" Reads a newick string in the New Hampshire format. """
if quoted_names:
# Quoted text is mapped to references
quoted_map = {}
unquoted_nw = ''
counter = 0
for token in re.split(_QUOTED_TEXT_RE, nw):
counter += 1
if counter % 2 == 1 : # normal newick tree structure data
unquoted_nw += token
else: # quoted text, add to dictionary and replace with reference
quoted_ref_id= _QUOTED_TEXT_PREFIX + str(int(counter/2))
unquoted_nw += quoted_ref_id
quoted_map[quoted_ref_id]=token[1:-1] # without the quotes
nw = unquoted_nw
if not nw.startswith('(') and nw.endswith(';'):
_read_node_data(nw[:-1], root_node, "single", matcher, formatcode)
if quoted_names:
if root_node.name.startswith(_QUOTED_TEXT_PREFIX):
root_node.name = quoted_map[root_node.name]
return root_node
if nw.count('(') != nw.count(')'):
raise NewickError('Parentheses do not match. Broken tree structure?')
# white spaces and separators are removed
nw = re.sub("[\n\r\t]+", "", nw)
current_parent = None
# Each chunk represents the content of a parent node, and it could contain
# leaves and closing parentheses.
# We may find:
# leaf, ..., leaf,
# leaf, ..., leaf))),
# leaf)), leaf, leaf))
# leaf))
# ) only if formatcode == 100
for chunk in nw.split("(")[1:]:
# If no node has been created so far, this is the root, so use the node.
current_parent = root_node if current_parent is None else current_parent.add_child()
subchunks = [ch.strip() for ch in chunk.split(",")]
# We expect the chunk to have ended with a comma (if the next chunk is an
# internal sister node) or with a subchunk containing closing parentheses up to the end of the tree.
#[leaf, leaf, '']
#[leaf, leaf, ')))', leaf, leaf, '']
#[leaf, leaf, ')))', leaf, leaf, '']
#[leaf, leaf, ')))', leaf), leaf, 'leaf);']
if subchunks[-1] != '' and not subchunks[-1].endswith(';'):
raise NewickError('Broken newick structure at: %s' %chunk)
# lets process the subchunks. Every closing parenthesis will close a
# node and go up one level.
for i, leaf in enumerate(subchunks):
if leaf.strip() == '' and i == len(subchunks) - 1:
continue # "blah blah ,( blah blah"
closing_nodes = leaf.split(")")
# first part after splitting by ) always contain leaf info
_read_node_data(closing_nodes[0], current_parent, "leaf", matcher, formatcode)
# next contain closing nodes and data about the internal nodes.
if len(closing_nodes)>1:
for closing_internal in closing_nodes[1:]:
closing_internal = closing_internal.rstrip(";")
# read internal node data and go up one level
_read_node_data(closing_internal, current_parent, "internal", matcher, formatcode)
current_parent = current_parent.up
# references in node names are replaced with quoted text before returning
if quoted_names:
for node in root_node.traverse():
if node.name.startswith(_QUOTED_TEXT_PREFIX):
node.name = quoted_map[node.name]
return root_node
def _parse_extra_features(node, NHX_string):
""" Reads node's extra data form its NHX string. NHX uses this
format: [&&NHX:prop1=value1:prop2=value2] """
NHX_string = NHX_string.replace("[&&NHX:", "")
NHX_string = NHX_string.replace("]", "")
for field in NHX_string.split(":"):
try:
pname, pvalue = field.split("=")
except ValueError as e:
raise NewickError('Invalid NHX format %s' %field)
node.add_feature(pname, pvalue)
def compile_matchers(formatcode):
matchers = {}
for node_type in ["leaf", "single", "internal"]:
if node_type == "leaf" or node_type == "single":
container1 = NW_FORMAT[formatcode][0][0]
container2 = NW_FORMAT[formatcode][1][0]
converterFn1 = NW_FORMAT[formatcode][0][1]
converterFn2 = NW_FORMAT[formatcode][1][1]
flexible1 = NW_FORMAT[formatcode][0][2]
flexible2 = NW_FORMAT[formatcode][1][2]
else:
container1 = NW_FORMAT[formatcode][2][0]
container2 = NW_FORMAT[formatcode][3][0]
converterFn1 = NW_FORMAT[formatcode][2][1]
converterFn2 = NW_FORMAT[formatcode][3][1]
flexible1 = NW_FORMAT[formatcode][2][2]
flexible2 = NW_FORMAT[formatcode][3][2]
if converterFn1 == str:
FIRST_MATCH = "("+_NAME_RE+")"
elif converterFn1 == float:
FIRST_MATCH = "("+_FLOAT_RE+")"
elif converterFn1 is None:
FIRST_MATCH = '()'
if converterFn2 == str:
SECOND_MATCH = "(:"+_NAME_RE+")"
elif converterFn2 == float:
SECOND_MATCH = "(:"+_FLOAT_RE+")"
elif converterFn2 is None:
SECOND_MATCH = '()'
if flexible1 and node_type != 'leaf':
FIRST_MATCH += "?"
if flexible2:
SECOND_MATCH += "?"
matcher_str= '^\s*%s\s*%s\s*(%s)?\s*$' % (FIRST_MATCH, SECOND_MATCH, _NHX_RE)
compiled_matcher = re.compile(matcher_str)
matchers[node_type] = [container1, container2, converterFn1, converterFn2, compiled_matcher]
return matchers
def _read_node_data(subnw, current_node, node_type, matcher, formatcode):
""" Reads a leaf node from a subpart of the original newick
tree """
if node_type == "leaf" or node_type == "single":
if node_type == "leaf":
node = current_node.add_child()
else:
node = current_node
else:
node = current_node
subnw = subnw.strip()
if not subnw and node_type == 'leaf' and formatcode != 100:
raise NewickError('Empty leaf node found')
elif not subnw:
return
container1, container2, converterFn1, converterFn2, compiled_matcher = matcher[node_type]
data = re.match(compiled_matcher, subnw)
if data:
data = data.groups()
# This prevents ignoring errors even in flexible nodes:
if subnw and data[0] is None and data[1] is None and data[2] is None:
raise NewickError("Unexpected newick format '%s'" %subnw)
if data[0] is not None and data[0] != '':
node.add_feature(container1, converterFn1(data[0].strip()))
if data[1] is not None and data[1] != '':
node.add_feature(container2, converterFn2(data[1][1:].strip()))
if data[2] is not None \
and data[2].startswith("[&&NHX"):
_parse_extra_features(node, data[2])
else:
raise NewickError("Unexpected newick format '%s' " %subnw[0:50])
return
def write_newick(rootnode, features=None, format=1, format_root_node=True,
is_leaf_fn=None, dist_formatter=None, support_formatter=None,
name_formatter=None, quoted_names=False):
""" Iteratively export a tree structure and returns its NHX
representation. """
newick = []
leaf = is_leaf_fn if is_leaf_fn else lambda n: not bool(n.children)
for postorder, node in rootnode.iter_prepostorder(is_leaf_fn=is_leaf_fn):
if postorder:
newick.append(")")
if node.up is not None or format_root_node:
newick.append(format_node(node, "internal", format,
dist_formatter=dist_formatter,
support_formatter=support_formatter,
name_formatter=name_formatter,
quoted_names=quoted_names))
newick.append(_get_features_string(node, features))
else:
if node is not rootnode and node != node.up.children[0]:
newick.append(",")
if leaf(node):
newick.append(format_node(node, "leaf", format,
dist_formatter=dist_formatter,
support_formatter=support_formatter,
name_formatter=name_formatter,
quoted_names=quoted_names))
newick.append(_get_features_string(node, features))
else:
newick.append("(")
newick.append(";")
return ''.join(newick)
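# Illustrative round trip (a sketch, assuming an ETE TreeNode tree; the names
# and newick string below are made up):
#
#     t = read_newick("(A:1,(B:1,(D:1,G:1):0.5):0.5);", format=0)
#     nw = write_newick(t, format=0)
#
# read_newick() accepts either a filename or a newick string, and
# write_newick() returns the serialised string rather than writing to disk.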
def _get_features_string(self, features=None):
""" Generates the extended newick string NHX with extra data about
a node. """
string = ""
if features is None:
features = []
elif features == []:
features = sorted(self.features)
for pr in features:
if hasattr(self, pr):
raw = getattr(self, pr)
if type(raw) in ITERABLE_TYPES:
raw = '|'.join(map(str, raw))
elif type(raw) == dict:
raw = '|'.join(map(lambda x,y: "%s-%s" %(x, y), six.iteritems(raw)))
elif type(raw) == str:
pass
else:
raw = str(raw)
value = re.sub("["+_ILEGAL_NEWICK_CHARS+"]", "_", \
raw)
if string != "":
string +=":"
string +="%s=%s" %(pr, str(value))
if string != "":
string = "[&&NHX:"+string+"]"
return string
| gpl-3.0 | -3,665,706,475,217,138,700 | 39.299197 | 139 | 0.567891 | false |
CERN-CERT/WAD | setup.py | 1 | 1578 | from setuptools import setup, find_packages
try:
import pypandoc
long_description = pypandoc.convert('README.md', to='rst', format='md')
long_description += "\n\n" + pypandoc.convert('AUTHORS.md', to='rst', format='md')
except (IOError, ImportError):
long_description = ''
setup(
name='wad',
version='0.4.6',
description='A tool for detecting technologies used by web applications.',
long_description=long_description,
url='https://github.com/CERN-CERT/WAD',
license='GPLv3',
author='Sebastian Lopienski',
author_email='[email protected]',
packages=find_packages(),
include_package_data=True,
requires=['six'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Information Technology',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Security',
'Topic :: Internet :: WWW/HTTP',
],
entry_points={
'console_scripts': [
'wad = wad.__main__:main'
]
},
)
| gpl-3.0 | -6,747,920,885,555,944,000 | 34.066667 | 86 | 0.603929 | false |
AFEPython/comicninja | src/comicninja.py | 1 | 7324 | from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, views
from pymongo import MongoClient
from pymongo.cursor import Cursor as MongoCursor
from ConfigParser import SafeConfigParser
from datetime import datetime, timedelta
from Crypto.Hash import SHA256
import os
import os.path
import shutil
import functools
import urllib,urllib2
import json
#configuration
comicninja = Flask(__name__)
comicninja.config.from_object(__name__)
comicninja.secret_key = '\x14\xba\xd2\xc4N\xca\xc9Z\x9bJ#.\x80\x87'
mongo_client = MongoClient()
db = mongo_client['comic_ninja_database']
users = db['users']
####### SEND THIS TO ITS OWN FILE, EVENTUALLY ########
# Convenience methods
def serialize_mongo(result):
# Check if this has an _id with ObjectId
if type(result) is dict:
if '_id' in result:
result['_id'] = str(result['_id'])
if 'password' in result:
del result['password']
for key in result:
if type(result[key]) is MongoCursor:
result[key] = serialize_mongo(result[key])
new_result = result
# Otherwise, treat it as a <pymongo.cursor.Cursor>
elif (type(result) is MongoCursor or
type(result) is list):
new_result = []
for item in result:
new_result.append(serialize_mongo(item))
return new_result
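# Illustrative example (hypothetical document): a record such as
#     {'_id': ObjectId('...'), 'username': 'kunoichi', 'password': '<hash>'}
# serialises to {'_id': '...', 'username': 'kunoichi'} -- the ObjectId is
# stringified and the password hash is stripped before the dict is stored in
# the session or handed to a template.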
def login_required(f):
@functools.wraps(f)
def wrapper(*args,**kwargs):
if "user" in session:
return f(*args,**kwargs)
else:
flash("Enter the Dojo with your secret Comic Ninja name and password.")
return redirect(url_for("login"))
return wrapper;
def handle_logout(f):
@functools.wraps(f)
def wrapper(*args,**kwargs):
if "logout" in request.form:
session.pop("user",None)
return redirect(url_for("home"))
else:
return f(*args,**kwargs)
return wrapper
def salty_password(username, password):
'''@returns a salted password given a username and plain text pw.'''
user_hashbrown = SHA256.new(username).hexdigest()
salt = ''.join(
[user_hashbrown[i] for i in range(0, len(user_hashbrown), 3)]
)
password_hash = SHA256.new(salt+password).hexdigest()
return password_hash
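# Illustrative note (no new behaviour added): verifying a login is simply a
# matter of recomputing the salted hash for the submitted credentials and
# comparing it with the stored digest, which is what Login.post() does below
# via users.find_one({'username': u, 'password': salty_password(u, p)}).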
# The comicninja Object Classification
class Home(views.MethodView):
def get(self):
context = {}
context["page_title"] = "Welcome to the Comic Ninja Dojo"
return render_template("home.html5")
# User Login
class Login(views.MethodView):
def get(self):
# Return a login page.
context = {}
context["page_title"] = "Enter the Comic Ninja Dojo"
return render_template("login.html5")
def post(self):
# Process the login request.
u = request.form['username']
p = request.form['password']
# redirect_url = request.form['redirect_url']
# redirect_url = request.args.get['redirect_url']
# query_string = request.query_string
user = users.find_one({
'username': u,
'password': salty_password(u, p)
})
if user is not None:
session['user'] = serialize_mongo(user)
else:
flash("Either your username or password is incorrect.")
return redirect(url_for("login"))
return redirect(url_for('dashboard'))
class Logout(views.MethodView):
def get(self):
return self.logout()
def post(self):
return self.logout()
def logout(self):
session.pop("user", None)
return redirect(url_for("home"))
class Register(views.MethodView):
def get(self):
# Return a register page.
context = {}
context["page_title"] = "Join the Comic Ninja Dojo"
return render_template("register.html5", **context)
def post(self):
context = {}
context["page_title"] = "Join the Comic Ninja Dojo"
errors = []
# Process the registration request.
if request.form['username']:
if request.form['password'] == request.form['password1']:
user = self.register(request.form)
session['user'] = user
else:
errors.append('On the job, incorrectly typed passcodes may hamper your unit\'s security. Please be more careful in the future. You may attempt registration again.')
else:
errors.append('Please choose a Ninja Codename so that we may know how to address you.')
context['errors'] = errors
if len(errors) != 0:
return render_template("register.html5", **context)
return redirect(url_for('dashboard'))
def register(self, form):
print "You are registered as {0}, {1}".format(form['username'], form['name'])
new_user = {
'username': form['username'],
'name': form['name'],
'email': form['email'],
'password': salty_password(form['username'], form['password'])
}
new_user_id = users.insert(new_user)
new_user['_id'] = new_user_id
return serialize_mongo(new_user)
class Dashboard(views.MethodView):
@login_required
def get(self):
context = {}
context['page_title'] = "Your Ninja Home Base"
context['user'] = session['user']
context['errors'] = []
return render_template('dashboard.html5', **context)
class ComicList(views.MethodView):
@login_required
def get(self):
pass
@login_required
def post(self):
pass
class ComicEdit(views.MethodView):
@login_required
def get(self):
pass
@login_required
def post(self):
pass
class ComicDelete(views.MethodView):
@login_required
def delete(self):
pass
##### SEND THIS CODE TO ITS OWN FILE, EVENTUALLY #####
# Rules for the comicninja urls, so the comicninjas get to where they want to go
comicninja.add_url_rule("/",
view_func=Home.as_view('home'),
methods=["GET"])
comicninja.add_url_rule("/login",
view_func=Login.as_view('login'),
methods=["GET","POST"])
comicninja.add_url_rule("/logout",
view_func=Logout.as_view('logout'),
methods=["GET","POST"])
comicninja.add_url_rule("/register",
view_func=Register.as_view('register'),
methods=["GET","POST"])
comicninja.add_url_rule("/dashboard",
view_func = Dashboard.as_view('dashboard'),
methods=["GET"])
comicninja.add_url_rule("/comics/list",
view_func=ComicList.as_view("comic_list"),
methods=["GET","POST"])
comicninja.add_url_rule("/comics/<comic_id>/edit",
view_func=ComicEdit.as_view("edit_comic"),
methods=["GET","POST"])
comicninja.add_url_rule("/comics/<comic_id>/delete",
view_func=ComicDelete.as_view("delete_comic"),
methods=["DELETE"])
if (__name__ == "__main__"):
config = SafeConfigParser()
config_name = os.path.join(comicninja.root_path, '..', 'comicninja.cfg')
if not os.path.isfile(config_name):
shutil.copyfile(os.path.join(comicninja.root_path, '..', 'comicninja.default.cfg'), config_name)
config.read(config_name)
port = config.getint('server', 'port')
comicninja.debug = config.getboolean('server', 'debug')
comicninja.run(host="0.0.0.0",port=port)
| gpl-2.0 | 2,637,422,542,135,692,000 | 29.902954 | 180 | 0.617013 | false |
chippey/gaffer | python/GafferUI/FileMenu.py | 1 | 12763 | ##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2012-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import re
import os
import IECore
import Gaffer
import GafferUI
## Appends items to the IECore.MenuDefinition object passed to build a File menu containing
# standard open/save/revert/etc
def appendDefinitions( menuDefinition, prefix="" ) :
menuDefinition.append( prefix + "/New", { "command" : new, "shortCut" : "Ctrl+N" } )
menuDefinition.append( prefix + "/Open...", { "command" : open, "shortCut" : "Ctrl+O" } )
menuDefinition.append( prefix + "/Open Recent", { "subMenu" : openRecent } )
menuDefinition.append( prefix + "/OpenDivider", { "divider" : True } )
menuDefinition.append( prefix + "/Save", { "command" : save, "shortCut" : "Ctrl+S" } )
menuDefinition.append( prefix + "/Save As...", { "command" : saveAs, "shortCut" : "Shift+Ctrl+S" } )
menuDefinition.append( prefix + "/Revert To Saved", { "command" : revertToSaved, "active" : __revertToSavedAvailable } )
menuDefinition.append( prefix + "/SaveDivider", { "divider" : True } )
menuDefinition.append( prefix + "/Export Selection...", { "command" : exportSelection, "active" : __selectionAvailable } )
menuDefinition.append( prefix + "/Import...", { "command" : importFile } )
menuDefinition.append( prefix + "/ImportExportDivider", { "divider" : True } )
menuDefinition.append( prefix + "/Settings...", { "command" : showSettings } )
## A function suitable as the command for a File/New menu item. It must be invoked from a menu which
# has a ScriptWindow in its ancestry.
def new( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
application = scriptWindow.scriptNode().ancestor( Gaffer.ApplicationRoot )
newScript = Gaffer.ScriptNode()
application["scripts"].addChild( newScript )
## A function suitable as the command for a File/Open menu item. It must be invoked from a menu which
# has a ScriptWindow in its ancestry.
def open( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
path, bookmarks = __pathAndBookmarks( scriptWindow )
dialogue = GafferUI.PathChooserDialogue( path, title="Open script", confirmLabel="Open", valid=True, leaf=True, bookmarks=bookmarks )
path = dialogue.waitForPath( parentWindow = scriptWindow )
if not path :
return
__open( scriptWindow.scriptNode(), str( path ) )
def __open( currentScript, fileName ) :
application = currentScript.ancestor( Gaffer.ApplicationRoot )
script = Gaffer.ScriptNode()
script["fileName"].setValue( fileName )
with GafferUI.ErrorDialogue.ErrorHandler(
title = "Errors Occurred During Loading",
closeLabel = "Oy vey",
parentWindow = GafferUI.ScriptWindow.acquire( currentScript )
) :
script.load( continueOnError = True )
application["scripts"].addChild( script )
addRecentFile( application, fileName )
removeCurrentScript = False
if not currentScript["fileName"].getValue() and not currentScript["unsavedChanges"].getValue() :
# the current script is empty - the user will think of the operation as loading
# the new script into the current window, rather than adding a new window. so make it
# look like that.
currentWindow = GafferUI.ScriptWindow.acquire( currentScript )
newWindow = GafferUI.ScriptWindow.acquire( script )
## \todo We probably want a way of querying and setting geometry in the public API
newWindow._qtWidget().restoreGeometry( currentWindow._qtWidget().saveGeometry() )
currentWindow.setVisible( False )
removeCurrentScript = True
# We must defer the removal of the old script because otherwise we trigger a crash bug
# in PySide - I think this is because the menu item that invokes us is a child of
# currentWindow, and that will get deleted immediately when the script is removed.
if removeCurrentScript :
GafferUI.EventLoop.addIdleCallback( IECore.curry( __removeScript, application, currentScript ) )
def __removeScript( application, script ) :
application["scripts"].removeChild( script )
return False # remove idle callback
## A function suitable as the submenu callable for a File/OpenRecent menu item. It must be invoked
# from a menu which has a ScriptWindow in its ancestry.
def openRecent( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
currentScript = scriptWindow.scriptNode()
applicationRoot = currentScript.ancestor( Gaffer.ApplicationRoot )
recentFiles = []
with IECore.IgnoredExceptions( AttributeError ) :
recentFiles = applicationRoot.__recentFiles
result = IECore.MenuDefinition()
if recentFiles :
for index, fileName in enumerate( recentFiles ) :
result.append(
"/" + str( index ),
{
"label": os.path.basename( fileName ),
"command" : IECore.curry( __open, currentScript, fileName ),
"description" : fileName,
"active" : os.path.isfile( fileName )
}
)
else :
result.append( "/None Available", { "active" : False } )
return result
## This function adds a file to the list shown in the File/OpenRecent menu, and saves a recentFiles.py
# in the application's user startup folder so the settings will persist.
def addRecentFile( application, fileName ) :
if isinstance( application, Gaffer.Application ) :
applicationRoot = application.root()
else :
applicationRoot = application
try :
applicationRoot.__recentFiles
except AttributeError :
applicationRoot.__recentFiles = []
if fileName in applicationRoot.__recentFiles :
applicationRoot.__recentFiles.remove( fileName )
applicationRoot.__recentFiles.insert( 0, fileName )
del applicationRoot.__recentFiles[6:]
f = file( os.path.join( applicationRoot.preferencesLocation(), "recentFiles.py" ), "w" )
f.write( "# This file was automatically generated by Gaffer.\n" )
f.write( "# Do not edit this file - it will be overwritten.\n\n" )
f.write( "import GafferUI\n" )
for fileName in reversed( applicationRoot.__recentFiles ) :
f.write( "GafferUI.FileMenu.addRecentFile( application, \"%s\" )\n" % fileName )
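# For reference, the generated recentFiles.py takes roughly this form, with one
# addRecentFile() call per remembered script, oldest first (the path shown is
# only an example):
#
#     # This file was automatically generated by Gaffer.
#     # Do not edit this file - it will be overwritten.
#
#     import GafferUI
#     GafferUI.FileMenu.addRecentFile( application, "/path/to/script.gfr" )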
## A function suitable as the command for a File/Save menu item. It must be invoked from a menu which
# has a ScriptWindow in its ancestry.
def save( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
if script["fileName"].getValue() :
with GafferUI.ErrorDialogue.ErrorHandler( title = "Error Saving File", parentWindow = scriptWindow ) :
script.save()
else :
saveAs( menu )
## A function suitable as the command for a File/Save As menu item. It must be invoked from a menu which
# has a ScriptWindow in its ancestry.
def saveAs( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
path, bookmarks = __pathAndBookmarks( scriptWindow )
dialogue = GafferUI.PathChooserDialogue( path, title="Save script", confirmLabel="Save", leaf=True, bookmarks=bookmarks )
path = dialogue.waitForPath( parentWindow = scriptWindow )
if not path :
return
path = str( path )
if not path.endswith( ".gfr" ) :
path += ".gfr"
script["fileName"].setValue( path )
with GafferUI.ErrorDialogue.ErrorHandler( title = "Error Saving File", parentWindow = scriptWindow ) :
script.save()
application = script.ancestor( Gaffer.ApplicationRoot )
addRecentFile( application, path )
## A function suitable as the command for a File/Revert To Saved menu item. It must be invoked from a menu which
# has a ScriptWindow in its ancestry.
def revertToSaved( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
script.load()
def __revertToSavedAvailable( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
if script["fileName"].getValue() and script["unsavedChanges"].getValue() :
return True
return False
## A function suitable as the command for a File/Export Selection... menu item. It must be invoked from a menu which
# has a ScriptWindow in its ancestry.
def exportSelection( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
path, bookmarks = __pathAndBookmarks( scriptWindow )
selection = script.selection()
parent = selection[0].parent()
for node in selection :
if not parent.isAncestorOf( node ) :
assert( node.parent().isAncestorOf( parent ) )
parent = node.parent()
dialogue = GafferUI.PathChooserDialogue( path, title="Export selection", confirmLabel="Export", leaf=True, bookmarks=bookmarks )
path = dialogue.waitForPath( parentWindow = scriptWindow )
if not path :
return
path = str( path )
if not path.endswith( ".gfr" ) :
path += ".gfr"
script.serialiseToFile( path, parent, script.selection() )
## A function suitable as the command for a File/Import File... menu item. It must be invoked from a menu which
# has a ScriptWindow in its ancestry.
def importFile( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
path, bookmarks = __pathAndBookmarks( scriptWindow )
dialogue = GafferUI.PathChooserDialogue( path, title="Import script", confirmLabel="Import", valid=True, leaf=True, bookmarks=bookmarks )
path = dialogue.waitForPath( parentWindow = scriptWindow )
if path is None :
return
newChildren = []
c = script.childAddedSignal().connect( lambda parent, child : newChildren.append( child ) )
with Gaffer.UndoContext( script ) :
## \todo We need to prevent the ScriptNode plugs themselves getting clobbered
# when importing an entire script.
script.executeFile( str( path ) )
newNodes = [ c for c in newChildren if isinstance( c, Gaffer.Node ) ]
script.selection().clear()
script.selection().add( newNodes )
## \todo Position the nodes somewhere sensible if there's a Node Graph available
## A function suitable as the command for a File/Settings... menu item.
def showSettings( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
settingsWindow = None
for window in scriptWindow.childWindows() :
if hasattr( window, "_settingsEditor" ) :
settingsWindow = window
break
if settingsWindow is None :
settingsWindow = GafferUI.Window( "Settings", borderWidth=8 )
settingsWindow._settingsEditor = True
settingsWindow.setChild( GafferUI.NodeUI.create( scriptWindow.scriptNode() ) )
scriptWindow.addChildWindow( settingsWindow )
settingsWindow.setVisible( True )
def __selectionAvailable( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
return True if scriptWindow.scriptNode().selection().size() else False
def __pathAndBookmarks( scriptWindow ) :
bookmarks = GafferUI.Bookmarks.acquire(
scriptWindow,
pathType = Gaffer.FileSystemPath,
category = "script",
)
currentFileName = scriptWindow.scriptNode()["fileName"].getValue()
if currentFileName :
path = Gaffer.FileSystemPath( os.path.dirname( os.path.abspath( currentFileName ) ) )
else :
path = Gaffer.FileSystemPath( bookmarks.getDefault( scriptWindow ) )
path.setFilter( Gaffer.FileSystemPath.createStandardFilter( [ "gfr" ] ) )
return path, bookmarks
| bsd-3-clause | 4,047,465,248,674,811,400 | 36.985119 | 138 | 0.726867 | false |
mrroach/CentralServer | csrv/model/cards/ice.py | 1 | 2192 | """An ice card."""
from csrv.model import actions
from csrv.model import events
from csrv.model import game_object
from csrv.model import modifiers
from csrv.model import parameters
from csrv.model.cards import card_base
from csrv.model import timing_phases
from csrv.model.cards import card_info
class Ice(card_base.CardBase):
TYPE = card_info.ICE
REZZABLE = True
WHEN_IN_HAND_PROVIDES_CHOICES_FOR = {
timing_phases.CorpTurnActions: 'install_actions',
}
WHEN_APPROACHED_PROVIDES_CHOICES_FOR = {
timing_phases.CorpRezIce: 'rez_actions',
}
WHEN_APPROACHED_LISTENS = []
WHEN_INSTALLED_LISTENS = [
events.EndEncounterIce_3_2,
]
def __init__(self, game, player):
card_base.CardBase.__init__(self, game, player)
self.subroutines = []
@property
@modifiers.modifiable(modifiers.IceStrengthModifier)
def strength(self):
return self.STRENGTH
@property
@modifiers.modifiable(modifiers.IceRezCostModifier)
def cost(self):
return self.COST
def build_actions(self):
self.install_action = actions.InstallIce(self.game, self.player, self)
self._rez_action = actions.RezIce(self.game, self.player, self)
def install_actions(self):
if self.player.clicks.value:
return [self.install_action]
return []
def rez_actions(self):
if not self.is_rezzed:
return [self._rez_action]
return []
def on_begin_approach(self):
self._setup_choices('APPROACHED')
def on_end_approach(self):
self._teardown_choices('APPROACHED')
def on_end_encounter_ice_3_2(self, event, sender):
for sub in self.subroutines:
sub.is_broken = False
def on_rez(self):
card_base.CardBase.on_rez(self)
self.trigger_event(events.RezIce(self.game, self.player))
def on_install(self):
card_base.CardBase.on_install(self)
self.trigger_event(events.InstallIce(self.game, self.player))
def on_uninstall(self):
card_base.CardBase.on_uninstall(self)
self.trigger_event(events.UninstallIce(self.game, self.player))
def _break_for_click(self, subroutine):
return actions.BreakSubroutine(
self.game, self.game.runner, self,
subroutine, credits=0, clicks=1)
| apache-2.0 | -1,993,582,687,030,194,700 | 25.095238 | 74 | 0.703467 | false |
tongpo/Holle-World | py/easyGUI/demoprograms/chapter4/textfielddemo.py | 1 | 1579 | """
File: textfielddemo.py
Author: Kenneth A. Lambert
"""
from breezypythongui import EasyFrame
class TextFieldDemo(EasyFrame):
"""Converts an input string to uppercase and displays the result."""
def __init__(self):
"""Sets up the window and widgets."""
EasyFrame.__init__(self)
# Label and field for the input
self.addLabel(text = "Input",
row = 0, column = 0)
self.inputField = self.addTextField(text = "",
row = 0,
column = 1)
# Label and field for the output
self.addLabel(text = "Output",
row = 1, column = 0)
self.outputField = self.addTextField(text = "",
row = 1,
column = 1,
state = "readonly")
# The command button
self.button = self.addButton(text = "Convert",
row = 2, column = 0,
columnspan = 2,
command = self.convert)
# The event handling method for the button
def convert(self):
"""Inputs the string, converts it to uppercase,
and outputs the result."""
text = self.inputField.getText()
result = text.upper()
self.outputField.setText(result)
# Instantiate and pop up the window.
if __name__ == "__main__":
TextFieldDemo().mainloop()
| gpl-2.0 | 3,305,140,002,116,649,500 | 33.326087 | 73 | 0.471184 | false |
Kraymer/beets | beets/mediafile.py | 1 | 70175 | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Handles low-level interfacing for files' tags. Wraps Mutagen to
automatically detect file types and provide a unified interface for a
useful subset of music files' tags.
Usage:
>>> f = MediaFile('Lucy.mp3')
>>> f.title
u'Lucy in the Sky with Diamonds'
>>> f.artist = 'The Beatles'
>>> f.save()
A field will always return a reasonable value of the correct type, even
if no tag is present. If no value is available, the value will be false
(e.g., zero or the empty string).
Internally ``MediaFile`` uses ``MediaField`` descriptors to access the
data from the tags. In turn ``MediaField`` uses a number of
``StorageStyle`` strategies to handle format specific logic.
"""
from __future__ import division, absolute_import, print_function
import mutagen
import mutagen.id3
import mutagen.mp4
import mutagen.flac
import mutagen.asf
import codecs
import datetime
import re
import base64
import binascii
import math
import struct
import imghdr
import os
import traceback
import enum
import logging
import six
__all__ = ['UnreadableFileError', 'FileTypeError', 'MediaFile']
log = logging.getLogger(__name__)
# Human-readable type names.
TYPES = {
'mp3': 'MP3',
'aac': 'AAC',
'alac': 'ALAC',
'ogg': 'OGG',
'opus': 'Opus',
'flac': 'FLAC',
'ape': 'APE',
'wv': 'WavPack',
'mpc': 'Musepack',
'asf': 'Windows Media',
'aiff': 'AIFF',
'dsf': 'DSD Stream File',
}
PREFERRED_IMAGE_EXTENSIONS = {'jpeg': 'jpg'}
# Exceptions.
class UnreadableFileError(Exception):
"""Mutagen is not able to extract information from the file.
"""
def __init__(self, path, msg):
Exception.__init__(self, msg if msg else repr(path))
class FileTypeError(UnreadableFileError):
"""Reading this type of file is not supported.
    If passed the `mutagen_type` argument, this indicates that the
    mutagen type is not supported by `MediaFile`.
"""
def __init__(self, path, mutagen_type=None):
if mutagen_type is None:
msg = repr(path)
else:
msg = u'{0}: of mutagen type {1}'.format(repr(path), mutagen_type)
Exception.__init__(self, msg)
class MutagenError(UnreadableFileError):
"""Raised when Mutagen fails unexpectedly---probably due to a bug.
"""
def __init__(self, path, mutagen_exc):
msg = u'{0}: {1}'.format(repr(path), mutagen_exc)
Exception.__init__(self, msg)
# Interacting with Mutagen.
def mutagen_call(action, path, func, *args, **kwargs):
"""Call a Mutagen function with appropriate error handling.
`action` is a string describing what the function is trying to do,
and `path` is the relevant filename. The rest of the arguments
describe the callable to invoke.
We require at least Mutagen 1.33, where `IOError` is *never* used,
neither for internal parsing errors *nor* for ordinary IO error
conditions such as a bad filename. Mutagen-specific parsing errors and IO
errors are reraised as `UnreadableFileError`. Other exceptions
raised inside Mutagen---i.e., bugs---are reraised as `MutagenError`.
"""
try:
return func(*args, **kwargs)
except mutagen.MutagenError as exc:
log.debug(u'%s failed: %s', action, six.text_type(exc))
raise UnreadableFileError(path, six.text_type(exc))
except Exception as exc:
# Isolate bugs in Mutagen.
log.debug(u'%s', traceback.format_exc())
log.error(u'uncaught Mutagen exception in %s: %s', action, exc)
raise MutagenError(path, exc)
# Utility.
def _safe_cast(out_type, val):
"""Try to covert val to out_type but never raise an exception. If
the value can't be converted, then a sensible default value is
returned. out_type should be bool, int, or unicode; otherwise, the
value is just passed through.
"""
if val is None:
return None
if out_type == int:
if isinstance(val, int) or isinstance(val, float):
# Just a number.
return int(val)
else:
# Process any other type as a string.
if not isinstance(val, six.string_types):
val = six.text_type(val)
# Get a number from the front of the string.
val = re.match(r'[0-9]*', val.strip()).group(0)
if not val:
return 0
else:
return int(val)
elif out_type == bool:
try:
# Should work for strings, bools, ints:
return bool(int(val))
except ValueError:
return False
elif out_type == six.text_type:
if isinstance(val, bytes):
return val.decode('utf-8', 'ignore')
elif isinstance(val, six.text_type):
return val
else:
return six.text_type(val)
elif out_type == float:
if isinstance(val, int) or isinstance(val, float):
return float(val)
else:
if isinstance(val, bytes):
val = val.decode('utf-8', 'ignore')
else:
val = six.text_type(val)
match = re.match(r'[\+-]?([0-9]+\.?[0-9]*|[0-9]*\.[0-9]+)',
val.strip())
if match:
val = match.group(0)
if val:
return float(val)
return 0.0
else:
return val
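# Illustrative behaviour of _safe_cast, derived from the code above (not an
# exhaustive specification):
#
#     _safe_cast(int, u'12/14')          # -> 12 (leading digits only)
#     _safe_cast(int, u'junk')           # -> 0
#     _safe_cast(bool, u'1')             # -> True
#     _safe_cast(float, u'2.5 dB')       # -> 2.5
#     _safe_cast(six.text_type, b'abc')  # -> u'abc'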
# Image coding for ASF/WMA.
def _unpack_asf_image(data):
"""Unpack image data from a WM/Picture tag. Return a tuple
containing the MIME type, the raw image data, a type indicator, and
the image's description.
This function is treated as "untrusted" and could throw all manner
of exceptions (out-of-bounds, etc.). We should clean this up
sometime so that the failure modes are well-defined.
"""
type, size = struct.unpack_from('<bi', data)
pos = 5
mime = b''
while data[pos:pos + 2] != b'\x00\x00':
mime += data[pos:pos + 2]
pos += 2
pos += 2
description = b''
while data[pos:pos + 2] != b'\x00\x00':
description += data[pos:pos + 2]
pos += 2
pos += 2
image_data = data[pos:pos + size]
return (mime.decode("utf-16-le"), image_data, type,
description.decode("utf-16-le"))
def _pack_asf_image(mime, data, type=3, description=""):
"""Pack image data for a WM/Picture tag.
"""
tag_data = struct.pack('<bi', type, len(data))
tag_data += mime.encode("utf-16-le") + b'\x00\x00'
tag_data += description.encode("utf-16-le") + b'\x00\x00'
tag_data += data
return tag_data
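# A small, self-contained round-trip sketch for the two helpers above. It is
# purely illustrative and is not called anywhere in this module; the default
# `image_data` argument is a placeholder, not real JPEG data.
def _demo_asf_image_roundtrip(image_data=b'\xff\xd8placeholder'):
    packed = _pack_asf_image(u'image/jpeg', image_data, type=3,
                             description=u'front cover')
    mime, data, kind, desc = _unpack_asf_image(packed)
    return (mime, data, kind, desc) == (u'image/jpeg', image_data, 3,
                                        u'front cover')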
# iTunes Sound Check encoding.
def _sc_decode(soundcheck):
"""Convert a Sound Check bytestring value to a (gain, peak) tuple as
used by ReplayGain.
"""
# We decode binary data. If one of the formats gives us a text
# string, interpret it as UTF-8.
if isinstance(soundcheck, six.text_type):
soundcheck = soundcheck.encode('utf-8')
# SoundCheck tags consist of 10 numbers, each represented by 8
# characters of ASCII hex preceded by a space.
try:
soundcheck = codecs.decode(soundcheck.replace(b' ', b''), 'hex')
soundcheck = struct.unpack('!iiiiiiiiii', soundcheck)
except (struct.error, TypeError, binascii.Error):
# SoundCheck isn't in the format we expect, so return default
# values.
return 0.0, 0.0
# SoundCheck stores absolute calculated/measured RMS value in an
# unknown unit. We need to find the ratio of this measurement
# compared to a reference value of 1000 to get our gain in dB. We
# play it safe by using the larger of the two values (i.e., the most
# attenuation).
maxgain = max(soundcheck[:2])
if maxgain > 0:
gain = math.log10(maxgain / 1000.0) * -10
else:
# Invalid gain value found.
gain = 0.0
# SoundCheck stores peak values as the actual value of the sample,
# and again separately for the left and right channels. We need to
# convert this to a percentage of full scale, which is 32768 for a
# 16 bit sample. Once again, we play it safe by using the larger of
# the two values.
peak = max(soundcheck[6:8]) / 32768.0
return round(gain, 2), round(peak, 6)
def _sc_encode(gain, peak):
"""Encode ReplayGain gain/peak values as a Sound Check string.
"""
# SoundCheck stores the peak value as the actual value of the
# sample, rather than the percentage of full scale that RG uses, so
# we do a simple conversion assuming 16 bit samples.
peak *= 32768.0
# SoundCheck stores absolute RMS values in some unknown units rather
# than the dB values RG uses. We can calculate these absolute values
# from the gain ratio using a reference value of 1000 units. We also
# enforce the maximum value here, which is equivalent to about
# -18.2dB.
g1 = int(min(round((10 ** (gain / -10)) * 1000), 65534))
# Same as above, except our reference level is 2500 units.
g2 = int(min(round((10 ** (gain / -10)) * 2500), 65534))
    # The purpose of these values is unknown, but they also seem to be
# unused so we just use zero.
uk = 0
values = (g1, g1, g2, g2, uk, uk, int(peak), int(peak), uk, uk)
return (u' %08X' * 10) % values
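# Hedged round-trip note for the two helpers above: encoding and then
# decoding is only approximate because of the rounding involved, e.g.
#
#     _sc_decode(_sc_encode(-3.5, 0.9))
#
# yields a (gain, peak) pair close to, but not necessarily identical to,
# (-3.5, 0.9).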
# Cover art and other images.
def _imghdr_what_wrapper(data):
"""A wrapper around imghdr.what to account for jpeg files that can only be
identified as such using their magic bytes
See #1545
See https://github.com/file/file/blob/master/magic/Magdir/jpeg#L12
"""
# imghdr.what returns none for jpegs with only the magic bytes, so
# _wider_test_jpeg is run in that case. It still returns None if it didn't
# match such a jpeg file.
return imghdr.what(None, h=data) or _wider_test_jpeg(data)
def _wider_test_jpeg(data):
"""Test for a jpeg file following the UNIX file implementation which
uses the magic bytes rather than just looking for the bytes that
represent 'JFIF' or 'EXIF' at a fixed position.
"""
if data[:2] == b'\xff\xd8':
return 'jpeg'
def image_mime_type(data):
"""Return the MIME type of the image data (a bytestring).
"""
# This checks for a jpeg file with only the magic bytes (unrecognized by
# imghdr.what). imghdr.what returns none for that type of file, so
# _wider_test_jpeg is run in that case. It still returns None if it didn't
# match such a jpeg file.
kind = _imghdr_what_wrapper(data)
if kind in ['gif', 'jpeg', 'png', 'tiff', 'bmp']:
return 'image/{0}'.format(kind)
elif kind == 'pgm':
return 'image/x-portable-graymap'
elif kind == 'pbm':
return 'image/x-portable-bitmap'
elif kind == 'ppm':
return 'image/x-portable-pixmap'
elif kind == 'xbm':
return 'image/x-xbitmap'
else:
return 'image/x-{0}'.format(kind)
def image_extension(data):
ext = _imghdr_what_wrapper(data)
return PREFERRED_IMAGE_EXTENSIONS.get(ext, ext)
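# For example (behaviour implied by the helpers above): JPEG data that only
# carries the magic bytes is still detected as 'jpeg' via _wider_test_jpeg
# and then mapped to the preferred 'jpg' extension by
# PREFERRED_IMAGE_EXTENSIONS.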
class ImageType(enum.Enum):
"""Indicates the kind of an `Image` stored in a file's tag.
"""
other = 0
icon = 1
other_icon = 2
front = 3
back = 4
leaflet = 5
media = 6
lead_artist = 7
artist = 8
conductor = 9
group = 10
composer = 11
lyricist = 12
recording_location = 13
recording_session = 14
performance = 15
screen_capture = 16
fish = 17
illustration = 18
artist_logo = 19
publisher_logo = 20
class Image(object):
"""Structure representing image data and metadata that can be
stored and retrieved from tags.
The structure has four properties.
* ``data`` The binary data of the image
* ``desc`` An optional description of the image
* ``type`` An instance of `ImageType` indicating the kind of image
* ``mime_type`` Read-only property that contains the mime type of
the binary data
"""
def __init__(self, data, desc=None, type=None):
assert isinstance(data, bytes)
if desc is not None:
assert isinstance(desc, six.text_type)
self.data = data
self.desc = desc
if isinstance(type, int):
try:
type = list(ImageType)[type]
except IndexError:
log.debug(u"ignoring unknown image type index %s", type)
type = ImageType.other
self.type = type
@property
def mime_type(self):
if self.data:
return image_mime_type(self.data)
@property
def type_index(self):
if self.type is None:
# This method is used when a tag format requires the type
# index to be set, so we return "other" as the default value.
return 0
return self.type.value
# StorageStyle classes describe strategies for accessing values in
# Mutagen file objects.
class StorageStyle(object):
"""A strategy for storing a value for a certain tag format (or set
of tag formats). This basic StorageStyle describes simple 1:1
mapping from raw values to keys in a Mutagen file object; subclasses
describe more sophisticated translations or format-specific access
strategies.
MediaFile uses a StorageStyle via three methods: ``get()``,
``set()``, and ``delete()``. It passes a Mutagen file object to
each.
Internally, the StorageStyle implements ``get()`` and ``set()``
using two steps that may be overridden by subtypes. To get a value,
the StorageStyle first calls ``fetch()`` to retrieve the value
corresponding to a key and then ``deserialize()`` to convert the raw
Mutagen value to a consumable Python value. Similarly, to set a
field, we call ``serialize()`` to encode the value and then
``store()`` to assign the result into the Mutagen object.
Each StorageStyle type has a class-level `formats` attribute that is
a list of strings indicating the formats that the style applies to.
MediaFile only uses StorageStyles that apply to the correct type for
a given audio file.
"""
formats = ['FLAC', 'OggOpus', 'OggTheora', 'OggSpeex', 'OggVorbis',
'OggFlac', 'APEv2File', 'WavPack', 'Musepack', 'MonkeysAudio']
"""List of mutagen classes the StorageStyle can handle.
"""
def __init__(self, key, as_type=six.text_type, suffix=None,
float_places=2):
"""Create a basic storage strategy. Parameters:
- `key`: The key on the Mutagen file object used to access the
field's data.
- `as_type`: The Python type that the value is stored as
internally (`unicode`, `int`, `bool`, or `bytes`).
- `suffix`: When `as_type` is a string type, append this before
storing the value.
- `float_places`: When the value is a floating-point number and
encoded as a string, the number of digits to store after the
decimal point.
"""
self.key = key
self.as_type = as_type
self.suffix = suffix
self.float_places = float_places
# Convert suffix to correct string type.
if self.suffix and self.as_type is six.text_type \
and not isinstance(self.suffix, six.text_type):
self.suffix = self.suffix.decode('utf-8')
# Getter.
def get(self, mutagen_file):
"""Get the value for the field using this style.
"""
return self.deserialize(self.fetch(mutagen_file))
def fetch(self, mutagen_file):
"""Retrieve the raw value of for this tag from the Mutagen file
object.
"""
try:
return mutagen_file[self.key][0]
except (KeyError, IndexError):
return None
def deserialize(self, mutagen_value):
"""Given a raw value stored on a Mutagen object, decode and
return the represented value.
"""
if self.suffix and isinstance(mutagen_value, six.text_type) \
and mutagen_value.endswith(self.suffix):
return mutagen_value[:-len(self.suffix)]
else:
return mutagen_value
# Setter.
def set(self, mutagen_file, value):
"""Assign the value for the field using this style.
"""
self.store(mutagen_file, self.serialize(value))
def store(self, mutagen_file, value):
"""Store a serialized value in the Mutagen file object.
"""
mutagen_file[self.key] = [value]
def serialize(self, value):
"""Convert the external Python value to a type that is suitable for
storing in a Mutagen file object.
"""
if isinstance(value, float) and self.as_type is six.text_type:
value = u'{0:.{1}f}'.format(value, self.float_places)
value = self.as_type(value)
elif self.as_type is six.text_type:
if isinstance(value, bool):
# Store bools as 1/0 instead of True/False.
value = six.text_type(int(bool(value)))
elif isinstance(value, bytes):
value = value.decode('utf-8', 'ignore')
else:
value = six.text_type(value)
else:
value = self.as_type(value)
if self.suffix:
value += self.suffix
return value
def delete(self, mutagen_file):
"""Remove the tag from the file.
"""
if self.key in mutagen_file:
del mutagen_file[self.key]
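# Illustrative use of the base style above, outside of MediaFile. This is a
# sketch that assumes `flac_file` is an object returned by mutagen.File for
# one of the formats listed in StorageStyle.formats:
#
#     style = StorageStyle('ALBUM')
#     style.set(flac_file, u'Abbey Road')  # stores [u'Abbey Road'] at 'ALBUM'
#     style.get(flac_file)                 # -> u'Abbey Road'
#     style.delete(flac_file)              # removes the 'ALBUM' key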
class ListStorageStyle(StorageStyle):
"""Abstract storage style that provides access to lists.
The ListMediaField descriptor uses a ListStorageStyle via two
methods: ``get_list()`` and ``set_list()``. It passes a Mutagen file
object to each.
    Subclasses may override ``fetch`` and ``store``. ``fetch`` must
return a (possibly empty) list and ``store`` receives a serialized
list of values as the second argument.
The `serialize` and `deserialize` methods (from the base
`StorageStyle`) are still called with individual values. This class
handles packing and unpacking the values into lists.
"""
def get(self, mutagen_file):
"""Get the first value in the field's value list.
"""
try:
return self.get_list(mutagen_file)[0]
except IndexError:
return None
def get_list(self, mutagen_file):
"""Get a list of all values for the field using this style.
"""
return [self.deserialize(item) for item in self.fetch(mutagen_file)]
def fetch(self, mutagen_file):
"""Get the list of raw (serialized) values.
"""
try:
return mutagen_file[self.key]
except KeyError:
return []
def set(self, mutagen_file, value):
"""Set an individual value as the only value for the field using
this style.
"""
self.set_list(mutagen_file, [value])
def set_list(self, mutagen_file, values):
"""Set all values for the field using this style. `values`
should be an iterable.
"""
self.store(mutagen_file, [self.serialize(value) for value in values])
def store(self, mutagen_file, values):
"""Set the list of all raw (serialized) values for this field.
"""
mutagen_file[self.key] = values
class SoundCheckStorageStyleMixin(object):
"""A mixin for storage styles that read and write iTunes SoundCheck
analysis values. The object must have an `index` field that
indicates which half of the gain/peak pair---0 or 1---the field
represents.
"""
def get(self, mutagen_file):
data = self.fetch(mutagen_file)
if data is not None:
return _sc_decode(data)[self.index]
def set(self, mutagen_file, value):
data = self.fetch(mutagen_file)
if data is None:
gain_peak = [0, 0]
else:
gain_peak = list(_sc_decode(data))
gain_peak[self.index] = value or 0
data = self.serialize(_sc_encode(*gain_peak))
self.store(mutagen_file, data)
class ASFStorageStyle(ListStorageStyle):
"""A general storage style for Windows Media/ASF files.
"""
formats = ['ASF']
def deserialize(self, data):
if isinstance(data, mutagen.asf.ASFBaseAttribute):
data = data.value
return data
class MP4StorageStyle(StorageStyle):
"""A general storage style for MPEG-4 tags.
"""
formats = ['MP4']
def serialize(self, value):
value = super(MP4StorageStyle, self).serialize(value)
if self.key.startswith('----:') and isinstance(value, six.text_type):
value = value.encode('utf-8')
return value
class MP4TupleStorageStyle(MP4StorageStyle):
"""A style for storing values as part of a pair of numbers in an
MPEG-4 file.
"""
def __init__(self, key, index=0, **kwargs):
super(MP4TupleStorageStyle, self).__init__(key, **kwargs)
self.index = index
def deserialize(self, mutagen_value):
items = mutagen_value or []
packing_length = 2
return list(items) + [0] * (packing_length - len(items))
def get(self, mutagen_file):
value = super(MP4TupleStorageStyle, self).get(mutagen_file)[self.index]
if value == 0:
# The values are always present and saved as integers. So we
# assume that "0" indicates it is not set.
return None
else:
return value
def set(self, mutagen_file, value):
if value is None:
value = 0
items = self.deserialize(self.fetch(mutagen_file))
items[self.index] = int(value)
self.store(mutagen_file, items)
def delete(self, mutagen_file):
if self.index == 0:
super(MP4TupleStorageStyle, self).delete(mutagen_file)
else:
self.set(mutagen_file, None)
class MP4ListStorageStyle(ListStorageStyle, MP4StorageStyle):
pass
class MP4SoundCheckStorageStyle(SoundCheckStorageStyleMixin, MP4StorageStyle):
def __init__(self, key, index=0, **kwargs):
super(MP4SoundCheckStorageStyle, self).__init__(key, **kwargs)
self.index = index
class MP4BoolStorageStyle(MP4StorageStyle):
"""A style for booleans in MPEG-4 files. (MPEG-4 has an atom type
specifically for representing booleans.)
"""
def get(self, mutagen_file):
try:
return mutagen_file[self.key]
except KeyError:
return None
def get_list(self, mutagen_file):
raise NotImplementedError(u'MP4 bool storage does not support lists')
def set(self, mutagen_file, value):
mutagen_file[self.key] = value
def set_list(self, mutagen_file, values):
raise NotImplementedError(u'MP4 bool storage does not support lists')
class MP4ImageStorageStyle(MP4ListStorageStyle):
"""Store images as MPEG-4 image atoms. Values are `Image` objects.
"""
def __init__(self, **kwargs):
super(MP4ImageStorageStyle, self).__init__(key='covr', **kwargs)
def deserialize(self, data):
return Image(data)
def serialize(self, image):
if image.mime_type == 'image/png':
kind = mutagen.mp4.MP4Cover.FORMAT_PNG
elif image.mime_type == 'image/jpeg':
kind = mutagen.mp4.MP4Cover.FORMAT_JPEG
else:
            raise ValueError(u'MP4 files only support PNG and JPEG images')
return mutagen.mp4.MP4Cover(image.data, kind)
class MP3StorageStyle(StorageStyle):
"""Store data in ID3 frames.
"""
formats = ['MP3', 'AIFF', 'DSF']
def __init__(self, key, id3_lang=None, **kwargs):
"""Create a new ID3 storage style. `id3_lang` is the value for
the language field of newly created frames.
"""
self.id3_lang = id3_lang
super(MP3StorageStyle, self).__init__(key, **kwargs)
def fetch(self, mutagen_file):
try:
return mutagen_file[self.key].text[0]
except (KeyError, IndexError):
return None
def store(self, mutagen_file, value):
frame = mutagen.id3.Frames[self.key](encoding=3, text=[value])
mutagen_file.tags.setall(self.key, [frame])
class MP3PeopleStorageStyle(MP3StorageStyle):
"""Store list of people in ID3 frames.
"""
def __init__(self, key, involvement='', **kwargs):
self.involvement = involvement
super(MP3PeopleStorageStyle, self).__init__(key, **kwargs)
def store(self, mutagen_file, value):
frames = mutagen_file.tags.getall(self.key)
# Try modifying in place.
found = False
for frame in frames:
if frame.encoding == mutagen.id3.Encoding.UTF8:
for pair in frame.people:
if pair[0].lower() == self.involvement.lower():
pair[1] = value
found = True
# Try creating a new frame.
if not found:
frame = mutagen.id3.Frames[self.key](
encoding=mutagen.id3.Encoding.UTF8,
people=[[self.involvement, value]]
)
mutagen_file.tags.add(frame)
def fetch(self, mutagen_file):
for frame in mutagen_file.tags.getall(self.key):
for pair in frame.people:
if pair[0].lower() == self.involvement.lower():
try:
return pair[1]
except IndexError:
return None
class MP3ListStorageStyle(ListStorageStyle, MP3StorageStyle):
"""Store lists of data in multiple ID3 frames.
"""
def fetch(self, mutagen_file):
try:
return mutagen_file[self.key].text
except KeyError:
return []
def store(self, mutagen_file, values):
frame = mutagen.id3.Frames[self.key](encoding=3, text=values)
mutagen_file.tags.setall(self.key, [frame])
class MP3UFIDStorageStyle(MP3StorageStyle):
"""Store string data in a UFID ID3 frame with a particular owner.
"""
def __init__(self, owner, **kwargs):
self.owner = owner
super(MP3UFIDStorageStyle, self).__init__('UFID:' + owner, **kwargs)
def fetch(self, mutagen_file):
try:
return mutagen_file[self.key].data
except KeyError:
return None
def store(self, mutagen_file, value):
# This field type stores text data as encoded data.
assert isinstance(value, six.text_type)
value = value.encode('utf-8')
frames = mutagen_file.tags.getall(self.key)
for frame in frames:
# Replace existing frame data.
if frame.owner == self.owner:
frame.data = value
else:
# New frame.
frame = mutagen.id3.UFID(owner=self.owner, data=value)
mutagen_file.tags.setall(self.key, [frame])
class MP3DescStorageStyle(MP3StorageStyle):
"""Store data in a TXXX (or similar) ID3 frame. The frame is
    selected based on its ``desc`` field.
"""
def __init__(self, desc=u'', key='TXXX', **kwargs):
assert isinstance(desc, six.text_type)
self.description = desc
super(MP3DescStorageStyle, self).__init__(key=key, **kwargs)
def store(self, mutagen_file, value):
frames = mutagen_file.tags.getall(self.key)
if self.key != 'USLT':
value = [value]
# Try modifying in place.
found = False
for frame in frames:
if frame.desc.lower() == self.description.lower():
frame.text = value
frame.encoding = mutagen.id3.Encoding.UTF8
found = True
# Try creating a new frame.
if not found:
frame = mutagen.id3.Frames[self.key](
desc=self.description,
text=value,
encoding=mutagen.id3.Encoding.UTF8,
)
if self.id3_lang:
frame.lang = self.id3_lang
mutagen_file.tags.add(frame)
def fetch(self, mutagen_file):
for frame in mutagen_file.tags.getall(self.key):
if frame.desc.lower() == self.description.lower():
if self.key == 'USLT':
return frame.text
try:
return frame.text[0]
except IndexError:
return None
def delete(self, mutagen_file):
found_frame = None
for frame in mutagen_file.tags.getall(self.key):
if frame.desc.lower() == self.description.lower():
found_frame = frame
break
if found_frame is not None:
            del mutagen_file[found_frame.HashKey]
class MP3SlashPackStorageStyle(MP3StorageStyle):
"""Store value as part of pair that is serialized as a slash-
separated string.
"""
def __init__(self, key, pack_pos=0, **kwargs):
super(MP3SlashPackStorageStyle, self).__init__(key, **kwargs)
self.pack_pos = pack_pos
def _fetch_unpacked(self, mutagen_file):
data = self.fetch(mutagen_file)
if data:
items = six.text_type(data).split('/')
else:
items = []
packing_length = 2
return list(items) + [None] * (packing_length - len(items))
def get(self, mutagen_file):
return self._fetch_unpacked(mutagen_file)[self.pack_pos]
def set(self, mutagen_file, value):
items = self._fetch_unpacked(mutagen_file)
items[self.pack_pos] = value
if items[0] is None:
items[0] = ''
if items[1] is None:
items.pop() # Do not store last value
self.store(mutagen_file, '/'.join(map(six.text_type, items)))
def delete(self, mutagen_file):
if self.pack_pos == 0:
super(MP3SlashPackStorageStyle, self).delete(mutagen_file)
else:
self.set(mutagen_file, None)
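# Illustrative effect of the pair packing above: the `track` and `tracktotal`
# fields defined later share the TRCK frame with pack_pos 0 and 1, so setting
# track 3 of 12 ends up storing the single string u'3/12' in that frame.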
class MP3ImageStorageStyle(ListStorageStyle, MP3StorageStyle):
"""Converts between APIC frames and ``Image`` instances.
The `get_list` method inherited from ``ListStorageStyle`` returns a
list of ``Image``s. Similarly, the `set_list` method accepts a
list of ``Image``s as its ``values`` argument.
"""
def __init__(self):
super(MP3ImageStorageStyle, self).__init__(key='APIC')
self.as_type = bytes
def deserialize(self, apic_frame):
"""Convert APIC frame into Image."""
return Image(data=apic_frame.data, desc=apic_frame.desc,
type=apic_frame.type)
def fetch(self, mutagen_file):
return mutagen_file.tags.getall(self.key)
def store(self, mutagen_file, frames):
mutagen_file.tags.setall(self.key, frames)
def delete(self, mutagen_file):
mutagen_file.tags.delall(self.key)
def serialize(self, image):
"""Return an APIC frame populated with data from ``image``.
"""
assert isinstance(image, Image)
frame = mutagen.id3.Frames[self.key]()
frame.data = image.data
frame.mime = image.mime_type
frame.desc = image.desc or u''
# For compatibility with OS X/iTunes prefer latin-1 if possible.
# See issue #899
try:
frame.desc.encode("latin-1")
except UnicodeEncodeError:
frame.encoding = mutagen.id3.Encoding.UTF16
else:
frame.encoding = mutagen.id3.Encoding.LATIN1
frame.type = image.type_index
return frame
class MP3SoundCheckStorageStyle(SoundCheckStorageStyleMixin,
MP3DescStorageStyle):
def __init__(self, index=0, **kwargs):
super(MP3SoundCheckStorageStyle, self).__init__(**kwargs)
self.index = index
class ASFImageStorageStyle(ListStorageStyle):
"""Store images packed into Windows Media/ASF byte array attributes.
Values are `Image` objects.
"""
formats = ['ASF']
def __init__(self):
super(ASFImageStorageStyle, self).__init__(key='WM/Picture')
def deserialize(self, asf_picture):
mime, data, type, desc = _unpack_asf_image(asf_picture.value)
return Image(data, desc=desc, type=type)
def serialize(self, image):
pic = mutagen.asf.ASFByteArrayAttribute()
pic.value = _pack_asf_image(image.mime_type, image.data,
type=image.type_index,
description=image.desc or u'')
return pic
class VorbisImageStorageStyle(ListStorageStyle):
"""Store images in Vorbis comments. Both legacy COVERART fields and
modern METADATA_BLOCK_PICTURE tags are supported. Data is
base64-encoded. Values are `Image` objects.
"""
formats = ['OggOpus', 'OggTheora', 'OggSpeex', 'OggVorbis',
'OggFlac']
def __init__(self):
super(VorbisImageStorageStyle, self).__init__(
key='metadata_block_picture'
)
self.as_type = bytes
def fetch(self, mutagen_file):
images = []
if 'metadata_block_picture' not in mutagen_file:
# Try legacy COVERART tags.
if 'coverart' in mutagen_file:
for data in mutagen_file['coverart']:
images.append(Image(base64.b64decode(data)))
return images
for data in mutagen_file["metadata_block_picture"]:
try:
pic = mutagen.flac.Picture(base64.b64decode(data))
except (TypeError, AttributeError):
continue
images.append(Image(data=pic.data, desc=pic.desc,
type=pic.type))
return images
def store(self, mutagen_file, image_data):
# Strip all art, including legacy COVERART.
if 'coverart' in mutagen_file:
del mutagen_file['coverart']
if 'coverartmime' in mutagen_file:
del mutagen_file['coverartmime']
super(VorbisImageStorageStyle, self).store(mutagen_file, image_data)
def serialize(self, image):
"""Turn a Image into a base64 encoded FLAC picture block.
"""
pic = mutagen.flac.Picture()
pic.data = image.data
pic.type = image.type_index
pic.mime = image.mime_type
pic.desc = image.desc or u''
# Encoding with base64 returns bytes on both Python 2 and 3.
# Mutagen requires the data to be a Unicode string, so we decode
# it before passing it along.
return base64.b64encode(pic.write()).decode('ascii')
class FlacImageStorageStyle(ListStorageStyle):
"""Converts between ``mutagen.flac.Picture`` and ``Image`` instances.
"""
formats = ['FLAC']
def __init__(self):
super(FlacImageStorageStyle, self).__init__(key='')
def fetch(self, mutagen_file):
return mutagen_file.pictures
def deserialize(self, flac_picture):
return Image(data=flac_picture.data, desc=flac_picture.desc,
type=flac_picture.type)
def store(self, mutagen_file, pictures):
"""``pictures`` is a list of mutagen.flac.Picture instances.
"""
mutagen_file.clear_pictures()
for pic in pictures:
mutagen_file.add_picture(pic)
def serialize(self, image):
"""Turn a Image into a mutagen.flac.Picture.
"""
pic = mutagen.flac.Picture()
pic.data = image.data
pic.type = image.type_index
pic.mime = image.mime_type
pic.desc = image.desc or u''
return pic
def delete(self, mutagen_file):
"""Remove all images from the file.
"""
mutagen_file.clear_pictures()
class APEv2ImageStorageStyle(ListStorageStyle):
"""Store images in APEv2 tags. Values are `Image` objects.
"""
formats = ['APEv2File', 'WavPack', 'Musepack', 'MonkeysAudio', 'OptimFROG']
TAG_NAMES = {
ImageType.other: 'Cover Art (other)',
ImageType.icon: 'Cover Art (icon)',
ImageType.other_icon: 'Cover Art (other icon)',
ImageType.front: 'Cover Art (front)',
ImageType.back: 'Cover Art (back)',
ImageType.leaflet: 'Cover Art (leaflet)',
ImageType.media: 'Cover Art (media)',
ImageType.lead_artist: 'Cover Art (lead)',
ImageType.artist: 'Cover Art (artist)',
ImageType.conductor: 'Cover Art (conductor)',
ImageType.group: 'Cover Art (band)',
ImageType.composer: 'Cover Art (composer)',
ImageType.lyricist: 'Cover Art (lyricist)',
ImageType.recording_location: 'Cover Art (studio)',
ImageType.recording_session: 'Cover Art (recording)',
ImageType.performance: 'Cover Art (performance)',
ImageType.screen_capture: 'Cover Art (movie scene)',
ImageType.fish: 'Cover Art (colored fish)',
ImageType.illustration: 'Cover Art (illustration)',
ImageType.artist_logo: 'Cover Art (band logo)',
ImageType.publisher_logo: 'Cover Art (publisher logo)',
}
def __init__(self):
super(APEv2ImageStorageStyle, self).__init__(key='')
def fetch(self, mutagen_file):
images = []
for cover_type, cover_tag in self.TAG_NAMES.items():
try:
frame = mutagen_file[cover_tag]
text_delimiter_index = frame.value.find(b'\x00')
if text_delimiter_index > 0:
comment = frame.value[0:text_delimiter_index]
comment = comment.decode('utf-8', 'replace')
else:
comment = None
image_data = frame.value[text_delimiter_index + 1:]
images.append(Image(data=image_data, type=cover_type,
desc=comment))
except KeyError:
pass
return images
def set_list(self, mutagen_file, values):
self.delete(mutagen_file)
for image in values:
image_type = image.type or ImageType.other
comment = image.desc or ''
image_data = comment.encode('utf-8') + b'\x00' + image.data
cover_tag = self.TAG_NAMES[image_type]
mutagen_file[cover_tag] = image_data
def delete(self, mutagen_file):
"""Remove all images from the file.
"""
for cover_tag in self.TAG_NAMES.values():
try:
del mutagen_file[cover_tag]
except KeyError:
pass
# MediaField is a descriptor that represents a single logical field. It
# aggregates several StorageStyles describing how to access the data for
# each file type.
class MediaField(object):
"""A descriptor providing access to a particular (abstract) metadata
field.
"""
def __init__(self, *styles, **kwargs):
"""Creates a new MediaField.
:param styles: `StorageStyle` instances that describe the strategy
for reading and writing the field in particular
formats. There must be at least one style for
each possible file format.
:param out_type: the type of the value that should be returned when
getting this property.
"""
self.out_type = kwargs.get('out_type', six.text_type)
self._styles = styles
def styles(self, mutagen_file):
"""Yields the list of storage styles of this field that can
handle the MediaFile's format.
"""
for style in self._styles:
if mutagen_file.__class__.__name__ in style.formats:
yield style
def __get__(self, mediafile, owner=None):
out = None
for style in self.styles(mediafile.mgfile):
out = style.get(mediafile.mgfile)
if out:
break
return _safe_cast(self.out_type, out)
def __set__(self, mediafile, value):
if value is None:
value = self._none_value()
for style in self.styles(mediafile.mgfile):
style.set(mediafile.mgfile, value)
def __delete__(self, mediafile):
for style in self.styles(mediafile.mgfile):
style.delete(mediafile.mgfile)
def _none_value(self):
"""Get an appropriate "null" value for this field's type. This
is used internally when setting the field to None.
"""
if self.out_type == int:
return 0
elif self.out_type == float:
return 0.0
elif self.out_type == bool:
return False
elif self.out_type == six.text_type:
return u''
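# How the descriptor above behaves in practice (a hedged summary, with
# `title` standing in for any of the fields defined further down):
#
#     mediafile.title            # first truthy value across applicable
#                                # styles, cast via out_type
#     mediafile.title = u'...'   # written through every applicable style
#     del mediafile.title        # removes the tag from every style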
class ListMediaField(MediaField):
"""Property descriptor that retrieves a list of multiple values from
a tag.
    Uses the ``get_list`` and ``set_list`` methods of its ``StorageStyle``
strategies to do the actual work.
"""
def __get__(self, mediafile, _):
values = []
for style in self.styles(mediafile.mgfile):
values.extend(style.get_list(mediafile.mgfile))
return [_safe_cast(self.out_type, value) for value in values]
def __set__(self, mediafile, values):
for style in self.styles(mediafile.mgfile):
style.set_list(mediafile.mgfile, values)
def single_field(self):
"""Returns a ``MediaField`` descriptor that gets and sets the
first item.
"""
options = {'out_type': self.out_type}
return MediaField(*self._styles, **options)
class DateField(MediaField):
"""Descriptor that handles serializing and deserializing dates
The getter parses value from tags into a ``datetime.date`` instance
and setter serializes such an instance into a string.
For granular access to year, month, and day, use the ``*_field``
methods to create corresponding `DateItemField`s.
"""
def __init__(self, *date_styles, **kwargs):
"""``date_styles`` is a list of ``StorageStyle``s to store and
retrieve the whole date from. The ``year`` option is an
additional list of fallback styles for the year. The year is
always set on this style, but is only retrieved if the main
storage styles do not return a value.
"""
super(DateField, self).__init__(*date_styles)
year_style = kwargs.get('year', None)
if year_style:
self._year_field = MediaField(*year_style)
def __get__(self, mediafile, owner=None):
year, month, day = self._get_date_tuple(mediafile)
if not year:
return None
try:
return datetime.date(
year,
month or 1,
day or 1
)
except ValueError: # Out of range values.
return None
def __set__(self, mediafile, date):
if date is None:
self._set_date_tuple(mediafile, None, None, None)
else:
self._set_date_tuple(mediafile, date.year, date.month, date.day)
def __delete__(self, mediafile):
super(DateField, self).__delete__(mediafile)
if hasattr(self, '_year_field'):
self._year_field.__delete__(mediafile)
def _get_date_tuple(self, mediafile):
"""Get a 3-item sequence representing the date consisting of a
year, month, and day number. Each number is either an integer or
None.
"""
# Get the underlying data and split on hyphens and slashes.
datestring = super(DateField, self).__get__(mediafile, None)
if isinstance(datestring, six.string_types):
datestring = re.sub(r'[Tt ].*$', '', six.text_type(datestring))
items = re.split('[-/]', six.text_type(datestring))
else:
items = []
# Ensure that we have exactly 3 components, possibly by
# truncating or padding.
items = items[:3]
if len(items) < 3:
items += [None] * (3 - len(items))
# Use year field if year is missing.
if not items[0] and hasattr(self, '_year_field'):
items[0] = self._year_field.__get__(mediafile)
# Convert each component to an integer if possible.
items_ = []
for item in items:
try:
items_.append(int(item))
except (TypeError, ValueError):
items_.append(None)
return items_
def _set_date_tuple(self, mediafile, year, month=None, day=None):
"""Set the value of the field given a year, month, and day
number. Each number can be an integer or None to indicate an
unset component.
"""
if year is None:
self.__delete__(mediafile)
return
date = [u'{0:04d}'.format(int(year))]
if month:
date.append(u'{0:02d}'.format(int(month)))
if month and day:
date.append(u'{0:02d}'.format(int(day)))
date = map(six.text_type, date)
super(DateField, self).__set__(mediafile, u'-'.join(date))
if hasattr(self, '_year_field'):
self._year_field.__set__(mediafile, year)
def year_field(self):
return DateItemField(self, 0)
def month_field(self):
return DateItemField(self, 1)
def day_field(self):
return DateItemField(self, 2)
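# Hedged illustration of the date handling above: assigning
# datetime.date(2016, 5, 1) stores the string u'2016-05-01' through the
# styles given to DateField (plus the bare year in any `year` fallback
# style), and reading parses it back with missing month/day defaulting to 1.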
class DateItemField(MediaField):
"""Descriptor that gets and sets constituent parts of a `DateField`:
the month, day, or year.
"""
def __init__(self, date_field, item_pos):
self.date_field = date_field
self.item_pos = item_pos
def __get__(self, mediafile, _):
return self.date_field._get_date_tuple(mediafile)[self.item_pos]
def __set__(self, mediafile, value):
items = self.date_field._get_date_tuple(mediafile)
items[self.item_pos] = value
self.date_field._set_date_tuple(mediafile, *items)
def __delete__(self, mediafile):
self.__set__(mediafile, None)
class CoverArtField(MediaField):
"""A descriptor that provides access to the *raw image data* for the
cover image on a file. This is used for backwards compatibility: the
full `ImageListField` provides richer `Image` objects.
    When there are multiple images, we try to pick the one most likely
    to be the front cover.
"""
def __init__(self):
pass
def __get__(self, mediafile, _):
candidates = mediafile.images
if candidates:
return self.guess_cover_image(candidates).data
else:
return None
@staticmethod
def guess_cover_image(candidates):
if len(candidates) == 1:
return candidates[0]
try:
return next(c for c in candidates if c.type == ImageType.front)
except StopIteration:
return candidates[0]
def __set__(self, mediafile, data):
if data:
mediafile.images = [Image(data=data)]
else:
mediafile.images = []
def __delete__(self, mediafile):
delattr(mediafile, 'images')
class ImageListField(ListMediaField):
"""Descriptor to access the list of images embedded in tags.
The getter returns a list of `Image` instances obtained from
the tags. The setter accepts a list of `Image` instances to be
written to the tags.
"""
def __init__(self):
# The storage styles used here must implement the
# `ListStorageStyle` interface and get and set lists of
# `Image`s.
super(ImageListField, self).__init__(
MP3ImageStorageStyle(),
MP4ImageStorageStyle(),
ASFImageStorageStyle(),
VorbisImageStorageStyle(),
FlacImageStorageStyle(),
APEv2ImageStorageStyle(),
out_type=Image,
)
# MediaFile is a collection of fields.
class MediaFile(object):
"""Represents a multimedia file on disk and provides access to its
metadata.
"""
def __init__(self, path, id3v23=False):
"""Constructs a new `MediaFile` reflecting the file at path. May
throw `UnreadableFileError`.
By default, MP3 files are saved with ID3v2.4 tags. You can use
the older ID3v2.3 standard by specifying the `id3v23` option.
"""
self.path = path
self.mgfile = mutagen_call('open', path, mutagen.File, path)
if self.mgfile is None:
# Mutagen couldn't guess the type
raise FileTypeError(path)
elif (type(self.mgfile).__name__ == 'M4A' or
type(self.mgfile).__name__ == 'MP4'):
info = self.mgfile.info
if info.codec and info.codec.startswith('alac'):
self.type = 'alac'
else:
self.type = 'aac'
elif (type(self.mgfile).__name__ == 'ID3' or
type(self.mgfile).__name__ == 'MP3'):
self.type = 'mp3'
elif type(self.mgfile).__name__ == 'FLAC':
self.type = 'flac'
elif type(self.mgfile).__name__ == 'OggOpus':
self.type = 'opus'
elif type(self.mgfile).__name__ == 'OggVorbis':
self.type = 'ogg'
elif type(self.mgfile).__name__ == 'MonkeysAudio':
self.type = 'ape'
elif type(self.mgfile).__name__ == 'WavPack':
self.type = 'wv'
elif type(self.mgfile).__name__ == 'Musepack':
self.type = 'mpc'
elif type(self.mgfile).__name__ == 'ASF':
self.type = 'asf'
elif type(self.mgfile).__name__ == 'AIFF':
self.type = 'aiff'
elif type(self.mgfile).__name__ == 'DSF':
self.type = 'dsf'
else:
raise FileTypeError(path, type(self.mgfile).__name__)
# Add a set of tags if it's missing.
if self.mgfile.tags is None:
self.mgfile.add_tags()
# Set the ID3v2.3 flag only for MP3s.
self.id3v23 = id3v23 and self.type == 'mp3'
def save(self):
"""Write the object's tags back to the file. May
throw `UnreadableFileError`.
"""
# Possibly save the tags to ID3v2.3.
kwargs = {}
if self.id3v23:
id3 = self.mgfile
if hasattr(id3, 'tags'):
# In case this is an MP3 object, not an ID3 object.
id3 = id3.tags
id3.update_to_v23()
kwargs['v2_version'] = 3
mutagen_call('save', self.path, self.mgfile.save, **kwargs)
def delete(self):
"""Remove the current metadata tag from the file. May
throw `UnreadableFileError`.
"""
mutagen_call('delete', self.path, self.mgfile.delete)
# Convenient access to the set of available fields.
@classmethod
def fields(cls):
"""Get the names of all writable properties that reflect
metadata tags (i.e., those that are instances of
:class:`MediaField`).
"""
for property, descriptor in cls.__dict__.items():
if isinstance(descriptor, MediaField):
if isinstance(property, bytes):
# On Python 2, class field names are bytes. This method
# produces text strings.
yield property.decode('utf8', 'ignore')
else:
yield property
@classmethod
def _field_sort_name(cls, name):
"""Get a sort key for a field name that determines the order
fields should be written in.
        Field names are kept unchanged, unless they are instances of
:class:`DateItemField`, in which case `year`, `month`, and `day`
are replaced by `date0`, `date1`, and `date2`, respectively, to
make them appear in that order.
"""
if isinstance(cls.__dict__[name], DateItemField):
name = re.sub('year', 'date0', name)
name = re.sub('month', 'date1', name)
name = re.sub('day', 'date2', name)
return name
@classmethod
def sorted_fields(cls):
"""Get the names of all writable metadata fields, sorted in the
order that they should be written.
This is a lexicographic order, except for instances of
:class:`DateItemField`, which are sorted in year-month-day
order.
"""
for property in sorted(cls.fields(), key=cls._field_sort_name):
yield property
@classmethod
def readable_fields(cls):
"""Get all metadata fields: the writable ones from
:meth:`fields` and also other audio properties.
"""
for property in cls.fields():
yield property
for property in ('length', 'samplerate', 'bitdepth', 'bitrate',
'channels', 'format'):
yield property
@classmethod
def add_field(cls, name, descriptor):
"""Add a field to store custom tags.
:param name: the name of the property the field is accessed
through. It must not already exist on this class.
:param descriptor: an instance of :class:`MediaField`.
"""
if not isinstance(descriptor, MediaField):
raise ValueError(
u'{0} must be an instance of MediaField'.format(descriptor))
if name in cls.__dict__:
raise ValueError(
u'property "{0}" already exists on MediaField'.format(name))
setattr(cls, name, descriptor)
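    # Hedged example of the hook above; the field name and tag keys are
    # purely illustrative, not an established mapping:
    #
    #     MediaFile.add_field('mood', MediaField(
    #         MP3DescStorageStyle(u'MOOD'),
    #         MP4StorageStyle('----:com.apple.iTunes:MOOD'),
    #         StorageStyle('MOOD'),
    #         ASFStorageStyle('WM/Mood'),
    #     ))
    #     MediaFile('song.mp3').mood  # then behaves like any built-in field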
def update(self, dict):
"""Set all field values from a dictionary.
        For any key in `dict` that is also a writable tag field, the
method retrieves the corresponding value from `dict` and updates
the `MediaFile`. If a key has the value `None`, the
corresponding property is deleted from the `MediaFile`.
"""
for field in self.sorted_fields():
if field in dict:
if dict[field] is None:
delattr(self, field)
else:
setattr(self, field, dict[field])
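    # Hedged usage sketch for update() (the path is illustrative):
    #
    #     f = MediaFile('song.mp3')
    #     f.update({'title': u'New Title', 'comments': None})
    #     f.save()
    #
    # which sets the title and deletes the comments tag, as described above.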
# Field definitions.
title = MediaField(
MP3StorageStyle('TIT2'),
MP4StorageStyle('\xa9nam'),
StorageStyle('TITLE'),
ASFStorageStyle('Title'),
)
artist = MediaField(
MP3StorageStyle('TPE1'),
MP4StorageStyle('\xa9ART'),
StorageStyle('ARTIST'),
ASFStorageStyle('Author'),
)
album = MediaField(
MP3StorageStyle('TALB'),
MP4StorageStyle('\xa9alb'),
StorageStyle('ALBUM'),
ASFStorageStyle('WM/AlbumTitle'),
)
genres = ListMediaField(
MP3ListStorageStyle('TCON'),
MP4ListStorageStyle('\xa9gen'),
ListStorageStyle('GENRE'),
ASFStorageStyle('WM/Genre'),
)
genre = genres.single_field()
lyricist = MediaField(
MP3StorageStyle('TEXT'),
MP4StorageStyle('----:com.apple.iTunes:LYRICIST'),
StorageStyle('LYRICIST'),
ASFStorageStyle('WM/Writer'),
)
composer = MediaField(
MP3StorageStyle('TCOM'),
MP4StorageStyle('\xa9wrt'),
StorageStyle('COMPOSER'),
ASFStorageStyle('WM/Composer'),
)
arranger = MediaField(
MP3PeopleStorageStyle('TIPL', involvement='arranger'),
MP4StorageStyle('----:com.apple.iTunes:Arranger'),
StorageStyle('ARRANGER'),
ASFStorageStyle('beets/Arranger'),
)
grouping = MediaField(
MP3StorageStyle('TIT1'),
MP4StorageStyle('\xa9grp'),
StorageStyle('GROUPING'),
ASFStorageStyle('WM/ContentGroupDescription'),
)
track = MediaField(
MP3SlashPackStorageStyle('TRCK', pack_pos=0),
MP4TupleStorageStyle('trkn', index=0),
StorageStyle('TRACK'),
StorageStyle('TRACKNUMBER'),
ASFStorageStyle('WM/TrackNumber'),
out_type=int,
)
tracktotal = MediaField(
MP3SlashPackStorageStyle('TRCK', pack_pos=1),
MP4TupleStorageStyle('trkn', index=1),
StorageStyle('TRACKTOTAL'),
StorageStyle('TRACKC'),
StorageStyle('TOTALTRACKS'),
ASFStorageStyle('TotalTracks'),
out_type=int,
)
disc = MediaField(
MP3SlashPackStorageStyle('TPOS', pack_pos=0),
MP4TupleStorageStyle('disk', index=0),
StorageStyle('DISC'),
StorageStyle('DISCNUMBER'),
ASFStorageStyle('WM/PartOfSet'),
out_type=int,
)
disctotal = MediaField(
MP3SlashPackStorageStyle('TPOS', pack_pos=1),
MP4TupleStorageStyle('disk', index=1),
StorageStyle('DISCTOTAL'),
StorageStyle('DISCC'),
StorageStyle('TOTALDISCS'),
ASFStorageStyle('TotalDiscs'),
out_type=int,
)
lyrics = MediaField(
MP3DescStorageStyle(key='USLT'),
MP4StorageStyle('\xa9lyr'),
StorageStyle('LYRICS'),
ASFStorageStyle('WM/Lyrics'),
)
comments = MediaField(
MP3DescStorageStyle(key='COMM'),
MP4StorageStyle('\xa9cmt'),
StorageStyle('DESCRIPTION'),
StorageStyle('COMMENT'),
ASFStorageStyle('WM/Comments'),
ASFStorageStyle('Description')
)
bpm = MediaField(
MP3StorageStyle('TBPM'),
MP4StorageStyle('tmpo', as_type=int),
StorageStyle('BPM'),
ASFStorageStyle('WM/BeatsPerMinute'),
out_type=int,
)
comp = MediaField(
MP3StorageStyle('TCMP'),
MP4BoolStorageStyle('cpil'),
StorageStyle('COMPILATION'),
ASFStorageStyle('WM/IsCompilation', as_type=bool),
out_type=bool,
)
albumartist = MediaField(
MP3StorageStyle('TPE2'),
MP4StorageStyle('aART'),
StorageStyle('ALBUM ARTIST'),
StorageStyle('ALBUMARTIST'),
ASFStorageStyle('WM/AlbumArtist'),
)
albumtype = MediaField(
MP3DescStorageStyle(u'MusicBrainz Album Type'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Album Type'),
StorageStyle('MUSICBRAINZ_ALBUMTYPE'),
ASFStorageStyle('MusicBrainz/Album Type'),
)
label = MediaField(
MP3StorageStyle('TPUB'),
MP4StorageStyle('----:com.apple.iTunes:Label'),
MP4StorageStyle('----:com.apple.iTunes:publisher'),
StorageStyle('LABEL'),
StorageStyle('PUBLISHER'), # Traktor
ASFStorageStyle('WM/Publisher'),
)
artist_sort = MediaField(
MP3StorageStyle('TSOP'),
MP4StorageStyle('soar'),
StorageStyle('ARTISTSORT'),
ASFStorageStyle('WM/ArtistSortOrder'),
)
albumartist_sort = MediaField(
MP3DescStorageStyle(u'ALBUMARTISTSORT'),
MP4StorageStyle('soaa'),
StorageStyle('ALBUMARTISTSORT'),
ASFStorageStyle('WM/AlbumArtistSortOrder'),
)
asin = MediaField(
MP3DescStorageStyle(u'ASIN'),
MP4StorageStyle('----:com.apple.iTunes:ASIN'),
StorageStyle('ASIN'),
ASFStorageStyle('MusicBrainz/ASIN'),
)
catalognum = MediaField(
MP3DescStorageStyle(u'CATALOGNUMBER'),
MP4StorageStyle('----:com.apple.iTunes:CATALOGNUMBER'),
StorageStyle('CATALOGNUMBER'),
ASFStorageStyle('WM/CatalogNo'),
)
disctitle = MediaField(
MP3StorageStyle('TSST'),
MP4StorageStyle('----:com.apple.iTunes:DISCSUBTITLE'),
StorageStyle('DISCSUBTITLE'),
ASFStorageStyle('WM/SetSubTitle'),
)
encoder = MediaField(
MP3StorageStyle('TENC'),
MP4StorageStyle('\xa9too'),
StorageStyle('ENCODEDBY'),
StorageStyle('ENCODER'),
ASFStorageStyle('WM/EncodedBy'),
)
script = MediaField(
MP3DescStorageStyle(u'Script'),
MP4StorageStyle('----:com.apple.iTunes:SCRIPT'),
StorageStyle('SCRIPT'),
ASFStorageStyle('WM/Script'),
)
language = MediaField(
MP3StorageStyle('TLAN'),
MP4StorageStyle('----:com.apple.iTunes:LANGUAGE'),
StorageStyle('LANGUAGE'),
ASFStorageStyle('WM/Language'),
)
country = MediaField(
MP3DescStorageStyle(u'MusicBrainz Album Release Country'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz '
'Album Release Country'),
StorageStyle('RELEASECOUNTRY'),
ASFStorageStyle('MusicBrainz/Album Release Country'),
)
albumstatus = MediaField(
MP3DescStorageStyle(u'MusicBrainz Album Status'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Album Status'),
StorageStyle('MUSICBRAINZ_ALBUMSTATUS'),
ASFStorageStyle('MusicBrainz/Album Status'),
)
media = MediaField(
MP3StorageStyle('TMED'),
MP4StorageStyle('----:com.apple.iTunes:MEDIA'),
StorageStyle('MEDIA'),
ASFStorageStyle('WM/Media'),
)
albumdisambig = MediaField(
# This tag mapping was invented for beets (not used by Picard, etc).
MP3DescStorageStyle(u'MusicBrainz Album Comment'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Album Comment'),
StorageStyle('MUSICBRAINZ_ALBUMCOMMENT'),
ASFStorageStyle('MusicBrainz/Album Comment'),
)
# Release date.
date = DateField(
MP3StorageStyle('TDRC'),
MP4StorageStyle('\xa9day'),
StorageStyle('DATE'),
ASFStorageStyle('WM/Year'),
year=(StorageStyle('YEAR'),))
year = date.year_field()
month = date.month_field()
day = date.day_field()
# *Original* release date.
original_date = DateField(
MP3StorageStyle('TDOR'),
MP4StorageStyle('----:com.apple.iTunes:ORIGINAL YEAR'),
StorageStyle('ORIGINALDATE'),
ASFStorageStyle('WM/OriginalReleaseYear'))
original_year = original_date.year_field()
original_month = original_date.month_field()
original_day = original_date.day_field()
# Nonstandard metadata.
artist_credit = MediaField(
MP3DescStorageStyle(u'Artist Credit'),
MP4StorageStyle('----:com.apple.iTunes:Artist Credit'),
StorageStyle('ARTIST_CREDIT'),
ASFStorageStyle('beets/Artist Credit'),
)
albumartist_credit = MediaField(
MP3DescStorageStyle(u'Album Artist Credit'),
MP4StorageStyle('----:com.apple.iTunes:Album Artist Credit'),
StorageStyle('ALBUMARTIST_CREDIT'),
ASFStorageStyle('beets/Album Artist Credit'),
)
# Legacy album art field
art = CoverArtField()
# Image list
images = ImageListField()
# MusicBrainz IDs.
mb_trackid = MediaField(
MP3UFIDStorageStyle(owner='http://musicbrainz.org'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Track Id'),
StorageStyle('MUSICBRAINZ_TRACKID'),
ASFStorageStyle('MusicBrainz/Track Id'),
)
mb_albumid = MediaField(
MP3DescStorageStyle(u'MusicBrainz Album Id'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Album Id'),
StorageStyle('MUSICBRAINZ_ALBUMID'),
ASFStorageStyle('MusicBrainz/Album Id'),
)
mb_artistid = MediaField(
MP3DescStorageStyle(u'MusicBrainz Artist Id'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Artist Id'),
StorageStyle('MUSICBRAINZ_ARTISTID'),
ASFStorageStyle('MusicBrainz/Artist Id'),
)
mb_albumartistid = MediaField(
MP3DescStorageStyle(u'MusicBrainz Album Artist Id'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Album Artist Id'),
StorageStyle('MUSICBRAINZ_ALBUMARTISTID'),
ASFStorageStyle('MusicBrainz/Album Artist Id'),
)
mb_releasegroupid = MediaField(
MP3DescStorageStyle(u'MusicBrainz Release Group Id'),
MP4StorageStyle('----:com.apple.iTunes:MusicBrainz Release Group Id'),
StorageStyle('MUSICBRAINZ_RELEASEGROUPID'),
ASFStorageStyle('MusicBrainz/Release Group Id'),
)
# Acoustid fields.
acoustid_fingerprint = MediaField(
MP3DescStorageStyle(u'Acoustid Fingerprint'),
MP4StorageStyle('----:com.apple.iTunes:Acoustid Fingerprint'),
StorageStyle('ACOUSTID_FINGERPRINT'),
ASFStorageStyle('Acoustid/Fingerprint'),
)
acoustid_id = MediaField(
MP3DescStorageStyle(u'Acoustid Id'),
MP4StorageStyle('----:com.apple.iTunes:Acoustid Id'),
StorageStyle('ACOUSTID_ID'),
ASFStorageStyle('Acoustid/Id'),
)
# ReplayGain fields.
rg_track_gain = MediaField(
MP3DescStorageStyle(
u'REPLAYGAIN_TRACK_GAIN',
float_places=2, suffix=u' dB'
),
MP3DescStorageStyle(
u'replaygain_track_gain',
float_places=2, suffix=u' dB'
),
MP3SoundCheckStorageStyle(
key='COMM',
index=0, desc=u'iTunNORM',
id3_lang='eng'
),
MP4StorageStyle(
'----:com.apple.iTunes:replaygain_track_gain',
float_places=2, suffix=' dB'
),
MP4SoundCheckStorageStyle(
'----:com.apple.iTunes:iTunNORM',
index=0
),
StorageStyle(
u'REPLAYGAIN_TRACK_GAIN',
float_places=2, suffix=u' dB'
),
ASFStorageStyle(
u'replaygain_track_gain',
float_places=2, suffix=u' dB'
),
out_type=float
)
rg_album_gain = MediaField(
MP3DescStorageStyle(
u'REPLAYGAIN_ALBUM_GAIN',
float_places=2, suffix=u' dB'
),
MP3DescStorageStyle(
u'replaygain_album_gain',
float_places=2, suffix=u' dB'
),
MP4StorageStyle(
'----:com.apple.iTunes:replaygain_album_gain',
float_places=2, suffix=' dB'
),
StorageStyle(
u'REPLAYGAIN_ALBUM_GAIN',
float_places=2, suffix=u' dB'
),
ASFStorageStyle(
u'replaygain_album_gain',
float_places=2, suffix=u' dB'
),
out_type=float
)
rg_track_peak = MediaField(
MP3DescStorageStyle(
u'REPLAYGAIN_TRACK_PEAK',
float_places=6
),
MP3DescStorageStyle(
u'replaygain_track_peak',
float_places=6
),
MP3SoundCheckStorageStyle(
key=u'COMM',
index=1, desc=u'iTunNORM',
id3_lang='eng'
),
MP4StorageStyle(
'----:com.apple.iTunes:replaygain_track_peak',
float_places=6
),
MP4SoundCheckStorageStyle(
'----:com.apple.iTunes:iTunNORM',
index=1
),
StorageStyle(u'REPLAYGAIN_TRACK_PEAK', float_places=6),
ASFStorageStyle(u'replaygain_track_peak', float_places=6),
out_type=float,
)
rg_album_peak = MediaField(
MP3DescStorageStyle(
u'REPLAYGAIN_ALBUM_PEAK',
float_places=6
),
MP3DescStorageStyle(
u'replaygain_album_peak',
float_places=6
),
MP4StorageStyle(
'----:com.apple.iTunes:replaygain_album_peak',
float_places=6
),
StorageStyle(u'REPLAYGAIN_ALBUM_PEAK', float_places=6),
ASFStorageStyle(u'replaygain_album_peak', float_places=6),
out_type=float,
)
initial_key = MediaField(
MP3StorageStyle('TKEY'),
MP4StorageStyle('----:com.apple.iTunes:initialkey'),
StorageStyle('INITIALKEY'),
ASFStorageStyle('INITIALKEY'),
)
@property
def length(self):
"""The duration of the audio in seconds (a float)."""
return self.mgfile.info.length
@property
def samplerate(self):
"""The audio's sample rate (an int)."""
if hasattr(self.mgfile.info, 'sample_rate'):
return self.mgfile.info.sample_rate
elif self.type == 'opus':
# Opus is always 48kHz internally.
return 48000
return 0
@property
def bitdepth(self):
"""The number of bits per sample in the audio encoding (an int).
Only available for certain file formats (zero where
unavailable).
"""
if hasattr(self.mgfile.info, 'bits_per_sample'):
return self.mgfile.info.bits_per_sample
return 0
@property
def channels(self):
"""The number of channels in the audio (an int)."""
if hasattr(self.mgfile.info, 'channels'):
return self.mgfile.info.channels
return 0
@property
def bitrate(self):
"""The number of bits per seconds used in the audio coding (an
int). If this is provided explicitly by the compressed file
format, this is a precise reflection of the encoding. Otherwise,
it is estimated from the on-disk file size. In this case, some
imprecision is possible because the file header is incorporated
in the file size.
"""
if hasattr(self.mgfile.info, 'bitrate') and self.mgfile.info.bitrate:
# Many formats provide it explicitly.
return self.mgfile.info.bitrate
else:
# Otherwise, we calculate bitrate from the file size. (This
# is the case for all of the lossless formats.)
if not self.length:
# Avoid division by zero if length is not available.
return 0
size = os.path.getsize(self.path)
return int(size * 8 / self.length)
@property
def format(self):
"""A string describing the file format/codec."""
return TYPES[self.type]
| mit | -502,309,375,197,695,400 | 32.983051 | 79 | 0.597962 | false |
MadsJensen/agency_connectivity | predict_grp_2.py | 1 | 4855 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 1 19:20:31 2017
@author: au194693
"""
import pandas as pd
import numpy as np
# import matplotlib.pyplot as plt
from sklearn.model_selection import (StratifiedKFold, cross_val_score,
permutation_test_score)
from sklearn.linear_model import LogisticRegressionCV, LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import scale
from sklearn.feature_selection import RFECV
import os
os.chdir('/Users/au194693/projects/agency_connectivity/data')
# Create dataframe to extract values for X
pow_data = pd.read_csv("power_data_no-step_both_grps_all-freqs.csv")
pow_data = pow_data[pow_data.trial_status == True]
pow_data = pow_data[(pow_data.band != "theta") & (pow_data.band != "gamma2")]
pow_data_mean = pow_data.groupby(by=["subject", "group", "band",
"label"]).mean().reset_index()
pow_data_mean = pow_data_mean.sort_values(
by=["group", "subject", "band", "label"])
labels = list(pow_data.label.unique())
subjects = list(pow_data_mean.subject.unique())
pow_X = np.empty([len(pow_data_mean.subject.unique()), 84])
for i, sub in enumerate(subjects):
fdata = pow_data_mean[pow_data_mean.subject == sub]
pow_X[i, :] = fdata.r.values.reshape(-1)
ispc_data = pd.read_csv("itc_data_no-step_both_grps_all-freqs.csv")
ispc_data = ispc_data[ispc_data.trial_status == True]
ispc_data = ispc_data[(ispc_data.band != "theta") &
(ispc_data.band != "gamma2")]
ispc_data_mean = ispc_data.groupby(by=["subject", "group", "band",
"label"]).mean().reset_index()
ispc_data_mean = ispc_data_mean.sort_values(
by=["group", "subject", "band", "label"])
# Dataframe to generate labels
df = ispc_data_mean[ispc_data_mean.subject == "p21"][["band", "label"]]
df = df.append(df)
df["condition"] = "nan"
df["condition"][84:] = "ispc"
df["condition"][:84] = "power"
df["comb_name"] = df.condition + "_" + df.band + "_" + df.label
labels = list(df.comb_name.get_values())
ispc_X = np.empty([len(ispc_data_mean.subject.unique()), 84])
for i, sub in enumerate(subjects):
fdata = ispc_data_mean[ispc_data_mean.subject == sub]
ispc_X[i, :] = fdata.ISPC.values.reshape(-1)
# Concatenate into X and create y
X = np.concatenate((pow_X, ispc_X), axis=1)
y = np.concatenate((np.zeros(18), np.ones(18)))
# Scale X
pow_scl = scale(pow_X)
ispc_scl = scale(ispc_X)
X_scl = np.concatenate((pow_scl, ispc_scl), axis=1)
cv = StratifiedKFold(n_splits=9, shuffle=True)
# Logistic Regression with cross validation for C
scores = []
coefs = []
Cs = []
LRs = []
for train, test in cv.split(X, y):
# clf = LogisticRegression(C=1)
clf = LogisticRegressionCV()
clf.fit(X[train], y[train])
y_pred = clf.predict(X[test])
scores.append(roc_auc_score(y[test], y_pred))
coefs.append(clf.coef_)
Cs.append(clf.C_)
LRs.append(clf)
lr_mean = LogisticRegression()
lr_mean.coef_ = np.asarray(coefs).mean(axis=0)
lr_mean.C = np.asarray(Cs).mean()
lr_mean.intercept_ = np.asarray([est.intercept_ for est in LRs]).mean()
lr_coef_mean = np.asarray(coefs).mean(axis=0)
lr_coef_std = np.asarray(coefs).std(axis=0)
cv_scores = cross_val_score(
lr_mean, X, y, scoring="roc_auc", cv=StratifiedKFold(9))
score_full_X, perm_scores_full_X, pvalue_full_X = permutation_test_score(
lr_mean,
X,
y,
scoring="roc_auc",
cv=StratifiedKFold(9),
n_permutations=2000,
n_jobs=2)
lr_coef_or_mean = np.exp(lr_coef_mean)
lr_coef_or_std = np.exp(lr_coef_std)
# plt.rc('xtick', labelsize=5)
# plt.figure()
# plt.plot(lr_coef_mean.T, 'b', linewidth=1)
# plt.plot(lr_coef_mean.T + lr_coef_sem.T, 'b--', linewidth=1)
# plt.plot(lr_coef_mean.T - lr_coef_sem.T, 'b--', linewidth=1)
# plt.xticks(np.arange(0, 168, 1), labels, rotation='vertical')
# plt.margins(0.4)
# # Tweak spacing to prevent clipping of tick-labels
# plt.subplots_adjust(bottom=0.15)
rfecv = RFECV(
estimator=lr_mean, step=1, cv=StratifiedKFold(9), scoring='roc_auc')
rfecv.fit(X, y)
X_rfecv = rfecv.transform(X)
rfecv_scores = cross_val_score(
lr_mean, X_rfecv, y, scoring="roc_auc", cv=StratifiedKFold(9))
score_rfecv, perm_scores_rfecv, pvalue_rfecv = permutation_test_score(
lr_mean,
X_rfecv,
y,
scoring="roc_auc",
cv=StratifiedKFold(9),
n_permutations=2000,
n_jobs=2)
# printing results
print("score no reduction: %s (std %s)" % (cv_scores.mean(), cv_scores.std()))
print("rfecv number of features: %s" % rfecv.n_features_)
print("score rfecv: %s (std %s)" % (rfecv_scores.mean(), rfecv_scores.std()))
print("permutation result (full): %s, p-value: %s" %
(score_full_X, pvalue_full_X))
print("permutation result (rfecv): %s, p-value: %s" %
(score_rfecv, pvalue_rfecv))
| bsd-3-clause | 8,815,647,184,900,538,000 | 30.121795 | 78 | 0.651905 | false |
Hwesta/advent-of-code | aoc2017/day19.py | 1 | 4797 | #!/usr/bin/env python
"""
--- Day 19: A Series of Tubes ---
Somehow, a network packet got lost and ended up here. It's trying to follow a routing diagram (your puzzle input), but it's confused about where to go.
Its starting point is just off the top of the diagram. Lines (drawn with |, -, and +) show the path it needs to take, starting by going down onto the only line connected to the top of the diagram. It needs to follow this path until it reaches the end (located somewhere within the diagram) and stop there.
Sometimes, the lines cross over each other; in these cases, it needs to continue going the same direction, and only turn left or right when there's no other option. In addition, someone has left letters on the line; these also don't change its direction, but it can use them to keep track of where it's been. For example:
|
| +--+
A | C
F---|----E|--+
| | | D
+B-+ +--+
Given this diagram, the packet needs to take the following path:
Starting at the only line touching the top of the diagram, it must go down, pass through A, and continue onward to the first +.
Travel right, up, and right, passing through B in the process.
Continue down (collecting C), right, and up (collecting D).
Finally, go all the way left through E and stopping at F.
Following the path to the end, the letters it sees on its path are ABCDEF.
The little packet looks up at you, hoping you can help it find the way. What letters will it see (in the order it would see them) if it follows the path? (The routing diagram is very wide; make sure you view it without line wrapping.)
--- Part Two ---
The packet is curious how many steps it needs to go.
For example, using the same routing diagram from the example above...
|
| +--+
A | C
F---|--|-E---+
| | | D
+B-+ +--+
...the packet would go:
6 steps down (including the first line at the top of the diagram).
3 steps right.
4 steps up.
3 steps right.
4 steps down.
3 steps right.
2 steps up.
13 steps left (including the F it stops on).
This would result in a total of 38 steps.
How many steps does the packet need to go?
"""
from __future__ import print_function
import os
import string
def move_direction(x, y, direction):
if direction == 'U':
y -= 1
elif direction == 'D':
y += 1
elif direction == 'R':
x += 1
elif direction == 'L':
x -= 1
return x, y
def get_data(data, x, y):
try:
return data[y][x]
except IndexError:
return ' '
def generate_directions(data, x, y, direction, letters=False):
if letters:
addwith = string.ascii_letters
else:
addwith = ''
new_dirs = set()
if get_data(data, x - 1, y) in '-' + addwith:
new_dirs.add('L')
if get_data(data, x + 1, y) in '-' + addwith:
new_dirs.add('R')
if get_data(data, x, y + 1) in '|' + addwith:
new_dirs.add('D')
if get_data(data, x, y - 1) in '|' + addwith:
new_dirs.add('U')
# Don't double back
if direction == 'U':
new_dirs -= set('D')
elif direction == 'D':
new_dirs -= set('U')
elif direction == 'R':
new_dirs -= set('L')
elif direction == 'L':
new_dirs -= set('R')
return new_dirs
def solve(data, flag=False):
data = data.split('\n')
x, y = 0, 0
x = data[0].index('|')
letters = []
direction = 'D'
stepcount = 0
while data[y][x] != ' ':
active = get_data(data, x, y)
if active == '+':
# Where to go next?
# Never 2 corners next to each other
# Don't go back same way
# Always - | or letter, try the letter second
new_dirs = generate_directions(data, x, y, direction)
if len(new_dirs) == 0:
# print('Check for letters')
new_dirs = generate_directions(data, x, y, direction, letters=True)
if len(new_dirs) == 0:
print('Nowhere to go. Done??')
break
if len(new_dirs) > 1:
print("Too many direction options!")
break
direction = new_dirs.pop()
elif active not in '|-+':
letters.append(active)
x, y = move_direction(x, y, direction)
stepcount += 1
if stepcount > 5000000:
print('Too far!')
break
if flag:
return stepcount
else:
return ''.join(letters)
if __name__ == '__main__':
this_dir = os.path.dirname(__file__)
with open(os.path.join(this_dir, 'day19.input')) as f:
data = f.read()
print('The packet sees', solve(data, False))
print('The packet takes', solve(data, True), 'steps.')
| mit | 436,195,257,876,169,500 | 30.768212 | 321 | 0.583281 | false |
pagarme/pagarme-python | tests/plan_test.py | 1 | 1174 | from pagarme import plan
from tests.resources.dictionaries import plan_dictionary
import time
def test_create_boleto_plan():
_plan = plan.create(plan_dictionary.BOLETO_PLAN)
assert _plan['payment_methods'] == ["boleto"]
def test_create_credit_card_plan():
_plan = plan.create(plan_dictionary.CREDIT_CARD_PLAN)
assert _plan['payment_methods'] == ["credit_card"]
def test_create_no_trial_plan():
_plan = plan.create(plan_dictionary.NO_TRIAL_PLAN)
assert _plan['trial_days'] == 0
def test_create_trial_plan():
_plan = plan.create(plan_dictionary.TRIAL_PLAN)
assert _plan['trial_days'] == 30
def test_find_all_plans():
all_plans = plan.find_all()
assert all_plans is not None
def test_find_by(retry):
_plan = plan.create(plan_dictionary.TRIAL_PLAN)
search_params = {'id': str(_plan['id'])}
find_plan = retry(lambda: plan.find_by(search_params))
assert _plan['id'] == find_plan[0]['id']
def test_update():
_plan = plan.create(plan_dictionary.TRIAL_PLAN)
assert _plan['trial_days'] == 30
update_plan = plan.update(_plan['id'], plan_dictionary.UPDATE_PLAN)
assert update_plan['trial_days'] == 7
| mit | 4,295,825,015,958,596,600 | 26.952381 | 71 | 0.67121 | false |
leemac/rocketlaunches | rocketlaunches/rocketapp/migrations/0007_auto_20150429_1228.py | 1 | 1693 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('rocketapp', '0006_auto_20150429_1227'),
]
operations = [
migrations.CreateModel(
name='PayloadFamily',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('name', models.CharField(max_length=200)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='payload',
name='payload_family',
field=models.ForeignKey(default='', to='rocketapp.PayloadFamily', null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='launch',
name='created_date',
field=models.DateTimeField(default=datetime.datetime(2015, 4, 29, 12, 28, 52, 709255), verbose_name='date created'),
preserve_default=True,
),
migrations.AlterField(
model_name='rocket',
name='created_date',
field=models.DateTimeField(default=datetime.datetime(2015, 4, 29, 12, 28, 52, 706469), verbose_name='date created'),
preserve_default=True,
),
migrations.AlterField(
model_name='subscriber',
name='created_date',
field=models.DateTimeField(default=datetime.datetime(2015, 4, 29, 12, 28, 52, 710867), verbose_name='date created'),
preserve_default=True,
),
]
| mit | 7,374,288,151,315,174,000 | 33.55102 | 128 | 0.565859 | false |
wichmann/bbss | gui/gui.py | 1 | 20958 |
"""
bbss - BBS Student Management
Default graphical user interface for bbss.
Main window was generated by calling:
pyuic5 bbss_tabbed_gui.ui > main.py
Created on Mon Feb 23 15:08:56 2014
@author: Christian Wichmann
"""
import os
import sys
import logging
from PyQt5 import QtGui
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from gui.main import Ui_BBSS_Main_Window
from bbss import bbss
__all__ = ['start_gui']
logger = logging.getLogger('bbss.gui')
APP_NAME = "BBSS"
class StudentTableFilterProxyModel(QtCore.QSortFilterProxyModel):
"""Filters student table for regular expression in all columns."""
def filterAcceptsRow(self, sourceRow, sourceParent):
index0 = self.sourceModel().index(sourceRow, 0, sourceParent)
index1 = self.sourceModel().index(sourceRow, 1, sourceParent)
index2 = self.sourceModel().index(sourceRow, 2, sourceParent)
return (self.filterRegExp().indexIn(self.sourceModel().data(index0)) >= 0
or self.filterRegExp().indexIn(self.sourceModel().data(index1)) >= 0
or self.filterRegExp().indexIn(self.sourceModel().data(index2)) >= 0)
class StudentTableModel(QtCore.QAbstractTableModel):
def __init__(self, student_list, parent=None):
super(StudentTableModel, self).__init__()
self.student_list = student_list
self.column_list = ('surname', 'firstname', 'classname', 'birthday')
self.column_list_i18n = ('Nachname', 'Vorname', 'Klasse', 'Geburtstag')
def update(self, student_list):
self.student_list = student_list
self.layoutChanged.emit()
def rowCount(self, parent=QtCore.QModelIndex()):
return len(self.student_list)
def columnCount(self, parent=QtCore.QModelIndex()):
return len(self.column_list)
def data(self, index, role=QtCore.Qt.DisplayRole):
if not index.isValid():
return ''
elif role != QtCore.Qt.DisplayRole:
return None
student = self.student_list[index.row()]
return '{0}'.format(getattr(student, self.column_list[index.column()]))
def student_data(self, index, role=QtCore.Qt.DisplayRole):
student = self.student_list[index.row()]
return student
def headerData(self, count, orientation, role):
if role == QtCore.Qt.DisplayRole:
if orientation == QtCore.Qt.Horizontal:
return self.column_list_i18n[count]
elif orientation == QtCore.Qt.Vertical:
return str(count+1)
def setData(self, index, value, role=QtCore.Qt.DisplayRole):
message = 'Updating of student data (row={row}, column={column}) not yet implemented.'
logger.warning(message.format(row=index.row(), column=index.column()))
def flags(self, index):
# TODO: Check whether data should be editable (QtCore.Qt.ItemIsEditable)?
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
class DateDialog(QtWidgets.QDialog):
"""
Shows a dialog to input a date and time similar to what QtGui.QInputDialog
does for text, integer and floats.
Source: https://stackoverflow.com/a/18202709
Usage: date, time, ok = DateDialog.getDateTime()
"""
def __init__(self, parent=None):
super(DateDialog, self).__init__(parent)
layout = QtWidgets.QVBoxLayout(self)
# nice widget for editing the date
self.datetime = QtWidgets.QDateTimeEdit(self)
self.datetime.setCalendarPopup(True)
self.datetime.setDateTime(QtCore.QDateTime.currentDateTime().addYears(-2))
layout.addWidget(self.datetime)
# OK and Cancel buttons
buttons = QtWidgets.QDialogButtonBox(
QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel,
QtCore.Qt.Horizontal, self)
buttons.accepted.connect(self.accept)
buttons.rejected.connect(self.reject)
layout.addWidget(buttons)
# get current date and time from the dialog
def dateTime(self):
return self.datetime.dateTime()
# static method to create the dialog and return (date, time, accepted)
@staticmethod
def getDateTime(parent=None):
dialog = DateDialog(parent)
result = dialog.exec_()
date = dialog.dateTime()
return (date.date(), date.time(), result == QtWidgets.QDialog.Accepted)
class BbssGui(QtWidgets.QMainWindow, Ui_BBSS_Main_Window):
"""Main window for bbss"""
def __init__(self, parent=None):
"""Initialize main window for bbss."""
logger.info('Building main window of bbss...')
QtWidgets.QMainWindow.__init__(self, parent)
self.FILENAME = ''
self.setupUi(self)
self.setup_table_models()
self.setup_combo_boxes()
self.center_on_screen()
self.set_signals_and_slots()
self.search_students_tableView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.search_students_tableView.customContextMenuRequested.connect(self.show_context_menu)
def show_context_menu(self, pos):
menu = QtWidgets.QMenu(self)
copy_action = QtWidgets.QAction(QtGui.QIcon(), 'Kopieren', self)
#copy_action.triggered.connect(self.testfkt)
menu.addAction(copy_action)
history_action = QtWidgets.QAction(QtGui.QIcon(), 'Historie', self)
menu.addAction(history_action)
export_action = QtWidgets.QAction(QtGui.QIcon(), 'Exportieren...', self)
menu.addAction(export_action)
global_coordinates = self.search_students_tableView.mapToGlobal(pos)
# show menu and wait synchronous for click (asynchronous call: menu.popup)
action = menu.exec_(global_coordinates)
if action == copy_action:
model = self.search_students_tableView.model()
index = self.search_students_tableView.indexAt(pos)
if 0 <= index.row() < model.rowCount():
student = model.student_data(index)
clipboard = QtWidgets.QApplication.clipboard()
clipboard.clear(mode=clipboard.Clipboard)
copy_text = ','.join((student.firstname, student.surname, student.classname, student.user_id, student.password))
clipboard.setText(copy_text, mode=clipboard.Clipboard)
elif action == history_action:
model = self.search_students_tableView.model()
index = self.search_students_tableView.indexAt(pos)
if 0 <= index.row() < model.rowCount():
student = model.student_data(index)
data = bbss.get_class_history(student.user_id)
template = 'Klasse {} von {} bis {}'
message = '\n'.join([template.format(d[0], d[1], d[2]) for d in data])
QtWidgets.QMessageBox.information(self, 'Klassenhistorie',message, QtWidgets.QMessageBox.Ok)
elif action == export_action:
model = self.search_students_tableView.model()
selected_rows = self.search_students_tableView.selectionModel().selectedRows()
selected_students = [model.student_data(r) for r in selected_rows]
output_file = QtWidgets.QFileDialog.getSaveFileName(self, 'Wähle PDF-Datei zum Export...', '', 'PDF-Datei (*.pdf)')[0]
if output_file:
bbss.export_pdf_file(output_file, selected_students)
def setup_table_models(self):
"""Sets up table view and its models."""
# set up import table view
self.import_table_model = StudentTableModel(bbss.student_list)
self.proxy_import_table_model = StudentTableFilterProxyModel()
self.proxy_import_table_model.setSourceModel(self.import_table_model)
self.proxy_import_table_model.setDynamicSortFilter(True)
self.import_data_tableview.setModel(self.proxy_import_table_model)
self.import_data_tableview.horizontalHeader().setSectionResizeMode (
QtWidgets.QHeaderView.Stretch)
# set up export table views
self.added_students_table_model = StudentTableModel(list())
self.removed_students_table_model = StudentTableModel(list())
self.added_students_tableview.setModel(
self.added_students_table_model)
self.removed_students_tableview.setModel(
self.removed_students_table_model)
self.added_students_tableview.horizontalHeader().setSectionResizeMode (
QtWidgets.QHeaderView.Stretch)
self.removed_students_tableview.horizontalHeader().setSectionResizeMode (
QtWidgets.QHeaderView.Stretch)
# set up search table views
self.search_students_table_model = StudentTableModel(list())
self.search_students_tableView.setModel(self.search_students_table_model)
self.search_students_tableView.horizontalHeader().setSectionResizeMode(
QtWidgets.QHeaderView.Stretch)
self.search_students_tableView.setSelectionBehavior(
QtWidgets.QAbstractItemView.SelectRows)
self.search_students_tableView.setSelectionMode(
QtWidgets.QAbstractItemView.ExtendedSelection)
def setup_combo_boxes(self):
# TODO get values from bbss package
export_formats = ('LogoDidact', 'Radius-Server', 'Active Directory',
'Moodle', 'WebUntis', 'LabSoft Classroom Manager')
self.export_format_combobox.addItems(export_formats)
def center_on_screen(self):
"""Centers the window on the screen."""
screen = QtWidgets.QDesktopWidget().screenGeometry()
size = self.geometry()
self.move((screen.width() - size.width()) / 2,
(screen.height() - size.height()) / 2)
def set_signals_and_slots(self):
"""Sets all signals and slots for main window."""
self.import_data_button.clicked.connect(self.on_import_data)
self.load_file_button.clicked.connect(self.on_load_file)
self.delete_database_button.clicked.connect(self.on_delete_database)
self.import_filter_text.textEdited.connect(self.on_import_filter)
self.old_import_number.textEdited.connect(
self.on_update_export_changeset)
self.new_import_number.textEdited.connect(
self.on_update_export_changeset)
self.export_data_button.clicked.connect(self.on_export_data)
self.search_student_text.textEdited.connect(self.on_search_student)
self.search_students_tableView.selectionModel().selectionChanged.connect(
self.on_select_student_from_search)
self.TaskTabbedPane.currentChanged.connect(self.on_tab_changed)
self.menu_exit.triggered.connect(self.close)
self.clear_search_field_button.clicked.connect(self.search_student_text.clear)
self.menu_delete_database.triggered.connect(self.on_delete_database)
self.menu_delete_old_data.triggered.connect(self.on_delete_old_data)
self.menu_compare_mail_addresses.triggered.connect(self.on_compare_mail_addresses)
# TODO Connect options check boxes with functions.
# (replace_classnames_checkbox, replace_characters_checkbox, store_in_db_checkbox)
@QtCore.pyqtSlot()
def on_load_file(self):
logger.info('Loading file with student data...')
# store only first element of tuple (new in PyQt5)
self.FILENAME = QtWidgets.QFileDialog\
.getOpenFileName(self, 'Öffne Schülerdatendatei...', '',
'BBS-Verwaltung (*.csv);;BBS-Planung (*.xls *.xlsx)')[0]
logger.info('Student data file chosen: "{0}".'.format(self.FILENAME))
_, ext = os.path.splitext(self.FILENAME)
if ext == '.csv':
bbss.import_bbs_verwaltung_csv_file(self.FILENAME)
elif ext == '.xls' or ext == '.xlsx':
bbss.import_excel_file(self.FILENAME)
else:
logger.warn('Given file format can not be imported.')
self.import_table_model.update(bbss.student_list)
self.proxy_import_table_model.setSourceModel(self.import_table_model)
self.import_data_tableview.resizeColumnsToContents()
@QtCore.pyqtSlot()
def on_import_data(self):
logger.info('Importing data into database...')
self.progress = QtWidgets.QProgressDialog('Importiere Schüler...',
'Abbrechen', 0, 0, self)
self.progress.setWindowModality(QtCore.Qt.WindowModal)
self.progress.canceled.connect(self.progress.close)
self.progress.show()
def update_progressbar(current, complete):
self.progress.setRange(0, complete)
self.progress.setValue(current+1)
bbss.store_students_db(self.FILENAME, callback=update_progressbar)
message = "Schülerdaten aus Datei {0} wurden erfolgreich eingelesen."\
.format(self.FILENAME)
QtWidgets.QMessageBox.information(self, 'Schülerdaten importiert.',
message, QtWidgets.QMessageBox.Ok)
@QtCore.pyqtSlot()
def on_delete_database(self):
logger.info('Deleting database file...')
message = "Soll die Datenbankdatei wirklich gelöscht werden? "\
"Alle gespeicherten Informationen gehen dabei verloren!"
reply = QtWidgets.QMessageBox.question(self, 'Datenbank löschen?',
message, QtWidgets.QMessageBox.Yes,
QtWidgets.QMessageBox.No)
if reply == QtWidgets.QMessageBox.Yes:
bbss.clear_database()
@QtCore.pyqtSlot()
def on_delete_old_data(self):
logger.info('Deleting old data from database...')
date, _, ok = DateDialog.getDateTime()
logger.debug('Ask user for date limit: {}'.format(date if ok else ''))
if ok:
self.progress = QtWidgets.QProgressDialog('Lösche alte Daten...', 'Abbrechen', 0, 0, self)
self.progress.setWindowModality(QtCore.Qt.WindowModal)
self.progress.canceled.connect(self.progress.close)
self.progress.show()
def update_progressbar(current, complete):
self.progress.setRange(0, complete)
self.progress.setValue(current+1)
bbss.delete_old_data(date.toString('yyyy-MM-dd'), callback=update_progressbar)
@QtCore.pyqtSlot()
def on_compare_mail_addresses(self):
logger.info('Comparing mail addresses from Moodle user list...')
moodle_user_list = QtWidgets.QFileDialog.getOpenFileName(self, 'Öffne Nutzerliste von Moodle...', '',
'Nutzerliste von Moodle (*.csv)')[0]
if moodle_user_list:
logger.info('Moodle user list file chosen: "{0}".'.format(moodle_user_list))
differences_export_file = 'Unterschiede_Mail-Adressen.csv'
bbss.compare_mail_addresses(moodle_user_list, differences_export_file)
@QtCore.pyqtSlot(str)
def on_import_filter(self, filter_string):
if filter_string:
logger.debug('Filtering for {0}...'.format(filter_string))
syntax = QtCore.QRegExp.PatternSyntax(QtCore.QRegExp.Wildcard)
case_sensitivity = QtCore.Qt.CaseInsensitive
regExp = QtCore.QRegExp(filter_string, case_sensitivity, syntax)
self.proxy_import_table_model.setFilterRegExp(regExp)
count = self.proxy_import_table_model.rowCount()
self.search_result_label.setText('{} Schüler gefunden...'.format(count))
else:
self.search_result_label.setText('')
@QtCore.pyqtSlot(str)
def on_search_student(self, search_string):
"""Search database each time the search text field was edited. The
result is shown in the search table view.
"""
logger.debug('Searching for "{}"...'.format(search_string))
result = bbss.search_student_in_database(search_string)
self.search_students_table_model.update(result)
@QtCore.pyqtSlot(QtCore.QItemSelection, QtCore.QItemSelection)
def on_select_student_from_search(self, selected, deselected):
"""Show student information in text boxes when student was selected in
search table view."""
if selected:
# get selected student from model
model_index = selected[0].topLeft()
selected_student = self.search_students_table_model.student_data(model_index)
# fill in text boxes with student information
self.result_username_text.setText(selected_student.user_id)
imports = [str(i) for i in bbss.get_imports_for_student(selected_student)]
self.result_imports_text.setText(', '.join(imports) +
' - GUID: {}'.format(selected_student.guid))
self.result_birthday_text.setText(selected_student.birthday)
self.result_name_text.setText(selected_student.firstname)
self.result_class_text.setText(selected_student.classname)
self.result_password_text.setText(selected_student.password)
self.result_surname_text.setText(selected_student.surname)
@QtCore.pyqtSlot()
def on_update_export_changeset(self):
self.update_changeset_from_database()
def update_changeset_from_database(self):
"""Updates import IDs and changeset based on currently set values in
user interface."""
try:
old_id = int(self.old_import_number.text())
except:
logger.warn('Import IDs must be integer values.')
old_id = 0
try:
new_id = int(self.new_import_number.text())
except:
logger.warn('Import IDs must be integer values.')
new_id = 0
self.changeset = bbss.generate_changeset(old_import_id=old_id,
new_import_id=new_id)
logger.debug('{} added, {} changed, {} removed'
.format(*self.changeset.get_statistics()))
# update tables for added and removed students
self.added_students_table_model = StudentTableModel(
self.changeset.students_added)
self.removed_students_table_model = StudentTableModel(
self.changeset.students_removed)
self.added_students_tableview.setModel(
self.added_students_table_model)
self.removed_students_tableview.setModel(
self.removed_students_table_model)
# update labels with student count
self.added_student_table_label.setText('Hinzugefügte Schüler ({}):'.format(self.changeset.get_statistics().added))
self.removed_student_table_label.setText('Entfernte Schüler ({}):'.format(self.changeset.get_statistics().removed))
@QtCore.pyqtSlot()
def on_export_data(self):
self.update_changeset_from_database()
export_format = self.export_format_combobox.currentText()
# TODO: Ask for file name after evaluating export format!
export_file = self.get_filename_for_export()
if export_file:
if export_format == 'LogoDidact':
bbss.export_csv_file(export_file, self.changeset)
elif export_format == 'Radius-Server':
bbss.export_radius_file(export_file, self.changeset)
elif export_format == 'Moodle':
bbss.export_moodle_file(export_file, self.changeset)
elif export_format == 'WebUntis':
bbss.export_webuntis_file(export_file, self.changeset)
elif export_format == 'LabSoft Classroom Manager':
bbss.export_labsoft_file(export_file, self.changeset)
else:
logger.warn('Export format not yet implemented.')
message = 'Gewünschtes Exportformat noch nicht implementiert.'
QtWidgets.QMessageBox.information(self, 'Fehler bei Export',
message, QtWidgets.QMessageBox.Ok)
def get_filename_for_export(self):
"""Gets filename for export of student data from user."""
filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Speichere Datei...')[0]
logger.info('Export file chosen: "{0}".'.format(filename))
return filename
@QtCore.pyqtSlot()
def on_tab_changed(self):
if self.TaskTabbedPane.currentIndex() == 1:
self.update_changeset_from_database()
def start_gui():
# make app object global to let it be collected to prevent error messages
# http://stackoverflow.com/questions/27131294/error-qobjectstarttimer-qtimer-can-only-be-used-with-threads-started-with-qt/27155799#27155799
global app
app = QtWidgets.QApplication(sys.argv)
app.setApplicationName(APP_NAME)
main = BbssGui()
main.show()
sys.exit(app.exec_())
if __name__ == "__main__":
start_gui()
| gpl-2.0 | -3,237,691,207,682,409,000 | 45.852349 | 144 | 0.644798 | false |
daniestevez/gr-satellites | python/check_crc16_ccitt.py | 1 | 1642 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Daniel Estevez <[email protected]>
#
# This file is part of gr-satellites
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
import struct
from gnuradio import gr
import numpy
import pmt
from . import hdlc
class check_crc16_ccitt(gr.basic_block):
"""docstring for block check_crc16_ccitt"""
def __init__(self, verbose):
gr.basic_block.__init__(
self,
name='check_crc16_ccitt',
in_sig=[],
out_sig=[])
self.verbose = verbose
self.message_port_register_in(pmt.intern('in'))
self.set_msg_handler(pmt.intern('in'), self.handle_msg)
self.message_port_register_out(pmt.intern('ok'))
self.message_port_register_out(pmt.intern('fail'))
def handle_msg(self, msg_pmt):
msg = pmt.cdr(msg_pmt)
if not pmt.is_u8vector(msg):
print('[ERROR] Received invalid message type. Expected u8vector')
return
packet = pmt.u8vector_elements(msg)
if len(packet) < 3:
return
packet_out = packet[:-2]
msg_out = pmt.cons(pmt.car(msg_pmt),
pmt.init_u8vector(len(packet_out), packet_out))
crc = hdlc.crc_ccitt(packet_out)
packet_crc = struct.unpack('<H', packet[-2:])[0]
if crc == packet_crc:
if self.verbose:
print('CRC OK')
self.message_port_pub(pmt.intern('ok'), msg_out)
else:
if self.verbose:
print('CRC failed')
self.message_port_pub(pmt.intern('fail'), msg_out)
| gpl-3.0 | 5,223,410,564,134,154,000 | 26.830508 | 77 | 0.5676 | false |
dmuellner/fancontrol | statistics.py | 1 | 15513 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
'''
Copyright © 2016 Daniel Müllner <http://danifold.net>
All changes from 2017-12-27 on: Copyright © Google Inc. <http://google.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import sys
if sys.hexversion < 0x03000000:
from ConfigParser import RawConfigParser
else:
from configparser import RawConfigParser
from lxml import etree
import os
import shutil
import subprocess
import tempfile
import time
import datetime
import numpy as np
import calendar
config = RawConfigParser()
config.read('fancontrol.cfg')
w, h = 1440, 600 # graph size
wplus = w + 85 # image size
hplus = h + 75
intervals = [5,10,15,20,25,30,40,50,60,75,100,200,300,600] # divisors of h
today = datetime.date.today()
def nextOffTime(date, starttimestamp):
date = date + datetime.timedelta(days=1)
logfile = config.get('logging', 'logfile')
if date != today:
logfile = logfile + date.strftime('.%Y-%m-%d')
if os.path.isfile(logfile):
for line in open(logfile, 'r'):
entries = [entry.strip() for entry in line.split(',')]
t = time.strptime(entries[0], '%Y-%m-%d %H:%M:%S')
timestamp = calendar.timegm(t)
minute = int(np.floor((timestamp - starttimestamp) / 60))
assert minute >= w, (minute, w)
if entries[4] == 'fan.py' and entries[5] == 'fan':
if entries[6] == 'True':
return
elif entries[6] == 'False':
return timestamp
elif entries[4] == 'menu.py' and entries[5] == 'user':
if entries[6] == 'FanOn':
return
if entries[6] == 'FanOff':
return timestamp
elif entries[4] == 'control.py' and entries[5] in ('Startup', 'Shutdown'):
return timestamp
return time.time()
def lastOnTime(date, starttimestamp):
date = date + datetime.timedelta(days=-1)
logfile = config.get('logging', 'logfile')
if date != today:
logfile = logfile + date.strftime('.%Y-%m-%d')
lastOnTimestamp = None
if os.path.isfile(logfile):
for line in open(logfile, 'r'):
entries = [entry.strip() for entry in line.split(',')]
t = time.strptime(entries[0], '%Y-%m-%d %H:%M:%S')
timestamp = calendar.timegm(t)
minute = int(np.floor((timestamp - starttimestamp) / 60))
assert minute < 0
if entries[4] == 'fan.py' and entries[5] == 'fan':
if entries[6] == 'True':
lastOnTimestamp = timestamp
elif entries[6] == 'False':
lastOnTimestamp = None
elif entries[4] == 'menu.py' and entries[5] == 'user':
if entries[6] == 'FanOn':
lastOnTimestamp = timestamp
elif entries[6] == 'FanOff':
lastOnTimestamp = None
elif entries[4] == 'control.py' and entries[5] in ('Startup', 'Shutdown'):
lastOnTimestamp = None
return lastOnTimestamp
def read_log(*date):
date = datetime.date(*date)
logfile = config.get('logging', 'logfile')
if date != today:
logfile = logfile + date.strftime('.%Y-%m-%d')
t = date.timetuple()
starttimestamp = time.mktime(t)
onTimes = []
offTimes = []
extraOffTimes = [time.time()]
data1 = np.zeros((w, 2))
data2 = np.zeros((w, 2))
num1 = np.zeros((w, 1), dtype=int)
num2 = np.zeros((w, 1), dtype=int)
minT = np.infty
maxT = -minT
for line in open(logfile, 'r'):
entries = [entry.strip() for entry in line.split(',')]
t = time.strptime(entries[0], '%Y-%m-%d %H:%M:%S')
timestamp = calendar.timegm(t)
minute = int(np.floor((timestamp - starttimestamp) / 60))
assert minute >= 0
assert minute < w + 60
if minute >= w:
continue
if entries[4] == 'fan.py' and entries[5] == 'fan':
if entries[6] == 'True':
onTimes.append(timestamp)
elif entries[6] == 'False':
offTimes.append(timestamp)
elif entries[4] == 'control.py' and entries[5] in ('Startup', 'Shutdown'):
extraOffTimes.append(timestamp)
elif entries[4] == 'menu.py' and entries[5] == 'user':
if entries[6] == 'FanOn':
onTimes.append(timestamp)
elif entries[6] == 'FanOff':
offTimes.append(timestamp)
elif entries[4] == 'sensor.py' and entries[5] == 'measurement':
if minute >= 0 and minute<= w:
rH1, T1, tau1, Error1, rH2, T2, tau2, Error2 = entries[6:]
if Error1 == 'False':
T1 = float(T1)
tau1 = float(tau1)
data1[minute] += (T1, tau1)
num1[minute] += 1
if Error2 == 'False':
T2 = float(T2)
tau2 = float(tau2)
data2[minute] += (T2, tau2)
num2[minute] += 1
# Prevent "RuntimeWarning: invalid value encountered in true_divide"
data1 = np.where(num1>0, data1, np.nan) / num1
data2 = np.where(num2>0, data2, np.nan) / num2
minT = np.nanmin([np.nanmin(data1), np.nanmin(data2)])
maxT = np.nanmax([np.nanmax(data1), np.nanmax(data2)])
extraOnTime = lastOnTime(date, starttimestamp)
if extraOnTime is not None:
onTimes.append(extraOnTime)
extraOffTime = nextOffTime(date, starttimestamp)
if extraOffTime is not None:
offTimes.append(extraOffTime)
onTimes.sort()
offTimes.sort()
fanIntervals = []
for onTime in onTimes:
offIndex = np.searchsorted(offTimes, onTime)
if offIndex < len(offTimes):
offTime = offTimes[offIndex]
assert onTime <= offTime, (onTime, offTime)
x1 = int(np.floor((onTime - starttimestamp) / 60.0))
if x1 >= w: continue
x1 = max(0, x1)
x2 = int(np.ceil((offTime - starttimestamp) / 60.0))
if x2 < 0: continue
x2 = min(x2, w-1)
fanIntervals.append((x1, x2))
return data1, data2, minT, maxT, fanIntervals
def plotcurve(SE, elem, points, maxT, minT, color):
if points:
s = ''
for x, y in points:
assert x >= 0 and x < w
s += ' {x},{y:.1f}'.format(x=x, y=y).rstrip('0').rstrip('.')
SE(elem, 'polyline', points=s[1:], style="stroke:" + color)
def plot(SE, elem, data, maxT, minT, color):
points = []
for x, T in enumerate(data):
assert x >= 0 and x < w
if T != T:
plotcurve(SE, elem, points, maxT, minT, color)
points = []
else:
y = (maxT - T) / float(maxT - minT) * h
points.append((x,y))
plotcurve(SE, elem, points, maxT, minT, color)
def make_plot(date, upload=False, mark_end=False):
print("Make plot for {}.".format(date))
year = date.year
month = date.month
day = date.day
data1, data2, minT, maxT, fanIntervals = read_log(year, month, day)
minTf = minT
maxTf = maxT
minT = int(np.floor(minT))
maxT = int(np.ceil(maxT))
spanT = maxT - minT
for dt in intervals:
if dt > spanT:
spanT = dt
break
minT = min(minT, int(np.round((minTf + maxTf - spanT) * .5)))
maxT = minT + spanT
T1color = np.array([0,0,255], dtype=np.uint8)
tau1color = np.array([0, 127, 0], dtype=np.uint8)
T2color = np.array([255,0,0], dtype=np.uint8)
tau2color = np.array([255, 0, 255], dtype=np.uint8)
tempdirname = None
try:
svg = etree.Element('svg',
nsmap={None: 'http://www.w3.org/2000/svg',
'xlink': 'http://www.w3.org/1999/xlink'},
width="{}px".format(wplus),
height="{}px".format(hplus),
viewBox="0 0 {} {}".format(wplus, hplus),
version="1.1")
style = etree.SubElement(svg, 'style', type="text/css")
style.text=etree.CDATA('''\
*{fill:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:round;}\
line{stroke:black;}\
polyline{stroke-linecap:round;}\
text,tspan{stroke:none;fill:black;font-family:sans-serif;font-size:13px;}\
g.ylabel text{dominant-baseline:mathematical;text-anchor:end;}\
rect{fill:rgb(180,180,180)}\
.thin line{stroke-width:.1px}\
line.thicker{stroke-width:.25px}''')
defs = etree.SubElement(svg, 'defs')
SE = etree.SubElement
SE(defs, 'line', id="htick", x1="0", y1="0", x2="0", y2="10")
SE(defs, 'line', id="vtick", x1="0", y1="0", x2="10", y2="0")
SE(svg, 'rect',
width=str(wplus),
height=str(hplus),
style="fill:white")
text = SE(svg, 'text', y="13")
text.text = 'Date: {year:04}-{month:02}-{day:02} '.format(year=year, month=month, day=day)
tspan = SE(text, 'tspan', dx="2em")
tspan.text = 'Legend:'
tspan.tail = ' '
tspan = SE(text, 'tspan', dx=".5em", style="fill:blue")
tspan.text = u'■'
tspan.tail = ' Temperature indoors '
tspan = SE(text, 'tspan', dx="1em", style="fill:green")
tspan.text = u'■'
tspan.tail = ' Dew point indoors '
tspan = SE(text, 'tspan', dx="1em", style="fill:red")
tspan.text = u'■'
tspan.tail = ' Temperature outdoors '
tspan = SE(text, 'tspan', dx="1em", style="fill:magenta")
tspan.text = u'■'
tspan.tail = ' Dew point outdoors'
tspan = SE(text, 'tspan', dx="1em", style="fill:rgb(180,180,180)")
tspan.text = u'■'
tspan.tail = ' Fan is on'
text = SE(svg, 'text', x=str(wplus), y='13', style="text-anchor:end")
text.text = u'Temperature/dew point in °C'
text = SE(svg, 'text', x="0", y=str(h + 72))
text.text = 'Time in hours'
g1 = SE(svg, 'g', transform="translate(44,30)")
for x1, x2 in fanIntervals:
SE(g1, 'rect', x=str(x1), y='.5', width=str(x2-x1+1), height=str(h))
g2 = SE(g1, 'g', transform="translate(.5,.5)")
g3 = SE(g2, 'g', transform="translate(0,{})".format(h))
SE(g3, 'line', x1="0", y1="0", x2=str(w), y2="0")
for x in range(0, w+1, w//24):
use = SE(g3, 'use', x=str(x))
use.set('{http://www.w3.org/1999/xlink}href', "#htick")
g4 = SE(g3, 'g', transform="translate(0,24)", style="text-anchor:middle")
for i, x in enumerate(range(0, w+1, w//24)):
text = SE(g4, 'text', x=str(x))
text.text = str(i % 24)
SE(g2, 'line', x1="0", y1="0", x2="0", y2=str(h))
g9 = SE(g2, 'g', transform="translate(-10,0)")
for T in range(minT, maxT+1, 1):
y = '{:.2f}'.format(h - (T - minT) / float(maxT - minT) * h).rstrip('0').rstrip('.')
use = SE(g9, 'use', y=y)
use.set('{http://www.w3.org/1999/xlink}href', "#vtick")
g10 = SE(g9, 'g', transform="translate(-5,0)")
g10.set('class', "ylabel")
for T in range(minT, maxT+1, 1):
y = '{:.2f}'.format(h - (T - minT) / float(maxT - minT) * h).rstrip('0').rstrip('.')
text = SE(g10, 'text', y=y)
text.text = ('' if T>=0 else u'−') + str(abs(T))
g5 = SE(g2, 'g', transform="translate({},0)".format(w))
SE(g5, 'line', x1="0", y1="0", x2="0", y2=str(h))
g6 = SE(g5, 'g', x="0")
for T in range(minT, maxT+1, 1):
y = '{:.2f}'.format(h - (T - minT) / float(maxT - minT) * h).rstrip('0').rstrip('.')
use = SE(g6, 'use', y=y)
use.set('{http://www.w3.org/1999/xlink}href', "#vtick")
g7 = SE(g6, 'g', transform="translate(40,0)")
g7.set('class', "ylabel")
for T in range(minT, maxT+1, 1):
y = '{:.2f}'.format(h - (T - minT) / float(maxT - minT) * h).rstrip('0').rstrip('.')
text = SE(g7, 'text', y=y)
text.text = ('' if T>=0 else u'−') + str(abs(T))
g8 = SE(g2, 'g')
g8.set('class', "thin")
for T in range(minT, maxT + 1):
y = '{:.2f}'.format(h - (T - minT) / float(maxT - minT) * h).rstrip('0').rstrip('.')
l = SE(g8, 'line', x1="0", y1=y, x2=str(w), y2=y)
if T % 5 == 0:
l.attrib['class'] = 'thicker'
if mark_end:
l = 0
for ii in reversed(range(len(data1))):
if data1[ii,0]==data1[ii,0]:
l = ii + 1
break
SE(g2, 'line',
x1=str(l),
y1="0",
x2=str(l),
y2=str(h - .5),
style="stroke-dasharray:8; stroke:orange")
plot(SE, g2, data1[:,0], maxT, minT, 'blue')
plot(SE, g2, data1[:,1], maxT, minT, 'green')
plot(SE, g2, data2[:,0], maxT, minT, 'red')
plot(SE, g2, data2[:,1], maxT, minT, 'magenta')
ET = etree.ElementTree(svg)
filename = 'fancontrol_{year:04}-{month:02}-{day:02}.svg'.format(
year=year, month=month, day=day)
if upload:
tempdirname = tempfile.mkdtemp()
tempfilename = 'fancontrol.svg.tmp'
tempfilepath = os.path.join(tempdirname, tempfilename)
ET.write(tempfilepath, pretty_print=False)
print('Upload')
retval = subprocess.call(
'/usr/bin/lftp -c "open ftp.kundencontroller.de; '
'cd www/data/fangraphs; '
'put {}; '
'mv {} {}"'
.format(tempfilepath, tempfilename, filename), shell=True)
print('Return value: {}'.format(retval))
if retval != 0:
raise RuntimeError('Upload failed')
else:
dirname = 'graphs'
filepath = os.path.join(dirname, filename)
ET.write(filepath, pretty_print=False)
except:
print('Error!')
raise
finally:
if tempdirname is not None:
shutil.rmtree(tempdirname)
print('Removed temp dir')
if __name__ == "__main__":
if len(sys.argv)==2:
if sys.argv[1]=='all':
startdate = datetime.date(2016,3,16)
enddate = today
dt = datetime.timedelta(days=1)
date = startdate
while date < enddate:
print(date)
make_plot(date)
date += dt
else:
offset = int(sys.argv[1])
dt = datetime.timedelta(days=offset)
make_plot(today - dt,
upload=True,
mark_end=(offset==0))
| gpl-3.0 | 772,540,246,591,323,900 | 36.337349 | 98 | 0.521265 | false |
stevekuznetsov/tito | src/tito/common.py | 1 | 36286 | # Copyright (c) 2008-2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
"""
Common operations.
"""
from __future__ import print_function
from contextlib import contextmanager
import errno
import fileinput
import glob
import os
import pickle
import re
import sys
import subprocess
import shlex
import shutil
import tempfile
from blessings import Terminal
from bugzilla.rhbugzilla import RHBugzilla
from tito.compat import xmlrpclib, getstatusoutput
from tito.exception import TitoException
from tito.exception import RunCommandException
from tito.tar import TarFixer
DEFAULT_BUILD_DIR = "/tmp/tito"
DEFAULT_BUILDER = "builder"
DEFAULT_TAGGER = "tagger"
BUILDCONFIG_SECTION = "buildconfig"
SHA_RE = re.compile(r'\b[0-9a-f]{30,}\b')
# Define some shortcuts to fully qualified Builder classes to make things
# a little more concise for CLI users. Mock is probably the only one this
# is relevant for at this time.
BUILDER_SHORTCUTS = {
'mock': 'tito.builder.MockBuilder',
'mead': 'tito.builder.MeadBuilder',
}
def read_user_config():
    """Read optional ~/.titorc settings into a dict of KEY -> value strings."""
    config = {}
    file_loc = os.path.expanduser("~/.titorc")
    try:
        f = open(file_loc)
    except IOError:
        # File doesn't exist but that's ok because it's optional.
        return config
    with f:
        for line in f.readlines():
            if line.strip() == "":
                continue
            tokens = line.split("=")
            if len(tokens) != 2:
                raise Exception("Error parsing ~/.titorc: %s" % line)
            # Remove whitespace from the keys and values
            config[tokens[0].strip()] = tokens[1].strip()
    return config
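# Illustrative sketch of the ~/.titorc format read_user_config() parses. The
# keys below are examples only (COLOR is consulted by _out() further down in
# this module; SOME_OPTION is made up):
#
#     COLOR = 0
#     SOME_OPTION = some value
#
# would be returned as {'COLOR': '0', 'SOME_OPTION': 'some value'}.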
def extract_sources(spec_file_lines):
"""
Returns a list of sources from the given spec file.
Some of these will be URL's, which is fine they will be ignored.
We're really just after relative filenames that might live in the same
location as the spec file, mostly used with NoTgzBuilder packages.
"""
filenames = []
    source_pattern = re.compile(r'^Source\d+?:\s*(.*)')
for line in spec_file_lines:
match = source_pattern.match(line)
if match:
filenames.append(match.group(1))
return filenames
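# Doctest-style sketch of extract_sources(); the spec lines are invented:
#
#     >>> extract_sources([
#     ...     'Source0: %{name}-%{version}.tar.gz',
#     ...     'Source1: extra-data.tar.gz',
#     ...     'Patch0: fix-build.patch',
#     ... ])
#     ['%{name}-%{version}.tar.gz', 'extra-data.tar.gz']
#
# Patch lines (and anything else not matching Source<N>) are ignored.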
class MissingBugzillaCredsException(TitoException):
pass
class BugzillaExtractor(object):
"""
Parses output of a dist-git commit diff looking for changelog
    entries that look like they reference bugzilla bugs.
Optionally can check bugzilla for required flags on each bug.
"""
def __init__(self, diff_output, required_flags=None,
placeholder_bz=None):
self.diff_output = diff_output
self.required_flags = required_flags
self.placeholder_bz = placeholder_bz
# Tuples of bugzilla ID + commit message we extracted:
self.bzs = []
def extract(self):
self.bzs = self._extract_bzs()
if self.required_flags:
self._check_for_bugzilla_creds()
self.bzs = self._filter_bzs_with_flags()
return self._format_lines()
def _check_for_bugzilla_creds(self):
if not os.path.exists(os.path.expanduser("~/.bugzillarc")):
raise MissingBugzillaCredsException("Missing ~/.bugzillarc")
else:
debug("Found bugzilla credentials in ~/.bugzillarc")
def _extract_bzs(self):
"""
Parses the output of CVS diff or a series of git commit log entries,
looking for new lines which look like a commit of the format:
######: Commit message
Returns a list of lines of text similar to:
Resolves: #XXXXXX - Commit message
If the releaser specifies any required bugzilla flags we will
check each bug found and see if it has all required flags. If not
we skip it. If we end up with *no* bugs with the required flags
our build is likely to fail, so we look for a placeholder bugzilla
        defined in releaser config and use that instead if possible, otherwise
error out.
Returns a list of lines to write to the commit message as is.
"""
regex = re.compile(r"^- (\d*)\s?[:-]+\s?(.*)")
diff_regex = re.compile(r"^(\+- )+(\d*)\s?[:-]+\s?(.*)")
bzs = []
for line in self.diff_output.split("\n"):
match = re.match(regex, line)
match2 = re.match(diff_regex, line)
if match:
bzs.append((match.group(1), match.group(2)))
elif match2:
bzs.append((match2.group(2), match2.group(3)))
return bzs
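    # Sketch of the changelog lines _extract_bzs() recognizes; the bug numbers
    # and messages are invented:
    #
    #     - 123456: Fix frobnicator startup crash
    #     +- 234567 - Handle empty config files
    #
    # would yield [('123456', 'Fix frobnicator startup crash'),
    #              ('234567', 'Handle empty config files')].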
def _format_lines(self):
output = []
for bz in self.bzs:
output.append("Resolves: #%s - %s" % (bz[0], bz[1]))
if len(output) == 0 and self.required_flags:
# No bugzillas had required flags, use a placeholder if
# we have one, otherwise we have to error out.
if self.placeholder_bz:
print("No bugs with required flags were found, using placeholder: %s" % self.placeholder_bz)
output.append("Related: #%s" % self.placeholder_bz)
else:
error_out("No bugzillas found with required flags: %s" %
self.required_flags)
return output
def _filter_bzs_with_flags(self):
print("Checking flags on bugs: %s" % self.bzs)
print(" required flags: %s" % self.required_flags)
# TODO: Would be nice to load bugs in bulk here but for now we'll
# keep it simple.
filtered_bzs = []
for bz_tuple in self.bzs:
bug_id = bz_tuple[0]
try:
bug = self._load_bug(bug_id)
except xmlrpclib.Fault:
print("WARNING: Bug %s does not seem to exist." % bug_id)
continue
debug("Bug %s has flags: %s" % (bug_id, bug.flags))
flags_missing = False
for flag in self.required_flags:
if bug.get_flag_status(flag[0:-1]) != flag[-1]:
print("WARNING: Bug %s missing required flag: %s" %
(bug_id, flag))
flags_missing = True
break
else:
debug("Bug %s has required flag: %s" %
(bug_id, flag))
if not flags_missing:
filtered_bzs.append(bz_tuple)
return filtered_bzs
def _load_bug(self, bug_id):
bugzilla = RHBugzilla(url='https://bugzilla.redhat.com/xmlrpc.cgi')
return bugzilla.getbug(bug_id, include_fields=['id', 'flags'])
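# End-to-end sketch of BugzillaExtractor; the changelog text is invented:
#
#     >>> BugzillaExtractor("- 123456: Fix frobnicator startup crash").extract()
#     ['Resolves: #123456 - Fix frobnicator startup crash']
#
# When required_flags is given, each bug is also looked up through
# python-bugzilla (which needs ~/.bugzillarc) and bugs missing the flags are
# dropped or replaced by the placeholder bug.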
def _out(msgs, prefix, color_func, stream=sys.stdout):
if prefix is None:
fmt = "%(msg)s"
else:
fmt = "%(prefix)s: %(msg)s"
user_conf = read_user_config()
if 'COLOR' in user_conf and (user_conf['COLOR'] == '0' or user_conf['COLOR'].lower() == 'false'):
def color_func(x):
return x
if isinstance(msgs, list):
first_line = msgs.pop(0)
print(color_func(fmt % {'prefix': prefix, 'msg': first_line}), file=stream)
for line in msgs:
print(color_func("%s" % line), file=stream)
else:
print(color_func(fmt % {'prefix': prefix, 'msg': msgs}), file=stream)
if 'DEBUG' in os.environ and callable(getattr(stream, 'flush', None)):
stream.flush()
def error_out(error_msgs, die=True):
"""
    Print the given error message (or list of messages) and exit, unless die=False.
"""
term = Terminal()
_out(error_msgs, "ERROR", term.red, sys.stderr)
if die:
sys.exit(1)
def info_out(msgs):
term = Terminal()
_out(msgs, None, term.blue)
def warn_out(msgs):
"""
    Print the given warning message (or list of messages).
"""
term = Terminal()
_out(msgs, "WARNING", term.yellow)
@contextmanager
def chdir(path):
previous_dir = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(previous_dir)
def create_builder(package_name, build_tag,
config, build_dir, user_config, args,
builder_class=None, **kwargs):
"""
Create (but don't run) the builder class. Builder object may be
used by other objects without actually having run() called.
"""
# Allow some shorter names for builders for CLI users.
if builder_class in BUILDER_SHORTCUTS:
builder_class = BUILDER_SHORTCUTS[builder_class]
if builder_class is None:
debug("---- Builder class is None")
if config.has_option("buildconfig", "builder"):
builder_class = get_class_by_name(config.get("buildconfig",
"builder"))
else:
debug("---- Global config")
builder_class = get_class_by_name(config.get(
BUILDCONFIG_SECTION, DEFAULT_BUILDER))
else:
# We were given an explicit builder class as a str, get the actual
# class reference:
builder_class = get_class_by_name(builder_class)
debug("Using builder class: %s" % builder_class)
# Instantiate the builder:
builder = builder_class(
name=package_name,
tag=build_tag,
build_dir=build_dir,
config=config,
user_config=user_config,
args=args,
**kwargs)
return builder
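# Hypothetical call showing how create_builder() is typically wired up; the
# package name, tag and args are placeholders, and 'mock' is resolved through
# BUILDER_SHORTCUTS above:
#
#     builder = create_builder('hello-tito', 'hello-tito-0.1.0-1', config,
#                              DEFAULT_BUILD_DIR, read_user_config(), args={},
#                              builder_class='mock')
#
# The builder is only constructed here; running the actual build is left to
# the caller.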
def find_file_with_extension(in_dir, suffix=None):
""" Find the file with given extension in the current directory. """
file_name = None
debug("Looking for %s in %s" % (suffix, in_dir))
for f in os.listdir(in_dir):
if f.endswith(suffix):
if file_name is not None:
error_out("At least two %s files in directory: %s and %s" % (suffix, file_name, f))
file_name = f
debug("Using file: %s" % f)
if file_name:
return os.path.join(in_dir, file_name)
return None
def find_spec_file(in_dir=None):
"""
Find the first spec file in the current directory.
Returns only the file name, rather than the full path.
"""
if in_dir is None:
in_dir = os.getcwd()
result = find_file_with_extension(in_dir, '.spec')
if result is None:
error_out("Unable to locate a %s file in %s" % ('.spec', in_dir))
return os.path.basename(result)
def find_gemspec_file(in_dir=None):
"""
    Find the first gemspec file in the given directory (defaults to the cwd).
    Returns the full path to the file.
"""
if in_dir is None:
in_dir = os.getcwd()
result = find_file_with_extension(in_dir, '.gemspec')
if result is None:
error_out("Unable to locate a %s file in %s" % ('.gemspec', in_dir))
return result
def find_spec_like_file(in_dir=None):
    """Find the first .spec or .spec.tmpl file and return its full path."""
if in_dir is None:
in_dir = os.getcwd()
extension_list = ['.spec', '.spec.tmpl']
for ext in extension_list:
result = find_file_with_extension(in_dir, ext)
if result:
return result
else:
error_out("Unable to locate files ending with %s in %s" % (list(extension_list), in_dir))
def find_cheetah_template_file(in_dir=None):
if in_dir is None:
in_dir = os.getcwd()
result = find_file_with_extension(in_dir, '.spec.tmpl')
if result is None:
error_out("Unable to locate a %s file in %s" % ('.spec.tmpl', in_dir))
return result
def find_mead_chain_file(in_dir=None):
if in_dir is None:
in_dir = os.getcwd()
result = find_file_with_extension(in_dir, '.chain')
if result is None:
error_out("Unable to locate a %s file in %s" % ('.chain', in_dir))
return result
def find_git_root():
"""
Find the top-level directory for this git repository.
Returned as a full path.
"""
(status, cdup) = getstatusoutput("git rev-parse --show-cdup")
if status > 0:
error_out(["%s does not appear to be within a git checkout." %
os.getcwd()])
if cdup.strip() == "":
cdup = "./"
return os.path.abspath(cdup)
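# For example, run from <repo>/src/tito, "git rev-parse --show-cdup" prints
# "../../", which find_git_root() resolves to the absolute path of <repo>.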
def tito_config_dir():
""" Returns "rel-eng" for old tito projects and ".tito" for
recent projects.
"""
tito_dir = os.path.join(find_git_root(), ".tito")
if os.path.isdir(tito_dir):
return ".tito"
else:
return "rel-eng"
def extract_sha1(output):
match = SHA_RE.search(output)
if match:
return match.group(0)
else:
return ""
def run_command(command, print_on_success=False):
"""
Run command.
If command fails, print status code and command output.
"""
(status, output) = getstatusoutput(command)
if status > 0:
msgs = [
"Error running command: %s\n" % command,
"Status code: %s\n" % status,
"Command output: %s\n" % output,
]
error_out(msgs, die=False)
raise RunCommandException(command, status, output)
elif print_on_success:
print("Command: %s\n" % command)
print("Status code: %s\n" % status)
print("Command output: %s\n" % output)
else:
debug("Command: %s" % command)
debug("Status code: %s" % status)
debug("Command output: %s\n" % output)
return output
def run_command_print(command):
"""
    Similar to run_command, but prints each line of output on the fly.
"""
output = []
env = os.environ.copy()
env['LC_ALL'] = 'C'
p = subprocess.Popen(shlex.split(command),
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env,
universal_newlines=True)
for line in run_subprocess(p):
line = line.rstrip('\n')
print(line)
output.append(line)
print("\n"),
if p.poll() > 0:
raise RunCommandException(command, p.poll(), "\n".join(output))
return '\n'.join(output)
def run_subprocess(p):
while(True):
retcode = p.poll()
line = p.stdout.readline()
if len(line) > 0:
yield line
if(retcode is not None):
break
def render_cheetah(template_file, destination_directory, cheetah_input):
"""Cheetah doesn't exist for Python 3, but it's the templating engine
that Mead uses. Instead of importing the potentially incompatible code,
we use a command-line utility that Cheetah provides. Yes, this is a total
hack."""
pickle_file = tempfile.NamedTemporaryFile(dir=destination_directory, prefix="tito-cheetah-pickle", delete=False)
try:
pickle.dump(cheetah_input, pickle_file, protocol=2)
pickle_file.close()
run_command("cheetah fill --flat --pickle=%s --odir=%s --oext=cheetah %s" %
(pickle_file.name, destination_directory, template_file))
# Annoyingly Cheetah won't let you specify an empty string for a file extension
# and most Mead templates end with ".spec.tmpl"
rendered_files = glob.glob(os.path.join(destination_directory, "*.cheetah"))
# Cheetah returns zero even if it doesn't find the template to render. Thanks Cheetah.
if not rendered_files:
error_out("Could not find rendered file in %s for %s" % (destination_directory, template_file))
for rendered in rendered_files:
shutil.move(rendered, os.path.splitext(rendered)[0])
finally:
os.unlink(pickle_file.name)
def tag_exists_locally(tag):
(status, output) = getstatusoutput("git tag | grep %s" % tag)
if status > 0:
return False
else:
return True
def tag_exists_remotely(tag):
""" Returns True if the tag exists in the remote git repo. """
try:
get_git_repo_url()
except:
warn_out('remote.origin does not exist. Assuming --offline, for remote tag checking.\n')
return False
sha1 = get_remote_tag_sha1(tag)
debug("sha1 = %s" % sha1)
if sha1 == "":
return False
return True
def get_local_tag_sha1(tag):
tag_sha1 = run_command(
"git ls-remote ./. --tag %s | awk '{ print $1 ; exit }'"
% tag)
tag_sha1 = extract_sha1(tag_sha1)
return tag_sha1
def head_points_to_tag(tag):
"""
Ensure the current git head is the same commit as tag.
For some reason the git commands we normally use to fetch SHA1 for a tag
do not work when comparing to the HEAD SHA1. Using a different command
for now.
"""
debug("Checking that HEAD commit is %s" % tag)
head_sha1 = run_command("git rev-list --max-count=1 HEAD")
tag_sha1 = run_command("git rev-list --max-count=1 %s" % tag)
debug(" head_sha1 = %s" % head_sha1)
debug(" tag_sha1 = %s" % tag_sha1)
return head_sha1 == tag_sha1
def undo_tag(tag):
"""
Executes git commands to delete the given tag and undo the most recent
commit. Assumes you have taken necessary precautions to ensure this is
what you want to do.
"""
# Using --merge here as it appears to undo the changes in the commit,
# but preserve any modified files:
output = run_command("git tag -d %s && git reset --merge HEAD^1" % tag)
print(output)
def get_remote_tag_sha1(tag):
"""
Get the SHA1 referenced by this git tag in the remote git repo.
Will return "" if the git tag does not exist remotely.
"""
# TODO: X11 forwarding messages can appear in this output, find a better way
repo_url = get_git_repo_url()
print("Checking for tag [%s] in git repo [%s]" % (tag, repo_url))
cmd = "git ls-remote %s --tag %s | awk '{ print $1 ; exit }'" % \
(repo_url, tag)
upstream_tag_sha1 = run_command(cmd)
upstream_tag_sha1 = extract_sha1(upstream_tag_sha1)
return upstream_tag_sha1
def check_tag_exists(tag, offline=False):
"""
Check that the given git tag exists in a git repository.
"""
if not tag_exists_locally(tag):
error_out("Tag does not exist locally: [%s]" % tag)
if offline:
return
tag_sha1 = get_local_tag_sha1(tag)
debug("Local tag SHA1: %s" % tag_sha1)
try:
get_git_repo_url()
except:
warn_out('remote.origin does not exist. Assuming --offline, for remote tag checking.\n')
return
upstream_tag_sha1 = get_remote_tag_sha1(tag)
if upstream_tag_sha1 == "":
error_out(["Tag does not exist in remote git repo: %s" % tag,
"You must tag, then git push and git push --tags"])
debug("Remote tag SHA1: %s" % upstream_tag_sha1)
if upstream_tag_sha1 != tag_sha1:
error_out("Tag %s references %s locally but %s upstream." % (tag,
tag_sha1, upstream_tag_sha1))
def debug(text, cmd=None):
"""
Print the text if --debug was specified.
If cmd is specified, run the command and print its output after text.
"""
if 'DEBUG' in os.environ:
print(text)
if cmd:
run_command(cmd, True)
def get_spec_version_and_release(sourcedir, spec_file_name):
if os.path.splitext(spec_file_name)[1] == ".tmpl":
return scrape_version_and_release(spec_file_name)
command = ("""rpm -q --qf '%%{version}-%%{release}\n' --define """
""""_sourcedir %s" --define 'dist %%undefined' --specfile """
"""%s 2> /dev/null | grep -e '^$' -v | head -1""" % (sourcedir, spec_file_name))
return run_command(command)
def search_for(file_name, *args):
"""Send in a file and regular expressions as arguments. Returns the value of the
matching groups (or the entire matching string if no groups are in the regex) for
each regular expression in the same order that the expressions were provided in.
ONLY THE FIRST MATCH IS RETURNED!
Note that this method uses re.search and not re.match so your regexs don't need
to match the entire line.
"""
results = [None] * len(args)
with open(file_name, 'r') as fh:
for line in fh:
for index, regex in enumerate(args):
m = re.search(regex, line)
if not m:
continue
if results[index]:
warn_out("Multiple matches found for %s in %s" % (regex, file_name))
elif m.groups():
results[index] = m.groups()
else:
results[index] = (m.string,)
# Get the index of every regex that didn't match
missing_results = [i for i, x in enumerate(results) if x is None]
if len(missing_results) > 0:
error_out("Could not find match to %s in %s" % (map(lambda x: args[x], missing_results), file_name))
return results
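# Usage sketch (the spec file and patterns here are illustrative):
#   version, release = search_for("pkg.spec",
#                                 r"\s*Version:\s*(.*?)\s*$",
#                                 r"\s*Release:\s*(.*?)\s*$")
# Each entry in the result is the tuple of groups for that regex, e.g. version == ("1.0",).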
def replace_spec_release(file_name, release):
for line in fileinput.input(file_name, inplace=True):
m = re.match(r"(\s*Release:\s*)(.*?)\s*$", line)
# The fileinput module redirects stdout to the file handle
# for the file being output.
# See https://docs.python.org/2/library/fileinput.html#fileinput.FileInput
if m:
print("%s%s" % (m.group(1), release))
else:
print(line.rstrip('\n'))
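# Example of the rewrite performed above (spec content is hypothetical):
#   replace_spec_release("pkg.spec", "1%{?dist}") turns a line such as
#   "Release:        3%{?dist}" into "Release:        1%{?dist}".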
def munge_specfile(spec_file, commit_id, commit_count, fullname=None, tgz_filename=None):
# If making a test rpm we need to get a little crazy with the spec
# file we're building off. (Note we are modifying a temp copy of the
# spec) Swap out the actual release for one that includes the git
# SHA1 we're building for our test package.
sha = commit_id[:7]
for line in fileinput.input(spec_file, inplace=True):
m = re.match(r'^(\s*Release:\s*)(.+?)(%{\?dist})?\s*$', line)
if m:
print('%s%s.git.%s.%s%s' % (
m.group(1),
m.group(2),
commit_count,
sha,
m.group(3),
))
continue
m = re.match(r'^(\s*Source0?):\s*(.+?)$', line)
if tgz_filename and m:
print('%s: %s' % (m.group(1), tgz_filename))
continue
macro = munge_setup_macro(fullname, line)
if macro is not None:
print(macro)
continue
print(line.rstrip('\n'))
def munge_setup_macro(fullname, line):
"""
    Adjust the %setup or %autosetup line in the spec file to accommodate the
    renamed test source.
Return None if the given line is not the setup or autosetup line.
"""
m = re.match(r'^(\s*%(?:auto)?setup)(.*?)$', line)
if fullname and m:
macro = m.group(1)
setup_arg = " -n %s" % fullname
args = m.group(2)
args_match = re.search(r'(.*?)\s+-n\s+\S+(.*)', args)
if args_match:
macro += args_match.group(1)
macro += args_match.group(2)
macro += setup_arg
else:
macro += args
macro += setup_arg
if "%autosetup" in macro:
args_match = re.search(r'(.+?)\s+-p[01]\s+\S+(.*)', args)
if not args_match:
macro = "{} -p1".format(macro)
return macro
return None
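# Illustrative behaviour (fullname values are hypothetical):
#   munge_setup_macro("foo-1.0", "%setup -q\n")   -> "%setup -q -n foo-1.0"
#   munge_setup_macro("foo-1.0", "%autosetup\n")  -> "%autosetup -n foo-1.0 -p1"
# Any other line returns None.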
def scrape_version_and_release(template_file_name):
"""Ideally, we'd let RPM report the version and release of a spec file as
in get_spec_version_and_release. However, when we are dealing with Cheetah
templates for Mead, RPM won't be able to parse the template file. We have to
fall back to using regular expressions."""
version, release = search_for(template_file_name, r"\s*Version:\s*(.*?)\s*$", r"\s*Release:\s*(.*?)\s*$")
version = version[0]
release = release[0]
release = release.replace("%{?dist}", "")
return "%s-%s" % (version, release)
def scl_to_rpm_option(scl, silent=None):
""" Returns rpm option which disable or enable SC and print warning if needed """
rpm_options = ""
cmd = "rpm --eval '%scl'"
output = run_command(cmd).rstrip()
if scl:
if (output != scl) and (output != "%scl") and not silent:
warn_out([
"Meta package of software collection %s installed, but --scl defines %s" % (output, scl),
"Redefining scl macro to %s for this package." % scl
])
rpm_options += " --define 'scl %s'" % scl
else:
if (output != "%scl") and (not silent):
warn_out([
"Warning: Meta package of software collection %s installed, but --scl is not present." % output,
"Undefining scl macro for this package.",
])
# can be replaced by "--undefined scl" when el6 and fc17 is retired
rpm_options += " --eval '%undefine scl'"
return rpm_options
def get_project_name(tag=None, scl=None):
"""
Extract the project name from the specified tag or a spec file in the
current working directory. Error out if neither is present.
"""
if tag is not None:
        p = re.compile(r'(.*?)-(\d.*)')
m = p.match(tag)
if not m:
error_out("Unable to determine project name in tag: %s" % tag)
return m.group(1)
else:
file_path = find_spec_like_file()
if not os.path.exists(file_path):
error_out("spec file: %s does not exist" % file_path)
if os.path.splitext(file_path)[1] == ".tmpl":
name = search_for(file_path, r"\s*Name:\s*(.*?)\s*$")[0][0]
return name
else:
output = run_command(
"rpm -q --qf '%%{name}\n' %s --specfile %s 2> /dev/null | grep -e '^$' -v | head -1" %
(scl_to_rpm_option(scl, silent=True), file_path))
if not output:
error_out(["Unable to determine project name from spec file: %s" % file_path,
"Try rpm -q --specfile %s" % file_path,
"Try rpmlint -i %s" % file_path])
return output
def replace_version(line, new_version):
"""
Attempts to replace common setup.py version formats in the given line,
and return the modified line. If no version is present the line is
returned as is.
Looking for things like version="x.y.z" with configurable case,
whitespace, and optional use of single/double quotes.
"""
# Mmmmm pretty regex!
ver_regex = re.compile("(\s*)(version)(\s*)(=)(\s*)(['\"])(.*)(['\"])(.*)",
re.IGNORECASE)
m = ver_regex.match(line)
if m:
result_tuple = list(m.group(1, 2, 3, 4, 5, 6))
result_tuple.append(new_version)
result_tuple.extend(list(m.group(8, 9)))
new_line = "%s%s%s%s%s%s%s%s%s\n" % tuple(result_tuple)
return new_line
else:
return line
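# Example line rewrite (setup.py content is hypothetical):
#   replace_version("    version='1.0.2',\n", "1.0.3")  ->  "    version='1.0.3',\n"
# Lines without a version assignment come back unchanged.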
def get_relative_project_dir(project_name, commit):
"""
Return the project's sub-directory relative to the git root.
This could be a different directory than where the project currently
resides, so we export a copy of the project's metadata from
.tito/packages/ at the point in time of the tag we are building.
"""
cmd = "git show %s:%s/packages/%s" % (commit, tito_config_dir(),
project_name)
try:
(status, pkg_metadata) = getstatusoutput(cmd)
except:
cmd = "git show %s:%s/packages/%s" % (commit, "rel-eng",
project_name)
(status, pkg_metadata) = getstatusoutput(cmd)
tokens = pkg_metadata.strip().split(" ")
debug("Got package metadata: %s" % tokens)
if status != 0:
return None
return tokens[1]
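# The metadata file under .tito/packages/ normally holds one line of the form
# "<version>-<release> <relative-path>/", e.g. "1.4.0-1 some/project/dir/"
# (values illustrative), so tokens[1] is the project's relative directory.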
def get_relative_project_dir_cwd(git_root):
"""
    Returns the path to the project we're working with, relative to the
    git root, using the cwd.
    *MUST* be called before doing any os.chdir().
i.e. java/, satellite/install/Spacewalk-setup/, etc.
"""
current_dir = os.getcwd()
relative = current_dir[len(git_root) + 1:] + "/"
if relative == "/":
relative = "./"
return relative
def get_build_commit(tag, test=False):
""" Return the git commit we should build. """
if test:
return get_latest_commit(".")
else:
tag_sha1 = run_command(
"git ls-remote ./. --tag %s | awk '{ print $1 ; exit }'"
% tag)
tag_sha1 = extract_sha1(tag_sha1)
commit_id = run_command('git rev-list --max-count=1 %s' % tag_sha1)
return commit_id
def get_commit_count(tag, commit_id):
""" Return the number of commits between the tag and commit_id"""
# git describe returns either a tag-commitcount-gSHA1 OR
# just the tag.
#
# so we need to pass in the tag as well.
# output = run_command("git describe --match=%s %s" % (tag, commit_id))
# if tag == output:
# return 0
# else:
# parse the count from the output
(status, output) = getstatusoutput(
"git describe --match=%s %s" % (tag, commit_id))
debug("tag - %s" % tag)
debug("output - %s" % output)
if status != 0:
debug("git describe of tag %s failed (%d)" % (tag, status))
debug("going to use number of commits from initial commit")
(status, output) = getstatusoutput(
"git rev-list --max-parents=0 HEAD")
if status == 0:
            # output is now the initial commit
(status, output) = getstatusoutput(
"git rev-list %s..%s --count" % (output, commit_id))
if status == 0:
return output
return 0
if tag != output:
# tag-commitcount-gSHA1, we want the penultimate value
cnt = output.split("-")[-2]
return cnt
return 0
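# git describe output has the form "<tag>-<commitcount>-g<sha1>", e.g.
# "foo-1.0-1-5-gdeadbee" (hypothetical) yields a count of "5", the
# penultimate dash-separated field extracted above.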
def get_latest_commit(path="."):
""" Return the latest git commit for the given path. """
commit_id = run_command("git log --pretty=format:%%H --max-count=1 %s" % path)
return commit_id
def get_commit_timestamp(sha1_or_tag):
"""
Get the timestamp of the git commit or tag we're building. Used to
keep the hash the same on all .tar.gz's we generate for a particular
version regardless of when they are generated.
"""
output = run_command(
"git rev-list --timestamp --max-count=1 %s | awk '{print $1}'"
% sha1_or_tag)
return output
def create_tgz(git_root, prefix, commit, relative_dir,
dest_tgz):
"""
Create a .tar.gz from a projects source in git.
"""
os.chdir(os.path.abspath(git_root))
timestamp = get_commit_timestamp(commit)
    # Accommodate standalone projects with the spec file in the root of the git repo:
relative_git_dir = "%s" % relative_dir
if relative_git_dir in ['/', './']:
relative_git_dir = ""
basename = os.path.splitext(dest_tgz)[0]
initial_tar = "%s.initial" % basename
# command to generate a git-archive
git_archive_cmd = 'git archive --format=tar --prefix=%s/ %s:%s --output=%s' % (
prefix, commit, relative_git_dir, initial_tar)
run_command(git_archive_cmd)
# Run git-archive separately if --debug was specified.
# This allows us to detect failure early.
# On git < 1.7.4-rc0, `git archive ... commit:./` fails!
debug('git-archive fails if relative dir is not in git tree',
'%s > /dev/null' % git_archive_cmd)
fixed_tar = "%s.tar" % basename
fixed_tar_fh = open(fixed_tar, 'wb')
try:
tarfixer = TarFixer(open(initial_tar, 'rb'), fixed_tar_fh, timestamp, commit)
tarfixer.fix()
finally:
fixed_tar_fh.close()
# It's a pity we can't use Python's gzip, but it doesn't offer an equivalent of -n
return run_command("gzip -n -c < %s > %s" % (fixed_tar, dest_tgz))
def get_git_repo_url():
"""
    Return the URL of this git repo.
    Uses the remote.origin.url from the repository's .git/config.
"""
return run_command("git config remote.origin.url")
def get_latest_tagged_version(package_name):
"""
Return the latest git tag for this package in the current branch.
Uses the info in .tito/packages/package-name.
Returns None if file does not exist.
"""
git_root = find_git_root()
rel_eng_dir = os.path.join(git_root, tito_config_dir())
file_path = "%s/packages/%s" % (rel_eng_dir, package_name)
debug("Getting latest package info from: %s" % file_path)
if not os.path.exists(file_path):
return None
output = run_command("awk '{ print $1 ; exit }' %s" % file_path)
if output is None or output.strip() == "":
error_out("Error looking up latest tagged version in: %s" % file_path)
return output
def normalize_class_name(name):
"""
    Just a hack to accommodate tito config files with builder/tagger
classes referenced in the spacewalk.releng namespace, which has
since been renamed to just tito.
"""
look_for = "spacewalk.releng."
if name.startswith(look_for):
warn_out("spacewalk.releng.* namespace in tito.props is obsolete. Use tito.* instead.\n")
name = "%s%s" % ("tito.", name[len(look_for):])
return name
def get_script_path(scriptname):
"""
    Hack to accommodate functional tests running from source, rather than
requiring tito to actually be installed. This variable is only set by
test scripts, normally we assume scripts are on PATH.
"""
# TODO: Would be nice to get rid of this hack.
scriptpath = scriptname # assume on PATH by default
if 'TITO_SRC_BIN_DIR' in os.environ:
bin_dir = os.environ['TITO_SRC_BIN_DIR']
scriptpath = os.path.join(bin_dir, scriptname)
return scriptpath
# 511 is 777 in octal. Python 2 and Python 3 disagree about the right
# way to represent octal numbers.
def mkdir_p(path, mode=511):
try:
os.makedirs(path, mode)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def get_class_by_name(name):
"""
    Get a Python class specified by its fully qualified name.
NOTE: Does not actually create an instance of the object, only returns
a Class object.
"""
name = normalize_class_name(name)
# Split name into module and class name:
tokens = name.split(".")
class_name = tokens[-1]
module = '.'.join(tokens[0:-1])
debug("Importing %s" % name)
mod = __import__(module, globals(), locals(), [class_name])
return getattr(mod, class_name)
def increase_version(version_string):
regex = re.compile(r"^(%.*)|(.+\.)?([0-9]+)(\..*|_.*|%.*|$)")
match = re.match(regex, version_string)
if match:
matches = list(match.groups())
# Increment the number in the third match group, if there is one
if matches[2]:
matches[2] = str(int(matches[2]) + 1)
# Join everything back up, skipping match groups with None
return "".join([x for x in matches if x])
# If no match, return the original string
return version_string
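# Illustrative behaviour, derived from the regex above:
#   increase_version("1.0.0")          -> "1.0.1"
#   increase_version("2")              -> "3"
#   increase_version("1.5.3%{?dist}")  -> "1.5.4%{?dist}"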
def reset_release(release_string):
regex = re.compile(r"(^|\.)([.0-9]+)(\.|%|$)")
return regex.sub(r"\g<1>1\g<3>", release_string)
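# Illustrative behaviour, derived from the regex above:
#   reset_release("3%{?dist}")    -> "1%{?dist}"
#   reset_release("2.1%{?dist}")  -> "1%{?dist}"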
def increase_zstream(release_string):
# If we do not have zstream, create .0 and then bump the version
regex = re.compile(r"^(.*%{\?dist})$")
bumped_string = regex.sub(r"\g<1>.0", release_string)
return increase_version(bumped_string)
def find_wrote_in_rpmbuild_output(output):
"""
Parse the output from rpmbuild looking for lines beginning with
"Wrote:". Return a list of file names for each path found.
"""
paths = []
look_for = "Wrote: "
for line in output.split('\n'):
if line.startswith(look_for):
paths.append(line[len(look_for):])
debug("Found wrote line: %s" % paths[-1])
if not paths:
error_out("Unable to locate 'Wrote: ' lines in rpmbuild output: '%s'" % output)
return paths
def compare_version(version1, version2):
"""
Compare two version strings, returning negative if version1 is < version2,
zero when equal and positive when version1 > version2.
"""
def normalize(v):
return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")]
a = normalize(version1)
b = normalize(version2)
return (a > b) - (a < b)
| gpl-2.0 | -5,806,063,661,028,050,000 | 31.957312 | 116 | 0.598109 | false |
mattpitkin/GraWIToNStatisticsLectures | figures/scripts/coin_toss_2.py | 1 | 1891 | #!/usr/bin/env python
"""
Show how the posterior gets updated as a set of coin tosses are generated for a biased coin.
Assume a flat prior on the bias weighting and also a Gaussian prior.
"""
import matplotlib.pyplot as pl
from scipy.stats import norm, kstest
import numpy as np
# set plot to render labels using latex
pl.rc('text', usetex=True)
pl.rc('font', family='serif')
pl.rc('font', size=14)
fig = pl.figure(figsize=(12,10), dpi=100)
# numbers of coin tosses
nt = [0, 1, 2, 5, 10, 50, 100, 500, 1000]
bias = 0.3 # biased towards tails
# bias values
H = np.linspace(0., 1., 1000)
# priors
priorflat = np.ones(len(H))
sigmah = 0.05
muh = 0.5
priorgauss = (1./np.sqrt(2.*np.pi*sigmah**2))*np.exp(-0.5*(H-muh)**2/sigmah**2)
curheads = 0. # current number of heads
for i, n in enumerate(nt):
# generate coin tosses (making sure to include previous ones)
if n > 0:
rc = np.random.rand(n-nprev)
curheads = curheads + len(np.zeros(n-nprev)[rc<bias])
# compute likelihood
L = H**curheads * (1.-H)**(n-curheads)
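    # Bernoulli/binomial likelihood up to a constant factor:
    # p(d|H,I) is proportional to H^r * (1-H)^(n-r) for r heads in n tosses;
    # the binomial coefficient is irrelevant once the posterior is normalised below.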
# compute posterior
post1 = L*priorflat
post2 = L*priorgauss
# normalise posterior
post1 = post1/np.trapz(post1, H)
post2 = post2/np.trapz(post2, H)
# plot posterior
pl.subplot(3,3,i+1)
pl.plot(H, post1, 'b', label='$p(H|d,I)$ Uniform prior')
pl.plot(H, post2, 'r', label='$p(H|d,I)$ Gaussian prior')
pl.plot(H, priorgauss, 'k--', label='$p(H|I)$ Gaussian')
ax = pl.gca()
ax.set_yticklabels([])
ax.set_yticks([])
if i == 0:
pl.legend(loc='lower left', fancybox=True, framealpha=0.3, prop={'size': 12})
if i % 3 == 0:
ax.set_ylabel('$p(H|d,I)$')
if i > 5:
ax.set_xlabel('$H$')
else:
ax.set_xticklabels([])
ax.text(0.65, 0.8*ax.get_ylim()[1], '$n=%d$' % n, fontsize=16)
nprev = n
pl.tight_layout()
#fig.subplots_adjust(bottom=0.12)
pl.show()
fig.savefig('../coin_toss_2.pdf') | mit | 5,257,942,219,974,671,000 | 23.25641 | 92 | 0.630883 | false |
Spokepoint/chipmunk-restapi | controller/spreadsheet.py | 1 | 1185 | import gspread
import os
import string
def new_token():
gc = gspread.login(os.environ.get('GOOGLE_USER'),
os.environ.get('GOOGLE_PASS'))
return gc
def write_cell(worksheetKey, row, col, value):
gc = new_token()
worksheet = gc.open_by_key(worksheetKey).get_worksheet(0)
row = str(row)
if(worksheet.acell(col + row).value is None):
worksheet.update_acell(col + row, value)
def read_cell(worksheetKey, row, col):
gc = new_token()
worksheet = gc.open_by_key(worksheetKey).get_worksheet(0)
row = str(row)
return worksheet.acell(col + row).value
def col2num(col):
"""
Converts a spreadsheet column letter name to a column number
Reference:
http://stackoverflow.com/a/12640614/654416
"""
num = 0
for c in col:
if c in string.ascii_letters:
num = num * 26 + (ord(c.upper()) - ord('A')) + 1
return num
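# Examples, derived from the conversion above:
#   col2num('A') == 1, col2num('Z') == 26, col2num('AB') == 28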
def read_column(worksheetKey, col):
gc = new_token()
worksheet = gc.open_by_key(worksheetKey).get_worksheet(0)
return worksheet.col_values(col2num(col))
def next_append_row(worksheetKey, col):
return len(read_column(worksheetKey, col)) + 1
| mit | 8,695,209,471,296,882,000 | 23.6875 | 64 | 0.637131 | false |
MobileWebApps/backend-python-rest-gae | lib/pygments/lexers/shell.py | 1 | 14790 | # -*- coding: utf-8 -*-
"""
pygments.lexers.shell
~~~~~~~~~~~~~~~~~~~~~
Lexers for various shells.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups, include
from pygments.token import Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Generic
from pygments.util import shebang_matches
__all__ = ['BashLexer', 'BashSessionLexer', 'TcshLexer', 'BatchLexer',
'PowerShellLexer', 'ShellSessionLexer']
line_re = re.compile('.*?\n')
class BashLexer(RegexLexer):
"""
Lexer for (ba|k|)sh shell scripts.
*New in Pygments 0.6.*
"""
name = 'Bash'
aliases = ['bash', 'sh', 'ksh']
filenames = ['*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass',
'.bashrc', 'bashrc', '.bash_*', 'bash_*']
mimetypes = ['application/x-sh', 'application/x-shellscript']
tokens = {
'root': [
include('basic'),
(r'\$\(\(', Keyword, 'math'),
(r'\$\(', Keyword, 'paren'),
(r'\${#?', Keyword, 'curly'),
(r'`', String.Backtick, 'backticks'),
include('data'),
],
'basic': [
(r'\b(if|fi|else|while|do|done|for|then|return|function|case|'
r'select|continue|until|esac|elif)\s*\b',
Keyword),
(r'\b(alias|bg|bind|break|builtin|caller|cd|command|compgen|'
r'complete|declare|dirs|disown|echo|enable|eval|exec|exit|'
r'export|false|fc|fg|getopts|hash|help|history|jobs|kill|let|'
r'local|logout|popd|printf|pushd|pwd|read|readonly|set|shift|'
r'shopt|source|suspend|test|time|times|trap|true|type|typeset|'
r'ulimit|umask|unalias|unset|wait)\s*\b(?!\.)',
Name.Builtin),
(r'#.*\n', Comment),
(r'\\[\w\W]', String.Escape),
(r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
(r'[\[\]{}()=]', Operator),
(r'<<<', Operator), # here-string
(r'<<-?\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
(r'&&|\|\|', Operator),
],
'data': [
(r'(?s)\$?"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)\$?'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
(r';', Text),
(r'\s+', Text),
(r'[^=\s\[\]{}()$"\'`\\<]+', Text),
(r'\d+(?= |\Z)', Number),
(r'\$#?(\w+|.)', Name.Variable),
(r'<', Text),
],
'curly': [
(r'}', Keyword, '#pop'),
(r':-', Keyword),
(r'[a-zA-Z0-9_]+', Name.Variable),
(r'[^}:"\'`$]+', Punctuation),
(r':', Punctuation),
include('root'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('root'),
],
'math': [
(r'\)\)', Keyword, '#pop'),
(r'[-+*/%^|&]|\*\*|\|\|', Operator),
(r'\d+', Number),
include('root'),
],
'backticks': [
(r'`', String.Backtick, '#pop'),
include('root'),
],
}
def analyse_text(text):
return shebang_matches(text, r'(ba|z|)sh')
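# Minimal usage sketch with the standard pygments API (formatter choice is arbitrary):
#   from pygments import highlight
#   from pygments.formatters import TerminalFormatter
#   print(highlight('echo "hello $USER"', BashLexer(), TerminalFormatter()))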
class BashSessionLexer(Lexer):
"""
Lexer for simplistic shell sessions.
*New in Pygments 1.1.*
"""
name = 'Bash Session'
aliases = ['console']
filenames = ['*.sh-session']
mimetypes = ['application/x-shell-session']
def get_tokens_unprocessed(self, text):
bashlexer = BashLexer(**self.options)
pos = 0
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
m = re.match(r'^((?:\(\S+\))?(?:|sh\S*?|\w+\S+[@:]\S+(?:\s+\S+)'
r'?|\[\S+[@:][^\n]+\].+)[$#%])(.*\n?)' , line)
if m:
# To support output lexers (say diff output), the output
# needs to be broken by prompts whenever the output lexer
# changes.
if not insertions:
pos = match.start()
insertions.append((len(curcode),
[(0, Generic.Prompt, m.group(1))]))
curcode += m.group(2)
elif line.startswith('>'):
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:1])]))
curcode += line[1:]
else:
if insertions:
toks = bashlexer.get_tokens_unprocessed(curcode)
for i, t, v in do_insertions(insertions, toks):
yield pos+i, t, v
yield match.start(), Generic.Output, line
insertions = []
curcode = ''
if insertions:
for i, t, v in do_insertions(insertions,
bashlexer.get_tokens_unprocessed(curcode)):
yield pos+i, t, v
class ShellSessionLexer(Lexer):
"""
Lexer for shell sessions that works with different command prompts
*New in Pygments 1.6.*
"""
name = 'Shell Session'
aliases = ['shell-session']
filenames = ['*.shell-session']
mimetypes = ['application/x-sh-session']
def get_tokens_unprocessed(self, text):
bashlexer = BashLexer(**self.options)
pos = 0
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
m = re.match(r'^((?:\[?\S+@[^$#%]+)[$#%])(.*\n?)', line)
if m:
# To support output lexers (say diff output), the output
# needs to be broken by prompts whenever the output lexer
# changes.
if not insertions:
pos = match.start()
insertions.append((len(curcode),
[(0, Generic.Prompt, m.group(1))]))
curcode += m.group(2)
else:
if insertions:
toks = bashlexer.get_tokens_unprocessed(curcode)
for i, t, v in do_insertions(insertions, toks):
yield pos+i, t, v
yield match.start(), Generic.Output, line
insertions = []
curcode = ''
if insertions:
for i, t, v in do_insertions(insertions,
bashlexer.get_tokens_unprocessed(curcode)):
yield pos+i, t, v
class BatchLexer(RegexLexer):
"""
Lexer for the DOS/Windows Batch file format.
*New in Pygments 0.7.*
"""
name = 'Batchfile'
aliases = ['bat']
filenames = ['*.bat', '*.cmd']
mimetypes = ['application/x-dos-batch']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
# Lines can start with @ to prevent echo
(r'^\s*@', Punctuation),
(r'^(\s*)(rem\s.*)$', bygroups(Text, Comment)),
(r'".*?"', String.Double),
(r"'.*?'", String.Single),
# If made more specific, make sure you still allow expansions
# like %~$VAR:zlt
(r'%%?[~$:\w]+%?', Name.Variable),
(r'::.*', Comment), # Technically :: only works at BOL
(r'(set)(\s+)(\w+)', bygroups(Keyword, Text, Name.Variable)),
(r'(call)(\s+)(:\w+)', bygroups(Keyword, Text, Name.Label)),
(r'(goto)(\s+)(\w+)', bygroups(Keyword, Text, Name.Label)),
(r'\b(set|call|echo|on|off|endlocal|for|do|goto|if|pause|'
r'setlocal|shift|errorlevel|exist|defined|cmdextversion|'
r'errorlevel|else|cd|md|del|deltree|cls|choice)\b', Keyword),
(r'\b(equ|neq|lss|leq|gtr|geq)\b', Operator),
include('basic'),
(r'.', Text),
],
'echo': [
# Escapes only valid within echo args?
(r'\^\^|\^<|\^>|\^\|', String.Escape),
(r'\n', Text, '#pop'),
include('basic'),
(r'[^\'"^]+', Text),
],
'basic': [
(r'".*?"', String.Double),
(r"'.*?'", String.Single),
(r'`.*?`', String.Backtick),
(r'-?\d+', Number),
(r',', Punctuation),
(r'=', Operator),
(r'/\S+', Name),
(r':\w+', Name.Label),
(r'\w:\w+', Text),
(r'([<>|])(\s*)(\w+)', bygroups(Punctuation, Text, Name)),
],
}
class TcshLexer(RegexLexer):
"""
Lexer for tcsh scripts.
*New in Pygments 0.10.*
"""
name = 'Tcsh'
aliases = ['tcsh', 'csh']
filenames = ['*.tcsh', '*.csh']
mimetypes = ['application/x-csh']
tokens = {
'root': [
include('basic'),
(r'\$\(', Keyword, 'paren'),
(r'\${#?', Keyword, 'curly'),
(r'`', String.Backtick, 'backticks'),
include('data'),
],
'basic': [
(r'\b(if|endif|else|while|then|foreach|case|default|'
r'continue|goto|breaksw|end|switch|endsw)\s*\b',
Keyword),
(r'\b(alias|alloc|bg|bindkey|break|builtins|bye|caller|cd|chdir|'
r'complete|dirs|echo|echotc|eval|exec|exit|fg|filetest|getxvers|'
r'glob|getspath|hashstat|history|hup|inlib|jobs|kill|'
r'limit|log|login|logout|ls-F|migrate|newgrp|nice|nohup|notify|'
r'onintr|popd|printenv|pushd|rehash|repeat|rootnode|popd|pushd|'
r'set|shift|sched|setenv|setpath|settc|setty|setxvers|shift|'
r'source|stop|suspend|source|suspend|telltc|time|'
r'umask|unalias|uncomplete|unhash|universe|unlimit|unset|unsetenv|'
r'ver|wait|warp|watchlog|where|which)\s*\b',
Name.Builtin),
(r'#.*\n', Comment),
(r'\\[\w\W]', String.Escape),
(r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
(r'[\[\]{}()=]+', Operator),
(r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
],
'data': [
(r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
(r'\s+', Text),
(r'[^=\s\[\]{}()$"\'`\\]+', Text),
(r'\d+(?= |\Z)', Number),
(r'\$#?(\w+|.)', Name.Variable),
],
'curly': [
(r'}', Keyword, '#pop'),
(r':-', Keyword),
(r'[a-zA-Z0-9_]+', Name.Variable),
(r'[^}:"\'`$]+', Punctuation),
(r':', Punctuation),
include('root'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('root'),
],
'backticks': [
(r'`', String.Backtick, '#pop'),
include('root'),
],
}
class PowerShellLexer(RegexLexer):
"""
For Windows PowerShell code.
*New in Pygments 1.5.*
"""
name = 'PowerShell'
aliases = ['powershell', 'posh', 'ps1']
filenames = ['*.ps1']
mimetypes = ['text/x-powershell']
flags = re.DOTALL | re.IGNORECASE | re.MULTILINE
keywords = (
'while validateset validaterange validatepattern validatelength '
'validatecount until trap switch return ref process param parameter in '
'if global: function foreach for finally filter end elseif else '
'dynamicparam do default continue cmdletbinding break begin alias \\? '
'% #script #private #local #global mandatory parametersetname position '
'valuefrompipeline valuefrompipelinebypropertyname '
'valuefromremainingarguments helpmessage try catch').split()
operators = (
'and as band bnot bor bxor casesensitive ccontains ceq cge cgt cle '
'clike clt cmatch cne cnotcontains cnotlike cnotmatch contains '
'creplace eq exact f file ge gt icontains ieq ige igt ile ilike ilt '
'imatch ine inotcontains inotlike inotmatch ireplace is isnot le like '
'lt match ne not notcontains notlike notmatch or regex replace '
'wildcard').split()
verbs = (
'write where wait use update unregister undo trace test tee take '
'suspend stop start split sort skip show set send select scroll resume '
'restore restart resolve resize reset rename remove register receive '
'read push pop ping out new move measure limit join invoke import '
'group get format foreach export expand exit enter enable disconnect '
'disable debug cxnew copy convertto convertfrom convert connect '
'complete compare clear checkpoint aggregate add').split()
commenthelp = (
'component description app_scaffolding externalhelp forwardhelpcategory '
'forwardhelptargetname functionality inputs link '
'notes outputs parameter remotehelprunspace role synopsis').split()
tokens = {
'root': [
(r'\s+', Text),
(r'^(\s*#[#\s]*)(\.(?:%s))([^\n]*$)' % '|'.join(commenthelp),
bygroups(Comment, String.Doc, Comment)),
(r'#[^\n]*?$', Comment),
(r'(<|<)#', Comment.Multiline, 'multline'),
(r'@"\n.*?\n"@', String.Heredoc),
(r"@'\n.*?\n'@", String.Heredoc),
# escaped syntax
(r'`[\'"$@-]', Punctuation),
(r'"', String.Double, 'string'),
(r"'([^']|'')*'", String.Single),
(r'(\$|@@|@)((global|script|private|env):)?[a-z0-9_]+',
Name.Variable),
(r'(%s)\b' % '|'.join(keywords), Keyword),
(r'-(%s)\b' % '|'.join(operators), Operator),
(r'(%s)-[a-z_][a-z0-9_]*\b' % '|'.join(verbs), Name.Builtin),
(r'\[[a-z_\[][a-z0-9_. `,\[\]]*\]', Name.Constant), # .net [type]s
(r'-[a-z_][a-z0-9_]*', Name),
(r'\w+', Name),
(r'[.,{}\[\]$()=+*/\\&%!~?^`|<>-]', Punctuation),
],
'multline': [
(r'[^#&.]+', Comment.Multiline),
(r'#(>|>)', Comment.Multiline, '#pop'),
(r'\.(%s)' % '|'.join(commenthelp), String.Doc),
(r'[#&.]', Comment.Multiline),
],
'string': [
(r'[^$`"]+', String.Double),
(r'\$\(', String.Interpol, 'interpol'),
(r'`"|""', String.Double),
(r'[`$]', String.Double),
(r'"', String.Double, '#pop'),
],
'interpol': [
(r'[^$)]+', String.Interpol),
(r'\$\(', String.Interpol, '#push'),
(r'\)', String.Interpol, '#pop'),
]
}
| bsd-3-clause | -2,247,668,285,030,396,700 | 35.073171 | 84 | 0.471738 | false |
b-e-p/bep | Bep/cmds/turn_on.py | 1 | 6540 | #!/usr/bin/env python
#----------------------------------------------------------------
# Author: Jason Gors <jasonDOTgorsATgmail>
# Creation Date: 11-19-2015
# Purpose:
#----------------------------------------------------------------
from Bep.core.release_info import name
from Bep.core import utils
from Bep import package
command_and_items_to_process_when_multiple_items = {} # but not for install command
def turn_on_cmd(args, additional_args, lang_dir_name, pkg_type, noise, install_dirs,
pkgs_and_branches_on, pkgs_and_branches_off, everything_already_installed, **kwargs):
''' Turns on specified packages.
Parameters
----------
    args: a class inst of the argparse namespace with the arguments parsed for the turn_on command.
    additional_args: list of additional args parsed from the argparse arguments.
    lang_dir_name: name of lang_version dir for the package to turn on.
    pkg_type: str of pkg_type to turn on.
noise: noise class inst with the verbosity level for the amount of output to deliver to stdout.
install_dirs: dict of install locations for installed pkgs and install logs.
pkgs_and_branches_on: dict of all packages and branches currently turned on for this lang_version
and pkg_type. eg. {'ipython': ['master']}
pkgs_and_branches_off: dict of all packages and branches currently turned off for this lang_version
and pkg_type.
everything_already_installed: dict of all installed packages by lang_version, pkg_type, pkg_name,
and branches installed for that hierarchy.
'''
# only a single item can be turned on at any given time, so this is for a single command passed to turn_on.
# (a specific branch of a specific pkg of a specific pkg_type for a specific lang is what's turned on)
def how_to_turn_on_branches(pkg_to_turn_on, all_installed_for_pkg):
any_how_to_displayed = False
for quad_tuple in all_installed_for_pkg:
lang_installed, pkg_type_installed, pkg_name_installed, branch_installed = quad_tuple
if (lang_installed == lang_dir_name) and (pkg_type_installed == pkg_type):
if branch_installed.startswith('.__'):
branch_installed = branch_installed.lstrip('.__')
#if branch_installed == 'master':
##print("\n# Turn on {0} {1} with:".format(pkg_to_turn_on, lang_installed))
##print("{0} -l {1} turn_on {2}={3}".format(name, lang_installed, pkg_type_installed, pkg_name_installed))
#turn_on_cue = "Turn on {0} {1} with:".format(pkg_to_turn_on, lang_installed)
#turn_on_cmd = "{0} -l {1} turn_on {2} {3}".format(name, lang_installed,
#pkg_type_installed, pkg_name_installed)
#else:
##print("\n# Turn on {0} [{1}] {2} with:".format(pkg_to_turn_on, branch_installed, lang_installed))
##print("{0} -l {1} turn_on {2}={3}^{4}".format(name, lang_installed, pkg_type_installed, pkg_name_installed, branch_installed))
#turn_on_cue = "Turn on {0} [{1}] {2} with:".format(pkg_to_turn_on, branch_installed, lang_installed)
#turn_on_cmd = "{0} -l {1} turn_on {2} {3} --branch={4}".format(name, lang_installed,
#pkg_type_installed, pkg_name_installed, branch_installed)
turn_on_cue = "Turn on {0} [{1}] {2} with:".format(pkg_to_turn_on, branch_installed, lang_installed)
turn_on_cmd = "{0} -l {1} turn_on {2} {3} --branch={4}".format(name, lang_installed,
pkg_type_installed, pkg_name_installed, branch_installed)
elif not branch_installed.startswith('.__'):
#print("\n* {0} [{1}] {2} already turned on.".format(pkg_name_installed, branch_installed, lang_installed))
turn_on_cue = "{0} [{1}] {2} already turned on.".format(pkg_name_installed, branch_installed, lang_installed)
turn_on_cmd = "**** Already turned on ****"
command_and_items_to_process_when_multiple_items[turn_on_cue] = turn_on_cmd
any_how_to_displayed = True
if any_how_to_displayed:
#return True
return command_and_items_to_process_when_multiple_items
else:
return False
def turn_on_branch(lang_arg, pkg_to_turn_on, branch_to_turn_on):
a_pkg_was_processed = False
pkg_inst = package.create_pkg_inst(lang_arg, pkg_type, install_dirs)
        lang_cmd = pkg_inst.lang_cmd # makes it so that the system default of a lang maps back on to its particular version
if lang_dir_name == lang_cmd:
if pkg_to_turn_on in pkgs_and_branches_on:
branch_on = pkgs_and_branches_on[pkg_to_turn_on]
if branch_to_turn_on in branch_on:
print('\n{0} [{1}] {2} already turned on.'.format(pkg_to_turn_on, branch_to_turn_on, lang_cmd))
a_pkg_was_processed = True
if pkg_to_turn_on in pkgs_and_branches_off:
branches_off = pkgs_and_branches_off[pkg_to_turn_on]
if branch_to_turn_on in branches_off:
utils.when_not_quiet_mode(utils.status('\tTurning on {0} [{1}] {2} {3}'.format(
pkg_to_turn_on, branch_to_turn_on, lang_dir_name, pkg_type)), noise.quiet)
branch_to_turn_on = '.__{0}'.format(branch_to_turn_on)
pkg_inst.turn_on(pkg_to_turn_on, branch_to_turn_on, args, everything_already_installed, noise)
a_pkg_was_processed = True
if a_pkg_was_processed:
return True
else:
return False
pkg_was_processed_or_displayed = utils.package_processor(args,
additional_args,
pkg_type,
how_to_func=how_to_turn_on_branches,
processing_func=turn_on_branch,
process_str='turn_on',
everything_already_installed=everything_already_installed,
)
return pkg_was_processed_or_displayed
| bsd-3-clause | -7,830,477,266,451,147,000 | 55.37931 | 152 | 0.556422 | false |
meher92/Neural-Photos | project/model.py | 1 | 2039 | import os, time
from datetime import datetime
from project import app
from pymongo import MongoClient
import pymongo
client = MongoClient(app.config['MONGO_URI'])
db = client[app.config['MONGO_DBNAME']]
collection = db.images_collection
def save_file_in_db(filename, created_at, uid=-1, caption=''):
count = collection.count()
collection.insert_one(
{
"filename": filename,
"created_at": created_at,
"created_by": uid,
"caption": caption,
"file_id": count+1
}
)
def save_file(file_path,uid=-1):
try:
#create local images directory if not exists
if not os.path.exists(app.config['image_upload_path']):
os.makedirs(app.config['image_upload_path'])
#check for valid extension
(fname, extension)= os.path.splitext(file_path.filename)
if extension not in app.config['accepted_file_types']:
response = {"error": "Invalid extension"}
return response
#append ts to filename and save to directory
created_at = int(time.time())
final_filename = 'uploaded_images/'+fname+'_'+str(created_at)+extension
file_path.save(final_filename)
#add entry to DB
save_file_in_db(final_filename, created_at, uid)
except:
return {"error": "Server error"}
return {'message': 'Uploaded succesfully'}
def get_albums(last_image_index, uid=-1):
albums = []
try:
data = list(collection.find({'created_by':uid}).sort("created_at",pymongo.DESCENDING))[last_image_index:last_image_index+10]
for obj in data:
album = {}
album['date_str'] = datetime.fromtimestamp(obj['created_at']).strftime('%b, %d')
album['img_url'] = obj['filename']
album['caption'] = obj['caption']
album['img_id'] = obj['file_id']
albums.append(album)
except:
print "Error in get_albums!"
return [{"error": "Server error"}]
return albums
| mit | 1,790,959,567,304,970,200 | 29.432836 | 132 | 0.598823 | false |
drewp/commentserve | honeypot.py | 1 | 1657 | """
based on
http://trac.edgewall.org/browser/plugins/0.12/spam-filter-captcha/tracspamfilter/filters/httpbl.py
BSD license: http://trac.edgewall.org/wiki/TracLicense
"""
import logging
from dns.resolver import query, Timeout, NXDOMAIN, NoAnswer, NoNameservers
log = logging.getLogger()
class HoneypotChecker(object):
def __init__(self, key):
"""
key is a string you get from registering with honeypot
"""
self.key = key
def check(self, ip):
"""
raises if this ip fails the httpbl check
"""
reverse_octal = '.'.join(reversed(ip.split('.')))
addr = '%s.%s.dnsbl.httpbl.org' % (self.key, reverse_octal)
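        # e.g. a (hypothetical) key "abcdefghijkl" and ip "1.2.3.4" give
        # "abcdefghijkl.4.3.2.1.dnsbl.httpbl.org"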
log.debug('Querying Http:BL: %s' % addr)
try:
dns_answer = query(addr)
answer = [int(i) for i in str(dns_answer[0]).split('.')]
if answer[0] != 127:
log.warn('Invalid Http:BL reply for IP "%s": %s' %
(ip, dns_answer))
return
# TODO: answer[1] represents number of days since last activity
# and answer[2] is treat score assigned by Project Honey
# Pot. We could use both to adjust karma.
is_suspicious = answer[3] & 1
is_spammer = answer[3] & 4
if is_spammer:
raise ValueError("IP %s rejected" % ip)
except NXDOMAIN:
# not blacklisted on this server
return
except (Timeout, NoAnswer, NoNameservers), e:
log.warn('Error checking Http:BL for IP "%s": %s' %
(ip, e))
| bsd-2-clause | 7,159,504,697,764,755,000 | 30.865385 | 98 | 0.542547 | false |
vperron/picasa-toolbox | ptoolbox/google/utils.py | 1 | 2017 | # -*- coding: utf-8 -*-
import requests
from xml.etree import ElementTree
from datetime import datetime, timedelta
from .constants import epoch, G_XML_ROOT, G_XML_NAMESPACES, TZ_API_URL
def ts2dt(ts, millisecs=False):
"""Convert a timestamp to a datetime."""
if millisecs:
dt = datetime.utcfromtimestamp(ts / 1000)
return dt + timedelta(milliseconds=ts % 1000)
return datetime.utcfromtimestamp(ts)
def dt2ts(dt, millisecs=False):
"""Convert a datetime to a timestamp in UTC."""
ts = (dt - epoch).total_seconds()
if millisecs:
return int(ts * 1000)
return int(ts)
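# Assuming `epoch` is the Unix epoch (1970-01-01 UTC), e.g.:
#   dt2ts(datetime(1970, 1, 2)) == 86400
#   dt2ts(datetime(1970, 1, 2), millisecs=True) == 86400000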
def latlon2tz(lat, lon, dt=None):
if dt is None:
dt = datetime.now()
ts = dt2ts(dt)
url = TZ_API_URL.format(lat=lat, lon=lon, ts=ts)
res = requests.get(url)
if res.status_code != 200:
raise Exception('Could not reach Google Timezone API')
data = res.json()
if data[u'status'] != u'OK':
raise Exception('Could not get a valid answer from Google Timezone API')
return data
def mail2username(email):
return email.split('@')[0] # remove the e-mail part of the login
def iso8601str2datetime(s):
if s:
return datetime.strptime(s, '%Y-%m-%dT%H:%M:%S.%fZ')
return None
def g_json_key(key, namespace=None):
return key if namespace is None else '%s$%s' % (namespace, key)
def g_json_value(data, key, namespace=None, accessor='$t'):
"""Returns a google-encoded value from the feed. Example:
json = {"gphoto$access": { "$t": "private" }}, then
g_json_value(json, 'access', 'gphoto') is 'private'
"""
complete_key = g_json_key(key, namespace)
if complete_key in data:
if accessor in data[complete_key]:
return data[complete_key][accessor]
return None
def g_xml_value(data, key, namespace=G_XML_ROOT):
xml_tree = ElementTree.fromstring(data)
full_key = "{%s}%s" % (G_XML_NAMESPACES[namespace], key)
id_node = xml_tree.find(full_key)
return id_node.text
| mit | -8,591,657,744,851,923,000 | 27.814286 | 80 | 0.642043 | false |
googleapis/googleapis-gen | google/cloud/translate/v3/translation-v3-py/google/cloud/translate_v3/services/translation_service/client.py | 1 | 50854 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.translate_v3.services.translation_service import pagers
from google.cloud.translate_v3.types import translation_service
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import TranslationServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import TranslationServiceGrpcTransport
from .transports.grpc_asyncio import TranslationServiceGrpcAsyncIOTransport
class TranslationServiceClientMeta(type):
"""Metaclass for the TranslationService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[TranslationServiceTransport]]
_transport_registry["grpc"] = TranslationServiceGrpcTransport
_transport_registry["grpc_asyncio"] = TranslationServiceGrpcAsyncIOTransport
def get_transport_class(cls,
label: str = None,
) -> Type[TranslationServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class TranslationServiceClient(metaclass=TranslationServiceClientMeta):
"""Provides natural language translation operations."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "translate.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TranslationServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TranslationServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> TranslationServiceTransport:
"""Returns the transport used by the client instance.
Returns:
TranslationServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def glossary_path(project: str,location: str,glossary: str,) -> str:
"""Returns a fully-qualified glossary string."""
return "projects/{project}/locations/{location}/glossaries/{glossary}".format(project=project, location=location, glossary=glossary, )
@staticmethod
def parse_glossary_path(path: str) -> Dict[str,str]:
"""Parses a glossary path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/glossaries/(?P<glossary>.+?)$", path)
return m.groupdict() if m else {}
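    # e.g. (identifiers are illustrative):
    #   parse_glossary_path("projects/my-project/locations/us-central1/glossaries/my-glossary")
    #   == {"project": "my-project", "location": "us-central1", "glossary": "my-glossary"}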
@staticmethod
def common_billing_account_path(billing_account: str, ) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str,str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str, ) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder, )
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str,str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str, ) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization, )
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str,str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str, ) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project, )
@staticmethod
def parse_common_project_path(path: str) -> Dict[str,str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str, ) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(project=project, location=location, )
@staticmethod
def parse_common_location_path(path: str) -> Dict[str,str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(self, *,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, TranslationServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the translation service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, TranslationServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
if is_mtls:
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, TranslationServiceTransport):
# transport is a TranslationServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError("When providing a transport instance, "
"provide its credentials directly.")
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
)
def translate_text(self,
request: translation_service.TranslateTextRequest = None,
*,
parent: str = None,
target_language_code: str = None,
contents: Sequence[str] = None,
model: str = None,
mime_type: str = None,
source_language_code: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> translation_service.TranslateTextResponse:
r"""Translates input text and returns translated text.
Args:
request (google.cloud.translate_v3.types.TranslateTextRequest):
The request object. The request message for synchronous
translation.
parent (str):
Required. Project or location to make a call. Must refer
to a caller's project.
Format: ``projects/{project-number-or-id}`` or
``projects/{project-number-or-id}/locations/{location-id}``.
For global calls, use
``projects/{project-number-or-id}/locations/global`` or
``projects/{project-number-or-id}``.
Non-global location is required for requests using
AutoML models or custom glossaries.
Models and glossaries must be within the same region
(have same location-id), otherwise an INVALID_ARGUMENT
(400) error is returned.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
target_language_code (str):
Required. The BCP-47 language code to
use for translation of the input text,
set to one of the language codes listed
in Language Support.
This corresponds to the ``target_language_code`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
contents (Sequence[str]):
Required. The content of the input in
string format. We recommend the total
content be less than 30k codepoints. Use
BatchTranslateText for larger text.
This corresponds to the ``contents`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
model (str):
Optional. The ``model`` type requested for this
translation.
The format depends on model type:
- AutoML Translation models:
``projects/{project-number-or-id}/locations/{location-id}/models/{model-id}``
- General (built-in) models:
``projects/{project-number-or-id}/locations/{location-id}/models/general/nmt``,
``projects/{project-number-or-id}/locations/{location-id}/models/general/base``
For global (non-regionalized) requests, use
``location-id`` ``global``. For example,
``projects/{project-number-or-id}/locations/global/models/general/nmt``.
                If missing, the system decides which Google base model
                to use.
This corresponds to the ``model`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
mime_type (str):
Optional. The format of the source
text, for example, "text/html",
"text/plain". If left blank, the MIME
type defaults to "text/html".
This corresponds to the ``mime_type`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
source_language_code (str):
Optional. The BCP-47 language code of
the input text if known, for example,
"en-US" or "sr-Latn". Supported language
codes are listed in Language Support. If
the source language isn't specified, the
API attempts to identify the source
language automatically and returns the
source language within the response.
This corresponds to the ``source_language_code`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.translate_v3.types.TranslateTextResponse:
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, target_language_code, contents, model, mime_type, source_language_code])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a translation_service.TranslateTextRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, translation_service.TranslateTextRequest):
request = translation_service.TranslateTextRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if target_language_code is not None:
request.target_language_code = target_language_code
if contents is not None:
request.contents = contents
if model is not None:
request.model = model
if mime_type is not None:
request.mime_type = mime_type
if source_language_code is not None:
request.source_language_code = source_language_code
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.translate_text]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def detect_language(self,
request: translation_service.DetectLanguageRequest = None,
*,
parent: str = None,
model: str = None,
mime_type: str = None,
content: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> translation_service.DetectLanguageResponse:
r"""Detects the language of text within a request.
Args:
request (google.cloud.translate_v3.types.DetectLanguageRequest):
The request object. The request message for language
detection.
parent (str):
Required. Project or location to make a call. Must refer
to a caller's project.
Format:
``projects/{project-number-or-id}/locations/{location-id}``
or ``projects/{project-number-or-id}``.
For global calls, use
``projects/{project-number-or-id}/locations/global`` or
``projects/{project-number-or-id}``.
                Only models within the same region (that have the same
                location-id) can be used. Otherwise an INVALID_ARGUMENT
(400) error is returned.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
model (str):
Optional. The language detection model to be used.
Format:
``projects/{project-number-or-id}/locations/{location-id}/models/language-detection/{model-id}``
Only one language detection model is currently
supported:
``projects/{project-number-or-id}/locations/{location-id}/models/language-detection/default``.
If not specified, the default model is used.
This corresponds to the ``model`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
mime_type (str):
Optional. The format of the source
text, for example, "text/html",
"text/plain". If left blank, the MIME
type defaults to "text/html".
This corresponds to the ``mime_type`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
content (str):
The content of the input stored as a
string.
This corresponds to the ``content`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.translate_v3.types.DetectLanguageResponse:
The response message for language
detection.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, model, mime_type, content])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a translation_service.DetectLanguageRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, translation_service.DetectLanguageRequest):
request = translation_service.DetectLanguageRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if model is not None:
request.model = model
if mime_type is not None:
request.mime_type = mime_type
if content is not None:
request.content = content
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.detect_language]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def get_supported_languages(self,
request: translation_service.GetSupportedLanguagesRequest = None,
*,
parent: str = None,
model: str = None,
display_language_code: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> translation_service.SupportedLanguages:
r"""Returns a list of supported languages for
translation.
Args:
request (google.cloud.translate_v3.types.GetSupportedLanguagesRequest):
The request object. The request message for discovering
supported languages.
parent (str):
Required. Project or location to make a call. Must refer
to a caller's project.
Format: ``projects/{project-number-or-id}`` or
``projects/{project-number-or-id}/locations/{location-id}``.
For global calls, use
``projects/{project-number-or-id}/locations/global`` or
``projects/{project-number-or-id}``.
Non-global location is required for AutoML models.
Only models within the same region (have same
location-id) can be used, otherwise an INVALID_ARGUMENT
(400) error is returned.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
model (str):
Optional. Get supported languages of this model.
The format depends on model type:
- AutoML Translation models:
``projects/{project-number-or-id}/locations/{location-id}/models/{model-id}``
- General (built-in) models:
``projects/{project-number-or-id}/locations/{location-id}/models/general/nmt``,
``projects/{project-number-or-id}/locations/{location-id}/models/general/base``
                Returns languages supported by the specified model. If
                missing, the supported languages of the Google general
                base (PBMT) model are returned.
This corresponds to the ``model`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
display_language_code (str):
Optional. The language to use to
return localized, human readable names
of supported languages. If missing, then
display names are not returned in a
response.
This corresponds to the ``display_language_code`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.translate_v3.types.SupportedLanguages:
The response message for discovering
supported languages.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, model, display_language_code])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a translation_service.GetSupportedLanguagesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, translation_service.GetSupportedLanguagesRequest):
request = translation_service.GetSupportedLanguagesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if model is not None:
request.model = model
if display_language_code is not None:
request.display_language_code = display_language_code
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_supported_languages]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def batch_translate_text(self,
request: translation_service.BatchTranslateTextRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Translates a large volume of text in asynchronous
batch mode. This function provides real-time output as
the inputs are being processed. If caller cancels a
request, the partial results (for an input file, it's
all or nothing) may still be available on the specified
output location.
This call returns immediately and you can
use google.longrunning.Operation.name to poll the status
of the call.
Args:
request (google.cloud.translate_v3.types.BatchTranslateTextRequest):
The request object. The batch translation request.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.translate_v3.types.BatchTranslateResponse` Stored in the
[google.longrunning.Operation.response][google.longrunning.Operation.response]
field returned by BatchTranslateText if at least one
sentence is translated successfully.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a translation_service.BatchTranslateTextRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, translation_service.BatchTranslateTextRequest):
request = translation_service.BatchTranslateTextRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.batch_translate_text]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
translation_service.BatchTranslateResponse,
metadata_type=translation_service.BatchTranslateMetadata,
)
# Done; return the response.
return response
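    # Editor's note (illustrative, not part of the generated client): callers
    # typically block on the returned long-running operation, e.g.
    #   op = client.batch_translate_text(request=batch_request)
    #   result = op.result(timeout=3600)
    # where batch_request is a fully populated BatchTranslateTextRequest (an
    # assumption for this sketch) and result is a BatchTranslateResponse.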
def create_glossary(self,
request: translation_service.CreateGlossaryRequest = None,
*,
parent: str = None,
glossary: translation_service.Glossary = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Creates a glossary and returns the long-running operation.
Returns NOT_FOUND, if the project doesn't exist.
Args:
request (google.cloud.translate_v3.types.CreateGlossaryRequest):
The request object. Request message for CreateGlossary.
parent (str):
Required. The project name.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
glossary (google.cloud.translate_v3.types.Glossary):
Required. The glossary to create.
This corresponds to the ``glossary`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.translate_v3.types.Glossary`
Represents a glossary built from user provided data.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, glossary])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a translation_service.CreateGlossaryRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, translation_service.CreateGlossaryRequest):
request = translation_service.CreateGlossaryRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if glossary is not None:
request.glossary = glossary
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_glossary]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
translation_service.Glossary,
metadata_type=translation_service.CreateGlossaryMetadata,
)
# Done; return the response.
return response
def list_glossaries(self,
request: translation_service.ListGlossariesRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListGlossariesPager:
r"""Lists glossaries in a project. Returns NOT_FOUND, if the project
doesn't exist.
Args:
request (google.cloud.translate_v3.types.ListGlossariesRequest):
The request object. Request message for ListGlossaries.
parent (str):
Required. The name of the project
from which to list all of the
glossaries.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.translate_v3.services.translation_service.pagers.ListGlossariesPager:
Response message for ListGlossaries.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a translation_service.ListGlossariesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, translation_service.ListGlossariesRequest):
request = translation_service.ListGlossariesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_glossaries]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListGlossariesPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
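    # Editor's note (illustrative, not part of the generated client): the pager
    # returned above is consumed lazily, e.g.
    #   for glossary in client.list_glossaries(parent="projects/my-project/locations/us-central1"):
    #       print(glossary.name)
    # The parent value is an assumption; additional pages are fetched
    # transparently by ListGlossariesPager.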
def get_glossary(self,
request: translation_service.GetGlossaryRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> translation_service.Glossary:
r"""Gets a glossary. Returns NOT_FOUND, if the glossary doesn't
exist.
Args:
request (google.cloud.translate_v3.types.GetGlossaryRequest):
The request object. Request message for GetGlossary.
name (str):
Required. The name of the glossary to
retrieve.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.translate_v3.types.Glossary:
Represents a glossary built from user
provided data.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a translation_service.GetGlossaryRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, translation_service.GetGlossaryRequest):
request = translation_service.GetGlossaryRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_glossary]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def delete_glossary(self,
request: translation_service.DeleteGlossaryRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Deletes a glossary, or cancels glossary construction if the
glossary isn't created yet. Returns NOT_FOUND, if the glossary
doesn't exist.
Args:
request (google.cloud.translate_v3.types.DeleteGlossaryRequest):
The request object. Request message for DeleteGlossary.
name (str):
Required. The name of the glossary to
delete.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.translate_v3.types.DeleteGlossaryResponse` Stored in the
[google.longrunning.Operation.response][google.longrunning.Operation.response]
field returned by DeleteGlossary.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a translation_service.DeleteGlossaryRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, translation_service.DeleteGlossaryRequest):
request = translation_service.DeleteGlossaryRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_glossary]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
translation_service.DeleteGlossaryResponse,
metadata_type=translation_service.DeleteGlossaryMetadata,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-translate",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = (
"TranslationServiceClient",
)
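# --- Editor's addition (illustrative only, not part of the generated client) ---
# A minimal sketch of calling the flattened translate_text helper defined
# above. The project id "my-project", the target language, and the sample text
# are assumptions; field access on the response follows the v3
# TranslateTextResponse message.
def _example_translate_text():  # hypothetical helper, never called by the library
    client = TranslationServiceClient()
    response = client.translate_text(
        parent="projects/my-project/locations/global",
        target_language_code="fr",
        contents=["Hello, world!"],
    )
    for translation in response.translations:
        print(translation.translated_text)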
| apache-2.0 | 3,091,188,023,495,868,400 | 41.914768 | 142 | 0.597986 | false |
intelligent-agent/redeem | redeem/CascadingConfigParser.py | 1 | 7445 | """
Author: Elias Bakken
email: elias(dot)bakken(at)gmail(dot)com
Website: http://www.thing-printer.com
License: GNU GPL v3: http://www.gnu.org/copyleft/gpl.html
Redeem is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Redeem is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Redeem. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
import os
from six import PY2
import struct
if PY2:
from ConfigParser import SafeConfigParser as Parser
else:
from configparser import ConfigParser as Parser
class CascadingConfigParser(Parser):
def __init__(self, config_files):
Parser.__init__(self)
# Write options in the case it was read.
# self.optionxform = str
# Parse to real path
self.config_files = []
for config_file in config_files:
self.config_files.append(os.path.realpath(config_file))
self.config_location = os.path.dirname(os.path.realpath(config_file))
# Parse all config files in list
for config_file in self.config_files:
if os.path.isfile(config_file):
logging.info("Using config file " + config_file)
if PY2:
self.readfp(open(config_file))
else:
self.read_file(open(config_file))
else:
logging.warning("Missing config file " + config_file)
# Might also add command line options for overriding stuff
def timestamp(self):
""" Get the largest (newest) timestamp for all the config files. """
ts = 0
for config_file in self.config_files:
if os.path.isfile(config_file):
ts = max(ts, os.path.getmtime(config_file))
printer_cfg = os.path.join(self.config_location, "printer.cfg")
if os.path.islink(printer_cfg):
ts = max(ts, os.lstat(printer_cfg).st_mtime)
return ts
def parse_capes(self):
""" Read the name and revision of each cape on the BeagleBone """
self.replicape_revision = None
self.reach_revision = None
import glob
paths = glob.glob("/sys/bus/i2c/devices/[1-2]-005[4-7]/*/nvmem")
paths.extend(glob.glob("/sys/bus/i2c/devices/[1-2]-005[4-7]/nvmem/at24-[1-4]/nvmem"))
#paths.append(glob.glob("/sys/bus/i2c/devices/[1-2]-005[4-7]/eeprom"))
for i, path in enumerate(paths):
try:
with open(path, "rb") as f:
data = f.read(120)
name = data[58:74].strip()
if name == b"BB-BONE-REPLICAP":
self.replicape_revision = data[38:42]
self.replicape_data = data
self.replicape_path = path
elif name[:13] == b"BB-BONE-REACH":
self.reach_revision = data[38:42]
self.reach_data = data
self.reach_path = path
if self.replicape_revision != None and self.reach_revision != None:
break
except IOError as e:
pass
# Parameters from the hardware
self.setup_key()
def get_default_settings(self):
fs = []
for config_file in self.config_files:
if os.path.isfile(config_file):
c_file = os.path.basename(config_file)
cp = Parser()
if PY2:
cp.readfp(open(config_file))
else:
cp.read_file(open(config_file))
fs.append((c_file, cp))
lines = []
for section in self.sections():
for option in self.options(section):
for (name, cp) in fs:
if cp.has_option(section, option):
line = [name, section, option, cp.get(section, option)]
lines.append(line)
return lines
def save(self, filename):
""" Save the changed settings to local.cfg """
current = CascadingConfigParser(self.config_files)
# Build a list of changed values
to_save = []
for section in self.sections():
#logging.debug(section)
for option in self.options(section):
if self.get(section, option) != current.get(section, option):
old = current.get(section, option)
val = self.get(section, option)
to_save.append((section, option, val, old))
# Update local config with changed values
local = Parser()
# Start each file with revision identification
local.add_section("Configuration")
local.set("Configuration", "version", "1")
if PY2:
local.readfp(open(filename))
else:
local.read_file(open(filename))
for opt in to_save:
(section, option, value, old) = opt
if not local.has_section(section):
local.add_section(section)
local.set(section, option, value)
logging.info("Update setting: {} from {} to {} ".format(option, old, value))
# Save changed values to file
local.write(open(filename, "w+"))
def check(self, filename):
""" Check the settings currently set against default.cfg """
default = Parser()
if PY2:
default.readfp(open(os.path.join(self.config_location, "default.cfg")))
else:
default.read_file(open(os.path.join(self.config_location, "default.cfg")))
local = Parser()
if PY2:
local.readfp(open(filename))
else:
local.read_file(open(filename))
local_ok = True
diff = set(local.sections()) - set(default.sections())
for section in diff:
logging.warning("Section {} does not exist in {}".format(section, "default.cfg"))
local_ok = False
for section in local.sections():
if not default.has_section(section):
continue
diff = set(local.options(section)) - set(default.options(section))
for option in diff:
logging.warning("Option {} in section {} does not exist in {}".format(
option, section, "default.cfg"))
local_ok = False
if local_ok:
logging.info("{} is OK".format(filename))
else:
logging.warning("{} contains errors.".format(filename))
return local_ok
def setup_key(self):
""" Get the generated key from the config or create one """
self.replicape_key = "".join(struct.unpack('20c', self.replicape_data[100:120]))
logging.debug("Found Replicape key: '" + self.replicape_key + "'")
if self.replicape_key == '\x00' * 20:
logging.debug("Replicape key invalid")
import random
import string
self.replicape_key = ''.join(
random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(20))
self.replicape_data = self.replicape_data[:100] + self.replicape_key
logging.debug("New Replicape key: '" + self.replicape_key + "'")
#logging.debug("".join(struct.unpack('20c', self.new_replicape_data[100:120])))
try:
with open(self.replicape_path, "wb") as f:
f.write(self.replicape_data[:120])
except IOError as e:
logging.warning("Unable to write new key to EEPROM")
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M')
c = CascadingConfigParser(
["/etc/redeem/default.cfg", "/etc/redeem/printer.cfg", "/etc/redeem/local.cfg"])
print(c.get_default_settings())
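  # Editor's addition (illustrative): typical follow-up calls; the local.cfg
  # path is an assumption and the calls are left commented out so this demo
  # stays read-only.
  # c.check("/etc/redeem/local.cfg")
  # c.save("/etc/redeem/local.cfg")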
| gpl-3.0 | 953,044,173,383,253,200 | 33.953052 | 98 | 0.637609 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-rdbms/azure/mgmt/rdbms/mysql/models/server_properties_for_restore.py | 1 | 2567 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .server_properties_for_create import ServerPropertiesForCreate
class ServerPropertiesForRestore(ServerPropertiesForCreate):
"""The properties used to create a new server by restoring from a backup.
All required parameters must be populated in order to send to Azure.
:param version: Server version. Possible values include: '5.6', '5.7'
:type version: str or ~azure.mgmt.rdbms.mysql.models.ServerVersion
:param ssl_enforcement: Enable ssl enforcement or not when connect to
server. Possible values include: 'Enabled', 'Disabled'
:type ssl_enforcement: str or
~azure.mgmt.rdbms.mysql.models.SslEnforcementEnum
:param storage_profile: Storage profile of a server.
:type storage_profile: ~azure.mgmt.rdbms.mysql.models.StorageProfile
:param create_mode: Required. Constant filled by server.
:type create_mode: str
:param source_server_id: Required. The source server id to restore from.
:type source_server_id: str
:param restore_point_in_time: Required. Restore point creation time
(ISO8601 format), specifying the time to restore from.
:type restore_point_in_time: datetime
"""
_validation = {
'create_mode': {'required': True},
'source_server_id': {'required': True},
'restore_point_in_time': {'required': True},
}
_attribute_map = {
'version': {'key': 'version', 'type': 'str'},
'ssl_enforcement': {'key': 'sslEnforcement', 'type': 'SslEnforcementEnum'},
'storage_profile': {'key': 'storageProfile', 'type': 'StorageProfile'},
'create_mode': {'key': 'createMode', 'type': 'str'},
'source_server_id': {'key': 'sourceServerId', 'type': 'str'},
'restore_point_in_time': {'key': 'restorePointInTime', 'type': 'iso-8601'},
}
def __init__(self, **kwargs):
super(ServerPropertiesForRestore, self).__init__(**kwargs)
self.source_server_id = kwargs.get('source_server_id', None)
self.restore_point_in_time = kwargs.get('restore_point_in_time', None)
self.create_mode = 'PointInTimeRestore'
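# Editor's addition (illustrative only, not generated by AutoRest): building a
# point-in-time restore definition. The source server id and timestamp are
# placeholder assumptions.
def _example_restore_properties():  # hypothetical helper, not part of the SDK
    import datetime
    return ServerPropertiesForRestore(
        source_server_id="/subscriptions/<sub-id>/resourceGroups/<rg>"
                         "/providers/Microsoft.DBforMySQL/servers/<source-server>",
        restore_point_in_time=datetime.datetime(2018, 3, 1, 12, 0, 0),
    )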
| mit | 7,632,846,821,311,256,000 | 44.839286 | 83 | 0.641605 | false |
andremissaglia/ekaaty_liveusbcreator | liveusb/linux_dialog.py | 1 | 8036 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'data/liveusb-creator.ui'
#
# Created: Tue Jan 15 12:09:54 2013
# by: PyQt4 UI code generator 4.9.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.resize(422, 388)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Dialog.sizePolicy().hasHeightForWidth())
Dialog.setSizePolicy(sizePolicy)
self.startButton = QtGui.QPushButton(Dialog)
self.startButton.setEnabled(True)
self.startButton.setGeometry(QtCore.QRect(130, 350, 158, 34))
self.startButton.setObjectName(_fromUtf8("startButton"))
self.textEdit = QtGui.QTextEdit(Dialog)
self.textEdit.setGeometry(QtCore.QRect(10, 200, 401, 111))
font = QtGui.QFont()
font.setPointSize(8)
self.textEdit.setFont(font)
self.textEdit.setReadOnly(True)
self.textEdit.setObjectName(_fromUtf8("textEdit"))
self.progressBar = QtGui.QProgressBar(Dialog)
self.progressBar.setGeometry(QtCore.QRect(10, 320, 401, 23))
self.progressBar.setProperty("value", 0)
self.progressBar.setObjectName(_fromUtf8("progressBar"))
self.downloadGroup = QtGui.QGroupBox(Dialog)
self.downloadGroup.setGeometry(QtCore.QRect(210, 80, 201, 51))
font = QtGui.QFont()
font.setPointSize(8)
self.downloadGroup.setFont(font)
self.downloadGroup.setObjectName(_fromUtf8("downloadGroup"))
self.downloadCombo = QtGui.QComboBox(self.downloadGroup)
self.downloadCombo.setGeometry(QtCore.QRect(10, 20, 161, 22))
self.downloadCombo.setObjectName(_fromUtf8("downloadCombo"))
self.refreshReleasesButton = QtGui.QPushButton(self.downloadGroup)
self.refreshReleasesButton.setGeometry(QtCore.QRect(170, 20, 30, 20))
self.refreshReleasesButton.setText(_fromUtf8(""))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/refresh.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.refreshReleasesButton.setIcon(icon)
self.refreshReleasesButton.setFlat(True)
self.refreshReleasesButton.setObjectName(_fromUtf8("refreshReleasesButton"))
self.label_2 = QtGui.QLabel(Dialog)
self.label_2.setGeometry(QtCore.QRect(180, 100, 23, 24))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Verdana"))
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.groupBox = QtGui.QGroupBox(Dialog)
self.groupBox.setGeometry(QtCore.QRect(10, 80, 161, 51))
font = QtGui.QFont()
font.setPointSize(8)
self.groupBox.setFont(font)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.isoBttn = QtGui.QPushButton(self.groupBox)
self.isoBttn.setGeometry(QtCore.QRect(11, 18, 141, 25))
self.isoBttn.setObjectName(_fromUtf8("isoBttn"))
self.groupBox_2 = QtGui.QGroupBox(Dialog)
self.groupBox_2.setGeometry(QtCore.QRect(10, 140, 191, 51))
font = QtGui.QFont()
font.setPointSize(8)
self.groupBox_2.setFont(font)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.driveBox = QtGui.QComboBox(self.groupBox_2)
self.driveBox.setGeometry(QtCore.QRect(10, 20, 151, 21))
self.driveBox.setEditable(False)
self.driveBox.setInsertPolicy(QtGui.QComboBox.InsertAtTop)
self.driveBox.setDuplicatesEnabled(False)
self.driveBox.setObjectName(_fromUtf8("driveBox"))
self.refreshDevicesButton = QtGui.QPushButton(self.groupBox_2)
self.refreshDevicesButton.setGeometry(QtCore.QRect(160, 20, 30, 20))
self.refreshDevicesButton.setText(_fromUtf8(""))
self.refreshDevicesButton.setIcon(icon)
self.refreshDevicesButton.setFlat(True)
self.refreshDevicesButton.setObjectName(_fromUtf8("refreshDevicesButton"))
self.overlayTitle = QtGui.QGroupBox(Dialog)
self.overlayTitle.setGeometry(QtCore.QRect(210, 140, 201, 51))
font = QtGui.QFont()
font.setPointSize(8)
self.overlayTitle.setFont(font)
self.overlayTitle.setObjectName(_fromUtf8("overlayTitle"))
self.overlaySlider = QtGui.QSlider(self.overlayTitle)
self.overlaySlider.setGeometry(QtCore.QRect(10, 20, 181, 21))
self.overlaySlider.setMaximum(2047)
self.overlaySlider.setOrientation(QtCore.Qt.Horizontal)
self.overlaySlider.setTickPosition(QtGui.QSlider.NoTicks)
self.overlaySlider.setObjectName(_fromUtf8("overlaySlider"))
self.label = QtGui.QLabel(Dialog)
self.label.setGeometry(QtCore.QRect(0, 0, 430, 72))
self.label.setText(_fromUtf8(""))
self.label.setPixmap(QtGui.QPixmap(_fromUtf8(":/liveusb-header.png")))
self.label.setObjectName(_fromUtf8("label"))
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", "Ekaaty LiveUSB Creator", None))
self.startButton.setWhatsThis(_translate("Dialog", "This button will begin the LiveUSB creation process. This entails optionally downloading a release (if an existing one wasn\'t selected), extracting the ISO to the USB device, creating the persistent overlay, and installing the bootloader.", None))
self.startButton.setText(_translate("Dialog", "Create Live USB", None))
self.textEdit.setWhatsThis(_translate("Dialog", "This is the status console, where all messages get written to.", None))
self.progressBar.setWhatsThis(_translate("Dialog", "This is the progress bar that will indicate how far along in the LiveUSB creation process you are", None))
self.downloadGroup.setWhatsThis(_translate("Dialog", "If you do not select an existing Live CD, the selected release will be downloaded for you.", None))
self.downloadGroup.setTitle(_translate("Dialog", "Download Ekaaty", None))
self.label_2.setText(_translate("Dialog", "or", None))
self.groupBox.setWhatsThis(_translate("Dialog", "This button allows you to browse for an existing Live CD ISO that you have previously downloaded. If you do not select one, a release will be downloaded for you automatically.", None))
self.groupBox.setTitle(_translate("Dialog", "Use existing Live CD", None))
self.isoBttn.setText(_translate("Dialog", "Browse", None))
self.isoBttn.setShortcut(_translate("Dialog", "Alt+B", None))
self.groupBox_2.setWhatsThis(_translate("Dialog", "This is the USB stick that you want to install your Live CD on. This device must be formatted with the FAT filesystem.", None))
self.groupBox_2.setTitle(_translate("Dialog", "Target Device", None))
self.overlayTitle.setWhatsThis(_translate("Dialog", "By allocating extra space on your USB stick for a persistent overlay, you will be able to store data and make permanent modifications to your live operating system. Without it, you will not be able to save data that will persist after a reboot.", "comment!"))
self.overlayTitle.setTitle(_translate("Dialog", "Persistent Storage (0 MB)", None))
import resources_rc
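# Editor's addition (illustrative test harness, not emitted by pyuic4 for this
# file): the usual way the generated Ui_Dialog class is attached to a QDialog.
if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    dialog = QtGui.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(dialog)
    dialog.show()
    sys.exit(app.exec_())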
| gpl-2.0 | -7,968,805,798,018,169,000 | 55.591549 | 321 | 0.699477 | false |
Drapegnik/bsu | decision-science/lab6/main.py | 1 | 4125 | import numpy as np
def edges_list_to_matrix(filename):
inp = file(filename, 'r')
n, m = map(lambda x: int(x), inp.readline().split())
t = np.ndarray((n, n))
t.fill(-1)
for _ in range(m):
i, j, time, _ = inp.readline().split()
i, j, time = map(lambda x: int(x), [i, j, time])
t[i][j] = time
return t
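# Editor's note (assumption, inferred from the parser above): "task1.in" and
# "task2.in" are expected to start with a line "n m" (number of events and
# number of jobs) followed by m lines of the form "i j time x", e.g.
#   4 5
#   0 1 3 0
# The fourth column is read but ignored, and missing edges are stored as -1.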
def format_array(array, prefix):
return '\t'.join(map(lambda (ind, x): '{0}[{1}]={2}'.format(prefix, ind, x), enumerate(array)))
def print_matrix(matrix, time_matrix, prefix):
n = len(matrix)
for i in range(n):
for j in range(n):
if time_matrix[i][j] != -1:
print '\t{0}({1}, {2}) = {3}'.format(prefix, i, j, matrix[i][j])
print
def count_params(filename):
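  # Editor's note (summary added for readability): this runs the classic CPM
  # passes -- a forward pass for the early event times Tp and a backward pass
  # for the late event times Tn -- then derives the event reserves R and, per
  # job, the early/late start/finish times and the summary, free, independent
  # and guaranteed reserves printed below.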
time_matrix = edges_list_to_matrix(filename)
n = len(time_matrix)
events_early_terms = np.array([-float('inf') for _ in range(n)])
events_early_terms[0] = 0
for i in range(n):
for j in range(n):
if time_matrix[j][i] != -1:
events_early_terms[i] = max(events_early_terms[i], events_early_terms[j] + time_matrix[j][i])
print 'Events early terms:\t{}'.format(format_array(events_early_terms, prefix='Tp'))
events_late_terms = np.array([float('inf') for _ in range(n)])
events_late_terms[n - 1] = events_early_terms[n - 1]
for i in reversed(range(n)):
for j in range(n):
if time_matrix[i][j] != -1:
events_late_terms[i] = min(events_late_terms[i], events_late_terms[j] - time_matrix[i][j])
print 'Events late terms:\t{}'.format(format_array(events_late_terms, prefix='Tn'))
events_time_reserves = map(lambda (early, late): late - early, zip(events_early_terms, events_late_terms))
print 'Events time reserves:\t{}\n'.format(format_array(events_time_reserves, prefix='R'))
jobs_early_terms_start = np.ndarray((n, n))
jobs_early_terms_finish = np.ndarray((n, n))
jobs_late_terms_start = np.ndarray((n, n))
jobs_late_terms_finish = np.ndarray((n, n))
jobs_summary_reserves = np.ndarray((n, n))
jobs_free_reserves = np.ndarray((n, n))
jobs_independent_reserves = np.ndarray((n, n))
jobs_guaranteed_reserves = np.ndarray((n, n))
for i in range(n):
for j in range(n):
if time_matrix[i][j] == -1:
continue
jobs_early_terms_start[i][j] = events_early_terms[i]
jobs_early_terms_finish[i][j] = events_early_terms[i] + time_matrix[i][j]
jobs_late_terms_start[i][j] = events_late_terms[j] - time_matrix[i][j]
jobs_late_terms_finish[i][j] = events_late_terms[j]
jobs_summary_reserves[i][j] = events_late_terms[j] - events_early_terms[i] - time_matrix[i][j]
jobs_free_reserves[i][j] = events_early_terms[j] - events_early_terms[i] - time_matrix[i][j]
jobs_independent_reserves[i][j] = max(0, events_early_terms[j] - events_late_terms[i] - time_matrix[i][j])
jobs_guaranteed_reserves[i][j] = events_late_terms[j] - events_late_terms[i] - time_matrix[i][j]
  jobs_params_titles = ['Jobs early terms start:', 'Jobs early terms finish:', 'Jobs late terms start:',
'Jobs late terms finish:', 'Jobs summary reserves:', 'Jobs free reserves:',
'Jobs independent reserves:', 'Jobs guaranteed reserves:']
  jobs_params_values = [jobs_early_terms_start, jobs_early_terms_finish, jobs_late_terms_start,
jobs_late_terms_finish, jobs_summary_reserves, jobs_free_reserves,
jobs_independent_reserves, jobs_guaranteed_reserves]
jobs_params_names = ['Tps', 'Tpf', 'Tns', 'Tnf', 'Rs', 'Rf', 'Rn', 'Rg']
for title, matrix, prefix in zip(jobs_params_titles, jobs_params_values, jobs_params_names):
print title
print_matrix(matrix, time_matrix, prefix=prefix)
if __name__ == '__main__':
# task 1
print '-'*100
print 'task1:\n'
count_params('task1.in')
print '-'*100
# task 2
print 'task2:\n'
count_params('task2.in')
print '-'*100
| mit | -2,541,268,419,509,398,000 | 40.666667 | 118 | 0.586424 | false |
jabbalaci/Bash-Utils | get_images.py | 1 | 1798 | #!/usr/bin/env python3
"""
Extract image links from a web page
===================================
Author: Laszlo Szathmary, 2011 ([email protected])
GitHub: https://github.com/jabbalaci/Bash-Utils
Given a webpage, extract all image links.
Usage:
------
get_images.py URL [URL]... [options]
Options:
-l, --length Show lengths of images.
Last update: 2017-01-09 (yyyy-mm-dd)
"""
import sys
from optparse import OptionParser
from urllib.parse import urljoin
import requests
from bs4 import BeautifulSoup
user_agent = {'User-agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:50.0) Gecko/20100101 Firefox/50.0'}
def get_content_length(url):
try:
h = requests.get(url, headers=user_agent).headers
return h['content-length']
except:
return "?"
def process(url, options):
r = requests.get(url, headers=user_agent)
soup = BeautifulSoup(r.text, "lxml")
for tag in soup.findAll('img', src=True):
image_url = urljoin(url, tag['src'])
print(image_url, end='')
if options.length:
length = get_content_length(image_url)
print('', length, end='')
print()
def main():
parser = OptionParser(usage='%prog URL [URL]... [options]')
#[options]
parser.add_option('-l',
'--length',
action='store_true',
default=False,
help='show lengths of images')
options, arguments = parser.parse_args()
if not arguments:
parser.print_help()
sys.exit(1)
# else, if at least one parameter was passed
for url in arguments:
process(url, options)
#############################################################################
if __name__ == "__main__":
main()
| mit | 4,686,281,172,346,054,000 | 22.973333 | 99 | 0.56396 | false |
jwhitlock/safetymomentum | setup.py | 1 | 2148 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packaging setup for safetymomentum."""
from safetymomentum import __version__ as version
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_long_description(title):
"""Create the long_description from other files."""
readme = open('README.rst').read()
history = open('HISTORY.rst').read()
body_tag = ".. Omit badges from docs"
readme_body_start = readme.index(body_tag)
assert readme_body_start
readme_body = readme[readme_body_start + len(body_tag):]
history_body = history.replace('.. :changelog:', '')
bars = '=' * len(title)
long_description = """
%(bars)s
%(title)s
%(bars)s
%(readme_body)s
%(history_body)s
""" % locals()
return long_description
requirements = [
'Django>=1.6',
]
test_requirements = [
'dj_database_url',
'django_nose',
'django_extensions',
]
setup(
name='safetymomentum',
version=version,
description='Safety Momentum, for safety!', # flake8: noqa
long_description=get_long_description('Safety Momentum, for safety!'),
author='Safety Momentum',
author_email='[email protected]',
url='https://github.com/jwhitlock/safetymomentum',
packages=[
'safetymomentum',
],
package_dir={
'safetymomentum': 'safetymomentum',
},
include_package_data=True,
install_requires=requirements,
license="MPL 2.0",
zip_safe=False,
keywords='safetymomentum',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='sm_site.runtests.runtests',
tests_require=test_requirements
)
| mpl-2.0 | -4,726,363,298,531,102,000 | 25.85 | 74 | 0.628492 | false |
rdo-management/neutron | neutron/agent/l3/dvr_router.py | 1 | 9814 | # Copyright (c) 2015 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_utils import excutils
from neutron.agent.l3 import dvr_fip_ns
from neutron.agent.l3 import dvr_snat_ns
from neutron.agent.l3 import router_info as router
from neutron.agent.linux import ip_lib
from neutron.common import constants as l3_constants
from neutron.common import utils as common_utils
from neutron.i18n import _LE
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class DvrRouter(router.RouterInfo):
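    # Editor's note (summary added for readability): this RouterInfo
    # specialization tracks the floating IPs hosted on this agent's host, the
    # link-local pair into the FIP namespace, and the optional SNAT namespace
    # managed by create_snat_namespace()/delete_snat_namespace() below.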
def __init__(self, agent, host, *args, **kwargs):
super(DvrRouter, self).__init__(*args, **kwargs)
self.agent = agent
self.host = host
self.floating_ips_dict = {}
self.snat_iptables_manager = None
# Linklocal subnet for router and floating IP namespace link
self.rtr_fip_subnet = None
self.dist_fip_count = None
self.snat_namespace = None
def get_floating_ips(self):
"""Filter Floating IPs to be hosted on this agent."""
floating_ips = super(DvrRouter, self).get_floating_ips()
return [i for i in floating_ips if i['host'] == self.host]
def _handle_fip_nat_rules(self, interface_name, action):
"""Configures NAT rules for Floating IPs for DVR.
        Remove all the rules first. This is safe because if
        use_namespaces is set to False the agent can only configure
        one router; otherwise each router's NAT rules are kept in
        their own namespace.
"""
self.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING')
self.iptables_manager.ipv4['nat'].empty_chain('snat')
# Add back the jump to float-snat
self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat')
# And add them back if the action is add_rules
if action == 'add_rules' and interface_name:
rule = ('POSTROUTING', '! -i %(interface_name)s '
'! -o %(interface_name)s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' %
{'interface_name': interface_name})
self.iptables_manager.ipv4['nat'].add_rule(*rule)
self.iptables_manager.apply()
def floating_ip_added_dist(self, fip, fip_cidr):
"""Add floating IP to FIP namespace."""
floating_ip = fip['floating_ip_address']
fixed_ip = fip['fixed_ip_address']
rule_pr = self.fip_ns.allocate_rule_priority()
self.floating_ips_dict[floating_ip] = rule_pr
fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id)
ip_rule = ip_lib.IpRule(namespace=self.ns_name)
ip_rule.add(fixed_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr)
#Add routing rule in fip namespace
fip_ns_name = self.fip_ns.get_name()
rtr_2_fip, _ = self.rtr_fip_subnet.get_pair()
device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name)
device.route.add_route(fip_cidr, str(rtr_2_fip.ip))
interface_name = (
self.fip_ns.get_ext_device_name(
self.fip_ns.agent_gateway_port['id']))
ip_lib.send_garp_for_proxyarp(fip_ns_name,
interface_name,
floating_ip,
self.agent_conf.send_arp_for_ha)
# update internal structures
self.dist_fip_count = self.dist_fip_count + 1
def floating_ip_removed_dist(self, fip_cidr):
"""Remove floating IP from FIP namespace."""
floating_ip = fip_cidr.split('/')[0]
rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id)
fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id)
if self.rtr_fip_subnet is None:
self.rtr_fip_subnet = self.local_subnets.allocate(self.router_id)
rtr_2_fip, fip_2_rtr = self.rtr_fip_subnet.get_pair()
fip_ns_name = self.fip_ns.get_name()
if floating_ip in self.floating_ips_dict:
rule_pr = self.floating_ips_dict[floating_ip]
ip_rule = ip_lib.IpRule(namespace=self.ns_name)
ip_rule.delete(floating_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr)
self.fip_ns.deallocate_rule_priority(rule_pr)
#TODO(rajeev): Handle else case - exception/log?
device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name)
device.route.delete_route(fip_cidr, str(rtr_2_fip.ip))
# check if this is the last FIP for this router
self.dist_fip_count = self.dist_fip_count - 1
if self.dist_fip_count == 0:
#remove default route entry
device = ip_lib.IPDevice(rtr_2_fip_name, namespace=self.ns_name)
ns_ip = ip_lib.IPWrapper(namespace=fip_ns_name)
device.route.delete_gateway(str(fip_2_rtr.ip),
table=dvr_fip_ns.FIP_RT_TBL)
self.fip_ns.local_subnets.release(self.router_id)
self.rtr_fip_subnet = None
ns_ip.del_veth(fip_2_rtr_name)
is_last = self.fip_ns.unsubscribe(self.router_id)
if is_last:
# TODO(Carl) I can't help but think that another router could
# come in and want to start using this namespace while this is
# destroying it. The two could end up conflicting on
# creating/destroying interfaces and such. I think I'd like a
# semaphore to sync creation/deletion of this namespace.
self.fip_ns.delete()
self.fip_ns = None
def add_floating_ip(self, fip, interface_name, device):
if not self._add_fip_addr_to_device(fip, device):
return l3_constants.FLOATINGIP_STATUS_ERROR
# Special Handling for DVR - update FIP namespace
# and ri.namespace to handle DVR based FIP
ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])
self.floating_ip_added_dist(fip, ip_cidr)
return l3_constants.FLOATINGIP_STATUS_ACTIVE
def remove_floating_ip(self, device, ip_cidr):
super(DvrRouter, self).remove_floating_ip(device, ip_cidr)
self.floating_ip_removed_dist(ip_cidr)
def create_snat_namespace(self):
# TODO(mlavalle): in the near future, this method should contain the
# code in the L3 agent that creates a gateway for a dvr. The first step
# is to move the creation of the snat namespace here
self.snat_namespace = dvr_snat_ns.SnatNamespace(self.router['id'],
self.agent_conf,
self.driver,
self.use_ipv6)
self.snat_namespace.create()
return self.snat_namespace
def delete_snat_namespace(self):
# TODO(mlavalle): in the near future, this method should contain the
# code in the L3 agent that removes an external gateway for a dvr. The
# first step is to move the deletion of the snat namespace here
self.snat_namespace.delete()
self.snat_namespace = None
def _get_internal_port(self, subnet_id):
"""Return internal router port based on subnet_id."""
router_ports = self.router.get(l3_constants.INTERFACE_KEY, [])
for port in router_ports:
fips = port['fixed_ips']
for f in fips:
if f['subnet_id'] == subnet_id:
return port
def _update_arp_entry(self, ip, mac, subnet_id, operation):
"""Add or delete arp entry into router namespace for the subnet."""
port = self._get_internal_port(subnet_id)
# update arp entry only if the subnet is attached to the router
if not port:
return
ip_cidr = str(ip) + '/32'
try:
# TODO(mrsmith): optimize the calls below for bulk calls
net = netaddr.IPNetwork(ip_cidr)
interface_name = self.get_internal_device_name(port['id'])
device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)
if operation == 'add':
device.neigh.add(net.version, ip, mac)
elif operation == 'delete':
device.neigh.delete(net.version, ip, mac)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("DVR: Failed updating arp entry"))
def _set_subnet_arp_info(self, port):
"""Set ARP info retrieved from Plugin for existing ports."""
if 'id' not in port['subnet']:
return
subnet_id = port['subnet']['id']
# TODO(Carl) Can we eliminate the need to make this RPC while
# processing a router.
subnet_ports = self.agent.get_ports_by_subnet(subnet_id)
for p in subnet_ports:
if p['device_owner'] not in l3_constants.ROUTER_INTERFACE_OWNERS:
for fixed_ip in p['fixed_ips']:
self._update_arp_entry(fixed_ip['ip_address'],
p['mac_address'],
subnet_id,
'add')
| apache-2.0 | 8,099,741,431,079,580,000 | 44.435185 | 79 | 0.5968 | false |
openstack/sahara-dashboard | sahara_dashboard/content/data_processing/data_plugins/workflows/update.py | 1 | 4193 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.utils.translation import ugettext_lazy as _
from saharaclient.api import base as api_base
from horizon import exceptions
from horizon import forms
from horizon import workflows
from sahara_dashboard.api import sahara as saharaclient
class UpdateLabelsAction(workflows.Action):
def __init__(self, request, *args, **kwargs):
super(UpdateLabelsAction, self).__init__(request, *args, **kwargs)
plugin_name = [
x['plugin_name'] for x in args if 'plugin_name' in x][0]
plugin = saharaclient.plugin_get(request, plugin_name)
self._serialize_labels(
'plugin_', _("Plugin label"), plugin.plugin_labels)
vers_labels = plugin.version_labels
for version in vers_labels.keys():
field_label = _("Plugin version %(version)s label") % {
'version': version}
self._serialize_labels(
'version_%s_' % version, field_label, vers_labels[version])
self.fields["plugin_name"] = forms.CharField(
widget=forms.HiddenInput(),
initial=plugin_name)
def _serialize_labels(self, prefix, prefix_trans, labels):
for name, label in labels.items():
if not label['mutable']:
continue
res_name_translated = "%s: %s" % (prefix_trans, name)
res_name = "label_%s%s" % (prefix, name)
self.fields[res_name] = forms.BooleanField(
label=res_name_translated,
help_text=label['description'],
widget=forms.CheckboxInput(),
initial=label['status'],
required=False,
)
class Meta(object):
name = _("Plugin")
help_text = _("Update the plugin labels")
class UpdatePluginStep(workflows.Step):
action_class = UpdateLabelsAction
depends_on = ('plugin_name', )
def contribute(self, data, context):
for name, item in data.items():
context[name] = item
return context
class UpdatePlugin(workflows.Workflow):
slug = "update_plugin"
name = _("Update Plugin")
success_message = _("Updated")
failure_message = _("Could not update plugin")
success_url = "horizon:project:data_processing.data_plugins:index"
default_steps = (UpdatePluginStep,)
def __init__(self, request, context_seed, entry_point, *args, **kwargs):
super(UpdatePlugin, self).__init__(
request, context_seed, entry_point, *args, **kwargs)
def _get_update_values(self, context):
values = {'plugin_labels': {}, 'version_labels': {}}
for item, item_value in context.items():
if not item.startswith('label_'):
continue
name = item.split('_')[1:]
if name[0] == 'plugin':
values['plugin_labels'][name[1]] = {'status': item_value}
else:
if name[1] not in values['version_labels']:
values['version_labels'][name[1]] = {}
values['version_labels'][
name[1]][name[2]] = {'status': item_value}
return values
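    # Illustrative sketch of the key format parsed above (label names are
    # hypothetical): a context entry 'label_plugin_stable' -> True becomes
    #     values['plugin_labels']['stable'] = {'status': True}
    # while 'label_version_2.7.1_stable' -> True becomes
    #     values['version_labels']['2.7.1']['stable'] = {'status': True}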
def handle(self, request, context):
try:
update_values = self._get_update_values(context)
saharaclient.plugin_update(
request, context['plugin_name'], update_values)
return True
except api_base.APIException as e:
self.error_description = str(e)
return False
except Exception:
exceptions.handle(request,
_("Plugin update failed."))
return False
| apache-2.0 | -4,733,765,303,333,254,000 | 37.118182 | 76 | 0.59814 | false |
ktan2020/legacy-automation | win/Lib/site-packages/wx-3.0-msw/wx/tools/XRCed/plugins/xh_wxlib.py | 1 | 2451 | # Name: wxlib.py
# Purpose: XML handlers for wx.lib classes
# Author: Roman Rolinsky <[email protected]>
# Created: 05.09.2007
# RCS-ID: $Id$
import wx
import wx.xrc as xrc
import wx.lib.foldpanelbar as fpb
from wx.lib.ticker_xrc import wxTickerXmlHandler
from wx.tools.XRCed.globals import TRACE
class FoldPanelBarXmlHandler(xrc.XmlResourceHandler):
def __init__(self):
xrc.XmlResourceHandler.__init__(self)
# Standard styles
self.AddWindowStyles()
# Custom styles
self.AddStyle('FPB_SINGLE_FOLD', fpb.FPB_SINGLE_FOLD)
self.AddStyle('FPB_COLLAPSE_TO_BOTTOM', fpb.FPB_COLLAPSE_TO_BOTTOM)
self.AddStyle('FPB_EXCLUSIVE_FOLD', fpb.FPB_EXCLUSIVE_FOLD)
self.AddStyle('FPB_HORIZONTAL', fpb.FPB_HORIZONTAL)
self.AddStyle('FPB_VERTICAL', fpb.FPB_VERTICAL)
self._isInside = False
def CanHandle(self,node):
return not self._isInside and self.IsOfClass(node, 'wx.lib.foldpanelbar.FoldPanelBar') or \
self._isInside and self.IsOfClass(node, 'foldpanel')
# Process XML parameters and create the object
def DoCreateResource(self):
TRACE('DoCreateResource: %s', self.GetClass())
if self.GetClass() == 'foldpanel':
n = self.GetParamNode('object')
if n:
old_ins = self._isInside
self._isInside = False
bar = self._w
item = self.CreateResFromNode(n, bar, None)
self._isInside = old_ins
wnd = item
if wnd:
item = bar.AddFoldPanel(self.GetText('label'),
collapsed=self.GetBool('collapsed'))
bar.AddFoldPanelWindow(item, wnd)
return wnd
else:
w = fpb.FoldPanelBar(self.GetParentAsWindow(),
self.GetID(),
self.GetPosition(),
self.GetSize(),
self.GetStyle(),
self.GetStyle('exstyle'))
self.SetupWindow(w)
self._w = w
old_ins = self._isInside
self._isInside = True
self.CreateChildren(w, True)
self._isInside = old_ins
return w
| mit | -6,277,563,818,982,344,000 | 37.532258 | 99 | 0.529172 | false |
uni-peter-zheng/tp-libvirt | libvirt/tests/src/resource_abnormal.py | 1 | 36840 | import os
import time
import stat
import signal
import logging
import threading
from autotest.client.shared import error
from autotest.client.shared import utils
from virttest import libvirt_storage
from virttest import utils_selinux
from virttest import qemu_storage
from virttest import libvirt_vm
from virttest import utils_misc
from virttest import virsh
from virttest import remote
from virttest import data_dir
from virttest.libvirt_xml import vol_xml
from virttest.libvirt_xml import vm_xml
from virttest.utils_test import libvirt
from virttest.staging import utils_cgroup
from virttest.staging import service
from virttest.tests import unattended_install
class Vol_clone(object):
"""
Test volume clone with abnormal resource
"""
def __init__(self, test, params):
self.pvtest = None
self.pool = None
self.test = test
self.params = params
self.vol_name = params.get("volume_name")
self.vol_new_name = params.get("volume_new_name")
self.pool_name = params.get("pool_name")
self.volume_size = params.get("volume_size", "1G")
self.pool_type = params.get("pool_type")
self.pool_target = params.get("pool_target")
self.emulated_img = params.get("emulated_image", "emulated-img")
def run_test(self):
"""
        Start test, create a volume.
"""
emulated_size = "%sG" % (2 * int(self.volume_size[:-1]) + 1)
if int(self.volume_size[:-1]) <= 1:
raise error.TestNAError("Volume size must large than 1G")
self.pvtest = libvirt.PoolVolumeTest(self.test, self.params)
self.pvtest.pre_pool(self.pool_name,
self.pool_type,
self.pool_target,
self.emulated_img,
image_size=emulated_size,
pre_disk_vol=[self.volume_size])
self.pool = libvirt_storage.PoolVolume(self.pool_name)
self.pool.create_volume(self.vol_name, self.volume_size)
def result_confirm(self, params):
"""
        Confirm whether the volume clone succeeded.
"""
if self.pool:
if not self.pool.clone_volume(self.vol_name, self.vol_new_name):
raise error.TestFail("Clone volume failed!")
def recover(self, params=None):
"""
Recover test environment
"""
if self.pvtest:
self.pvtest.cleanup_pool(self.pool_name, self.pool_type,
self.pool_target, self.emulated_img)
class Vol_create(object):
"""
Test volume create with abnormal resource
"""
def __init__(self, test, params):
self.pvtest = None
self.pool = None
self.test = test
self.params = params
self.vol_name = params.get("volume_name")
self.vol_new_name = params.get("volume_new_name")
self.pool_name = params.get("pool_name")
self.volume_size = params.get("volume_size", "1G")
self.pool_type = params.get("pool_type")
self.pool_target = params.get("pool_target")
self.emulated_img = params.get("emulated_image", "emulated-img")
def run_test(self):
"""
        Start test, create a volume.
"""
emulated_size = "%sG" % (int(self.volume_size[:-1]) + 1)
if int(self.volume_size[:-1]) <= 1:
raise error.TestNAError("Volume size must large than 1G")
self.pvtest = libvirt.PoolVolumeTest(self.test, self.params)
self.pvtest.pre_pool(self.pool_name,
self.pool_type,
self.pool_target,
self.emulated_img,
image_size=emulated_size,
pre_disk_vol=[self.volume_size])
self.pool = libvirt_storage.PoolVolume(self.pool_name)
self.pool.create_volume(self.vol_name, self.volume_size)
def result_confirm(self, params):
"""
        Confirm whether the volume creation succeeded.
"""
if self.pool:
volxml = vol_xml.VolXML.new_from_vol_dumpxml(self.vol_name,
self.pool_name)
volxml.name = self.vol_new_name
if volxml.create(self.pool_name):
raise error.TestFail("Volume '%s' created succeed but"
" expect failed!" % self.vol_new_name)
volxml.capacity = 1024 * 1024 * 1024 / 2
volxml.allocation = 1024 * 1024 * 1024 / 2
if not volxml.create(self.pool_name):
raise error.TestFail("Volume '%s' created failed!"
% self.vol_new_name)
def recover(self, params=None):
"""
Recover test environment
"""
if self.pvtest:
self.pvtest.cleanup_pool(self.pool_name, self.pool_type,
self.pool_target, self.emulated_img)
class Virt_clone(object):
"""
Test virt-clone with abnormal resource
"""
def __init__(self, test, params):
self.td = None
self.cpu_num = int(params.get("cpu_num", "1"))
self.vm_name = params.get("main_vm")
self.vm_new_name = params.get("vm_new_name")
self.cgroup_name = params.get("cgroup_name")
self.cgroup_dir = params.get("cgroup_dir")
self.new_image_file = params.get("new_image_file")
if self.new_image_file:
self.new_image_file = os.path.join(test.virtdir,
self.new_image_file)
self.time_out = int(params.get("time_out", "600"))
self.cpu_status = utils_misc.get_cpu_status(self.cpu_num)
self.twice_execute = "yes" == params.get("twice_execute", "no")
self.kill_first = "yes" == params.get("kill_first", "no")
if params.get("abnormal_type") in ["disk_lack", ""]:
self.selinux_enforcing = utils_selinux.is_enforcing()
if self.selinux_enforcing:
utils_selinux.set_status("permissive")
self.fs_type = params.get("fs_type", "ext4")
xml_file = vm_xml.VMXML.new_from_inactive_dumpxml(self.vm_name)
disk_node = xml_file.get_disk_all()['vda']
source_file = disk_node.find('source').get('file')
self.image_size = utils_misc.get_image_info(source_file)['dsize']
# Set the size to be image_size
iscsi_size = "%sM" % (self.image_size / 1024 / 1024)
params['image_size'] = iscsi_size
self.iscsi_dev = qemu_storage.Iscsidev(params, test.virtdir,
"iscsi")
try:
device_source = self.iscsi_dev.setup()
except (error.TestError, ValueError), detail:
self.iscsi_dev.cleanup()
raise error.TestNAError("Cannot get iscsi device on this"
" host:%s\n" % detail)
libvirt.mk_part(device_source, iscsi_size)
self.mount_dir = os.path.join(test.virtdir,
params.get('mount_dir'))
if not os.path.exists(self.mount_dir):
os.mkdir(self.mount_dir)
params['mount_dir'] = self.mount_dir
self.partition = device_source + "1"
libvirt.mkfs(self.partition, self.fs_type)
utils_misc.mount(self.partition, self.mount_dir, self.fs_type)
self.new_image_file = os.path.join(self.mount_dir, "new_file")
def run_test(self):
"""
Start test, clone a guest in a cgroup
"""
if virsh.domain_exists(self.vm_new_name):
raise error.TestNAError("'%s' already exists! Please"
" select another domain name!"
% self.vm_new_name)
if os.path.exists(self.new_image_file):
os.remove(self.new_image_file)
modules = utils_cgroup.CgroupModules(self.cgroup_dir)
modules.init(['cpuset'])
self.cgroup = utils_cgroup.Cgroup('cpuset', None)
self.cgroup.initialize(modules)
self.cgroup_index = self.cgroup.mk_cgroup(cgroup=self.cgroup_name)
        # Before using the cpu, make sure it is enabled
if self.cpu_status < 1:
utils_misc.set_cpu_status(self.cpu_num, True)
self.cgroup.set_property("cpuset.cpus", self.cpu_num,
self.cgroup_index, check=False)
self.cgroup.set_property("cpuset.mems", 0, self.cgroup_index,
check=False)
self.td0 = threading.Thread(target=self.cgroup.cgexec,
args=(self.cgroup_name, "virt-clone",
"-o %s -n %s --force --file %s"
% (self.vm_name, self.vm_new_name,
self.new_image_file)))
self.td1 = None
if self.twice_execute:
self.vm_new_name1 = self.vm_new_name + "1"
self.new_image_file1 = self.new_image_file + "1"
self.td1 = threading.Thread(target=self.cgroup.cgexec,
args=(self.cgroup_name, "virt-clone",
"-o %s -n %s --force --file %s"
% (self.vm_name,
self.vm_new_name1,
self.new_image_file1)))
self.td1.start()
self.td0.start()
        # Wait for virt-clone to start
time.sleep(30)
def result_confirm(self, params):
"""
        Confirm whether virt-clone succeeded.
"""
if self.kill_first:
            # Stop this thread
first_pid = self.cgroup.get_pids(self.cgroup_index)[-1]
utils_misc.safe_kill(int(first_pid), signal.SIGKILL)
else:
self.td0.join(self.time_out)
if self.td1:
self.td1.join(self.time_out)
abnormal_type = params.get("abnormal_type")
if abnormal_type == "cpu_lack":
if not virsh.domain_exists(self.vm_new_name):
raise error.TestFail("Clone '%s' failed" % self.vm_new_name)
else:
result = virsh.start(self.vm_new_name, ignore_status=True)
if result.exit_status:
raise error.TestFail("Cloned domain cannot be started!")
elif abnormal_type == "disk_lack":
if virsh.domain_exists(self.vm_new_name):
raise error.TestFail("Clone '%s' succeed but expect failed!"
% self.vm_new_name)
else:
if self.twice_execute and not self.kill_first:
if virsh.domain_exists(self.vm_new_name):
raise error.TestFail("Clone '%s' succeed but expect"
" failed!" % self.vm_new_name)
if virsh.domain_exists(self.vm_new_name1):
raise error.TestFail("Clone '%s' succeed but expect"
" failed!" % self.vm_new_name1)
elif self.twice_execute and self.kill_first:
if not virsh.domain_exists(self.vm_new_name):
raise error.TestFail("Clone '%s' failed!"
% self.vm_new_name)
def recover(self, params):
"""
Recover test environment
"""
abnormal_type = params.get("abnormal_type")
cpu_enable = True if self.cpu_status else False
utils_misc.set_cpu_status(self.cpu_num, cpu_enable)
if virsh.domain_exists(self.vm_new_name):
virsh.remove_domain(self.vm_new_name)
if os.path.exists(self.new_image_file):
os.remove(self.new_image_file)
if self.twice_execute:
if virsh.domain_exists(self.vm_new_name1):
virsh.remove_domain(self.vm_new_name1)
if os.path.exists(self.new_image_file1):
os.remove(self.new_image_file1)
if abnormal_type == "memory_lack":
if params.has_key('memory_pid'):
pid = params.get('memory_pid')
                if isinstance(pid, str):
pid = int(pid)
utils_misc.safe_kill(pid, signal.SIGKILL)
utils.run("swapon -a")
tmp_c_file = params.get("tmp_c_file", "/tmp/test.c")
tmp_exe_file = params.get("tmp_exe_file", "/tmp/test")
if os.path.exists(tmp_c_file):
os.remove(tmp_c_file)
if os.path.exists(tmp_exe_file):
os.remove(tmp_exe_file)
elif abnormal_type in ["disk_lack", ""]:
if self.selinux_enforcing:
utils_selinux.set_status("enforcing")
tmp_file = os.path.join(self.mount_dir, "tmp")
if os.path.exists(tmp_file):
os.remove(tmp_file)
# Sometimes one umount action is not enough
utils_misc.wait_for(lambda: utils_misc.umount(self.partition,
self.mount_dir,
self.fs_type), 120)
if self.iscsi_dev:
self.iscsi_dev.cleanup()
os.rmdir(self.mount_dir)
elif abnormal_type == "cpu_lack":
os.system("cat /sys/fs/cgroup/cpuset/cpuset.cpus > /sys/fs/cgroup/cpuset/machine.slice/cpuset.cpus")
remove_machine_cgroup()
class Snapshot_create(object):
"""
Test snapshot create
"""
def __init__(self, test, params):
self.cpu_num = int(params.get("cpu_num", "1"))
self.cgroup_name = params.get("cgroup_name")
self.cgroup_dir = params.get("cgroup_dir")
self.time_out = int(params.get("time_out", "600"))
self.vm_name = params.get("main_vm")
self.twice_execute = "yes" == params.get("twice_execute", "no")
self.kill_first = "yes" == params.get("kill_first", "no")
xml_file = vm_xml.VMXML.new_from_inactive_dumpxml(self.vm_name)
disk_node = xml_file.get_disk_all()['vda']
source_file = disk_node.find('source').get('file')
image_type = utils_misc.get_image_info(source_file)['format']
if image_type != "qcow2":
raise error.TestNAError("Disk image format is not qcow2, "
"ignore snapshot test!")
self.cpu_status = utils_misc.get_cpu_status(self.cpu_num)
self.current_snp_list = []
self.snp_list = virsh.snapshot_list(self.vm_name)
env = params.get("env")
vm = env.get_vm(self.vm_name)
# This can add snapshot create time
vm.wait_for_login()
def run_test(self):
"""
        Start test, create a cgroup to create a snapshot.
"""
modules = utils_cgroup.CgroupModules(self.cgroup_dir)
modules.init(['cpuset'])
self.cgroup = utils_cgroup.Cgroup('cpuset', None)
self.cgroup.initialize(modules)
self.cgroup_index = self.cgroup.mk_cgroup(cgroup=self.cgroup_name)
        # Before using the cpu, make sure it is enabled
if self.cpu_status < 1:
utils_misc.set_cpu_status(self.cpu_num, True)
self.cgroup.set_property("cpuset.cpus", self.cpu_num,
self.cgroup_index, check=False)
self.cgroup.set_property("cpuset.mems", 0, self.cgroup_index,
check=False)
self.td0 = threading.Thread(target=self.cgroup.cgexec,
args=(self.cgroup_name, "virsh",
"snapshot-create %s" % self.vm_name))
self.td1 = None
if self.twice_execute:
self.td1 = threading.Thread(target=self.cgroup.cgexec,
args=(self.cgroup_name, "virsh",
"snapshot-create %s"
% self.vm_name))
self.td1.start()
self.td0.start()
def result_confirm(self, params):
"""
Confirm if snapshot has been created.
"""
if params.has_key('cpu_pid'):
cpu_id = params.get('cpu_pid')
self.cgroup.cgclassify_cgroup(int(cpu_id), self.cgroup_name)
if self.kill_first:
            # Stop this thread
try:
first_pid = self.cgroup.get_pids(self.cgroup_index)[1]
utils_misc.safe_kill(int(first_pid), signal.SIGKILL)
except IndexError:
logging.info("Snapshot create process in cgroup"
" has been over")
else:
if self.td1:
self.td1.join(self.time_out)
self.td0.join(self.time_out)
self.current_snp_list = virsh.snapshot_list(self.vm_name)
if len(self.snp_list) >= len(self.current_snp_list):
raise error.TestFail("Create snapshot failed for low memory!")
def recover(self, params=None):
"""
Recover test environment
"""
cpu_enable = True if self.cpu_status else False
utils_misc.set_cpu_status(self.cpu_num, cpu_enable)
tmp_c_file = params.get("tmp_c_file", "/tmp/test.c")
tmp_exe_file = params.get("tmp_exe_file", "/tmp/test")
if os.path.exists(tmp_c_file):
os.remove(tmp_c_file)
if os.path.exists(tmp_exe_file):
os.remove(tmp_exe_file)
if params.has_key('memory_pid'):
pid = int(params.get('memory_pid'))
utils_misc.safe_kill(pid, signal.SIGKILL)
utils.run("swapon -a")
if params.has_key('cpu_pid'):
pid = int(params.get('cpu_pid'))
utils_misc.safe_kill(pid, signal.SIGKILL)
tmp_sh_file = params.get("tmp_sh_file")
if os.path.exists(tmp_sh_file):
os.remove(tmp_sh_file)
virsh.destroy(self.vm_name)
if len(self.snp_list) < len(self.current_snp_list):
self.diff_snp_list = list(set(self.current_snp_list) -
set(self.snp_list))
for item in self.diff_snp_list:
virsh.snapshot_delete(self.vm_name, item)
remove_machine_cgroup()
class Virsh_dump(object):
"""
Test virsh dump with abnormal resource
"""
def __init__(self, test, params):
self.cpu_num = int(params.get("cpu_num", "1"))
self.cgroup_name = params.get("cgroup_name")
self.cgroup_dir = params.get("cgroup_dir")
self.time_out = int(params.get("time_out", "600"))
self.vm_name = params.get("main_vm")
self.twice_execute = "yes" == params.get("twice_execute", "no")
self.kill_first = "yes" == params.get("kill_first", "no")
self.cpu_status = utils_misc.get_cpu_status(self.cpu_num)
self.dump_file = os.path.join(test.virtdir,
params.get("dump_file", "dump.info"))
self.dump_file1 = self.dump_file + "1"
env = params.get("env")
vm = env.get_vm(self.vm_name)
vm.wait_for_login()
def run_test(self):
"""
        Start test, create a cgroup to run virsh dump.
"""
modules = utils_cgroup.CgroupModules(self.cgroup_dir)
modules.init(['cpuset'])
self.cgroup = utils_cgroup.Cgroup('cpuset', None)
self.cgroup.initialize(modules)
self.cgroup_index = self.cgroup.mk_cgroup(cgroup=self.cgroup_name)
        # Before using the cpu, make sure it is enabled
if self.cpu_status < 1:
utils_misc.set_cpu_status(self.cpu_num, True)
self.cgroup.set_property("cpuset.cpus", self.cpu_num,
self.cgroup_index, check=False)
self.cgroup.set_property("cpuset.mems", 0, self.cgroup_index,
check=False)
self.td0 = threading.Thread(target=self.cgroup.cgexec,
args=(self.cgroup_name, "virsh",
"dump %s %s"
% (self.vm_name, self.dump_file)))
self.td1 = None
if self.twice_execute:
self.td1 = threading.Thread(target=self.cgroup.cgexec,
args=(self.cgroup_name, "virsh",
"dump %s %s"
% (self.vm_name,
self.dump_file1)))
self.td1.start()
self.td0.start()
def result_confirm(self, params):
"""
Confirm if dump file has been created.
"""
if params.has_key('cpu_pid'):
cpu_id = params.get('cpu_pid')
self.cgroup.cgclassify_cgroup(int(cpu_id), self.cgroup_name)
if self.kill_first:
            # Stop this thread
try:
first_pid = self.cgroup.get_pids(self.cgroup_index)[1]
utils_misc.safe_kill(int(first_pid), signal.SIGKILL)
except IndexError:
logging.info("Dump process in cgroup has been over")
else:
if self.td1:
self.td1.join(self.time_out)
self.td0.join(self.time_out)
        if not os.path.exists(self.dump_file):
raise error.TestFail("Dump file %s doesn't exist!"
% self.dump_file)
        if self.twice_execute and not os.path.exists(self.dump_file1):
raise error.TestFail("Dump file %s doesn't exist!"
% self.dump_file1)
def recover(self, params=None):
"""
Recover test environment
"""
cpu_enable = True if self.cpu_status else False
utils_misc.set_cpu_status(self.cpu_num, cpu_enable)
virsh.destroy(self.vm_name)
if params.has_key('cpu_pid'):
pid = int(params.get('cpu_pid'))
utils_misc.safe_kill(pid, signal.SIGKILL)
tmp_sh_file = params.get("tmp_sh_file")
if os.path.exists(tmp_sh_file):
os.remove(tmp_sh_file)
if os.path.exists(self.dump_file):
os.remove(self.dump_file)
if os.path.exists(self.dump_file1):
os.remove(self.dump_file1)
remove_machine_cgroup()
class Virt_install(object):
"""
Test virt-install with abnormal resource
"""
def __init__(self, test, params):
self.vm_name = params.get("vm_name", "test-vm1")
while virsh.domain_exists(self.vm_name):
self.vm_name += ".test"
params["main_vm"] = self.vm_name
ios_file = os.path.join(data_dir.get_data_dir(),
params.get('cdrom_cd1'))
if not os.path.exists(ios_file):
raise error.TestNAError("Please prepare ios file:%s" % ios_file)
self.env = params.get('env')
self.vm = self.env.create_vm("libvirt", None, self.vm_name, params,
test.bindir)
self.env.register_vm(self.vm_name, self.vm)
self.twice_execute = "yes" == params.get("twice_execute", "no")
self.kill_first = "yes" == params.get("kill_first", "no")
self.read_only = "yes" == params.get("read_only", "no")
self.selinux_enforcing = utils_selinux.is_enforcing()
if self.selinux_enforcing:
utils_selinux.set_status("permissive")
self.image_path = os.path.join(test.virtdir, "test_image")
if not os.path.exists(self.image_path):
os.mkdir(self.image_path)
if self.read_only:
os.chmod(self.image_path,
stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
params["image_name"] = os.path.join(self.image_path, self.vm_name)
params["image_format"] = "raw"
params['force_create_image'] = "yes"
params['remove_image'] = "yes"
params['shutdown_cleanly'] = "yes"
params['shutdown_cleanly_timeout'] = 120
params['guest_port_unattended_install'] = 12323
params['inactivity_watcher'] = "error"
params['inactivity_treshold'] = 1800
params['image_verify_bootable'] = "no"
params['unattended_delivery_method'] = "cdrom"
params['drive_index_unattended'] = 1
params['drive_index_cd1'] = 2
params['boot_once'] = "d"
params['medium'] = "cdrom"
params['wait_no_ack'] = "yes"
params['image_raw_device'] = "yes"
params['backup_image_before_testing'] = "no"
params['kernel_params'] = ("ks=cdrom nicdelay=60 "
"console=ttyS0,115200 console=tty0")
params['cdroms'] += " unattended"
params['redirs'] += " unattended_install"
self.params = params
self.test = test
def run_test(self):
"""
        Start test, create a thread to install the VM.
"""
self.td = threading.Thread(target=unattended_install.run,
args=(self.test, self.params, self.env))
self.td.start()
# Wait for install start
time.sleep(10)
def result_confirm(self, params):
"""
        Confirm whether the VM installation succeeded.
"""
if self.twice_execute and self.kill_first:
get_pid_cmd = "ps -ef | grep '%s' | grep qemu-kvm | grep -v grep"\
% self.vm_name
result = utils.run(get_pid_cmd, ignore_status=True)
if result.exit_status:
raise error.TestFail("First install failed!")
install_pid = result.stdout.strip().split()[1]
utils_misc.safe_kill(int(install_pid), signal.SIGKILL)
self.td.join()
if self.read_only:
if virsh.domain_exists(self.vm_name):
raise error.TestFail("Domain '%s' should not exist"
% self.vm_name)
os.chmod(self.image_path,
stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
else:
if not virsh.domain_exists(self.vm_name):
raise error.TestFail("Domain '%s' should exists, no matter its"
" installation is succeed or failed!"
% self.vm_name)
else:
if not self.kill_first:
if self.vm.is_dead():
self.vm.start()
try:
self.vm.wait_for_login()
except remote.LoginTimeoutError, detail:
raise error.TestFail(str(detail))
else:
virsh.remove_domain(self.vm_name)
if self.twice_execute or self.read_only:
self.td1 = threading.Thread(target=unattended_install.run,
args=(self.test, params, self.env))
self.td1.start()
self.td1.join()
if not virsh.domain_exists(self.vm_name):
raise error.TestFail("Domain '%s' installation failed!"
% self.vm_name)
def recover(self, params=None):
"""
Recover test environment
"""
if self.selinux_enforcing:
utils_selinux.set_status("enforcing")
if virsh.domain_exists(self.vm_name):
virsh.remove_domain(self.vm_name)
image_file = params.get("image_name")
if os.path.exists(image_file):
os.remove(image_file)
if os.path.isdir(self.image_path):
os.rmdir(self.image_path)
self.env.unregister_vm(self.vm_name)
class Migration(object):
"""
Test virsh migrate --live with abnormal resource
"""
def __init__(self, test, params):
self.vm_name = params.get("main_vm", "test-vm1")
self.env = params.get('env')
self.time_out = int(params.get('time_out'))
self.time_out_test = "yes" == params.get('time_out_test')
self.remote_ip = params.get('remote_ip')
self.remote_user = params.get('remote_user')
self.local_ip = params.get('local_ip')
if self.remote_ip.count("ENTER") or self.local_ip.count("ENTER"):
raise error.TestNAError("Please set remote/local ip in base.cfg")
self.remote_pwd = params.get('remote_pwd')
self.local_mnt = params.get('local_mnt')
self.remote_mnt = params.get('remote_mnt')
self.session = remote.remote_login("ssh", self.remote_ip, "22",
self.remote_user,
self.remote_pwd, "#")
self.session.cmd("setsebool virt_use_nfs on")
local_hostname = utils.run("hostname").stdout.strip()
remote_hostname = self.session.cmd_output("hostname")
def file_add(a_str, a_file, session=None):
"""
Add detail to a file
"""
write_cmd = "echo '%s' >> %s" % (a_str, a_file)
if session:
session.cmd(write_cmd)
else:
utils.run(write_cmd)
# Edit /etc/hosts file on local and remote host
backup_hosts_cmd = "cat /etc/hosts > /etc/hosts.bak"
utils.run(backup_hosts_cmd)
self.session.cmd(backup_hosts_cmd)
hosts_local_str = "%s %s" % (self.local_ip, local_hostname)
hosts_remote_str = "%s %s" % (self.remote_ip, remote_hostname)
file_add(hosts_local_str, "/etc/hosts")
file_add(hosts_remote_str, "/etc/hosts")
file_add(hosts_local_str, "/etc/hosts", self.session)
file_add(hosts_remote_str, "/etc/hosts", self.session)
# Edit /etc/exports file on local host
utils.run("cat /etc/exports > /etc/exports.bak")
exports_str = "%s *(insecure,rw,sync,no_root_squash)" % self.local_mnt
file_add(exports_str, "/etc/exports")
nfs_mount_cmd = "mount -t nfs %s:%s %s"\
% (self.local_ip, self.local_mnt, self.remote_mnt)
self.session.cmd(nfs_mount_cmd)
vm = self.env.get_vm(self.vm_name)
vm.wait_for_login()
def run_test(self):
"""
        Start test, create a thread to migrate the VM.
"""
remote_uri = libvirt_vm.get_uri_with_transport(transport="ssh",
dest_ip=self.remote_ip)
option = "--live"
if self.time_out_test:
option += " --timeout %s" % self.time_out
self.td = threading.Thread(target=virsh.migrate,
args=(self.vm_name, remote_uri, option))
self.td.start()
def result_confirm(self, params):
"""
        Confirm whether the migration succeeded.
"""
if self.time_out_test:
time.sleep(self.time_out)
domain_state = self.session.cmd_output("virsh domstate %s"
% self.vm_name)
if not domain_state.count("paused"):
raise error.TestFail("Guest should suspend with time out!")
self.td.join(self.time_out)
domain_info = self.session.cmd_output("virsh list")
abnormal_type = params.get("abnormal_type")
if not abnormal_type:
if not domain_info.count(self.vm_name):
raise error.TestFail("Guest migration failed!")
else:
if domain_info.count(self.vm_name):
raise error.TestFail("Guest migration succeed but expect"
" with %s!" % abnormal_type)
def recover(self, params=None):
"""
Recover test environment
"""
if self.session.cmd_output("virsh list").count(self.vm_name):
self.session.cmd("virsh destroy %s" % self.vm_name)
abnormal_type = params.get("abnormal_type")
if not abnormal_type:
self.session.cmd("umount %s -l" % self.remote_mnt)
recover_hosts_cmd = "mv -f /etc/hosts.bak /etc/hosts"
utils.run(recover_hosts_cmd)
self.session.cmd_status(recover_hosts_cmd)
utils.run("mv -f /etc/exports.bak /etc/exports")
self.session.close()
def cpu_lack(params):
"""
Disable assigned cpu.
"""
cpu_num = int(params.get("cpu_num", "0"))
if not utils_misc.set_cpu_status(cpu_num, False):
raise error.TestError("Set cpu '%s' failed!" % cpu_num)
def memory_lack(params):
"""
Lower the available memory of host
"""
tmp_c_file = params.get("tmp_c_file", "/tmp/test.c")
tmp_exe_file = params.get("tmp_exe_file", "/tmp/test")
c_str = """
#include <stdio.h>
#include <unistd.h>
#include <malloc.h>
#define MAX 1024*1024*4
int main(void){
char *a;
while(1) {
a = malloc(MAX);
if (a == NULL) {
break;
}
}
while (1){
sleep(1);
}
return 0;
}"""
c_file = open(tmp_c_file, 'w')
c_file.write(c_str)
c_file.close()
try:
utils_misc.find_command('gcc')
except ValueError:
raise error.TestNAError('gcc command is needed!')
result = utils.run("gcc %s -o %s" % (tmp_c_file, tmp_exe_file))
if result.exit_status:
raise error.TestError("Compile C file failed: %s"
% result.stderr.strip())
# Set swap off before fill memory
utils.run("swapoff -a")
utils.run("%s &" % tmp_exe_file)
result = utils.run("ps -ef | grep %s | grep -v grep" % tmp_exe_file)
pid = result.stdout.strip().split()[1]
params['memory_pid'] = pid
def disk_lack(params):
"""
Lower the available disk space
"""
disk_size = params.get('image_size')
mount_dir = params.get('mount_dir')
# Will use 2/3 space of disk
use_size = int(disk_size[0:-1]) * 2 / 3
tmp_file = os.path.join(mount_dir, "tmp")
utils.run('dd if=/dev/zero of=%s bs=1G count=%s &' % (tmp_file, use_size))
def cpu_busy(params):
"""
Make the cpu busy, almost 100%
"""
tmp_sh_file = params.get("tmp_sh_file", "/tmp/test.sh")
shell_str = """
while true
do
j==${j:+1}
j==${j:-1}
done"""
sh_file = open(tmp_sh_file, 'w')
sh_file.write(shell_str)
sh_file.close()
os.chmod(tmp_sh_file, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
result = utils.run("%s &" % tmp_sh_file)
result = utils.run("ps -ef | grep %s | grep -v grep" % tmp_sh_file)
pid = result.stdout.strip().split()[1]
params['cpu_pid'] = pid
def network_restart(params):
"""
Restart remote network
"""
time_out = int(params.get('time_out'))
remote_ip = params.get('remote_ip')
remote_user = params.get('remote_user')
remote_pwd = params.get('remote_pwd')
session = remote.remote_login("ssh", remote_ip, "22", remote_user,
remote_pwd, "#")
runner = remote.RemoteRunner(session=session)
net_service = service.Factory.create_service("network", runner.run)
net_service.restart()
session.close()
try:
remote.wait_for_login("ssh", remote_ip, "22", remote_user,
remote_pwd, "#", timeout=time_out)
except remote.LoginTimeoutError, detail:
raise error.TestError(str(detail))
def remove_machine_cgroup():
"""
    Remove the machine/machine.slice cgroup by restarting cgconfig and libvirtd
"""
cg_ser = utils_cgroup.CgconfigService()
cg_ser.cgconfig_restart()
libvirt_ser = service.Factory.create_specific_service("libvirtd")
libvirt_ser.restart()
def run(test, params, env):
"""
Test some commands' execution with abnormal resource.
1. Do some test preparation before test
2. Start test
3. Make resource abnormal
4. Confirm test result
5. Recover test environment
"""
# Test start
try:
test_type = params.get("test_type")
abnormal_type = params.get("abnormal_type")
params['env'] = env
# Start test before resource becomes to abnormal
test_case = globals()[test_type](test, params)
test_case.run_test()
# Make resource abnormal
if abnormal_type:
globals()[abnormal_type](params)
# Confirm test result
test_case.result_confirm(params)
finally:
if 'test_case' in dir():
test_case.recover(params)
| gpl-2.0 | -7,440,651,188,217,650,000 | 39.752212 | 112 | 0.537975 | false |
frostasm/qt-creator | tests/system/shared/utils.py | 1 | 31443 | #############################################################################
##
## Copyright (C) 2015 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing
##
## This file is part of Qt Creator.
##
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms and
## conditions see http://www.qt.io/terms-conditions. For further information
## use the contact form at http://www.qt.io/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 or version 3 as published by the Free
## Software Foundation and appearing in the file LICENSE.LGPLv21 and
## LICENSE.LGPLv3 included in the packaging of this file. Please review the
## following information to ensure the GNU Lesser General Public License
## requirements will be met: https://www.gnu.org/licenses/lgpl.html and
## http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, The Qt Company gives you certain additional
## rights. These rights are described in The Qt Company LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
#############################################################################
import tempfile
def neededFilePresent(path):
found = os.path.exists(path)
if os.getenv("SYSTEST_DEBUG") == "1":
checkAccess(path)
elif not found:
test.fatal("Missing file or directory: " + path)
return found
def tempDir():
Result = os.path.abspath(os.getcwd()+"/../../testing")
if not os.path.exists(Result):
os.mkdir(Result)
return tempfile.mkdtemp(prefix="qtcreator_", dir=Result)
def deleteDirIfExists(path):
shutil.rmtree(path, True)
def verifyChecked(objectName):
object = waitForObject(objectName)
test.compare(object.checked, True)
return object
def ensureChecked(objectName, shouldBeChecked = True, timeout=20000):
if shouldBeChecked:
targetState = Qt.Checked
state = "checked"
else:
targetState = Qt.Unchecked
state = "unchecked"
widget = waitForObject(objectName, timeout)
try:
# needed for transition Qt::PartiallyChecked -> Qt::Checked -> Qt::Unchecked
clicked = 0
while not waitFor('widget.checkState() == targetState', 1000) and clicked < 2:
clickButton(widget)
clicked += 1
test.verify(waitFor("widget.checkState() == targetState", 1000))
except:
        # widgets not derived from QCheckBox don't have checkState()
if not waitFor('widget.checked == shouldBeChecked', 1000):
mouseClick(widget, 10, 6, 0, Qt.LeftButton)
test.verify(waitFor("widget.checked == shouldBeChecked", 1000))
test.log("New state for QCheckBox: %s" % state,
str(objectName))
return widget
# verify that an object is in an expected enable state. Returns the object.
# param objectSpec specifies the object to check. It can either be a string determining an object
# or the object itself. If it is an object, it must exist already.
# param expectedState is the expected enable state of the object
def verifyEnabled(objectSpec, expectedState = True):
if isinstance(objectSpec, (str, unicode)):
waitFor("object.exists('" + str(objectSpec).replace("'", "\\'") + "')", 20000)
foundObject = findObject(objectSpec)
else:
foundObject = objectSpec
if objectSpec == None:
test.warning("No valid object in function verifyEnabled.")
else:
test.compare(foundObject.enabled, expectedState)
return foundObject
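# Minimal usage sketch for verifyEnabled(); the object name below does appear
# elsewhere in this file, but the two calls are only meant to illustrate the
# string form versus the object form and the expectedState parameter:
#     okButton = verifyEnabled(":Options.OK_QPushButton")       # lookup by name, expect enabled
#     verifyEnabled(okButton, expectedState=False)              # pass the object, expect disabled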
# select an item from a combo box
# param objectSpec specifies the combo box. It can either be a string determining an object
# or the object itself. If it is an object, it must exist already.
# param itemName is the item to be selected in the combo box
# returns True if selection was changed or False if the wanted value was already selected
def selectFromCombo(objectSpec, itemName):
object = verifyEnabled(objectSpec)
if itemName == str(object.currentText):
return False
else:
mouseClick(object, 5, 5, 0, Qt.LeftButton)
snooze(1)
mouseClick(waitForObjectItem(object, itemName.replace(".", "\\.")), 5, 5, 0, Qt.LeftButton)
test.verify(waitFor("str(object.currentText)==itemName", 5000),
"Switched combo item to '%s'" % itemName)
return True
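# Usage sketch for selectFromCombo(); the combo box name is taken from
# setAlwaysStartFullHelp() below and the return value tells whether the
# selection actually changed:
#     if selectFromCombo(":Startup.contextHelpComboBox_QComboBox", "Always Show in Help Mode"):
#         test.log("Combo box selection was changed")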
def selectFromLocator(filter, itemName = None):
if itemName == None:
itemName = filter
itemName = itemName.replace(".", "\\.").replace("_", "\\_")
locator = waitForObject(":*Qt Creator_Utils::FilterLineEdit")
mouseClick(locator, 5, 5, 0, Qt.LeftButton)
replaceEditorContent(locator, filter)
# clicking the wanted item
# if you replace this by pressing ENTER, be sure that something is selected
# otherwise you will run into unwanted behavior
wantedItem = waitForObjectItem("{type='QTreeView' unnamed='1' visible='1'}", itemName)
doubleClick(wantedItem, 5, 5, 0, Qt.LeftButton)
def wordUnderCursor(window):
return textUnderCursor(window, QTextCursor.StartOfWord, QTextCursor.EndOfWord)
def lineUnderCursor(window):
return textUnderCursor(window, QTextCursor.StartOfLine, QTextCursor.EndOfLine)
def textUnderCursor(window, fromPos, toPos):
cursor = window.textCursor()
oldposition = cursor.position()
cursor.movePosition(fromPos)
cursor.movePosition(toPos, QTextCursor.KeepAnchor)
returnValue = cursor.selectedText()
cursor.setPosition(oldposition)
return returnValue
def which(program):
def is_exe(fpath):
return os.path.exists(fpath) and os.access(fpath, os.X_OK)
def callableFile(path):
if is_exe(path):
return path
if platform.system() in ('Windows', 'Microsoft'):
for suffix in suffixes.split(os.pathsep):
if is_exe(path + suffix):
return path + suffix
return None
if platform.system() in ('Windows', 'Microsoft'):
suffixes = os.getenv("PATHEXT")
if not suffixes:
test.fatal("Can't read environment variable PATHEXT. Please check your installation.")
suffixes = ""
fpath, fname = os.path.split(program)
if fpath:
return callableFile(program)
else:
if platform.system() in ('Windows', 'Microsoft'):
cf = callableFile(os.getcwd() + os.sep + program)
if cf:
return cf
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
cf = callableFile(exe_file)
if cf:
return cf
return None
# this function removes the user files of the given pro file(s)
# it can be called with a single string object or a list of strings holding path(s) to
# the pro file(s); it returns False if it could not remove all user files or has been
# called with an unsupported object
def cleanUpUserFiles(pathsToProFiles=None):
if pathsToProFiles==None:
return False
if isinstance(pathsToProFiles, (str, unicode)):
filelist = glob.glob(pathsToProFiles+".user*")
elif isinstance(pathsToProFiles, (list, tuple)):
filelist = []
for p in pathsToProFiles:
filelist.extend(glob.glob(p+".user*"))
else:
test.fatal("Got an unsupported object.")
return False
doneWithoutErrors = True
for file in filelist:
try:
file = os.path.abspath(file)
os.remove(file)
except:
doneWithoutErrors = False
return doneWithoutErrors
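# Usage sketch for cleanUpUserFiles(); the .pro paths are placeholders only:
#     if not cleanUpUserFiles(["/path/to/project.pro", "/path/to/other.pro"]):
#         test.warning("Could not remove all .pro.user* files")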
def invokeMenuItem(menu, item, *subItems):
if platform.system() == "Darwin":
try:
waitForObject(":Qt Creator.QtCreator.MenuBar_QMenuBar", 2000)
except:
nativeMouseClick(waitForObject(":Qt Creator_Core::Internal::MainWindow", 1000), 20, 20, 0, Qt.LeftButton)
# HACK to avoid squish crash using Qt5.2 on Squish 5.0.1 - remove asap
if platform.system() == "Darwin":
if menu == "Tools" and item == "Options...":
nativeType("<Command+,>")
return
if menu == "File" and item == "Exit":
nativeType("<Command+q>")
return
menuObject = waitForObjectItem(":Qt Creator.QtCreator.MenuBar_QMenuBar", menu)
snooze(1)
waitFor("menuObject.visible", 1000)
activateItem(menuObject)
itemObject = waitForObjectItem(objectMap.realName(menuObject), item)
waitFor("itemObject.enabled", 2000)
activateItem(itemObject)
for subItem in subItems:
sub = itemObject.menu()
waitFor("sub.visible", 1000)
itemObject = waitForObjectItem(sub, subItem)
activateItem(itemObject)
def logApplicationOutput():
# make sure application output is shown
ensureChecked(":Qt Creator_AppOutput_Core::Internal::OutputPaneToggleButton")
try:
output = waitForObject("{type='Core::OutputWindow' visible='1' windowTitle='Application Output Window'}")
test.log("Application Output:\n%s" % output.plainText)
return str(output.plainText)
except:
test.fail("Could not find any Application Output - did the project run?")
return None
# get the output from a given cmdline call
def getOutputFromCmdline(cmdline):
versCall = subprocess.Popen(cmdline, stdout=subprocess.PIPE, shell=True)
result = versCall.communicate()[0]
versCall.stdout.close()
return result
def selectFromFileDialog(fileName, waitForFile=False):
if platform.system() == "Darwin":
snooze(1)
nativeType("<Command+Shift+g>")
snooze(1)
nativeType(fileName)
snooze(1)
nativeType("<Return>")
snooze(3)
nativeType("<Return>")
snooze(1)
else:
fName = os.path.basename(os.path.abspath(fileName))
pName = os.path.dirname(os.path.abspath(fileName)) + os.sep
try:
waitForObject("{name='QFileDialog' type='QFileDialog' visible='1'}", 5000)
pathLine = waitForObject("{name='fileNameEdit' type='QLineEdit' visible='1'}")
snooze(1)
replaceEditorContent(pathLine, pName)
clickButton(waitForObject("{text='Open' type='QPushButton'}"))
waitFor("str(pathLine.text)==''")
snooze(1)
replaceEditorContent(pathLine, fName)
clickButton(waitForObject("{text='Open' type='QPushButton'}"))
except:
nativeType("<Ctrl+a>")
nativeType("<Delete>")
nativeType(pName + fName)
snooze(1)
nativeType("<Return>")
snooze(3)
if waitForFile:
fileCombo = waitForObject(":Qt Creator_FilenameQComboBox")
if not waitFor("str(fileCombo.currentText) in fileName", 5000):
test.fail("%s could not be opened in time." % fileName)
# add Qt documentations from given paths
# param which a list/tuple of the paths to the qch files to be added
def addHelpDocumentation(which):
global sdkPath
invokeMenuItem("Tools", "Options...")
waitForObjectItem(":Options_QListView", "Help")
clickItem(":Options_QListView", "Help", 14, 15, 0, Qt.LeftButton)
waitForObject("{container=':Options.qt_tabwidget_tabbar_QTabBar' type='TabItem' text='Documentation'}")
clickOnTab(":Options.qt_tabwidget_tabbar_QTabBar", "Documentation")
# get rid of all docs already registered
listWidget = waitForObject("{type='QListWidget' name='docsListWidget' visible='1'}")
if listWidget.count > 0:
rect = listWidget.visualItemRect(listWidget.item(0))
mouseClick(listWidget, rect.x+5, rect.y+5, 0, Qt.LeftButton)
type(listWidget, "<Ctrl+a>")
mouseClick(waitForObject("{type='QPushButton' name='removeButton' visible='1'}"), 5, 5, 0, Qt.LeftButton)
for qch in which:
clickButton(waitForObject("{type='QPushButton' name='addButton' visible='1' text='Add...'}"))
selectFromFileDialog(qch)
clickButton(waitForObject(":Options.OK_QPushButton"))
def addCurrentCreatorDocumentation():
currentCreatorPath = currentApplicationContext().cwd
if platform.system() == "Darwin":
docPath = os.path.abspath(os.path.join(currentCreatorPath, "Qt Creator.app", "Contents",
"Resources", "doc", "qtcreator.qch"))
else:
docPath = os.path.abspath(os.path.join(currentCreatorPath, "..", "share", "doc",
"qtcreator", "qtcreator.qch"))
if not os.path.exists(docPath):
test.fatal("Missing current Qt Creator documentation (expected in %s)" % docPath)
return
invokeMenuItem("Tools", "Options...")
waitForObjectItem(":Options_QListView", "Help")
clickItem(":Options_QListView", "Help", 14, 15, 0, Qt.LeftButton)
waitForObject("{container=':Options.qt_tabwidget_tabbar_QTabBar' type='TabItem' text='Documentation'}")
clickOnTab(":Options.qt_tabwidget_tabbar_QTabBar", "Documentation")
clickButton(waitForObject("{type='QPushButton' name='addButton' visible='1' text='Add...'}"))
selectFromFileDialog(docPath)
try:
waitForObject("{type='QMessageBox' unnamed='1' visible='1' "
"text?='Unable to register documentation.*'}", 3000)
test.passes("Qt Creator's documentation found already registered.")
clickButton(waitForObject("{type='QPushButton' text='OK' unnamed='1' visible='1' "
"container={name='groupBox' type='QGroupBox' visible='1'}}"))
except:
test.fail("Added Qt Creator's documentation explicitly.")
clickButton(waitForObject(":Options.OK_QPushButton"))
def verifyOutput(string, substring, outputFrom, outputIn):
index = string.find(substring)
if (index == -1):
test.fail("Output from " + outputFrom + " could not be found in " + outputIn)
else:
test.passes("Output from " + outputFrom + " found at position " + str(index) + " of " + outputIn)
# function that verifies the existence and the read permissions
# of the given file path
# if the executing user does not have read permission, it checks
# the parent folders for their execute permission
def checkAccess(pathToFile):
if os.path.exists(pathToFile):
test.log("Path '%s' exists" % pathToFile)
if os.access(pathToFile, os.R_OK):
test.log("Got read access on '%s'" % pathToFile)
else:
test.fail("No read permission on '%s'" % pathToFile)
else:
test.fatal("Path '%s' does not exist or cannot be accessed" % pathToFile)
__checkParentAccess__(pathToFile)
# helper function for checking the execute rights of all
# parents of filePath
def __checkParentAccess__(filePath):
for i in range(1, filePath.count(os.sep)):
tmp = filePath.rsplit(os.sep, i)[0]
if os.access(tmp, os.X_OK):
test.log("Got execute permission on '%s'" % tmp)
else:
test.fail("No execute permission on '%s'" % tmp)
# this function checks all configured Qt versions inside the
# options dialog and returns a dict holding the kits as keys
# and their configured Qt information (target, version) as values
def getConfiguredKits():
def __retrieveQtVersionName__(target, version):
treeView = waitForObject(":qtdirList_QTreeView")
return str(treeView.currentIndex().data().toString())
# end of internal function for iterateQtVersions
def __setQtVersionForKit__(kit, kitName, kitsQtVersionName):
treeView = waitForObject(":BuildAndRun_QTreeView")
clickItem(treeView, kit, 5, 5, 0, Qt.LeftButton)
qtVersionStr = str(waitForObject(":Kits_QtVersion_QComboBox").currentText)
kitsQtVersionName[kitName] = qtVersionStr
# end of internal function for iterate kits
kitsWithQtVersionName = {}
result = {}
# collect kits and their Qt versions
targetsQtVersions, qtVersionNames = iterateQtVersions(True, False, __retrieveQtVersionName__)
# update collected Qt versions with their configured device and version
iterateKits(True, True, __setQtVersionForKit__, kitsWithQtVersionName)
# merge defined target names with their configured Qt versions and devices
for kit, qtVersion in kitsWithQtVersionName.iteritems():
if kit in ('Fremantle', 'Harmattan', 'Qt Simulator'):
test.verify(qtVersion == 'None',
"The outdated kit '%s' should not have a Qt version" % kit)
elif qtVersion in qtVersionNames:
result[kit] = targetsQtVersions[qtVersionNames.index(qtVersion)].items()[0]
else:
test.fail("Qt version '%s' for kit '%s' can't be found in qtVersionNames."
% (qtVersion, kit))
clickButton(waitForObject(":Options.Cancel_QPushButton"))
test.log("Configured kits: %s" % str(result))
return result
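# Usage sketch for getConfiguredKits(); kit, target and version names depend on
# the local configuration, so the logging below is purely illustrative:
#     kits = getConfiguredKits()
#     for kitName, (target, version) in kits.iteritems():
#         test.log("Kit '%s' uses Qt %s for %s" % (kitName, version, target))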
def visibleCheckBoxExists(text):
try:
findObject("{type='QCheckBox' text='%s' visible='1'}" % text)
return True
except:
return False
# this function verifies whether the text matches any of the given
# regexes inside expectedTexts
# param text must be a single str/unicode
# param expectedTexts can be str/unicode/list/tuple
def regexVerify(text, expectedTexts):
if isinstance(expectedTexts, (str,unicode)):
expectedTexts = [expectedTexts]
for curr in expectedTexts:
pattern = re.compile(curr)
if pattern.match(text):
return True
return False
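# Usage sketch for regexVerify(); the pattern mirrors the one used in
# iterateQtVersions() below and currentText is just a placeholder variable:
#     if regexVerify(currentText, ["Qt version .* for .*"]):
#         test.log("Text matches one of the expected patterns")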
# function that opens Options Dialog and parses the configured Qt versions
# param keepOptionsOpen set to True if the Options dialog should stay open when
# leaving this function
# param alreadyOnOptionsDialog set to True if you already have opened the Options Dialog
# (if False this function will open it via the MenuBar -> Tools -> Options...)
# param additionalFunction pass a function or name of a defined function to execute
# for each correctly configured item on the list of Qt versions
# (Qt versions having no assigned toolchain, failing qmake,... will be skipped)
# this function must take at least 2 parameters - the first is the target name
# and the second the version of the current selected Qt version item
# param argsForAdditionalFunc you can specify as many parameters as you want to pass
# to additionalFunction from the outside
# the function returns a list of dicts holding target-version mappings if used without
# additionalFunction
# WATCH OUT! if you're using the additionalFunction parameter - this function will
# return the list mentioned above as well as the returned value(s) from
# additionalFunction. You MUST call this function like
# result, additionalResult = iterateQtVersions(...)
# where additionalResult is the result of all executions of additionalFunction which
# means it is a list of results.
def iterateQtVersions(keepOptionsOpen=False, alreadyOnOptionsDialog=False,
additionalFunction=None, *argsForAdditionalFunc):
result = []
additionalResult = []
if not alreadyOnOptionsDialog:
invokeMenuItem("Tools", "Options...")
waitForObjectItem(":Options_QListView", "Build & Run")
clickItem(":Options_QListView", "Build & Run", 14, 15, 0, Qt.LeftButton)
clickOnTab(":Options.qt_tabwidget_tabbar_QTabBar", "Qt Versions")
pattern = re.compile("Qt version (?P<version>.*?) for (?P<target>.*)")
treeView = waitForObject(":qtdirList_QTreeView")
model = treeView.model()
for rootIndex in dumpIndices(model):
rootChildText = str(rootIndex.data()).replace(".", "\\.").replace("_", "\\_")
for subIndex in dumpIndices(model, rootIndex):
subChildText = str(subIndex.data()).replace(".", "\\.").replace("_", "\\_")
clickItem(treeView, ".".join([rootChildText,subChildText]), 5, 5, 0, Qt.LeftButton)
currentText = str(waitForObject(":QtSupport__Internal__QtVersionManager.QLabel").text)
matches = pattern.match(currentText)
if matches:
target = matches.group("target").strip()
version = matches.group("version").strip()
result.append({target:version})
if additionalFunction:
try:
if isinstance(additionalFunction, (str, unicode)):
currResult = globals()[additionalFunction](target, version, *argsForAdditionalFunc)
else:
currResult = additionalFunction(target, version, *argsForAdditionalFunc)
except:
import sys
t,v,tb = sys.exc_info()
currResult = None
test.fatal("Function to additionally execute on Options Dialog could not be found or "
"an exception occurred while executing it.", "%s(%s)" % (str(t), str(v)))
additionalResult.append(currResult)
if not keepOptionsOpen:
clickButton(waitForObject(":Options.Cancel_QPushButton"))
if additionalFunction:
return result, additionalResult
else:
return result
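# Usage sketch for iterateQtVersions(); note the double unpacking that is
# required as soon as an additional function is passed (see comment above):
#     def logVersion(target, version):
#         test.log("Found Qt %s for %s" % (version, target))
#     mappings, logResults = iterateQtVersions(False, False, logVersion)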
# function that opens Options Dialog (if necessary) and parses the configured Kits
# param keepOptionsOpen set to True if the Options dialog should stay open when
# leaving this function
# param alreadyOnOptionsDialog set to True if you already have opened the Options Dialog
# (if False this function will open it via the MenuBar -> Tools -> Options...)
# param additionalFunction pass a function or name of a defined function to execute
# for each configured item on the list of Kits
# this function must take at least 2 parameters - the first is the item (QModelIndex)
# of the current Kit (if you need to click on it) and the second the Kit name itself
# param argsForAdditionalFunc you can specify as many parameters as you want to pass
# to additionalFunction from the outside
# the function returns a list of Kit names if used without an additional function
# WATCH OUT! if you're using the additionalFunction parameter - this function will
# return the list mentioned above as well as the returned value(s) from
# additionalFunction. You MUST call this function like
# result, additionalResult = iterateKits(...)
# where additionalResult is the result of all executions of additionalFunction which
# means it is a list of results.
def iterateKits(keepOptionsOpen=False, alreadyOnOptionsDialog=False,
additionalFunction=None, *argsForAdditionalFunc):
result = []
additionalResult = []
if not alreadyOnOptionsDialog:
invokeMenuItem("Tools", "Options...")
waitForObjectItem(":Options_QListView", "Build & Run")
clickItem(":Options_QListView", "Build & Run", 14, 15, 0, Qt.LeftButton)
clickOnTab(":Options.qt_tabwidget_tabbar_QTabBar", "Kits")
treeView = waitForObject(":BuildAndRun_QTreeView")
model = treeView.model()
test.compare(model.rowCount(), 2, "Verifying expected target section count")
autoDetected = model.index(0, 0)
test.compare(autoDetected.data().toString(), "Auto-detected",
"Verifying label for target section")
manual = model.index(1, 0)
test.compare(manual.data().toString(), "Manual", "Verifying label for target section")
for section in [autoDetected, manual]:
for currentItem in dumpItems(model, section):
kitName = currentItem
if (kitName.endswith(" (default)")):
kitName = kitName.rsplit(" (default)", 1)[0]
result.append(kitName)
item = ".".join([str(section.data().toString()),
currentItem.replace(".", "\\.")])
if additionalFunction:
try:
if isinstance(additionalFunction, (str, unicode)):
currResult = globals()[additionalFunction](item, kitName, *argsForAdditionalFunc)
else:
currResult = additionalFunction(item, kitName, *argsForAdditionalFunc)
except:
import sys
t,v,tb = sys.exc_info()
currResult = None
test.fatal("Function to additionally execute on Options Dialog could not be "
"found or an exception occurred while executing it.", "%s(%s)" %
(str(t), str(v)))
additionalResult.append(currResult)
if not keepOptionsOpen:
clickButton(waitForObject(":Options.Cancel_QPushButton"))
if additionalFunction:
return result, additionalResult
else:
return result
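# Illustrative usage sketch (not part of the original shared scripts): it demonstrates the
# two-value return convention documented above. The callback 'logKitName' is made up for
# this example; any callable taking (kitItem, kitName) would work.
def _exampleIterateKitsUsage():
    def logKitName(kitItem, kitName):
        test.log("Found kit: %s" % kitName)
        return kitName
    # Because an additionalFunction is passed, iterateKits() returns two lists: the kit
    # names and one callback result per kit.
    kitNames, callbackResults = iterateKits(False, False, logKitName)
    return kitNames, callbackResults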
# set "Always Start Full Help" in "Tools" -> "Options..." -> "Help" -> "General"
def setAlwaysStartFullHelp():
invokeMenuItem("Tools", "Options...")
waitForObjectItem(":Options_QListView", "Help")
clickItem(":Options_QListView", "Help", 5, 5, 0, Qt.LeftButton)
clickOnTab(":Options.qt_tabwidget_tabbar_QTabBar", "General")
selectFromCombo(":Startup.contextHelpComboBox_QComboBox", "Always Show in Help Mode")
clickButton(waitForObject(":Options.OK_QPushButton"))
def removePackagingDirectory(projectPath):
qtcPackaging = os.path.join(projectPath, "qtc_packaging")
if os.path.exists(qtcPackaging):
test.log("Removing old packaging directory '%s'" % qtcPackaging)
deleteDirIfExists(qtcPackaging)
else:
test.log("Couldn't remove packaging directory '%s' - did not exist." % qtcPackaging)
# returns the indices from a QAbstractItemModel
def dumpIndices(model, parent=None, column=0):
if parent:
return [model.index(row, column, parent) for row in range(model.rowCount(parent))]
else:
return [model.index(row, column) for row in range(model.rowCount())]
DisplayRole = 0
# returns the data from a QAbstractItemModel as strings
def dumpItems(model, parent=None, role=DisplayRole, column=0):
return [str(index.data(role)) for index in dumpIndices(model, parent, column)]
# returns the children of a QTreeWidgetItem
def dumpChildren(item):
return [item.child(index) for index in range(item.childCount())]
def writeTestResults(folder):
if not os.path.exists(folder):
print "Skipping writing test results (folder '%s' does not exist)." % folder
return
resultFile = open("%s.srf" % os.path.join(folder, os.path.basename(squishinfo.testCase)), "w")
resultFile.write("suite:%s\n" % os.path.basename(os.path.dirname(squishinfo.testCase)))
categories = ["passes", "fails", "fatals", "errors", "tests", "warnings", "xfails", "xpasses"]
for cat in categories:
resultFile.write("%s:%d\n" % (cat, test.resultCount(cat)))
resultFile.close()
# wait and verify if object exists/not exists
def checkIfObjectExists(name, shouldExist = True, timeout = 3000, verboseOnFail = False):
result = waitFor("object.exists(name) == shouldExist", timeout)
if verboseOnFail and not result:
test.log("checkIfObjectExists() failed for '%s'" % name)
return result
# wait for progress bar(s) to appear and disappear
def progressBarWait(timeout=60000, warn=True):
if not checkIfObjectExists(":Qt Creator_Core::Internal::ProgressBar", True, 6000):
if warn:
test.warning("progressBarWait() timed out when waiting for ProgressBar.",
"This may lead to unforeseen behavior. Consider increasing the timeout.")
checkIfObjectExists(":Qt Creator_Core::Internal::ProgressBar", False, timeout)
def readFile(filename):
f = open(filename, "r")
content = f.read()
f.close()
return content
def simpleFileName(navigatorFileName):
# try to find the last part of the given name, assume it's inside a (folder) structure
search = re.search(".*[^\\\\]\.(.*)$", navigatorFileName)
if search:
return search.group(1).replace("\\", "")
# it's just the filename
return navigatorFileName.replace("\\", "")
def clickOnTab(tabBarStr, tabText, timeout=5000):
if not waitFor("object.exists(tabBarStr)", timeout):
raise LookupError("Could not find QTabBar: %s" % objectMap.realName(tabBarStr))
tabBar = findObject(tabBarStr)
if platform.system() == 'Darwin' and not tabBar.visible:
test.log("Using workaround for Mac.")
setWindowState(tabBar, WindowState.Normal)
clickTab(tabBar, tabText)
waitFor("str(tabBar.tabText(tabBar.currentIndex)) == '%s'" % tabText, timeout)
# constructs a string holding the properties for a QModelIndex
# param property a string holding additional properties including their values
# ATTENTION! use single quotes for values (e.g. "text='Text'", "text='Text' occurrence='2'")
# param container the container (str) to be used for this QModelIndex
def getQModelIndexStr(property, container):
if (container.startswith(":")):
container = "'%s'" % container
return ("{column='0' container=%s %s type='QModelIndex'}" % (container, property))
def verifyItemOrder(items, text):
text = str(text)
lastIndex = 0
for item in items:
index = text.find(item)
test.verify(index > lastIndex, "'" + item + "' found at index " + str(index))
lastIndex = index
def openVcsLog():
try:
foundObj = waitForObject("{type='QPlainTextEdit' unnamed='1' visible='1' "
"window=':Qt Creator_Core::Internal::MainWindow'}", 2000)
if className(foundObj) != 'QPlainTextEdit':
raise Exception("Found derived class, but not a pure QPlainTextEdit.")
except:
invokeMenuItem("Window", "Output Panes", "Version Control")
def openGeneralMessages():
if not object.exists(":Qt Creator_Core::OutputWindow"):
invokeMenuItem("Window", "Output Panes", "General Messages")
# function that retrieves a specific child object by its class
# this is sometimes the best way to avoid using waitForObject() on objects that
# occur more than once - but could easily be found by using a compound object
# (e.g. search for Utils::PathChooser instead of Utils::FancyLineEdit and get the child)
def getChildByClass(parent, classToSearchFor, occurrence=1):
children = [child for child in object.children(parent) if className(child) == classToSearchFor]
if len(children) < occurrence:
return None
else:
return children[occurrence - 1]
def getHelpViewer():
try:
return waitForObject(":Qt Creator_Help::Internal::HelpViewer", 3000)
except:
return waitForObject("{type='Help::Internal::TextBrowserHelpWidget' unnamed='1' "
"visible='1' window=':Qt Creator_Core::Internal::MainWindow'}", 1000)
def getHelpTitle():
hv = getHelpViewer()
try:
return str(hv.title)
except:
return str(hv.documentTitle)
def canTestEmbeddedQtQuick():
return (squishinfo.major * 0x10000 + squishinfo.minor * 0x100
+ squishinfo.patch) > 0x050100
| gpl-3.0 | -6,170,531,193,433,609,000 | 45.036603 | 117 | 0.659288 | false |
codilime/cloudify-manager | tests/workflow_tests/test_rest_service_sort.py | 1 | 3609 | ########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import os
import tempfile
import shutil
from wagon.wagon import Wagon
from testenv import TestCase
from testenv.utils import get_resource as resource
from testenv.utils import deploy, execute_workflow
class TestRestServiceListSort(TestCase):
def test_blueprints_sort(self):
for i in range(10):
self.client.blueprints.upload(resource('dsl/sort.yaml'),
'blueprint{0}'.format(i))
self._test_sort('blueprints', '-id')
def test_deployments_sort(self):
for i in range(10):
deploy(resource('dsl/sort.yaml'))
self._test_sort('deployments', 'id')
def test_deployment_modifications_sort(self):
deployment = deploy(resource('dsl/sort.yaml'))
for i in range(2, 12):
modification = self.client.deployment_modifications.start(
deployment_id=deployment.id,
nodes={'node': {'instances': i}})
self.client.deployment_modifications.finish(modification.id)
self._test_sort('deployment_modifications', 'deployment_id')
def test_executions_sort(self):
deployment = deploy(resource('dsl/sort.yaml'))
for i in range(5):
execute_workflow('install', deployment.id)
execute_workflow('uninstall', deployment.id)
self._test_sort('executions',
['deployment_id', '-status'])
def test_nodes_sort(self):
deploy(resource('dsl/sort.yaml'))
self._test_sort('nodes', '-id')
def test_node_instances_sort(self):
deploy(resource('dsl/sort.yaml'))
self._test_sort('node_instances', ['node_id', '-id'])
def test_plugins_sort(self):
for i in range(1, 11):
tmpdir = tempfile.mkdtemp(prefix='test-sort-')
with open(os.path.join(tmpdir, 'setup.py'), 'w') as f:
f.write('from setuptools import setup\n')
f.write('setup(name="some-package", version={0})'.format(i))
wagon = Wagon(tmpdir)
plugin_path = wagon.create(archive_destination_dir=tmpdir)
self.client.plugins.upload(plugin_path)
shutil.rmtree(tmpdir)
self._test_sort('plugins', 'id')
def _test_sort(self, resource_name, sort):
api = getattr(self.client, resource_name)
actual_list = api.list(_sort=sort)
self.assertGreater(len(actual_list), 0)
expected_list = api.list()
# apply all sort parameters to unsorted list and compare with
# sorted list request
if not isinstance(sort, list):
sort = [sort]
for sort_param in reversed(sort):
field = sort_param.lstrip('-+')
is_reverse = True if sort_param[0] == '-' else False
expected_list.sort(
key=lambda res: getattr(res, field),
reverse=is_reverse)
self.assertListEqual(expected_list.items, actual_list.items)
| apache-2.0 | 2,703,651,746,208,794,000 | 38.228261 | 79 | 0.621779 | false |
Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/pyshared/Onboard/IconPalette.py | 1 | 11621 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright © 2008 Francesco Fumanti <[email protected]>
#
# This file is part of Onboard.
#
# Onboard is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Onboard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from os.path import join
from traceback import print_exc
import gtk
import gobject
### Logging ###
import logging
_logger = logging.getLogger("IconPalette")
###############
### Config Singleton ###
from Onboard.Config import Config
config = Config()
########################
from gettext import gettext as _
DRAG_THRESHOLD = 8 # in pixels; 8 is the default gtk value
RESIZE_AREA_SIZE = 20 # Use a fictive but sensible size
class IconPalette(gtk.Window):
"""
Class that creates a movable and resizable floating window without
decorations. The window shows the icon of onboard scaled to fit to the
window and a resize grip that honors the desktop theme in use.
Onboard offers an option to the user to make the window appear
whenever the user hides the onscreen keyboard. The user can then
click on the window to hide it and make the onscreen keyboard
reappear.
"""
"""Store whether the last click event was by button 1."""
_button1_pressed = False
"""
Needed in the motion-notify-event callback to ignore little movements
and in the button-release-event callback to determine whether a click
happened.
"""
_button1_press_x_pos = 0
_button1_press_y_pos = 0
"""When configuring: whether it is a resize or a move."""
_is_press_in_resize_area = False
def __init__(self):
gtk.Window.__init__(self)
_logger.debug("Entered in __init__")
# create iconpalette starting by an inherited gtk.window
self.set_accept_focus(False)
self.set_keep_above(True)
self.set_decorated(False)
self.set_property('skip-taskbar-hint', True)
self.set_resizable(True)
self.set_geometry_hints(self,
20, 20, # minimum width, height
-1, -1, # maximum width, height
config.icp_width, # base width
config.icp_height, # base height
1, 1, # width, height resize increment
-1, -1) # min, max aspect ratio
self.set_border_width(0)
self.set_app_paintable(True)
# default coordinates of the iconpalette on the screen
self.move(config.icp_x_position, config.icp_y_position)
self.resize(config.icp_width, config.icp_height)
# set up attributes for content of icon palette
self.image_pixbuf = None
icon_theme = gtk.icon_theme_get_default()
if icon_theme.has_icon("onboard"):
try:
self.image_pixbuf = icon_theme.load_icon("onboard", 192, 0)
except:
print_exc() # bug in oneiric: unsupported icon format svg
_logger.error(_("Failed to load Onboard icon"))
else:
_logger.error("_(Can't find Onboard icon")
if not self.image_pixbuf:
self.image_pixbuf = self.render_icon(gtk.STOCK_MISSING_IMAGE,
gtk.ICON_SIZE_DIALOG)
self.icp_image = gtk.Image()
self.image_box = gtk.Fixed()
self.image_box.put(self.icp_image, 0, 0)
self.add(self.image_box)
# set up event handling
self.add_events(gtk.gdk.BUTTON_PRESS_MASK
| gtk.gdk.BUTTON_RELEASE_MASK
| gtk.gdk.BUTTON1_MOTION_MASK)
self.connect("button-press-event", self._cb_start_click_or_move_resize)
self.connect("motion-notify-event", self._cb_move_resize_action)
self.connect("button-release-event", self._cb_click_action)
self.connect("configure-event", self._cb_scale_and_save)
self.connect("expose-event", self._cb_draw_resize_grip)
config.icp_size_change_notify_add(self.resize)
config.icp_position_change_notify_add(self.move)
gobject.signal_new("activated", IconPalette, gobject.SIGNAL_RUN_LAST,
gobject.TYPE_BOOLEAN, ())
_logger.debug("Leaving __init__")
def _is_click_in_resize_area(self, event):
"""Check whether the event occurred in the resize grip."""
_logger.debug("Entered in _is_click_in_resize_area")
response = False
if config.icp_width - RESIZE_AREA_SIZE < event.x \
and event.x < config.icp_width \
and config.icp_height - RESIZE_AREA_SIZE < event.y \
and event.y < config.icp_height:
response = True
return response
def _cb_start_click_or_move_resize(self, widget, event):
"""
This is the callback for the button-press-event.
It initiates the variables used during the moving and resizing
of the IconPalette window; and used to determine whether the
button-press and button-release sequence can be considered a
button click.
"""
_logger.debug("Entered in _cb_start_click_or_move_resize()")
if not event.button == 1: # we are only interested in button 1 events
return
self._button1_pressed = True
_logger.debug("passed self._button1_pressed = True")
self._is_press_in_resize_area = self._is_click_in_resize_area(event)
# needed to check whether movement is below threshold
self._button1_press_x_pos = event.x_root
self._button1_press_y_pos = event.y_root
def _cb_move_resize_action(self, widget, event):
"""
This is the callback for the motion-notify-event.
Depending on whether the button press occurred on the content of
the window or on the resize grip, it asynchronuously calls
gtk.Window.begin_move_drag() or gtk.Window.begin_resize_drag().
"""
_logger.debug("Entered in _cb_move_resize_action()")
# we are only interested in button 1 events
if not self._button1_pressed:
return
_logger.debug("passed _button1_pressed")
if abs(event.x_root - self._button1_press_x_pos) < DRAG_THRESHOLD \
and abs(event.y_root - self._button1_press_y_pos) < DRAG_THRESHOLD:
return # we ignore movements smaller than the threshold
_logger.debug("passed ignore small movement")
if self._is_press_in_resize_area:
_logger.debug("Entering begin_resize_drag()")
self.begin_resize_drag(gtk.gdk.WINDOW_EDGE_SOUTH_EAST, 1,
int(event.x_root), int(event.y_root),
event.time)
else:
_logger.debug("Entering begin_move_drag()")
self.begin_move_drag(1, int(event.x_root), int(event.y_root),
event.time)
# REMARK: begin_resize_drag() and begin_move_drag() seem to run
# asynchronously: in other words, if there is code after them, it will
# in most cases run before the move or the resize have finished.
# To execute code after begin_resize_drag() and begin_move_drag(),
# the callback of the configure-event can probably be used.
def _cb_scale_and_save(self, event, user_data):
"""
This is the callback for the configure-event.
It saves the geometry of the IconPalette window to the gconf keys
by using the Config singleton.
It scales the content of the IconPalette window to make it fit to
the new window size.
"""
_logger.debug("Entered in _cb_scale_and_save()")
if self.get_property("visible"):
# save size and position
config.icp_width, config.icp_height = self.get_size()
config.icp_x_position, config.icp_y_position = self.get_position()
# draw content (does not draw resize grip)
scaled_image_pixbuf = self.image_pixbuf.scale_simple(config.icp_width, \
config.icp_height, \
gtk.gdk.INTERP_BILINEAR)
resize_grip_area = scaled_image_pixbuf.subpixbuf( \
config.icp_width - RESIZE_AREA_SIZE, \
config.icp_height - RESIZE_AREA_SIZE, \
RESIZE_AREA_SIZE, RESIZE_AREA_SIZE)
resize_grip_area.fill(0x00000000) # make transparent
self.icp_image.set_from_pixbuf(scaled_image_pixbuf)
del resize_grip_area
del scaled_image_pixbuf
# REMARK: After clicking on the iconpalette, another configure event
# arrives after the iconpalette has been hidden and a wrong position
# gets stored in the config keys. Therefore the visibility check.
def _cb_draw_resize_grip(self, event, user_data):
"""
This is the callback for the expose-event.
It is responsible for drawing the resize grip.
"""
_logger.debug("Entered in _cb_draw_resize_grip()")
self.get_style().paint_resize_grip(self.window, \
gtk.STATE_NORMAL, \
None, self, None, \
gtk.gdk.WINDOW_EDGE_SOUTH_EAST, \
config.icp_width - RESIZE_AREA_SIZE, \
config.icp_height - RESIZE_AREA_SIZE, \
RESIZE_AREA_SIZE, RESIZE_AREA_SIZE)
def _cb_click_action(self, widget, event):
"""
This is the callback for the button-release-event.
If the button-release occurs around the coordinates of the preceding
button-press, it is considered to be a click (regardless of the
time passed between the button-press and button-release). The
IconPalette gets hidden and the custom activated-event is emitted.
"""
_logger.debug("Entered in _cb_click_action")
if not event.button == 1: # we are only interested in button 1 events
return
self._button1_pressed = False
self._is_press_in_resize_area = False
if abs(event.x_root - self._button1_press_x_pos) < DRAG_THRESHOLD \
and abs(event.y_root - self._button1_press_y_pos) < DRAG_THRESHOLD:
self.do_hide()
self.emit("activated")
def do_show(self):
"""Show the IconPalette at the correct position on the desktop."""
_logger.debug("Entered in do_show")
self.move(config.icp_x_position, config.icp_y_position)
# self.move() is necessary; otherwise under some
# circumstances that I don't understand yet, the icp does not
# reappear where it disappeared (probably position in wm != position
# in X)
self.show_all()
def do_hide(self):
"""Hide the IconPalette."""
_logger.debug("Entered in do_hide")
self.hide_all()
if __name__ == "__main__":
iconPalette = IconPalette()
iconPalette.do_show()
gtk.main()
| gpl-3.0 | -6,566,106,239,316,394,000 | 40.205674 | 84 | 0.607057 | false |
bmbouter/python-bugzilla | tests/__init__.py | 1 | 2668 |
from __future__ import print_function
import atexit
import difflib
import imp
import os
import shlex
import sys
if hasattr(sys.version_info, "major") and sys.version_info.major >= 3:
from io import StringIO
else:
from StringIO import StringIO
_cleanup = []
def _import(name, path):
_cleanup.append(path + "c")
return imp.load_source(name, path)
def _cleanup_cb():
for f in _cleanup:
if os.path.exists(f):
os.unlink(f)
atexit.register(_cleanup_cb)
bugzillascript = _import("bugzillascript", "bin/bugzilla")
def diff(orig, new):
"""
Return a unified diff string between the passed strings
"""
return "".join(difflib.unified_diff(orig.splitlines(1),
new.splitlines(1),
fromfile="Orig",
tofile="New"))
def difffile(expect, filename):
expect += '\n'
if not os.path.exists(filename) or os.getenv("__BUGZILLA_UNITTEST_REGEN"):
open(filename, "w").write(expect)
ret = diff(open(filename).read(), expect)
if ret:
raise AssertionError("Output was different:\n%s" % ret)
def clicomm(argv, bzinstance, returnmain=False, printcliout=False,
stdin=None, expectfail=False):
"""
Run bin/bugzilla.main() directly with passed argv
"""
argv = shlex.split(argv)
oldstdout = sys.stdout
oldstderr = sys.stderr
oldstdin = sys.stdin
oldargv = sys.argv
try:
if not printcliout:
out = StringIO()
sys.stdout = out
sys.stderr = out
if stdin:
sys.stdin = stdin
sys.argv = argv
ret = 0
mainout = None
try:
print(" ".join(argv))
print()
mainout = bugzillascript.main(bzinstance)
except SystemExit:
sys_e = sys.exc_info()[1]
ret = sys_e.code
outt = ""
if not printcliout:
outt = out.getvalue()
if outt.endswith("\n"):
outt = outt[:-1]
if ret != 0 and not expectfail:
raise RuntimeError("Command failed with %d\ncmd=%s\nout=%s" %
(ret, argv, outt))
elif ret == 0 and expectfail:
raise RuntimeError("Command succeeded but we expected success\n"
"ret=%d\ncmd=%s\nout=%s" % (ret, argv, outt))
if returnmain:
return mainout
return outt
finally:
sys.stdout = oldstdout
sys.stderr = oldstderr
sys.stdin = oldstdin
sys.argv = oldargv
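# Illustrative usage sketch (not part of the original helpers): the command line and the
# bugzilla instance below are placeholders; real tests pass a connected or mocked Bugzilla
# object here.
def _example_clicomm_usage(bz_instance):
    # Runs "bugzilla query --help" through bin/bugzilla's main() and returns the captured
    # output as a string.
    return clicomm("bugzilla query --help", bz_instance)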
| gpl-2.0 | -5,544,840,165,925,174,000 | 23.934579 | 78 | 0.543853 | false |
ChopChopKodi/pelisalacarta | python/main-classic/core/config.py | 1 | 11164 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta 4
# Copyright 2015 [email protected]
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#
# Distributed under the terms of GNU General Public License v3 (GPLv3)
# http://www.gnu.org/licenses/gpl-3.0.html
# ------------------------------------------------------------
# This file is part of pelisalacarta 4.
#
# pelisalacarta 4 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pelisalacarta 4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pelisalacarta 4. If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------
# Configuration parameters (kodi)
# ------------------------------------------------------------
import os
import xbmc
import xbmcaddon
PLATFORM_NAME = "kodi-krypton"
OLD_PLATFORM = False
PLUGIN_NAME = "pelisalacarta"
__settings__ = xbmcaddon.Addon(id="plugin.video." + PLUGIN_NAME)
__language__ = __settings__.getLocalizedString
def get_platform():
return PLATFORM_NAME
def is_xbmc():
return True
def get_library_support():
return True
def get_system_platform():
""" fonction: pour recuperer la platform que xbmc tourne """
import xbmc
platform = "unknown"
if xbmc.getCondVisibility("system.platform.linux"):
platform = "linux"
elif xbmc.getCondVisibility("system.platform.xbox"):
platform = "xbox"
elif xbmc.getCondVisibility("system.platform.windows"):
platform = "windows"
elif xbmc.getCondVisibility("system.platform.osx"):
platform = "osx"
return platform
def open_settings():
__settings__.openSettings()
def get_setting(name, channel=""):
"""
    Returns the configuration value of the requested parameter.
    Returns the value of the parameter 'name' from the global configuration or from the channel-specific
    configuration of the channel 'channel'.
    If a channel name is given, it looks in \addon_data\plugin.video.pelisalacarta\settings_channels for the file
    channel_data.json and reads the value of the parameter 'name' from it. If channel_data.json does not exist, it
    looks in the channels folder for the file channel.xml and creates a channel_data.json file before returning the
    requested value.
    If the parameter 'name' does not exist in channel_data.json, it is looked up in the global configuration; if it
    does not exist there either, an empty str is returned.
    Parameters:
    name -- parameter name
    channel [optional] -- channel name
    Returns:
    value -- the value of the parameter 'name'
"""
# xbmc.log("config.get_setting name="+name+", channel="+channel+", OLD_PLATFORM="+str(OLD_PLATFORM))
# Specific channel setting
if channel:
# Old platforms read settings from settings-oldplatform.xml, all but the "include_in_global_search", "include_in_newest..."
if OLD_PLATFORM and ("user" in name or "password" in name):
# xbmc.log("config.get_setting reading channel setting from main xml '"+channel+"_"+name+"'")
value = __settings__.getSetting(channel+"_"+name)
# xbmc.log("config.get_setting -> '"+value+"'")
return value
# New platforms read settings from each channel
else:
# xbmc.log("config.get_setting reading channel setting '"+name+"' from channel xml")
from core import channeltools
value = channeltools.get_channel_setting(name, channel)
# xbmc.log("config.get_setting -> '"+repr(value)+"'")
if value is not None:
return value
else:
return ""
# Global setting
else:
# xbmc.log("config.get_setting reading main setting '"+name+"'")
value = __settings__.getSetting(channel+name)
# xbmc.log("config.get_setting -> '"+value+"'")
return value
def set_setting(name,value, channel=""):
"""
    Sets the configuration value of the given parameter.
    Stores 'value' as the value of the parameter 'name' in the global configuration or in the channel-specific
    configuration of the channel 'channel'.
    Returns the new value, or None if the assignment could not be completed.
    If a channel name is given, it looks in \addon_data\plugin.video.pelisalacarta\settings_channels for the file
    channel_data.json and sets the parameter 'name' to the value given by 'value'. If channel_data.json does not
    exist, it looks in the channels folder for the file channel.xml and creates a channel_data.json file before
    modifying the parameter 'name'.
    If the parameter 'name' does not exist, it is added, together with its value, to the corresponding file.
    Parameters:
    name -- parameter name
    value -- parameter value
    channel [optional] -- channel name
    Returns:
    'value' if the value could be set, None otherwise
"""
if channel:
from core import channeltools
return channeltools.set_channel_setting(name, value, channel)
else:
try:
__settings__.setSetting(name, value)
except:
# xbmc.log("[config.py] ERROR al fijar el parametro global {0}= {1}".format(name, value))
return None
return value
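# Illustrative usage sketch (not part of the original add-on): "downloadpath" is a real global
# setting used further below, while the channel name "somechannel" and the per-channel setting
# "include_in_global_search" are only examples.
def _example_settings_usage():
    # Global setting, stored in the add-on's settings.xml
    download_path = get_setting("downloadpath")
    # Per-channel setting, stored in settings_channels/<channel>_data.json
    in_search = get_setting("include_in_global_search", "somechannel")
    # Writing values back follows the same pattern
    set_setting("downloadpath", download_path)
    set_setting("include_in_global_search", "true", "somechannel")
    return download_path, in_search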
def get_localized_string(code):
dev = __language__(code)
try:
dev = dev.encode("utf-8")
except:
pass
return dev
def get_library_path():
if get_system_platform() == "xbox":
default = xbmc.translatePath(os.path.join(get_runtime_path(), "library"))
else:
default = xbmc.translatePath("special://profile/addon_data/plugin.video." +
PLUGIN_NAME + "/library")
value = get_setting("librarypath")
if value == "":
value = default
return value
def get_temp_file(filename):
return xbmc.translatePath(os.path.join("special://temp/", filename))
def get_runtime_path():
return xbmc.translatePath(__settings__.getAddonInfo('Path'))
def get_data_path():
dev = xbmc.translatePath(__settings__.getAddonInfo('Profile'))
    # Workaround for XBMC4XBOX
if not os.path.exists(dev):
os.makedirs(dev)
return dev
def get_cookie_data():
import os
ficherocookies = os.path.join(get_data_path(), 'cookies.dat')
cookiedatafile = open(ficherocookies, 'r')
cookiedata = cookiedatafile.read()
cookiedatafile.close()
return cookiedata
# Test if all the required directories are created
def verify_directories_created():
import logger
from core import filetools
# Force download path if empty
download_path = get_setting("downloadpath")
if download_path == "":
if is_xbmc():
download_path = "special://profile/addon_data/plugin.video." + PLUGIN_NAME + "/downloads"
else:
download_path = filetools.join(get_data_path(), "downloads")
set_setting("downloadpath", download_path)
# Force download list path if empty
download_list_path = get_setting("downloadlistpath")
if download_list_path == "":
if is_xbmc():
download_list_path = "special://profile/addon_data/plugin.video." + PLUGIN_NAME + "/downloads/list"
else:
download_list_path = filetools.join(get_data_path(), "downloads", "list")
set_setting("downloadlistpath", download_list_path)
# Force bookmark path if empty
bookmark_path = get_setting("bookmarkpath")
if bookmark_path == "":
if is_xbmc():
bookmark_path = "special://profile/addon_data/plugin.video." + PLUGIN_NAME + "/downloads/list"
else:
bookmark_path = filetools.join(get_data_path(), "bookmarks")
set_setting("bookmarkpath", bookmark_path)
# Create data_path if not exists
if not os.path.exists(get_data_path()):
logger.debug("Creating data_path " + get_data_path())
filetools.mkdir(get_data_path())
if is_xbmc():
# xbmc.log("Es una plataforma XBMC")
if download_path.startswith("special://"):
# Translate from special and create download_path if not exists
download_path = xbmc.translatePath(download_path)
texto = "(from special)"
else:
texto = ""
        # TODO: if the path is on smb, should we let it be created? filetools can create folders over SMB
if not download_path.lower().startswith("smb") and not filetools.exists(download_path):
logger.debug("Creating download_path" + texto + ": " + download_path)
filetools.mkdir(download_path)
if download_list_path.startswith("special://"):
# Create download_list_path if not exists
download_list_path = xbmc.translatePath(download_list_path)
texto = "(from special)"
else:
texto = ""
        # TODO: if the path is on smb, should we let it be created? filetools can create folders over SMB
if not download_list_path.lower().startswith("smb") and not filetools.exists(download_list_path):
logger.debug("Creating download_list_path" + texto + ": " + download_list_path)
filetools.mkdir(download_list_path)
if bookmark_path.startswith("special://"):
# Create bookmark_path if not exists
bookmark_path = xbmc.translatePath(bookmark_path)
texto = "(from special)"
else:
texto = ""
        # TODO: if the path is on smb, should we let it be created? filetools can create folders over SMB
if not bookmark_path.lower().startswith("smb") and not filetools.exists(bookmark_path):
logger.debug("Creating bookmark_path" + texto + ": " + bookmark_path)
filetools.mkdir(bookmark_path)
# Create library_path if not exists
    # TODO: if the path is on smb, should we let it be created? filetools can create folders over SMB
if not get_library_path().lower().startswith("smb") and not os.path.exists(get_library_path()):
logger.debug("Creating library_path " + get_library_path())
filetools.mkdir(get_library_path())
# Create settings_path is not exists
settings_path = filetools.join(get_data_path(), "settings_channels")
if not filetools.exists(settings_path):
logger.debug("Creating settings_path " + settings_path)
filetools.mkdir(settings_path)
# Checks that a directory "xbmc" is not present on platformcode
old_xbmc_directory = os.path.join(get_runtime_path(), "platformcode", "xbmc")
if os.path.exists(old_xbmc_directory):
logger.debug("Removing old platformcode.xbmc directory")
filetools.rmdirtree(old_xbmc_directory)
| gpl-3.0 | 5,997,169,340,548,187,000 | 34.759615 | 131 | 0.643811 | false |
knipknap/Gelatin | Gelatin/util.py | 1 | 5488 | # Copyright (c) 2010-2017 Samuel Abels
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import codecs
from . import generator
from .generator import Builder
from .parser import Parser
from .compiler import SyntaxCompiler
def compile_string(syntax):
"""
Builds a converter from the given syntax and returns it.
:type syntax: str
:param syntax: A Gelatin syntax.
:rtype: compiler.Context
:return: The compiled converter.
"""
return Parser().parse_string(syntax, SyntaxCompiler())
def compile(syntax_file, encoding='utf8'):
"""
Like compile_string(), but reads the syntax from the file with the
given name.
:type syntax_file: str
:param syntax_file: Name of a file containing Gelatin syntax.
:type encoding: str
:param encoding: Character encoding of the syntax file.
:rtype: compiler.Context
:return: The compiled converter.
"""
return Parser().parse(syntax_file,
SyntaxCompiler(),
encoding=encoding)
def generate(converter, input_file, format='xml', encoding='utf8'):
"""
Given a converter (as returned by compile()), this function reads
the given input file and converts it to the requested output format.
Supported output formats are 'xml', 'yaml', 'json', or 'none'.
:type converter: compiler.Context
:param converter: The compiled converter.
:type input_file: str
:param input_file: Name of a file to convert.
:type format: str
:param format: The output format.
:type encoding: str
:param encoding: Character encoding of the input file.
:rtype: str
:return: The resulting output.
"""
with codecs.open(input_file, encoding=encoding) as thefile:
return generate_string(converter, thefile.read(), format=format)
def generate_to_file(converter,
input_file,
output_file,
format='xml',
in_encoding='utf8',
out_encoding='utf8'):
"""
Like generate(), but writes the output to the given output file
instead.
:type converter: compiler.Context
:param converter: The compiled converter.
:type input_file: str
:param input_file: Name of a file to convert.
:type output_file: str
:param output_file: The output filename.
:type format: str
:param format: The output format.
:type in_encoding: str
:param in_encoding: Character encoding of the input file.
:type out_encoding: str
:param out_encoding: Character encoding of the output file.
:rtype: str
:return: The resulting output.
"""
with codecs.open(output_file, 'w', encoding=out_encoding) as thefile:
result = generate(converter, input_file, format=format, encoding=in_encoding)
thefile.write(result)
def generate_string(converter, input, format='xml'):
"""
Like generate(), but reads the input from a string instead of
from a file.
:type converter: compiler.Context
:param converter: The compiled converter.
:type input: str
:param input: The string to convert.
:type format: str
:param format: The output format.
:rtype: str
:return: The resulting output.
"""
serializer = generator.new(format)
if serializer is None:
raise TypeError('invalid output format ' + repr(format))
builder = Builder()
converter.parse_string(input, builder)
return builder.serialize(serializer)
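# Minimal usage sketch (not part of the library itself); it only illustrates how the helpers
# above are meant to be chained. The caller supplies the Gelatin syntax string and the text
# to convert -- no particular syntax is assumed here.
def _example_convert(syntax, input_text, format='json'):
    converter = compile_string(syntax)
    return generate_string(converter, input_text, format=format)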
def generate_string_to_file(converter,
input,
output_file,
format='xml',
out_encoding='utf8'):
"""
Like generate(), but reads the input from a string instead of
from a file, and writes the output to the given output file.
:type converter: compiler.Context
:param converter: The compiled converter.
:type input: str
:param input: The string to convert.
:type output_file: str
:param output_file: The output filename.
:type format: str
:param format: The output format.
:type out_encoding: str
:param out_encoding: Character encoding of the output file.
:rtype: str
:return: The resulting output.
"""
with codecs.open(output_file, 'w', encoding=out_encoding) as thefile:
result = generate_string(converter, input, format=format)
thefile.write(result)
| mit | 1,872,208,471,006,823,000 | 34.636364 | 85 | 0.668003 | false |
Synss/python-mbedtls | docs/source/conf.py | 1 | 2086 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("../../src"))
# -- Project information -----------------------------------------------------
project = "python-mbedtls"
copyright = "2016, Mathias Laurin"
author = "Mathias Laurin"
# The full version, including alpha/beta/rc tags
release = "1.5.1"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.coverage",
"sphinx.ext.viewcode",
"sphinx.ext.napoleon",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# Autodoc options
autodoc_default_options = {"members": True, "undoc-members": True}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
| mit | -642,639,203,501,782,700 | 32.111111 | 79 | 0.657718 | false |
LuizArmesto/easyaspect | test/test_utils.py | 1 | 4815 | import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src'))
import unittest
try:
from unittest import mock
except ImportError:
import mock
from easyaspect.utils import (get_module, get_classes, get_methods,
get_properties)
class DummyClass(object):
prop_a = 'string'
prop_b = 42
def method_a(self):
pass
def method_b(self):
pass
class TestModule(unittest.TestCase):
def test_empty_module_name(self):
self.assertEqual(None, get_module(''))
def test_exception_when_not_found(self):
self.assertRaises(
ImportError, get_module, 'this_module_should_not_exist_jdjfkrkd')
@mock.patch('easyaspect.utils.import_module')
def test_import_and_return_a_module(self, mocked_import_module):
mocked_import_module.return_value = 'module_returned'
self.assertEqual('module_returned', get_module('module_name'))
class TestClasses(unittest.TestCase):
def test_with_dictionary(self):
self.assertEqual([DummyClass], get_classes(globals(), 'Dummy*'))
def test_with_module(self):
from importlib import import_module
module = import_module(DummyClass.__module__)
self.assertEqual([DummyClass], get_classes(module, 'Dummy*'))
def test_with_string(self):
module = DummyClass.__module__
self.assertEqual([DummyClass], get_classes(module, 'Dummy*'))
def test_with_complete_string(self):
module = DummyClass.__module__
self.assertEqual([DummyClass],
get_classes('{}.Dummy*'.format(module)))
def test_with_only_classname_string(self):
self.assertEqual([DummyClass], get_classes('Dummy*'))
def test_wrong_cls_type(self):
self.assertRaises(TypeError, get_classes, 0, 'Dummy*')
def test_with_class_itself(self):
self.assertEqual([DummyClass], get_classes(DummyClass))
def test_not_found(self):
module = DummyClass.__module__
self.assertEqual([], get_classes(module, 'Dmmy*'))
class TestMethods(unittest.TestCase):
def test_get_methods_from_class(self):
methods = [(DummyClass, [
('method_a', DummyClass.method_a),
('method_b', DummyClass.method_b)
])]
self.assertEqual(methods, get_methods(DummyClass, 'method_*'))
def test_get_methods_from_string(self):
methods = [(DummyClass, [
('method_a', DummyClass.method_a),
('method_b', DummyClass.method_b)
])]
self.assertEqual(methods, get_methods('DummyClass', 'method_*'))
def test_get_methods_from_complete_module_string(self):
methods = [(DummyClass, [
('method_a', DummyClass.method_a),
('method_b', DummyClass.method_b)
])]
self.assertEqual(methods, get_methods('{}.DummyClass'.format(
DummyClass.__module__), 'method_*'))
def test_get_methods_from_full_string(self):
methods = [(DummyClass, [
('method_a', DummyClass.method_a),
('method_b', DummyClass.method_b)
])]
self.assertEqual(methods, get_methods('DummyClass.method_*'))
def test_get_methods_with_wrong_type(self):
self.assertEqual([], get_methods(0))
def test_get_methods_from_method_itself(self):
methods = [(DummyClass, [
('method_a', DummyClass.method_a)
])]
self.assertEqual(methods, get_methods(DummyClass.method_a))
class TestProperties(unittest.TestCase):
def test_get_properties_from_class(self):
props = [(DummyClass, [
('prop_a', DummyClass.prop_a),
('prop_b', DummyClass.prop_b)
])]
self.assertEqual(props, get_properties(DummyClass, 'prop_*'))
def test_get_properties_from_string(self):
props = [(DummyClass, [
('prop_a', DummyClass.prop_a),
('prop_b', DummyClass.prop_b)
])]
self.assertEqual(props, get_properties('DummyClass', 'prop_*'))
def test_get_properties_from_complete_module_string(self):
props = [(DummyClass, [
('prop_a', DummyClass.prop_a),
('prop_b', DummyClass.prop_b)
])]
self.assertEqual(props, get_properties('{}.DummyClass'.format(
DummyClass.__module__), 'prop_*'))
def test_get_properties_from_full_string(self):
props = [(DummyClass, [
('prop_a', DummyClass.prop_a),
('prop_b', DummyClass.prop_b)
])]
self.assertEqual(props,
get_properties('{}.DummyClass.prop_*'.format(
DummyClass.__module__)))
def test_get_properties_with_wrong_type(self):
self.assertEqual([], get_properties(0))
| mit | -314,514,503,968,040,600 | 32.206897 | 77 | 0.598962 | false |
merenlab/anvio | anvio/genomedescriptions.py | 1 | 73853 | # -*- coding: utf-8
# pylint: disable=line-too-long
"""
A module for dealing with genome storages.
The pangenomic workflow heavily uses this module.
Ad hoc access to make sense of internal or external genome descriptions is also welcome.
"""
import os
import sys
import copy
import hashlib
import argparse
from collections import Counter
import anvio
import anvio.db as db
import anvio.tables as t
import anvio.utils as utils
import anvio.dbops as dbops
import anvio.terminal as terminal
import anvio.ccollections as ccollections
import anvio.filesnpaths as filesnpaths
from anvio.errors import ConfigError
__author__ = "Developers of anvi'o (see AUTHORS.txt)"
__copyright__ = "Copyleft 2015-2018, the Meren Lab (http://merenlab.org/)"
__credits__ = []
__license__ = "GPL 3.0"
__version__ = anvio.__version__
__maintainer__ = "A. Murat Eren"
__email__ = "[email protected]"
run = terminal.Run()
progress = terminal.Progress()
pp = terminal.pretty_print
P = terminal.pluralize
class GenomeDescriptions(object):
def __init__(self, args=None, run=run, progress=progress):
self.args = args
self.run = run
self.progress = progress
self.genomes = {}
self.internal_genomes_dict = None
self.external_genomes_dict = None
self.initialized = False
A = lambda x: self.args.__dict__[x] if x in self.args.__dict__ else None
self.just_do_it = A('just_do_it')
self.functions_are_available = False
self.function_annotation_sources = set([])
self.input_file_for_internal_genomes = A('internal_genomes')
self.input_file_for_external_genomes = A('external_genomes')
self.skip_checking_genome_hashes = A('skip_checking_genome_hashes')
        self.list_hmm_sources = A('list_hmm_sources')                   # <<< these two look out of place, but if the args request
        self.list_available_gene_names = A('list_available_gene_names') # information about HMMs, this is the best place to set them
self.gene_caller = A('gene_caller')
if self.input_file_for_internal_genomes or self.input_file_for_external_genomes:
self.read_genome_paths_from_input_files()
# see `self.is_proper_db` function for these two variables:
self.contigs_dbs_found = set([])
self.profile_dbs_found = set([])
self.external_genomes_with_identical_hashes = {}
self.internal_genomes_with_identical_hashes = {}
def is_proper_db(self, db_path, db_type):
"""Check if contigs db or profile db is OK.
        A given contigs database may have multiple entries in an internal or external genomes file.
        The same goes for a profile database. One of the things we have to do during initialization
        is to check whether each entry in the int/ext genomes files is associated with a legitimate
        contigs or profile database. This function fills up the `self.contigs_dbs_found` and
        `self.profile_dbs_found` variables every time it is called. If there is already an entry
        for a given path, it returns True without any additional function call.
Parameters
==========
db_path: str
path to the database
db_type: str in ['contigs', 'profile']
whether a given database is assumed to be a contigs or profile database
"""
db_types_factory = {'contigs': {'check': utils.is_contigs_db, 'variable': self.contigs_dbs_found},
'profile': {'check': utils.is_profile_db, 'variable': self.profile_dbs_found}}
if db_type not in db_types_factory:
raise ConfigError("is_proper_db :: wrong `db_type` :/ Pick either: %s" % ', '.join(db_types_factory))
if db_path in db_types_factory[db_type]['variable']:
return True
else:
db_types_factory[db_type]['check'](db_path)
db_types_factory[db_type]['variable'].add(db_path)
def names_check(self):
i, n = list(self.internal_genomes_dict.keys() if self.internal_genomes_dict else []), \
list(self.external_genomes_dict.keys() if self.external_genomes_dict else [])
if not i and not n:
raise ConfigError("You actually managed to get all the way down here in the code without actually providing any internal "
"or external genome files! You got 5 anvi'o points for being awesome. But this is not gonna work since "
"you really need to provide at least one of those files so anvi'o takes away 4 of those points :/ The "
"anvi'o giveth, and the anvi'o taketh away. Enjoy your point.")
if len(i) + len(n) != len(set(i + n)):
raise ConfigError("Each entry both in internal and external genome descriptions should have a unique 'name'. This does not "
"seem to be the case with your input :/")
def read_genome_paths_from_input_files(self):
"""Reads internal and external genome files, populates self.genomes"""
fields_for_internal_genomes_input = ['name', 'bin_id', 'collection_id', 'profile_db_path', 'contigs_db_path']
fields_for_external_genomes_input = ['name', 'contigs_db_path']
self.internal_genomes_dict = utils.get_TAB_delimited_file_as_dictionary(self.input_file_for_internal_genomes, expected_fields=fields_for_internal_genomes_input) if self.input_file_for_internal_genomes else {}
self.external_genomes_dict = utils.get_TAB_delimited_file_as_dictionary(self.input_file_for_external_genomes, expected_fields=fields_for_external_genomes_input) if self.input_file_for_external_genomes else {}
def list_HMM_info_and_quit(self):
if not self.initialized:
self.load_genomes_descriptions()
hmm_sources_in_all_genomes = self.get_HMM_sources_common_to_all_genomes()
# since we know hmm sources in `hmm_sources_in_all_genomes` are common to all genomes,
# we could use any of those genomes to learn about the specifics of them. here we take
# the first one from `self.genomes`
contigs_db = dbops.ContigsDatabase(list(self.genomes.values())[0]['contigs_db_path'])
hmm_sources_info = contigs_db.db.get_table_as_dict(t.hmm_hits_info_table_name)
contigs_db.disconnect()
if self.list_hmm_sources or self.list_available_gene_names:
if not len(hmm_sources_in_all_genomes):
raise ConfigError("There are no HMM sources among your external genomes that occur in every genome :/")
if self.list_hmm_sources:
self.run.warning(None, 'HMM SOURCES COMMON TO ALL %d GENOMES' % (len(self.genomes)), lc='yellow')
for source in sorted(hmm_sources_in_all_genomes):
s = hmm_sources_info[source]
self.run.info_single('%s [type: %s] [num genes: %d]' % (source, s['search_type'], len(s['genes'])))
sys.exit(0)
if self.list_available_gene_names:
self.run.warning(None, 'GENES IN HMM SOURCES COMMON TO ALL %d GENOMES' % (len(self.genomes)), lc='yellow')
for source in sorted(hmm_sources_in_all_genomes):
s = hmm_sources_info[source]
gene_names = ', '.join(sorted([g.strip() for g in s['genes'].split(',')]))
self.run.info_single('%s [type: %s]: %s' % (source, s['search_type'], gene_names), nl_after = 2)
sys.exit(0)
def get_HMM_sources_common_to_all_genomes(self):
"""Returns True if all HMM sources in all genomes are comparable"""
hmm_sources_info_per_genome = {}
# first recover hmm sources info per genome
for genome_name in self.genomes:
if 'hmm_sources_info' not in self.genomes[genome_name]:
                # someone did not run the expensive `init` function, but we can recover this
                # here quite cheaply
contigs_db = dbops.ContigsDatabase(self.genomes[genome_name]['contigs_db_path'])
hmm_sources_info = contigs_db.db.get_table_as_dict(t.hmm_hits_info_table_name)
else:
hmm_sources_info = self.genomes[genome_name]['hmm_sources_info']
hmm_sources_info_per_genome[genome_name] = hmm_sources_info
hmm_sources_found = set([])
for genome_name in self.genomes:
            [hmm_sources_found.add(s) for s in hmm_sources_info_per_genome[genome_name].keys()]
# find out hmm_sources that occur in all genomes
hmm_sources_in_all_genomes = copy.deepcopy(hmm_sources_found)
for genome_name in self.genomes:
for hmm_source in hmm_sources_found:
if hmm_source not in hmm_sources_info_per_genome[genome_name] and hmm_source in hmm_sources_in_all_genomes:
hmm_sources_in_all_genomes.remove(hmm_source)
return hmm_sources_in_all_genomes
def load_genomes_descriptions(self, skip_functions=False, init=True, skip_sanity_check=False):
"""Load genome descriptions from int/ext genome dictionaries"""
# start with a sanity check to make sure name are distinct
self.names_check()
self.internal_genome_names = list(self.internal_genomes_dict.keys())
self.external_genome_names = list(self.external_genomes_dict.keys())
# let us know if the user did not want a full init.
self.full_init = init
# convert relative paths to absolute paths and MERGE internal and external genomes into self.genomes:
for source, input_file in [(self.external_genomes_dict, self.input_file_for_external_genomes),
(self.internal_genomes_dict, self.input_file_for_internal_genomes)]:
for genome_name in source:
self.genomes[genome_name] = source[genome_name]
for db_path_var in ['contigs_db_path', 'profile_db_path']:
if db_path_var not in self.genomes[genome_name]:
continue
path = self.genomes[genome_name][db_path_var]
if not path:
raise ConfigError("Bad news: anvi'o was loading genome desriptions, and it run into an empty path for "
"the genome %s. How did this happen? HOW? :(" % genome_name)
if not path.startswith('/'):
self.genomes[genome_name][db_path_var] = os.path.abspath(os.path.join(os.path.dirname(input_file), path))
# while we are going through all genomes and reconstructing self.genomes for the first time,
                # let's add the 'name' attribute to it as well.
self.genomes[genome_name]['name'] = genome_name
# add hashes for each genome in the self.genomes dict.
self.genome_hash_to_genome_name = {}
self.progress.new('Setting up genome hash dicts', progress_total_items=len(self.genomes))
for genome_name in self.external_genome_names:
self.progress.update("working on %s (external)" % (genome_name), increment=True)
g_hash = str(self.get_genome_hash_for_external_genome(self.genomes[genome_name]))
self.genomes[genome_name]['genome_hash'] = g_hash
self.genome_hash_to_genome_name[g_hash] = genome_name
for genome_name in self.internal_genome_names:
self.progress.update("working on %s (internal)" % (genome_name), increment=True)
g_hash = str(self.get_genome_hash_for_internal_genome(self.genomes[genome_name]))
self.genomes[genome_name]['genome_hash'] = g_hash
self.genome_hash_to_genome_name[g_hash] = genome_name
self.progress.end()
# if the user wanted anvi'o to not care about checking genome hashes and we ended up
# finding genomes with identical hashes, let them know
if self.skip_checking_genome_hashes and (len(self.internal_genomes_with_identical_hashes) or len(self.external_genomes_with_identical_hashes)):
self.run.warning("While processing internal and/or external genomes files you have provided, "
"anvi'o found genomes with identical hashes (which means they were practically "
"identical to each other). But since you have instructed anvi'o to ignore that "
"it is now continuing with the flow (even %d hashes for your internal genomes and %d) "
"hashes for your external gneomes appeared more than once). See below the genome names "
"with identical hashes:" % (len(self.internal_genomes_with_identical_hashes),
len(self.external_genomes_with_identical_hashes)),
overwrite_verbose=True)
for _t, _d in [('Internal', self.internal_genomes_with_identical_hashes), ('External', self.external_genomes_with_identical_hashes)]:
all_genome_hashes = list(_d.keys())
for genome_hash in all_genome_hashes:
self.run.info("%s genomes with hash %s" % (_t, genome_hash), "%s" % ", ".join(_d[genome_hash]),
overwrite_verbose=True,
nl_after = 1 if genome_hash == all_genome_hashes[-1] else 0,
lc='red')
# if the client is not interested in functions, skip the rest.
if skip_functions:
self.functions_are_available = False
else:
self.init_functions()
# this will populate self.genomes with relevant data that can be learned about these genomes such as 'avg_gene_length',
# 'num_splits', 'num_contigs', 'num_genes', 'percent_redundancy', 'gene_caller_ids', 'total_length', 'partial_gene_calls',
# 'percent_completion', 'num_genes_per_kb', 'gc_content'.
if self.full_init:
self.init_internal_genomes()
self.init_external_genomes()
else:
# init will do everything. but it is very expensive. if the user does not want to
# init all the bulky stuff, we still can give them the contents of the meta tables.
for genome_name in self.genomes:
g = self.genomes[genome_name]
contigs_db = dbops.ContigsDatabase(g['contigs_db_path'])
for key in contigs_db.meta:
g[key] = contigs_db.meta[key]
        # we are done here.
self.initialized = True
# make sure it is OK to go with self.genomes
if not skip_sanity_check:
self.sanity_check()
def get_functions_and_sequences_dicts_from_contigs_db(self, genome_name, requested_source_list=None, return_only_functions=False):
"""This function fetches dictionaries of functions, AA sequences, and DNA sequences for a particular genome.
PARAMETERS
==========
genome_name, str
the genome name you want data for
requested_source_list, list
the functional annotation sources you want data for. If not provided, data will be fetched for all sources in
self.function_annotation_sources
return_only_functions, bool
Return only functions, and don't bother with sequences
RETURNS
=======
function_calls_dict : dictionary of function annotations
aa_sequences_dict : dictionary of corresponding amino acid sequences
dna_sequences_dict : dictionary of corresponding nucleotide sequences
"""
if not requested_source_list:
requested_source_list = list(self.function_annotation_sources)
g = self.genomes[genome_name]
args = argparse.Namespace()
args.contigs_db = g['contigs_db_path']
# we are about to initialize the contigs super, but before that, we need to make sure that
# the class will know about the splits that describe this genome in the contigs database
        # IF it is an internal genome. Otherwise we will end up gathering all the functions in
        # the contigs database for it.
if genome_name in self.internal_genome_names:
args.split_names_of_interest = self.get_split_names_of_interest_for_internal_genome(g)
contigs_super = dbops.ContigsSuperclass(args, r=anvio.terminal.Run(verbose=False))
if self.functions_are_available:
contigs_super.init_functions(requested_sources=requested_source_list)
function_calls_dict = contigs_super.gene_function_calls_dict
else:
function_calls_dict = {}
if return_only_functions:
return (function_calls_dict, None, None)
# get dna sequences
gene_caller_ids_list, dna_sequences_dict = contigs_super.get_sequences_for_gene_callers_ids(gene_caller_ids_list=list(g['gene_caller_ids']))
# get amino acid sequences.
# FIXME: this should be done in the contigs super.
contigs_db = dbops.ContigsDatabase(g['contigs_db_path'])
aa_sequences_dict = contigs_db.db.get_table_as_dict(t.gene_amino_acid_sequences_table_name)
contigs_db.disconnect()
return (function_calls_dict, aa_sequences_dict, dna_sequences_dict)
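    # An illustrative sketch (not part of the original class) of how a caller might use the
    # accessor above after a full init; the argparse namespace and the loop below are only an
    # assumption about typical usage:
    #
    #   descriptions = GenomeDescriptions(args)
    #   descriptions.load_genomes_descriptions()
    #   for genome_name in descriptions.genomes:
    #       functions, aa_seqs, dna_seqs = \
    #           descriptions.get_functions_and_sequences_dicts_from_contigs_db(genome_name)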
def init_functions(self):
# check whether function calls are available for all genomes involved, and whether function sources for each genome is identical
genomes_with_no_functional_annotation = []
function_annotation_sources_per_genome = {}
all_function_annotation_sources_observed = set([])
for genome_name in self.genomes:
g = self.genomes[genome_name]
contigs_db = dbops.ContigsDatabase(g['contigs_db_path'])
sources = contigs_db.meta['gene_function_sources']
contigs_db.disconnect()
if not sources:
genomes_with_no_functional_annotation.append(genome_name)
else:
function_annotation_sources_per_genome[genome_name] = sources
all_function_annotation_sources_observed.update(sources)
if genomes_with_no_functional_annotation:
if len(genomes_with_no_functional_annotation) == len(self.genomes):
self.run.warning("None of your genomes seem to have any functional annotation. No biggie. Things will continue to work. But "
"then your genomes have no functional annotation. SAD.")
else:
self.run.warning("Some of your genomes (%d of the %d, to be precise) seem to have no functional annotation. Since this workflow "
"can only use matching functional annotations across all genomes involved, having even one genome without "
"any functions means that there will be no matching function across all. Things will continue to work, but "
"you will have no functions at the end for your gene clusters." % \
(len(genomes_with_no_functional_annotation), len(self.genomes)))
# make sure it is clear.
function_annotation_sources_per_genome = {}
all_function_annotation_sources_observed = set([])
elif not len(all_function_annotation_sources_observed):
self.run.warning("None of your genomes seem to have any functional annotation. No biggie. Things will continue to work. But "
"then your genomes have no functional annotation. It is sad.")
else:
# this guy down below fills in the self.function_annotation_sources with function annotation sources
# that are common to all genomes.
for sources in list(function_annotation_sources_per_genome.values()):
if not sources:
continue
if not self.function_annotation_sources:
self.function_annotation_sources.update(sources)
else:
self.function_annotation_sources = self.function_annotation_sources.intersection(sources)
function_annotation_sources_some_genomes_miss = all_function_annotation_sources_observed.difference(self.function_annotation_sources)
if not len(self.function_annotation_sources):
# none of the functions are common
self.run.warning("Although some of your genomes had some functional annotations, none of them were common to all genomes :/ "
"Anvi'o will continue working with them, but you will have no functions available to you downstream. Just "
"so you know, these are the annotation sources observed at least once in at least one of your genomes: '%s'" % \
(', '.join(all_function_annotation_sources_observed)))
self.functions_are_available = False
else:
self.functions_are_available = True
# good. here we know some functions are available, but let's get some further understanding, and report it to the user, you know,
# because we're nice:
if len(function_annotation_sources_some_genomes_miss):
# some functions were missing from some genomes
self.run.warning("Anvi'o has good news and bad news for you (very balanced, as usual). The good news is that there are some "
"functional annotation sources that are common to all of your genomes, and they will be used whenever "
"it will be appropriate. Here they are: '%s'. The bad news is you had more function annotation sources, "
"but they were not common to all genomes. Here they are so you can say your goodbyes to them (because "
"they will not be used): '%s'" % \
(', '.join(self.function_annotation_sources), ', '.join(function_annotation_sources_some_genomes_miss)))
else:
# every function ever observed is common to all genomes.
self.run.warning("Good news! Anvi'o found all these functions that are common to all of your genomes and will use them for "
"downstream analyses and is very proud of you: '%s'." % (', '.join(self.function_annotation_sources)), lc='green')
def get_genome_hash_for_external_genome(self, entry):
self.is_proper_db(entry['contigs_db_path'], db_type='contigs')
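# for an external genome the hash is simply the `contigs_db_hash` value stored in the contigs
# database itself; internal genomes (bins) get a hash derived from their split names instead
# (see `get_genome_hash_for_internal_genome` below)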
genome_hash = db.DB(entry['contigs_db_path'], None, ignore_version=True).get_meta_value('contigs_db_hash')
if genome_hash in self.genome_hash_to_genome_name:
if self.skip_checking_genome_hashes:
if genome_hash in self.external_genomes_with_identical_hashes:
self.external_genomes_with_identical_hashes[genome_hash].add(entry['name'])
self.external_genomes_with_identical_hashes[genome_hash].add(self.genome_hash_to_genome_name[genome_hash])
else:
self.external_genomes_with_identical_hashes[genome_hash] = set([self.genome_hash_to_genome_name[genome_hash], entry['name']])
else:
self.progress.reset()
raise ConfigError("While working on your external genomes, anvi'o realized that genome %s and %s seem to have the same hash. "
"If you are aware of this and/or if you would like anvi'o to not check genome hashes, please use the flag "
"`--skip-checking-genome-hashes`." % (self.genome_hash_to_genome_name[genome_hash], entry['name']))
return genome_hash
def get_genome_hash_for_internal_genome(self, entry):
self.is_proper_db(entry['contigs_db_path'], db_type='contigs')
split_names_of_interest = self.get_split_names_of_interest_for_internal_genome(entry)
contigs_db_hash = db.DB(entry['contigs_db_path'], None, ignore_version=True).get_meta_value('contigs_db_hash')
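# the genome hash is the first 12 hex characters of a sha224 digest computed over the
# concatenated split names plus the parent contigs db hash, so the same bin in the same
# contigs database always resolves to the same short identifier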
genome_hash = hashlib.sha224('_'.join([''.join(split_names_of_interest), contigs_db_hash]).encode('utf-8')).hexdigest()[0:12]
if genome_hash in self.genome_hash_to_genome_name:
if self.skip_checking_genome_hashes:
if genome_hash in self.internal_genomes_with_identical_hashes:
self.internal_genomes_with_identical_hashes[genome_hash].add(entry['name'])
self.internal_genomes_with_identical_hashes[genome_hash].add(self.genome_hash_to_genome_name[genome_hash])
else:
self.internal_genomes_with_identical_hashes[genome_hash] = set([self.genome_hash_to_genome_name[genome_hash], entry['name']])
else:
self.progress.reset()
genome_1, genome_2 = self.genome_hash_to_genome_name[genome_hash], entry['name']
raise ConfigError("According to hash values anvi'o has been generating for your internal genomes, not all genomes you have seem to be uniuqe. "
"It is most likely you unintentionally listed the same information for different genome names. If you would like "
"to double check, genome %s (in '%s') and genome %s (in '%s') seem to have the same hash (so they are basically the same genomes). "
"If you are aware of this and/or if you would like anvi'o to not check genome hashes, please use the flag "
"`--skip-checking-genome-hashes`." % (genome_1,
self.genomes[genome_1]['collection_id'],
genome_2,
self.genomes[genome_2]['collection_id']))
return genome_hash
def init_external_genomes(self):
from anvio.summarizer import ContigSummarizer
self.progress.new('Initializing external genomes', progress_total_items=len(self.external_genome_names))
for genome_name in self.external_genome_names:
c = self.genomes[genome_name]
c['external_genome'] = True
self.progress.update('working on %s' % (genome_name), increment=True)
contigs_db_summary = ContigSummarizer(c['contigs_db_path']).get_contigs_db_info_dict(gene_caller_to_use=self.gene_caller)
for key in contigs_db_summary:
c[key] = contigs_db_summary[key]
self.progress.end()
self.run.info('External genomes', '%d found.' % len(self.external_genome_names))
def get_unique_profile_db_path_to_internal_genome_name_dict(self):
"""Returns a dictionary to bind all genome names that originate from the same profile db"""
unique_profile_db_path_to_internal_genome_name = {}
for profile_path in set([self.genomes[g]['profile_db_path'] for g in self.internal_genome_names]):
unique_profile_db_path_to_internal_genome_name[profile_path] = [g for g in self.internal_genome_names if self.genomes[g]['profile_db_path'] == profile_path]
return unique_profile_db_path_to_internal_genome_name
def init_internal_genomes(self):
from anvio.summarizer import ContigSummarizer
self.progress.new('Initializing internal genomes')
# to not initialize things over and over again:
unique_profile_db_path_to_internal_genome_name = self.get_unique_profile_db_path_to_internal_genome_name_dict()
for profile_db_path in unique_profile_db_path_to_internal_genome_name:
self.collections = ccollections.Collections()
self.collections.populate_collections_dict(profile_db_path)
for genome_name in unique_profile_db_path_to_internal_genome_name[profile_db_path]:
self.progress.update('working on %s' % (genome_name))
c = self.genomes[genome_name]
c['external_genome'] = False
utils.is_profile_db_and_contigs_db_compatible(c['profile_db_path'], c['contigs_db_path'])
split_names_of_interest = self.get_split_names_of_interest_for_internal_genome(c)
# here we are using the get_contigs_db_info_dict function WITH split names we found in the collection
# which returns a partial summary from the contigs database focusing only on those splits. a small workaround
# to be able to use the same function for bins in collections:
contigs_summary = ContigSummarizer(c['contigs_db_path'])
summary_from_contigs_db_summary = contigs_summary.get_contigs_db_info_dict(split_names=split_names_of_interest,
gene_caller_to_use=self.gene_caller)
for key in summary_from_contigs_db_summary:
c[key] = summary_from_contigs_db_summary[key]
self.progress.end()
self.run.info('Internal genomes', '%d have been initialized.' % len(self.internal_genome_names))
def get_split_names_of_interest_for_internal_genome(self, entry):
self.is_proper_db(entry['profile_db_path'], db_type='profile')
# get splits of interest:
class Args: pass
args = Args()
args.profile_db = entry['profile_db_path']
args.collection_name = entry['collection_id']
args.bin_id = entry['bin_id']
split_names_of_interest = list(ccollections.GetSplitNamesInBins(args).get_split_names_only())
if not len(split_names_of_interest):
raise ConfigError("There are 0 splits defined for bin id %s in collection %s..." % (entry['bin_id'], entry['collection_id']))
return split_names_of_interest
def sanity_check(self):
"""Make sure self.genomes is good to go"""
# depending on whether args requested such behavior.
self.list_HMM_info_and_quit()
self.progress.new('Sanity checks')
# make sure genes are called in every contigs db:
self.progress.update("Checking gene calls ..")
genomes_missing_gene_calls = [g for g in self.genomes if not self.genomes[g]['genes_are_called']]
if len(genomes_missing_gene_calls):
self.progress.end()
raise ConfigError('Genes must have been called during the generation of contigs database for this workflow to work. However, '
                  'these external genomes do not have gene calls: %s' % (', '.join(genomes_missing_gene_calls)))
if not self.full_init:
# if this is not full init, stop the sanity check here.
self.progress.end()
self.run.warning("You (or the programmer) requested genome descriptions for your internal and/or external "
"genomes to be loaded _without_ a 'full init'. There is nothing for you to be concerned. "
"This is just a friendly reminder to make sure you know that if something goes terribly "
"wrong later (like your computer sets itself on fire), this may be the reason.")
return
self.progress.update("Checking HMMs and SCGs ..")
# make sure HMMs for SCGs were run for every contigs db:
genomes_missing_hmms_for_scgs = [g for g in self.genomes if not self.genomes[g]['hmms_for_scgs_were_run']]
if len(genomes_missing_hmms_for_scgs):
if len(genomes_missing_hmms_for_scgs) == len(self.genomes):
self.progress.reset()
self.run.warning("The contigs databases you are using for this analysis are missing HMMs for single-copy core genes. "
"Maybe you haven't run `anvi-run-hmms` on your contigs database, or they didn't contain any hits. "
"It is perfectly legal to have anvi'o contigs databases without HMMs or SCGs for things to work, "
"but we wanted to give you heads up so you can have your 'aha' moment if you see funny things in "
"the interface.")
else:
self.progress.end()
raise ConfigError("Some of the genomes you have for this analysis are missing HMM hits for SCGs (%d of %d of them, to be precise). You "
"can run `anvi-run-hmms` on them to recover from this. Here is the list: %s" % \
(len(genomes_missing_hmms_for_scgs), len(self.genomes), ','.join(genomes_missing_hmms_for_scgs)))
# make sure genome names are not funny (since they are going to end up being db variables soon)
self.progress.update("Checking genome names ..")
[utils.is_this_name_OK_for_database('genome name "%s"' % genome_name, genome_name) for genome_name in self.genomes]
# figure out whether there are genomes with gene calls that are NOT processed
self.progress.update("Checking gene calls that are not processed ..")
genomes_with_non_reported_gene_calls_from_other_gene_callers = []
for genome_name in self.genomes:
if self.genomes[genome_name]['gene_calls_from_other_gene_callers']:
genomes_with_non_reported_gene_calls_from_other_gene_callers.append(genome_name)
if len(genomes_with_non_reported_gene_calls_from_other_gene_callers):
info = []
for genome_name in genomes_with_non_reported_gene_calls_from_other_gene_callers:
info.append('%s (%s)' % (genome_name,
', '.join(['%d gene calls by "%s"' % (tpl[1], tpl[0]) for \
tpl in self.genomes[genome_name]['gene_calls_from_other_gene_callers'].items()])))
gene_caller = list(self.genomes.values())[0]['gene_caller']
if anvio.DEBUG:
self.progress.reset()
self.run.warning("Some of your genomes had gene calls identified by gene callers other than "
"the gene caller anvi'o used, which was set to '%s' either by default, or because you asked for it. "
"The following genomes contained genes that were not processed (this may be exactly what you expect "
"to happen, but if was not, you may need to use the `--gene-caller` flag to make sure anvi'o is using "
"the gene caller it should be using): %s." % \
(gene_caller, ', '.join(info)), header="PLEASE READ CAREFULLY", lc='green')
else:
self.progress.reset()
self.run.warning("Some of your genomes had gene calls identified by gene callers other than "
"the anvi'o default, '%s', and will not be processed. Use the `--debug` flag "
"if this sounds important and you would like to see more of this message." % \
(gene_caller), header="JUST FYI", lc='green')
# check whether every genome has at least one gene call.
self.progress.update("Making sure each genome has at least one gene call ..")
genomes_with_no_gene_calls = [g for g in self.genomes if not self.genomes[g]['num_genes']]
if len(genomes_with_no_gene_calls):
self.progress.reset()
if len(genomes_with_no_gene_calls) == len(self.genomes):
raise ConfigError("None of your genomes seem to have a gene call, which is a typical error you get if you are working "
"with contigs databases with external gene calls. You can solve it by looking at the output of the "
"program `anvi-db-info` for a given contigs database in your collection, and use one of the gene "
"caller sources listed in the output using the `--gene-caller` parameter.")
else:
raise ConfigError(f"Well, {len(genomes_with_no_gene_calls)} of your {len(self.genomes)} genomes seems to have 0 gene calls. "
f"We can't think of any reason to include genomes that contain no gene calls into a genomes storage, "
f"hence, we are going to stop here and ask you to remove these genomes from your analysis first: "
f"{', '.join(genomes_with_no_gene_calls)}. If you think this is happening because you didn't set "
f"the right source for gene calls, you can always take a look at what is available in a given "
f"contigs database by running the program `anvi-db-info`.")
self.progress.end()
class MetagenomeDescriptions(object):
def __init__(self, args=None, run=run, progress=progress, enforce_single_profiles=True):
self.args = args
self.run = run
self.progress = progress
self.enforce_single_profiles = enforce_single_profiles
self.metagenomes = {}
self.metagenomes_dict = None
self.profile_dbs_available = False
A = lambda x: self.args.__dict__[x] if x in self.args.__dict__ else None
self.input_file_for_metagenomes = A('metagenomes')
if self.input_file_for_metagenomes:
self.read_paths_from_input_file()
def names_check(self):
names = utils.get_column_data_from_TAB_delim_file(self.input_file_for_metagenomes, [0])[0][1:]
if len(names) != len(set(names)):
raise ConfigError("Each entry in your metagenomes file must be unique :/")
def read_paths_from_input_file(self):
"""Reads metagenome files, populates self.metagenomes"""
columns = utils.get_columns_of_TAB_delim_file(self.input_file_for_metagenomes)
if 'profile_db_path' in columns:
fields_for_metagenomes_input = ['name', 'contigs_db_path', 'profile_db_path']
self.profile_dbs_available = True
else:
fields_for_metagenomes_input = ['name', 'contigs_db_path']
self.profile_dbs_available = False
self.metagenomes_dict = utils.get_TAB_delimited_file_as_dictionary(self.input_file_for_metagenomes, expected_fields=fields_for_metagenomes_input) if self.input_file_for_metagenomes else {}
def load_metagenome_descriptions(self, skip_functions=False, init=True, skip_sanity_check=False):
"""Load metagenome descriptions"""
# start with a sanity check to make sure names are distinct
self.names_check()
self.metagenome_names = list(self.metagenomes_dict.keys())
for metagenome_name in self.metagenomes_dict:
self.metagenomes[metagenome_name] = self.metagenomes_dict[metagenome_name]
for db_path_var in ['contigs_db_path', 'profile_db_path']:
if db_path_var not in self.metagenomes[metagenome_name]:
continue
path = self.metagenomes[metagenome_name][db_path_var]
if not path:
raise ConfigError("Bad news: anvi'o was loading metagenome desriptions, and it run into an empty path for "
"the metagenome %s. How did this happen? HOW? :(" % metagenome_name)
if not path.startswith('/'):
self.metagenomes[metagenome_name][db_path_var] = os.path.abspath(os.path.join(os.path.dirname(self.input_file_for_metagenomes), path))
# while we are going through all genomes and reconstructing self.metagenomes for the first time,
# let's add the 'name' attribute in it as well.
self.metagenomes[metagenome_name]['name'] = metagenome_name
# add hashes for each metagenome in the self.metagenomes dict.
self.metagenome_hash_to_metagenome_name = {}
for metagenome_name in self.metagenome_names:
g_hash = self.get_metagenome_hash(self.metagenomes[metagenome_name])
self.metagenomes[metagenome_name]['metagenome_hash'] = g_hash
self.metagenome_hash_to_metagenome_name[g_hash] = metagenome_name
for metagenome_name in self.metagenomes:
g = self.metagenomes[metagenome_name]
contigs_db = dbops.ContigsDatabase(g['contigs_db_path'])
for key in contigs_db.meta:
g[key] = contigs_db.meta[key]
if not skip_sanity_check:
self.sanity_check()
def get_metagenome_hash(self, entry):
utils.is_contigs_db(entry['contigs_db_path'])
contigs_db_hash = db.DB(entry['contigs_db_path'], None, ignore_version=True).get_meta_value('contigs_db_hash')
return contigs_db_hash
def sanity_check(self):
"""Make sure self.metagenomes is good to go"""
if self.profile_dbs_available and self.enforce_single_profiles:
non_single_profiles = [m for m in self.metagenomes if utils.is_profile_db_merged(self.metagenomes[m]['profile_db_path'])
and not utils.is_blank_profile(self.metagenomes[m]['profile_db_path'])]
if len(non_single_profiles):
raise ConfigError("All profile databases associated with your metagenomes must be single profiles :( Here "
"is a list of them that are not: '%s'." % (', '.join(non_single_profiles)))
# make sure genes are called in every contigs db:
metagenomes_missing_gene_calls = [g for g in self.metagenomes if not self.metagenomes[g]['genes_are_called']]
if len(metagenomes_missing_gene_calls):
raise ConfigError('Genes must have been called during the generation of contigs database for this workflow to work. However, '
                  'these metagenomes do not have gene calls: %s' % (', '.join(metagenomes_missing_gene_calls)))
# if two contigs dbs have the same hash, we are kinda f'd:
if len(set([self.metagenomes[metagenome_name]['metagenome_hash'] for metagenome_name in self.metagenome_names])) != len(self.metagenome_names):
raise ConfigError('Not all hash values are unique across all contig databases you provided. Something '
'very fishy is going on :/')
# make sure genome names are not funny (since they are going to end up being db variables soon)
[utils.is_this_name_OK_for_database('metagenome name "%s"' % metagenome_name, metagenome_name) for metagenome_name in self.metagenomes]
class AggregateFunctions:
"""Aggregate functions from anywhere.
The purpose of this class is to collect functions from many distinct databases,
including external genomes, internal genomes, and/or a genomes storage, and report
a set of dictionaries that give access to the presence/absence and frequency
of all functions annotated by a single source.
One fancy function in AggregateFunctions is `report_functions_per_group_stats`. For
instance, one could initiate the class in the following way to get a functions per
group stats output file for functional enrichment analysis:
>>> import argparse
>>> import anvio.genomedescriptions as g
>>> args = argparse.Namespace(external_genomes=external_genomes_path, annotation_source='KOfam')
>>> groups = {'adoles': ['B_adolescentis', 'B_adolescentis_1_11', 'B_adolescentis_22L', 'B_adolescentis_6', 'B_adolescentis_ATCC_15703'],
'lactis': ['B_animalis', 'B_lactis_AD011', 'B_lactis_ATCC_27673', 'B_lactis_B420', 'B_lactis_BB_12', 'B_lactis_BF052'],
'longum': ['B_longum', 'B_longum_AH1206', 'B_longum_BBMN68', 'B_longum_BORI', 'B_longum_CCUG30698', 'B_longum_GT15']}
>>> facc = g.AggregateFunctions(args, layer_groups=groups)
>>> facc.report_functions_per_group_stats(output_file_path)
For other uses of this class, see the `functional` mode in the interactive class
which is accessed by anvi-display-functions, or anvi-script-gen-functions-per-group-stats-output
that gives access to `report_functions_per_group_stats` function to generate the
functions across groups output.
Parameters
==========
args : argparse.Namespace object
See the class header for options.
layer_groups : dict
When provided, the class will recognize that genomes belong to distinct groups
and will prepare grouped frequency and presence-absence dicts, as well. This
can ALTERNATIVELY be defined through a TAB-delimited input file passed through
args.
"""
def __init__(self, args, layer_groups=None, skip_sanity_check=False, skip_init=False, r=run, p=progress):
self.args = args
self.run = r
self.progress = p
A = lambda x: args.__dict__[x] if x in args.__dict__ else None
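# `A` is a small convenience getter: it returns `args.<x>` when the attribute is present and
# None otherwise, so optional command line parameters can be read without extra boilerplate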
self.genomes_storage_path = A('genomes_storage')
self.external_genomes_path = A('external_genomes')
self.internal_genomes_path = A('internal_genomes')
self.function_annotation_source = A('annotation_source')
self.min_occurrence = A('min_occurrence') or 1
self.aggregate_based_on_accession = A('aggregate_based_on_accession') or False
self.aggregate_using_all_hits = A('aggregate_using_all_hits') or False
self.layer_groups_input_file_path = A('groups_txt') or False
self.print_genome_names_and_quit = A('print_genome_names_and_quit') or False
self.functional_occurrence_table_output_path = A('functional_occurrence_table_output')
self.functional_enrichment_output_path = A('output_file')
# -----8<-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----
# these are some primary data structures this class reports
# remember, 'key' here can be accession ids, or function names
# depending on `self.aggregate_based_on_accession`
self.hash_to_key = {}
# this variable makes sure even if functions are aggregated
# using accession ids, there is a way to resolve function
# names that correspond to each item. This is going to be a
# life saver while trying to summarize things
self.hash_to_function_dict = {}
# distribution of 'keys' (i.e., accession ids or functions)
# across genomes based on the frequency of observation or
# presence absence. Having two dictionaries for this sounds
# stupid at first (because it is), since the presence/absence
# data can always be recovered from the frequency data. but
# the memory footprint of these dicts is always going to be
# nothing and will save additional steps in places where an
# instance is used.
self.functions_across_layers_frequency = {}
self.functions_across_layers_presence_absence = {}
# just like the previous two, but rather than genome names as
# layers, these dicts will be tracking 'groups' as defined by
# the member variable, `self.layer_groups`. please note that
# the presence-absence dictionary will not be composed of
# binary variables, but will have the sum of presence absence
# of a given function across all layers in a given group
self.functions_across_groups_frequency = {}
self.functions_across_groups_presence_absence = {}
# two additional dicts to always be able to convert accession
# ids to function names and vice versa. remember, an accession
# id will resolve to a single function name (unless the provider
# of function names did not screw things up), but a function
# name can resolve to multiple accession ids, hence the latter
# dict will contain, for each of its keys, a list.
self.accession_id_to_function_dict = {}
self.function_to_accession_ids_dict = {}
# to keep track of all layer names and where they are coming from.
self.layer_names_considered = set({})
self.layer_names_from_internal_genomes = []
self.layer_names_from_external_genomes = []
self.layer_names_from_genomes_storage = []
# if there are 'groups' defined through the `layer_groups` variable
# or through a `self.layer_groups_input_file_path`, this class will
# automatically perform a functional enrichment analysis and will report
# its output in the following dictionary.
self.functional_enrichment_stats_dict = None
# -----8<-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----
# this will summarize what happened in a text form.
self.summary_markdown = None
# -----8<-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----
# Here we will quickly deal with layer groups during the initialization of the class
# this section of the init will establish a proper `self.layer_groups` variable for
# later use.
self.layer_name_to_group_name = {}
self.layer_groups_defined = False
self.layer_groups = None
if layer_groups or self.layer_groups_input_file_path:
if layer_groups and not isinstance(layer_groups, dict):
raise ConfigError("The variable `layer_groups` is supposed to be of type `dict`.")
if self.layer_groups_input_file_path and layer_groups:
raise ConfigError("You can either specify layer groups by passing a dictionary, or "
"you can use the `layer_groups_input_file_path` argument, but not "
"both :/")
if self.layer_groups_input_file_path:
self.layer_name_to_group_name, self.layer_groups = utils.get_groups_txt_file_as_dict(self.layer_groups_input_file_path)
elif layer_groups:
self.layer_groups = layer_groups
# Finally, last AND least, a small helper dictionary we will use if there are groups defined
# by the user:
if self.layer_groups:
for group_name in self.layer_groups:
for layer_name in self.layer_groups[group_name]:
self.layer_name_to_group_name[layer_name] = group_name
if self.layer_groups:
self.layer_groups_defined = True
group_names = sorted(list(self.layer_groups.keys()))
self.run.info('Groups found and parsed', ', '.join(group_names))
# -----8<-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----8<-----
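# key hashes carry an 'acc_' or 'func_' prefix so a hash computed while aggregating by
# accession can never be mistaken for one computed while aggregating by function name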
self.key_hash_prefix = f"{'acc_' if self.aggregate_based_on_accession else 'func_'}"
self.K = lambda: 'accession ID' if self.aggregate_based_on_accession else 'function'
self.V = lambda: 'function' if self.aggregate_based_on_accession else 'accession ID'
self.sanity_checked = False
self.initialized = False
if not skip_sanity_check:
self.sanity_check()
if not skip_init:
self.init()
def init(self):
if self.initialized:
raise ConfigError("Soemone already called the init function on this instance. You can't do it again. "
"Go get your own instance :(")
# populate main dictionaries
self._init_functions_from_int_ext_genomes()
self._init_functions_from_genomes_storage()
# show the user what genome names are being considered for this analysis
if self.print_genome_names_and_quit:
self.run.info(f"Genome names found (n={len(self.layer_names_considered)})", ' '.join(self.layer_names_considered))
sys.exit()
self._populate_group_dicts() # <-- this has to be called after all genomes are initialized
if self.min_occurrence:
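# occurrence is counted as the number of genomes a key shows up in (presence/absence),
# not the total number of gene calls annotated with it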
num_occurrence_of_keys = [(c, sum(self.functions_across_layers_presence_absence[c].values())) for c in self.functions_across_layers_presence_absence]
keys_to_remove = set([key for (key, frequency) in num_occurrence_of_keys if frequency < self.min_occurrence])
# the following if block takes care of cleaning up both `self.accession_id_to_function_dict` and
# `self.function_to_accession_ids_dict` dicts in a mindful fashion in addition to the cleanup of
# all other dicts.
if len(keys_to_remove):
for key in keys_to_remove:
self.functions_across_layers_frequency.pop(key)
self.functions_across_layers_presence_absence.pop(key)
if self.layer_groups_defined:
self.functions_across_groups_frequency.pop(key)
self.functions_across_groups_presence_absence.pop(key)
self.hash_to_key.pop(key) if key in self.hash_to_key else None
# these are the tricky ones since how this step should be handled will depend on
# what is key and what is value in the instance configuration:
if self.aggregate_based_on_accession:
if key in self.hash_to_key:
accession = self.hash_to_key[key]
function = self.accession_id_to_function_dict[accession]
self.accession_id_to_function_dict.pop(accession)
self.function_to_accession_ids_dict[function].pop(accession)
if not len(self.function_to_accession_ids_dict[function]):
self.function_to_accession_ids_dict.pop(function)
else:
if key in self.function_to_accession_ids_dict:
accessions = self.function_to_accession_ids_dict[key]
self.function_to_accession_ids_dict.pop(key)
for accession in accessions:
self.accession_id_to_function_dict.pop(accession)
self.run.warning(f"As per your request, anvi'o removed {len(keys_to_remove)} {self.K()}s found in"
f"{self.function_annotation_source} from downstream analyses since they occurred "
f"in less than {self.min_occurrence} genomes.")
if self.layer_groups:
self.do_functional_enrichment_analysis()
self.update_summary_markdown()
self.initialized = True
def sanity_check(self):
if self.functional_occurrence_table_output_path:
filesnpaths.is_output_file_writable(self.functional_occurrence_table_output_path)
if self.functional_enrichment_output_path:
filesnpaths.is_output_file_writable(self.functional_enrichment_output_path)
if not self.function_annotation_source:
raise ConfigError("Someone didn't specify any function annotation source and ended up in a bad "
"place. Aggregating functions require a source for functional annotations.")
if not self.external_genomes_path and not self.internal_genomes_path and not self.genomes_storage_path:
raise ConfigError("You must provide at least one source of genomes to this class :/")
if self.min_occurrence and not isinstance(self.min_occurrence, int):
raise ConfigError("Obviously, --min-occurrence must be an integer.")
if self.min_occurrence < 1:
raise ConfigError(f"What do you have in mind when you say I want my functions to occur in at least {self.min_occurrence} genomes?")
if self.layer_groups_defined:
groups_with_single_layers = set([])
if not len(self.layer_groups) > 1:
raise ConfigError("Layer groups must have two or more groups.")
for layer_group in self.layer_groups:
if not isinstance(self.layer_groups[layer_group], list):
raise ConfigError("Each layer group must be composed list of layer names :(")
if not len(self.layer_groups[layer_group]) > 1:
groups_with_single_layers.add(layer_group)
if len(set(self.layer_groups[layer_group])) != len(self.layer_groups[layer_group]):
raise ConfigError("Items in each layer group must be unique :/")
if len(groups_with_single_layers):
self.run.warning(f"In an ideal world, each group would describe at least two layer names. It is not "
f"the case for {P('this group', len(groups_with_single_layers), alt='these groups')}: "
f"{', '.join(groups_with_single_layers)}. That is OK and anvi'o will continue with this "
f"analysis, but if something goes wrong with your stats or whatever, you will remember "
f"this moment and go like, \"Hmm. That's why my adjusted q-values are like one point zero 🤔\".")
# sanity check 3000 -- no joker shall pass:
list_of_layer_names_lists = list(self.layer_groups.values())
for i in range(0, len(list_of_layer_names_lists) - 1):
for j in range(i + 1, len(list_of_layer_names_lists)):
co_occurring_names = set(list_of_layer_names_lists[i]).intersection(set(list_of_layer_names_lists[j]))
if len(co_occurring_names):
raise ConfigError(f"Layer names should occur in only one group, but AS YOU CAN GUESS BY NOW, that is not the case "
f"with your groups :/ At the least, the layer name '{co_occurring_names.pop()}' occurs in more than "
f"one group.")
self.sanity_checked = True
def update_combined_functions_dicts(self, genome_name, accession, function):
"""Modify accession dicts with new things.
This function is necessary to avoid redundant code to handle function dicts of different kinds
we get from int/external genomes and genome storage. a redesign of the genome storage will likely
fix this problem in the future by unifying how functions are collected from different anvi'o databases.
"""
if genome_name not in self.layer_names_considered:
self.layer_names_considered.add(genome_name)
key, value = (accession, function) if self.aggregate_based_on_accession else (function, accession)
if not key or not len(key):
raise ConfigError(f"Anvi'o is very sorry to tell you that the annotation source you have chosen "
f"here, '{self.function_annotation_source}', seem to include "
f"{self.V()}s with no {self.K()}s. Here is one example function with no "
f"{self.K()}: '{value}'. You will have to choose another annotation source :(")
if not function:
raise ConfigError(f"It saddens anvi'o to let you know that there are some {self.V}s in "
f"'{self.function_annotation_source}' that clearly are blank. Here is an "
f"example {self.K()} that has a blank {self.V()}: '{key}'. Either you "
f"need to choose another annotation source, or fix this problem with the "
f"existing annotations from {self.function_annotation_source} by using a "
f"combination of `anvi-export-functions` and `anvi-import-functions` (which "
f"is totally doable and you certainly can do it).")
if self.aggregate_using_all_hits:
pass
else:
key = key.split('!!!')[0]
value = value.split('!!!')[0]
# we wish to keep track of actual accessions and functions, too:
accession, function = (key, value) if self.aggregate_based_on_accession else (value, key)
# from now on we will only work with hashes of our keys, whether the keys here are function names or
# accession ids as defined by self.aggregate_based_on_accession boolean. this is a necessary
# complexity because function names are free text, can be very long, include weird characters. and
# when we cluster data with those keys, some characters will be replaced with others or otherwise
# they will break the newick file format and so on. using key hashes will make sure we don't get
# screwed by that, and we will always use the lookup dict `self.hash_to_key` to find out what was
# our key (and that's exactly what the next few lines of code do here):
if key in self.hash_to_key:
key_hash = self.hash_to_key[key]
else:
key_hash = self.key_hash_prefix + hashlib.sha224(key.encode('utf-8')).hexdigest()[0:12]
self.hash_to_key[key] = key_hash
self.hash_to_function_dict[key_hash] = {self.function_annotation_source: function}
# --
if key_hash not in self.functions_across_layers_frequency:
self.functions_across_layers_frequency[key_hash] = Counter({})
self.functions_across_layers_presence_absence[key_hash] = Counter({})
self.functions_across_layers_frequency[key_hash][genome_name] += 1
self.functions_across_layers_presence_absence[key_hash][genome_name] = 1
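# e.g. self.functions_across_layers_frequency['func_ab12cd34ef56']['genome_01'] -> number of
# genes annotated with that function in genome_01 (the hash and genome name here are made up)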
if accession not in self.accession_id_to_function_dict:
self.accession_id_to_function_dict[accession] = {self.function_annotation_source: function}
if function not in self.function_to_accession_ids_dict:
self.function_to_accession_ids_dict[function] = {self.function_annotation_source: set([accession])}
else:
self.function_to_accession_ids_dict[function][self.function_annotation_source].add(accession)
return
def check_layer_names(self, layer_names=[]):
if not isinstance(layer_names, list):
raise ConfigError("`layer_names` must be of type list :/")
already_in_the_dict = [g for g in layer_names if g in self.layer_names_considered]
if len(already_in_the_dict):
raise ConfigError(f"Anvi'o is not happy because there are some genome or metagenome names that are not unique "
f"across all input databases :/ Here is an example: {already_in_the_dict[0]}.")
else:
# you good fam
pass
def _init_functions_from_int_ext_genomes(self):
if not self.external_genomes_path and not self.internal_genomes_path:
return
g = GenomeDescriptions(self.args, run=terminal.Run(verbose=False))
g.load_genomes_descriptions()
g.init_functions()
self.layer_names_from_internal_genomes = copy.deepcopy(g.internal_genome_names)
self.layer_names_from_external_genomes = copy.deepcopy(g.external_genome_names)
for genome_name in g.genomes:
self.check_layer_names([genome_name])
gene_functions_in_genome_dict, _, _ = g.get_functions_and_sequences_dicts_from_contigs_db(genome_name, requested_source_list=[self.function_annotation_source], return_only_functions=True)
# reminder, an entry in gene_functions_in_genome_dict looks like this:
# 2985: {'COG20_PATHWAY': ('COG0073!!!COG0143', 'Aminoacyl-tRNA synthetases', 0)}
for entry in gene_functions_in_genome_dict.values():
accession, function, e_value = entry[self.function_annotation_source]
self.update_combined_functions_dicts(genome_name, accession, function)
def _init_functions_from_genomes_storage(self):
if not self.genomes_storage_path:
return
from anvio.genomestorage import GenomeStorage
g = GenomeStorage(storage_path=self.genomes_storage_path, function_annotation_sources=[self.function_annotation_source], run=terminal.Run(verbose=False), progress=self.progress, skip_init=True)
# make sure we are not overwriting existing genome names in int or ext genomes:
genome_names_in_storage_db = list(g.db.get_single_column_from_table(t.genome_info_table_name, 'genome_name', unique=True))
self.layer_names_from_genomes_storage = copy.deepcopy(genome_names_in_storage_db)
self.check_layer_names(genome_names_in_storage_db)
gene_functions_in_genomes_dict, _ = g.get_gene_functions_in_genomes_dict()
for entry in gene_functions_in_genomes_dict.values():
# an entry in gene_functions_in_genomes_dict looks like this:
# 72645: {'genome_name': 'B_lactis_BF052', 'gene_callers_id': 443, 'source': 'COG20_PATHWAY', 'accession': 'COG0207', 'function': 'Thymidylate biosynthesis', 'e_value': 4.2e-149}
genome_name, accession, function = entry['genome_name'], entry['accession'], entry['function']
self.update_combined_functions_dicts(genome_name, accession, function)
def _populate_group_dicts(self):
if not self.layer_groups_defined:
return
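# roll per-genome counts up into per-group counts; as noted in the class header, the
# 'presence_absence' dict at the group level stores how many layers in each group carry
# the function rather than a binary flag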
for key_hash in self.functions_across_layers_frequency:
if key_hash not in self.functions_across_groups_frequency:
self.functions_across_groups_frequency[key_hash] = Counter({})
self.functions_across_groups_presence_absence[key_hash] = Counter({})
for layer_name in self.layer_names_considered:
if layer_name in self.layer_name_to_group_name:
if layer_name in self.functions_across_layers_frequency[key_hash]:
group_name = self.layer_name_to_group_name[layer_name]
self.functions_across_groups_frequency[key_hash][group_name] += self.functions_across_layers_frequency[key_hash][layer_name]
self.functions_across_groups_presence_absence[key_hash][group_name] += 1
def update_summary_markdown(self):
G = lambda x: '\n'.join(['* %s' % l for l in x]) if len(x) else "*None :/*"
self.summary_markdown = (f"### Quick overview\nUsing the function annotation source **'{self.function_annotation_source}'**, anvi'o "
f"aggregated **{len(self.hash_to_key)} unique {self.K()}s** that occurred in **at least {self.min_occurrence}** "
f"of the total {len(self.layer_names_considered)} *layers*. Here we use the term 'layer' instead of 'genomes', "
f"since what anvi'o assumes to be a genome might be a metagenome depending on the contigs database you have provided. "
f"If you know what you have, feel free to replace the term 'layer' with 'genome' in your mind.")
self.summary_markdown += (f"\n\n**Internal genomes** ({P('layer', len(self.layer_names_from_internal_genomes))}):\n\n{G(self.layer_names_from_internal_genomes)}"
f"\n\n**External genomes** ({P('layer', len(self.layer_names_from_external_genomes))}):\n\n{G(self.layer_names_from_external_genomes)}"
f"\n\n**Genomes storage** ({P('layer', len(self.layer_names_from_genomes_storage))}):\n\n{G(self.layer_names_from_genomes_storage)}")
if self.layer_groups:
self.summary_markdown += (f"\n\n### Functional enrichment analysis\nAnvi'o also performed a functional enrichment analysis based on {len(self.layer_groups)} "
f"groups defined by the user. The results of this analysis is shown in your view with the these additional layers: "
f"'enrichment_score', 'unadjusted_p_value', 'adjusted_q_value', 'associated_groups'. You can learn more about the "
f"details of the meaning of these columns [here](https://merenlab.org/software/anvio/help/main/artifacts/functional-enrichment-txt/). "
f"Here is how those groups were defined:")
for group_name in self.layer_groups:
self.summary_markdown += (f"\n\n**Group '{group_name}'** ({P('layer', len(self.layer_groups[group_name]))}):\n\n{G(self.layer_groups[group_name])}")
def do_functional_enrichment_analysis(self):
"""Performs functional enrichment analysis if user defined layer groups.
This function fills in the variable `self.functional_enrichment_stats_dict` so
the downstream analyses can use it to do fancy things.
"""
if not self.layer_groups:
raise ConfigError("Functional enrichment analysis requires the `self.layer_groups` to be "
"initialized. But someone called this function without first intializing "
"groups :/ ")
# FIXME: this is kind of a stupid design. we create this directory even if the user has declared output file names
# for both the functional occurrence table output and functional enrichment output.
output_directory = filesnpaths.get_temp_directory_path()
if not self.functional_occurrence_table_output_path:
self.functional_occurrence_table_output_path = os.path.join(output_directory, 'FUNC_OCCURENCE_STATS.txt')
if not self.functional_enrichment_output_path:
self.functional_enrichment_output_path = os.path.join(output_directory, 'FUNC_ENRICHMENT_OUTPUT.txt')
# get functions per group stats file
self.report_functions_per_group_stats(self.functional_occurrence_table_output_path, quiet=True)
# run the enrichment analysis
self.functional_enrichment_stats_dict = utils.run_functional_enrichment_stats(functional_occurrence_stats_input_file_path=self.functional_occurrence_table_output_path,
enrichment_output_file_path=self.functional_enrichment_output_path,
run=self.run,
progress=self.progress)
def report_functions_per_group_stats(self, output_file_path, quiet=False):
"""A function to summarize functional occurrence for groups of genomes.
Please note that this function will not report functions that are associated
with ALL groups.
"""
filesnpaths.is_output_file_writable(output_file_path)
if not self.layer_groups_defined:
raise ConfigError("No groups seem to have been defined. This function is useless without :/")
group_names = sorted(list(self.layer_groups.keys()))
num_groups = len(group_names)
group_counts = dict([(g, len(self.layer_groups[g])) for g in group_names])
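# e.g. with the groups shown in the class docstring this would be something like
# {'adoles': 5, 'lactis': 6, 'longum': 6}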
d = {}
for key_hash in self.functions_across_groups_presence_absence:
# learn which groups are associated with this function
associated_groups = [g for g in group_names if self.functions_across_groups_presence_absence[key_hash][g]]
# if the function is associated with all groups, simply skip that entry
if len(associated_groups) == num_groups:
continue
function = self.hash_to_function_dict[key_hash][self.function_annotation_source]
d[key_hash] = {}
d[key_hash]['function'] = function
d[key_hash]['accession'] = ','.join(self.function_to_accession_ids_dict[function][self.function_annotation_source])
d[key_hash]['associated_groups'] = ','.join(associated_groups)
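# for every group, report N_<group> (how many layers the group has) and p_<group> (the
# fraction of those layers in which the function occurs)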
for group_name in group_names:
d[key_hash][f"N_{group_name}"] = group_counts[group_name]
if group_name in self.functions_across_groups_presence_absence[key_hash]:
d[key_hash][f"p_{group_name}"] = self.functions_across_groups_presence_absence[key_hash][group_name] / group_counts[group_name]
else:
d[key_hash][f"p_{group_name}"] = 0
if not len(d):
raise ConfigError("Something weird is happening here :( It seems every single function across your genomes "
"is associated with all groups you have defined. There is nothing much anvi'o can work with "
"here. If you think this is a mistake, please let us know.")
static_column_names = ['key', 'function', 'accession', 'associated_groups']
dynamic_column_names = []
[dynamic_column_names.extend([f'p_{g}', f'N_{g}']) for g in group_names]
utils.store_dict_as_TAB_delimited_file(d, output_file_path, headers=static_column_names+dynamic_column_names)
if not quiet:
self.run.info('Functions per group stats file', output_file_path)
| gpl-3.0 | 6,856,453,207,028,735,000 | 55.28811 | 216 | 0.615978 | false |
rven/odoo | addons/website/tests/test_views_inherit_module_update.py | 1 | 4286 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests import standalone
"""
This test ensures `inherit_id` update is correctly replicated on cow views.
The view receiving the `inherit_id` update is either:
1. in a module loaded before `website`. In that case, `website` code is not
loaded yet, so we store the updates to replay the changes on the cow views
once `website` module is loaded (see `_check()`). This test is testing that
part.
2. in a module loaded after `website`. In that case, the `inherit_id` update is
directly replicated on the cow views. That behavior is tested with
`test_module_new_inherit_view_on_parent_already_forked` and
`test_specific_view_module_update_inherit_change` in `website` module.
"""
@standalone('cow_views_inherit')
def test_01_cow_views_inherit_on_module_update(env):
# A B A B
# / \ => / \
# D D' D D'
# 1. Setup hierarchy as comment above
View = env['ir.ui.view']
View.with_context(_force_unlink=True, active_test=False).search([('website_id', '=', 1)]).unlink()
child_view = env.ref('portal.footer_language_selector')
parent_view = env.ref('portal.portal_back_in_edit_mode')
# Change `inherit_id` so the module update will set it back to the XML value
child_view.write({'inherit_id': parent_view.id, 'arch': child_view.arch_db.replace('o_footer_copyright_name', 'text-center')})
# Trigger COW on view
child_view.with_context(website_id=1).write({'name': 'COW Website 1'})
child_cow_view = child_view._get_specific_views()
# 2. Ensure setup is as expected
assert child_cow_view.inherit_id == parent_view, "Ensure test is setup as expected."
# 3. Upgrade the module
portal_module = env['ir.module.module'].search([('name', '=', 'portal')])
portal_module.button_immediate_upgrade()
env.reset() # clear the set of environments
env = env() # get an environment that refers to the new registry
# 4. Ensure cow view also got its inherit_id updated
expected_parent_view = env.ref('portal.frontend_layout') # XML data
assert child_view.inherit_id == expected_parent_view, "Generic view security check."
assert child_cow_view.inherit_id == expected_parent_view, "COW view should also have received the `inherit_id` update."
@standalone('cow_views_inherit')
def test_02_cow_views_inherit_on_module_update(env):
# A B B' A B B'
# / \ => | |
# D D' D D'
# 1. Setup hierarchy as comment above
View = env['ir.ui.view']
View.with_context(_force_unlink=True, active_test=False).search([('website_id', '=', 1)]).unlink()
view_D = env.ref('portal.my_account_link')
view_A = env.ref('portal.message_thread')
# Change `inherit_id` so the module update will set it back to the XML value
view_D.write({'inherit_id': view_A.id, 'arch_db': view_D.arch_db.replace('o_logout_divider', 'discussion')})
# Trigger COW on view
view_B = env.ref('portal.user_dropdown') # XML data
view_D.with_context(website_id=1).write({'name': 'D Website 1'})
view_B.with_context(website_id=1).write({'name': 'B Website 1'})
view_Dcow = view_D._get_specific_views()
# 2. Ensure setup is as expected
view_Bcow = view_B._get_specific_views()
assert view_Dcow.inherit_id == view_A, "Ensure test is setup as expected."
assert len(view_Bcow) == len(view_Dcow) == 1, "Ensure test is setup as expected (2)."
assert view_B != view_Bcow, "Security check to ensure `_get_specific_views` return what it should."
# 3. Upgrade the module
portal_module = env['ir.module.module'].search([('name', '=', 'portal')])
portal_module.button_immediate_upgrade()
env.reset() # clear the set of environments
env = env() # get an environment that refers to the new registry
# 4. Ensure cow view also got its inherit_id updated
assert view_D.inherit_id == view_B, "Generic view security check."
assert view_Dcow.inherit_id == view_Bcow, "COW view should also have received the `inherit_id` update."
| agpl-3.0 | 3,829,057,476,757,138,400 | 49.423529 | 130 | 0.643024 | false |
atleypnorth/bp2dvcm | recorddetail5.py | 1 | 1448 | import datetime
from functools import wraps
from logging import getLogger
LOG = getLogger(__name__)
class RecordDetail(object):
def __init__(self, name=None):
self.name = name
self.status = None
self.start_time = datetime.datetime.now()
self.stop_time = None
self.number_of_records = None
self.other_details = None
def save(self,):
# Do stuff to write to database
LOG.info('saving %s ', self)
def __str__(self):
return ('name: %s start: %s stop : %s status : %d' % (self.name, self.start_time,
self.stop_time, self.status))
def stop(self, status):
LOG.info('stopping')
self.status = status
self.stop_time = datetime.datetime.now()
self.save()
def __call__(self, func):
if self.name is None:
self.name = func.__name__
@wraps(func)
def wrapper(*args, **kwargs):
status = 0
# Need to set this now when the function is called, not when the decorator is evaluated
self.start_time = datetime.datetime.now()
try:
args += (self,)
result = func(*args, **kwargs)
except Exception:
status = -1
raise
finally:
self.stop(status)
return result
return wrapper
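# A minimal usage sketch (names below are made up for illustration). Because the wrapper
# appends the RecordDetail instance to the call arguments, the decorated function receives
# it as its last positional argument and can fill in extra fields before the record is saved:
#
#   @RecordDetail('nightly_load')
#   def load_rows(path, record):
#       rows = open(path).readlines()
#       record.number_of_records = len(rows)
#       return rows
#
#   load_rows('data.csv')  # start/stop times and status are recorded automatically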
| mit | -4,130,902,076,194,367,500 | 27.96 | 99 | 0.517265 | false |
noironetworks/neutron | neutron/conf/policies/qos.py | 1 | 4265 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
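# Each RuleDefault below maps a policy action name to an oslo.policy check string; the
# 'admin_only' and 'regular_user' rules referenced here are expected to be defined in
# neutron's base policy rules.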
rules = [
policy.RuleDefault('get_policy',
'rule:regular_user',
description='Access rule for getting QoS policy'),
policy.RuleDefault('create_policy',
'rule:admin_only',
description='Access rule for creating QoS policy'),
policy.RuleDefault('update_policy',
'rule:admin_only',
description='Access rule for updating QoS policy'),
policy.RuleDefault('delete_policy',
'rule:admin_only',
description='Access rule for deleting QoS policy'),
policy.RuleDefault('get_rule_type',
'rule:regular_user',
description=('Access rule for getting '
'all available QoS rule types')),
policy.RuleDefault('get_policy_bandwidth_limit_rule',
'rule:regular_user',
description=('Access rule for getting '
'QoS bandwidth limit rule')),
policy.RuleDefault('create_policy_bandwidth_limit_rule',
'rule:admin_only',
description=('Access rule for creating '
'QoS bandwidth limit rule')),
policy.RuleDefault('update_policy_bandwidth_limit_rule',
'rule:admin_only',
description=('Access rule for updating '
'QoS bandwidth limit rule')),
policy.RuleDefault('delete_policy_bandwidth_limit_rule',
'rule:admin_only',
description=('Access rule for deleting '
'QoS bandwidth limit rule')),
policy.RuleDefault('get_policy_dscp_marking_rule',
'rule:regular_user',
description=('Access rule for getting '
'QoS dscp marking rule')),
policy.RuleDefault('create_policy_dscp_marking_rule',
'rule:admin_only',
description=('Access rule for creating '
'QoS dscp marking rule')),
policy.RuleDefault('update_policy_dscp_marking_rule',
'rule:admin_only',
description=('Access rule for updating '
'QoS dscp marking rule')),
policy.RuleDefault('delete_policy_dscp_marking_rule',
'rule:admin_only',
description=('Access rule for deleting '
'QoS dscp marking rule')),
policy.RuleDefault('get_policy_minimum_bandwidth_rule',
'rule:regular_user',
description=('Access rule for getting '
'QoS minimum bandwidth rule')),
policy.RuleDefault('create_policy_minimum_bandwidth_rule',
'rule:admin_only',
description=('Access rule for creating '
'QoS minimum bandwidth rule')),
policy.RuleDefault('update_policy_minimum_bandwidth_rule',
'rule:admin_only',
description=('Access rule for updating '
'QoS minimum bandwidth rule')),
policy.RuleDefault('delete_policy_minimum_bandwidth_rule',
'rule:admin_only',
description=('Access rule for deleting '
'QoS minimum bandwidth rule')),
]
def list_rules():
return rules
| apache-2.0 | -4,615,900,779,380,499,000 | 46.921348 | 76 | 0.525205 | false |
QualiSystems/cloudshell-orch-sandbox | cloudshell-orch-core/cloudshell/workflow/orchestration/teardown/default_teardown_orchestrator.py | 1 | 1883 | from cloudshell.workflow.orchestration.sandbox import Sandbox
from cloudshell.workflow.orchestration.teardown.default_teardown_logic import DefaultTeardownLogic
class DefaultTeardownWorkflow(object):
def __init__(self):
pass
def register(self, sandbox):
"""
:param Sandbox sandbox:
:return:
"""
sandbox.logger.info("Adding default teardown orchestration")
sandbox.workflow.add_to_teardown(self.default_teardown, None)
def default_teardown(self, sandbox, components):
"""
:param Sandbox sandbox:
:return:
"""
api = sandbox.automation_api
reservation_details = api.GetReservationDetails(reservationId=sandbox.id, disableCache=True)
api.WriteMessageToReservationOutput(reservationId=sandbox.id,
message='Beginning sandbox teardown')
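        # default teardown order: tear down all routes first, then power off / delete deployed
        # VM resources, and finally clean up connectivity for the reservation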
DefaultTeardownLogic.disconnect_all_routes_in_reservation(api=api,
reservation_details=reservation_details,
reservation_id= sandbox.id,
logger=sandbox.logger)
DefaultTeardownLogic.power_off_and_delete_all_vm_resources(api = api,
reservation_details =reservation_details,
reservation_id=sandbox.id,
logger=sandbox.logger, components=sandbox.components)
DefaultTeardownLogic.cleanup_connectivity(api=api,
reservation_id=sandbox.id,
logger=sandbox.logger)
| apache-2.0 | -3,816,085,675,126,176,000 | 44.926829 | 120 | 0.519384 | false |
ghowland/gomh | _backups/gomh_013.py | 1 | 15900 | #!/usr/bin/env python
import pygame
import sys
import math
SCALE = 0.5
sprite_size = [int(85*SCALE), int(112*SCALE)]
# Initialize the screen
pygame.init()
SCREEN_SIZE = (640, 480)
screen = pygame.display.set_mode(SCREEN_SIZE)
pygame.display.set_caption('Get Off My Head')
#pygame.mouse.set_visible(0)
# Create the background
background = pygame.Surface(screen.get_size())
background = background.convert()
background.fill((0, 0, 0))
# Scrolling here. X and Y (Y to be implemented later...)
SCROLL_OFFSET = [0, 0]
def LoadImage(filename):
image = pygame.image.load(filename)
image = pygame.transform.scale(image, (int(image.get_width()*SCALE), int(image.get_height()*SCALE)))
image = image.convert_alpha()
return image
# Load the SF character sprites
sf_sprites = LoadImage('sf_sprites.png')
# Load scene and it's collision mask
scene = pygame.image.load('sf_back.png')
scene_mask = pygame.image.load('sf_back_mask.png')
# Create Actor Animations Sets (ghetto style, only left/right)
animations = {}
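# each entry maps a (column, row) cell of the sprite sheet to a [facing_right, facing_left]
# pair of surfaces; the left-facing frame is just a horizontal flip of the right-facing one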
# for row in range(0, SF_SPRITE_MATRIX[1]):
# for col in range(0, SF_SPRITE_MATRIX[0]):
for row in range(0, 4):
for col in range(0, 4):
key = (col, row)
face_right = pygame.Surface(sprite_size)
face_right.convert_alpha()
face_right.blit(sf_sprites, (0,0), [sprite_size[0] * col, sprite_size[1] * row, sprite_size[0], sprite_size[1]])
face_left = pygame.transform.flip(face_right, True, False)
animations[key] = [face_right, face_left]
class Actor:
def __init__(self, id, name, start_pos, image_size, image_right, image_left):
print 'Creating Actor: %s: %s: %s' % (id, name, start_pos)
# Specified information
self.id = id
self.name = name
self.pos = start_pos
self.image_size = image_size
self.image_right = image_right
self.image_left = image_left
# Internal information
self.jump = 0
self.fall = 1
self.move_left = False
def __repr__(self):
output = '%s: %s: %s' % (self.id, self.name, self.pos)
return output
def GetSurface(self):
"""Return the current surface for this game.
TODO(g): Animations have not yet been introduced.
"""
if self.move_left:
return self.image_left
else:
return self.image_right
def FindClosestActor(self):
global ACTORS
closest_actor = None
closest_dist = None
for actor in ACTORS:
# Skip yourself
if actor.id == self.id:
continue
dist = self.GetDistanceToActor(actor)
if closest_dist == None or dist < closest_dist:
closest_actor = actor
closest_dist = dist
return closest_actor
def GetDistanceToActor(self, actor):
dist = math.sqrt((actor.pos[0] - self.pos[0])**2 + (actor.pos[1] - self.pos[1])**2 )
return dist
def Update(self):
"""Process all physics and junk"""
#TODO(g): Replace actor. with self., this is a short-cut
actor = self
# Fall, if you can
if actor.jump == 0:
[fall_pos, collision_actor] = MovePosCollide(actor, [0, actor.fall], ACTORS, scene_mask)
if fall_pos != actor.pos:
actor.pos = fall_pos
if actor.fall < 10:
actor.fall += 1
else:
actor.fall = 1
if actor.jump > 0:
hit_the_roof = False
for count in range(0, actor.jump):
[jump_pos, collision_actor] = MovePosCollide(actor, [0, -1], ACTORS, scene_mask)
        # If we hit a ceiling, don't immediately cancel the jump, but reduce it quickly (gives a sense of upward inertia)
if jump_pos == actor.pos:
hit_the_roof = True
break
        # Update the new position, because we didn't hit the roof
else:
actor.pos = jump_pos
# Reduce the jump each frame
if not hit_the_roof:
actor.jump -= 1
else:
actor.jump = actor.jump / 2
if actor.jump <= 2:
actor.jump = 0
def Jump(self):
global ACTORS
global scene_mask
[ground_test_pos, collision_actor] = MovePosCollide(self, [0, 1], ACTORS, scene_mask)
# If we are free to jump
if ground_test_pos == self.pos and self.jump == 0:
# Test if there is an actor (or obstacle) directly above us
[actor_on_head_test_pos, collision_actor] = MovePosCollide(self, [0, -1], ACTORS, scene_mask)
if actor_on_head_test_pos != self.pos:
self.jump = 17
# Else, if there was an actor standing on our head
elif collision_actor != None:
collision_actor.jump += 17
# Create our actors
ACTORS = []
# Automatically load all the character
for row in range(0, 4):
for col in range(0, 4):
key = (col, row)
id = 4*row + col
    # Only create this character if it's not off the screen. That's a lot of characters anyway
start_x = id * 150
if len(ACTORS) < 6:
actor = Actor(id, 'Name: %s' % id, [start_x, 130], sprite_size, animations[key][0], animations[key][1])
ACTORS.append(actor)
# Specify the player, so that we don't use NPC AI for it
PLAYER_ACTOR_ID = 1
# Find player actor
PLAYER_ACTOR = None
for actor in ACTORS:
if actor.id == PLAYER_ACTOR_ID:
PLAYER_ACTOR = actor
break
if PLAYER_ACTOR == None:
raise Exception('WTF? Couldnt find the player actor, you didnt specify the ID correctly or didnt add the player actor in ACTORS')
def TestCollisionByPixelStep(start_pos, end_pos, step, scene, scene_obstacle_color=(255,255,255), log=False):
"""Test for a collision against the scene, starting at start_pos, ending at end_pos, using step to increment.
  NOTE: This function assumes that the bounding box has already been tested against the scene, and may call scene.get_at() with negative coordinates or coordinates beyond the scene size, and crash
"""
# Create deltas (differences) for the step in X and Y depending on the step and start-end positions
# Delta X
if start_pos[0] < end_pos[0]:
dx = 1
elif start_pos[0] > end_pos[0]:
dx = -1
else:
dx = 0
# Delta Y
if start_pos[1] < end_pos[1]:
dy = 1
elif start_pos[1] > end_pos[1]:
dy = -1
else:
dy = 0
# Ensure we can actually move across the line, or fail
if dx == 0 and dy == 0:
raise Exception('What the fuck? The start and end positions are the same... Handle this case later.')
# Determine the distance required to travel in X and Y directions based on the start/end positions
distance_x = abs(start_pos[0] - end_pos[0])
distance_y = abs(start_pos[1] - end_pos[1])
# Start the current position at the starting position
current_pos = [start_pos[0], start_pos[1]]
# Loop until we reach the end position, or find a collision
end_pos_reached = False
has_collision = False
distance_travelled = 0
while not end_pos_reached and not has_collision:
# Get the pixel value at the current position
scene_value = scene.get_at(current_pos)[:3]
if log:
print 'Col: dx: %s dy: %s Start: %s End: %s Cur: %s distX: %s distY: %s Pix: %s' % (dx, dy, start_pos, end_pos, current_pos, distance_x, distance_y, scene_value)
# If the pixel matches the scene_obstacle_color, there is a collision
if scene_value == scene_obstacle_color:
has_collision = True
# Else, increment the current_pos by the dx and dy, multiplied by the step
else:
# Increment the current_pos
current_pos = [current_pos[0] + (dx * step), current_pos[1] + (dy * step)]
distance_travelled += step
      # If the current_pos is past the end_pos, then test the end_pos position, and set end_pos_reached (final test is required)
if distance_x != 0 and distance_travelled >= distance_x:
# We reached the end, but make the last pixel test anyway, just to be sure we have checked them all
end_pos_reached = True
# Get the pixel value at the current position
scene_value = scene.get_at(end_pos)[:3]
# If the pixel matches the scene_obstacle_color, there is a collision
if scene_value == scene_obstacle_color:
has_collision = True
elif distance_y != 0 and distance_travelled >= distance_y:
# We reached the end, but make the last pixel test anyway, just to be sure we have checked them all
end_pos_reached = True
# Get the pixel value at the current position
scene_value = scene.get_at(end_pos)[:3]
# If the pixel matches the scene_obstacle_color, there is a collision
if scene_value == scene_obstacle_color:
has_collision = True
return has_collision
def MovePosCollide(actor, move, all_actors, scene_image, scene_obstacle_color=(255,255,255), log=False):
"""Collision with actors and scene"""
# Collision with scene
scene_pos = MovePosCollideWithScene(actor.pos, move, actor.image_size, scene_image, scene_obstacle_color=(255,255,255), log=log)
if scene_pos == actor.pos:
scene_collision = True
else:
scene_collision = False
# Test against actors
actor_collision = False
collision_with_actor = None
target_pos = [actor.pos[0] + move[0], actor.pos[1] + move[1]]
target_rect = pygame.Rect(target_pos, actor.image_size)
for test_actor in all_actors:
    # Don't count yourself
if actor.id != test_actor.id:
test_actor_rect = pygame.Rect(test_actor.pos, test_actor.image_size)
has_collision = test_actor_rect.colliderect(target_rect)
if has_collision:
#print 'Collision: %s with %s' % (target_pos, test_actor)
actor_collision = True
collision_with_actor = test_actor
break
else:
#print 'Collision: Skip self: %s' % test_actor
pass
  # If we didn't have collisions with scene or actors, return moved position
if not scene_collision and not actor_collision:
return (target_pos, collision_with_actor)
# Else, had collision so return current position
else:
result = [list(actor.pos), collision_with_actor]
#print 'Collision with actor: %s' % result
return result
def MovePosCollideWithScene(pos, move, bounding_box_size, scene_image, scene_obstacle_color=(255,255,255), log=False):
"""Returns a new position [x, y] from pos, moved by move [dx, dy], with
respect to colliding against non-moveable area in scene_image
(non [0,0,0] colors)
Args:
pos: list, [x, y]
move: list, [dx, dy]
bounding_box_size: list, [width, height]
scene_image, Surface object
Returns: list [new_x, new_y], if move is OK, otherwise [old_x, old_y]
"""
has_collision = False
# Create target position, where we want to move to
target_pos = [pos[0] + move[0], pos[1] + move[1]]
# Test for out of scene positions, and block
if target_pos[0] < 0:
has_collision = True
elif target_pos[0] + bounding_box_size[0] >= scene.get_width() - 1:
has_collision = True
elif target_pos[1] < 0:
has_collision = True
elif target_pos[1] + bounding_box_size[1] >= scene.get_height() - 1:
has_collision = True
# Test scene, if we havent already found a collision with the scene border
if not has_collision:
# Test every N pixels, to not miss collisions that are smaller than the bounding box
step_test = 1
#TODO(g): Collision detection with scene_image
# Make all 4 corners of the bounding box
corner_top_left = [target_pos[0], target_pos[1]]
corner_top_right = [target_pos[0] + bounding_box_size[0], target_pos[1]]
corner_bottom_left = [target_pos[0], target_pos[1] + bounding_box_size[1]]
corner_bottom_right = [target_pos[0] + bounding_box_size[0], target_pos[1] + bounding_box_size[1]]
if log:
print ''
# Test the bounding box, using step (N pixels) to get better resolution on obstacle collision
if TestCollisionByPixelStep(corner_top_left, corner_top_right, step_test, scene_image, log=log):
has_collision = True
elif TestCollisionByPixelStep(corner_top_left, corner_bottom_left, step_test, scene_image, log=log):
has_collision = True
elif TestCollisionByPixelStep(corner_top_right, corner_bottom_right, step_test, scene_image, log=log):
has_collision = True
elif TestCollisionByPixelStep(corner_bottom_left, corner_bottom_right, step_test, scene_image, log=log):
has_collision = True
  # If there was a collision, don't move; create a new list from the old list
if has_collision:
final_pos = [pos[0], pos[1]]
# Else, there was not a collision, move the position
else:
final_pos = target_pos
return final_pos
def GetPosScrolled(pos):
global SCROLL_OFFSET
scrolled_pos = [pos[0] - SCROLL_OFFSET[0], pos[1] - SCROLL_OFFSET[1]]
return scrolled_pos
def Draw(surface, target_surface, pos):
target_surface.blit(surface, GetPosScrolled(pos))
while True:
#print 'Actors: %s' % ACTORS
# Enemy AI
for actor in ACTORS:
# Skip the player, process everyone else
if actor.id == PLAYER_ACTOR_ID:
continue
    # Find target actor (the closest)
target_actor = actor.FindClosestActor()
if target_actor == None:
raise Exception('WTF, is there only one?')
# Player is to the Right
if actor.pos[0] < target_actor.pos[0]:
actor.move_left = False
[move_pos, collision_actor] = MovePosCollide(actor, [5, 0], ACTORS, scene_mask)
if move_pos != actor.pos:
actor.pos = move_pos
# Player is to the Left
elif actor.pos[0] > target_actor.pos[0]:
actor.move_left = True
[move_pos, collision_actor] = MovePosCollide(actor, [-5, 0], ACTORS, scene_mask)
if move_pos != actor.pos:
actor.pos = move_pos
# Try to jump, all the time
actor.Jump()
# Event pump
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit(0)
# Player input handling
keys = pygame.key.get_pressed() #checking pressed keys
# Left
if keys[pygame.K_LEFT]:
PLAYER_ACTOR.move_left = True
[PLAYER_ACTOR.pos, collision_actor] = MovePosCollide(PLAYER_ACTOR, [-5, 0], ACTORS, scene_mask)
# Right
if keys[pygame.K_RIGHT]:
PLAYER_ACTOR.move_left = False
[PLAYER_ACTOR.pos, collision_actor] = MovePosCollide(PLAYER_ACTOR, [5, 0], ACTORS, scene_mask)
# Up
if keys[pygame.K_UP]:
PLAYER_ACTOR.Jump()
# [ground_test_pos, collision_actor] = MovePosCollide(PLAYER_ACTOR, [0, 1], ACTORS, scene_mask)
# # If we are free to jump
# if ground_test_pos == PLAYER_ACTOR.pos and PLAYER_ACTOR.jump == 0:
# # Test if there is an actor (or obstacle) directly above us
# [actor_on_head_test_pos, collision_actor] = MovePosCollide(PLAYER_ACTOR, [0, -1], ACTORS, scene_mask)
# if actor_on_head_test_pos != PLAYER_ACTOR.pos:
# PLAYER_ACTOR.jump = 17
# # Else, if there was an actor standing on our head
# elif collision_actor != None:
# collision_actor.jump += 17
# Update all our actors
for actor in ACTORS:
actor.Update()
# If ESC is hit, quit
if keys[pygame.K_ESCAPE]:
sys.exit(0)
# Handle scrolling the world
scrolled_screen_x = [SCROLL_OFFSET[0], SCROLL_OFFSET[0] + SCREEN_SIZE[0]]
boundary_x = int(SCREEN_SIZE[0] / 2.5)
scroll_by_pixels = 3
# Left screen boundary
if PLAYER_ACTOR.pos[0] < scrolled_screen_x[0] + boundary_x:
SCROLL_OFFSET[0] -= scroll_by_pixels
if SCROLL_OFFSET[0] < 0:
SCROLL_OFFSET[0] = 0
# Right screen boundary
elif PLAYER_ACTOR.pos[0] > scrolled_screen_x[1] - boundary_x:
SCROLL_OFFSET[0] += scroll_by_pixels
max_scroll_x = scene.get_width() - SCREEN_SIZE[0]
if SCROLL_OFFSET[0] >= max_scroll_x:
SCROLL_OFFSET[0] = max_scroll_x
# Render background
Draw(scene, background, (0,0))
# Draw all the actors
for actor in ACTORS:
Draw(actor.GetSurface(), background, actor.pos)
# Render to screen
screen.blit(background, (0,0))
pygame.display.flip()
| mit | -1,761,843,542,282,070,800 | 30.673307 | 171 | 0.646164 | false |
greenhost/viper | scripts/license.py | 1 | 7545 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: autoindent tabstop=4 shiftwidth=4 expandtab softtabstop=4 filetype=python
#
# Copyright (c) 2013 Greenhost VOF
# https://greenhost.nl -\- https://greenhost.io
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os, sys
import fnmatch, glob
import shutil
LICENSE = """Copyright (c) 2013 Greenhost VOF and contributors
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
"""
def list_sources(root = '.', extensions = ['*.py', '*.cs', '*.bat']):
matches = []
for root, dirnames, filenames in os.walk(root):
for ext in extensions:
for filename in fnmatch.filter(filenames, ext):
fn = os.path.join(root, filename)
matches.append(fn)
print fn
return matches
def interp_license(fn):
fnout = fn+".out"
with open(fnout, 'w') as fout:
with open(fn, 'r') as fin:
for line in fin:
                if 'Copyright (c) 2013 Greenhost VOF and contributors' in line:
                    prefix = line[0:line.find('Copyright (c) 2013 Greenhost VOF and contributors')]
                    # @todo insert license
                    for l in LICENSE.splitlines():
                        fout.write( "{0}{1}\n".format(prefix, l) )
                else:
                    fout.write(line)
def main():
lst = list_sources('.')
for fn in lst:
interp_license(fn)
shutil.copy(fn+".out", fn)
os.unlink(fn+".out")
if __name__ == '__main__':
main()
| gpl-3.0 | -4,695,716,823,875,586,000 | 55.729323 | 118 | 0.710007 | false |
JanikNex/adventure16 | src/controller/controller_credits.py | 1 | 2012 | from src.gui.gui_credits import *
from src.utilclasses.caudiohandler import *
from src.utilclasses.cjsonhandler import *
class CreditController(object):
def __init__(self, mode='fromStart'):
        # Initialize the credits phase
"""
        Creates a new CreditController object
        :param mode: mode of the credits (unused)
"""
self.phase = 0
self.jsonparser = JSONHandler()
self.jsonparser.openNewFile('credits')
self.text = self.jsonparser.getData()[str(mode)]['text']
        # New GUI object
        self.gui = GUICredits()
        # New AudioHandler
        self.audioHandler = AudioHandler(self.jsonparser.getData()[str(mode)]['audiofile'])
        self.audioHandler.play()
        # Register the event handlers
        self.gui.fenster.protocol("WM_DELETE_WINDOW", self.windowCloseEvent)
        self.gui.fenster.bind("<Return>", self.windowCloseEvent)
        # Start the presentation
        self.gui.fenster.after(6000, self.nextPhase)
        # Activate the window
self.gui.fenster.mainloop()
def windowCloseEvent(self, event=None):
"""
        Stops the audio playback and closes the window
        :param event: event in case the function is called as an event handler
"""
self.audioHandler.stop()
self.gui.fenster.quit()
self.gui.fenster.destroy()
def setText(self, text):
"""
Setzt die Textvariablen auf den Inhalt der übergebenen Liste
:param text: Liste [Titel, Beschreibung]
:type text: list
"""
self.gui.title.set(text[0])
self.gui.description.set(text[1])
def nextPhase(self):
"""
        Starts the next phase of the credits
"""
if self.phase <= (len(self.text) - 1):
self.setText(self.text[self.phase])
self.phase += 1
self.gui.fenster.after(4000, self.nextPhase)
if __name__ == '__main__':
t = CreditController()
| gpl-3.0 | 1,510,442,056,439,928,800 | 33.033898 | 91 | 0.619522 | false |
sahiljain/catapult | dashboard/dashboard/pinpoint/models/change.py | 1 | 5253 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
from dashboard.services import gitiles_service
class NonLinearError(Exception):
"""Raised when trying to find the midpoint of Changes that are not linear."""
class Change(collections.namedtuple('Change',
('base_commit', 'deps', 'patch'))):
"""A particular set of Deps with or without an additional patch applied.
For example, a Change might sync to chromium/src@9064a40 and catapult@8f26966,
then apply patch 2423293002.
"""
def __new__(cls, base_commit, deps=(), patch=None):
"""Create a Change.
Args:
base_commit: A Dep representing the initial commit to sync to. The DEPS
file at that commit implies the default commits for any dependencies.
deps: An optional iterable of Deps to override the dependencies implied
by base_commit.
patch: An optional patch to apply to the Change. A string of the format:
<gerrit or rietveld>/<server hostname>/<change id>/<patch set>
"""
# TODO: deps is unordered. Make it a frozenset.
return super(Change, cls).__new__(cls, base_commit, tuple(deps), patch)
def __str__(self):
string = ' '.join(str(dep) for dep in self.all_deps)
if self.patch:
string += ' + ' + self.patch
return string
@property
def all_deps(self):
return tuple([self.base_commit] + list(self.deps))
@property
def most_specific_commit(self):
return self.deps[-1] if self.deps else self.base_commit
@classmethod
def Midpoint(cls, change_a, change_b):
"""Return a Change halfway between the two given Changes.
A NonLinearError is raised if the Changes are not linear. The Changes are
linear iff they are identical except for exactly one git hash. See
change_test.py for examples of linear and nonlinear Changes.
Args:
change_a: The first Change in the range.
change_b: The last Change in the range.
Returns:
A new Change representing the midpoint.
The commit before the midpoint if the range has an even number of commits.
None if the range is empty, or the Changes are given in the wrong order.
Raises:
NonLinearError: The Changes are not linear.
"""
# TODO: Handle DEPS rolls, including nested ones.
_ValidateChangeLinearity(change_a, change_b)
midpoint_all_deps = []
for dep_a, dep_b in zip(change_a.all_deps, change_b.all_deps):
if dep_a.git_hash == dep_b.git_hash:
midpoint_dep = dep_a
else:
midpoint_dep = Dep.Midpoint(dep_a, dep_b)
if not midpoint_dep:
return None
midpoint_all_deps.append(midpoint_dep)
return cls(midpoint_all_deps[0], midpoint_all_deps[1:], change_a.patch)
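# Illustrative sketch, not part of the original module: how a bisect caller
# might use Change.Midpoint. The repository URL and git hashes below are
# made-up placeholder values, and Dep.Midpoint needs Gitiles access when run.
def _example_bisect_step(repository_url, good_git_hash, bad_git_hash):
  """Returns the Change halfway between two commits of one repository."""
  change_good = Change(Dep(repository_url, good_git_hash))
  change_bad = Change(Dep(repository_url, bad_git_hash))
  return Change.Midpoint(change_good, change_bad)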
class Dep(collections.namedtuple('Dep', ('repository', 'git_hash'))):
"""A git repository pinned to a particular commit."""
def __str__(self):
return self.repository.split('/')[-1] + '@' + self.git_hash[:7]
@classmethod
def Midpoint(cls, dep_a, dep_b):
"""Return a Dep halfway between the two given Deps.
Uses Gitiles to look up the commit range.
Args:
dep_a: The first Dep in the range.
dep_b: The last Dep in the range.
Returns:
A new Dep representing the midpoint.
The commit before the midpoint if the range has an even number of commits.
None if the range is empty, or the Deps are given in the wrong order.
Raises:
ValueError: The Deps are in different repositories.
"""
if dep_a.repository != dep_b.repository:
raise ValueError("Can't find the midpoint of Deps in differing "
'repositories: "%s" and "%s"' % (dep_a, dep_b))
commits = gitiles_service.CommitRange(dep_a.repository,
dep_a.git_hash, dep_b.git_hash)
# We don't handle NotFoundErrors because we assume that all Deps either came
# from this method or were already validated elsewhere.
if len(commits) <= 1:
return None
commits = commits[1:] # Remove dep_b from the range.
return cls(dep_a.repository, commits[len(commits) / 2]['commit'])
def _ValidateChangeLinearity(change_a, change_b):
if len(change_a.deps) != len(change_b.deps):
raise NonLinearError('Change A has %d deps and Change B has %d deps' %
(len(change_a.deps), len(change_b.deps)))
if change_a.patch != change_b.patch:
raise NonLinearError('Change A has patch "%s" and Change B has patch "%s"'
% (change_a.patch, change_b.patch))
differences = 0
for dep_a, dep_b in zip(change_a.all_deps, change_b.all_deps):
if dep_a.repository != dep_b.repository:
raise NonLinearError('Change A has repository "%s" and '
'Change B has repository "%s"' %
(dep_a.repository, dep_b.repository))
if dep_a.git_hash != dep_b.git_hash:
differences += 1
if differences == 0:
raise NonLinearError('The Changes are the same.')
elif differences > 1:
raise NonLinearError('The Changes have multiple differing commits.')
| bsd-3-clause | 2,869,053,224,633,806,300 | 34.493243 | 80 | 0.650295 | false |
madjar/nox | nox/search.py | 1 | 3136 | import os
import collections
import json
import subprocess
import re
import click
from .cache import region
class NixEvalError(Exception):
pass
def nix_packages_json():
click.echo('Refreshing cache')
try:
output = subprocess.check_output(['nix-env', '-qa', '--json', '--show-trace'],
universal_newlines=True)
except subprocess.CalledProcessError as e:
raise NixEvalError from e
return json.loads(output)
Package = collections.namedtuple('Package', 'attribute name description')
def key_for_path(path):
try:
manifest = os.path.join(path, 'manifest.nix')
with open(manifest) as f:
return f.read()
except (FileNotFoundError, NotADirectoryError):
pass
if os.path.exists(os.path.join(path, '.git')):
return subprocess.check_output('git rev-parse --verify HEAD'.split(),
cwd=path)
click.echo('Warning: could not find a version indicator for {}'.format(path))
return None
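# Illustrative sketch, not part of the original module: the cache key for the
# installed channels combines one key_for_path entry per directory under
# ~/.nix-defexpr, exactly as all_packages below builds it.
def example_cache_key(defexpr='~/.nix-defexpr/'):
    defexpr = os.path.expanduser(defexpr)
    return str({p: key_for_path(defexpr + p) for p in os.listdir(defexpr)})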
def all_packages(force_refresh=False):
defexpr = os.path.expanduser('~/.nix-defexpr/')
paths = os.listdir(defexpr)
key = str({p: key_for_path(defexpr + p) for p in paths})
if force_refresh:
region.delete(key)
packages_json = region.get_or_create(key, nix_packages_json)
return (Package(attr, v['name'], v['meta'].get('description', ''))
for attr, v in packages_json.items())
@click.command()
@click.argument('queries', nargs=-1)
@click.option('--force-refresh', is_flag=True)
def main(queries, force_refresh):
"""Search a package in nix"""
patterns = [re.compile(query, re.IGNORECASE) for query in queries]
try:
results = [p for p in all_packages(force_refresh)
if any((all((pat.search(s) for pat in patterns)) for s in p))]
except NixEvalError:
        raise click.ClickException('An error occurred while running nix (displayed above). Maybe the nixpkgs eval is broken.')
results.sort()
for i, p in enumerate(results, 1):
line = '{} {} ({})\n {}'.format(
click.style(str(i), fg='black', bg='yellow'),
click.style(p.name, bold=True),
click.style(p.attribute, dim=True),
click.style(p.description.replace("\n", "\n ")))
click.echo(line)
if results:
def parse_input(inp):
if inp[0] == 's':
action = 'shell'
inp = inp[1:]
else:
action = 'install'
packages = [results[int(i) - 1] for i in inp.split()]
return action, packages
action, packages = click.prompt('Packages to install',
value_proc=parse_input)
attributes = [p.attribute for p in packages]
if action == 'install':
subprocess.check_call(['nix-env', '-iA', '--show-trace'] + attributes)
elif action == 'shell':
attributes = [a[len('nixpkgs.'):] for a in attributes]
subprocess.check_call(['nix-shell', '-p', '--show-trace'] + attributes)
| mit | 66,833,306,864,362,456 | 32.361702 | 125 | 0.583865 | false |
PrinceNgattaiLam/Trafic | TraficLib/PipelineEval.py | 1 | 4702 | import os
import argparse
import subprocess
import shutil
import time
import sys
# from fiberfileIO import *
from makeDataset import make_fiber_feature
from runStore import run_store
from runClassification import run_classification
start = time.time()
BLUE_BOLD = "\033[1;34m"
YELLOW = "\033[0;33m"
RED = "\033[0;31m"
NC = "\033[0m"
parser = argparse.ArgumentParser()
parser.add_argument('--output_dir', action='store', dest='output_dir', help='Final output file (.vtk or .vtp)',
default='/root/work/DeepLearning/Project/Outputs/')
parser.add_argument('--data_file', action='store', dest='data_file', help='Input folder containing fibers to test',
default='/root/work/DeepLearning/Project/')
parser.add_argument('--checkpoint_dir', action='store', dest='checkpoint_dir', help='Path to restore the model '
'of the network (must be a .ckpt)',
default="/root/work/DeepLearning/Project/Data/CKPT/model3.ckpt")
parser.add_argument('--landmark_file', action='store', dest='landmark_file', help='Landmarks File (.vt[k/p], or .fcsv)',
default="")
parser.add_argument('--multiclass', action='store_true', dest='multiclass', help='Enable the multiclassification training',
default=False)
parser.add_argument('--biclass', action='store_true', dest='biclass', help='Enable the biclassification training',
default=False)
parser.add_argument('--summary_dir', action='store', dest='summary_dir', help='Summary directory ',
default="")
parser.add_argument('--fiber_name', action='store', dest='fiber_name', help='Name of the fiber for the biclassification case ',
default="")
def run_pipeline_eval(data_file, output_dir, landmark_file, checkpoint_dir, summary_dir, num_landmarks, fiber_name="Fiber"):
print "---Preprocessing Dataset..."
sys.stdout.flush()
currentPath = os.path.dirname(os.path.abspath(__file__))
make_dataset_py = os.path.join(currentPath, 'makeDataset.py')
store_py = os.path.join(currentPath, 'runStore.py')
classification_py = os.path.join(currentPath, 'runClassification.py')
env_dir = os.path.join(currentPath, "..", "miniconda2")
prefix = os.path.join(env_dir,"envs","env_trafic","lib","libc6_2.17","lib","x86_64-linux-gnu","ld-2.17.so")
pythonPath = os.path.join(env_dir,"bin","python")
src_dir = os.path.dirname(data_file)
src_name = os.path.basename(data_file)
tmp_dir = os.path.join(currentPath, 'tmp_dir_eval')
tmp_file = os.path.join(tmp_dir, src_name)
make_fiber_feature(data_file, tmp_file, landmark_file, num_landmarks=num_landmarks, classification=True)
# print subprocess.Popen(cmd_make_dataset, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
print "---Storing Dataset..."
sys.stdout.flush()
# cmd_store = [prefix, pythonPath, store_py, "--test_dir", tmp_dir, "--original_dir", src_dir,"--num_landmarks",str(num_landmarks)]
# out, err = subprocess.Popen(cmd_store, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
# print("\nout : " + str(out) + "\nerr : " + str(err))
run_store(test_dir=tmp_dir, original_dir=src_dir, num_landmarks=num_landmarks)
print "---Classifying Dataset..."
sys.stdout.flush()
multi = False
if num_landmarks==5:
multi=False
elif num_landmarks==32:
multi=True
run_classification(tmp_dir, output_dir, checkpoint_dir, summary_dir, fiber_name=fiber_name, multiclass=multi)
# cmd_class = [prefix, pythonPath, classification_py, "--data_dir",tmp_dir,"--output_dir",output_dir,"--checkpoint_dir",checkpoint_dir,"--summary_dir",summary_dir, "--fiber_name", fiber_name]
# if num_landmarks == 32:
# cmd_class.append("--multiclass")
# out, err = subprocess.Popen(cmd_class, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
# print("\nout : " + str(out) + "\nerr : " + str(err))
shutil.rmtree(tmp_dir)
def main():
args = parser.parse_args()
output_dir = args.output_dir
data_file = args.data_file
checkpoint_dir = args.checkpoint_dir
landmark_file = args.landmark_file
multiclass = args.multiclass
biclass = args.biclass
summary_dir = args.summary_dir
fiber_name = args.fiber_name
if multiclass:
run_pipeline_eval(data_file, output_dir, landmark_file, checkpoint_dir, summary_dir, 32, fiber_name)
elif biclass:
run_pipeline_eval(data_file, output_dir, landmark_file, checkpoint_dir, summary_dir, 5, fiber_name)
if __name__ == '__main__':
try:
main()
except Exception, e:
print ('ERROR, EXCEPTION CAUGHT')
print str(e)
import traceback
traceback.print_exc() | apache-2.0 | -331,976,346,076,911,900 | 45.107843 | 193 | 0.674394 | false |
gersolar/stations | stations/forms.py | 1 | 2146 | # In forms.py...
from django import forms
from stations.models import Configuration
from datetime import datetime
import os
import pytz
from importer import from_csv, from_xls
class DocumentForm(forms.Form,object):
def __init__(self, *args, **kwargs):
super(DocumentForm, self).__init__(*args, **kwargs)
self.fields["configuration"] = forms.ChoiceField(label="Configuration", choices = [(c.id, c) for c in Configuration.actives()])
self.fields["end"] = forms.DateTimeField(label="End configuration", initial=datetime.utcnow().replace(tzinfo=pytz.UTC))
self.fields["backup"] = forms.FileField(label='Select the file with measurements', help_text='max. 42 megabytes')
self.fields["between"] = forms.IntegerField(label='Between')
self.fields["refresh_presision"] = forms.IntegerField(label='Refresh presision')
def get_configuration(self, request):
label = self.cleaned_data['configuration']
configuration = dict(self.fields['configuration'].choices)[int(label)]
end = self.cleaned_data["end"].replace(tzinfo=pytz.UTC)
between = self.cleaned_data["between"]
refresh_presision = self.cleaned_data["refresh_presision"]
rows, backup_name = self.process_rows(request, configuration)
        return (None if not label or not backup_name or len(rows) == 0 else configuration), end, rows, between, refresh_presision
def process_rows(self,request,configuration):
f = request.FILES['backup']
backup_name = configuration.get_backup_filename(f.name)
with open(backup_name, 'wb') as destination:
for chunk in f.chunks():
destination.write(chunk)
try:
return getattr(self, "t_%s_to_rows" % backup_name.split(".")[-1])(backup_name), backup_name
except Exception:
os.remove(backup_name)
return [], None
def t_csv_to_rows(self, filename):
utc_diff = -3
timestamp_col = 0
channel = 1
skip_rows = 3
return from_csv(filename, utc_diff, timestamp_col, channel, skip_rows)
def t_xls_to_rows(self, filename):
utc_diff = -3
i_sheet = 1
x_year = 1
x_julian = 2
x_timestamp = 3
x_value = 9
y_from = 10
return from_xls(filename, utc_diff, i_sheet, x_year, x_julian, x_timestamp, x_value, y_from) | mit | -6,600,273,940,050,515,000 | 37.339286 | 129 | 0.71575 | false |
wsdream/WS-DREAM | benchmarks/commons/evallib.py | 1 | 6133 | ########################################################
# evallib.py: common functions for evaluator.py
# Author: Jamie Zhu <jimzhu@GitHub>
# Created: 2015/8/17
# Last updated: 2015/8/30
########################################################
import numpy as np
from numpy import linalg as LA
import os, sys, time
from commons.utils import logger
import cPickle as pickle
import random
#======================================================#
# Function to compute the evaluation metrics
#======================================================#
def evaluate(testMatrix, recoveredMatrix, para):
(testVecX, testVecY) = np.where(testMatrix > 0)
testVec = testMatrix[testVecX, testVecY]
estiVec = recoveredMatrix[testVecX, testVecY]
evalResult = errMetric(testVec, estiVec, para['metrics'])
return evalResult
#======================================================#
# Function to remove the entries of data matrix
# Return the trainMatrix and testMatrix
#======================================================#
def removeEntries(matrix, density, seedId):
(vecX, vecY) = np.where(matrix > 0)
vecXY = np.c_[vecX, vecY]
numRecords = vecX.size
numAll = matrix.size
random.seed(seedId)
randomSequence = range(0, numRecords)
random.shuffle(randomSequence) # one random sequence per round
numTrain = int(numAll * density)
# by default, we set the remaining QoS records as testing data
numTest = numRecords - numTrain
trainXY = vecXY[randomSequence[0 : numTrain], :]
testXY = vecXY[randomSequence[- numTest :], :]
trainMatrix = np.zeros(matrix.shape)
trainMatrix[trainXY[:, 0], trainXY[:, 1]] = matrix[trainXY[:, 0], trainXY[:, 1]]
testMatrix = np.zeros(matrix.shape)
testMatrix[testXY[:, 0], testXY[:, 1]] = matrix[testXY[:, 0], testXY[:, 1]]
# ignore invalid testing data
idxX = (np.sum(trainMatrix, axis=1) == 0)
testMatrix[idxX, :] = 0
idxY = (np.sum(trainMatrix, axis=0) == 0)
testMatrix[:, idxY] = 0
return trainMatrix, testMatrix
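#======================================================#
# Illustrative sketch, not part of the original module:
# splitting a small random matrix with removeEntries.
# The matrix size, density and seed below are made up.
#======================================================#
def exampleSplit(density=0.2, seedId=1):
    toyMatrix = np.random.rand(6, 8)
    trainMatrix, testMatrix = removeEntries(toyMatrix, density, seedId)
    return trainMatrix, testMatrix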
#======================================================#
# Function to compute the evaluation metrics
#======================================================#
def errMetric(realVec, estiVec, metrics):
result = []
absError = np.abs(estiVec - realVec)
mae = np.average(absError)
for metric in metrics:
if 'MAE' == metric:
result = np.append(result, mae)
if 'NMAE' == metric:
nmae = mae / (np.sum(realVec) / absError.shape)
result = np.append(result, nmae)
if 'RMSE' == metric:
rmse = LA.norm(absError) / np.sqrt(absError.shape)
result = np.append(result, rmse)
if 'MRE' == metric or 'NPRE' == metric:
relativeError = absError / realVec
if 'MRE' == metric:
mre = np.percentile(relativeError, 50)
result = np.append(result, mre)
if 'NPRE' == metric:
npre = np.percentile(relativeError, 90)
result = np.append(result, npre)
return result
#======================================================#
# Dump the raw result into tmp file
#======================================================#
def dumpresult(outFile, result):
try:
with open(outFile, 'wb') as fid:
pickle.dump(result, fid)
except Exception, e:
logger.error('Dump file failed: ' + outFile)
logger.error(e)
sys.exit()
#======================================================#
# Process the raw result files
#======================================================#
def summarizeResult(para):
path = '%s%s_%s_result'%(para['outPath'], para['dataName'], para['dataType'])
evalResults = np.zeros((len(para['density']), para['rounds'], len(para['metrics'])))
timeResults = np.zeros((len(para['density']), para['rounds']))
k = 0
for den in para['density']:
for rnd in xrange(para['rounds']):
inputfile = path + '_%.2f_round%02d.tmp'%(den, rnd + 1)
with open(inputfile, 'rb') as fid:
data = pickle.load(fid)
os.remove(inputfile)
(evalResults[k, rnd, :], timeResults[k, rnd]) = data
k += 1
saveSummaryResult(path, evalResults, timeResults, para)
#======================================================#
# Save the summary evaluation results into file
#======================================================#
def saveSummaryResult(outfile, result, timeinfo, para):
fileID = open(outfile + '.txt', 'w')
print ('Average result: [%s]'%outfile)
print 'Metrics:', para['metrics']
fileID.write('======== Results summary ========\n')
fileID.write('Metrics: ')
for metric in para['metrics']:
fileID.write('| %s '%metric)
fileID.write('\n')
fileID.write('[Average]\n')
k = 0
for den in para['density']:
fileID.write('density=%.2f: '%den)
avgResult = np.average(result[k, :, :], axis=0)
np.savetxt(fileID, np.matrix(avgResult), fmt='%.4f', delimiter=' ')
print 'density=%.2f: '%den, avgResult
k += 1
fileID.write('\n[Standard deviation (std)]\n')
k = 0
for den in para['density']:
fileID.write('density=%.2f: '%den)
np.savetxt(fileID, np.matrix(np.std(result[k, :, :], axis=0)), fmt='%.4f', delimiter=' ')
k += 1
fileID.write('\n======== Detailed results ========\n')
k = 0
for den in para['density']:
fileID.write('[density=%.2f, %2d rounds]\n'%(den, para['rounds']))
np.savetxt(fileID, np.matrix(result[k, :, :]), fmt='%.4f', delimiter=' ')
fileID.write('\n')
k += 1
fileID.close()
if para['saveTimeInfo']:
fileID = open(outfile + '_time.txt', 'w')
fileID.write('Average running time (second):\n')
k = 0
for den in para['density']:
fileID.write('density=%.2f: '%den)
np.savetxt(fileID, np.matrix(np.average(timeinfo[k, :])), fmt='%.4f', delimiter=' ')
k += 1
fileID.close() | mit | 4,573,878,215,925,734,400 | 36.864198 | 99 | 0.515898 | false |
guillaume-florent/aoc-utils | aocutils/collections.py | 1 | 2126 | # coding: utf-8
r"""OCC collections utilities and conversions"""
from OCC.Core.TColgp import TColgp_Array1OfPnt, TColgp_Array1OfPnt2d
from OCC.Core.TCollection import TCollection_ExtendedString
def to_string(_string):
r"""str to OCC string conversion
Parameters
----------
_string : str
Returns
-------
TCollection_ExtendedString
"""
return TCollection_ExtendedString(_string)
def to_tcol_(_list, collection_type):
r"""Convert a Python list to OCC.TColgp* collection_type
Parameters
----------
_list : list
collection_type : OCC.TColgp.*
The OCC collection geom_type to convert to
Returns
-------
Handle to collection_type
"""
array = collection_type(1, len(_list) + 1)
for n, i in enumerate(_list):
array.SetValue(n + 1, i)
return array.GetHandle()
def tcol_dim_1(li, _type, start_at_one=False):
r"""function factory for 1-dimensional TCol* types
Parameters
----------
li : list[object]
The list that is used to populate the OCC collection
_type : type
The OCC collection geom_type
start_at_one : bool
Determines if the first index of the OCC collection will be 0 or 1
Returns
-------
_type
"""
if start_at_one:
pts = _type(1, len(li))
for i, element in enumerate(li):
pts.SetValue(i+1, element)
else:
pts = _type(0, len(li)-1)
for i, element in enumerate(li):
pts.SetValue(i, element)
pts.thisown = False
return pts
def point_list_to_tcolgp_array1_of_pnt(li):
r"""Populate a TColgp_Array1OfPnt with a list of points
Parameters
----------
li : list[gp_Pnt]
Returns
-------
TColgp_Array1OfPnt
"""
pts = TColgp_Array1OfPnt(0, len(li) - 1)
for n, i in enumerate(li):
pts.SetValue(n, i)
return pts
def point2d_list_to_tcolgp_array1_of_pnt2d(li):
r"""
Parameters
----------
li : list[gp_Pnt2d]
Returns
-------
TColgp_Array1OfPnt2d
"""
return tcol_dim_1(li, TColgp_Array1OfPnt2d)
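def example_point_array():
    r"""Illustrative sketch, not part of the original module
    Builds an OCC point array from three gp_Pnt instances; the coordinates
    below are arbitrary example values.
    Returns
    -------
    TColgp_Array1OfPnt
    """
    from OCC.Core.gp import gp_Pnt
    points = [gp_Pnt(0., 0., 0.), gp_Pnt(1., 0., 0.), gp_Pnt(1., 1., 0.)]
    return point_list_to_tcolgp_array1_of_pnt(points)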
| lgpl-3.0 | -3,028,877,044,064,268,000 | 19.640777 | 74 | 0.593603 | false |
nehudesi/MSim | module/eventBollinger.py | 1 | 8332 | '''
Version: MRT v3.0
Type: module
Location: C:\MRT3.0\module
Author: Chintan Patel
Email: [email protected]
Bollinger Based Trading
'''
import pandas as pd
import numpy as np
#import math
import copy
import module.qsdateutil as du
import datetime as dt
import module.DataAccess as da
#import module.tsutil as tsu
#import module.EventProfiler as ep
import modDatesReturn
import csv
import basic
def find_events(ls_symbols, d_data):
''' Finding the event dataframe '''
df_close = d_data['actual_close']
# ts_market = df_close['SPY']
print "Finding Events"
# Creating an empty dataframe
df_events = copy.deepcopy(df_close)
df_events = df_events * np.NAN
# Time stamps for the event range
ldt_timestamps = df_close.index
df_close = d_data['close']
df_mean = pd.rolling_mean(d_data['close'], 20)
df_std = pd.rolling_std(d_data['close'], 20)
df_bollinger = (df_close - df_mean) / (df_std)
writer = csv.writer(open('bollingerorders.csv', 'wb'), delimiter=',')
    f_symreturn_cutoff = input('<<< Enter the cutoff in decimal for symbol return today: ')
    f_symyest_cutoff = input('<<< Enter the cutoff in decimal for symbol return yesterday: ')
print '1 -> SYMBOL_RETURN_TODAY > ', f_symreturn_cutoff, 'SYMBOL_RETURN_YESTERDAY < ',f_symyest_cutoff
print '2 -> SYMBOL_RETURN_TODAY < ', f_symreturn_cutoff, 'SYMBOL_RETURN_YESTERDAY > ',f_symyest_cutoff
print '3 -> SYMBOL_RETURN_TODAY > ', f_symreturn_cutoff, 'SYMBOL_RETURN_YESTERDAY > ',f_symyest_cutoff
print '4 -> SYMBOL_RETURN_TODAY <', f_symreturn_cutoff, 'SYMBOL_RETURN_YESTERDAY < ',f_symyest_cutoff
try:
select = input('Select: ')
except ValueError:
basic.print_clrscr()
basic.print_logo()
basic.go_back()
except SyntaxError:
basic.print_clrscr()
basic.print_logo()
basic.go_back()
except NameError:
basic.print_clrscr()
basic.print_logo()
basic.go_back()
for s_sym in ls_symbols:
for i in range(1, len(ldt_timestamps)):
# Calculating the returns for this timestamp
f_symboll_today = df_bollinger[s_sym].ix[ldt_timestamps[i]]
f_symboll_yest = df_bollinger[s_sym].ix[ldt_timestamps[i - 1]]
f_marketbol_today = df_bollinger['SPY'].ix[ldt_timestamps[i]]
# f_marketprice_yest = ts_market.ix[ldt_timestamps[i - 1]]
i_shares = 100
if select == 1:
if f_symboll_today > float(f_symreturn_cutoff) and f_symboll_yest < float(f_symyest_cutoff):
if f_marketbol_today > 1.0:
df_events[s_sym].ix[ldt_timestamps[i]] = 1
row_to_enter = [str(ldt_timestamps[i].year), str(ldt_timestamps[i].month), \
str(ldt_timestamps[i].day), s_sym, 'Buy', i_shares]
writer.writerow(row_to_enter)
try:
time_n = ldt_timestamps[i + 5]
except:
time_n = ldt_timestamps[-1]
row_to_enter = [str(time_n.year), str(time_n.month), \
str(time_n.day), s_sym, 'Sell', i_shares]
writer.writerow(row_to_enter)
elif select == 2:
                if f_symboll_today < float(f_symreturn_cutoff) and f_symboll_yest > float(f_symyest_cutoff):
if f_marketbol_today > 1.0:
df_events[s_sym].ix[ldt_timestamps[i]] = 1
row_to_enter = [str(ldt_timestamps[i].year), str(ldt_timestamps[i].month), \
str(ldt_timestamps[i].day), s_sym, 'Buy', i_shares]
writer.writerow(row_to_enter)
try:
time_n = ldt_timestamps[i + 5]
except:
time_n = ldt_timestamps[-1]
row_to_enter = [str(time_n.year), str(time_n.month), \
str(time_n.day), s_sym, 'Sell', i_shares]
writer.writerow(row_to_enter)
elif select == 3:
                if f_symboll_today > float(f_symreturn_cutoff) and f_symboll_yest > float(f_symyest_cutoff):
if f_marketbol_today > 1.0:
df_events[s_sym].ix[ldt_timestamps[i]] = 1
row_to_enter = [str(ldt_timestamps[i].year), str(ldt_timestamps[i].month), \
str(ldt_timestamps[i].day), s_sym, 'Buy', i_shares]
writer.writerow(row_to_enter)
try:
time_n = ldt_timestamps[i + 5]
except:
time_n = ldt_timestamps[-1]
row_to_enter = [str(time_n.year), str(time_n.month), \
str(time_n.day), s_sym, 'Sell', i_shares]
writer.writerow(row_to_enter)
else:
                if f_symboll_today < float(f_symreturn_cutoff) and f_symboll_yest < float(f_symyest_cutoff):
if f_marketbol_today > 1.0:
df_events[s_sym].ix[ldt_timestamps[i]] = 1
row_to_enter = [str(ldt_timestamps[i].year), str(ldt_timestamps[i].month), \
str(ldt_timestamps[i].day), s_sym, 'Buy', i_shares]
writer.writerow(row_to_enter)
try:
time_n = ldt_timestamps[i + 5]
except:
time_n = ldt_timestamps[-1]
row_to_enter = [str(time_n.year), str(time_n.month), \
str(time_n.day), s_sym, 'Sell', i_shares]
writer.writerow(row_to_enter)
#if f_symboll_today < -2.0 and f_symboll_yest >= -2.0:
#if f_marketbol_today > 1.0:
#df_events[s_sym].ix[ldt_timestamps[i]] = 1
#row_to_enter = [str(ldt_timestamps[i].year), str(ldt_timestamps[i].month), \
#str(ldt_timestamps[i].day), s_sym, 'Buy', i_shares]
#writer.writerow(row_to_enter)
#try:
#time_n = ldt_timestamps[i + 5]
#except:
#time_n = ldt_timestamps[-1]
#row_to_enter = [str(time_n.year), str(time_n.month), \
#str(time_n.day), s_sym, 'Sell', i_shares]
#writer.writerow(row_to_enter)
return df_events
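# Illustrative sketch, not part of the original module: the Bollinger value used
# above is (close - 20 period rolling mean) / (20 period rolling std). This uses
# the same legacy pandas calls as find_events.
def exampleBollingerValues(df_close):
    df_mean = pd.rolling_mean(df_close, 20)
    df_std = pd.rolling_std(df_close, 20)
    return (df_close - df_mean) / df_std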
def eventBollingerMain():
start_month,start_day,start_year,end_month,end_day,end_year = modDatesReturn.get_dates()
dt_start = dt.datetime(start_year, start_month, start_day)
dt_end = dt.datetime(end_year, end_month, end_day)
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt.timedelta(hours=16))
dataobj = da.DataAccess('Yahoo')
ls_symbols = dataobj.get_symbols_from_list('mrtevent')
ls_symbols.append('SPY')
ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
ldf_data = dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
for s_key in ls_keys:
d_data[s_key] = d_data[s_key].fillna(method = 'ffill')
d_data[s_key] = d_data[s_key].fillna(method = 'bfill')
d_data[s_key] = d_data[s_key].fillna(1.0)
find_events(ls_symbols, d_data)
# print "Creating Study"
# ep.eventprofiler(df_events, d_data, i_lookback=20, i_lookforward=20,
# s_filename='MyEventStudy.pdf', b_market_neutral=True, b_errorbars=True,
# s_market_sym='SPY') | agpl-3.0 | 96,520,162,211,974,220 | 44.558659 | 108 | 0.493519 | false |
redox-alpha/omorfi | src/python/omorfi/omor_formatter.py | 1 | 24794 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Formatter to de/format omor style analyses for omrfi."""
# (c) Omorfi contributors <[email protected]> 2015
# see AUTHORS file in top-level dir of this project, or
# <https://github.com/flammie/omorfi/wiki/AUTHORS>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# utils to format apertium style data from omorfi database values
from .error_logging import fail_formatting_missing_for, just_fail
from .formatter import Formatter
from .lexc_formatter import lexc_escape
class OmorFormatter(Formatter):
common_multichars = {
'[ABBR=ABBREVIATION]',
'[ABBR=ACRONYM]',
"[BOUNDARY=CLAUSE]",
'[BOUNDARY=COMPOUND]',
"[BOUNDARY=SENTENCE]",
'[CASE=ABE]',
'[CASE=ABL]',
'[CASE=ACC]',
'[CASE=ADE]',
'[CASE=ALL]',
'[CASE=COM]',
'[CASE=ELA]',
'[CASE=ESS]',
'[CASE=GEN]',
'[CASE=ILL]',
'[CASE=INE]',
'[CASE=INS]',
'[CASE=LAT]',
'[CASE=NOM]',
'[CASE=PAR]',
'[CASE=TRA]',
'[CLIT=HAN]',
'[CLIT=KA]',
'[CLIT=KAAN]',
'[CLIT=KIN]',
'[CLIT=KO]',
'[CLIT=PA]',
'[CLIT=S]',
'[CMP=CMP]',
'[CMP=POS]',
'[CMP=SUP]',
'[COMPOUND_FORM=OMIT]',
'[COMPOUND_FORM=S]',
'[CONJ=ADVERBIAL]',
'[CONJ=COMPARATIVE]',
'[CONJ=COORD]',
'[DRV=IN]',
'[DRV=INEN]',
'[DRV=JA]',
'[DRV=LAINEN]',
'[DRV=LLINEN]',
'[DRV=MA]',
'[DRV=MAISILLA]',
'[DRV=MATON]',
'[DRV=MINEN]',
'[DRV=MPI]',
'[DRV=NUT]',
'[DRV=NUT]',
'[DRV=OI]',
'[DRV=S]',
'[DRV=STI]',
'[DRV=TAR]',
'[DRV=TATTAA]',
'[DRV=TATUTTAA]',
'[DRV=TAVA]',
'[DRV=TON]',
'[DRV=TSE]',
'[DRV=TTAA]',
'[DRV=TTAIN]',
'[DRV=TU]',
'[DRV=U]',
'[DRV=UUS]',
'[DRV=VA]',
'[DRV=VS]',
'[FILTER=NO_PROC]',
'[GUESS=COMPOUND]',
'[GUESS=DERIVE]',
'[INF=A]',
'[INF=E]',
'[INF=MA]',
'[INF=MAISILLA]',
'[INF=MINEN]',
"[LEX=ABE]",
"[LEX=ABL]",
"[LEX=ADE]",
"[LEX=ALL]",
"[LEX=ELA]",
"[LEX=ILL]",
"[LEX=INE]",
"[LEX=INS]",
"[LEX=PAR]",
"[LEX=STI]",
"[LEX=GEN]",
"[LEX=LAT]",
"[LEX=LOC]",
"[LEX=SEP]",
"[LEX=TTAIN]",
'[MOOD=COND]',
'[MOOD=EVNV]',
'[MOOD=IMPV]',
'[MOOD=INDV]',
'[MOOD=INDV][TENSE=PAST]',
'[MOOD=INDV][TENSE=PRESENT]',
'[MOOD=OPT]',
'[MOOD=POTN]',
'[NEG=CON]',
'[NUM=PL]',
'[NUM=SG]',
'[NUMTYPE=CARD]',
'[NUMTYPE=ORD]',
'[NUMTYPE=FRAC]',
'[NUMTYPE=MULT]',
'[PCP=AGENT]',
'[PCP=NEG]',
'[PCP=NUT]',
'[PCP=VA]',
'[PERS=PE4]',
'[PERS=PL1]',
'[PERS=PL2]',
'[PERS=PL3]',
'[PERS=SG1]',
'[PERS=SG2]',
'[PERS=SG3]',
'[POSITION=PREFIX]',
'[POSITION=SUFFIX]',
'[POSS=3]',
'[POSS=PL1]',
'[POSS=PL2]',
'[POSS=PL3]',
'[POSS=SG1]',
'[POSS=SG2]',
'[POSS=SG3]',
'[PRONTYPE=DEM]',
'[PRONTYPE=IND]',
'[PRONTYPE=INT]',
'[PRONTYPE=PRS]',
'[PRONTYPE=RCP]',
'[PRONTYPE=REL]',
'[PRONTYPE=REC]',
'[PROPER=ARTWORK]',
'[PROPER=CULTGRP]',
'[PROPER=EVENT]',
'[PROPER=FIRST]',
'[PROPER=GEO]',
'[PROPER=LAST]',
'[PROPER=MEDIA]',
'[PROPER=MISC]',
'[PROPER=ORG]',
'[PROPER=PRODUCT]',
'[SEM=COUNTRY]',
'[SEM=CURRENCY]',
'[SEM=EVENT]',
'[SEM=FEMALE]',
'[SEM=GEO]',
'[SEM=INHABITANT]',
'[SEM=LANGUAGE]',
'[SEM=MALE]',
'[SEM=MEASURE]',
'[SEM=MEDIA]',
'[SEM=ORG]',
'[SEM=POLIT]',
'[SEM=TIME]',
'[SEM=TITLE]',
'[STYLE=ARCHAIC]',
'[STYLE=DIALECTAL]',
'[STYLE=NONSTANDARD]',
'[STYLE=RARE]',
'[SUBCAT=ARROW]',
'[SUBCAT=BRACKET]',
"[SUBCAT=BRACKET][POSITION=FINAL]",
"[SUBCAT=BRACKET][POSITION=INITIAL]",
'[SUBCAT=COMMA]',
'[SUBCAT=CONJUNCTION]',
'[SUBCAT=CURRENCY]',
'[SUBCAT=DASH]',
"[SUBCAT=DASH]",
'[SUBCAT=DECIMAL]',
'[SUBCAT=DEMONSTRATIVE]',
'[SUBCAT=DIGIT]',
'[SUBCAT=FINAL]',
'[SUBCAT=INITIAL]',
'[SUBCAT=INTERJECTION]',
'[SUBCAT=INTERROGATIVE]',
'[SUBCAT=MATH]',
'[SUBCAT=NEG]',
"[SUBCAT=NEG]",
'[SUBCAT=OPERATION]',
'[ADPTYPE=POST]',
'[SUBCAT=PREFIX]',
'[ADPTYPE=PREP]',
'[SUBCAT=QUALIFIER]',
'[SUBCAT=QUANTIFIER]',
'[SUBCAT=QUOTATION]',
"[SUBCAT=QUOTATION][POSITION=FINAL]",
"[SUBCAT=QUOTATION][POSITION=INITIAL]",
'[SUBCAT=REFLEXIVE]',
'[SUBCAT=RELATION]',
'[SUBCAT=ROMAN]',
'[SUBCAT=SPACE]',
'[SUBCAT=SUFFIX]',
'[TENSE=PAST]',
'[TENSE=PRESENT]',
'[UPOS=ADJ]',
'[UPOS=ADP]',
'[UPOS=ADV]',
'[UPOS=AUX]',
'[UPOS=DET]',
'[UPOS=INTJ]',
'[UPOS=CONJ]',
'[UPOS=SCONJ]',
'[UPOS=SCONJ][CONJ=COMPARATIVE]',
'[UPOS=SCONJ][CONJ=ADVERBIAL]',
'[UPOS=NOUN]',
'[UPOS=NUM]',
'[UPOS=PRON]',
'[UPOS=PROPN]',
'[UPOS=PUNCT]',
'[UPOS=SYM]',
'[UPOS=VERB]',
'[UPOS=VERB][SUBCAT=NEG]',
'[UPOS=X]',
'[VOICE=ACT]',
'[VOICE=PSS]',
'[WORD_ID=',
"[FOREIGN=FOREIGN]"
}
old_poses = {
'[POS=ADPOSITION]',
'[POS=PUNCTUATION]',
'[POS=PRONOUN]',
'[POS=NUMERAL]',
'[POS=SYMBOL]'
}
allo_multichars = {
'[ALLO=A]',
'[ALLO=AN]',
'[ALLO=EN]',
'[ALLO=HAN]',
'[ALLO=HEN]',
'[ALLO=HIN]',
'[ALLO=HON]',
'[ALLO=HUN]',
'[ALLO=HVN]',
'[ALLO=HYN]',
'[ALLO=HÄN]',
'[ALLO=HÖN]',
'[ALLO=IA]',
'[ALLO=IDEN]',
'[ALLO=IDEN]',
'[ALLO=IEN]',
'[ALLO=IHIN]',
'[ALLO=IIN]',
'[ALLO=IN]',
'[ALLO=ISIIN]',
'[ALLO=ITA]',
'[ALLO=ITTEN]',
'[ALLO=ITÄ]',
'[ALLO=IÄ]',
'[ALLO=JA]',
'[ALLO=JÄ]',
'[ALLO=JEN]',
'[ALLO=NA]',
'[ALLO=ON]',
'[ALLO=SA]',
'[ALLO=SEEN]',
'[ALLO=TA]',
'[ALLO=TEN]',
'[ALLO=TÄ]',
'[ALLO=UN]',
'[ALLO=VN]',
'[ALLO=YN]',
'[ALLO=Ä]',
'[ALLO=ÄN]',
'[ALLO=ÖN]'
}
ktnkav_multichars = {
'[KTN=1]', '[KTN=2]', '[KTN=3]', '[KTN=4]', '[KTN=5]',
'[KTN=6]', '[KTN=7]', '[KTN=8]', '[KTN=9]', '[KTN=10]',
'[KTN=11]', '[KTN=12]', '[KTN=13]', '[KTN=14]', '[KTN=15]',
'[KTN=16]', '[KTN=17]', '[KTN=18]', '[KTN=19]', '[KTN=20]',
'[KTN=21]', '[KTN=22]', '[KTN=23]', '[KTN=24]', '[KTN=25]',
'[KTN=26]', '[KTN=27]', '[KTN=28]', '[KTN=29]', '[KTN=30]',
'[KTN=31]', '[KTN=32]', '[KTN=33]', '[KTN=34]', '[KTN=35]',
'[KTN=36]', '[KTN=37]', '[KTN=38]', '[KTN=39]', '[KTN=40]',
'[KTN=41]', '[KTN=42]', '[KTN=43]', '[KTN=44]', '[KTN=45]',
'[KTN=46]', '[KTN=47]', '[KTN=48]', '[KTN=49]', '[KTN=50]',
'[KTN=51]', '[KTN=52]', '[KTN=53]', '[KTN=54]', '[KTN=55]',
'[KTN=56]', '[KTN=57]', '[KTN=58]', '[KTN=59]', '[KTN=60]',
'[KTN=61]', '[KTN=62]', '[KTN=63]', '[KTN=64]', '[KTN=65]',
'[KTN=66]', '[KTN=67]', '[KTN=68]', '[KTN=69]', '[KTN=70]',
'[KTN=71]', '[KTN=72]', '[KTN=73]', '[KTN=74]', '[KTN=75]',
'[KTN=76]', '[KTN=77]', '[KTN=78]',
'[KTN=1007]', '[KTN=1009]', '[KTN=1010]', '[KTN=1024]', '[KTN=1026]',
'[KAV=A]', '[KAV=B]', '[KAV=C]', '[KAV=D]', '[KAV=E]',
'[KAV=F]', '[KAV=G]', '[KAV=H]', '[KAV=I]', '[KAV=J]',
'[KAV=K]', '[KAV=L]', '[KAV=M]',
'[KAV=N]', '[KAV=O]', '[KAV=P]', '[KAV=T]'}
stuff2omor = {
".sent": "[BOUNDARY=SENTENCE]",
"Aa": "[ALLO=A]",
"Aja": "[ALLO=JA]",
"Ana": "[ALLO=NA]",
"Asa": "[ALLO=SA]",
"Aia": "[ALLO=IA]",
"Ata": "[ALLO=TA]",
"Aita": "[ALLO=ITA]",
"Atä": "[ALLO=TÄ]",
"Aiä": "[ALLO=IÄ]",
"Aita": "[ALLO=ITA]",
"Aitä": "[ALLO=ITÄ]",
"Aitten": "[ALLO=ITTEN]",
"Aiden": "[ALLO=IDEN]",
"Aiin": "[ALLO=IIN]",
"Aihin": "[ALLO=IHIN]",
"Aseen": "[ALLO=SEEN]",
"Aisiin": "[ALLO=ISIIN]",
"Aien": "[ALLO=IEN]",
"Ajen": "[ALLO=JEN]",
"Aten": "[ALLO=TEN]",
"Aan": "[ALLO=AN]",
"Aen": "[ALLO=EN]",
"Ain": "[ALLO=IN]",
"Aon": "[ALLO=ON]",
"Aun": "[ALLO=UN]",
"Ayn": "[ALLO=YN]",
"Aän": "[ALLO=ÄN]",
"Aön": "[ALLO=ÖN]",
"Ahan": "[ALLO=HAN]",
"Ahen": "[ALLO=HEN]",
"Ahin": "[ALLO=HIN]",
"Ahon": "[ALLO=HON]",
"Ahun": "[ALLO=HUN]",
"Ahyn": "[ALLO=HYN]",
"Ahän": "[ALLO=HÄN]",
"Ahön": "[ALLO=HÖN]",
"Aten": "[ALLO=TEN]",
"Ajä": "[ALLO=JÄ]",
"Aä": "[ALLO=Ä]",
"Bc": "[BOUNDARY=COMPOUND]",
"B-": "[COMPOUND_FORM=OMIT]",
"B→": "[POSITION=SUFFIX]",
"B←": "[POSITION=PREFIX]",
"Cma": "[PCP=AGENT]",
"Cmaton": "[PCP=NEG]",
"Cva": "[PCP=VA]",
"Cnut": "[PCP=NUT]",
"Cpos": "[CMP=POS]",
"Ccmp": "[CMP=CMP]",
"Csup": "[CMP=SUP]",
"Dinen": "[DRV=INEN]",
"Dja": "[DRV=JA]",
# "Dmaisilla": "[INF=MAISILLA]",
"Dmaisilla": "[DRV=MAISILLA]",
"Dminen": "[DRV=MINEN]",
"Dtu": "[DRV=TU]",
"Dnut": "[DRV=NUT]",
"Dva": "[DRV=VA]",
"Dtava": "[DRV=TAVA]",
"Dma": "[DRV=MA]",
"Dmaton": "[DRV=MATON]",
"Ds": "[DRV=S]",
"Dsti": "[DRV=STI]",
"Dttaa": "[DRV=TTAA]",
"Dtattaa": "[DRV=TATTAA]",
"Dtatuttaa": "[DRV=TATUTTAA]",
"Dttain": "[DRV=TTAIN]",
"Du": "[DRV=U]",
"Duus": "[DRV=UUS]",
"Dmpi": "",
# "Dmpi": "[DRV=MPI]",
"Din": "",
# "Din": "[DRV=IN]",
"Ia": "[INF=A]",
"Ie": "[INF=E]",
"Ima": "[INF=MA]",
"Iminen": "[INF=MINEN]",
"Ncon": "[NEG=CON]",
"Nneg": "[SUBCAT=NEG]",
"Npl": "[NUM=PL]",
"Nsg": "[NUM=SG]",
"N??": "",
"Osg1": "[POSS=SG1]",
"Osg2": "[POSS=SG2]",
"O3": "[POSS=3]",
"Opl1": "[POSS=PL1]",
"Opl2": "[POSS=PL2]",
"Ppl1": "[PERS=PL1]",
"Ppl2": "[PERS=PL2]",
"Ppl3": "[PERS=PL3]",
"Psg1": "[PERS=SG1]",
"Psg2": "[PERS=SG2]",
"Psg3": "[PERS=SG3]",
"Ppe4": "[PERS=PE4]",
"Qka": "[CLIT=KA]",
"Qs": "[CLIT=S]",
"Qpa": "[CLIT=PA]",
"Qko": "[CLIT=KO]",
"Qkin": "[CLIT=KIN]",
"Qkaan": "[CLIT=KAAN]",
"Qhan": "[CLIT=HAN]",
"Tcond": "[MOOD=COND]",
"Timp": "[MOOD=IMPV]",
"Tpast": "[MOOD=INDV][TENSE=PAST]",
"Tpot": "[MOOD=POTN]",
"Topt": "[MOOD=OPT]",
"Tpres": "[MOOD=INDV][TENSE=PRESENT]",
"Uarch": "[STYLE=ARCHAIC]",
"Udial": "[STYLE=DIALECTAL]",
"Unonstd": "[STYLE=NONSTANDARD]",
"Urare": "[STYLE=RARE]",
"Vact": "[VOICE=ACT]",
"Vpss": "[VOICE=PSS]",
"Xabe": "[CASE=ABE]",
"Xabl": "[CASE=ABL]",
"Xade": "[CASE=ADE]",
"Xall": "[CASE=ALL]",
"Xcom": "[CASE=COM]",
"Xela": "[CASE=ELA]",
"Xess": "[CASE=ESS]",
"Xgen": "[CASE=GEN]",
"Xill": "[CASE=ILL]",
"Xine": "[CASE=INE]",
"Xins": "[CASE=INS]",
"Xnom": "[CASE=NOM]",
"Xpar": "[CASE=PAR]",
"Xtra": "[CASE=TRA]",
"Xlat": "[CASE=LAT]",
"Xacc": "[CASE=ACC]",
"X???": "",
"AUX": "[UPOS=AUX]",
"DET": "[UPOS=DET]",
"NOUN": "[UPOS=NOUN]",
"VERB": "[UPOS=VERB]",
"ADV": "[UPOS=ADV]",
"ADP": "[UPOS=ADP]",
"ADJ": "[UPOS=ADJ]",
"INTJ": "[UPOS=INTJ]",
"CONJ": "[UPOS=CONJ]",
"SCONJ": "[UPOS=SCONJ]",
"PRON": "[UPOS=PRON]",
"SYM": "[UPOS=SYM]",
"NUM": "[UPOS=NUM]",
"PROPN": "[UPOS=PROPN]",
"X": "[UPOS=X]",
"PUNCT": "[UPOS=PUNCT]",
"ABESSIVE": "[LEX=ABE]",
"ABLATIVE": "[LEX=ABL]",
"ADESSIVE": "[LEX=ADE]",
"ALLATIVE": "[LEX=ALL]",
"ELATIVE": "[LEX=ELA]",
"LOCATIVE": "[LEX=LOC]",
"GENITIVE": "[LEX=GEN]",
"ILLATIVE": "[LEX=ILL]",
"INESSIVE": "[LEX=INE]",
"INSTRUCTIVE": "[LEX=INS]",
"PARTITIVE": "[LEX=PAR]",
"SEPARATIVE": "[LEX=SEP]",
"LATIVE": "[LEX=LAT]",
"DERSTI": "[LEX=STI]",
"DERTTAIN": "[LEX=TTAIN]",
"CONJUNCTION": "",
"COORDINATING": "[UPOS=CONJ]",
"COMPARATIVE": "[UPOS=SCONJ][CONJ=COMPARATIVE]",
"PRONOUN": "[POS=PRONOUN]",
"ADVERBIAL": "[UPOS=SCONJ][CONJ=ADVERBIAL]",
"NUMERAL": "[POS=NUMERAL]",
"CARDINAL": "[NUMTYPE=CARD]",
"ORDINAL": "[NUMTYPE=ORD]",
# No [SUBCAT=DIGIT]: avoid multiple SUBCATs in one tagstring & comply
# with FTB1
"DIGIT": "",
"DECIMAL": "[SUBCAT=DECIMAL]",
"ROMAN": "[SUBCAT=ROMAN]",
"QUALIFIER": "[SUBCAT=QUALIFIER]",
"ACRONYM": "[ABBR=ACRONYM]",
"ABBREVIATION": "[ABBR=ABBREVIATION]",
"SUFFIX": "",
# "SUFFIX": "[SUBCAT=SUFFIX]",
"PREFIX": "",
# "PREFIX": "[SUBCAT=PREFIX]",
"INTERJECTION": "[SUBCAT=INTERJECTION]",
"ADPOSITION": "[POS=ADPOSITION]",
"DEMONSTRATIVE": "[PRONTYPE=DEM]",
"QUANTOR": "[SUBCAT=QUANTIFIER]",
"QUANTIFIER": "[SUBCAT=QUANTIFIER]",
"PERSONAL": "[PRONTYPE=PRS]",
"INDEFINITE": "[PRONTYPE=IND]",
# "INDEFINITE": "[SUBCAT=INDEF]",
"INTERROGATIVE": "[PRONTYPE=INT]",
"REFLEXIVE": "[SUBCAT=REFLEXIVE]",
"RELATIVE": "[PRONTYPE=REL]",
"RECIPROCAL": "[PRONTYPE=REC]",
"PL1": "[PERS=PL1]",
"PL2": "[PERS=PL2]",
"PL3": "[PERS=PL3]",
"SG1": "[PERS=SG1]",
"SG2": "[PERS=SG2]",
"SG3": "[PERS=SG3]",
"PE4": "[PERS=PE4]",
"COMP": "[CMP=CMP]",
"SUPERL": "[CMP=SUP]",
"ARCHAIC": "[STYLE=ARCHAIC]",
"DIALECTAL": "[STYLE=DIALECTAL]",
"NONSTANDARD": "[STYLE=NONSTANDARD]",
"RARE": "[STYLE=RARE]",
"TITLE": "[SEM=TITLE]",
"TIME": "[SEM=TIME]",
"CURRENCY": "[SEM=CURRENCY]",
"MEDIA": "[SEM=MEDIA]",
"POLIT": "[SEM=POLIT]",
"MEASURE": "[SEM=MEASURE]",
"MALE": "[SEM=MALE]",
"FEMALE": "[SEM=FEMALE]",
"CULTGRP": "[PROPER=CULTGRP]",
"PRODUCT": "[PROPER=PRODUCT]",
"ARTWORK": "[PROPER=ARTWORK]",
"EVENT": "[PROPER=EVENT]",
"FIRST": "[PROPER=FIRST]",
"LAST": "[PROPER=LAST]",
"GEO": "[PROPER=GEO]",
"ORG": "[PROPER=ORG]",
"MISC": "[PROPER=MISC]",
"COUNTRY": "[SEM=COUNTRY]",
"INHABITANT": "[SEM=INHABITANT]",
"LANGUAGE": "[SEM=LANGUAGE]",
"PUNCTUATION": "[POS=PUNCTUATION]",
"DASH": "[SUBCAT=DASH]",
"SPACE": "[SUBCAT=SPACE]",
"COMMA": "[SUBCAT=COMMA]",
"ARROW": "[SUBCAT=ARROW]",
"PREPOSITION": "[ADPTYPE=PREP]",
"POSTPOSITION": "[ADPTYPE=POST]",
"MULTIPLICATIVE": "[NUMTYPE=MULT]",
"FRACTION": "[NUMTYPE=FRAC]",
"CLAUSE-BOUNDARY": "[BOUNDARY=CLAUSE]",
"SENTENCE-BOUNDARY": "[BOUNDARY=SENTENCE]",
"INITIAL-QUOTE": "[SUBCAT=QUOTATION][POSITION=INITIAL]",
"FINAL-QUOTE": "[SUBCAT=QUOTATION][POSITION=FINAL]",
"INITIAL-BRACKET": "[SUBCAT=BRACKET][POSITION=INITIAL]",
"FINAL-BRACKET": "[SUBCAT=BRACKET][POSITION=FINAL]",
"UNSPECIFIED": "",
"FTB3man": "",
"LEMMA-START": "[WORD_ID=",
"CONJ|VERB": "[UPOS=VERB][SUBCAT=NEG]",
"FTB3MAN": "",
"XForeign": "[FOREIGN=FOREIGN]",
".": "",
"": ""}
def __init__(self, verbose=False, **kwargs):
for stuff, omor in self.stuff2omor.items():
if len(omor) < 2:
continue
elif omor not in self.common_multichars | self.old_poses | \
self.allo_multichars:
just_fail(
"There are conflicting formattings in here!\n" +
omor + " corresponding " + stuff +
" is not a valid defined omor multichar_symbol!")
self.verbose = verbose
self.semantics = True
if 'sem' not in kwargs or not kwargs['sem']:
for k, v in self.stuff2omor.items():
if "SEM=" in v:
self.stuff2omor[k] = ""
self.semantics = False
self.allo = True
if 'allo' not in kwargs or not kwargs['allo']:
for k, v in self.stuff2omor.items():
if "ALLO=" in v:
self.stuff2omor[k] = ""
self.allo = False
self.props = True
if 'props' not in kwargs or not kwargs['props']:
for k, v in self.stuff2omor.items():
if "PROPER=" in v:
self.stuff2omor[k] = ""
self.props = False
self.ktnkav = True
if 'ktnkav' not in kwargs or not kwargs['ktnkav']:
for k, v in self.stuff2omor.items():
if "KTN=" in v or "KAV=" in v:
self.stuff2omor[k] = ""
self.ktnkav = False
self.newparas = True
if 'newparas' not in kwargs or not kwargs['newparas']:
for k, v in self.stuff2omor.items():
if "NEW_PARA=" in v:
self.stuff2omor[k] = ""
self.newparas = False
def stuff2lexc(self, stuff):
if stuff == '0':
return "0"
if stuff in self.stuff2omor:
return self.stuff2omor[stuff]
else:
if self.verbose:
fail_formatting_missing_for(stuff, "omor")
return ""
def analyses2lexc(self, anals):
omorstring = ''
for tag in anals.split('|'):
omorstring += self.stuff2lexc(tag)
return omorstring
def continuation2lexc(self, anals, surf, cont):
omorstring = ''
if 'DIGITS_' in cont and not ('BACK' in cont or 'FRONT' in cont):
omorstring = lexc_escape(surf)
if anals and anals != 'LEMMA-START':
omorstring += ']'
# Collapse DRV=NUT/TU and PCP=NUT to PCP=NUT with full inflection
if anals == 'Dnut':
anals = 'Vact|Cnut'
elif anals == 'Dtu':
anals = 'Vpss|Cnut'
# Collapse DRV=VA/TAVA and PCP=VA to PCP=VA with full inflection
elif anals == 'Dva':
anals = 'Vact|Cva'
elif anals == 'Dtava':
anals = 'Vpss|Cva'
# Collapse DRV=MA and PCP=AGENT to PCP=AGENT with full inflection
elif anals == 'Dma':
anals = 'Cma'
# Collapse DRV=MATON and PCP=NEG to PCP=NEG with full inflection
elif anals == 'Dmaton':
anals = 'Cmaton'
elif ('Cnut' in anals or 'Cva' in anals or 'Cma' in anals or 'Cmaton' in anals) and \
(anals.endswith('Npl') or anals.endswith('Nsg')):
anals = anals + '|Xnom'
tags = anals.split('|')
for tag in tags:
omorstring += self.stuff2lexc(tag)
surf = lexc_escape(surf)
return "%s:%s\t%s ;\n" % (omorstring, surf, cont)
def wordmap2lexc(self, wordmap):
'''
format string for canonical omor format for morphological analysis
'''
if wordmap['stub'] == ' ':
# do not include normal white space for now
return ""
wordmap['stub'] = lexc_escape(wordmap['stub'])
wordmap['analysis'] = "[WORD_ID=%s]" % (lexc_escape(wordmap['lemma']))
wordmap['particle'] = wordmap['particle'].replace('QUALIFIER', 'ADJ')
wordmap['analysis'] += self.stuff2lexc(wordmap['upos'])
if wordmap['is_suffix']:
wordmap['analysis'] += self.stuff2lexc('SUFFIX')
if wordmap['is_prefix']:
wordmap['analysis'] += self.stuff2lexc('PREFIX')
if wordmap['upos'] == 'ADJ':
wordmap['analysis'] += self.stuff2lexc('Cpos')
if wordmap['particle']:
for pclass in wordmap['particle'].split('|'):
wordmap['analysis'] += self.stuff2lexc(pclass)
if wordmap['symbol']:
for subcat in wordmap['symbol'].split('|'):
wordmap['analysis'] += self.stuff2lexc(subcat)
if wordmap['prontype']:
for stuff in wordmap['prontype'].split("|"):
wordmap['analysis'] += self.stuff2lexc(stuff)
if wordmap['lex']:
for stuff in wordmap['lex'].split("|"):
wordmap['analysis'] += self.stuff2lexc(stuff)
if wordmap['abbr']:
for stuff in wordmap['abbr'].split("|"):
wordmap['analysis'] += self.stuff2lexc(stuff)
if wordmap['numtype']:
for stuff in wordmap['numtype'].split("|"):
wordmap['analysis'] += self.stuff2lexc(stuff)
if wordmap['adptype']:
for stuff in wordmap['adptype'].split("|"):
wordmap['analysis'] += self.stuff2lexc(stuff)
if wordmap['is_proper']:
if self.props and wordmap['proper_noun_class']:
for prop in wordmap['proper_noun_class'].split(','):
wordmap['analysis'] += self.stuff2lexc(prop)
else:
wordmap['analysis'] += self.stuff2lexc('PROPER')
if self.semantics and wordmap['sem']:
for sem in wordmap['sem'].split(','):
wordmap['analysis'] += self.stuff2lexc(sem)
if wordmap['style']:
wordmap['analysis'] += self.stuff2lexc(wordmap['style'])
if self.ktnkav and wordmap['upos'] != 'ACRONYM':
tag = "[KTN=%s]" % (lexc_escape(wordmap['kotus_tn']))
if tag in self.ktnkav_multichars:
wordmap['analysis'] += tag
if wordmap['kotus_av']:
wordmap['analysis'] += "[KAV=%(kotus_av)s]" % (wordmap)
if self.newparas:
wordmap['analysis'] += "[NEWPARA=%s]" % (wordmap['new_para'],)
# match WORD_ID= with epsilon, then stub and lemma might match
lex_stub = '0' + wordmap['stub']
retvals = []
retvals += ["%s:%s\t%s\t;" % (wordmap['analysis'], lex_stub,
wordmap['new_para'])]
return "\n".join(retvals)
def multichars_lexc(self):
multichars = "Multichar_Symbols\n"
multichars += "!! OMOR multichars:\n"
for mcs in self.common_multichars:
multichars += mcs + "\n"
multichars += Formatter.multichars_lexc(self)
return multichars
def root_lexicon_lexc(self):
root = Formatter.root_lexicon_lexc(self)
if True:
# want co-ordinated hyphens
root += "!! LEXICONS that can be co-ordinated hyphen -compounds\n"
root += self.stuff2lexc('B→') + ':- NOUN ;\n'
root += self.stuff2lexc('B→') + ':- ADJ ;\n'
root += self.stuff2lexc('B→') + ':- SUFFIX ;\n'
if False:
root += "0 TAGGER_HACKS ;\n"
return root
# self test
if __name__ == '__main__':
from sys import exit
formatter = OmorFormatter()
exit(0)
| gpl-3.0 | 6,803,007,253,595,068,000 | 31.575 | 93 | 0.451266 | false |
QudevETH/PycQED_py3 | pycqed/utilities/general.py | 1 | 28332 | import os
import sys
import ast
import numpy as np
import h5py
import json
import datetime
from contextlib import contextmanager
from pycqed.measurement import hdf5_data as h5d
from pycqed.analysis import analysis_toolbox as a_tools
import errno
import pycqed as pq
import glob
from os.path import dirname, exists
from os import makedirs
import logging
import subprocess
from functools import reduce # forward compatibility for Python 3
import operator
import string
from collections import OrderedDict # for eval in load_settings
log = logging.getLogger(__name__)
try:
import msvcrt # used on windows to catch keyboard input
except:
pass
digs = string.digits + string.ascii_letters
def get_git_info():
"""
Returns the SHA1 ID (hash) of the current git HEAD plus a diff against the HEAD
The hash is shortened to the first 10 digits.
:return: hash string, diff string
"""
diff = "Could not extract diff"
githash = '00000'
try:
# Refers to the global qc_config
PycQEDdir = pq.__path__[0]
githash = subprocess.check_output(['git', 'rev-parse',
'--short=10', 'HEAD'], cwd=PycQEDdir)
diff = subprocess.run(['git', '-C', PycQEDdir, "diff"],
stdout=subprocess.PIPE).stdout.decode('utf-8')
except Exception:
pass
return githash, diff
def str_to_bool(s):
valid = {'true': True, 't': True, '1': True,
'false': False, 'f': False, '0': False, }
if s.lower() not in valid:
raise KeyError('{} not a valid boolean string'.format(s))
b = valid[s.lower()]
return b
def bool_to_int_str(b):
if b:
return '1'
else:
return '0'
def int_to_bin(x, w, lsb_last=True):
"""
Converts an integer to a binary string of a specified width
x (int) : input integer to be converted
w (int) : desired width
    lsb_last (bool): if False, reverses the string, e.g. 1 with w=3 gives '001' -> '100'
"""
bin_str = '{0:{fill}{width}b}'.format((int(x) + 2**w) % 2**w,
fill='0', width=w)
if lsb_last:
return bin_str
else:
return bin_str[::-1]
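# Hand-checked examples for int_to_bin (illustrative only, not part of the
# original source):
#   int_to_bin(5, 4)                  -> '0101'
#   int_to_bin(5, 4, lsb_last=False)  -> '1010'
#   int_to_bin(-1, 4)                 -> '1111'   (two's-complement wrap)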
def int2base(x: int, base: int, fixed_length: int=None):
"""
Convert an integer to string representation in a certain base.
Useful for e.g., iterating over combinations of prepared states.
Args:
x (int) : the value to convert
        base (int) : the base to convert to
fixed_length (int) : if specified prepends zeros
"""
if x < 0:
sign = -1
elif x == 0:
string_repr = digs[0]
if fixed_length is None:
return string_repr
else:
return string_repr.zfill(fixed_length)
else:
sign = 1
x *= sign
digits = []
while x:
digits.append(digs[int(x % base)])
x = int(x / base)
if sign < 0:
digits.append('-')
digits.reverse()
string_repr = ''.join(digits)
if fixed_length is None:
return string_repr
else:
return string_repr.zfill(fixed_length)
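# Hand-checked examples for int2base (illustrative only, not part of the
# original source):
#   int2base(30, 3)                 -> '1010'    (30 = 27 + 3)
#   int2base(30, 3, fixed_length=6) -> '001010'
#   int2base(0, 2, fixed_length=4)  -> '0000'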
def mopen(filename, mode='w'):
if not exists(dirname(filename)):
try:
makedirs(dirname(filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
    file = open(filename, mode=mode)
return file
def dict_to_ordered_tuples(dic):
'''Convert a dictionary to a list of tuples, sorted by key.'''
if dic is None:
return []
keys = dic.keys()
# keys.sort()
ret = [(key, dic[key]) for key in keys]
return ret
def to_hex_string(byteval):
'''
Returns a hex representation of bytes for printing purposes
'''
return "b'" + ''.join('\\x{:02x}'.format(x) for x in byteval) + "'"
def load_settings(instrument,
label: str='', folder: str=None,
timestamp: str=None, update=True, **kw):
'''
Loads settings from an hdf5 file onto the instrument handed to the
function. By default uses the last hdf5 file in the datadirectory.
By giving a label or timestamp another file can be chosen as the
settings file.
Args:
instrument (instrument) : instrument onto which settings
should be loaded. Can be an instrument name (str) if update is
set to False.
label (str) : label used for finding the last datafile
folder (str) : exact filepath of the hdf5 file to load.
if filepath is specified, this takes precedence over the file
locating options (label, timestamp etc.).
timestamp (str) : timestamp of file in the datadir
update (bool, default True): if set to False, the loaded settings
will be returned instead of updating them in the instrument.
Kwargs:
params_to_set (list) : list of strings referring to the parameters
that should be set for the instrument
'''
from numpy import array # DO not remove. Used in eval(array(...))
if folder is None:
folder_specified = False
else:
folder_specified = True
if isinstance(instrument, str) and not update:
instrument_name = instrument
else:
instrument_name = instrument.name
verbose = kw.pop('verbose', True)
older_than = kw.pop('older_than', None)
success = False
count = 0
# Will try multiple times in case the last measurements failed and
# created corrupt data files.
while success is False and count < 10:
if folder is None:
folder = a_tools.get_folder(timestamp=timestamp, label=label,
older_than=older_than)
if verbose:
print('Folder used: {}'.format(folder))
try:
filepath = a_tools.measurement_filename(folder)
f = h5py.File(filepath, 'r')
sets_group = f['Instrument settings']
ins_group = sets_group[instrument_name]
if verbose:
print('Loaded settings successfully from the HDF file.')
params_to_set = kw.pop('params_to_set', None)
if params_to_set is not None:
if len(params_to_set) == 0:
log.warning('The list of parameters to update is empty.')
if verbose and update:
print('Setting parameters {} for {}.'.format(
params_to_set, instrument_name))
params_to_set = [(param, val) for (param, val) in
ins_group.attrs.items() if param in
params_to_set]
else:
if verbose and update:
print('Setting parameters for {}.'.format(instrument_name))
params_to_set = [
(param, val) for (param, val) in ins_group.attrs.items()
if param not in getattr(
instrument, '_params_to_not_load', {})]
if not update:
params_dict = {parameter : value for parameter, value in \
params_to_set}
f.close()
return params_dict
for parameter, value in params_to_set:
if parameter in instrument.parameters.keys() and \
hasattr(instrument.parameters[parameter], 'set'):
if value == 'None': # None is saved as string in hdf5
try:
instrument.set(parameter, None)
except Exception:
print('Could not set parameter "%s" to "%s" for '
'instrument "%s"' % (
parameter, value, instrument_name))
elif value == 'False':
try:
instrument.set(parameter, False)
except Exception:
print('Could not set parameter "%s" to "%s" for '
'instrument "%s"' % (
parameter, value, instrument_name))
elif value == 'True':
try:
instrument.set(parameter, True)
except Exception:
print('Could not set parameter "%s" to "%s" for '
'instrument "%s"' % (
parameter, value, instrument_name))
else:
try:
instrument.set(parameter, int(value))
except Exception:
try:
instrument.set(parameter, float(value))
except Exception:
try:
instrument.set(parameter, eval(value))
except Exception:
try:
instrument.set(parameter,
value)
except Exception:
log.error('Could not set parameter '
'"%s" to "%s" '
'for instrument "%s"' % (
parameter, value,
instrument_name))
success = True
f.close()
except Exception as e:
logging.warning(e)
success = False
try:
f.close()
except:
pass
if timestamp is None and not folder_specified:
print('Trying next folder.')
older_than = os.path.split(folder)[0][-8:] \
+ '_' + os.path.split(folder)[1][:6]
folder = None
else:
break
count += 1
if not success:
log.error('Could not open settings for instrument {}.'.format(
instrument_name))
print()
return
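# Illustrative call patterns for load_settings (the qubit name below is
# hypothetical, not taken from the original source):
#   load_settings(qb1, timestamp='20190101_120000')   # set stored params on qb1
#   load_settings('qb1', timestamp='20190101_120000',
#                 update=False)                        # only return the stored params as a dict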
def load_settings_onto_instrument_v2(instrument, load_from_instr: str=None,
label: str='', folder: str=None,
timestamp: str=None):
'''
Loads settings from an hdf5 file onto the instrument handed to the
function. By default uses the last hdf5 file in the datadirectory.
By giving a label or timestamp another file can be chosen as the
settings file.
Args:
instrument (instrument) : instrument onto which settings should be
loaded
load_from_instr (str) : optional name of another instrument from
which to load the settings.
label (str) : label used for finding the last datafile
folder (str) : exact filepath of the hdf5 file to load.
if filepath is specified, this takes precedence over the file
locating options (label, timestamp etc.).
timestamp (str) : timestamp of file in the datadir
'''
older_than = None
# folder = None
instrument_name = instrument.name
success = False
count = 0
# Will try multiple times in case the last measurements failed and
# created corrupt data files.
while success is False and count < 10:
try:
if folder is None:
folder = a_tools.get_folder(timestamp=timestamp, label=label,
older_than=older_than)
filepath = a_tools.measurement_filename(folder)
f = h5py.File(filepath, 'r')
snapshot = {}
h5d.read_dict_from_hdf5(snapshot, h5_group=f['Snapshot'])
if load_from_instr is None:
ins_group = snapshot['instruments'][instrument_name]
else:
ins_group = snapshot['instruments'][load_from_instr]
success = True
except Exception as e:
logging.warning(e)
older_than = os.path.split(folder)[0][-8:] \
+ '_' + os.path.split(folder)[1][:6]
folder = None
success = False
count += 1
if not success:
logging.warning('Could not open settings for instrument "%s"' % (
instrument_name))
try:
f.close()
except:
pass
return False
for parname, par in ins_group['parameters'].items():
try:
if hasattr(instrument.parameters[parname], 'set'):
instrument.set(parname, par['value'])
except Exception as e:
print('Could not set parameter: "{}" to "{}" '
'for instrument "{}"'.format(parname, par['value'],
instrument_name))
logging.warning(e)
f.close()
return True
def send_email(subject='PycQED needs your attention!',
body='', email=None):
# Import smtplib for the actual sending function
import smtplib
# Here are the email package modules we'll need
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
if email is None:
email = qt.config['e-mail']
# Create the container (outer) email message.
msg = MIMEMultipart()
msg['Subject'] = subject
family = '[email protected]'
msg['From'] = '[email protected]'
msg['To'] = email
msg.attach(MIMEText(body, 'plain'))
# Send the email via our own SMTP server.
s = smtplib.SMTP_SSL('smtp.gmail.com')
s.login('[email protected]', 'DiCarloLab')
s.sendmail(email, family, msg.as_string())
s.quit()
def list_available_serial_ports():
'''
Lists serial ports
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of available serial ports
    Function from:
http://stackoverflow.com/questions/12090503/
listing-available-com-ports-with-python
'''
import serial
if sys.platform.startswith('win'):
ports = ['COM' + str(i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this is to exclude your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
def add_suffix_to_dict_keys(inputDict, suffix):
return {str(key)+suffix: (value) for key, value in inputDict.items()}
def execfile(path, global_vars=None, local_vars=None):
"""
Args:
path (str) : filepath of the file to be executed
global_vars : use globals() to use globals from namespace
local_vars : use locals() to use locals from namespace
execfile function that existed in python 2 but does not exists in python3.
"""
with open(path, 'r') as f:
code = compile(f.read(), path, 'exec')
exec(code, global_vars, local_vars)
def span_num(center: float, span: float, num: int, endpoint: bool=True):
"""
Creates a linear span of points around center
Args:
center (float) : center of the array
span (float) : span the total range of values to span
num (int) : the number of points in the span
endpoint (bool): whether to include the endpoint
"""
return np.linspace(center-span/2, center+span/2, num, endpoint=endpoint)
def span_step(center: float, span: float, step: float, endpoint: bool=True):
"""
Creates a range of points spanned around a center
Args:
center (float) : center of the array
span (float) : span the total range of values to span
step (float) : the stepsize between points in the array
endpoint (bool): whether to include the endpoint in the span
"""
    # endpoint*step/100 in the arange ensures the right boundary is included when endpoint is True
return np.arange(center-span/2, center+span/2+endpoint*step/100, step)
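# Hand-checked examples (illustrative only, not part of the original source):
#   span_num(center=0, span=1, num=5)      -> [-0.5, -0.25, 0., 0.25, 0.5]
#   span_step(center=0, span=1, step=0.25) -> [-0.5, -0.25, 0., 0.25, 0.5]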
def gen_sweep_pts(start: float=None, stop: float=None,
center: float=0, span: float=None,
num: int=None, step: float=None, endpoint=True):
"""
Generates an array of sweep points based on different types of input
arguments.
Boundaries of the array can be specified using either start/stop or
using center/span. The points can be specified using either num or step.
Args:
start (float) : start of the array
stop (float) : end of the array
center (float) : center of the array
                         N.B. 0 is chosen as a sensible default for the center;
it is argued that no such sensible default exists
for the other types of input.
span (float) : span the total range of values to span
num (int) : number of points in the array
step (float) : the stepsize between points in the array
endpoint (bool): whether to include the endpoint
"""
if (start is not None) and (stop is not None):
if num is not None:
return np.linspace(start, stop, num, endpoint=endpoint)
elif step is not None:
# numpy arange does not natively support endpoint
return np.arange(start, stop + endpoint*step/100, step)
else:
raise ValueError('Either "num" or "step" must be specified')
elif (center is not None) and (span is not None):
if num is not None:
return span_num(center, span, num, endpoint=endpoint)
elif step is not None:
return span_step(center, span, step, endpoint=endpoint)
else:
raise ValueError('Either "num" or "step" must be specified')
else:
raise ValueError('Either ("start" and "stop") or '
'("center" and "span") must be specified')
def getFromDict(dataDict: dict, mapList: list):
"""
get a value from a nested dictionary by specifying a list of keys
Args:
dataDict: nested dictionary to get the value from
mapList : list of strings specifying the key of the item to get
Returns:
value from dictionary
example:
        example_dict = {'a': {'nest_a': 5, 'nest_b': 8},
                        'b': 4}
getFromDict(example_dict, ['a', 'nest_a']) -> 5
"""
return reduce(operator.getitem, mapList, dataDict)
def setInDict(dataDict: dict, mapList: list, value):
"""
set a value in a nested dictionary by specifying the location using a list
of key.
Args:
dataDict: nested dictionary to set the value in
mapList : list of strings specifying the key of the item to set
value : the value to set
example:
        example_dict = {'a': {'nest_a': 5, 'nest_b': 8},
                        'b': 4}
        setInDict(example_dict, ['a', 'nest_a'], 6)
        example_dict is now {'a': {'nest_a': 6, 'nest_b': 8},
                             'b': 4}
"""
getFromDict(dataDict, mapList[:-1])[mapList[-1]] = value
def is_more_rencent(filename: str, comparison_filename: str):
"""
Returns True if the contents of "filename" has changed more recently
than the contents of "comparison_filename".
"""
return os.path.getmtime(filename) > os.path.getmtime(comparison_filename)
def get_required_upload_information(pulses : list, station):
"""
    Returns the channels and AWGs required for the list of input pulses
"""
#Have to add all master AWG channels such that trigger channels are not empty
master_AWG = station.pulsar.master_AWG()
required_AWGs = []
required_channels = []
used_AWGs = station.pulsar.used_AWGs()
for pulse in pulses:
for key in pulse.keys():
if not 'channel' in key:
continue
channel = pulse[key]
if isinstance(channel, dict):
                # e.g. the CZ pulse has an aux_channels_dict parameter
for ch in channel:
if not 'AWG' in ch:
continue
AWG = ch.split('_')[0]
if AWG == master_AWG:
for c in station.pulsar.channels:
if master_AWG in c and c not in required_channels:
required_channels.append(c)
if AWG in used_AWGs and AWG not in required_AWGs:
required_AWGs.append(AWG)
continue
if AWG in used_AWGs and AWG not in required_AWGs:
required_AWGs.append(AWG)
if not ch in required_channels:
required_channels.append(ch)
else:
if not 'AWG' in channel:
continue
AWG = channel.split('_')[0]
if AWG == master_AWG:
for c in station.pulsar.channels:
if master_AWG in c and c not in required_channels:
required_channels.append(c)
if AWG in used_AWGs and AWG not in required_AWGs:
required_AWGs.append(AWG)
continue
if AWG in used_AWGs and AWG not in required_AWGs:
required_AWGs.append(AWG)
if not channel in required_channels:
required_channels.append(channel)
return required_channels, required_AWGs
def dictionify(obj, only=None, exclude=None):
"""
Takes an arbitrary object and returns a dict with all variables/internal
states of the object (i.e. not functions)
Args:
obj: object
only (list): take only specific attributes
exclude (list): exclude specific attributes
Returns: dict form of the object
"""
    # copy so that popping keys does not mutate the object's own __dict__
    obj_dict = dict(vars(obj))
    if only is not None:
        assert np.ndim(only) == 1, "'only' must be of type list or array " \
                                   "of attributes to include"
        # iterate over a snapshot of the keys; popping while iterating over
        # the dict itself would raise a RuntimeError
        for k in list(obj_dict):
            if k not in only:
                obj_dict.pop(k)
    if exclude is not None:
        assert np.ndim(exclude) == 1, "'exclude' must be a list or array of" \
                                      " attributes to exclude"
        for k in list(obj_dict):
            if k in exclude:
                obj_dict.pop(k)
return obj_dict
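# Illustrative usage of dictionify (the _Point class is hypothetical, not part
# of the original source):
#   class _Point:
#       def __init__(self):
#           self.x, self.y, self.label = 1.0, 2.0, 'p0'
#   dictionify(_Point(), exclude=['label'])  -> {'x': 1.0, 'y': 2.0}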
class NumpyJsonEncoder(json.JSONEncoder):
'''
JSON encoder subclass that converts Numpy types to native python types
for saving in JSON files.
Also converts datetime objects to strings.
'''
def default(self, o):
if isinstance(o, np.integer):
return int(o)
elif isinstance(o, np.floating):
return float(o)
elif isinstance(o, np.ndarray):
return o.tolist()
elif isinstance(o, datetime.datetime):
return str(o)
else:
return super().default(o)
class KeyboardFinish(KeyboardInterrupt):
"""
Indicates that the user safely aborts/finishes the experiment.
Used to finish the experiment without raising an exception.
"""
pass
def check_keyboard_interrupt():
try: # Try except statement is to make it work on non windows pc
if msvcrt.kbhit():
key = msvcrt.getch()
if b"q" in key:
# this causes a KeyBoardInterrupt
raise KeyboardInterrupt('Human "q" terminated experiment.')
elif b"f" in key:
# this should not raise an exception
raise KeyboardFinish('Human "f" terminated experiment safely.')
except Exception:
pass
def temporary_value(*param_value_pairs):
"""
This context manager allows to change a given QCodes parameter
to a new value, and the original value is reverted upon exit of the context
manager.
Args:
*param_value_pairs: 2-tuples of qcodes parameters and their temporary
values
Example:
# measure qubit spectroscopy at a different readout frequency without
# setting the parameter value
        with temporary_value((qb1.ro_freq, 6e9)):
qb1.measure_spectroscopy(...)
"""
class TemporaryValueContext:
def __init__(self, *param_value_pairs):
if len(param_value_pairs) > 0 and \
not isinstance(param_value_pairs[0], (tuple, list)):
param_value_pairs = (param_value_pairs,)
self.param_value_pairs = param_value_pairs
self.old_value_pairs = []
def __enter__(self):
log.debug('Entered TemporaryValueContext')
self.old_value_pairs = \
[(param, param()) for param, value in self.param_value_pairs]
for param, value in self.param_value_pairs:
param(value)
def __exit__(self, type, value, traceback):
for param, value in self.old_value_pairs:
param(value)
log.debug('Exited TemporaryValueContext')
return TemporaryValueContext(*param_value_pairs)
def configure_qubit_mux_drive(qubits, lo_freqs_dict):
mwgs_set = set()
for qb in qubits:
qb_ge_mwg = qb.instr_ge_lo()
if qb_ge_mwg not in lo_freqs_dict:
raise ValueError(
f'{qb_ge_mwg} for {qb.name} not found in lo_freqs_dict.')
else:
qb.ge_mod_freq(qb.ge_freq()-lo_freqs_dict[qb_ge_mwg])
if qb_ge_mwg not in mwgs_set:
qb.instr_ge_lo.get_instr().frequency(lo_freqs_dict[qb_ge_mwg])
mwgs_set.add(qb_ge_mwg)
def configure_qubit_mux_readout(qubits, lo_freqs_dict):
mwgs_set = set()
idx = {}
for lo in lo_freqs_dict:
idx[lo] = 0
for i, qb in enumerate(qubits):
qb_ro_mwg = qb.instr_ro_lo()
if qb_ro_mwg not in lo_freqs_dict:
raise ValueError(
f'{qb_ro_mwg} for {qb.name} not found in lo_freqs_dict.')
else:
qb.ro_mod_freq(qb.ro_freq() - lo_freqs_dict[qb_ro_mwg])
qb.acq_I_channel(2 * idx[qb_ro_mwg])
qb.acq_Q_channel(2 * idx[qb_ro_mwg] + 1)
idx[qb_ro_mwg] += 1
if qb_ro_mwg not in mwgs_set:
qb.instr_ro_lo.get_instr().frequency(lo_freqs_dict[qb_ro_mwg])
mwgs_set.add(qb_ro_mwg)
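# Illustrative effect of configure_qubit_mux_readout (instrument and qubit
# names are hypothetical): with lo_freqs_dict = {'MWG_RO': 7e9} and qb1, qb2
# both using 'MWG_RO', qb1 is assigned acquisition channels (0, 1) and qb2
# (2, 3), each qubit's ro_mod_freq is set to ro_freq() - 7e9, and the shared
# LO frequency is set to 7 GHz exactly once.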
def configure_qubit_feedback_params(qubits, for_ef=False):
if for_ef:
raise NotImplementedError('for_ef feedback_params')
for qb in qubits:
ge_ch = qb.ge_I_channel()
pulsar = qb.instr_pulsar.get_instr()
AWG = qb.find_instrument(pulsar.get(f'{ge_ch}_awg'))
vawg = (int(pulsar.get(f'{ge_ch}_id')[2:])-1)//2
acq_ch = qb.acq_I_channel()
AWG.set(f'awgs_{vawg}_dio_mask_shift', 1+acq_ch)
AWG.set(f'awgs_{vawg}_dio_mask_value', 1)
UHF = qb.instr_uhf.get_instr()
threshs = qb.acq_classifier_params()
if threshs is not None:
threshs = threshs.get('thresholds', None)
if threshs is not None:
UHF.set(f'qas_0_thresholds_{acq_ch}_level', threshs[0])
def find_symmetry_index(data):
data = data.copy()
data -= data.mean()
corr = []
for iflip in np.arange(0, len(data)-0.5, 0.5):
span = min(iflip, len(data)-1-iflip)
        data_filtered = data[int(iflip-span):int(iflip+span+1)]
corr.append((data_filtered*data_filtered[::-1]).sum())
return np.argmax(corr), corr
| mit | 6,031,985,754,801,021,000 | 34.326683 | 83 | 0.55012 | false |
flaub/plaidml | plaidml/keras/backend_test.py | 1 | 34290 | # Copyright Vertex.AI.
import functools
import numpy as np
import numpy.testing as npt
import operator
import os
import unittest
import sys
import plaidml
import testing.plaidml_config
#import plaidml #only needed if adjusting vlog
from keras.backend.common import set_floatx, floatx
set_floatx('float32')
from keras.backend import theano_backend as th
from plaidml.keras import backend as pkb
from keras.backend import tensorflow_backend as tf
# Tensorflow needs some code called directly
import tensorflow
# Theano breaks on convolution if given a default optimizer
import theano
theano.config.optimizer = "None"
def m(*args, **kwargs):
    """Makes a test matrix whose dimensions are the supplied arguments."""
    dtype = kwargs.get('dtype', 'float32')
total = functools.reduce(operator.mul, args, 1)
arr = np.array(range(-2, total-2), dtype=dtype)
arr = np.reshape(arr, args)
return arr
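# Hand-checked example (illustrative only, not part of the original source):
#   m(2, 2) -> array([[-2., -1.],
#                     [ 0.,  1.]], dtype=float32)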
def n(*args):
"""Makes a test matrix whose dimensions are the supplied arguments.
Differs from m only in what values it has."""
total = functools.reduce(operator.mul, args, 1)
arr = np.array(range(-11, total-11), dtype='float32')
arr = np.reshape(arr, args)
for i in range(5):
if len(args) > i + 1:
np.swapaxes(arr, 0, i+1)
arr = np.reshape(arr, args)
return arr
def r(*args):
"""Makes a test matrix whose dimensions are the supplied arguments. Uniform random values"""
return np.random.uniform(0, 1.0, args)
def _conv_inp(IN, IC, OC, IS, KS, data_format=None):
kernel_mat_np = m(*(KS + [IC, OC]))
if data_format == 'channels_first':
input_mat_np = m(*([IN] + [IC] + IS))
else:
input_mat_np = m(*([IN] + IS + [IC]))
inputMat = input_mat_np
kernelMat = kernel_mat_np
return [inputMat, kernelMat, data_format]
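# Illustrative shapes produced by _conv_inp (not part of the original source):
#   _conv_inp(IN=1, IC=3, OC=1, IS=[5], KS=[2]) returns an input of shape
#   (1, 5, 3) (channels_last by default) and a kernel of shape (2, 3, 1).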
def _separable_conv_inp(IN, IC, OC, CM, IS, KS, data_format=None):
depth_kernel_mat = m(*(KS + [IC, CM]))
point_kernel_mat = m(*([1]*len(KS) + [CM * IC, OC]))
if data_format == 'channels_first':
input_mat = m(*([IN] + [IC] + IS))
else:
input_mat = m(*([IN] + IS + [IC]))
return [input_mat, depth_kernel_mat, point_kernel_mat, data_format]
def compareForwardExact(skip_theano=False, skip_tensorflow=False):
"""Decorates test methods, checking equality under multiple backends."""
def decorator(test_func):
def compare(self, *args):
if not skip_theano:
theano_result = test_func(self, th, *args).eval()
if not skip_tensorflow:
tf_session = tensorflow.Session()
tf.set_session(tf_session)
tensorflow_result_intermediate = test_func(self, tf, *args)
tf_session.run(tensorflow.global_variables_initializer())
tensorflow_result = tensorflow_result_intermediate.eval(session=tf_session)
plaidml_result = test_func(self, pkb, *args).eval()
if not skip_theano:
npt.assert_array_equal(plaidml_result, theano_result, err_msg='x=plaidml, y=theano')
if not skip_tensorflow:
npt.assert_array_equal(plaidml_result, tensorflow_result, err_msg='x=plaidml, y=tensorflow')
tf_session.close()
return compare
return decorator
def compareForwardClose(epsilon=1e-03, atol=1e-8, skip_theano=False, skip_tensorflow=False):
"""Decorates test methods, checking near-equality under multiple backends."""
def decorator(test_func):
def compare(self, *args):
if not skip_theano:
theano_result = test_func(self, th, *args).eval()
if not skip_tensorflow:
tf_session = tensorflow.Session()
tf.set_session(tf_session)
tensorflow_result_intermediate = test_func(self, tf, *args)
tf_session.run(tensorflow.global_variables_initializer())
tensorflow_result = tensorflow_result_intermediate.eval(session=tf_session)
plaidml_result = test_func(self, pkb, *args).eval()
if not skip_theano:
npt.assert_allclose(plaidml_result, theano_result, rtol=epsilon, atol=atol,
err_msg='x=plaidml, y=theano')
if not skip_tensorflow:
npt.assert_allclose(plaidml_result, tensorflow_result, rtol=epsilon, atol=atol,
err_msg='x=plaidml, y=tensorflow')
tf_session.close()
return compare
return decorator
def opTest(in_data, tol=1e-3, atol=1e-8, skip_theano=False, skip_tensorflow=False, verbose=False):
# If using with non-tensor parameters, all tensor params must appear before
# all non-tensor params
def run_one_backend(self, data, test_func, b, *args):
tf_session = tensorflow.Session()
tf.set_session(tf_session)
results = []
with tf_session.as_default():
x = [b.placeholder(shape = t.shape) for t in data if hasattr(t, 'shape')]
xv = [b.variable(t, dtype='float32') for t in data if hasattr(t, 'shape')]
ps = [t for t in data if not hasattr(t, 'shape')]
grad_funcs = test_func(self, b, *(x + ps + list(args)))
funcs = test_func(self, b, *(xv + ps + list(args)))
tf_session.run(tensorflow.global_variables_initializer())
for gf, f in zip(grad_funcs, funcs):
df = b.gradients(b.mean(gf), x)
gfn = b.function(x, df, updates=[])
fr = f.eval()
gr = gfn([t for t in data if hasattr(t, 'shape')])
if verbose:
print(b, fr, gr)
results.append((fr,gr))
tf_session.close()
return results
def apply(test_func):
def output(self, *args):
for didx, data in enumerate(in_data):
if not skip_theano:
theano_results = run_one_backend(self, data, test_func, th, *args)
if not skip_tensorflow:
tensorflow_results = run_one_backend(self, data, test_func, tf, *args)
plaidml_results = run_one_backend(self, data, test_func, pkb, *args)
if not skip_theano:
for idx, (pmlr, thr) in enumerate(zip(plaidml_results, theano_results)):
idx = idx + 1
npt.assert_allclose(pmlr[0], thr[0], rtol=tol, atol=atol,
err_msg='ERR: datum={}, test={}, x=plaidml, y=theano'.format(didx, idx))
for x in range(0, len(pmlr[1])):
npt.assert_allclose(pmlr[1][x], thr[1][x], rtol=tol, atol=atol,
err_msg='ERR: datum={}, test={}, grad, x=plaidml, y=theano'.format(didx, idx))
if not skip_tensorflow:
for idx, (pmlr, tfr) in enumerate(zip(plaidml_results, tensorflow_results)):
idx = idx + 1
npt.assert_allclose(pmlr[0], tfr[0], rtol=tol, atol=atol,
err_msg='ERR: datum={}, test={}, x=plaidml, y=tensorflow'.format(didx, idx))
for x in range(0, len(pmlr[1])):
npt.assert_allclose(pmlr[1][x], tfr[1][x], rtol=tol, atol=atol,
err_msg='ERR: datum={}, test={}, grad, x=plaidml, y=tensorflow'.format(didx, idx))
return output
return apply
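# Sketch of how opTest is used (illustrative summary of the decorator above,
# not part of the original source): each entry of `in_data` is one test datum;
# its ndarray members become backend placeholders/variables and any trailing
# non-array members are passed through unchanged, e.g.
#   @opTest([[m(3, 3), m(3, 3)]])
#   def testDot(self, b, x, y):
#       return [b.dot(x, y)]
# compares both forward values and gradients across PlaidML/Theano/TensorFlow.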
class TestBackendOps(unittest.TestCase):
"""Tests PlaidML Keras operation definitions"""
def a_testLearningPhase(self):
# Test name prefixed with 'a_' because this needs to run before other tests
npt.assert_equal(pkb.learning_phase().eval(), 0)
pkb.set_learning_phase(1)
npt.assert_equal(pkb.learning_phase().eval(), 1)
pkb.set_learning_phase(0)
npt.assert_equal(pkb.learning_phase().eval(), 0)
@compareForwardExact()
def testShape(self, b):
return b.shape(b.variable(m(3,3,3,3,4)))
@compareForwardExact()
def testPassthrough(self, b):
return b.variable(m(3,3))
#@compareExact()
#def testFp16(self, b):
# set_floatx('float16')
# return b.dot(b.variable(m(4, 4, dtype='float16')), b.variable(m(4, 4, dtype='float16')))
@opTest([[m(3, 3), m(3, 3)], [m(2, 3, 4, 5), m(2, 3, 5, 2)]])
def testDot(self, b, x, y):
return [b.dot(x, y)]
# TODO(T1046): Once Keras is updated beyond 2.0.8, re-enable TF on batch_dot tests
@opTest([[m(10, 20), m(10, 30, 20), (1, 2)],
[m(2, 3, 4, 5), m(2, 3, 5, 2), None],
[m(2, 3, 4, 5), m(2, 16, 5, 3), (1, 3)],
[m(2, 5), m(2, 5), 1],
[m(2, 4, 5), m(2, 5, 2), None],],
skip_tensorflow=True)
def testBatchDot(self, b, x, y, ax):
if ax is None:
return [b.batch_dot(x, y)]
else:
return [b.batch_dot(x, y, axes=ax)]
@opTest([[m(2, 3, 4, 5)]], skip_tensorflow=True)
def testBatchDot2(self, b, x):
return [b.batch_dot(x, b.variable(m(2, 3, 5, 2))),
b.batch_dot(x, b.variable(m(2, 6, 5, 3)), axes=(1,3))]
@opTest([[m(2, 5)]])
def testBatchDot3(self, b, x):
return [b.batch_dot(x, b.variable(m(2, 5)), axes=1)]
@opTest([[m(2, 4, 5)]])
def testBatchDot4(self, b, x):
return [b.batch_dot(x, b.variable(m(2, 5, 2)))]
#TODO: Does not need to exist longterm
@unittest.skip("Helper test for debugging testAddElements, not standalone")
def testMicroAddElementsFail(self):
data = [m(3, 3), m(3, 3)]
test_func = self.testAddElements
args = list()
###############
x = [pkb.placeholder(shape = t.shape) for t in data if isinstance(t, np.ndarray)]
xv = [pkb.variable(t, dtype='float32') for t in data if isinstance(t, np.ndarray)]
par = [t for t in data if not isinstance(t, np.ndarray)]
grad_funcs = test_func(pkb, *(x + par + list(args)))
funcs = test_func(pkb, *(xv + par + list(args)))
#for gf, f in zip(grad_funcs, funcs):
gf = grad_funcs[0]
f = funcs[0]
df = pkb.gradients(pkb.mean(gf), x)
gfn = pkb.function(x, df, updates=[])
fr = f.eval()
gr = gfn([t for t in data if isinstance(t, np.ndarray)])
        # 'verbose' and 'results' were undefined in the original helper;
        # define them locally so the method can run if un-skipped
        verbose = False
        results = []
        if verbose:
            print(pkb, fr, gr)
        results.append((fr, gr))
        return results
def testTileIdentity(self):
x = pkb.variable(m(3))
f = '''function (I[N]) -> (O) { O = I; }'''
result = pkb._Op("TileIdent", x.dtype, (3,), f, {'I': x}, ['O'])
output = result.eval()
return 0
def testTwoOutputs(self):
x = pkb.variable(m(3))
f = '''function (I[N]) -> (O1, O2) { O1 = I; O2 = I; }'''
result = pkb._Op("TwoOut", x.dtype, (None,), f, {'I': x}, ['O1', 'O2'])
output = result.eval()
return 0
@unittest.skip("TODO(T1028): This test is known to fail")
@opTest([[m(3, 3), m(3, 3)]])
def testAddElements(self, b, x, y):
return [x + y]
@opTest([[m(3, 3), 1.0]])
def testAddConstant(self, b, x, c):
return [x + c,
c + x]
@opTest([[m(3, 3), m(3, 3)]])
def testSubElements(self, b, x, y):
return [x - y]
@opTest([[m(3, 3), 1.0]])
def testSubConstant(self, b, x, c):
return [x - c,
c - x]
@opTest([[m(3, 3), m(3, 3)],
[m(2, 4), m(2, 4)],])
def testMulElements(self, b, x, y):
return [x * y]
@opTest([[m(3, 3), 2.0]])
def testMulConstant(self, b, x, c):
return [x * c,
c * x]
@opTest([[m(3, 3), m(3, 3)],
[m(2, 1, 1), m(1)],
[m(2), m(1)]
], skip_theano=True)
def testDivElements(self, b, x, y):
return [x / y]
@opTest([[m(3, 3), 2.0]])
def testDivConstant(self, b, x, c):
return [x / c,
c / x]
@opTest([[m(3, 3)],
[m(3, 3), None, True],
[m(2, 3, 4, 5), [1, 3]],
[m(3, 4, 5), -1],])
def testSum(self, b, x, ax=None, kd=False):
return [b.sum(x, axis=ax, keepdims=kd)]
# TODO(T1026): Switch to opTest once PROD AggregationOp supports derivatives
@compareForwardExact()
def testProd(self, b):
return b.prod(b.variable(m(3,3)))
# TODO(T1026): Switch to opTest once PROD AggregationOp supports derivatives
@compareForwardExact()
def testProdKeepdims(self, b):
return b.prod(b.variable(m(3, 3)), keepdims=True)
# TODO(T1026): Switch to opTest once PROD AggregationOp supports derivatives
@compareForwardClose()
def testProdAxis(self, b):
return b.prod(b.variable(m(2,3,4,5)), axis=[1,3])
# TODO(T1026): Switch to opTest once PROD AggregationOp supports derivs
@compareForwardClose()
def testProdNegAxis(self, b):
return b.prod(b.variable(m(3,4,5)), axis=-1)
@opTest([[m(3, 4)],
[m(3, 3, 2), None, True],
[m(4, 3, 2, 1), [1, 3]]])
def testMax(self, b, x, ax=None, kd=False):
return [b.max(x, axis=ax, keepdims=kd)]
# T1031: This doesn't match TF/Theano on boundaries
@opTest([[m(3, 4) - 3.3, m(3, 4) / 2.0]])
def testMaximum(self, b, x, y):
return [b.maximum(x, y)]
@opTest([[m(2, 4, 3)]])
def testMin(self, b, x):
return [b.min(x)]
# T1031: This doesn't match TF/Theano on boundaries
@opTest([[m(4, 3) - 3.1, m(4, 3) / 2.0]])
def testMinimum(self, b, x, y):
return [b.minimum(x, y)]
@opTest([[m(3, 3)],
[m(3, 3), None, True],
[m(2, 3, 4, 5), [1, 3]],
[m(1, 2, 3, 4, 5), [-2, 2]], # Note: axis -2 means next to last axis
])
def testMean(self, b, x, ax=None, kd=False):
return [b.mean(x, axis=ax, keepdims=kd)]
# T1031: This doesn't match TF/Theano on boundaries
@opTest([[m(3, 3), 2.0001, 5.0001]])
def testClip(self, b, x, lo, hi):
return [b.clip(x, lo, hi)]
# T1031: This doesn't match TF/Theano on corner
@opTest([[m(3, 3) - 0.0001, 0.5, 3],
[m(3, 4) + 0.0001, 0.1, 5],])
def testRelu(self, b, x, a=0.0, m=None):
return [b.relu(x),
b.relu(x, alpha=a),
b.relu(x, max_value=m),
b.relu(x, alpha=a, max_value=m)]
@compareForwardExact()
def testEqual(self, b):
return b.equal(b.variable(m(3, 3)), b.variable(m(3, 3)))
@compareForwardExact()
def testNotEqual(self, b):
return b.not_equal(b.variable(m(3, 3)), b.variable(m(3, 3)))
@compareForwardExact()
def testLess(self, b):
return b.less(b.variable(2 * m(3, 3)), b.variable(m(3, 3)))
@compareForwardExact()
def testLessEqual(self, b):
return b.less_equal(b.variable(2 * m(3, 3)), b.variable(m(3, 3)))
@compareForwardExact()
def testGreater(self, b):
return b.greater(b.variable(2 * m(3, 3)), b.variable(m(3, 3)))
@compareForwardExact()
def testGreaterEqual(self, b):
return b.greater_equal(b.variable(2 * m(3, 3)), b.variable(m(3, 3)))
@opTest([[m(3, 3)]])
def testSquare(self, b, x):
return [b.square(x)]
@opTest([[m(2,3) + 3], [m(2,3,4) + 3]])
def testSqrt(self, b, x):
return [b.sqrt(x)]
@opTest([[np.sqrt(m(5, 5, 10) + 2) - 3]], 1e-02, skip_theano=True)
def testSoftmax(self, b, x):
return [-b.log(b.softmax(x))]
@opTest([[m(10, 10)]], skip_theano=True)
def testSigmoid(self, b, x):
return [b.sigmoid(x)]
@opTest([[m(2, 2)]], skip_theano=True)
def testBinaryCrossentropy(self, b, x):
return [
b.binary_crossentropy(b.variable(np.array([[0,1],[1,0]])), x, from_logits=True),
b.binary_crossentropy(b.variable(np.array([[0.3,0.7],[0.1,0.9]])), x, from_logits=False),
b.binary_crossentropy(b.variable(np.array([[0,0.7],[1,.3]])), b.sigmoid(x))
]
@opTest([[np.array([[0,0,0], [0,1,0], [0,0,0]]), (m(3, 3) + 3) / 15.0],
[np.array([0, 0, 1, 0, 0, 0]), (m(6) + 7) / 11.0]])
def testCategoricalCrossentropy(self, b, x, y):
return [b.categorical_crossentropy(x, y)]
@opTest([[np.array([[0,0,0], [0,0,1], [1,1,0]]), (m(3, 3) + 3)]])
def testSoftCat(self, b, x, y):
return [b.categorical_crossentropy(x, b.softmax(y))]
@unittest.skip("Doesn't need to agree b/c what we do with garbage input is implementation detail")
@opTest([[(m(2, 2) + 3) / 10.0, np.array([[0., 0.], [1., 2.]])]])
def testCategoricalCrossentropyGarbageIn(self, b, x, y):
return [b.categorical_crossentropy(x, y)]
#TODO: Merge with general cat xentropy if we can resolve the TF problems
@opTest([[np.array([[0,0,0], [0,1,0], [0,0,0]]), (m(3, 3) + 3) / 15.0]],
atol=1e-7, skip_tensorflow=True)
def testCategoricalCrossentropyLogits(self, b, x, y):
return [b.categorical_crossentropy(x, y, from_logits=True)]
@opTest([[m(3, 3, 10)]], skip_theano=True, tol=0.01)
def testSparseCategoricalCrossentropy(self, b, x):
smax = b.softmax(x)
sbest = b.variable(np.array([[7,8,5], [9,3,8], [0,7,6]]))
return [b.sparse_categorical_crossentropy(sbest, smax),
b.sparse_categorical_crossentropy(sbest, smax, from_logits=True)]
@compareForwardExact(skip_theano=True)
def testOneHot(self, b):
A = b.variable(np.array([[0,1,2],[2,4,0],[0,2,7]]), dtype='int32')
return b.one_hot(A, 20)
@opTest([[m(20)], [m(7,3)]])
def testExp(self, b, x):
return [b.exp(x)]
@opTest([[m(20)], [m(2,2,2)]])
def testPow(self, b, x):
return [b.pow(x, 5)]
@opTest([[m(20) + 3], [m(10,3)]])
def testLog(self, b, x):
return [b.log(x)]
@opTest([[m(10)],[m(2,2,2,3)]], 1e-2)
def testTanh(self, b, x):
return [b.tanh(x)]
@compareForwardClose(.1)
def testRandomNormalMean(self, b):
rand = b.random_normal((1000, 1000), mean=42.0, stddev=0.1)
return b.mean(rand)
@compareForwardClose(.1)
def testRandomNormalDev(self, b):
rand = b.random_normal((1000, 1000), mean=42.0, stddev=0.1)
mean = b.mean(rand)
diffs = rand - mean
return b.mean(b.square(diffs))
@compareForwardClose(.1)
def testTruncatedNormalMean(self, b):
rand = b.truncated_normal((1000, 1000), mean=42.0, stddev=0.1)
return b.mean(b.variable(rand))
@compareForwardClose(.1, skip_theano=True)
def testTruncatedNormalDev(self, b):
rand = b.truncated_normal((1000, 1000), mean=42.0, stddev=0.1)
X = b.variable(rand)
mean = b.mean(X)
diffs = X - mean
return b.mean(b.square(diffs))
def _separable_conv2d(self, b, IN, IH, IW, IC, OC, CM, KH, KW,
strides=(1,1), padding='same', data_format=None):
depth_kernel_mat_np = m(KH, KW, IC, CM)
point_kernel_mat_np = m(1, 1, CM*IC, OC)
if data_format == 'channels_first':
input_mat_np = m(IN, IC, IH, IW)
else:
input_mat_np = m(IN, IH, IW, IC)
inputMat = b.variable(input_mat_np, dtype='float32')
depthKernelMat = b.variable(depth_kernel_mat_np, dtype='float32')
pointKernelMat = b.variable(point_kernel_mat_np, dtype='float32')
return b.separable_conv2d(inputMat, depthKernelMat, pointKernelMat, padding=padding,
strides=strides, data_format=data_format)
@opTest([_conv_inp(IN=1, IC=16, OC=16, IS=[4, 5], KS=[3, 3]),],
1e-04, skip_theano=True)
def testWinograd(self, b, im, km, df):
return [
b.conv2d(im, km, padding='same', force_winograd=True) if b == pkb else b.conv2d(im, km, padding='same'),
]
# Asymmetric stride examples not included for separable convolutions b/c they
# aren't implemented in tensorflow (and theano doesn't do separable convs)
@opTest([ _separable_conv_inp(IN=1, IC=2, OC=6, CM=3, IS=[8, 8], KS=[3, 3]),
_separable_conv_inp(IN=4, IC=3, OC=6, CM=2, IS=[7, 9], KS=[3, 4]),
_separable_conv_inp(IN=1, IC=2, OC=5, CM=1, IS=[10, 12], KS=[2, 5]),
_separable_conv_inp(IN=2, IC=4, OC=8, CM=2, IS=[12, 12], KS=[3, 3],
data_format='channels_first'),
],
atol=1e-5, # TF separable conv math is really loose, and ends up with
# values like 2.59e-6 where a 0 should be.
skip_theano=True)
def testSeparableConv2d(self, b, im, dkm, pkm, df):
return [
b.separable_conv2d(im, dkm, pkm, padding='valid', strides=(2, 2), data_format=df),
b.separable_conv2d(im, dkm, pkm, padding='valid', strides=(1, 1), data_format=df),
b.separable_conv2d(im, dkm, pkm, padding='same', strides=(3, 3), data_format=df),
]
@opTest(
[_conv_inp(IN=1, IC=3, OC=1, IS=[5], KS=[2], data_format='channels_last'),
_conv_inp(IN=2, IC=1, OC=4, IS=[5], KS=[3], data_format='channels_last')],
# Tensorflow doesn't support 1d convos in this order yet
#_conv_inp(IN=4, IC=1, OC=5, IS=[9], KS=[4], data_format='channels_first')],
1e-04, skip_theano=True)
def testConv1d(self, b, im, km, df):
return [
b.conv1d(im, km, padding='same', data_format=df),
b.conv1d(im, km, padding='valid', data_format=df),
b.conv1d(im, km, padding='valid', strides=(2), data_format=df),
]
@opTest([
_conv_inp(IN=2, IC=2, OC=4, IS=[4, 7], KS=[3, 3]),
_conv_inp(IN=3, IC=3, OC=1, IS=[9, 8], KS=[2, 2], data_format='channels_last'),
_conv_inp(IN=1, IC=1, OC=3, IS=[5, 4], KS=[3, 3], data_format='channels_first'),
_conv_inp(IN=2, IC=4, OC=2, IS=[5, 5], KS=[2, 2], data_format='channels_first'),
],
1e-04, skip_theano=True)
def testConv2d(self, b, im, km, df):
return [
b.conv2d(im, km, padding='same', data_format=df),
b.conv2d(im, km, padding='valid', data_format=df),
b.conv2d(im, km, padding='same', strides=(2,2), data_format=df),
b.conv2d(im, km, padding='valid', strides=(3,1), data_format=df),
]
@unittest.skip("TODO(T1046): This case is bugged in Keras 2.0.8 TF")
@opTest([_conv_inp(IN=1, IC=1, OC=1, IS=[1, 6], KS=[1, 1], data_format='channels_last')],
1e-04, skip_theano=True)
def testConv2dSpecial(self, b, im, km, df):
'''A simplified example highlighting a bug in Keras 2.0.8 TF
Probably doesn't need to be retained once the corresponding case in conv3d
is fixed.'''
return [b.conv2d(im, km, padding='same', strides=(2,3), data_format=df)]
@opTest(
[_conv_inp(IN=3, IC=1, OC=3, IS=[4, 7, 4], KS=[3, 3, 3]),
_conv_inp(IN=3, IC=4, OC=2, IS=[3, 6, 3], KS=[2, 1, 2], data_format='channels_last'),
_conv_inp(IN=2, IC=3, OC=1, IS=[5, 5, 3], KS=[3, 2, 2], data_format='channels_first'),
],
1e-04, skip_theano=True)
def testConv3d(self, b, im, km, df):
return [
b.conv3d(im, km, padding='same', data_format=df),
# TODO(T1046): TF broken in Keras 2.0.8 on this; see testConv2dSpecial
#b.conv3d(im, km, padding='same', strides=(2,3,3), data_format=df),
b.conv3d(im, km, padding='valid', strides=(2,1,2), data_format=df),
]
@opTest([[m(1, 4, 4, 1)], [m(1, 7, 5, 1)], [m(2, 11, 13, 3)]], skip_theano=True)
def testAvgPool(self, b, x):
return [b.pool2d(x, (2, 2), strides=(2, 2), pool_mode='avg'),
b.pool2d(x, (3, 3), strides=(1, 1), pool_mode='avg', padding='same'),
b.pool2d(x, (3, 4), strides=(2, 3), pool_mode='avg', padding='valid'),]
@opTest([[m(1, 4, 4, 1)],
[m(1, 9, 9, 1)],
[m(1, 8, 10, 1)],
[m(2, 9, 11, 3)],
],
skip_theano=True)
def testMaxPool(self, b, x):
return [b.pool2d(x, (2, 2), strides=(2, 2), pool_mode='max'),
b.pool2d(x, (3, 3), strides=(1, 1), pool_mode='max'),
b.pool2d(x, (3, 3), strides=(2, 2), pool_mode='max'),
b.pool2d(x, (2, 2), strides=(2, 2), pool_mode='max', padding='same'),
b.pool2d(x, (3, 3), strides=(2, 2), pool_mode='max', padding='same')
]
@opTest([[m(1, 1, 60), (60,)],
[m(4, 3, 70, 2), (14, 10, 6, 2)],
[m(7, 3, 2, 4), (-1,)],
[m(4, 4), (-1,)]])
def testReshape(self, b, x, s):
return [b.reshape(x, s)]
@opTest([[m(1, 1, 60), (60,)],
[m(4, 3, 70, 2), (14, 10, 6, 2)],
[m(7, 3, 2, 4), (-1,)],
[m(4, 4), (-1,)],])
def testTransposeReshape(self, b, x, s):
return [b.reshape(b.transpose(x), s)]
@opTest([[m(4, 2, 1, 3, 2), 2],
[m(5, 3, 2, 1), -1]])
def testSqueeze(self, b, x, ax):
return [b.squeeze(x, ax)]
@compareForwardExact()
def testZeros(self, b):
a = b.zeros(shape=(10,))
return a
@compareForwardExact()
def testOnes(self, b):
a = b.ones(shape=(10,))
return a
@compareForwardExact()
def testConstant(self, b):
a = b.constant(5, shape=(10,))
return a
# Note: we skip tensorflow since init_global must be called in the middle of this function
# for correct semantics, and Theano is sufficient.
@compareForwardExact(skip_tensorflow=True)
def testUpdate(self, b):
a = b.variable(m(10, 10))
a2 = a * a
up = b.update(a, a2)
f = b.function([],[], updates=[up])
f([])
f([])
return a
@compareForwardExact()
def testRandomChanges(self, b):
a = b.random_uniform((10, 10))
f = b.function([], [a])
out1 = f([])[0]
out2 = f([])[0]
diff = np.abs(out1 - out2).max()
if diff < .01:
raise Exception("Random isn't random")
return b.constant(0)
# Note: This test assumes that our update code matches Theano's, and
# that testing the second component of the returned update tuple is
# sufficient. It may be worthwhile to make this test more resilient
# to refactoring and make it test that the update portion is working
# as expected.
@compareForwardClose(skip_tensorflow=True)
def testMovingAverageUpdate(self, b):
return b.moving_average_update(b.variable(m(5,4,9,3,2)), b.variable(n(5,4,9,3,2)), 0.95)[1]
@compareForwardClose(skip_tensorflow=True, atol=1e-6)
def testBatchNormAndUpdate(self, b):
b.set_learning_phase(1)
x = b.variable(n(4, 7))
moving_mean = b.variable(m(4, 1))
moving_var = b.variable(m(4, 1))
beta = b.zeros([4, 1])
gamma = b.ones([4, 1])
normed, mean, var = b.normalize_batch_in_training(x, gamma, beta, reduction_axes=[1])
mean_update = b.moving_average_update(moving_mean, mean, 0.01)
var_update = b.moving_average_update(moving_var, var, 0.01)
f = b.function([], [], updates=[mean_update, var_update])
f([])
return moving_var
@opTest([[m(2, 3, 5),
m(2, 3, 1) + 3,
m(2, 3, 1) + 4,
]],
atol=1e-7)
def testNormalizeBatchInTrainingSimple(self, b, x, mov_avg, mov_var):
return [(b.normalize_batch_in_training(x, mov_avg, mov_var, [2]))[0]]
@opTest([[n(2,3), np.array([3., 4.,.7]), np.array([1.44, .99, .98])]],
skip_theano=True, skip_tensorflow=True)
def testNormalizeBatchInTraining(self, b, x, beta, gamma):
return [b.normalize_batch_in_training(x, gamma, beta, [1])[0]]
@compareForwardClose(skip_tensorflow=True)
def testNormalizeBatchInTrainingWeirdAxis(self, b):
return b.normalize_batch_in_training(b.variable(n(5,4,7,3)), b.constant(0.8, shape=(5,1,7,3)), b.constant(-5, shape=(5,1,7,3)), [1])[1]
@compareForwardClose(skip_tensorflow=True)
def testNormalizeBatchInTrainingMultiAxis(self, b):
return b.normalize_batch_in_training(b.variable(n(2,3,5,7,11)), b.constant(11, shape=(1,3,1,1,11)), b.constant(0, shape=(1,3,1,1,11)), [0,2,3])[2]
@opTest([[n(4,3), np.array([0.0, 0.1, 0.1]), np.array([100., 101., 50.]),
np.array([3., 4.,.7]), np.array([1.44, .99, .98])]])
def testBatchNormalization(self, b, x, mean, var, beta, gamma):
return [b.batch_normalization(x, mean, var, beta, gamma)]
@opTest([[np.array([100])]], skip_theano=True)
def testBatchNormalizationVar(self, b, var):
return [b.batch_normalization(b.variable(n(1, 1, 2)), b.variable(np.array([15])), var, None, None),
b.batch_normalization(b.variable(n(2, 1, 1)), b.variable(np.array([15])), var, None, None)]
@opTest([[np.array([15])]], skip_theano=True)
def testBatchNormalizationMean(self, b, mean):
return [b.batch_normalization(b.variable(n(3,4,5)), mean, b.variable(np.array([100])), None, None)]
@compareForwardClose()
def testBatchNormalizationOneElement(self, b):
x = b.variable(n(1,4,5))
return b.batch_normalization(b.variable(n(1,4,5)), b.variable(np.array([15])), b.variable(np.array([100])), b.variable(np.array([3])), b.variable(np.array([1.44])))
@compareForwardClose()
def testBatchNormalizationNoBeta(self, b):
return b.batch_normalization(b.variable(n(3,4,5)), b.variable(np.array([15])), b.variable(np.array([100])), None, b.variable(np.array([1.44])))
@compareForwardClose()
def testBatchNormalizationNoGamma(self, b):
return b.batch_normalization(b.variable(n(3,4,5)), b.variable(np.array([15])), b.variable(np.array([100])), b.variable(np.array([3])), None)
@opTest([[m(4, 6)],
[m(4, 3, 5)],
[m(3, 7), None, True],
[m(2, 5, 4, 7, 3), 1],
], atol=1e-7)
def testVarSimple(self, b, x, ax=None, kd=False):
return [b.var(x, axis=ax, keepdims=kd)]
@opTest([[m(3, 3)]])
def testSelfMult(self, b, x):
A = x
return [b.dot(A, A)]
@unittest.skip("TODO(T1037): This test is not yet working")
@opTest([[np.array([[1.0, 2.0], [2.0, 7.0], [5.0, 6.0]])]])
def testGather(self, b, v):
I = b.variable(np.array([0, 2, 1, 0], dtype='int32'), dtype='int32')
return [b.gather(v, I)]
@compareForwardClose()
def testGatherLong(self, b):
V = b.variable(np.array([[1.0, 2.0], [2.0, 7.0], [5.0, 6.0]]))
I = b.variable(np.array([[0, 1, 1, 0], [0, 0, 0, 1], [1, 0, 1, 0]], dtype='int32'), dtype='int32')
return b.gather(V, I)
@compareForwardClose()
def testGatherLong2(self, b):
V = b.variable(np.array([[1.0, 2.0], [2.0, 7.0], [5.0, 6.0]]))
I = b.variable(np.array([[[0, 1, 1, 0], [1, 0, 0, 1]], [[1, 0, 1, 0], [0, 0, 1, 1]]], dtype='int32'), dtype='int32')
return b.gather(V, I)
@opTest([[m(2, 3)]])
def testRepeat(self, b, x):
return [b.repeat(x, 4)]
@opTest([[m(3, 2, 4, 5, 6)]])
def testRepeatElements(self, b, x):
return [b.repeat_elements(x, 3, 4)]
@opTest([[m(4, 6, 9, 3), 3, 1, 'channels_last'],
[m(2, 3, 12, 12), 2, 3, 'channels_first'],])
def testResizeImages(self, b, x, h, w, df):
return [b.resize_images(x, h, w, df)]
@opTest([[m(3, 5)]])
def testL2Normalize(self, b, x):
return [b.l2_normalize(x, axis=1)]
@opTest([[m(2, 3, 4), m(2, 3, 3), m(2, 3, 1)],
[m(3, 2, 4), m(3, 1, 4), m(3, 3, 4), 1],])
def testConcatenate(self, b, x, y, z, ax=-1):
return [b.concatenate([x, y, z], axis=ax)]
@compareForwardExact()
def testZerosLike(self, b):
A = b.variable(m(3,2,4))
return b.zeros_like(A)
@compareForwardExact()
def testOnesLike(self, b):
A = b.variable(m(3,2,4))
return b.ones_like(A)
@opTest([[m(3, 2, 4), 1],
[m(2, 5, 3)],])
def testExpandDims(self, b, x, ax=-1):
return [b.expand_dims(x, ax)]
@opTest([[m(3, 2, 4), [1, 7, 3]],
[m(2, 3, 1), [2, 1, 4]]])
def testTile(self, b, x, n):
return [b.tile(x, n)]
@opTest([[m(34)]])
def testSliceBasic(self, b, x):
return [b.exp(x[2:30]), b.log(x[:5]), b.tanh(x[-4:]), b.sqrt(x[-1])]
@opTest([[m(4, 3, 3, 2, 5)]])
def testSliceMessy(self, b, x):
return [x[-1::-3,:2:2,-3:-2,::-1,-1:-5:-2]]
@opTest([[m(2, 3, 2)]])
def testSliceShort(self, b, x):
return [x[1]]
def testConvParameterRankExceptions(self):
A = pkb.variable(m(2,3,1))
B = pkb.variable(m(1,2,1))
C = pkb.variable(m(2,2,2,1))
with self.assertRaises(ValueError):
pkb.conv(A, C)
with self.assertRaises(ValueError):
pkb.conv(A, B, strides=(2,3))
with self.assertRaises(ValueError):
pkb.conv(A, B, dilation_rate=(1,1))
@compareForwardExact()
def testCastToInt(self, b):
A = b.variable(m(3,2,4))
return b.cast(A, dtype='int16')
@compareForwardExact()
def testCastToFloat(self, b):
A = b.variable(m(3,2,4))
A2 = b.cast(A, dtype='int32')
return b.cast(A, dtype='float32')
@compareForwardExact()
def testCastToUInt(self, b):
A = b.variable(m(3,2,4))
return b.cast(A + 2, dtype='uint8')
# Th/TF disagree w/ us about negative zeros and I think the even/odd rounding
# direction for numbers ending in *.5, so we'll settle for a close match.
@compareForwardClose()
def testRound(self, b):
vals = np.array([[1.7, 0.8, 1.5], [0.9, -0.3, -0.8], [0, 1.7, 0.6]])
return b.round(b.variable(vals))
@opTest([[m(3, 2, 4), n(3, 2, 4), 0],
[m(2, 3), n(2, 3), 1]])
def testSwitch(self, b, e, t, c):
c_tensor = b.variable(c)
return [b.switch(c_tensor, e, t),]
if __name__ == '__main__':
np.set_printoptions(threshold=np.nan)
#plaidml._internal_set_vlog(4)
testing.plaidml_config.default_config()
unittest.main()
| agpl-3.0 | -6,233,698,069,429,689,000 | 38.323394 | 172 | 0.545728 | false |
aletheia7/norm | norm/protolib/setup.py | 1 | 1641 | # This Python script can be used to build and install a Python "protokit" module that
# provides a wrapper to select portions of the "Protlib" code base.
# This currently includes just the "protopipe" interprocess communication class.
# Example scripts are provided in the "examples" subdirectory.
import platform
from distutils.core import setup, Extension
# This setup.py script assumes that Protolib (libprotokit.a) has already
# been built and located in "protolib/lib with respect to this script
# You can 'cd makefiles' or 'cd protolib/makefiles' to build Protolib for
# your system before attempting to install this Python module.
PYTHON = "src/python/"
srcFiles = [PYTHON + 'protokit.cpp']
# Determine system-specific macro definitions, etc
# (For now we support only "linux", "darwin" (MacOS), "freebsd", and "win32")
system = platform.system().lower()
sys_macros = [('HAVE_ASSERT',None), ('HAVE_IPV6',None), ('PROTO_DEBUG', None)]
sys_libs = ['protokit']
if system in ('linux', 'darwin', 'freebsd'):
sys_macros.append(('UNIX',None))
elif system in ('windows',):
sys_macros.append(('WIN32',None))
else:
raise Exception("setup.py: unsupported operating system \"%s\"" % system)
if system == 'darwin':
sys_libs.append('resolv')
setup(name='protokit',
version = '1.0',
ext_modules = [Extension('protokit',
srcFiles,
include_dirs = ['./include'],
define_macros = sys_macros,
library_dirs = ['./lib'],
libraries = sys_libs)])
| bsd-2-clause | 8,900,504,731,457,318,000 | 37.162791 | 85 | 0.635588 | false |
sandromello/themis-py | src/themis/ratelimiter.py | 1 | 8838 | """
This module implements a RateLimiter class.
RateLimiter is a Redis backed object used to define one or more rules to rate limit requests.
This module can be run to show an example of a running RateLimiter instance.
"""
import logging, math, redis, time
from itertools import izip
class RateLimiter(object):
"""
RateLimiter is used to define one or more rate limit rules.
These rules are checked on .acquire() and we either return True or False based on if we can make the request,
or we can block until we make the request.
Manual blocks are also supported with the block method.
"""
def __init__(self, redis, redis_namespace, conditions=None):
"""
Initalize an instance of a RateLimiter
conditions - list or tuple of rate limit rules
redis_host - Redis host to use
redis_port - Redis port (if different than default 6379)
redis_db - Redis DB to use (if different than 0)
redis_password - Redis password (if needed)
redis_namespace - Redis key namespace
"""
#self.redis = redis.Redis(host=redis_host, port=redis_port, db=redis_db, password=redis_password)
self.redis = redis
self.log = logging.getLogger(__name__)
self.namespace = redis_namespace
self.conditions = []
self.list_ttl = 0
if conditions:
self.add_condition(*conditions)
def add_condition(self, *conditions):
"""
Adds one or more conditions to this RateLimiter instance
Conditions can be given as:
add_condition(1, 10)
add_condition((1, 10))
add_condition((1, 10), (30, 600))
add_condition({'requests': 1, 'seconds': 10})
add_condition({'requests': 1, 'seconds': 10}, {'requests': 200, 'hours': 6})
dict can contain 'seconds', 'minutes', 'hours', and 'days' time period parameters
"""
# allow add_condition(1,2) as well as add_condition((1,2))
if len(conditions) == 2 and isinstance(conditions[0], int):
conditions = [conditions]
for condition in conditions:
if isinstance(condition, dict):
requests = condition['requests']
seconds = condition.get('seconds', 0) + (
60 * (condition.get('minutes', 0) +
60 * (condition.get('hours', 0) +
24 * condition.get('days', 0))))
else:
requests, seconds = condition
# requests and seconds always a positive integer
requests = int(requests)
seconds = int(seconds)
if requests < 0:
raise ValueError('negative number of requests (%s)' % requests)
if seconds < 0:
raise ValueError('negative time period given (%s)' % seconds)
if seconds > 0:
if requests == 0:
self.log.warn('added block all condition (%s/%s)', requests, seconds)
else:
self.log.debug('added condition (%s/%s)', requests, seconds)
self.conditions.append((requests, seconds))
if seconds > self.list_ttl:
self.list_ttl = seconds
else:
self.log.warn('time period of 0 seconds. not adding condition')
# sort by requests so we query redis list in order as well as know max and min requests by position
self.conditions.sort()
def block(self, key, seconds=0, minutes=0, hours=0, days=0):
"""
Set manual block for key for a period of time
key - key to track what to rate limit
Time parameters are added together and is the period to block for
seconds
minutes
hours
days
"""
seconds = seconds + 60 * (minutes + 60 * (hours + 24 * days))
# default to largest time period we are limiting by
if not seconds:
seconds = self.list_ttl
if not seconds:
self.log.warn('block called but no default block time. not blocking')
return 0
if not isinstance(seconds, int):
seconds = int(math.ceil(seconds))
key = ':'.join(('block', self.namespace, key))
self.log.warn('block key (%s) for %ds', key, seconds)
with self.redis.pipeline() as pipe:
pipe.set(key, '1')
pipe.expire(key, seconds)
pipe.execute()
return seconds
def is_manual_block(self, key):
block_key = ':'.join(('block', self.namespace, key))
log_key = ':'.join(('rate', self.namespace, key))
block_ttl = int(self.redis.ttl(block_key))
if block_ttl >= 0:
self.redis.delete(log_key)
return block_ttl
def acquire(self, key, block_size=1, block=True):
"""
Tests whether we can make a request, or if we are currently being limited
key - key to track what to rate limit
block - Whether to wait until we can make the request
"""
if block:
while True:
success, wait = self._make_ping(key)
if success:
return True, wait
self.log.debug('blocking acquire sleeping for %.1fs', wait)
time.sleep(wait)
else:
for _ in range(0, block_size):
success, wait = self._make_ping(key)
if not success:
return success, wait
return success, wait
# alternative acquire interface ratelimiter(key)
__call__ = acquire
def _make_ping(self, key):
# shortcut if no configured conditions
if not self.conditions:
return True, 0.0
# short cut if we are limiting to 0 requests
min_requests, min_request_seconds = self.conditions[0]
if min_requests == 0:
self.log.warn('(%s) hit block all limit (%s/%s)', key, min_requests, min_request_seconds)
return False, min_request_seconds
log_key = ':'.join(('rate', self.namespace, key))
block_key = ':'.join(('block', self.namespace, key))
lock_key = ':'.join(('lock', self.namespace, key))
with self.redis.lock(lock_key, timeout=10):
with self.redis.pipeline() as pipe:
for requests, _ in self.conditions:
pipe.lindex(log_key, requests-1) # subtract 1 as 0 indexed
# check manual block keys
pipe.ttl(block_key)
pipe.get(block_key)
boundry_timestamps = pipe.execute()
blocked = boundry_timestamps.pop()
block_ttl = boundry_timestamps.pop()
if blocked is not None:
        # block_ttl is None for the last second of a key's life. set a minimum of 0.5
if block_ttl is None:
block_ttl = 0.5
self.log.warn('(%s) hit manual block. %ss remaining', key, block_ttl)
return False, block_ttl
timestamp = time.time()
for boundry_timestamp, (requests, seconds) in izip(boundry_timestamps, self.conditions):
        # if we don't yet have n requests, boundry_timestamp will be None and this condition won't be limiting
if boundry_timestamp is not None:
boundry_timestamp = float(boundry_timestamp)
if boundry_timestamp + seconds > timestamp:
            # Here we need to extract statistics
self.log.warn('(%s) hit limit (%s/%s) time to allow %.1fs',
key, requests, seconds, boundry_timestamp + seconds - timestamp)
return False, boundry_timestamp + seconds - timestamp
# record our success
with self.redis.pipeline() as pipe:
pipe.lpush(log_key, timestamp)
max_requests, _ = self.conditions[-1]
pipe.ltrim(log_key, 0, max_requests-1) # 0 indexed so subtract 1
# if we never use this key again, let it fall out of the DB after max seconds has past
pipe.expire(log_key, self.list_ttl)
pipe.execute()
return True, 0.0
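# A compact usage sketch (assumes a reachable Redis server and an existing
# StrictRedis client; the namespace, key and limits below are illustrative only):
def example_usage(redis_client):
  limiter = RateLimiter(redis_client, 'example-namespace', conditions=[(5, 60)])
  allowed, wait = limiter.acquire('client-42', block=False)
  if not allowed:
    # the caller is being rate limited; wait is the suggested retry delay in seconds
    print 'rate limited, retry in %.1fs' % wait
  return allowed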
if __name__ == '__main__':
"""
This is an example of rate limiting using the RateLimiter class
"""
import sys
logging.basicConfig(format='%(asctime)s %(process)s %(levelname)s %(name)s %(message)s', level=logging.DEBUG, stream=sys.stdout)
log = logging.getLogger('ratelimit.main')
key = 'TestRateLimiter'
redis = redis.StrictRedis('localhost', db=4)
rate = RateLimiter(redis, 'bla')
#rate.add_condition((3, 10), (4, 15))
#rate.add_condition({'requests':20, 'minutes':5})
rate.add_condition({'requests':2, 'seconds':3})
rate.add_condition({'requests':3, 'minutes':1})
#rate.custom_block = True
#rate.list_ttl = 10
i = 1
#for _ in xrange(100):
#rate.custom_block = 20
#success, wait = rate.acquire(key, 1, False)
#print rate.block(key, seconds=20)
#rate.block(key, seconds=20)
#success, wait = rate.acquire(key, 1, False)
#print success, wait, rate.conditions
#log.info('*************** ping %d ***************', i)
success, wait = rate.acquire(key, 1, False)
print success, wait
if success is False:
if not rate.is_manual_block(key):
rate.block(key, seconds=20)
#for _ in xrange(10):
# rate.acquire(key)
# log.info('*************** ping %d ***************', i)
# i+=1
# block all keys
#rate.add_condition(0, 1)
#for _ in xrange(5):
# rate(key, block=False) # alternative interface
# time.sleep(1)
| apache-2.0 | -2,751,376,892,959,176,700 | 33.389105 | 130 | 0.625594 | false |
KevinOConnor/klipper | klippy/extras/query_endstops.py | 1 | 2160 | # Utility for querying the current state of all endstops
#
# Copyright (C) 2018-2019 Kevin O'Connor <[email protected]>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
class QueryEndstops:
def __init__(self, config):
self.printer = config.get_printer()
self.endstops = []
self.last_state = []
# Register webhook if server is available
webhooks = self.printer.lookup_object('webhooks')
webhooks.register_endpoint(
"query_endstops/status", self._handle_web_request)
gcode = self.printer.lookup_object('gcode')
gcode.register_command("QUERY_ENDSTOPS", self.cmd_QUERY_ENDSTOPS,
desc=self.cmd_QUERY_ENDSTOPS_help)
gcode.register_command("M119", self.cmd_QUERY_ENDSTOPS)
def register_endstop(self, mcu_endstop, name):
self.endstops.append((mcu_endstop, name))
def get_status(self, eventtime):
return {'last_query': {name: value for name, value in self.last_state}}
def _handle_web_request(self, web_request):
gc_mutex = self.printer.lookup_object('gcode').get_mutex()
toolhead = self.printer.lookup_object('toolhead')
with gc_mutex:
print_time = toolhead.get_last_move_time()
self.last_state = [(name, mcu_endstop.query_endstop(print_time))
for mcu_endstop, name in self.endstops]
web_request.send({name: ["open", "TRIGGERED"][not not t]
for name, t in self.last_state})
cmd_QUERY_ENDSTOPS_help = "Report on the status of each endstop"
def cmd_QUERY_ENDSTOPS(self, gcmd):
# Query the endstops
print_time = self.printer.lookup_object('toolhead').get_last_move_time()
self.last_state = [(name, mcu_endstop.query_endstop(print_time))
for mcu_endstop, name in self.endstops]
# Report results
msg = " ".join(["%s:%s" % (name, ["open", "TRIGGERED"][not not t])
for name, t in self.last_state])
gcmd.respond_raw(msg)
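# Typical console usage of the commands registered above (the response shape is
# an assumption based on the "%s:%s" join in cmd_QUERY_ENDSTOPS; actual endstop
# names depend on the printer configuration):
#   QUERY_ENDSTOPS
#   M119
# expected response shape:  x:open y:open z:TRIGGERED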
def load_config(config):
return QueryEndstops(config)
| gpl-3.0 | -2,217,313,421,855,998,500 | 47 | 80 | 0.615278 | false |
Yelp/docker-compose | setup.py | 1 | 1673 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from setuptools import setup, find_packages
import codecs
import os
import re
import sys
def read(*parts):
path = os.path.join(os.path.dirname(__file__), *parts)
with codecs.open(path, encoding='utf-8') as fobj:
return fobj.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
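# find_version() above expects a module-level assignment in compose/__init__.py
# of the form shown below (the value is only an example; either quote style
# matches the regex):
#
#   __version__ = '1.3.0'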
install_requires = [
'docopt >= 0.6.1, < 0.7',
'PyYAML >= 3.10, < 4',
'requests >= 2.6.1, < 2.7',
'texttable >= 0.8.1, < 0.9',
'websocket-client >= 0.32.0, < 1.0',
'docker-py >= 1.3.1, < 1.4',
'dockerpty >= 0.3.4, < 0.4',
'retrying >= 1.2',
'six >= 1.3.0, < 2',
]
tests_require = [
'mock >= 1.0.1',
'nose',
'pyinstaller',
'flake8',
]
if sys.version_info < (2, 7):
tests_require.append('unittest2')
setup(
name='docker-compose',
version=find_version("compose", "__init__.py"),
description='Multi-container orchestration for Docker',
url='https://www.docker.com/',
author='Docker, Inc.',
license='Apache License 2.0',
packages=find_packages(exclude=['tests.*', 'tests']),
include_package_data=True,
test_suite='nose.collector',
install_requires=install_requires,
tests_require=tests_require,
entry_points="""
[console_scripts]
docker-compose=compose.cli.main:main
""",
)
| apache-2.0 | -1,142,923,560,250,649,000 | 23.602941 | 68 | 0.590556 | false |
tompecina/legal | legal/common/templatetags/compact.py | 1 | 2052 | # -*- coding: utf-8 -*-
#
# common/templatetags/compact.py
#
# Copyright (C) 2011-19 Tomáš Pecina <[email protected]>
#
# This file is part of legal.pecina.cz, a web-based toolbox for lawyers.
#
# This application is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from re import compile, sub
from django.utils.functional import keep_lazy_text
from django.utils.encoding import force_text
from django.template import Library
from django.template.defaulttags import SpacelessNode
register = Library()
COMP_RE1 = compile(r'>\s+([^<]*)<')
COMP_RE2 = compile(r'>(\s?\S+)\s+<')
COMP_RE3 = compile(r'>\s+<')
COMP_RE4 = compile(r'\s+(\S+="[^"]*")')
COMP_RE5 = compile(r'(\S)\s+(/?)>')
COMP_RE6 = compile(r'\s+(\S+)="\s*(\S+)\s*"')
COMP_RE7 = compile(r'\s+class="\s*"')
COMP_RE8 = compile(r' +')
@keep_lazy_text
def compactify_html(value):
res = force_text(value)
res = sub(COMP_RE1, r'> \1<', res)
res = sub(COMP_RE2, r'>\1 <', res)
res = sub(COMP_RE3, r'><', res)
res = sub(COMP_RE4, r' \1', res)
res = sub(COMP_RE5, r'\1\2>', res)
res = sub(COMP_RE6, r' \1="\2"', res)
res = sub(COMP_RE7, r'', res)
res = sub(COMP_RE8, r' ', res)
return res
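# Rough illustration of the whitespace compaction above (the sample markup is
# made up for this sketch and the exact output is not asserted, since it
# depends on the regex cascade):
#
#   compactify_html('<ul>\n  <li class=" item ">  some  text  </li>\n</ul>')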
class CompactNode(SpacelessNode):
def render(self, context):
return compactify_html(self.nodelist.render(context).strip())
@register.tag
def compact(parser, token):
nodelist = parser.parse(('endcompact',))
parser.delete_first_token()
return CompactNode(nodelist)
| gpl-3.0 | 2,795,398,504,757,798,400 | 28.710145 | 72 | 0.670244 | false |
CartoDB/cartoframes | cartoframes/data/observatory/catalog/repository/geography_repo.py | 1 | 2591 | from geopandas import GeoDataFrame
from .....utils.geom_utils import set_geometry
from .constants import COUNTRY_FILTER, CATEGORY_FILTER, PROVIDER_FILTER, PUBLIC_FILTER
from .entity_repo import EntityRepository
GEOGRAPHY_TYPE = 'geography'
_GEOGRAPHY_ID_FIELD = 'id'
_GEOGRAPHY_SLUG_FIELD = 'slug'
_ALLOWED_FILTERS = [COUNTRY_FILTER, CATEGORY_FILTER, PROVIDER_FILTER, PUBLIC_FILTER]
def get_geography_repo():
return _REPO
class GeographyRepository(EntityRepository):
def __init__(self):
super(GeographyRepository, self).__init__(_GEOGRAPHY_ID_FIELD, _ALLOWED_FILTERS, _GEOGRAPHY_SLUG_FIELD)
def get_all(self, filters=None, credentials=None):
if credentials is not None:
filters = self._add_subscription_ids(filters, credentials, GEOGRAPHY_TYPE)
if filters is None:
return []
# Using user credentials to fetch entities
self.client.set_user_credentials(credentials)
entities = self._get_filtered_entities(filters)
self.client.reset_user_credentials()
return entities
@classmethod
def _get_entity_class(cls):
from cartoframes.data.observatory.catalog.geography import Geography
return Geography
def _get_rows(self, filters=None):
return self.client.get_geographies(filters)
def _map_row(self, row):
return {
'slug': self._normalize_field(row, 'slug'),
'name': self._normalize_field(row, 'name'),
'description': self._normalize_field(row, 'description'),
'country_id': self._normalize_field(row, 'country_id'),
'provider_id': self._normalize_field(row, 'provider_id'),
'geom_type': self._normalize_field(row, 'geom_type'),
'geom_coverage': self._normalize_field(row, 'geom_coverage'),
'update_frequency': self._normalize_field(row, 'update_frequency'),
'is_public_data': self._normalize_field(row, 'is_public_data'),
'lang': self._normalize_field(row, 'lang'),
'version': self._normalize_field(row, 'version'),
'provider_name': self._normalize_field(row, 'provider_name'),
'summary_json': self._normalize_field(row, 'summary_json'),
'id': self._normalize_field(row, self.id_field)
}
def get_geographies_gdf(self):
data = self.client.get_geographies({'get_geoms_coverage': True})
gdf = GeoDataFrame(data, crs='epsg:4326')
set_geometry(gdf, 'geom_coverage', inplace=True)
return gdf
_REPO = GeographyRepository()
| bsd-3-clause | -8,902,950,525,506,630,000 | 37.102941 | 111 | 0.643767 | false |
impallari/Impallari-Fontlab-Macros | IMP Kerning/21 Analize Kerning all fonts.py | 1 | 1594 | #FLM: Analize Kerning Pairs in from all open Fonts
# Description:
# Count the occurrence of kerning pairs across all open fonts
# To-do:
# It only reports main pairs, not fully expanded if the font uses classes
# Credits:
# Pablo Impallari
# http://www.impallari.com
# Level of coincidence ( 1 to 100 )
# Example: 50 means: Pairs appearing in more than 50% of all the open fonts
percentaje = 60
# Clear Output windows
from FL import *
fl.output=""
# Dependencies
from robofab.world import AllFonts, CurrentFont
from collections import defaultdict
import os.path
# Variables
tree = lambda: defaultdict(tree)
results = tree()
#Get Path to write the results
f = CurrentFont()
path = f.path
dir, fileName = os.path.split(path)
output = open(dir+'/Kern-Analisis.txt', 'w')
# Get count of all open fonts
mytotal = sum(f.info.familyName != "" for f in AllFonts())
thresold = percentaje * mytotal / 100
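# Example: with percentaje = 60 and 5 open fonts, thresold = 60 * 5 / 100 = 3,
# so only pairs found in at least 3 of the 5 fonts end up in the report.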
print "Open Fonts: " + str(mytotal)
print "Searching for Kerning Pairs precent in more than " + str(thresold) + " fonts"
# Count kerning pairs in each open font
for f in AllFonts():
    print 'Analyzing ' + str(f.info.familyName) + ' ' + str(f.info.styleName) + '...'
kerning = f.kerning
for (left, right), value in kerning.items():
        try:
            results[left][right] += 1
        except TypeError:
            # first occurrence: the nested defaultdict holds a dict, so += fails
            results[left][right] = 1
print ''
# Print Results
for l,r in results.iteritems(): # will become d.items() in py3k
for r,v in r.iteritems(): # will become d.items() in py3k
if v >= thresold:
output.write( str(l)+'; '+str(r)+'; '+str(v))
output.write( '\n')
output.close()
# Done!
print "Done!" | apache-2.0 | 937,841,781,610,993,800 | 24.269841 | 84 | 0.691343 | false |
guillaume-philippon/aquilon | tests/read_events.py | 1 | 4738 | #!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2013,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import logging
import argparse
try:
import ms.version
except ImportError:
pass
else:
ms.version.addpkg('twisted', '12.0.0')
ms.version.addpkg('zope.interface', '3.6.1')
ms.version.addpkg('setuptools', '0.6c11')
ms.version.addpkg('protobuf', '3.0.0b2')
ms.version.addpkg('six', '1.7.3')
ms.version.addpkg('python-daemon', '2.0.5')
ms.version.addpkg('lockfile', '0.9.1')
from twisted.internet.protocol import Factory
from twisted.protocols.basic import Int32StringReceiver
from twisted.internet import reactor
from google.protobuf.json_format import MessageToJson
from daemon import DaemonContext
from daemon.pidfile import TimeoutPIDLockFile
# -- begin path_setup --
BINDIR = os.path.dirname(os.path.realpath(sys.argv[0]))
LIBDIR = os.path.join(BINDIR, "..", "lib")
if LIBDIR not in sys.path:
sys.path.append(LIBDIR)
# -- end path_setup --
from aquilon.config import Config
class EventProtocol(Int32StringReceiver):
def __init__(self, storedir):
self.storedir = storedir
# Late import of protocol buffers after path correction
import aqdnotifications_pb2
self.skeleton = aqdnotifications_pb2.Notification
def stringReceived(self, data):
msg = self.skeleton()
msg.ParseFromString(data)
json_str = MessageToJson(msg)
if self.storedir:
path = os.path.join(self.storedir, '{}.json'.format(msg.uuid))
with open(path, 'w') as fh:
fh.write(json_str)
else:
sys.stdout.write(json_str)
sys.stdout.write('\n')
class EventFactory(Factory):
def __init__(self, storedir):
self.storedir = storedir
def buildProtocol(self, addr):
return EventProtocol(self.storedir)
def run_reactor(sockname, storedir):
reactor.listenUNIX(sockname, EventFactory(storedir))
reactor.run()
def daemonize(pidfile, sockname, storedir):
pidcmgr = TimeoutPIDLockFile(pidfile)
with DaemonContext(pidfile=pidcmgr) as dc:
run_reactor(sockname, storedir)
def main():
    parser = argparse.ArgumentParser(description="Read and store broker notification events")
parser.add_argument("-d", "--daemon", action='store_true',
help="Run as a daemon process")
parser.add_argument("-s", "--store", action='store_true',
help="Write messages to a file")
parser.add_argument("-c", "--config", dest="config",
help="location of the broker configuration file")
opts = parser.parse_args()
logger = logging.getLogger("read_events")
# Load configuration
config = Config(configfile=opts.config)
    # Load the specified version of the protocol buffers
sys.path.append(config.get("protocols", "directory"))
# Find and create the socket directory
sockdir = config.get("broker", "sockdir")
if not os.path.exists(sockdir):
os.makedirs(sockdir)
# Remove a stale socket
sockname = os.path.join(sockdir, "events")
if os.path.exists(sockname):
logger.info("Removing old socket " + sockname)
try:
os.unlink(sockname)
except OSError as err:
logger.error("Failed to remove %s: %s", sockname, err)
    # Are we storing messages we receive?
storedir = None
if opts.store:
if config.has_section('unittest'):
storedir = os.path.join(config.get('unittest', 'scratchdir'), 'events')
else:
storedir = os.path.join(config.get('quattordir'), 'scratch', 'events')
if not os.path.exists(storedir):
os.makedirs(storedir)
    # Decide if we want to daemonize
if opts.daemon:
rundir = config.get('broker', 'rundir')
if not os.path.exists(rundir):
os.makedirs(rundir)
pidfile = os.path.join(rundir, 'read_events.pid')
daemonize(pidfile, sockname, storedir)
else:
run_reactor(sockname, storedir)
if __name__ == '__main__':
main()
| apache-2.0 | 663,914,866,534,460,200 | 31.013514 | 83 | 0.658506 | false |
allevaton/lana | src/Class.py | 1 | 1334 | #
# Contains an encapsulation of class data for the schedules
# This should not be abstracted as it is already in a final mode
# The data for this class should be parsed by the individual
# modules, and should be represented as a global class
#
class Class():
"""A class to encapsulate all kinds of class data
This should be in a global form, so all modules should end up
with the same data.
"""
subject = '' # Ex: 'COMP'
course = '' # Ex: '285'
section = '' # Ex '09'
credits = 0.00 # Ex: 4.00
start_time = 0 # 24 hour time
end_time = 0 # 24 hour time
start_date = () # Ex: 8, 29 Month, Day
end_date = () # Ex: 12, 3 Month, Day
weekdays = [] # MTWRFSU
title = '' # Ex: 'Object Oriented Programming'
instructor = '' # Ex: 'Michael Werner'
class_max = 0 # How many students can be in the class
class_cur = 0 # How many students ARE in the class
location = '' # Ex: ANNXC 102
campus = '' # Ex: 'WIT'
# Select few have something like this:
crn = '' # Course registration number
# Other stuff this may have missed
misc = ''
def __init__( self ):
pass
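# Illustrative sketch of how a parser module might fill one of these records
# (values taken from the attribute comments above; not part of the class itself):
#
#   c = Class()
#   c.subject, c.course, c.section = 'COMP', '285', '09'
#   c.title = 'Object Oriented Programming'
#   c.instructor = 'Michael Werner'
#   c.weekdays = ['M', 'W']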
| gpl-2.0 | -4,557,105,319,848,632,300 | 35.054054 | 67 | 0.532984 | false |
stuckj/dupeguru | cocoa/base/ui/deletion_options.py | 1 | 2030 | ownerclass = 'DeletionOptions'
ownerimport = 'DeletionOptions.h'
result = Window(450, 240, "Deletion Options")
messageLabel = Label(result, "")
linkCheckbox = Checkbox(result, "Link deleted files")
linkLabel = Label(result, "After having deleted a duplicate, place a link targeting the "
"reference file to replace the deleted file.")
linkTypeChoice = RadioButtons(result, ["Symlink", "Hardlink"], columns=2)
directCheckbox = Checkbox(result, "Directly delete files")
directLabel = Label(result, "Instead of sending files to trash, delete them directly. This option "
"is usually used as a workaround when the normal deletion method doesn't work.")
proceedButton = Button(result, "Proceed")
cancelButton = Button(result, "Cancel")
owner.linkButton = linkCheckbox
owner.linkTypeRadio = linkTypeChoice
owner.directButton = directCheckbox
owner.messageTextField = messageLabel
result.canMinimize = False
result.canResize = False
linkLabel.controlSize = ControlSize.Small
directLabel.controlSize = ControlSize.Small
linkTypeChoice.controlSize = ControlSize.Small
proceedButton.keyEquivalent = '\\r'
cancelButton.keyEquivalent = '\\e'
linkCheckbox.action = directCheckbox.action = linkTypeChoice.action = Action(owner, 'updateOptions')
proceedButton.action = Action(owner, 'proceed')
cancelButton.action = Action(owner, 'cancel')
linkLabel.height *= 2 # 2 lines
directLabel.height *= 3 # 3 lines
proceedButton.width = 92
cancelButton.width = 92
mainLayout = VLayout([messageLabel, linkCheckbox, linkLabel, linkTypeChoice, directCheckbox,
directLabel])
mainLayout.packToCorner(Pack.UpperLeft)
mainLayout.fill(Pack.Right)
buttonLayout = HLayout([cancelButton, proceedButton])
buttonLayout.packToCorner(Pack.LowerRight)
# indent the labels under checkboxes a little bit to the right
for indentedView in (linkLabel, directLabel, linkTypeChoice):
indentedView.x += 20
indentedView.width -= 20
# We actually don't want the link choice radio buttons to take all the width, it looks weird.
linkTypeChoice.width = 170
| gpl-3.0 | 261,472,818,439,872,830 | 40.428571 | 100 | 0.783744 | false |
Diacamma2/financial | diacamma/invoice/migrations/0018_articlesituation.py | 1 | 1572 | # Generated by Django 3.2 on 2021-04-30 07:17
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import lucterios.framework.model_fields
class Migration(migrations.Migration):
dependencies = [
('invoice', '0017_print_articlesituation'),
]
operations = [
migrations.CreateModel(
name='ArticleSituation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', lucterios.framework.model_fields.LucteriosDecimalField(decimal_places=3, default=0.0, max_digits=10, validators=[django.core.validators.MinValueValidator(0.0), django.core.validators.MaxValueValidator(9999999.999)], verbose_name='buying price')),
('quantity', models.DecimalField(decimal_places=3, default=1.0, max_digits=12, validators=[django.core.validators.MinValueValidator(0.0), django.core.validators.MaxValueValidator(9999999.999)], verbose_name='quantity')),
('article', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='invoice.article', verbose_name='article')),
('storagearea', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='invoice.storagearea', verbose_name='storage area')),
],
options={
'verbose_name': 'article situation',
'verbose_name_plural': 'articles situations',
'default_permissions': [],
},
),
]
| gpl-3.0 | -2,918,573,821,315,981,300 | 49.709677 | 273 | 0.659669 | false |
groovehunter/datamapper | DataMapper.py | 1 | 1544 | import yaml
import csv
class HandlerBase(object): pass
class CsvHandler(HandlerBase):
def __init__(self):
pass
def load(self):
csv.register_dialect('tabbed',delimiter="\t",quoting=csv.QUOTE_NONE)
self.data = []
with open(self.src,'r') as f:
reader = csv.reader(f, 'tabbed')
for row in reader:
self.data.append(row)
def get_header(self):
self.load()
self.fieldnames = self.data[0]
print self.fieldnames
return self.fieldnames
class DataMapper(object):
def __init__(self):
pass
def load_config(self):
cfgcon = file('cfg/main.yml','r').read()
cfg = yaml.load(cfgcon)
self.cfg = cfg
cfgcon = file('cfg/conf.yml','r').read()
self.cfg_fields = yaml.load(cfgcon)
#print self.cfg_fields
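    # Illustrative cfg/main.yml layout implied by the keys used in
    # do_csv_to_csv_template below (cfg['dest']['file'] and cfg['dest']['delim']);
    # the real schema may differ:
    #
    #   dest:
    #     file: output.csv
    #     delim: "\t"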
def run(self):
self.load_config()
# for now
self.do_csv_to_csv_template()
def do_csv_to_csv_template(self):
self.handler1 = CsvHandler()
self.handler1.src = self.cfg['dest']['file']
self.handler1.delim = self.cfg['dest']['delim']
dest_fieldnames = self.handler1.get_header()
for line in self.handler1.data:
pass
for fn in dest_fieldnames:
pass
#if fn in
#self.cfg_fields['dest_to_fill']:
| gpl-2.0 | 5,061,304,231,314,770,000 | 20.760563 | 76 | 0.499352 | false |
djevans071/Rebalancing-Citibike | cleanup.py | 1 | 1117 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 7 16:19:11 2017
@author: psamtik071
"""
from workflow.data import trip_data
import os
import pandas as pd
for year in xrange(2017,2018):
for month in xrange(1,13):
basepath = 'tripdata/'
to_filename = '{}{:02}-citibike-tripdata.csv'.format(year, month)
path = basepath + to_filename
print "cleaning trips from {}".format(path)
if os.path.exists(to_filename):
print "{} already exists".format(to_filename)
pass
else:
df = pd.read_csv(path)
# rename columns
new_cols = ['duration', 'start_time', 'stop_time', 'start_id', 'start_name',
'start_lat', 'start_long', 'stop_id', 'stop_name', 'stop_lat',
'stop_long', 'bike_id', 'user_type', 'birth_year', 'gender']
df.columns = new_cols
df.start_time = pd.to_datetime(df.start_time, format = '%Y-%m-%d %H:%M:%S')
df.stop_time = pd.to_datetime(df.stop_time, format = '%Y-%m-%d %H:%M:%S')
df.to_csv(to_filename,index = None)
| mit | 7,779,977,621,227,614,000 | 28.394737 | 88 | 0.548791 | false |
ealogar/curso-python | advanced/fib_fac.py | 1 | 1159 | #-*- coding: utf-8 -*-
def factorial(n):
"""Return the factorial of n"""
if n < 2:
return 1
return n * factorial(n - 1)
def fibonacci(n):
"""Return the nth fibonacci number"""
if n < 2:
return n
return fibonacci(n - 1) + fibonacci(n - 2)
def fib_fac(x=30, y=900):
fib = fibonacci(x)
fac = factorial(y)
print "fibonacci({}):".format(x), fib
print "factorial({}):".format(y), fac
if __name__ == "__main__":
def opc1():
fruits = tuple(str(i) for i in xrange(100))
out = ''
for fruit in fruits:
out += fruit +':'
return out
def opc2():
format_str = '%s:' * 100
fruits = tuple(str(i) for i in xrange(100))
out = format_str % fruits
return out
def opc3():
format_str = '{}:' * 100
fruits = tuple(str(i) for i in xrange(100))
out = format_str.format(*fruits)
return out
def opc4():
fruits = tuple(str(i) for i in xrange(100))
out = ':'.join(fruits)
return out
import timeit
print timeit.timeit(stmt=opc4, number=100)
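    # To compare all four string-building approaches, each one can be timed the
    # same way (str.join, used by opc4, is normally the fastest of these):
    #   for f in (opc1, opc2, opc3, opc4):
    #       print f.__name__, timeit.timeit(stmt=f, number=100)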
fib_fac()
| apache-2.0 | 3,533,258,224,906,285,600 | 21.72549 | 51 | 0.513374 | false |
fos/fos-legacy | fos/core/intersection.py | 1 | 4809 | import numpy as np
def intersect_ray_sphere(p, d, sphereR, sphereC):
""" Intersects ray r = p + td, |d| = 1, with sphere s
Parameters
----------
p : (3,1)
starting point of ray
d : (3,1)
normalized direction vector of ray
sphereR : float
sphere radius
sphereC : (3,1)
sphere center coordinates
Returns
-------
If intersects
t : float
value of intersection
q : (3,1)
intersection point
If not intersect, returns (None, None)
"""
m = p - sphereC
b = np.dot(m, d)
c = np.dot(m, m) - sphereR * sphereR
    # exit if the ray's origin is outside s (c > 0)
    # and the ray is pointing away from s (b > 0)
if c > 0.0 and b > 0.0:
return (None, None)
discr = b * b - c
# A negative discriminant corresponds to ray missing sphere
if (discr < 0.0):
return (None, None)
# ray now found to intersect sphere, compute smallest t value of intersection
t = -b - np.sqrt(discr)
# if t is negative, ray started inside sphere, so clamp t to zero
if t < 0.0:
t = 0.0
q = p + t * d
return (t, q)
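# A minimal usage sketch for intersect_ray_sphere; the numbers are made up and
# the direction is already unit length, as the docstring requires (|d| = 1).
def example_ray_sphere_hit():
    p = np.array([0.0, 0.0, -5.0])
    d = np.array([0.0, 0.0, 1.0])
    t, q = intersect_ray_sphere(p, d, 1.0, np.array([0.0, 0.0, 0.0]))
    if t is not None:
        # the ray starts 5 units away on -z and hits the unit sphere at z = -1, so t == 4
        print 'hit at distance', t, 'point', q
    return t, q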
def point_inside_volume(p, ab1, ab2, eps = 0.01):
"""
Check if point p is inside the aabb volume
"""
ab1 = ab1.copy() - eps
ab2 = ab2.copy() + eps
if ab1[0] <= p[0] <= ab2[0] and \
ab1[1] <= p[1] <= ab2[1] and \
ab1[2] <= p[2] <= ab2[2]:
return True
else:
return False
def ray_aabb_intersection(p0, p1, ab1, ab2):
# http://www.cs.princeton.edu/courses/archive/fall00/cs426/lectures/raycast/sld017.htm
    # each face is interpreted as a plane: intersect the ray with every face
    # plane, then keep only the hit points that lie inside the bounding box volume
v = (p1-p0) / np.linalg.norm( (p1-p0) )
# ray: p = p0 + t * v
# plane: p * n + d = 0
# create the face planes
# because they are axis aligned, to define n is easy
xn = -np.array( [1,0,0], dtype = np.float32 )
yn = -np.array( [0,1,0], dtype = np.float32 )
zn = -np.array( [0,0,1], dtype = np.float32 )
norm_vect = (xn, yn, zn)
ret = []
for n in norm_vect:
        for corner in (ab1, ab2):
            di = np.dot(v, n)
            if di == 0.0:
                continue
            # plane through this corner with normal n: n.p + d_plane = 0
            d_plane = -np.dot(corner, n)
            t = -(np.dot(p0, n) + d_plane) / di
pout = p0 + t * v
if point_inside_volume(pout, ab1, ab2):
ret.append(pout)
return ret
def ray_triangle_intersection(p, d, v0, v1, v2):
""" Implemented from http://www.lighthouse3d.com/tutorials/maths/ray-triangle-intersection/
"""
e1 = v1 - v0
e2 = v2 - v0
h = np.cross(d, e2)
a = np.dot(e1, h)
if a > -0.00001 and a < 0.00001:
return False
f = 1 / a
s = p - v0
u = f * np.dot(s, h)
if u < 0.0 or u > 1.0:
return False
q = np.cross(s, e1)
v = f * np.dot(d, q)
if v < 0.0 or u + v > 1.0:
return False
# at this stage we can compute t to find out where
    # the intersection point is on the line
t = f * np.dot(e2, q)
if t > 0.00001: # ray intersection
# return (t, u, v)
return p + t * d
else: # this means that there is a line intersection
return False # but not a ray intersection
def test_segment_aabb(p0, p1, aabb_c1, aabb_c2):
""" Test if segement specified by points p0 and p1
intersects aabb """
aabbc1 = np.array(aabb_c1)
aabbc2 = np.array(aabb_c2)
p0 = np.array(p0)
p1 = np.array(p1)
c = (aabbc1 + aabbc2) * 0.5 # box center-point
e = aabbc2 - c # box halflength extents
m = (p0 + p1) * 0.5 # segment midpoint
d = p1 - m # segment halflength
    m = m - c # translate box and segment to origin
# try world coordinate axes as separating axes
adx = np.abs(d[0])
if np.abs(m[0]) > e[0] + adx:
return False
ady = np.abs(d[1])
if np.abs(m[1]) > e[1] + ady:
return False
adz = np.abs(d[2])
if np.abs(m[2]) > e[2] + adz:
return False
# add in an epsilon term to counteract arithmetic errors when segment
# is (near) parallel to a coordinate axis
eps = 0.001
adx += eps; ady += eps; adz += eps
# try cross products of segment direction vector with coordinate axes
if np.abs(m[1] * d[2] - m[2] * d[1]) > e[1] * adz + e[2] * ady:
return False
if np.abs(m[2] * d[0] - m[0] * d[2]) > e[0] * adz + e[2] * adx:
return False
    if np.abs(m[0] * d[1] - m[1] * d[0]) > e[0] * ady + e[1] * adx:
return False
# no separating axis found: segment must be overlapping aabb
return True
if __name__ == '__main__':
p0 = np.array([2,2,4])
p1 = np.array([2,2,-2])
ab1 = np.array([0,0,2])
ab2 = np.array([5,5,-1])
ray_aabb_intersection(p0, p1, ab1, ab2) | bsd-3-clause | -190,592,544,210,251,600 | 27.294118 | 95 | 0.539405 | false |
JordanReiter/django-mailer | mailer/engine.py | 1 | 5501 | import time
import os
import smtplib
import logging
from mailer.lockfile import FileLock, AlreadyLocked, LockTimeout
from socket import error as socket_error
from django.conf import settings
from django.core.mail import send_mail as core_send_mail
try:
# Django 1.2
from django.core.mail import get_connection
except ImportError:
# ImportError: cannot import name get_connection
from django.core.mail import SMTPConnection
get_connection = lambda backend=None, fail_silently=False, **kwds: SMTPConnection(fail_silently=fail_silently)
from mailer.models import Message, DontSendEntry, MessageLog
# when queue is empty, how long to wait (in seconds) before checking again
EMPTY_QUEUE_SLEEP = getattr(settings, "MAILER_EMPTY_QUEUE_SLEEP", 30)
# lock timeout value. how long to wait for the lock to become available.
# default behavior is to never wait for the lock to be available.
LOCK_WAIT_TIMEOUT = getattr(settings, "MAILER_LOCK_WAIT_TIMEOUT", -1)
# The actual backend to use for sending, defaulting to the Django default.
EMAIL_BACKEND = getattr(settings, "MAILER_EMAIL_BACKEND", "django.core.mail.backends.smtp.EmailBackend")
EMAIL_PRIORITY_BACKENDS = getattr(settings, "MAILER_EMAIL_PRIORITY_BACKENDS", {})
EMAIL_EXCEPTIONS = getattr(settings, 'MAILER_EMAIL_EXCEPTIONS', [])
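# Illustrative settings.py values for the options read above; the setting names
# come from the getattr() calls, the values are only examples, and the
# priority-backend keys are assumed to be priority display names:
#
#   MAILER_EMPTY_QUEUE_SLEEP = 30
#   MAILER_LOCK_WAIT_TIMEOUT = -1
#   MAILER_EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
#   MAILER_EMAIL_PRIORITY_BACKENDS = {"high": "django.core.mail.backends.smtp.EmailBackend"}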
def prioritize():
"""
Yield the messages in the queue in the order they should be sent.
"""
while True:
while Message.objects.high_priority().using('default').count() or Message.objects.medium_priority().using('default').count():
while Message.objects.high_priority().using('default').count():
for message in Message.objects.high_priority().using('default').order_by("when_added"):
yield message
while Message.objects.high_priority().using('default').count() == 0 and Message.objects.medium_priority().using('default').count():
yield Message.objects.medium_priority().using('default').order_by("when_added")[0]
while Message.objects.high_priority().using('default').count() == 0 and Message.objects.medium_priority().using('default').count() == 0 and Message.objects.low_priority().using('default').count():
yield Message.objects.low_priority().using('default').order_by("when_added")[0]
if Message.objects.non_deferred().using('default').count() == 0:
break
def send_all():
"""
Send all eligible messages in the queue.
"""
try:
lock_path = settings.MAILER_LOCKFILE
except AttributeError:
lock_path = "send_mail"
lock = FileLock(lock_path)
logging.debug("acquiring lock...")
try:
lock.acquire(LOCK_WAIT_TIMEOUT)
except AlreadyLocked:
logging.debug("lock already in place. quitting.")
return
except LockTimeout:
logging.debug("waiting for the lock timed out. quitting.")
return
logging.debug("acquired.")
start_time = time.time()
dont_send = 0
deferred = 0
sent = 0
try:
connections = {}
for message in prioritize():
try:
connection = connections.get(message.get_priority_display())
if connection is None:
connection = get_connection(backend=EMAIL_PRIORITY_BACKENDS.get(message.get_priority_display()) or EMAIL_BACKEND)
connections[message.get_priority_display()] = connection
logging.info("sending message '%s' to %s" % (message.subject.encode("utf-8"), u", ".join(message.to_addresses).encode("utf-8")))
email = message.email
email.connection = connection
email.send()
MessageLog.objects.log(message, 1) # @@@ avoid using literal result code
message.delete()
sent += 1
except (socket_error, smtplib.SMTPSenderRefused, smtplib.SMTPRecipientsRefused, smtplib.SMTPAuthenticationError) as err:
message.defer()
logging.info("message deferred due to failure: %s" % err)
MessageLog.objects.log(message, 3, log_message=str(err)) # @@@ avoid using literal result code
deferred += 1
                # Get a new connection in case the connection itself has an error.
                connections.pop(message.get_priority_display(), None)
                connection = None
except Exception, err:
if type(err) not in EMAIL_EXCEPTIONS:
raise
message.defer()
logging.info("message deferred due to failure: %s" % err)
MessageLog.objects.log(message, 3, log_message=str(err)) # @@@ avoid using literal result code
deferred += 1
                # Get a new connection in case the connection itself has an error.
                connections.pop(message.get_priority_display(), None)
                connection = None
finally:
logging.debug("releasing lock...")
lock.release()
logging.debug("released.")
logging.info("")
logging.info("%s sent; %s deferred;" % (sent, deferred))
logging.info("done in %.2f seconds" % (time.time() - start_time))
def send_loop():
"""
Loop indefinitely, checking queue at intervals of EMPTY_QUEUE_SLEEP and
sending messages if any are on queue.
"""
while True:
while not Message.objects.all():
logging.debug("sleeping for %s seconds before checking queue again" % EMPTY_QUEUE_SLEEP)
time.sleep(EMPTY_QUEUE_SLEEP)
send_all()
| mit | 6,249,034,943,903,731,000 | 41.643411 | 204 | 0.636066 | false |
zkota/pyblio-1.2 | Pyblio/GnomeUI/Fields.py | 1 | 15638 | # This file is part of pybliographer
#
# Copyright (C) 1998-2004 Frederic GOBRY
# Email : [email protected]
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#
'''This module provides a dialog to configure the structure of the
bibliography '''
# TO DO:
# adapt menu item for this dialog
# cleaning up
import gobject, gtk
import copy, os, re, string
from Pyblio import Config, Fields, Types, version
from Pyblio.GnomeUI import Utils
_typename = {
Fields.AuthorGroup : _('Authors'),
Fields.Text : _('Text'),
Fields.LongText : _('Long Text'),
Fields.URL : _('URL'),
Fields.Reference : _('Reference'),
Fields.Date : _('Date')
}
class FieldsDialog (Utils.GladeWindow):
gladeinfo = {
'file': 'fields1.glade',
'name': 'fields',
'root': 'fields1'
}
def __init__ (self, parent = None):
Utils.GladeWindow.__init__ (self, parent)
self.dialog = self.xml.get_widget ('fields1')
self.w = self.xml.get_widget ('notebook')
## tooltips = gtk.Tooltips ()
## tooltips.enable ()
self.warning = 0
self.parent = parent
self.init_page_1()
self.init_page_2()
self.init_page_3()
self.show()
self.changed = 0
return
def show(self):
self.dialog.show_all ()
def on_close (self, w):
self.dialog.hide_all()
self.size_save ()
return
def on_add(self, *args):
page = self.w.get_current_page ()
if page == 0: self.page1_add (*args)
elif page == 1: self.page2_add (*args)
elif page == 2: self.page3_add (*args)
def on_remove (self, *args):
page = self.w.get_current_page()
if page == 0: self.page1_rm (*args)
elif page == 1: self.page2_rm (*args)
elif page == 2: self.page3_rm (*args)
def on_help (self, *args):
print 'ON HELP:', args
def check (self):
if len(self.fields) != len(self.fm):
print 'ERROR LEN OF FIELDS (%d) /= LEN OF FM (%d)' %(
len(self.fields), len(self.fm))
import traceback
            traceback.print_stack()
k = self.fields.keys()
l = []
for i in self.fm:
j = i[2]
l.append(j)
try: k.remove(j)
            except ValueError:
print 'fieldname %s (%s) not in Keys' %(
j, i[0])
if k:
print 'keys %s unused' %(k)
#------------------------------------------------------------
# Page 1
def init_page_1 (self):
self.fields1 = self.xml.get_widget('f_list_1')
rend = gtk.CellRendererText()
col = gtk.TreeViewColumn(_('Name'), rend, text = 0)
self.fields1.append_column(col)
rend = gtk.CellRendererText()
col = gtk.TreeViewColumn(_('Type'), rend, text = 1)
self.fields1.append_column(col)
self.fm = gtk.ListStore(gobject.TYPE_STRING, gobject.TYPE_STRING,
gobject.TYPE_STRING, gobject.TYPE_PYOBJECT)
self.sfm = gtk.TreeModelSort(self.fm)
self.sfm.set_sort_column_id(2, gtk.SORT_ASCENDING)
self.fields1.set_model(self.sfm)
self.s1 = self.fields1.get_selection()
self.s1.connect ('changed', self.list_1_select)
self.fields = copy.copy (Config.get ('base/fields').data)
for key, item in self.fields.iteritems():
self.fm.append((item.name,
_typename [item.type], key, item))
self.name1 = self.xml.get_widget('name1')
self.menu1 = self.xml.get_widget('type1')
menu = gtk.Menu ()
self.menu1.set_menu (menu)
self.menu_items = _typename.keys ()
for item in self.menu_items:
Utils.popup_add (menu, _typename [item], self.select_menu, item)
self.menu1.set_history (0)
self.current_menu = self.menu_items [0]
self.check()
def page1_add (self, *args):
t = self.menu_items[0]
description = Types.FieldDescription('')
iter = self.fm.append((
'new field', _typename[t], '_new field_',
description))
if iter:
s_iter = self.sfm.convert_child_iter_to_iter(None, iter)
s_path = self.sfm.get_path(s_iter)
self.fields1.scroll_to_cell(s_path)
self.s1.select_iter(s_iter)
self.check()
# Config save?
def page1_rm (self, *args):
m, iter = self.s1.get_selected()
if iter:
p = self.sfm.convert_iter_to_child_iter(None, iter)
#print 'SELF:FM[P][2]:', self.fm[p] [2]
try: del self.fields [self.fm[p][2]]
except KeyError: pass
self.fm.remove(p)
Config.set_and_save('base/fields', self.fields)
self.check()
def list_1_select (self, sel):
m, iter = sel.get_selected()
if iter:
p = self.sfm.convert_iter_to_child_iter(None, iter)
data = self.fm[p]
self.name1.set_text(self.fm[p][0])
try:
self.menu1.set_history (
self.menu_items.index(self.fm[p][3].type))
except ValueError:
print self.menu_items, self.fm[p][0], self.fm[p][2]
def on_name1_changed (self, *args):
sel = self.fields1.get_selection()
m, iter = sel.get_selected()
if iter:
p = self.sfm.convert_iter_to_child_iter(None, iter)
oldname = self.fm[p][2]
newname = self.name1.get_text()
try: del self.fields [oldname]
except KeyError: pass
self.fm[p] [0] = newname
self.fm[p] [2] = newname.lower()
self.fm[p] [3].name = newname
self.fields [newname.lower()] = self.fm[p][3]
self.check()
self.change_fields()
def on_type1_changed (self, *args):
x = self.menu_items[self.menu1.get_history()]
sel = self.fields1.get_selection()
m, iter = sel.get_selected()
if iter:
p = self.sfm.convert_iter_to_child_iter(None, iter)
#print 'TYP!', args, x, sel, m, iter
self.fm[p] [1] = _typename[x]
self.fm[p] [3].type = x
self.change_fields()
self.check()
#------------------------------------------------------------
# Page 2
def init_page_2 (self):
# PAGE 2
self.entries2 = self.xml.get_widget('e_list_2')
self.em = gtk.ListStore(gobject.TYPE_STRING,
gobject.TYPE_PYOBJECT,
gobject.TYPE_STRING )
self.entries = copy.copy (Config.get ('base/entries').data)
for i in self.entries.itervalues():
self.em.append ((i.name, i, i.name.lower()))
self.sem = gtk.TreeModelSort(self.em)
self.sem.set_sort_column_id(2, gtk.SORT_ASCENDING)
self.entries2.set_model(self.sem)
rend = gtk.CellRendererText()
col = gtk.TreeViewColumn(_('Entry type'), rend, text = 0)
self.entries2.append_column(col)
self.name2 = self.xml.get_widget('name2')
self.s2 = self.entries2.get_selection()
self.s2.connect('changed', self.elist_select)
self.check()
def page2_add (self, *args):
description = Types.EntryDescription('NEW')
iter = self.em.append(('NEW', description, 'new'))
if iter:
s_iter = self.sem.convert_child_iter_to_iter(None, iter)
s_path = self.sem.get_path(s_iter)
self.entries2.scroll_to_cell(s_path)
self.s2.select_iter(s_iter)
self.entries [self.em[iter][2]] = self.em[iter][1]
self.check()
def page2_rm (self, *args):
self.check()
m, iter = self.s2.get_selected()
if iter:
p = self.sem.convert_iter_to_child_iter(None, iter)
del self.entries [self.em[p] [2]]
self.em.remove(p)
Config.set_and_save('base/entries', self.entries)
self.check()
def elist_select (self, sel):
self.list_2_select(sel)
def list_2_select (self, sel):
m, iter = sel.get_selected()
if iter:
p = self.sem.convert_iter_to_child_iter(None, iter)
self.name2.set_text (self.em[p] [0])
self.page3_setup (self.em[p] [1])
self.check()
def on_name2_changed (self, *args):
sel = self.entries2.get_selection()
m, iter = sel.get_selected()
if iter:
p = self.sem.convert_iter_to_child_iter(None, iter)
newname = self.name2.get_text()
try: del self.entries [self.em[p][2]]
            except KeyError:
                print 'KeyError', self.em[p][2], self.entries.keys()
self.em[p][1].name = newname
self.em[p][0] = newname
self.em[p][2] = newname.lower()
self.entries[newname.lower()] = self.em[p][1]
Config.set_and_save ('base/entries', self.entries)
self.check()
#print self.entries.keys()
#------------------------------------------------------------
# Page 3
def init_page_3 (self):
self.flist3a = self.xml.get_widget ('f_list_3a')
self.flist3a.set_model (self.sfm)
rend = gtk.CellRendererText()
col = gtk.TreeViewColumn(_('Available'), rend, text = 0)
self.flist3a.append_column(col)
self.s3a = self.flist3a.get_selection()
self.label3 = self.xml.get_widget ('entry_type_label')
self.flist3b = self.xml.get_widget ('f_list_3b')
rend = gtk.CellRendererToggle()
rend.connect('toggled', self.toggle_mandatory)
col = gtk.TreeViewColumn('X', rend, active = 1)
self.flist3b.append_column(col)
rend = gtk.CellRendererText()
col = gtk.TreeViewColumn(_('Associated'), rend, text = 2)
self.flist3b.append_column(col)
self.sm = gtk.ListStore(gobject.TYPE_STRING,
gobject.TYPE_BOOLEAN,
gobject.TYPE_STRING,
gobject.TYPE_PYOBJECT)
self.ssm = gtk.TreeModelSort(self.sm)
self.ssm.set_sort_column_id(0, gtk.SORT_ASCENDING)
self.flist3b.set_model(self.ssm)
self.s3b = self.flist3b.get_selection()
self.label3.set_markup (
_('Please, select an entry type from previous page.' ))
self.check()
def page3_setup (self, item):
self.sm.clear()
self.current_entry = item
for i in item.mandatory:
self.sm.append((i.name, True, i.name, i))
for i in item.optional:
self.sm.append((i.name, False, i.name, i))
self.label3.set_markup (
_('Fields associated with <b>%s</b> entry type') %
item.name)
self.check()
def page3_add (self, *args):
m, iter = self.s3a.get_selected()
if iter:
p = self.sfm.convert_iter_to_child_iter(None, iter)
field = self.fm[p] [3]
self.current_entry.optional.append(field)
self.sm.append ((field.name, False, field.name, field))
Config.set_and_save('base/entries', self.entries)
self.check()
def page3_rm (self, *args):
m, iter = self.s3b.get_selected()
if iter:
p = self.ssm.convert_iter_to_child_iter (None, iter)
field = self.sm[p] [3]
if self.sm[p] [1]:
self.current_entry.mandatory.remove(field)
else:
self.current_entry.optional.remove(field)
del self.sm [p]
Config.set_and_save('base/entries', self.entries)
self.check()
def toggle_mandatory (self, rend, path):
p = self.ssm.convert_path_to_child_path(path)
iter = self.sm.get_iter(p)
field = self.sm[iter][3]
x = self.sm.get_value (iter, 1)
self.sm.set_value(iter, 1, not x)
if x:
self.current_entry.mandatory.remove(field)
self.current_entry.optional.append(field)
else:
self.current_entry.optional.remove(field)
self.current_entry.mandatory.append(field)
self.entries [self.current_entry.name.lower()] = self.current_entry
Config.set_and_save ('base/entries', self.entries)
self.check()
def select_menu (self, w, data):
self.current_menu = data
return
def change_fields (self, item=None):
Config.set_and_save('base/fields', self.fields)
def set (self, data):
self.list.freeze ()
self.list.clear ()
self.data = data
keys = self.data.keys ()
keys.sort ()
for key in keys:
item = self.data [key]
self.list.append ((item.name, _typename [item.type]))
self.list.set_row_data (self.list.rows - 1, item)
self.list.thaw ()
pass
def get (self):
return self.data
def select_row (self, widget, row, col, event):
item = self.list.get_row_data (row)
self.name.set_text (item.name)
self.menu1.set_history (self.menu_items.index (item.type))
self.current_menu = item.type
return
def apply (self, * arg):
if not self.changed: return
result = self.get ()
Config.set_and_save ('base/fields', result)
if self.parent:
self.parent.warning (_("Some changes require to restart Pybliographic\n"
"to be correctly taken into account"))
return
def add_cb (self, * arg):
name = string.strip (self.name.get_text ())
if name == '': return
table = self.get ()
field = Types.FieldDescription (name, self.current_menu)
table [string.lower (name)] = field
self.set (table)
self.changed = 1
return
def remove_cb (self, * arg):
selection = self.list.selection
if not selection: return
selection = selection [0]
item = self.list.get_row_data (selection)
table = self.get ()
del table [string.lower (item.name)]
self.set (table)
self.changed = 1
return
_status = (
'',
_("Mandatory"),
_("Optional")
)
__fields_object = None
def run (w):
global __fields_object
if __fields_object:
__fields_object.show()
else:
def is_destroyed (* args):
global __fields_object
__fields_object = None
__fields_object = FieldsDialog(w)
__fields_object.dialog.connect ('destroy', is_destroyed)
| gpl-2.0 | -3,745,977,072,091,253,000 | 32.486081 | 84 | 0.537857 | false |
tvalacarta/tvalacarta | python/main-classic/channels/rtve.py | 1 | 22750 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# tvalacarta - XBMC Plugin
# RTVE channel
# http://blog.tvalacarta.info/plugin-xbmc/tvalacarta/
#------------------------------------------------------------
import urlparse, re
from core import config
from core import logger
from core import scrapertools
from core.item import Item
logger.info("[rtve.py] init")
DEBUG = True
CHANNELNAME = "rtve"
def isGeneric():
return True
def mainlist(item):
logger.info("[rtve.py] mainlist")
itemlist = []
    # The first menu level is a listing by channel
itemlist.append( Item(channel=CHANNELNAME, title="Directos" , action="loadlives", folder=True))
itemlist.append( Item(channel=CHANNELNAME, title="Todas las cadenas" , action="canal" , thumbnail = "" , url="http://www.rtve.es/alacarta/tve/", extra="tve"))
itemlist.append( Item(channel=CHANNELNAME, title="La 1" , action="canal" , thumbnail = "" , url="http://www.rtve.es/alacarta/tve/la1/", extra="la1"))
itemlist.append( Item(channel=CHANNELNAME, title="La 2" , action="canal" , thumbnail = "" , url="http://www.rtve.es/alacarta/tve/la2/", extra="la2"))
itemlist.append( Item(channel=CHANNELNAME, title="Canal 24 horas" , action="canal" , thumbnail = "" , url="http://www.rtve.es/alacarta/tve/24-horas/", extra="24-horas"))
itemlist.append( Item(channel=CHANNELNAME, title="Teledeporte" , action="canal" , thumbnail = "" , url="http://www.rtve.es/alacarta/tve/teledeporte/", extra="teledeporte"))
itemlist.append( Item(channel=CHANNELNAME, title="Playz" , action="canal" , thumbnail = "" , url="http://www.rtve.es/alacarta/tve/playz/", extra="playz"))
return itemlist
def directos(item=None):
logger.info("tvalacarta.channels.rtve directos")
itemlist = []
itemlist.append( Item(channel=CHANNELNAME, title="La 1", url="http://rtvev4-live.hss.adaptive.level3.net/egress/ahandler/rtvegl7/la1_lv3_aosv4_gl7/la1_lv3_aosv4_gl7.isml/la1_lv3_aosv4_gl7-audio=128000-video=400000.m3u8", thumbnail="http://media.tvalacarta.info/canales/128x128/tvela1-transparente.png", category="Nacionales", action="play", folder=False ) )
itemlist.append( Item(channel=CHANNELNAME, title="La 2", url="http://rtvev4-live.hss.adaptive.level3.net/egress/ahandler/rtvegl0/la2_lv3_aosv4_gl0/la2_lv3_aosv4_gl0.isml/la2_lv3_aosv4_gl0-audio=128000-video=400000.m3u8", thumbnail="http://media.tvalacarta.info/canales/128x128/tvela2-transparente.png", category="Nacionales", action="play", folder=False ) )
itemlist.append( Item(channel=CHANNELNAME, title="Teledeporte", url="http://rtvev4-live.hss.adaptive.level3.net/egress/ahandler/rtvegl1/tdphd_lv3_aosv4_gl1/tdphd_lv3_aosv4_gl1.isml/master.m3u8", thumbnail="http://media.tvalacarta.info/canales/128x128/tvetdp-transparente.png", category="Nacionales", action="play", folder=False ) )
itemlist.append( Item(channel=CHANNELNAME, title="Canal 24H", url="http://rtvev4-live.hss.adaptive.level3.net/egress/ahandler/rtvegl0/irtve01_lv3_aosv4_gl0/irtve01_lv3_aosv4_gl0.isml/irtve01_lv3_aosv4_gl0-audio=128000-video=400000.m3u8", thumbnail="http://media.tvalacarta.info/canales/128x128/tve24h-transparente.png", category="Nacionales", action="play", folder=False ) )
return itemlist
def loadlives(item):
logger.info("tvalacarta.channels.rtve play loadlives")
itemlist = []
for directo in directos(item):
itemlist.append(directo)
# Radio
url_rne = "http://rtve-mp3-radiolive.flumotion.com/rtve/rtve-rne.mp3.m3u"
url_cls = "http://radioclasica-fme.rtve.stream.flumotion.com/rtve/radioclasica.mp3.m3u"
url_rd3 = "http://radio3-fme.rtve.stream.flumotion.com/rtve/radio3.mp3.m3u"
url_rd4 = "http://radio4-fme.rtve.stream.flumotion.com/rtve/radio4.mp3.m3u"
url_rd5 = "http://radio5-fme.rtve.stream.flumotion.com/rtve/radio5.mp3.m3u"
url_rex = "http://radioexterior-fme.rtve.stream.flumotion.com/rtve/radioexterior.mp3.m3u"
itemlist.append( Item(channel=CHANNELNAME, title="Radio: Radio Nacional", action="play", url=url_rne, folder=False) )
itemlist.append( Item(channel=CHANNELNAME, title="Radio: Radio Clásica", action="play", url=url_cls, folder=False) )
itemlist.append( Item(channel=CHANNELNAME, title="Radio: Radio 3", action="play", url=url_rd3, folder=False) )
itemlist.append( Item(channel=CHANNELNAME, title="Radio: Radio 4", action="play", url=url_rd4, folder=False) )
itemlist.append( Item(channel=CHANNELNAME, title="Radio: Radio 5", action="play", url=url_rd5, folder=False) )
itemlist.append( Item(channel=CHANNELNAME, title="Radio: Radio Exterior", action="play", url=url_rex, folder=False) )
return itemlist
def canal(item):
logger.info("[rtve.py] canal")
itemlist = []
    # The second menu level is a list of categories
itemlist.append( Item(channel=CHANNELNAME, title="Destacados" , action="destacados" , url=item.url , extra=item.extra))
itemlist.append( Item(channel=CHANNELNAME, title="Todos los programas" , action="programas" , url="" , extra=item.extra+"/todos/1"))
    # Download the page that holds the programme category drop-down
url = "http://www.rtve.es/alacarta/programas/"+item.extra+"/todos/1/"
data = scrapertools.cachePage(url)
    # Extract the programme categories
patron = '<li><a title="Seleccionar[^"]+" href="/alacarta/programas/tve/([^/]+)/1/"><span>([^<]+)</span></a></li>'
matches = re.findall(patron,data,re.DOTALL)
if DEBUG: scrapertools.printMatches(matches)
    # Build a list with the entries
for match in matches:
scrapedtitle = match[1]
scrapedurl = match[1]
scrapedthumbnail = ""
scrapedplot = ""
scrapedextra = match[0]
if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
itemlist.append( Item(channel=CHANNELNAME, title=scrapedtitle , action="programas" , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot , extra = item.extra + "/" + scrapedextra + "/1" , category = scrapedtitle ) )
return itemlist
def destacados(item):
logger.info("[rtve.py] destacados")
itemlist = []
data = scrapertools.cachePage(item.url)
'''
    <div class="dest_title">Destacados versión libre</div>
<div class="dest_page oculto"> <div bourne:iseditable="false" class="unit c100 last"> <div class="unit c100 last"><div class="mark">
<div class="news comp">
<span class="tipo video">vídeo</span><span class="imgT"><a href="/alacarta/videos/informe-semanal/informe-semanal-soberanismo-suspenso/1814688/" title="Informe Semanal - Soberanismo en suspenso"><img src="http://img.irtve.es/imagenes/jpg/1368305081366.jpg" alt="Imagen Informe Semanal - Soberanismo en suspenso" title="Informe Semanal - Soberanismo en suspenso"/></a></span>
</div>
</div>
    </div> </div> </div> <div class="dest_title">Destacados versión libre</div> <div class="dest_page oculto"> <div bourne:iseditable="false" class="unit c100 last"> <div class="unit c100 last"> <div class="mark"><div class="news comp"><span class="tipo video">vídeo</span><span class="imgT"><a href="/alacarta/videos/completos/cuentame-cap-251-150313/1768614/" title="Cuéntame cómo pasó - T14 - No hay cuento de Navidad - C
'''
logger.info("data="+data)
patron = '<div class="dest_title[^<]+</div[^<]+'
patron += '<div class="dest_page oculto"[^<]+<div[^<]+<div[^<]+<div[^<]+'
patron += '<div class="news[^<]+'
patron += '<span class="tipo.*?</span><span class="imgT"><a href="([^"]+)" title="([^"]+)"><img src="([^"]+)"'
matches = re.findall(patron,data,re.DOTALL)
if DEBUG: scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
url=urlparse.urljoin(item.url,scrapedurl)
title=scrapertools.htmlclean(scrapedtitle)
thumbnail=scrapedthumbnail
thumbnail = thumbnail.replace("&","&")
plot=""
if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
try:
logger.info("url="+url)
#http://www.rtve.es/alacarta/videos/cocina-con-sergio/cocina-sergio-quiche-cebolla-queso-curado/1814210/
episodio = scrapertools.get_match(url,'http\://www.rtve.es/alacarta/videos/[^\/]+/([^\/]+)/')
logger.info("es episodio")
itemlist.append( Item(channel=CHANNELNAME, title=title , action="play" , server="rtve" , url=url, thumbnail=thumbnail, plot=plot, fanart=thumbnail, folder=False) )
except:
logger.info("es serie")
itemlist.append( Item(channel=CHANNELNAME, title=title , action="episodios" , url=url, thumbnail=thumbnail, plot=plot, fanart=thumbnail, folder=True) )
return itemlist
def programas(item):
logger.info("[rtve.py] programas")
    # When paginating, the URL comes already set; otherwise build the first page here
if not item.url.startswith("http"):
item.url = "http://www.rtve.es/alacarta/programas/"+item.extra+"/?pageSize=100&order=1&criteria=asc&emissionFilter=all"
logger.info("[rtve.py] programas url="+item.url)
itemlist = []
data = scrapertools.cachePage(item.url)
itemlist.extend(addprogramas(item,data))
salir = False
while not salir:
        # Extract the link to the next page
patron = '<a name="paginaIR" href="[^"]+" class="active"><span>[^<]+</span></a>[^<]+'
patron += '<a name="paginaIR" href="([^"]+)"><span>'
matches = re.findall(patron,data,re.DOTALL)
if DEBUG: scrapertools.printMatches(matches)
if len(matches)>0:
            # Load the next page
url = urlparse.urljoin(item.url,matches[0]).replace("&","&")
data = scrapertools.cachePage(url)
            # Extract all the programmes
itemlist.extend(addprogramas(item,data))
else:
salir = True
return itemlist
def addprogramas(item,data):
itemlist = []
    # Extract the programmes
patron = '<li class="[^"]+">.*?'
patron += '<span class="col_tit" id="([^"]+)" name="progname">[^<]+'
patron += '<a href="([^"]+)" title="Ver programa seleccionado">([^<]+)</a>[^<]+'
patron += '</span>[^<]+'
patron += '<span class="col_fec">([^<]+)</span>.*?'
patron += '<span class="col_cat">([^<]*)</span>'
matches = re.findall(patron,data,re.DOTALL)
if DEBUG: scrapertools.printMatches(matches)
    # Build a list with the entries
for match in matches:
if config.get_setting("rtve.programa.extendido")=="true":
scrapedtitle = match[2]+" (Ult. emisión "+match[3]+") ("+match[4]+")"
else:
scrapedtitle = match[2]
scrapedurl = urlparse.urljoin(item.url,match[1])
scrapedthumbnail = ""
scrapedplot = ""#match[5]
scrapedextra = match[0]
if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
itemlist.append( Item(channel=CHANNELNAME, title=scrapedtitle , action="episodios" , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot , extra = scrapedextra, show=scrapedtitle, category = item.category) )
return itemlist
def detalle_programa(item):
data = scrapertools.cache_page(item.url)
    # Extract the plot
patron = '<p class="intro">(.*?)</div>'
matches = re.findall(patron, data, re.DOTALL)
if len(matches)>0:
item.plot = scrapertools.htmlclean( matches[0] ).strip()
    # Extract the thumbnail
patron = '<span class="imgPrograma">.*?'
patron += '<img title="[^"]+" alt="[^"]+" src="([^"]+)" />'
matches = re.findall(patron, data, re.DOTALL)
if len(matches)>0:
item.thumbnail = urlparse.urljoin(item.url,matches[0])
    # Extract the title
patron = '<div class="false_cab">[^<]+'
patron += '<h2>[^<]+'
patron += '<a[^>]+>[^<]+'
patron += '<span>([^<]+)</span>'
matches = re.findall(patron, data, re.DOTALL)
if len(matches)>0:
item.title = matches[0].strip()
return item
def episodios(item):
logger.info("[rtve.py] episodios")
    # When paginating, the URL comes already set; otherwise build the first page here
if item.url=="":
        # The programme ID is in item.extra (e.g. 42610)
        # The URL for a programme's videos is
        # http://www.rtve.es/alacarta/interno/contenttable.shtml?ctx=42610&pageSize=20&pbq=1
item.url = "http://www.rtve.es/alacarta/interno/contenttable.shtml?ctx="+item.extra+"&pageSize=20&pbq=1"
itemlist = get_episodios(item,1)
if len(itemlist)==0:
itemlist = get_episodios_documentales(item,1)
if len(itemlist)>0:
if config.is_xbmc() and len(itemlist)>0:
itemlist.append( Item(channel=item.channel, title=">> Opciones para esta serie", url=item.url, action="serie_options##episodios", thumbnail=item.thumbnail, extra = item.extra , show=item.show, folder=False))
return itemlist
def get_episodios(item,recursion):
    logger.info("[rtve.py] get_episodios")
itemlist = []
data = scrapertools.cachePage(item.url)
    # Extract the videos
'''
<li class="odd">
<span class="col_tit" id="2851919" name="progname">
<a href="/alacarta/videos/atencion-obras/atencion-obras-josep-maria-flotats-ferran-adria-sanchis-sinisterra/2851919/">Atención Obras - 07/11/14</a>
</span>
<span class="col_tip">
<span>Completo</span>
</span>
<span class="col_dur">55:35</span>
<span class="col_pop"><span title="32% popularidad" class="pc32"><em><strong><span>32%</span></strong></em></span></span>
<span class="col_fec">07 nov 2014</span>
<div id="popup2851919" class="tultip hddn">
<span id="progToolTip" class="tooltip curved">
<span class="pointer"></span>
<span class="cerrar" id="close2851919"></span>
<span class="titulo-tooltip"><a href="/alacarta/videos/atencion-obras/atencion-obras-josep-maria-flotats-ferran-adria-sanchis-sinisterra/2851919/" title="Ver Atención Obras - 07/11/14">Atención Obras - 07/11/14</a></span>
<span class="fecha">07 nov 2014</span>
<span class="detalle">Josep María Flotats trae al Teatro María Guerrero de Madrid “El juego del amor y del azar” de Pierre de Marivaux. Un texto que ya ha sido estrenado en el Teatre Nacional de Catalunya. C...</span>
'''
patron = '<li class="[^"]+">.*?'
patron += '<span class="col_tit"[^<]+'
patron += '<a href="([^"]+)">(.*?)</a[^<]+'
patron += '</span>[^<]+'
patron += '<span class="col_tip"[^<]+<span>([^<]+)</span[^<]+</span[^<]+'
patron += '<span class="col_dur">([^<]+)</span>.*?'
patron += '<span class="col_fec">([^<]+)</span>.*?'
patron += '<span class="detalle">([^>]+)</span>'
matches = re.findall(patron,data,re.DOTALL)
if DEBUG: scrapertools.printMatches(matches)
    # Build a list with the entries
for match in matches:
if not "developer" in config.get_platform():
scrapedtitle = match[1]+" ("+match[2].strip()+") ("+match[3].strip()+") ("+match[4]+")"
else:
scrapedtitle = match[1]
scrapedtitle = scrapedtitle.replace("<em>Nuevo</em> ","")
scrapedtitle = scrapertools.unescape(scrapedtitle)
scrapedtitle = scrapedtitle.strip()
scrapedurl = urlparse.urljoin(item.url,match[0])
scrapedthumbnail = item.thumbnail
scrapedplot = scrapertools.unescape(match[5].strip())
scrapedplot = scrapertools.htmlclean(scrapedplot).strip()
scrapedextra = match[2]
if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
itemlist.append( Item(channel=CHANNELNAME, title=scrapedtitle , action="play" , server="rtve" , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot , show=item.show, category = item.category, extra=scrapedextra, folder=False) )
    # Pagination
if len(itemlist)>0:
next_page_url = scrapertools.find_single_match(data,'<a name="paginaIR" href="([^"]+)"><span>Siguiente</span></a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url).replace("&","&")
#http://www.rtve.es/alacarta/interno/contenttable.shtml?pbq=2&modl=TOC&locale=es&pageSize=15&ctx=36850&advSearchOpen=false
if not next_page_url.endswith("&advSearchOpen=false"):
next_page_url = next_page_url + "&advSearchOpen=false"
siguiente_item = Item(channel=CHANNELNAME,action="episodios",url=urlparse.urljoin(item.url,next_page_url),title=item.title,show=item.show,category=item.category)
logger.info("siguiente_item="+siguiente_item.tostring())
            # To avoid endless lists, if there are more than 3 pages add the "next" item
if recursion<=3:
itemlist.extend( get_episodios(siguiente_item,recursion+1) )
else:
siguiente_item.title=">> Página siguiente"
itemlist.append(siguiente_item)
return itemlist
def get_episodios_documentales(item,recursion):
logger.info("[rtve.py] get_episodios_documentales")
itemlist = []
data = scrapertools.cachePage(item.url)
    # Header
'''
<div class="mark">
<a title="Valencia" href="http://www.rtve.es/alacarta/videos/a-vista-de-pajaro/vista-pajaro-valencia/3165763/" alt="Valencia">
<span class="ima f16x9 T">
<img src="http://img.irtve.es/v/3165763/?w=800&h=451&crop=si"></span>
<span class="textBox mantitle">Valencia</span>
</a>
<span class="textBox">
<span class="hourdata">27:50</span>
<span class="separata"> </span>
<span class="datedata">17 junio 2015</span>
</span>
<div class="textBox descript">
<p><P>Programa que recorre desde el cielo las tierras de la provincia de Valencia.</P></p>
<p></p>
</div>
</div>
'''
patron = '<div class="mark"[^<]+'
patron += '<a title="([^"]+)" href="([^"]+)"[^<]+'
patron += '<span class="[^<]+'
patron += '<img src="([^"]+)".*?'
patron += '<span class="hourdata">([^<]+)</span[^<]+'
patron += '<span class="separata[^<]+</span[^<]+'
patron += '<span class="datedata">([^<]+)</span>(.*?)</div'
matches = re.findall(patron,data,re.DOTALL)
if DEBUG: scrapertools.printMatches(matches)
primera_url = ""
    # Build a list with the entries
for scrapedtitle,scrapedurl,scrapedthumbnail,duracion,fecha,plot in matches:
title = scrapedtitle+" ("+duracion+")("+fecha+")"
url = urlparse.urljoin(item.url,scrapedurl)
primera_url = url
plot = scrapertools.htmlclean(plot).strip()
thumbnail = scrapedthumbnail
if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
itemlist.append( Item(channel=CHANNELNAME, title=title , action="play" , server="rtve" , url=url, thumbnail=thumbnail, plot=plot , show=item.show, category = item.category, fanart=thumbnail, viewmode="movie_with_plot", folder=False) )
# Items
'''
<div class="mark">
<a href="/alacarta/videos/a-vista-de-pajaro/vista-pajaro-via-plata/2990389/" title="La Vía de la Plata">
<span class="ima f16x9 T">
<img src="http://img.rtve.es/v/2990389/?w=300&h=200&crop=no" alt="La Vía de la Plata">
</span>
<div class="apiCall mainTiTle">
<h3><span>La Vía de la Plata</span></h3>
</div>
</a>
<div class="apiCall data">
<span class="time">27:37</span>
<span class="date">22 sep 1991</span>
</div>
</div>
'''
patron = '<div class="mark"[^<]+'
patron += '<a href="([^"]+)" title="([^"]+)"[^<]+'
patron += '<span class="[^<]+'
patron += '<img src="([^"]+)".*?'
patron += '<div class="apiCall summary"[^<]+'
patron += '<p[^<]+'
patron += '<span class="time">([^<]+)</span[^<]+'
patron += '<span class="date">([^<]+)</span>([^<]+)<'
matches = re.findall(patron,data,re.DOTALL)
if DEBUG: scrapertools.printMatches(matches)
    # Build a list with the entries
for scrapedurl,scrapedtitle,scrapedthumbnail,duracion,fecha,plot in matches:
title = scrapedtitle+" ("+duracion+")("+fecha+")"
url = urlparse.urljoin(item.url,scrapedurl)
        # Sometimes the header video is repeated among the items
if url==primera_url:
continue
plot = plot.strip()
thumbnail = scrapedthumbnail
if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
itemlist.append( Item(channel=CHANNELNAME, title=title , action="play" , server="rtve" , url=url, thumbnail=thumbnail, plot=plot , show=item.show, category = item.category, fanart=thumbnail, viewmode="movie_with_plot", folder=False) )
    # Pagination
if len(itemlist)>0:
next_page_url = scrapertools.find_single_match(data,'<a title="Ver m[^"]+" href="([^"]+)"')
if next_page_url!="":
siguiente_item = Item(channel=CHANNELNAME,action="episodios",url=urlparse.urljoin(item.url,next_page_url),title=item.title,show=item.show,category=item.category)
logger.info("siguiente_item="+siguiente_item.tostring())
            # To avoid endless lists, if there are more than 3 pages add the "next" item
if recursion<=3:
itemlist.extend( get_episodios_documentales(siguiente_item,recursion+1) )
else:
siguiente_item.title=">> Página siguiente"
itemlist.append(siguiente_item)
return itemlist
# Automatic channel check: this function must return "True" if everything in the channel is OK.
def test():
    # Every option has to return something
items = mainlist(Item())
    # List of series
la1_items = canal(items[1])
la1_destacados = destacados(la1_items[0])
if len(la1_destacados)==0:
        print "No featured items in La1"
return False
la1_programas = programas(la1_items[1])
if len(la1_programas)==0:
        print "No programmes in La1"
return False
la1_episodios = episodios(la1_programas[0])
if len(la1_episodios)==0:
        print "The series "+la1_programas[0].title+" has no episodes in La1"
return False
return True
| gpl-3.0 | 8,962,532,944,840,568,000 | 47.41791 | 517 | 0.63493 | false |
rande/python-element | tests/manager/test_fs.py | 1 | 4473 | #
# Copyright 2014 Thomas Rabaix <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import element.manager.fs
from element.manager import get_uuid
import element.loaders
import element.plugins.static.loader
import os
import shutil
import element.exceptions
class FsManagerTest(unittest.TestCase):
def setUp(self):
self.fixture = "%s/../fixtures/data/" % os.path.dirname(os.path.abspath(__file__))
if os.path.isdir('%s/tmp' % self.fixture):
shutil.rmtree('%s/tmp' % self.fixture)
self.fs = element.manager.fs.FsManager(
self.fixture,
element.loaders.LoaderChain([
('yaml', element.loaders.YamlNodeLoader()),
('static', element.plugins.static.loader.StaticNodeLoader({
'jpg': 'image/jpeg',
'png': 'image/png',
})),
])
)
def tearDown(self):
if os.path.isdir('%s/tmp' % self.fixture):
shutil.rmtree('%s/tmp' % self.fixture)
def test_build_references(self):
self.assertEquals(5, len(self.fs.files))
expected = {
'eacacfab-74cf-6c8d-5e393165': 'feeds',
'50093cac-fdc1-5ba6-6f12d44e': 'feeds/all.rss',
'fca0ea55-c21b-186e-fe6924a5': 'sonata_small.png',
'c3e6be59-3448-0daa-be2dd043': '2013/my-post-content',
'fa3b5e88-acfb-fc73-90125d9e': '2013/inline-content',
}
self.assertEquals(expected, self.fs.files)
def test_contains_uuid(self):
node = self.fs.retrieve("fca0ea55-c21b-186e-fe6924a5")
self.assertEquals(node['id'], "sonata_small.png")
self.assertEquals(node['uuid'], "fca0ea55-c21b-186e-fe6924a5")
def test_retrieve(self):
node = self.fs.retrieve('fca0ea55-c21b-186e-fe6924a5')
self.assertEquals(node['id'], "sonata_small.png")
self.assertEquals(node['uuid'], "fca0ea55-c21b-186e-fe6924a5")
def test_index(self):
data = self.fs.find_one(alias="/feeds")
self.assertIsNotNone(data)
self.assertEquals(data['path'], 'feeds')
data = self.fs.find_one(alias="/feeds/_index")
self.assertIsNotNone(data)
self.assertEquals(data['path'], 'feeds')
def test_exists(self):
self.assertTrue(self.fs.exists('fca0ea55-c21b-186e-fe6924a5'))
def test_find(self):
cases = [
({}, 5),
({'type': 'blog.post'}, 2),
({'type': 'fake'}, 0),
({'type': 'fake', 'types': ['blog.post']}, 2),
({'types': ['blog.post', 'fake']}, 2),
({'types': [], 'tags': ['red', 'yellow']}, 2),
({'types': [], 'tags': ['red', 'yellow', 'brown']}, 0),
({'types': [], 'tags': []}, 5)
]
for kwarg, expected in cases:
nodes = self.fs.find(**kwarg)
self.assertEquals(expected, len(nodes))
def test_private(self):
cases = [
({'path': "/../private"}, 0),
]
for kwarg, expected in cases:
with self.assertRaises(element.exceptions.SecurityAccessException):
self.fs.find(**kwarg)
def test_save_and_delete(self):
uuid = get_uuid('tmp/simple_save')
self.assertFalse(self.fs.delete(uuid))
self.assertTrue(self.fs.save(uuid, {'hello': 'world', 'type': 'mytype', 'path': 'tmp/simple_save'}))
self.assertTrue(self.fs.delete(uuid))
def test_save_nested_folder(self):
self.assertTrue(self.fs.save(None, {'hello': 'world', 'type': 'mytype', 'path': 'tmp/nested/fake'}))
def test_save_binary_file(self):
self.assertTrue(self.fs.save(None, {
'path': 'tmp/foo/image.png',
'type': 'element.static',
            'content': open("%s/sonata_small.png" % self.fixture, 'rb').read()
}))
self.assertTrue(os.path.isfile("%s/tmp/foo/image.png" % self.fixture))
| apache-2.0 | -4,802,604,834,111,884,000 | 33.945313 | 108 | 0.588867 | false |
emulbreh/eelale | eelale/cli.py | 1 | 2228 | import click
import subprocess
import os
import logging
from .builder import Builder
logger = logging.getLogger(__name__)
@click.group()
def main():
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
)
@main.command()
@click.option(
'--requirement', '-r',
multiple=True,
metavar='<file>',
help='Install from the given requirements file. This option can be used multiple times.'
)
@click.option(
'--wheeldir', '-w',
type=click.Path(),
default='.',
metavar='<dir>',
help='Build wheels into <dir>, where the default is the current working directory.'
)
@click.option(
'--image',
metavar='<image>',
help='Build in a container based on <image>.',
)
@click.option(
'--python',
metavar='<path>',
help='Use Python executable at <path> inside the container.',
)
@click.option(
'--policy',
metavar='<policy>',
help='auditwheel policy, should be manylinux1_PLATFORM.',
)
@click.option(
'--force-build',
multiple=True,
metavar='<package>',
help='Build the given wheel inside the container even if a precompiled wheel is available. Set to :all: to force build all wheels. This option can be used multiple times.'
)
@click.argument('package', nargs=-1)
def build(requirement, wheeldir, image, python, policy, force_build, package):
if not image:
image = 'quay.io/pypa/manylinux1_x86_64'
logger.info('Defaulting to manylinux1 image: %s' % image)
if not python:
python = '/opt/python/cp36-cp36m/bin/python'
elif not python:
python = 'python'
builder = Builder(
base_image=image,
python=python,
policy=policy,
)
def build_args():
yield from package
for req in requirement:
yield '-r'
yield builder.copy(req)
try:
wheel_paths = builder.build(*build_args(), force=force_build)
except subprocess.CalledProcessError as e:
raise click.ClickException(str(e)) from e
os.makedirs(wheeldir, exist_ok=True)
for wheel_path in wheel_paths:
os.rename(wheel_path, os.path.join(wheeldir, os.path.basename(wheel_path)))
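# Example invocation (illustrative sketch; assumes the console-script entry
# point is named "eelale" and that requirements.txt lists the packages to
# build -- neither is defined in this module):
#
#   eelale build --requirement requirements.txt --wheeldir ./wheels numpy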
| mit | 4,085,580,199,796,000,000 | 25.52381 | 175 | 0.635548 | false |
wdbm/abstraction | classification_ttH_ttbb_1_from_saved_model.py | 1 | 4988 | #!/usr/bin/env python
"""
################################################################################
# #
# classification_ttH_ttbb_1_from_saved_model #
# #
################################################################################
# #
# LICENCE INFORMATION #
# #
# This program is for classification training on ttH and ttbb HEP MC #
# events. #
# #
# copyright (C) 2015 William Breaden Madden #
# #
# This software is released under the terms of the GNU General Public License #
# version 3 (GPLv3). #
# #
# This program is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# For a copy of the GNU General Public License, see #
# <http://www.gnu.org/licenses/>. #
# #
################################################################################
usage:
program [options]
options:
-h, --help display help message
--version display version and exit
-v, --verbose verbose logging
-s, --silent silent
-u, --username=USERNAME username
--data=FILENAME input ROOT data file [default: output_ttH.root]
"""
from __future__ import division
name = "classification_ttH_ttbb_1_from_saved_model"
version = "2016-02-05T1623Z"
logo = name
import docopt
import logging
import os
import sys
import time
import propyte
import pyprel
import shijian
import datavision
import abstraction
import ROOT
def main(options):
global program
program = propyte.Program(
options = options,
name = name,
version = version,
logo = logo
)
global log
from propyte import log
log.info("")
# access options and arguments
ROOT_filename = options["--data"]
log.info("load classification model")
classifier = abstraction.Classification(
load_from_directory = "abstraction_classifier_ttH_ttbb_300000_50_200_400_50_300"
#load_from_directory = "abstraction_classifier_ttH_ttbb_300000_50_150_250_300_400"
#load_from_directory = "abstraction_classifier"
)
# Access data.
data = abstraction.load_HEP_data(
ROOT_filename = ROOT_filename,
tree_name = "nominal",
maximum_number_of_events = 5000
)
# Add class labels.
if "ttH" in ROOT_filename:
class_value = 1
if "ttbb" in ROOT_filename:
class_value = 0
for index in data.indices():
data.variable(index = index, name = "class", value = class_value)
# Preprocess all data.
data.preprocess_all()
# Convert the datavision dataset to an abstraction dataset.
dataset = abstraction.convert_HEP_datasets_from_datavision_datasets_to_abstraction_datasets(
datasets = data
)
# Classify data and add the results to the datavision dataset.
results = list(classifier._model.predict(dataset.features()))
for count, index in enumerate(data.indices()):
data.variable(index = index, name = "abstraction1", value = results[count])
log.info(data.table())
log.info("")
program.terminate()
def string_to_bool(x):
return x.lower() in ("yes", "true", "t", "1")
if __name__ == "__main__":
options = docopt.docopt(__doc__)
if options["--version"]:
print(version)
exit()
main(options)
| gpl-3.0 | -6,672,606,103,455,961,000 | 37.96875 | 96 | 0.453087 | false |
gooddata/openstack-nova | nova/compute/api.py | 1 | 281186 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012-2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all requests relating to compute resources (e.g. guest VMs,
networking and storage of VMs, and compute hosts on which they run)."""
import collections
import copy
import functools
import re
import string
from castellan import key_manager
from oslo_log import log as logging
from oslo_messaging import exceptions as oslo_exceptions
from oslo_serialization import base64 as base64utils
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
import six
from six.moves import range
from nova import availability_zones
from nova import block_device
from nova.cells import opts as cells_opts
from nova.compute import flavors
from nova.compute import instance_actions
from nova.compute import instance_list
from nova.compute import migration_list
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute.utils import wrap_instance_event
from nova.compute import vm_states
from nova import conductor
import nova.conf
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context as nova_context
from nova import crypto
from nova.db import base
from nova import exception
from nova import exception_wrapper
from nova import hooks
from nova.i18n import _
from nova import image
from nova import network
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova.network.security_group import security_group_base
from nova import objects
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
from nova.objects import fields as fields_obj
from nova.objects import keypair as keypair_obj
from nova.objects import quotas as quotas_obj
from nova.pci import request as pci_request
from nova.policies import servers as servers_policies
import nova.policy
from nova import profiler
from nova import rpc
from nova.scheduler import client as scheduler_client
from nova.scheduler import utils as scheduler_utils
from nova import servicegroup
from nova import utils
from nova.virt import hardware
from nova.volume import cinder
LOG = logging.getLogger(__name__)
get_notifier = functools.partial(rpc.get_notifier, service='compute')
# NOTE(gibi): legacy notification used compute as a service but these
# calls still run on the client side of the compute service which is
# nova-api. By setting the binary to nova-api below, we can make sure
# that the new versioned notifications has the right publisher_id but the
# legacy notifications does not change.
wrap_exception = functools.partial(exception_wrapper.wrap_exception,
get_notifier=get_notifier,
binary='nova-api')
CONF = nova.conf.CONF
RO_SECURITY_GROUPS = ['default']
AGGREGATE_ACTION_UPDATE = 'Update'
AGGREGATE_ACTION_UPDATE_META = 'UpdateMeta'
AGGREGATE_ACTION_DELETE = 'Delete'
AGGREGATE_ACTION_ADD = 'Add'
CINDER_V3_ATTACH_MIN_COMPUTE_VERSION = 24
MIN_COMPUTE_MULTIATTACH = 27
MIN_COMPUTE_TRUSTED_CERTS = 31
MIN_COMPUTE_ABORT_QUEUED_LIVE_MIGRATION = 34
MIN_COMPUTE_VOLUME_TYPE = 36
# FIXME(danms): Keep a global cache of the cells we find the
# first time we look. This needs to be refreshed on a timer or
# trigger.
CELLS = []
def check_instance_state(vm_state=None, task_state=(None,),
must_have_launched=True):
"""Decorator to check VM and/or task state before entry to API functions.
If the instance is in the wrong state, or has not been successfully
    started at least once, the wrapper will raise an exception.
"""
if vm_state is not None and not isinstance(vm_state, set):
vm_state = set(vm_state)
if task_state is not None and not isinstance(task_state, set):
task_state = set(task_state)
def outer(f):
@six.wraps(f)
def inner(self, context, instance, *args, **kw):
if vm_state is not None and instance.vm_state not in vm_state:
raise exception.InstanceInvalidState(
attr='vm_state',
instance_uuid=instance.uuid,
state=instance.vm_state,
method=f.__name__)
if (task_state is not None and
instance.task_state not in task_state):
raise exception.InstanceInvalidState(
attr='task_state',
instance_uuid=instance.uuid,
state=instance.task_state,
method=f.__name__)
if must_have_launched and not instance.launched_at:
raise exception.InstanceInvalidState(
attr='launched_at',
instance_uuid=instance.uuid,
state=instance.launched_at,
method=f.__name__)
return f(self, context, instance, *args, **kw)
return inner
return outer
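# Illustrative usage of the decorator above (a sketch, not code from this
# module): only allow the wrapped API call on instances that are ACTIVE or
# STOPPED and have no task in progress:
#
#     @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
#                           task_state=[None])
#     def some_api_method(self, context, instance, *args, **kwargs):
#         ...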
def _set_or_none(q):
return q if q is None or isinstance(q, set) else set(q)
def reject_instance_state(vm_state=None, task_state=None):
"""Decorator. Raise InstanceInvalidState if instance is in any of the
given states.
"""
vm_state = _set_or_none(vm_state)
task_state = _set_or_none(task_state)
def outer(f):
@six.wraps(f)
def inner(self, context, instance, *args, **kw):
_InstanceInvalidState = functools.partial(
exception.InstanceInvalidState,
instance_uuid=instance.uuid,
method=f.__name__)
if vm_state is not None and instance.vm_state in vm_state:
raise _InstanceInvalidState(
attr='vm_state', state=instance.vm_state)
if task_state is not None and instance.task_state in task_state:
raise _InstanceInvalidState(
attr='task_state', state=instance.task_state)
return f(self, context, instance, *args, **kw)
return inner
return outer
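# Illustrative usage of reject_instance_state (a sketch, not code from this
# module): refuse the wrapped call while the instance is mid-resize:
#
#     @reject_instance_state(task_state=[task_states.RESIZE_MIGRATING])
#     def some_api_method(self, context, instance, *args, **kwargs):
#         ...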
def check_instance_host(function):
@six.wraps(function)
def wrapped(self, context, instance, *args, **kwargs):
if not instance.host:
raise exception.InstanceNotReady(instance_id=instance.uuid)
return function(self, context, instance, *args, **kwargs)
return wrapped
def check_instance_lock(function):
@six.wraps(function)
def inner(self, context, instance, *args, **kwargs):
if instance.locked and not context.is_admin:
raise exception.InstanceIsLocked(instance_uuid=instance.uuid)
return function(self, context, instance, *args, **kwargs)
return inner
def check_instance_cell(fn):
@six.wraps(fn)
def _wrapped(self, context, instance, *args, **kwargs):
self._validate_cell(instance)
return fn(self, context, instance, *args, **kwargs)
return _wrapped
def _diff_dict(orig, new):
"""Return a dict describing how to change orig to new. The keys
correspond to values that have changed; the value will be a list
of one or two elements. The first element of the list will be
either '+' or '-', indicating whether the key was updated or
deleted; if the key was updated, the list will contain a second
element, giving the updated value.
"""
# Figure out what keys went away
result = {k: ['-'] for k in set(orig.keys()) - set(new.keys())}
# Compute the updates
for key, value in new.items():
if key not in orig or value != orig[key]:
result[key] = ['+', value]
return result
def load_cells():
global CELLS
if not CELLS:
CELLS = objects.CellMappingList.get_all(
nova_context.get_admin_context())
LOG.debug('Found %(count)i cells: %(cells)s',
dict(count=len(CELLS),
cells=','.join([c.identity for c in CELLS])))
if not CELLS:
LOG.error('No cells are configured, unable to continue')
@profiler.trace_cls("compute_api")
class API(base.Base):
"""API for interacting with the compute manager."""
def __init__(self, image_api=None, network_api=None, volume_api=None,
security_group_api=None, **kwargs):
self.image_api = image_api or image.API()
self.network_api = network_api or network.API()
self.volume_api = volume_api or cinder.API()
# NOTE(mriedem): This looks a bit weird but we get the reportclient
# via SchedulerClient since it lazy-loads SchedulerReportClient on
# the first usage which helps to avoid a bunch of lockutils spam in
# the nova-api logs every time the service is restarted (remember
# that pretty much all of the REST API controllers construct this
# API class).
self.placementclient = scheduler_client.SchedulerClient().reportclient
self.security_group_api = (security_group_api or
openstack_driver.get_openstack_security_group_driver())
self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.compute_task_api = conductor.ComputeTaskAPI()
self.servicegroup_api = servicegroup.API()
self.notifier = rpc.get_notifier('compute', CONF.host)
if CONF.ephemeral_storage_encryption.enabled:
self.key_manager = key_manager.API()
# Help us to record host in EventReporter
self.host = CONF.host
super(API, self).__init__(**kwargs)
@property
def cell_type(self):
return getattr(self, '_cell_type', cells_opts.get_cell_type())
def _validate_cell(self, instance):
if self.cell_type != 'api':
return
cell_name = instance.cell_name
if not cell_name:
raise exception.InstanceUnknownCell(
instance_uuid=instance.uuid)
def _record_action_start(self, context, instance, action):
objects.InstanceAction.action_start(context, instance.uuid,
action, want_result=False)
def _check_injected_file_quota(self, context, injected_files):
"""Enforce quota limits on injected files.
Raises a QuotaError if any limit is exceeded.
"""
if injected_files is None:
return
# Check number of files first
try:
objects.Quotas.limit_check(context,
injected_files=len(injected_files))
except exception.OverQuota:
raise exception.OnsetFileLimitExceeded()
# OK, now count path and content lengths; we're looking for
# the max...
max_path = 0
max_content = 0
for path, content in injected_files:
max_path = max(max_path, len(path))
max_content = max(max_content, len(content))
try:
objects.Quotas.limit_check(context,
injected_file_path_bytes=max_path,
injected_file_content_bytes=max_content)
except exception.OverQuota as exc:
# Favor path limit over content limit for reporting
# purposes
if 'injected_file_path_bytes' in exc.kwargs['overs']:
raise exception.OnsetFilePathLimitExceeded(
allowed=exc.kwargs['quotas']['injected_file_path_bytes'])
else:
raise exception.OnsetFileContentLimitExceeded(
allowed=exc.kwargs['quotas']['injected_file_content_bytes'])
def _check_metadata_properties_quota(self, context, metadata=None):
"""Enforce quota limits on metadata properties."""
if not metadata:
metadata = {}
if not isinstance(metadata, dict):
msg = (_("Metadata type should be dict."))
raise exception.InvalidMetadata(reason=msg)
num_metadata = len(metadata)
try:
objects.Quotas.limit_check(context, metadata_items=num_metadata)
except exception.OverQuota as exc:
quota_metadata = exc.kwargs['quotas']['metadata_items']
raise exception.MetadataLimitExceeded(allowed=quota_metadata)
# Because metadata is stored in the DB, we hard-code the size limits
# In future, we may support more variable length strings, so we act
# as if this is quota-controlled for forwards compatibility.
# Those are only used in V2 API, from V2.1 API, those checks are
# validated at API layer schema validation.
for k, v in metadata.items():
try:
utils.check_string_length(v)
utils.check_string_length(k, min_length=1)
except exception.InvalidInput as e:
raise exception.InvalidMetadata(reason=e.format_message())
if len(k) > 255:
msg = _("Metadata property key greater than 255 characters")
raise exception.InvalidMetadataSize(reason=msg)
if len(v) > 255:
msg = _("Metadata property value greater than 255 characters")
raise exception.InvalidMetadataSize(reason=msg)
def _check_requested_secgroups(self, context, secgroups):
"""Check if the security group requested exists and belongs to
the project.
:param context: The nova request context.
:type context: nova.context.RequestContext
:param secgroups: list of requested security group names, or uuids in
the case of Neutron.
:type secgroups: list
:returns: list of requested security group names unmodified if using
nova-network. If using Neutron, the list returned is all uuids.
Note that 'default' is a special case and will be unmodified if
it's requested.
"""
security_groups = []
for secgroup in secgroups:
# NOTE(sdague): default is handled special
if secgroup == "default":
security_groups.append(secgroup)
continue
secgroup_dict = self.security_group_api.get(context, secgroup)
if not secgroup_dict:
raise exception.SecurityGroupNotFoundForProject(
project_id=context.project_id, security_group_id=secgroup)
# Check to see if it's a nova-network or neutron type.
if isinstance(secgroup_dict['id'], int):
# This is nova-network so just return the requested name.
security_groups.append(secgroup)
else:
# The id for neutron is a uuid, so we return the id (uuid).
security_groups.append(secgroup_dict['id'])
return security_groups
def _check_requested_networks(self, context, requested_networks,
max_count):
"""Check if the networks requested belongs to the project
and the fixed IP address for each network provided is within
same the network block
"""
if requested_networks is not None:
if requested_networks.no_allocate:
# If the network request was specifically 'none' meaning don't
# allocate any networks, we just return the number of requested
# instances since quotas don't change at all.
return max_count
# NOTE(danms): Temporary transition
requested_networks = requested_networks.as_tuples()
return self.network_api.validate_networks(context, requested_networks,
max_count)
def _handle_kernel_and_ramdisk(self, context, kernel_id, ramdisk_id,
image):
"""Choose kernel and ramdisk appropriate for the instance.
The kernel and ramdisk can be chosen in one of two ways:
1. Passed in with create-instance request.
2. Inherited from image metadata.
If inherited from image metadata, and if that image metadata value is
set to 'nokernel', both kernel and ramdisk will default to None.
"""
# Inherit from image if not specified
image_properties = image.get('properties', {})
if kernel_id is None:
kernel_id = image_properties.get('kernel_id')
if ramdisk_id is None:
ramdisk_id = image_properties.get('ramdisk_id')
# Force to None if kernel_id indicates that a kernel is not to be used
if kernel_id == 'nokernel':
kernel_id = None
ramdisk_id = None
# Verify kernel and ramdisk exist (fail-fast)
if kernel_id is not None:
kernel_image = self.image_api.get(context, kernel_id)
# kernel_id could have been a URI, not a UUID, so to keep behaviour
# from before, which leaked that implementation detail out to the
# caller, we return the image UUID of the kernel image and ramdisk
# image (below) and not any image URIs that might have been
# supplied.
# TODO(jaypipes): Get rid of this silliness once we move to a real
# Image object and hide all of that stuff within nova.image.api.
kernel_id = kernel_image['id']
if ramdisk_id is not None:
ramdisk_image = self.image_api.get(context, ramdisk_id)
ramdisk_id = ramdisk_image['id']
return kernel_id, ramdisk_id
@staticmethod
def parse_availability_zone(context, availability_zone):
# NOTE(vish): We have a legacy hack to allow admins to specify hosts
# via az using az:host:node. It might be nice to expose an
# api to specify specific hosts to force onto, but for
# now it just supports this legacy hack.
# NOTE(deva): It is also possible to specify az::node, in which case
# the host manager will determine the correct host.
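        # Illustrative examples of the accepted formats (added sketch, not
        # part of the original NOTEs):
        #   'az1'             -> ('az1', None, None)
        #   'az1:host1'       -> ('az1', 'host1', None)
        #   'az1::node1'      -> ('az1', None, 'node1')
        #   'az1:host1:node1' -> ('az1', 'host1', 'node1')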
forced_host = None
forced_node = None
if availability_zone and ':' in availability_zone:
c = availability_zone.count(':')
if c == 1:
availability_zone, forced_host = availability_zone.split(':')
elif c == 2:
if '::' in availability_zone:
availability_zone, forced_node = \
availability_zone.split('::')
else:
availability_zone, forced_host, forced_node = \
availability_zone.split(':')
else:
raise exception.InvalidInput(
reason="Unable to parse availability_zone")
if not availability_zone:
availability_zone = CONF.default_schedule_zone
return availability_zone, forced_host, forced_node
def _ensure_auto_disk_config_is_valid(self, auto_disk_config_img,
auto_disk_config, image):
auto_disk_config_disabled = \
utils.is_auto_disk_config_disabled(auto_disk_config_img)
if auto_disk_config_disabled and auto_disk_config:
raise exception.AutoDiskConfigDisabledByImage(image=image)
def _inherit_properties_from_image(self, image, auto_disk_config):
image_properties = image.get('properties', {})
auto_disk_config_img = \
utils.get_auto_disk_config_from_image_props(image_properties)
self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
auto_disk_config,
image.get("id"))
if auto_disk_config is None:
auto_disk_config = strutils.bool_from_string(auto_disk_config_img)
return {
'os_type': image_properties.get('os_type'),
'architecture': image_properties.get('architecture'),
'vm_mode': image_properties.get('vm_mode'),
'auto_disk_config': auto_disk_config
}
def _check_config_drive(self, config_drive):
if config_drive:
try:
bool_val = strutils.bool_from_string(config_drive,
strict=True)
except ValueError:
raise exception.ConfigDriveInvalidValue(option=config_drive)
else:
bool_val = False
# FIXME(comstud): Bug ID 1193438 filed for this. This looks silly,
# but this is because the config drive column is a String. False
# is represented by using an empty string. And for whatever
# reason, we rely on the DB to cast True to a String.
return True if bool_val else ''
def _check_requested_image(self, context, image_id, image,
instance_type, root_bdm):
if not image:
return
if image['status'] != 'active':
raise exception.ImageNotActive(image_id=image_id)
image_properties = image.get('properties', {})
config_drive_option = image_properties.get(
'img_config_drive', 'optional')
if config_drive_option not in ['optional', 'mandatory']:
raise exception.InvalidImageConfigDrive(
config_drive=config_drive_option)
if instance_type['memory_mb'] < int(image.get('min_ram') or 0):
raise exception.FlavorMemoryTooSmall()
# Image min_disk is in gb, size is in bytes. For sanity, have them both
# in bytes.
image_min_disk = int(image.get('min_disk') or 0) * units.Gi
image_size = int(image.get('size') or 0)
# Target disk is a volume. Don't check flavor disk size because it
# doesn't make sense, and check min_disk against the volume size.
if (root_bdm is not None and root_bdm.is_volume):
# There are 2 possibilities here: either the target volume already
# exists, or it doesn't, in which case the bdm will contain the
# intended volume size.
#
# Cinder does its own check against min_disk, so if the target
# volume already exists this has already been done and we don't
# need to check it again here. In this case, volume_size may not be
# set on the bdm.
#
# If we're going to create the volume, the bdm will contain
# volume_size. Therefore we should check it if it exists. This will
# still be checked again by cinder when the volume is created, but
# that will not happen until the request reaches a host. By
# checking it here, the user gets an immediate and useful failure
# indication.
#
# The third possibility is that we have failed to consider
# something, and there are actually more than 2 possibilities. In
# this case cinder will still do the check at volume creation time.
# The behaviour will still be correct, but the user will not get an
# immediate failure from the api, and will instead have to
# determine why the instance is in an error state with a task of
# block_device_mapping.
#
# We could reasonably refactor this check into _validate_bdm at
# some future date, as the various size logic is already split out
# in there.
dest_size = root_bdm.volume_size
if dest_size is not None:
dest_size *= units.Gi
if image_min_disk > dest_size:
raise exception.VolumeSmallerThanMinDisk(
volume_size=dest_size, image_min_disk=image_min_disk)
# Target disk is a local disk whose size is taken from the flavor
else:
dest_size = instance_type['root_gb'] * units.Gi
# NOTE(johannes): root_gb is allowed to be 0 for legacy reasons
# since libvirt interpreted the value differently than other
# drivers. A value of 0 means don't check size.
if dest_size != 0:
if image_size > dest_size:
raise exception.FlavorDiskSmallerThanImage(
flavor_size=dest_size, image_size=image_size)
if image_min_disk > dest_size:
raise exception.FlavorDiskSmallerThanMinDisk(
flavor_size=dest_size, image_min_disk=image_min_disk)
else:
# The user is attempting to create a server with a 0-disk
# image-backed flavor, which can lead to issues with a large
# image consuming an unexpectedly large amount of local disk
# on the compute host. Check to see if the deployment will
# allow that.
if not context.can(
servers_policies.ZERO_DISK_FLAVOR, fatal=False):
raise exception.BootFromVolumeRequiredForZeroDiskFlavor()
def _get_image_defined_bdms(self, instance_type, image_meta,
root_device_name):
image_properties = image_meta.get('properties', {})
# Get the block device mappings defined by the image.
image_defined_bdms = image_properties.get('block_device_mapping', [])
legacy_image_defined = not image_properties.get('bdm_v2', False)
image_mapping = image_properties.get('mappings', [])
if legacy_image_defined:
image_defined_bdms = block_device.from_legacy_mapping(
image_defined_bdms, None, root_device_name)
else:
image_defined_bdms = list(map(block_device.BlockDeviceDict,
image_defined_bdms))
if image_mapping:
image_mapping = self._prepare_image_mapping(instance_type,
image_mapping)
image_defined_bdms = self._merge_bdms_lists(
image_mapping, image_defined_bdms)
return image_defined_bdms
def _get_flavor_defined_bdms(self, instance_type, block_device_mapping):
flavor_defined_bdms = []
have_ephemeral_bdms = any(filter(
block_device.new_format_is_ephemeral, block_device_mapping))
have_swap_bdms = any(filter(
block_device.new_format_is_swap, block_device_mapping))
if instance_type.get('ephemeral_gb') and not have_ephemeral_bdms:
flavor_defined_bdms.append(
block_device.create_blank_bdm(instance_type['ephemeral_gb']))
if instance_type.get('swap') and not have_swap_bdms:
flavor_defined_bdms.append(
block_device.create_blank_bdm(instance_type['swap'], 'swap'))
return flavor_defined_bdms
def _merge_bdms_lists(self, overridable_mappings, overrider_mappings):
"""Override any block devices from the first list by device name
:param overridable_mappings: list which items are overridden
:param overrider_mappings: list which items override
:returns: A merged list of bdms
"""
device_names = set(bdm['device_name'] for bdm in overrider_mappings
if bdm['device_name'])
return (overrider_mappings +
[bdm for bdm in overridable_mappings
if bdm['device_name'] not in device_names])
def _check_and_transform_bdm(self, context, base_options, instance_type,
image_meta, min_count, max_count,
block_device_mapping, legacy_bdm):
# NOTE (ndipanov): Assume root dev name is 'vda' if not supplied.
# It's needed for legacy conversion to work.
root_device_name = (base_options.get('root_device_name') or 'vda')
image_ref = base_options.get('image_ref', '')
# If the instance is booted by image and has a volume attached,
# the volume cannot have the same device name as root_device_name
if image_ref:
for bdm in block_device_mapping:
if (bdm.get('destination_type') == 'volume' and
block_device.strip_dev(bdm.get(
'device_name')) == root_device_name):
msg = _('The volume cannot be assigned the same device'
' name as the root device %s') % root_device_name
raise exception.InvalidRequest(msg)
image_defined_bdms = self._get_image_defined_bdms(
instance_type, image_meta, root_device_name)
root_in_image_bdms = (
block_device.get_root_bdm(image_defined_bdms) is not None)
if legacy_bdm:
block_device_mapping = block_device.from_legacy_mapping(
block_device_mapping, image_ref, root_device_name,
no_root=root_in_image_bdms)
elif root_in_image_bdms:
# NOTE (ndipanov): client will insert an image mapping into the v2
# block_device_mapping, but if there is a bootable device in image
# mappings - we need to get rid of the inserted image
# NOTE (gibi): another case is when a server is booted with an
# image to bdm mapping where the image only contains a bdm to a
# snapshot. In this case the other image to bdm mapping
# contains an unnecessary device with boot_index == 0.
# Also in this case the image_ref is None as we are booting from
# an image to volume bdm.
def not_image_and_root_bdm(bdm):
return not (bdm.get('boot_index') == 0 and
bdm.get('source_type') == 'image')
block_device_mapping = list(
filter(not_image_and_root_bdm, block_device_mapping))
block_device_mapping = self._merge_bdms_lists(
image_defined_bdms, block_device_mapping)
if min_count > 1 or max_count > 1:
if any(map(lambda bdm: bdm['source_type'] == 'volume',
block_device_mapping)):
msg = _('Cannot attach one or more volumes to multiple'
' instances')
raise exception.InvalidRequest(msg)
block_device_mapping += self._get_flavor_defined_bdms(
instance_type, block_device_mapping)
return block_device_obj.block_device_make_list_from_dicts(
context, block_device_mapping)
def _get_image(self, context, image_href):
if not image_href:
return None, {}
image = self.image_api.get(context, image_href)
return image['id'], image
def _checks_for_create_and_rebuild(self, context, image_id, image,
instance_type, metadata,
files_to_inject, root_bdm):
self._check_metadata_properties_quota(context, metadata)
self._check_injected_file_quota(context, files_to_inject)
self._check_requested_image(context, image_id, image,
instance_type, root_bdm)
def _validate_and_build_base_options(self, context, instance_type,
boot_meta, image_href, image_id,
kernel_id, ramdisk_id, display_name,
display_description, key_name,
key_data, security_groups,
availability_zone, user_data,
metadata, access_ip_v4, access_ip_v6,
requested_networks, config_drive,
auto_disk_config, reservation_id,
max_count):
"""Verify all the input parameters regardless of the provisioning
strategy being performed.
"""
if instance_type['disabled']:
raise exception.FlavorNotFound(flavor_id=instance_type['id'])
if user_data:
try:
base64utils.decode_as_bytes(user_data)
except TypeError:
raise exception.InstanceUserDataMalformed()
# When using Neutron, _check_requested_secgroups will translate and
# return any requested security group names to uuids.
security_groups = (
self._check_requested_secgroups(context, security_groups))
# Note: max_count is the number of instances requested by the user,
# max_network_count is the maximum number of instances taking into
# account any network quotas
max_network_count = self._check_requested_networks(context,
requested_networks, max_count)
kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
context, kernel_id, ramdisk_id, boot_meta)
config_drive = self._check_config_drive(config_drive)
if key_data is None and key_name is not None:
key_pair = objects.KeyPair.get_by_name(context,
context.user_id,
key_name)
key_data = key_pair.public_key
else:
key_pair = None
root_device_name = block_device.prepend_dev(
block_device.properties_root_device_name(
boot_meta.get('properties', {})))
try:
image_meta = objects.ImageMeta.from_dict(boot_meta)
except ValueError as e:
# there must be invalid values in the image meta properties so
# consider this an invalid request
msg = _('Invalid image metadata. Error: %s') % six.text_type(e)
raise exception.InvalidRequest(msg)
numa_topology = hardware.numa_get_constraints(
instance_type, image_meta)
system_metadata = {}
# PCI requests come from two sources: instance flavor and
# requested_networks. The first call in below returns an
# InstancePCIRequests object which is a list of InstancePCIRequest
# objects. The second call in below creates an InstancePCIRequest
# object for each SR-IOV port, and append it to the list in the
# InstancePCIRequests object
pci_request_info = pci_request.get_pci_requests_from_flavor(
instance_type)
network_metadata = self.network_api.create_resource_requests(
context, requested_networks, pci_request_info)
base_options = {
'reservation_id': reservation_id,
'image_ref': image_href,
'kernel_id': kernel_id or '',
'ramdisk_id': ramdisk_id or '',
'power_state': power_state.NOSTATE,
'vm_state': vm_states.BUILDING,
'config_drive': config_drive,
'user_id': context.user_id,
'project_id': context.project_id,
'instance_type_id': instance_type['id'],
'memory_mb': instance_type['memory_mb'],
'vcpus': instance_type['vcpus'],
'root_gb': instance_type['root_gb'],
'ephemeral_gb': instance_type['ephemeral_gb'],
'display_name': display_name,
'display_description': display_description,
'user_data': user_data,
'key_name': key_name,
'key_data': key_data,
'locked': False,
'metadata': metadata or {},
'access_ip_v4': access_ip_v4,
'access_ip_v6': access_ip_v6,
'availability_zone': availability_zone,
'root_device_name': root_device_name,
'progress': 0,
'pci_requests': pci_request_info,
'numa_topology': numa_topology,
'system_metadata': system_metadata}
options_from_image = self._inherit_properties_from_image(
boot_meta, auto_disk_config)
base_options.update(options_from_image)
# return the validated options and maximum number of instances allowed
# by the network quotas
return (base_options, max_network_count, key_pair, security_groups,
network_metadata)
def _provision_instances(self, context, instance_type, min_count,
max_count, base_options, boot_meta, security_groups,
block_device_mapping, shutdown_terminate,
instance_group, check_server_group_quota, filter_properties,
key_pair, tags, trusted_certs, supports_multiattach,
network_metadata=None):
# Check quotas
num_instances = compute_utils.check_num_instances_quota(
context, instance_type, min_count, max_count)
security_groups = self.security_group_api.populate_security_groups(
security_groups)
self.security_group_api.ensure_default(context)
LOG.debug("Going to run %s instances...", num_instances)
instances_to_build = []
try:
for i in range(num_instances):
# Create a uuid for the instance so we can store the
# RequestSpec before the instance is created.
instance_uuid = uuidutils.generate_uuid()
# Store the RequestSpec that will be used for scheduling.
req_spec = objects.RequestSpec.from_components(context,
instance_uuid, boot_meta, instance_type,
base_options['numa_topology'],
base_options['pci_requests'], filter_properties,
instance_group, base_options['availability_zone'],
security_groups=security_groups)
if block_device_mapping:
# Record whether or not we are a BFV instance
root = block_device_mapping.root_bdm()
req_spec.is_bfv = bool(root and root.is_volume)
else:
# If we have no BDMs, we're clearly not BFV
req_spec.is_bfv = False
# NOTE(danms): We need to record num_instances on the request
# spec as this is how the conductor knows how many were in this
# batch.
req_spec.num_instances = num_instances
req_spec.create()
# NOTE(stephenfin): The network_metadata field is not persisted
# and is therefore set after 'create' is called.
if network_metadata:
req_spec.network_metadata = network_metadata
# Create an instance object, but do not store in db yet.
instance = objects.Instance(context=context)
instance.uuid = instance_uuid
instance.update(base_options)
instance.keypairs = objects.KeyPairList(objects=[])
if key_pair:
instance.keypairs.objects.append(key_pair)
instance.trusted_certs = self._retrieve_trusted_certs_object(
context, trusted_certs)
instance = self.create_db_entry_for_new_instance(context,
instance_type, boot_meta, instance, security_groups,
block_device_mapping, num_instances, i,
shutdown_terminate, create_instance=False)
block_device_mapping = (
self._bdm_validate_set_size_and_instance(context,
instance, instance_type, block_device_mapping,
supports_multiattach))
instance_tags = self._transform_tags(tags, instance.uuid)
build_request = objects.BuildRequest(context,
instance=instance, instance_uuid=instance.uuid,
project_id=instance.project_id,
block_device_mappings=block_device_mapping,
tags=instance_tags)
build_request.create()
# Create an instance_mapping. The null cell_mapping indicates
# that the instance doesn't yet exist in a cell, and lookups
# for it need to instead look for the RequestSpec.
# cell_mapping will be populated after scheduling, with a
# scheduling failure using the cell_mapping for the special
# cell0.
inst_mapping = objects.InstanceMapping(context=context)
inst_mapping.instance_uuid = instance_uuid
inst_mapping.project_id = context.project_id
inst_mapping.cell_mapping = None
inst_mapping.create()
instances_to_build.append(
(req_spec, build_request, inst_mapping))
if instance_group:
if check_server_group_quota:
try:
objects.Quotas.check_deltas(
context, {'server_group_members': 1},
instance_group, context.user_id)
except exception.OverQuota:
msg = _("Quota exceeded, too many servers in "
"group")
raise exception.QuotaError(msg)
members = objects.InstanceGroup.add_members(
context, instance_group.uuid, [instance.uuid])
# NOTE(melwitt): We recheck the quota after creating the
# object to prevent users from allocating more resources
# than their allowed quota in the event of a race. This is
# configurable because it can be expensive if strict quota
# limits are not required in a deployment.
if CONF.quota.recheck_quota and check_server_group_quota:
try:
objects.Quotas.check_deltas(
context, {'server_group_members': 0},
instance_group, context.user_id)
except exception.OverQuota:
objects.InstanceGroup._remove_members_in_db(
context, instance_group.id, [instance.uuid])
msg = _("Quota exceeded, too many servers in "
"group")
raise exception.QuotaError(msg)
                    # The list of members added to the server group in this
                    # iteration is needed to check the server group quota
                    # when adding the next instance.
instance_group.members.extend(members)
# In the case of any exceptions, attempt DB cleanup
except Exception:
with excutils.save_and_reraise_exception():
self._cleanup_build_artifacts(None, instances_to_build)
return instances_to_build
@staticmethod
def _retrieve_trusted_certs_object(context, trusted_certs, rebuild=False):
"""Convert user-requested trusted cert IDs to TrustedCerts object
Also validates that the deployment is new enough to support trusted
image certification validation.
:param context: The user request auth context
:param trusted_certs: list of user-specified trusted cert string IDs,
may be None
:param rebuild: True if rebuilding the server, False if creating a
new server
:returns: nova.objects.TrustedCerts object or None if no user-specified
trusted cert IDs were given and nova is not configured with
default trusted cert IDs
:raises: nova.exception.CertificateValidationNotYetAvailable: If
rebuilding a server with trusted certs on a compute host that is
            too old to support trusted image cert validation, or if creating
a server with trusted certs and there are no compute hosts in the
deployment that are new enough to support trusted image cert
validation
"""
# Retrieve trusted_certs parameter, or use CONF value if certificate
# validation is enabled
if trusted_certs:
certs_to_return = objects.TrustedCerts(ids=trusted_certs)
elif (CONF.glance.verify_glance_signatures and
CONF.glance.enable_certificate_validation and
CONF.glance.default_trusted_certificate_ids):
certs_to_return = objects.TrustedCerts(
ids=CONF.glance.default_trusted_certificate_ids)
else:
return None
# Confirm trusted_certs are supported by the minimum nova
# compute service version
# TODO(mriedem): This minimum version compat code can be dropped in the
# 19.0.0 Stein release when all computes must be at a minimum running
# Rocky code.
if rebuild:
# we only care about the current cell since this is
# a rebuild
min_compute_version = objects.Service.get_minimum_version(
context, 'nova-compute')
else:
# we don't know which cell it's going to get scheduled
# to, so check all cells
# NOTE(mriedem): For multi-create server requests, we're hitting
# this for each instance since it's not cached; we could likely
# optimize this.
min_compute_version = \
objects.service.get_minimum_version_all_cells(
context, ['nova-compute'])
if min_compute_version < MIN_COMPUTE_TRUSTED_CERTS:
raise exception.CertificateValidationNotYetAvailable()
return certs_to_return
def _get_bdm_image_metadata(self, context, block_device_mapping,
legacy_bdm=True):
"""If we are booting from a volume, we need to get the
volume details from Cinder and make sure we pass the
metadata back accordingly.
"""
if not block_device_mapping:
return {}
for bdm in block_device_mapping:
if (legacy_bdm and
block_device.get_device_letter(
bdm.get('device_name', '')) != 'a'):
continue
elif not legacy_bdm and bdm.get('boot_index') != 0:
continue
volume_id = bdm.get('volume_id')
snapshot_id = bdm.get('snapshot_id')
if snapshot_id:
# NOTE(alaski): A volume snapshot inherits metadata from the
# originating volume, but the API does not expose metadata
# on the snapshot itself. So we query the volume for it below.
snapshot = self.volume_api.get_snapshot(context, snapshot_id)
volume_id = snapshot['volume_id']
if bdm.get('image_id'):
try:
image_id = bdm['image_id']
image_meta = self.image_api.get(context, image_id)
return image_meta
except Exception:
raise exception.InvalidBDMImage(id=image_id)
elif volume_id:
try:
volume = self.volume_api.get(context, volume_id)
except exception.CinderConnectionFailed:
raise
except Exception:
raise exception.InvalidBDMVolume(id=volume_id)
if not volume.get('bootable', True):
raise exception.InvalidBDMVolumeNotBootable(id=volume_id)
return utils.get_image_metadata_from_volume(volume)
return {}
@staticmethod
def _get_requested_instance_group(context, filter_properties):
if (not filter_properties or
not filter_properties.get('scheduler_hints')):
return
group_hint = filter_properties.get('scheduler_hints').get('group')
if not group_hint:
return
return objects.InstanceGroup.get_by_uuid(context, group_hint)
def _create_instance(self, context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
display_name, display_description,
key_name, key_data, security_groups,
availability_zone, user_data, metadata, injected_files,
admin_password, access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config, filter_properties,
reservation_id=None, legacy_bdm=True, shutdown_terminate=False,
check_server_group_quota=False, tags=None,
supports_multiattach=False, trusted_certs=None):
"""Verify all the input parameters regardless of the provisioning
strategy being performed and schedule the instance(s) for
creation.
"""
# Normalize and setup some parameters
if reservation_id is None:
reservation_id = utils.generate_uid('r')
security_groups = security_groups or ['default']
min_count = min_count or 1
max_count = max_count or min_count
block_device_mapping = block_device_mapping or []
tags = tags or []
if image_href:
image_id, boot_meta = self._get_image(context, image_href)
else:
# This is similar to the logic in _retrieve_trusted_certs_object.
if (trusted_certs or
(CONF.glance.verify_glance_signatures and
CONF.glance.enable_certificate_validation and
CONF.glance.default_trusted_certificate_ids)):
msg = _("Image certificate validation is not supported "
"when booting from volume")
raise exception.CertificateValidationFailed(message=msg)
image_id = None
boot_meta = self._get_bdm_image_metadata(
context, block_device_mapping, legacy_bdm)
self._check_auto_disk_config(image=boot_meta,
auto_disk_config=auto_disk_config)
base_options, max_net_count, key_pair, security_groups, \
network_metadata = self._validate_and_build_base_options(
context, instance_type, boot_meta, image_href, image_id,
kernel_id, ramdisk_id, display_name, display_description,
key_name, key_data, security_groups, availability_zone,
user_data, metadata, access_ip_v4, access_ip_v6,
requested_networks, config_drive, auto_disk_config,
reservation_id, max_count)
# max_net_count is the maximum number of instances requested by the
# user adjusted for any network quota constraints, including
# consideration of connections to each requested network
if max_net_count < min_count:
raise exception.PortLimitExceeded()
elif max_net_count < max_count:
LOG.info("max count reduced from %(max_count)d to "
"%(max_net_count)d due to network port quota",
{'max_count': max_count,
'max_net_count': max_net_count})
max_count = max_net_count
block_device_mapping = self._check_and_transform_bdm(context,
base_options, instance_type, boot_meta, min_count, max_count,
block_device_mapping, legacy_bdm)
# We can't do this check earlier because we need bdms from all sources
# to have been merged in order to get the root bdm.
self._checks_for_create_and_rebuild(context, image_id, boot_meta,
instance_type, metadata, injected_files,
block_device_mapping.root_bdm())
instance_group = self._get_requested_instance_group(context,
filter_properties)
tags = self._create_tag_list_obj(context, tags)
instances_to_build = self._provision_instances(
context, instance_type, min_count, max_count, base_options,
boot_meta, security_groups, block_device_mapping,
shutdown_terminate, instance_group, check_server_group_quota,
filter_properties, key_pair, tags, trusted_certs,
supports_multiattach, network_metadata)
instances = []
request_specs = []
build_requests = []
for rs, build_request, im in instances_to_build:
build_requests.append(build_request)
instance = build_request.get_new_instance(context)
instances.append(instance)
request_specs.append(rs)
if CONF.cells.enable:
# NOTE(danms): CellsV1 can't do the new thing, so we
# do the old thing here. We can remove this path once
# we stop supporting v1.
for instance in instances:
instance.create()
# NOTE(melwitt): We recheck the quota after creating the objects
# to prevent users from allocating more resources than their
# allowed quota in the event of a race. This is configurable
# because it can be expensive if strict quota limits are not
# required in a deployment.
if CONF.quota.recheck_quota:
try:
compute_utils.check_num_instances_quota(
context, instance_type, 0, 0,
orig_num_req=len(instances))
except exception.TooManyInstances:
with excutils.save_and_reraise_exception():
# Need to clean up all the instances we created
# along with the build requests, request specs,
# and instance mappings.
self._cleanup_build_artifacts(instances,
instances_to_build)
self.compute_task_api.build_instances(context,
instances=instances, image=boot_meta,
filter_properties=filter_properties,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping,
legacy_bdm=False)
else:
self.compute_task_api.schedule_and_build_instances(
context,
build_requests=build_requests,
request_spec=request_specs,
image=boot_meta,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
block_device_mapping=block_device_mapping,
tags=tags)
return instances, reservation_id
@staticmethod
def _cleanup_build_artifacts(instances, instances_to_build):
# instances_to_build is a list of tuples:
# (RequestSpec, BuildRequest, InstanceMapping)
# Be paranoid about artifacts being deleted underneath us.
for instance in instances or []:
try:
instance.destroy()
except exception.InstanceNotFound:
pass
for rs, build_request, im in instances_to_build or []:
try:
rs.destroy()
except exception.RequestSpecNotFound:
pass
try:
build_request.destroy()
except exception.BuildRequestNotFound:
pass
try:
im.destroy()
except exception.InstanceMappingNotFound:
pass
@staticmethod
def _volume_size(instance_type, bdm):
size = bdm.get('volume_size')
# NOTE (ndipanov): inherit flavor size only for swap and ephemeral
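        # e.g. (illustrative): a blank/local BDM with guest_format 'swap'
        # inherits instance_type['swap'], while any other blank/local BDM
        # with no explicit size inherits instance_type['ephemeral_gb'].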
if (size is None and bdm.get('source_type') == 'blank' and
bdm.get('destination_type') == 'local'):
if bdm.get('guest_format') == 'swap':
size = instance_type.get('swap', 0)
else:
size = instance_type.get('ephemeral_gb', 0)
return size
def _prepare_image_mapping(self, instance_type, mappings):
"""Extract and format blank devices from image mappings."""
prepared_mappings = []
for bdm in block_device.mappings_prepend_dev(mappings):
LOG.debug("Image bdm %s", bdm)
virtual_name = bdm['virtual']
if virtual_name == 'ami' or virtual_name == 'root':
continue
if not block_device.is_swap_or_ephemeral(virtual_name):
continue
guest_format = bdm.get('guest_format')
if virtual_name == 'swap':
guest_format = 'swap'
if not guest_format:
guest_format = CONF.default_ephemeral_format
values = block_device.BlockDeviceDict({
'device_name': bdm['device'],
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': guest_format,
'delete_on_termination': True,
'boot_index': -1})
values['volume_size'] = self._volume_size(
instance_type, values)
if values['volume_size'] == 0:
continue
prepared_mappings.append(values)
return prepared_mappings
def _bdm_validate_set_size_and_instance(self, context, instance,
instance_type,
block_device_mapping,
supports_multiattach=False):
"""Ensure the bdms are valid, then set size and associate with instance
Because this method can be called multiple times when more than one
instance is booted in a single request it makes a copy of the bdm list.
"""
LOG.debug("block_device_mapping %s", list(block_device_mapping),
instance_uuid=instance.uuid)
self._validate_bdm(
context, instance, instance_type, block_device_mapping,
supports_multiattach)
instance_block_device_mapping = block_device_mapping.obj_clone()
for bdm in instance_block_device_mapping:
bdm.volume_size = self._volume_size(instance_type, bdm)
bdm.instance_uuid = instance.uuid
return instance_block_device_mapping
def _create_block_device_mapping(self, block_device_mapping):
# Copy the block_device_mapping because this method can be called
# multiple times when more than one instance is booted in a single
# request. This avoids 'id' being set and triggering the object dupe
# detection
db_block_device_mapping = copy.deepcopy(block_device_mapping)
# Create the BlockDeviceMapping objects in the db.
for bdm in db_block_device_mapping:
# TODO(alaski): Why is this done?
if bdm.volume_size == 0:
continue
bdm.update_or_create()
@staticmethod
def _check_requested_volume_type(bdm, volume_type_id_or_name,
volume_types):
"""If we are specifying a volume type, we need to get the
volume type details from Cinder and make sure the ``volume_type``
is available.
"""
# NOTE(brinzhang): Verify that the specified volume type exists.
# And save the volume type name internally for consistency in the
# BlockDeviceMapping object.
for vol_type in volume_types:
if (volume_type_id_or_name == vol_type['id'] or
volume_type_id_or_name == vol_type['name']):
bdm.volume_type = vol_type['name']
break
else:
raise exception.VolumeTypeNotFound(
id_or_name=volume_type_id_or_name)
@staticmethod
def _check_compute_supports_volume_type(context):
        # NOTE(brinzhang): Check the minimum nova-compute service version
        # across the deployment to make sure the requested bdm.volume_type
        # can be honored.
min_compute_version = objects.service.get_minimum_version_all_cells(
context, ['nova-compute'])
if min_compute_version < MIN_COMPUTE_VOLUME_TYPE:
raise exception.VolumeTypeSupportNotYetAvailable()
def _validate_bdm(self, context, instance, instance_type,
block_device_mappings, supports_multiattach=False):
# Make sure that the boot indexes make sense.
# Setting a negative value or None indicates that the device should not
# be used for booting.
boot_indexes = sorted([bdm.boot_index
for bdm in block_device_mappings
if bdm.boot_index is not None
and bdm.boot_index >= 0])
# Each device which is capable of being used as boot device should
# be given a unique boot index, starting from 0 in ascending order, and
# there needs to be at least one boot device.
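        # For example (illustrative): boot indexes [0] or [0, 1, 2] are
        # accepted, while [1] (no index 0) or [0, 2] (a gap) are rejected.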
if not boot_indexes or any(i != v for i, v in enumerate(boot_indexes)):
# Convert the BlockDeviceMappingList to a list for repr details.
LOG.debug('Invalid block device mapping boot sequence for '
'instance: %s', list(block_device_mappings),
instance=instance)
raise exception.InvalidBDMBootSequence()
volume_types = None
volume_type_is_supported = False
for bdm in block_device_mappings:
volume_type = bdm.volume_type
if volume_type:
if not volume_type_is_supported:
# The following method raises
# VolumeTypeSupportNotYetAvailable if the minimum
# nova-compute service version across the deployment is
# not new enough to support creating volumes with a
# specific type.
self._check_compute_supports_volume_type(context)
# Set the flag to avoid calling
# _check_compute_supports_volume_type more than once in
# this for loop.
volume_type_is_supported = True
if not volume_types:
                    # To reduce the number of Cinder API calls, initialize
                    # our cache of volume types once.
volume_types = self.volume_api.get_all_volume_types(
context)
# NOTE(brinzhang): Ensure the validity of volume_type.
self._check_requested_volume_type(bdm, volume_type,
volume_types)
# NOTE(vish): For now, just make sure the volumes are accessible.
# Additionally, check that the volume can be attached to this
# instance.
snapshot_id = bdm.snapshot_id
volume_id = bdm.volume_id
image_id = bdm.image_id
if (image_id is not None and
image_id != instance.get('image_ref')):
try:
self._get_image(context, image_id)
except Exception:
raise exception.InvalidBDMImage(id=image_id)
if (bdm.source_type == 'image' and
bdm.destination_type == 'volume' and
not bdm.volume_size):
raise exception.InvalidBDM(message=_("Images with "
"destination_type 'volume' need to have a non-zero "
"size specified"))
elif volume_id is not None:
try:
volume = self.volume_api.get(context, volume_id)
self._check_attach_and_reserve_volume(
context, volume, instance, bdm, supports_multiattach)
bdm.volume_size = volume.get('size')
# NOTE(mnaser): If we end up reserving the volume, it will
# not have an attachment_id which is needed
# for cleanups. This can be removed once
# all calls to reserve_volume are gone.
if 'attachment_id' not in bdm:
bdm.attachment_id = None
except (exception.CinderConnectionFailed,
exception.InvalidVolume,
exception.MultiattachNotSupportedOldMicroversion,
exception.MultiattachSupportNotYetAvailable):
raise
except exception.InvalidInput as exc:
raise exception.InvalidVolume(reason=exc.format_message())
except Exception:
raise exception.InvalidBDMVolume(id=volume_id)
elif snapshot_id is not None:
try:
snap = self.volume_api.get_snapshot(context, snapshot_id)
bdm.volume_size = bdm.volume_size or snap.get('size')
except exception.CinderConnectionFailed:
raise
except Exception:
raise exception.InvalidBDMSnapshot(id=snapshot_id)
elif (bdm.source_type == 'blank' and
bdm.destination_type == 'volume' and
not bdm.volume_size):
raise exception.InvalidBDM(message=_("Blank volumes "
"(source: 'blank', dest: 'volume') need to have non-zero "
"size"))
ephemeral_size = sum(bdm.volume_size or instance_type['ephemeral_gb']
for bdm in block_device_mappings
if block_device.new_format_is_ephemeral(bdm))
if ephemeral_size > instance_type['ephemeral_gb']:
raise exception.InvalidBDMEphemeralSize()
# There should be only one swap
swap_list = block_device.get_bdm_swap_list(block_device_mappings)
if len(swap_list) > 1:
msg = _("More than one swap drive requested.")
raise exception.InvalidBDMFormat(details=msg)
if swap_list:
swap_size = swap_list[0].volume_size or 0
if swap_size > instance_type['swap']:
raise exception.InvalidBDMSwapSize()
max_local = CONF.max_local_block_devices
if max_local >= 0:
num_local = len([bdm for bdm in block_device_mappings
if bdm.destination_type == 'local'])
if num_local > max_local:
raise exception.InvalidBDMLocalsLimit()
def _populate_instance_names(self, instance, num_instances, index):
"""Populate instance display_name and hostname.
:param instance: The instance to set the display_name, hostname for
:type instance: nova.objects.Instance
:param num_instances: Total number of instances being created in this
request
:param index: The 0-based index of this particular instance
"""
# NOTE(mriedem): This is only here for test simplicity since a server
# name is required in the REST API.
if 'display_name' not in instance or instance.display_name is None:
instance.display_name = 'Server %s' % instance.uuid
# if we're booting multiple instances, we need to add an indexing
# suffix to both instance.hostname and instance.display_name. This is
# not necessary for a single instance.
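        # For example (illustrative): a three-instance boot with display_name
        # 'web' yields display names 'web-1', 'web-2' and 'web-3'.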
if num_instances == 1:
default_hostname = 'Server-%s' % instance.uuid
instance.hostname = utils.sanitize_hostname(
instance.display_name, default_hostname)
elif num_instances > 1 and self.cell_type != 'api':
old_display_name = instance.display_name
new_display_name = '%s-%d' % (old_display_name, index + 1)
if utils.sanitize_hostname(old_display_name) == "":
instance.hostname = 'Server-%s' % instance.uuid
else:
instance.hostname = utils.sanitize_hostname(
new_display_name)
instance.display_name = new_display_name
def _populate_instance_for_create(self, context, instance, image,
index, security_groups, instance_type,
num_instances, shutdown_terminate):
"""Build the beginning of a new instance."""
instance.launch_index = index
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.SCHEDULING
info_cache = objects.InstanceInfoCache()
info_cache.instance_uuid = instance.uuid
info_cache.network_info = network_model.NetworkInfo()
instance.info_cache = info_cache
instance.flavor = instance_type
instance.old_flavor = None
instance.new_flavor = None
if CONF.ephemeral_storage_encryption.enabled:
# NOTE(kfarr): dm-crypt expects the cipher in a
# hyphenated format: cipher-chainmode-ivmode
# (ex: aes-xts-plain64). The algorithm needs
# to be parsed out to pass to the key manager (ex: aes).
cipher = CONF.ephemeral_storage_encryption.cipher
algorithm = cipher.split('-')[0] if cipher else None
instance.ephemeral_key_uuid = self.key_manager.create_key(
context,
algorithm=algorithm,
length=CONF.ephemeral_storage_encryption.key_size)
else:
instance.ephemeral_key_uuid = None
# Store image properties so we can use them later
# (for notifications, etc). Only store what we can.
if not instance.obj_attr_is_set('system_metadata'):
instance.system_metadata = {}
# Make sure we have the dict form that we need for instance_update.
instance.system_metadata = utils.instance_sys_meta(instance)
system_meta = utils.get_system_metadata_from_image(
image, instance_type)
# In case we couldn't find any suitable base_image
system_meta.setdefault('image_base_image_ref', instance.image_ref)
system_meta['owner_user_name'] = context.user_name
system_meta['owner_project_name'] = context.project_name
instance.system_metadata.update(system_meta)
if CONF.use_neutron:
# For Neutron we don't actually store anything in the database, we
# proxy the security groups on the instance from the ports
# attached to the instance.
instance.security_groups = objects.SecurityGroupList()
else:
instance.security_groups = security_groups
self._populate_instance_names(instance, num_instances, index)
instance.shutdown_terminate = shutdown_terminate
return instance
def _create_tag_list_obj(self, context, tags):
"""Create TagList objects from simple string tags.
:param context: security context.
:param tags: simple string tags from API request.
:returns: TagList object.
"""
tag_list = [objects.Tag(context=context, tag=t) for t in tags]
tag_list_obj = objects.TagList(objects=tag_list)
return tag_list_obj
def _transform_tags(self, tags, resource_id):
"""Change the resource_id of the tags according to the input param.
Because this method can be called multiple times when more than one
instance is booted in a single request it makes a copy of the tags
list.
:param tags: TagList object.
:param resource_id: string.
:returns: TagList object.
"""
instance_tags = tags.obj_clone()
for tag in instance_tags:
tag.resource_id = resource_id
return instance_tags
# This method remains because cellsv1 uses it in the scheduler
def create_db_entry_for_new_instance(self, context, instance_type, image,
instance, security_group, block_device_mapping, num_instances,
index, shutdown_terminate=False, create_instance=True):
"""Create an entry in the DB for this new instance,
including any related table updates (such as security group,
etc).
This is called by the scheduler after a location for the
instance has been determined.
:param create_instance: Determines if the instance is created here or
just populated for later creation. This is done so that this code
can be shared with cellsv1 which needs the instance creation to
happen here. It should be removed and this method cleaned up when
cellsv1 is a distant memory.
"""
self._populate_instance_for_create(context, instance, image, index,
security_group, instance_type,
num_instances, shutdown_terminate)
if create_instance:
instance.create()
return instance
def _check_multiple_instances_with_neutron_ports(self,
requested_networks):
"""Check whether multiple instances are created from port id(s)."""
for requested_net in requested_networks:
if requested_net.port_id:
msg = _("Unable to launch multiple instances with"
" a single configured port ID. Please launch your"
" instance one by one with different ports.")
raise exception.MultiplePortsNotApplicable(reason=msg)
def _check_multiple_instances_with_specified_ip(self, requested_networks):
"""Check whether multiple instances are created with specified ip."""
for requested_net in requested_networks:
if requested_net.network_id and requested_net.address:
msg = _("max_count cannot be greater than 1 if an fixed_ip "
"is specified.")
raise exception.InvalidFixedIpAndMaxCountRequest(reason=msg)
@hooks.add_hook("create_instance")
def create(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
min_count=None, max_count=None,
display_name=None, display_description=None,
key_name=None, key_data=None, security_groups=None,
availability_zone=None, forced_host=None, forced_node=None,
user_data=None, metadata=None, injected_files=None,
admin_password=None, block_device_mapping=None,
access_ip_v4=None, access_ip_v6=None, requested_networks=None,
config_drive=None, auto_disk_config=None, scheduler_hints=None,
legacy_bdm=True, shutdown_terminate=False,
check_server_group_quota=False, tags=None,
supports_multiattach=False, trusted_certs=None):
"""Provision instances, sending instance information to the
scheduler. The scheduler will determine where the instance(s)
go and will handle creating the DB entries.
Returns a tuple of (instances, reservation_id)
"""
if requested_networks and max_count is not None and max_count > 1:
self._check_multiple_instances_with_specified_ip(
requested_networks)
if utils.is_neutron():
self._check_multiple_instances_with_neutron_ports(
requested_networks)
if availability_zone:
available_zones = availability_zones.\
get_availability_zones(context.elevated(), True)
if forced_host is None and availability_zone not in \
available_zones:
msg = _('The requested availability zone is not available')
raise exception.InvalidRequest(msg)
filter_properties = scheduler_utils.build_filter_properties(
scheduler_hints, forced_host, forced_node, instance_type)
return self._create_instance(
context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
display_name, display_description,
key_name, key_data, security_groups,
availability_zone, user_data, metadata,
injected_files, admin_password,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
filter_properties=filter_properties,
legacy_bdm=legacy_bdm,
shutdown_terminate=shutdown_terminate,
check_server_group_quota=check_server_group_quota,
tags=tags, supports_multiattach=supports_multiattach,
trusted_certs=trusted_certs)
def _check_auto_disk_config(self, instance=None, image=None,
**extra_instance_updates):
auto_disk_config = extra_instance_updates.get("auto_disk_config")
if auto_disk_config is None:
return
if not image and not instance:
return
if image:
image_props = image.get("properties", {})
auto_disk_config_img = \
utils.get_auto_disk_config_from_image_props(image_props)
image_ref = image.get("id")
else:
sys_meta = utils.instance_sys_meta(instance)
image_ref = sys_meta.get('image_base_image_ref')
auto_disk_config_img = \
utils.get_auto_disk_config_from_instance(sys_meta=sys_meta)
self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
auto_disk_config,
image_ref)
def _lookup_instance(self, context, uuid):
'''Helper method for pulling an instance object from a database.
During the transition to cellsv2 there is some complexity around
retrieving an instance from the database which this method hides. If
there is an instance mapping then query the cell for the instance, if
no mapping exists then query the configured nova database.
Once we are past the point that all deployments can be assumed to be
migrated to cellsv2 this method can go away.
'''
inst_map = None
try:
inst_map = objects.InstanceMapping.get_by_instance_uuid(
context, uuid)
except exception.InstanceMappingNotFound:
# TODO(alaski): This exception block can be removed once we're
# guaranteed everyone is using cellsv2.
pass
if (inst_map is None or inst_map.cell_mapping is None or
CONF.cells.enable):
# If inst_map is None then the deployment has not migrated to
# cellsv2 yet.
# If inst_map.cell_mapping is None then the instance is not in a
# cell yet. Until instance creation moves to the conductor the
# instance can be found in the configured database, so attempt
# to look it up.
# If we're on cellsv1, we can't yet short-circuit the cells
# messaging path
cell = None
try:
instance = objects.Instance.get_by_uuid(context, uuid)
except exception.InstanceNotFound:
# If we get here then the conductor is in charge of writing the
# instance to the database and hasn't done that yet. It's up to
# the caller of this method to determine what to do with that
# information.
return None, None
else:
cell = inst_map.cell_mapping
with nova_context.target_cell(context, cell) as cctxt:
try:
instance = objects.Instance.get_by_uuid(cctxt, uuid)
except exception.InstanceNotFound:
# Since the cell_mapping exists we know the instance is in
# the cell, however InstanceNotFound means it's already
# deleted.
return None, None
return cell, instance
def _delete_while_booting(self, context, instance):
"""Handle deletion if the instance has not reached a cell yet
Deletion before an instance reaches a cell needs to be handled
differently. What we're attempting to do is delete the BuildRequest
before the api level conductor does. If we succeed here then the boot
request stops before reaching a cell. If not then the instance will
need to be looked up in a cell db and the normal delete path taken.
"""
deleted = self._attempt_delete_of_buildrequest(context, instance)
# After service version 15 deletion of the BuildRequest will halt the
# build process in the conductor. In that case run the rest of this
# method and consider the instance deleted. If we have not yet reached
# service version 15 then just return False so the rest of the delete
        # process will proceed as usual.
service_version = objects.Service.get_minimum_version(
context, 'nova-osapi_compute')
if service_version < 15:
return False
if deleted:
# If we've reached this block the successful deletion of the
# buildrequest indicates that the build process should be halted by
# the conductor.
# NOTE(alaski): Though the conductor halts the build process it
# does not currently delete the instance record. This is
# because in the near future the instance record will not be
# created if the buildrequest has been deleted here. For now we
# ensure the instance has been set to deleted at this point.
# Yes this directly contradicts the comment earlier in this
# method, but this is a temporary measure.
# Look up the instance because the current instance object was
# stashed on the buildrequest and therefore not complete enough
# to run .destroy().
try:
instance_uuid = instance.uuid
cell, instance = self._lookup_instance(context, instance_uuid)
if instance is not None:
# If instance is None it has already been deleted.
if cell:
with nova_context.target_cell(context, cell) as cctxt:
# FIXME: When the instance context is targeted,
# we can remove this
with compute_utils.notify_about_instance_delete(
self.notifier, cctxt, instance):
instance.destroy()
else:
instance.destroy()
except exception.InstanceNotFound:
pass
return True
return False
def _attempt_delete_of_buildrequest(self, context, instance):
# If there is a BuildRequest then the instance may not have been
# written to a cell db yet. Delete the BuildRequest here, which
# will indicate that the Instance build should not proceed.
try:
build_req = objects.BuildRequest.get_by_instance_uuid(
context, instance.uuid)
build_req.destroy()
except exception.BuildRequestNotFound:
# This means that conductor has deleted the BuildRequest so the
# instance is now in a cell and the delete needs to proceed
# normally.
return False
# We need to detach from any volumes so they aren't orphaned.
self._local_cleanup_bdm_volumes(
build_req.block_device_mappings, instance, context)
return True
def _delete(self, context, instance, delete_type, cb, **instance_attrs):
if instance.disable_terminate:
LOG.info('instance termination disabled', instance=instance)
return
cell = None
# If there is an instance.host (or the instance is shelved-offloaded or
# in error state), the instance has been scheduled and sent to a
# cell/compute which means it was pulled from the cell db.
# Normal delete should be attempted.
may_have_ports_or_volumes = compute_utils.may_have_ports_or_volumes(
instance)
if not instance.host and not may_have_ports_or_volumes:
try:
if self._delete_while_booting(context, instance):
return
# If instance.host was not set it's possible that the Instance
# object here was pulled from a BuildRequest object and is not
# fully populated. Notably it will be missing an 'id' field
# which will prevent instance.destroy from functioning
# properly. A lookup is attempted which will either return a
# full Instance or None if not found. If not found then it's
# acceptable to skip the rest of the delete processing.
cell, instance = self._lookup_instance(context, instance.uuid)
if cell and instance:
try:
# Now destroy the instance from the cell it lives in.
with compute_utils.notify_about_instance_delete(
self.notifier, context, instance):
instance.destroy()
except exception.InstanceNotFound:
pass
# The instance was deleted or is already gone.
return
if not instance:
# Instance is already deleted.
return
except exception.ObjectActionError:
# NOTE(melwitt): This means the instance.host changed
# under us indicating the instance became scheduled
# during the destroy(). Refresh the instance from the DB and
# continue on with the delete logic for a scheduled instance.
# NOTE(danms): If instance.host is set, we should be able to
# do the following lookup. If not, there's not much we can
# do to recover.
cell, instance = self._lookup_instance(context, instance.uuid)
if not instance:
# Instance is already deleted
return
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
        # In these states an instance has an associated snapshot.
if instance.vm_state in (vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED):
snapshot_id = instance.system_metadata.get('shelved_image_id')
LOG.info("Working on deleting snapshot %s "
"from shelved instance...",
snapshot_id, instance=instance)
try:
self.image_api.delete(context, snapshot_id)
except (exception.ImageNotFound,
exception.ImageNotAuthorized) as exc:
LOG.warning("Failed to delete snapshot "
"from shelved instance (%s).",
exc.format_message(), instance=instance)
except Exception:
LOG.exception("Something wrong happened when trying to "
"delete snapshot from shelved instance.",
instance=instance)
original_task_state = instance.task_state
try:
# NOTE(maoy): no expected_task_state needs to be set
instance.update(instance_attrs)
instance.progress = 0
instance.save()
# NOTE(dtp): cells.enable = False means "use cells v2".
# Run everywhere except v1 compute cells.
if (not CONF.cells.enable and CONF.workarounds.enable_consoleauth
) or self.cell_type == 'api':
# TODO(melwitt): Remove the conditions for running this line
# with cells v2, when consoleauth is no longer being used by
# cells v2, in Stein.
self.consoleauth_rpcapi.delete_tokens_for_instance(
context, instance.uuid)
if self.cell_type == 'api':
# NOTE(comstud): If we're in the API cell, we need to
# skip all remaining logic and just call the callback,
# which will cause a cast to the child cell.
cb(context, instance, bdms)
return
if not instance.host and not may_have_ports_or_volumes:
try:
with compute_utils.notify_about_instance_delete(
self.notifier, context, instance,
delete_type
if delete_type != 'soft_delete'
else 'delete'):
instance.destroy()
LOG.info('Instance deleted and does not have host '
'field, its vm_state is %(state)s.',
{'state': instance.vm_state},
instance=instance)
return
except exception.ObjectActionError as ex:
# The instance's host likely changed under us as
# this instance could be building and has since been
# scheduled. Continue with attempts to delete it.
LOG.debug('Refreshing instance because: %s', ex,
instance=instance)
instance.refresh()
if instance.vm_state == vm_states.RESIZED:
self._confirm_resize_on_deleting(context, instance)
# NOTE(neha_alhat): After confirm resize vm_state will become
# 'active' and task_state will be set to 'None'. But for soft
# deleting a vm, the _do_soft_delete callback requires
# task_state in 'SOFT_DELETING' status. So, we need to set
# task_state as 'SOFT_DELETING' again for soft_delete case.
# After confirm resize and before saving the task_state to
# "SOFT_DELETING", during the short window, user can submit
# soft delete vm request again and system will accept and
# process it without any errors.
if delete_type == 'soft_delete':
instance.task_state = instance_attrs['task_state']
instance.save()
is_local_delete = True
try:
# instance.host must be set in order to look up the service.
if instance.host is not None:
service = objects.Service.get_by_compute_host(
context.elevated(), instance.host)
is_local_delete = not self.servicegroup_api.service_is_up(
service)
if not is_local_delete:
if original_task_state in (task_states.DELETING,
task_states.SOFT_DELETING):
LOG.info('Instance is already in deleting state, '
'ignoring this request',
instance=instance)
return
self._record_action_start(context, instance,
instance_actions.DELETE)
cb(context, instance, bdms)
except exception.ComputeHostNotFound:
LOG.debug('Compute host %s not found during service up check, '
'going to local delete instance', instance.host,
instance=instance)
if is_local_delete:
# If instance is in shelved_offloaded state or compute node
# isn't up, delete instance from db and clean bdms info and
# network info
if cell is None:
# NOTE(danms): If we didn't get our cell from one of the
# paths above, look it up now.
try:
im = objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
cell = im.cell_mapping
except exception.InstanceMappingNotFound:
LOG.warning('During local delete, failed to find '
'instance mapping', instance=instance)
return
LOG.debug('Doing local delete in cell %s', cell.identity,
instance=instance)
with nova_context.target_cell(context, cell) as cctxt:
self._local_delete(cctxt, instance, bdms, delete_type, cb)
except exception.InstanceNotFound:
# NOTE(comstud): Race condition. Instance already gone.
pass
def _confirm_resize_on_deleting(self, context, instance):
# If in the middle of a resize, use confirm_resize to
# ensure the original instance is cleaned up too
migration = None
for status in ('finished', 'confirming'):
try:
migration = objects.Migration.get_by_instance_and_status(
context.elevated(), instance.uuid, status)
LOG.info('Found an unconfirmed migration during delete, '
'id: %(id)s, status: %(status)s',
{'id': migration.id,
'status': migration.status},
instance=instance)
break
except exception.MigrationNotFoundByStatus:
pass
if not migration:
LOG.info('Instance may have been confirmed during delete',
instance=instance)
return
src_host = migration.source_compute
self._record_action_start(context, instance,
instance_actions.CONFIRM_RESIZE)
self.compute_rpcapi.confirm_resize(context,
instance, migration, src_host, cast=False)
def _local_cleanup_bdm_volumes(self, bdms, instance, context):
"""The method deletes the bdm records and, if a bdm is a volume, call
the terminate connection and the detach volume via the Volume API.
"""
elevated = context.elevated()
for bdm in bdms:
if bdm.is_volume:
try:
if bdm.attachment_id:
self.volume_api.attachment_delete(context,
bdm.attachment_id)
else:
connector = compute_utils.get_stashed_volume_connector(
bdm, instance)
if connector:
self.volume_api.terminate_connection(context,
bdm.volume_id,
connector)
else:
LOG.debug('Unable to find connector for volume %s,'
' not attempting terminate_connection.',
bdm.volume_id, instance=instance)
# Attempt to detach the volume. If there was no
# connection made in the first place this is just
# cleaning up the volume state in the Cinder DB.
self.volume_api.detach(elevated, bdm.volume_id,
instance.uuid)
if bdm.delete_on_termination:
self.volume_api.delete(context, bdm.volume_id)
except Exception as exc:
LOG.warning("Ignoring volume cleanup failure due to %s",
exc, instance=instance)
# If we're cleaning up volumes from an instance that wasn't yet
# created in a cell, i.e. the user deleted the server while
# the BuildRequest still existed, then the BDM doesn't actually
# exist in the DB to destroy it.
if 'id' in bdm:
bdm.destroy()
def _local_delete(self, context, instance, bdms, delete_type, cb):
if instance.vm_state == vm_states.SHELVED_OFFLOADED:
LOG.info("instance is in SHELVED_OFFLOADED state, cleanup"
" the instance's info from database.",
instance=instance)
else:
LOG.warning("instance's host %s is down, deleting from "
"database", instance.host, instance=instance)
with compute_utils.notify_about_instance_delete(
self.notifier, context, instance,
delete_type if delete_type != 'soft_delete' else 'delete'):
elevated = context.elevated()
if self.cell_type != 'api':
                # NOTE(liusheng): In a nova-network multi_host scenario, deleting
# network info of the instance may need instance['host'] as
# destination host of RPC call. If instance in
# SHELVED_OFFLOADED state, instance['host'] is None, here, use
# shelved_host as host to deallocate network info and reset
                # instance['host'] after that. We shouldn't use
                # instance.save() here, because that would mislead the user
                # into thinking the instance's host has changed, when in fact
                # instance.host remains None.
orig_host = instance.host
try:
if instance.vm_state == vm_states.SHELVED_OFFLOADED:
sysmeta = getattr(instance,
obj_base.get_attrname(
'system_metadata'))
instance.host = sysmeta.get('shelved_host')
self.network_api.deallocate_for_instance(elevated,
instance)
finally:
instance.host = orig_host
# cleanup volumes
self._local_cleanup_bdm_volumes(bdms, instance, context)
# Cleanup allocations in Placement since we can't do it from the
# compute service.
self.placementclient.delete_allocation_for_instance(
context, instance.uuid)
cb(context, instance, bdms, local=True)
instance.destroy()
@staticmethod
def _update_queued_for_deletion(context, instance, qfd):
# NOTE(tssurya): We query the instance_mapping record of this instance
# and update the queued_for_delete flag to True (or False according to
# the state of the instance). This just means that the instance is
# queued for deletion (or is no longer queued for deletion). It does
# not guarantee its successful deletion (or restoration). Hence the
# value could be stale which is fine, considering its use is only
# during down cell (desperate) situation.
im = objects.InstanceMapping.get_by_instance_uuid(context,
instance.uuid)
im.queued_for_delete = qfd
im.save()
def _do_delete(self, context, instance, bdms, local=False):
if local:
instance.vm_state = vm_states.DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
else:
self.compute_rpcapi.terminate_instance(context, instance, bdms,
delete_type='delete')
self._update_queued_for_deletion(context, instance, True)
def _do_force_delete(self, context, instance, bdms, local=False):
if local:
instance.vm_state = vm_states.DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
else:
self.compute_rpcapi.terminate_instance(context, instance, bdms,
delete_type='force_delete')
self._update_queued_for_deletion(context, instance, True)
def _do_soft_delete(self, context, instance, bdms, local=False):
if local:
instance.vm_state = vm_states.SOFT_DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
else:
self.compute_rpcapi.soft_delete_instance(context, instance)
self._update_queued_for_deletion(context, instance, True)
# NOTE(maoy): we allow delete to be called no matter what vm_state says.
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=True)
def soft_delete(self, context, instance):
"""Terminate an instance."""
LOG.debug('Going to try to soft delete instance',
instance=instance)
self._delete(context, instance, 'soft_delete', self._do_soft_delete,
task_state=task_states.SOFT_DELETING,
deleted_at=timeutils.utcnow())
def _delete_instance(self, context, instance):
self._delete(context, instance, 'delete', self._do_delete,
task_state=task_states.DELETING)
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=False)
def delete(self, context, instance):
"""Terminate an instance."""
LOG.debug("Going to try to terminate instance", instance=instance)
self._delete_instance(context, instance)
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SOFT_DELETED])
def restore(self, context, instance):
"""Restore a previously deleted (but not reclaimed) instance."""
# Check quotas
flavor = instance.get_flavor()
project_id, user_id = quotas_obj.ids_from_instance(context, instance)
compute_utils.check_num_instances_quota(context, flavor, 1, 1,
project_id=project_id, user_id=user_id)
self._record_action_start(context, instance, instance_actions.RESTORE)
if instance.host:
instance.task_state = task_states.RESTORING
instance.deleted_at = None
instance.save(expected_task_state=[None])
# TODO(melwitt): We're not rechecking for strict quota here to
# guard against going over quota during a race at this time because
# the resource consumption for this operation is written to the
# database by compute.
self.compute_rpcapi.restore_instance(context, instance)
else:
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.deleted_at = None
instance.save(expected_task_state=[None])
self._update_queued_for_deletion(context, instance, False)
@check_instance_lock
@check_instance_state(task_state=None,
must_have_launched=False)
def force_delete(self, context, instance):
"""Force delete an instance in any vm_state/task_state."""
self._delete(context, instance, 'force_delete', self._do_force_delete,
task_state=task_states.DELETING)
def force_stop(self, context, instance, do_cast=True, clean_shutdown=True):
LOG.debug("Going to try to stop instance", instance=instance)
instance.task_state = task_states.POWERING_OFF
instance.progress = 0
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.STOP)
self.compute_rpcapi.stop_instance(context, instance, do_cast=do_cast,
clean_shutdown=clean_shutdown)
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.ERROR])
def stop(self, context, instance, do_cast=True, clean_shutdown=True):
"""Stop an instance."""
self.force_stop(context, instance, do_cast, clean_shutdown)
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.STOPPED])
def start(self, context, instance):
"""Start an instance."""
LOG.debug("Going to try to start instance", instance=instance)
instance.task_state = task_states.POWERING_ON
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.START)
# TODO(yamahata): injected_files isn't supported right now.
# It is used only for osapi. not for ec2 api.
# availability_zone isn't used by run_instance.
self.compute_rpcapi.start_instance(context, instance)
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=vm_states.ALLOW_TRIGGER_CRASH_DUMP)
def trigger_crash_dump(self, context, instance):
"""Trigger crash dump in an instance."""
LOG.debug("Try to trigger crash dump", instance=instance)
self._record_action_start(context, instance,
instance_actions.TRIGGER_CRASH_DUMP)
self.compute_rpcapi.trigger_crash_dump(context, instance)
def _get_instance_map_or_none(self, context, instance_uuid):
try:
inst_map = objects.InstanceMapping.get_by_instance_uuid(
context, instance_uuid)
except exception.InstanceMappingNotFound:
# InstanceMapping should always be found generally. This exception
# may be raised if a deployment has partially migrated the nova-api
# services.
inst_map = None
return inst_map
def _get_instance(self, context, instance_uuid, expected_attrs):
# Before service version 15 the BuildRequest is not cleaned up during
# a delete request so there is no reason to look it up here as we can't
# trust that it's not referencing a deleted instance. Also even if
# there is an instance mapping we don't need to honor it for older
# service versions.
service_version = objects.Service.get_minimum_version(
context, 'nova-osapi_compute')
# If we're on cellsv1, we also need to consult the top-level
# merged replica instead of the cell directly, so fall through
# here in that case as well.
if service_version < 15 or CONF.cells.enable:
# If not using cells v1, we need to log a warning about the API
# service version being less than 15 (that check was added in
# newton), which indicates there is some lingering data during the
# transition to cells v2 which could cause an InstanceNotFound
# here. The warning message is a sort of breadcrumb.
# This can all go away once we drop cells v1 and assert that all
# deployments have upgraded from a base cells v2 setup with
# mappings.
if not CONF.cells.enable:
LOG.warning('The nova-osapi_compute service version is from '
'before Ocata and may cause problems looking up '
'instances in a cells v2 setup. Check your '
'nova-api service configuration and cell '
'mappings. You may need to remove stale '
'nova-osapi_compute service records from the cell '
'database.')
return objects.Instance.get_by_uuid(context, instance_uuid,
expected_attrs=expected_attrs)
inst_map = self._get_instance_map_or_none(context, instance_uuid)
if inst_map and (inst_map.cell_mapping is not None):
nova_context.set_target_cell(context, inst_map.cell_mapping)
instance = objects.Instance.get_by_uuid(
context, instance_uuid, expected_attrs=expected_attrs)
elif inst_map and (inst_map.cell_mapping is None):
# This means the instance has not been scheduled and put in
# a cell yet. For now it also may mean that the deployer
# has not created their cell(s) yet.
try:
build_req = objects.BuildRequest.get_by_instance_uuid(
context, instance_uuid)
instance = build_req.instance
except exception.BuildRequestNotFound:
# Instance was mapped and the BuildRequest was deleted
# while fetching. Try again.
inst_map = self._get_instance_map_or_none(context,
instance_uuid)
if inst_map and (inst_map.cell_mapping is not None):
nova_context.set_target_cell(context,
inst_map.cell_mapping)
instance = objects.Instance.get_by_uuid(
context, instance_uuid,
expected_attrs=expected_attrs)
else:
raise exception.InstanceNotFound(instance_id=instance_uuid)
else:
raise exception.InstanceNotFound(instance_id=instance_uuid)
return instance
def get(self, context, instance_id, expected_attrs=None):
"""Get a single instance with the given instance_id."""
if not expected_attrs:
expected_attrs = []
expected_attrs.extend(['metadata', 'system_metadata',
'security_groups', 'info_cache'])
# NOTE(ameade): we still need to support integer ids for ec2
try:
if uuidutils.is_uuid_like(instance_id):
LOG.debug("Fetching instance by UUID",
instance_uuid=instance_id)
instance = self._get_instance(context, instance_id,
expected_attrs)
else:
LOG.debug("Failed to fetch instance by id %s", instance_id)
raise exception.InstanceNotFound(instance_id=instance_id)
except exception.InvalidID:
LOG.debug("Invalid instance id %s", instance_id)
raise exception.InstanceNotFound(instance_id=instance_id)
return instance
def get_all(self, context, search_opts=None, limit=None, marker=None,
expected_attrs=None, sort_keys=None, sort_dirs=None):
"""Get all instances filtered by one of the given parameters.
If there is no filter and the context is an admin, it will retrieve
all instances in the system.
Deleted instances will be returned by default, unless there is a
search option that says otherwise.
The results will be sorted based on the list of sort keys in the
'sort_keys' parameter (first value is primary sort key, second value is
        secondary sort key, etc.). For each sort key, the associated sort
direction is based on the list of sort directions in the 'sort_dirs'
parameter.
"""
if search_opts is None:
search_opts = {}
LOG.debug("Searching by: %s", str(search_opts))
# Fixups for the DB call
filters = {}
def _remap_flavor_filter(flavor_id):
flavor = objects.Flavor.get_by_flavor_id(context, flavor_id)
filters['instance_type_id'] = flavor.id
def _remap_fixed_ip_filter(fixed_ip):
# Turn fixed_ip into a regexp match. Since '.' matches
# any character, we need to use regexp escaping for it.
filters['ip'] = '^%s$' % fixed_ip.replace('.', '\\.')
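            # Illustrative example (input value assumed): a fixed_ip of
            # '10.0.0.1' produces the pattern '^10\.0\.0\.1$', so each '.'
            # matches a literal dot rather than any character.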
# search_option to filter_name mapping.
filter_mapping = {
'image': 'image_ref',
'name': 'display_name',
'tenant_id': 'project_id',
'flavor': _remap_flavor_filter,
'fixed_ip': _remap_fixed_ip_filter}
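        # Illustrative example (values assumed): with
        # search_opts = {'name': 'web', 'flavor': '42'}, the loop below
        # yields filters = {'display_name': 'web',
        # 'instance_type_id': <id of the flavor with flavor_id '42'>}.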
# copy from search_opts, doing various remappings as necessary
for opt, value in search_opts.items():
# Do remappings.
# Values not in the filter_mapping table are copied as-is.
# If remapping is None, option is not copied
# If the remapping is a string, it is the filter_name to use
try:
remap_object = filter_mapping[opt]
except KeyError:
filters[opt] = value
else:
# Remaps are strings to translate to, or functions to call
# to do the translating as defined by the table above.
if isinstance(remap_object, six.string_types):
filters[remap_object] = value
else:
try:
remap_object(value)
# We already know we can't match the filter, so
# return an empty list
except ValueError:
return objects.InstanceList()
# IP address filtering cannot be applied at the DB layer, remove any DB
# limit so that it can be applied after the IP filter.
filter_ip = 'ip6' in filters or 'ip' in filters
skip_build_request = False
orig_limit = limit
if filter_ip:
            # We cannot skip build requests if there is a marker since
            # the marker could be a build request.
skip_build_request = marker is None
if self.network_api.has_substr_port_filtering_extension(context):
# We're going to filter by IP using Neutron so set filter_ip
# to False so we don't attempt post-DB query filtering in
# memory below.
filter_ip = False
instance_uuids = self._ip_filter_using_neutron(context,
filters)
if instance_uuids:
# Note that 'uuid' is not in the 2.1 GET /servers query
# parameter schema, however, we allow additionalProperties
# so someone could filter instances by uuid, which doesn't
# make a lot of sense but we have to account for it.
if 'uuid' in filters and filters['uuid']:
filter_uuids = filters['uuid']
if isinstance(filter_uuids, list):
instance_uuids.extend(filter_uuids)
else:
# Assume a string. If it's a dict or tuple or
# something, well...that's too bad. This is why
# we have query parameter schema definitions.
if filter_uuids not in instance_uuids:
instance_uuids.append(filter_uuids)
filters['uuid'] = instance_uuids
else:
# No matches on the ip filter(s), return an empty list.
return objects.InstanceList()
elif limit:
LOG.debug('Removing limit for DB query due to IP filter')
limit = None
# Skip get BuildRequest if filtering by IP address, as building
# instances will not have IP addresses.
if skip_build_request:
build_requests = objects.BuildRequestList()
else:
# The ordering of instances will be
# [sorted instances with no host] + [sorted instances with host].
# This means BuildRequest and cell0 instances first, then cell
# instances
try:
build_requests = objects.BuildRequestList.get_by_filters(
context, filters, limit=limit, marker=marker,
sort_keys=sort_keys, sort_dirs=sort_dirs)
                # If we found the marker in the build requests we need to
                # set it to None so we don't expect to find it in the cells
                # below.
marker = None
except exception.MarkerNotFound:
# If we didn't find the marker in the build requests then keep
# looking for it in the cells.
build_requests = objects.BuildRequestList()
build_req_instances = objects.InstanceList(
objects=[build_req.instance for build_req in build_requests])
# Only subtract from limit if it is not None
limit = (limit - len(build_req_instances)) if limit else limit
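        # For illustration (numbers assumed): with an API limit of 5 and 2
        # matching build requests, the remaining cell queries below run with
        # a limit of 3.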
# We could arguably avoid joining on security_groups if we're using
# neutron (which is the default) but if you're using neutron then the
# security_group_instance_association table should be empty anyway
# and the DB should optimize out that join, making it insignificant.
fields = ['metadata', 'info_cache', 'security_groups']
if expected_attrs:
fields.extend(expected_attrs)
if CONF.cells.enable:
insts = self._do_old_style_instance_list_for_poor_cellsv1_users(
context, filters, limit, marker, fields, sort_keys,
sort_dirs)
else:
insts = instance_list.get_instance_objects_sorted(
context, filters, limit, marker, fields, sort_keys, sort_dirs)
def _get_unique_filter_method():
seen_uuids = set()
def _filter(instance):
if instance.uuid in seen_uuids:
return False
seen_uuids.add(instance.uuid)
return True
return _filter
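        # Illustrative sketch (instances assumed): if the build request list
        # yields [A, B] and the cell query yields [B, C], the filter keeps
        # [A, B, C], dropping the second B because its uuid was already seen.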
filter_method = _get_unique_filter_method()
# Only subtract from limit if it is not None
limit = (limit - len(insts)) if limit else limit
# TODO(alaski): Clean up the objects concatenation when List objects
# support it natively.
instances = objects.InstanceList(
objects=list(filter(filter_method,
build_req_instances.objects +
insts.objects)))
if filter_ip:
instances = self._ip_filter(instances, filters, orig_limit)
return instances
def _do_old_style_instance_list_for_poor_cellsv1_users(self,
context, filters,
limit, marker,
fields,
sort_keys,
sort_dirs):
try:
cell0_mapping = objects.CellMapping.get_by_uuid(context,
objects.CellMapping.CELL0_UUID)
except exception.CellMappingNotFound:
cell0_instances = objects.InstanceList(objects=[])
else:
with nova_context.target_cell(context, cell0_mapping) as cctxt:
try:
cell0_instances = self._get_instances_by_filters(
cctxt, filters, limit=limit, marker=marker,
fields=fields, sort_keys=sort_keys,
sort_dirs=sort_dirs)
# If we found the marker in cell0 we need to set it to None
# so we don't expect to find it in the cells below.
marker = None
except exception.MarkerNotFound:
# We can ignore this since we need to look in the cell DB
cell0_instances = objects.InstanceList(objects=[])
# Only subtract from limit if it is not None
limit = (limit - len(cell0_instances)) if limit else limit
# There is only planned support for a single cell here. Multiple cell
# instance lists should be proxied to project Searchlight, or a similar
# alternative.
if limit is None or limit > 0:
# NOTE(melwitt): If we're on cells v1, we need to read
# instances from the top-level database because reading from
# cells results in changed behavior, because of the syncing.
# We can remove this path once we stop supporting cells v1.
cell_instances = self._get_instances_by_filters(
context, filters, limit=limit, marker=marker,
fields=fields, sort_keys=sort_keys,
sort_dirs=sort_dirs)
else:
LOG.debug('Limit excludes any results from real cells')
cell_instances = objects.InstanceList(objects=[])
return cell0_instances + cell_instances
@staticmethod
def _ip_filter(inst_models, filters, limit):
ipv4_f = re.compile(str(filters.get('ip')))
ipv6_f = re.compile(str(filters.get('ip6')))
def _match_instance(instance):
nw_info = instance.get_network_info()
for vif in nw_info:
for fixed_ip in vif.fixed_ips():
address = fixed_ip.get('address')
if not address:
continue
version = fixed_ip.get('version')
if ((version == 4 and ipv4_f.match(address)) or
(version == 6 and ipv6_f.match(address))):
return True
return False
result_objs = []
for instance in inst_models:
if _match_instance(instance):
result_objs.append(instance)
if limit and len(result_objs) == limit:
break
return objects.InstanceList(objects=result_objs)
def _ip_filter_using_neutron(self, context, filters):
ip4_address = filters.get('ip')
ip6_address = filters.get('ip6')
addresses = [ip4_address, ip6_address]
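        # Illustrative example (address assumed): with an 'ip' filter of
        # '10.0.0', Neutron is asked for ports whose fixed IPs contain the
        # substring '10.0.0', and the owning instance uuids (the ports'
        # device_ids) are collected below.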
uuids = []
for address in addresses:
if address:
try:
ports = self.network_api.list_ports(
context, fixed_ips='ip_address_substr=' + address,
fields=['device_id'])['ports']
for port in ports:
uuids.append(port['device_id'])
except Exception as e:
LOG.error('An error occurred while listing ports '
'with an ip_address filter value of "%s". '
'Error: %s',
address, six.text_type(e))
return uuids
def _get_instances_by_filters(self, context, filters,
limit=None, marker=None, fields=None,
sort_keys=None, sort_dirs=None):
return objects.InstanceList.get_by_filters(
context, filters=filters, limit=limit, marker=marker,
expected_attrs=fields, sort_keys=sort_keys, sort_dirs=sort_dirs)
def update_instance(self, context, instance, updates):
"""Updates a single Instance object with some updates dict.
Returns the updated instance.
"""
# NOTE(sbauza): Given we only persist the Instance object after we
# create the BuildRequest, we are sure that if the Instance object
# has an ID field set, then it was persisted in the right Cell DB.
if instance.obj_attr_is_set('id'):
instance.update(updates)
# Instance has been scheduled and the BuildRequest has been deleted
# we can directly write the update down to the right cell.
inst_map = self._get_instance_map_or_none(context, instance.uuid)
# If we have a cell_mapping and we're not on cells v1, then
# look up the instance in the cell database
if inst_map and (inst_map.cell_mapping is not None) and (
not CONF.cells.enable):
with nova_context.target_cell(context,
inst_map.cell_mapping) as cctxt:
with instance.obj_alternate_context(cctxt):
instance.save()
else:
# If inst_map.cell_mapping does not point at a cell then cell
# migration has not happened yet.
# TODO(alaski): Make this a failure case after we put in
# a block that requires migrating to cellsv2.
instance.save()
else:
# Instance is not yet mapped to a cell, so we need to update
# BuildRequest instead
# TODO(sbauza): Fix the possible race conditions where BuildRequest
# could be deleted because of either a concurrent instance delete
# or because the scheduler just returned a destination right
# after we called the instance in the API.
try:
build_req = objects.BuildRequest.get_by_instance_uuid(
context, instance.uuid)
instance = build_req.instance
instance.update(updates)
                # FIXME(sbauza): Here we are updating the current
                # thread-related BuildRequest object. Given that another
                # worker could be looking up that BuildRequest in the API
                # and could pass it down to the conductor without making
                # sure it's not updated, we could have a race condition
                # where the conductor would be missing the updated fields,
                # but that's something we could discuss once the instance
                # record is persisted by the conductor.
build_req.save()
except exception.BuildRequestNotFound:
# Instance was mapped and the BuildRequest was deleted
# while fetching (and possibly the instance could have been
# deleted as well). We need to lookup again the Instance object
# in order to correctly update it.
# TODO(sbauza): Figure out a good way to know the expected
# attributes by checking which fields are set or not.
expected_attrs = ['flavor', 'pci_devices', 'numa_topology',
'tags', 'metadata', 'system_metadata',
'security_groups', 'info_cache']
inst_map = self._get_instance_map_or_none(context,
instance.uuid)
if inst_map and (inst_map.cell_mapping is not None):
with nova_context.target_cell(
context,
inst_map.cell_mapping) as cctxt:
instance = objects.Instance.get_by_uuid(
cctxt, instance.uuid,
expected_attrs=expected_attrs)
instance.update(updates)
instance.save()
else:
# If inst_map.cell_mapping does not point at a cell then
# cell migration has not happened yet.
# TODO(alaski): Make this a failure case after we put in
# a block that requires migrating to cellsv2.
instance = objects.Instance.get_by_uuid(
context, instance.uuid, expected_attrs=expected_attrs)
instance.update(updates)
instance.save()
return instance
# NOTE(melwitt): We don't check instance lock for backup because lock is
# intended to prevent accidental change/delete of instances
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED])
def backup(self, context, instance, name, backup_type, rotation,
extra_properties=None):
"""Backup the given instance
:param instance: nova.objects.instance.Instance object
:param name: name of the backup
:param backup_type: 'daily' or 'weekly'
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
:param extra_properties: dict of extra image properties to include
when creating the image.
:returns: A dict containing image metadata
"""
props_copy = dict(extra_properties, backup_type=backup_type)
if compute_utils.is_volume_backed_instance(context, instance):
LOG.info("It's not supported to backup volume backed "
"instance.", instance=instance)
raise exception.InvalidRequest(
_('Backup is not supported for volume-backed instances.'))
else:
image_meta = self._create_image(context, instance,
name, 'backup',
extra_properties=props_copy)
# NOTE(comstud): Any changes to this method should also be made
# to the backup_instance() method in nova/cells/messaging.py
instance.task_state = task_states.IMAGE_BACKUP
instance.save(expected_task_state=[None])
self._record_action_start(context, instance,
instance_actions.BACKUP)
self.compute_rpcapi.backup_instance(context, instance,
image_meta['id'],
backup_type,
rotation)
return image_meta
# NOTE(melwitt): We don't check instance lock for snapshot because lock is
# intended to prevent accidental change/delete of instances
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED])
def snapshot(self, context, instance, name, extra_properties=None):
"""Snapshot the given instance.
:param instance: nova.objects.instance.Instance object
:param name: name of the snapshot
:param extra_properties: dict of extra image properties to include
when creating the image.
:returns: A dict containing image metadata
"""
image_meta = self._create_image(context, instance, name,
'snapshot',
extra_properties=extra_properties)
# NOTE(comstud): Any changes to this method should also be made
# to the snapshot_instance() method in nova/cells/messaging.py
instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING
try:
instance.save(expected_task_state=[None])
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError) as ex:
            # Determine the instance attribute and state to use when raising
            # the InstanceInvalidState exception below.
LOG.debug('Instance disappeared during snapshot.',
instance=instance)
try:
image_id = image_meta['id']
self.image_api.delete(context, image_id)
LOG.info('Image %s deleted because instance '
'deleted before snapshot started.',
image_id, instance=instance)
except exception.ImageNotFound:
pass
except Exception as exc:
LOG.warning("Error while trying to clean up image %(img_id)s: "
"%(error_msg)s",
{"img_id": image_meta['id'],
"error_msg": six.text_type(exc)})
attr = 'task_state'
state = task_states.DELETING
if type(ex) == exception.InstanceNotFound:
attr = 'vm_state'
state = vm_states.DELETED
raise exception.InstanceInvalidState(attr=attr,
instance_uuid=instance.uuid,
state=state,
method='snapshot')
self._record_action_start(context, instance,
instance_actions.CREATE_IMAGE)
self.compute_rpcapi.snapshot_instance(context, instance,
image_meta['id'])
return image_meta
def _create_image(self, context, instance, name, image_type,
extra_properties=None):
"""Create new image entry in the image service. This new image
will be reserved for the compute manager to upload a snapshot
or backup.
:param context: security context
:param instance: nova.objects.instance.Instance object
:param name: string for name of the snapshot
:param image_type: snapshot | backup
:param extra_properties: dict of extra image properties to include
"""
properties = {
'instance_uuid': instance.uuid,
'user_id': str(context.user_id),
'image_type': image_type,
}
properties.update(extra_properties or {})
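        # Illustrative example (field values assumed): for a daily backup
        # this ends up roughly as {'instance_uuid': <uuid>, 'user_id': <id>,
        # 'image_type': 'backup', 'backup_type': 'daily'}, since backup()
        # passes backup_type through extra_properties.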
image_meta = self._initialize_instance_snapshot_metadata(
instance, name, properties)
# if we're making a snapshot, omit the disk and container formats,
# since the image may have been converted to another format, and the
# original values won't be accurate. The driver will populate these
# with the correct values later, on image upload.
if image_type == 'snapshot':
image_meta.pop('disk_format', None)
image_meta.pop('container_format', None)
return self.image_api.create(context, image_meta)
def _initialize_instance_snapshot_metadata(self, instance, name,
extra_properties=None):
"""Initialize new metadata for a snapshot of the given instance.
:param instance: nova.objects.instance.Instance object
:param name: string for name of the snapshot
:param extra_properties: dict of extra metadata properties to include
:returns: the new instance snapshot metadata
"""
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
image_meta.update({'name': name,
'is_public': False})
# Delete properties that are non-inheritable
properties = image_meta['properties']
for key in CONF.non_inheritable_image_properties:
properties.pop(key, None)
# The properties in extra_properties have precedence
properties.update(extra_properties or {})
return image_meta
# NOTE(melwitt): We don't check instance lock for snapshot because lock is
# intended to prevent accidental change/delete of instances
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.SUSPENDED])
def snapshot_volume_backed(self, context, instance, name,
extra_properties=None):
"""Snapshot the given volume-backed instance.
:param instance: nova.objects.instance.Instance object
:param name: name of the backup or snapshot
:param extra_properties: dict of extra image properties to include
:returns: the new image metadata
"""
image_meta = self._initialize_instance_snapshot_metadata(
instance, name, extra_properties)
# the new image is simply a bucket of properties (particularly the
# block device mapping, kernel and ramdisk IDs) with no image data,
# hence the zero size
image_meta['size'] = 0
for attr in ('container_format', 'disk_format'):
image_meta.pop(attr, None)
properties = image_meta['properties']
# clean properties before filling
for key in ('block_device_mapping', 'bdm_v2', 'root_device_name'):
properties.pop(key, None)
if instance.root_device_name:
properties['root_device_name'] = instance.root_device_name
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
mapping = [] # list of BDM dicts that can go into the image properties
# Do some up-front filtering of the list of BDMs from
# which we are going to create snapshots.
volume_bdms = []
for bdm in bdms:
if bdm.no_device:
continue
if bdm.is_volume:
# These will be handled below.
volume_bdms.append(bdm)
else:
mapping.append(bdm.get_image_mapping())
# Check limits in Cinder before creating snapshots to avoid going over
# quota in the middle of a list of volumes. This is a best-effort check
# but concurrently running snapshot requests from the same project
# could still fail to create volume snapshots if they go over limit.
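        # Worked example (numbers assumed): with 3 volume BDMs,
        # totalSnapshotsUsed=8 and maxTotalSnapshots=10, the check below
        # raises OverQuota because 3 + 8 > 10; with maxTotalSnapshots=-1 the
        # quota is unlimited and the check is skipped.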
if volume_bdms:
limits = self.volume_api.get_absolute_limits(context)
total_snapshots_used = limits['totalSnapshotsUsed']
max_snapshots = limits['maxTotalSnapshots']
# -1 means there is unlimited quota for snapshots
if (max_snapshots > -1 and
len(volume_bdms) + total_snapshots_used > max_snapshots):
LOG.debug('Unable to create volume snapshots for instance. '
'Currently has %s snapshots, requesting %s new '
'snapshots, with a limit of %s.',
total_snapshots_used, len(volume_bdms),
max_snapshots, instance=instance)
raise exception.OverQuota(overs='snapshots')
quiesced = False
if instance.vm_state == vm_states.ACTIVE:
try:
LOG.info("Attempting to quiesce instance before volume "
"snapshot.", instance=instance)
self.compute_rpcapi.quiesce_instance(context, instance)
quiesced = True
except (exception.InstanceQuiesceNotSupported,
exception.QemuGuestAgentNotEnabled,
exception.NovaException, NotImplementedError) as err:
if strutils.bool_from_string(instance.system_metadata.get(
'image_os_require_quiesce')):
raise
else:
LOG.info('Skipping quiescing instance: %(reason)s.',
{'reason': err},
instance=instance)
            # NOTE(tasker): It was discovered that an uncaught exception
            # could occur after the instance has been frozen; catch it and
            # thaw.
except Exception as ex:
with excutils.save_and_reraise_exception():
LOG.error("An error occurred during quiesce of instance. "
"Unquiescing to ensure instance is thawed. "
"Error: %s", six.text_type(ex),
instance=instance)
self.compute_rpcapi.unquiesce_instance(context, instance,
mapping=None)
@wrap_instance_event(prefix='api')
def snapshot_instance(self, context, instance, bdms):
try:
for bdm in volume_bdms:
# create snapshot based on volume_id
volume = self.volume_api.get(context, bdm.volume_id)
                    # NOTE(yamahata): Should we wait for snapshot creation?
                    # Linux LVM snapshot creation completes in a short time,
                    # so it doesn't matter for now.
name = _('snapshot for %s') % image_meta['name']
LOG.debug('Creating snapshot from volume %s.',
volume['id'], instance=instance)
snapshot = self.volume_api.create_snapshot_force(
context, volume['id'],
name, volume['display_description'])
mapping_dict = block_device.snapshot_from_bdm(
snapshot['id'], bdm)
mapping_dict = mapping_dict.get_image_mapping()
mapping.append(mapping_dict)
return mapping
# NOTE(tasker): No error handling is done in the above for loop.
# This means that if the snapshot fails and throws an exception
# the traceback will skip right over the unquiesce needed below.
# Here, catch any exception, unquiesce the instance, and raise the
# error so that the calling function can do what it needs to in
# order to properly treat a failed snap.
except Exception:
with excutils.save_and_reraise_exception():
if quiesced:
LOG.info("Unquiescing instance after volume snapshot "
"failure.", instance=instance)
self.compute_rpcapi.unquiesce_instance(
context, instance, mapping)
self._record_action_start(context, instance,
instance_actions.CREATE_IMAGE)
mapping = snapshot_instance(self, context, instance, bdms)
if quiesced:
self.compute_rpcapi.unquiesce_instance(context, instance, mapping)
if mapping:
properties['block_device_mapping'] = mapping
properties['bdm_v2'] = True
return self.image_api.create(context, image_meta)
@check_instance_lock
def reboot(self, context, instance, reboot_type):
"""Reboot the given instance."""
if reboot_type == 'SOFT':
self._soft_reboot(context, instance)
else:
self._hard_reboot(context, instance)
@check_instance_state(vm_state=set(vm_states.ALLOW_SOFT_REBOOT),
task_state=[None])
def _soft_reboot(self, context, instance):
expected_task_state = [None]
instance.task_state = task_states.REBOOTING
instance.save(expected_task_state=expected_task_state)
self._record_action_start(context, instance, instance_actions.REBOOT)
self.compute_rpcapi.reboot_instance(context, instance=instance,
block_device_info=None,
reboot_type='SOFT')
@check_instance_state(vm_state=set(vm_states.ALLOW_HARD_REBOOT),
task_state=task_states.ALLOW_REBOOT)
def _hard_reboot(self, context, instance):
instance.task_state = task_states.REBOOTING_HARD
instance.save(expected_task_state=task_states.ALLOW_REBOOT)
self._record_action_start(context, instance, instance_actions.REBOOT)
self.compute_rpcapi.reboot_instance(context, instance=instance,
block_device_info=None,
reboot_type='HARD')
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR])
def rebuild(self, context, instance, image_href, admin_password,
files_to_inject=None, **kwargs):
"""Rebuild the given instance with the provided attributes."""
files_to_inject = files_to_inject or []
metadata = kwargs.get('metadata', {})
preserve_ephemeral = kwargs.get('preserve_ephemeral', False)
auto_disk_config = kwargs.get('auto_disk_config')
if 'key_name' in kwargs:
key_name = kwargs.pop('key_name')
if key_name:
# NOTE(liuyulong): we are intentionally using the user_id from
# the request context rather than the instance.user_id because
# users own keys but instances are owned by projects, and
# another user in the same project can rebuild an instance
# even if they didn't create it.
key_pair = objects.KeyPair.get_by_name(context,
context.user_id,
key_name)
instance.key_name = key_pair.name
instance.key_data = key_pair.public_key
instance.keypairs = objects.KeyPairList(objects=[key_pair])
else:
instance.key_name = None
instance.key_data = None
instance.keypairs = objects.KeyPairList(objects=[])
# Use trusted_certs value from kwargs to create TrustedCerts object
trusted_certs = None
if 'trusted_certs' in kwargs:
# Note that the user can set, change, or unset / reset trusted
# certs. If they are explicitly specifying
# trusted_image_certificates=None, that means we'll either unset
# them on the instance *or* reset to use the defaults (if defaults
# are configured).
trusted_certs = kwargs.pop('trusted_certs')
instance.trusted_certs = self._retrieve_trusted_certs_object(
context, trusted_certs, rebuild=True)
image_id, image = self._get_image(context, image_href)
self._check_auto_disk_config(image=image, **kwargs)
flavor = instance.get_flavor()
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
root_bdm = compute_utils.get_root_bdm(context, instance, bdms)
# Check to see if the image is changing and we have a volume-backed
# server. The compute doesn't support changing the image in the
# root disk of a volume-backed server, so we need to just fail fast.
is_volume_backed = compute_utils.is_volume_backed_instance(
context, instance, bdms)
if is_volume_backed:
if trusted_certs:
# The only way we can get here is if the user tried to set
# trusted certs or specified trusted_image_certificates=None
# and default_trusted_certificate_ids is configured.
msg = _("Image certificate validation is not supported "
"for volume-backed servers.")
raise exception.CertificateValidationFailed(message=msg)
# For boot from volume, instance.image_ref is empty, so we need to
# query the image from the volume.
if root_bdm is None:
                # This shouldn't happen and is an error; we need to fail.
                # This is not the user's fault, it's an internal error.
                # Without a
# root BDM we have no way of knowing the backing volume (or
# image in that volume) for this instance.
raise exception.NovaException(
_('Unable to find root block device mapping for '
'volume-backed instance.'))
volume = self.volume_api.get(context, root_bdm.volume_id)
volume_image_metadata = volume.get('volume_image_metadata', {})
orig_image_ref = volume_image_metadata.get('image_id')
if orig_image_ref != image_href:
# Leave a breadcrumb.
LOG.debug('Requested to rebuild instance with a new image %s '
'for a volume-backed server with image %s in its '
'root volume which is not supported.', image_href,
orig_image_ref, instance=instance)
msg = _('Unable to rebuild with a different image for a '
'volume-backed server.')
raise exception.ImageUnacceptable(
image_id=image_href, reason=msg)
else:
orig_image_ref = instance.image_ref
self._checks_for_create_and_rebuild(context, image_id, image,
flavor, metadata, files_to_inject, root_bdm)
kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
context, None, None, image)
def _reset_image_metadata():
"""Remove old image properties that we're storing as instance
system metadata. These properties start with 'image_'.
Then add the properties for the new image.
"""
# FIXME(comstud): There's a race condition here in that if
# the system_metadata for this instance is updated after
            # we do the previous save() and before we update, those
# other updates will be lost. Since this problem exists in
# a lot of other places, I think it should be addressed in
# a DB layer overhaul.
orig_sys_metadata = dict(instance.system_metadata)
# Remove the old keys
for key in list(instance.system_metadata.keys()):
if key.startswith(utils.SM_IMAGE_PROP_PREFIX):
del instance.system_metadata[key]
# Add the new ones
new_sys_metadata = utils.get_system_metadata_from_image(
image, flavor)
instance.system_metadata.update(new_sys_metadata)
instance.save()
return orig_sys_metadata
# Since image might have changed, we may have new values for
# os_type, vm_mode, etc
options_from_image = self._inherit_properties_from_image(
image, auto_disk_config)
instance.update(options_from_image)
instance.task_state = task_states.REBUILDING
# An empty instance.image_ref is currently used as an indication
# of BFV. Preserve that over a rebuild to not break users.
if not is_volume_backed:
instance.image_ref = image_href
instance.kernel_id = kernel_id or ""
instance.ramdisk_id = ramdisk_id or ""
instance.progress = 0
instance.update(kwargs)
instance.save(expected_task_state=[None])
# On a rebuild, since we're potentially changing images, we need to
# wipe out the old image properties that we're storing as instance
# system metadata... and copy in the properties for the new image.
orig_sys_metadata = _reset_image_metadata()
self._record_action_start(context, instance, instance_actions.REBUILD)
# NOTE(sbauza): The migration script we provided in Newton should make
# sure that all our instances are currently migrated to have an
# attached RequestSpec object but let's consider that the operator only
# half migrated all their instances in the meantime.
host = instance.host
try:
request_spec = objects.RequestSpec.get_by_instance_uuid(
context, instance.uuid)
# If a new image is provided on rebuild, we will need to run
# through the scheduler again, but we want the instance to be
# rebuilt on the same host it's already on.
if orig_image_ref != image_href:
# We have to modify the request spec that goes to the scheduler
# to contain the new image. We persist this since we've already
# changed the instance.image_ref above so we're being
# consistent.
request_spec.image = objects.ImageMeta.from_dict(image)
request_spec.save()
if 'scheduler_hints' not in request_spec:
request_spec.scheduler_hints = {}
# Nuke the id on this so we can't accidentally save
# this hint hack later
del request_spec.id
# NOTE(danms): Passing host=None tells conductor to
# call the scheduler. The _nova_check_type hint
# requires that the scheduler returns only the same
# host that we are currently on and only checks
# rebuild-related filters.
request_spec.scheduler_hints['_nova_check_type'] = ['rebuild']
request_spec.force_hosts = [instance.host]
request_spec.force_nodes = [instance.node]
host = None
except exception.RequestSpecNotFound:
# Some old instances can still have no RequestSpec object attached
# to them, we need to support the old way
request_spec = None
self.compute_task_api.rebuild_instance(context, instance=instance,
new_pass=admin_password, injected_files=files_to_inject,
image_ref=image_href, orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata, bdms=bdms,
preserve_ephemeral=preserve_ephemeral, host=host,
request_spec=request_spec,
kwargs=kwargs)
@staticmethod
def _check_quota_for_upsize(context, instance, current_flavor, new_flavor):
project_id, user_id = quotas_obj.ids_from_instance(context,
instance)
# Deltas will be empty if the resize is not an upsize.
deltas = compute_utils.upsize_quota_delta(new_flavor,
current_flavor)
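        # Illustrative example (flavors assumed): resizing from 2 vcpus /
        # 4096 MB RAM to 4 vcpus / 8192 MB RAM yields
        # deltas = {'cores': 2, 'ram': 4096}; a same-size or downsize resize
        # yields no deltas and skips the quota check below.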
if deltas:
try:
res_deltas = {'cores': deltas.get('cores', 0),
'ram': deltas.get('ram', 0)}
objects.Quotas.check_deltas(context, res_deltas,
project_id, user_id=user_id,
check_project_id=project_id,
check_user_id=user_id)
except exception.OverQuota as exc:
quotas = exc.kwargs['quotas']
overs = exc.kwargs['overs']
usages = exc.kwargs['usages']
headroom = compute_utils.get_headroom(quotas, usages,
deltas)
(overs, reqs, total_alloweds,
useds) = compute_utils.get_over_quota_detail(headroom,
overs,
quotas,
deltas)
LOG.info("%(overs)s quota exceeded for %(pid)s,"
" tried to resize instance.",
{'overs': overs, 'pid': context.project_id})
raise exception.TooManyInstances(overs=overs,
req=reqs,
used=useds,
allowed=total_alloweds)
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.RESIZED])
def revert_resize(self, context, instance):
"""Reverts a resize, deleting the 'new' instance in the process."""
elevated = context.elevated()
migration = objects.Migration.get_by_instance_and_status(
elevated, instance.uuid, 'finished')
# If this is a resize down, a revert might go over quota.
self._check_quota_for_upsize(context, instance, instance.flavor,
instance.old_flavor)
instance.task_state = task_states.RESIZE_REVERTING
instance.save(expected_task_state=[None])
migration.status = 'reverting'
migration.save()
self._record_action_start(context, instance,
instance_actions.REVERT_RESIZE)
# Conductor updated the RequestSpec.flavor during the initial resize
# operation to point at the new flavor, so we need to update the
# RequestSpec to point back at the original flavor, otherwise
# subsequent move operations through the scheduler will be using the
# wrong flavor.
try:
reqspec = objects.RequestSpec.get_by_instance_uuid(
context, instance.uuid)
reqspec.flavor = instance.old_flavor
reqspec.save()
except exception.RequestSpecNotFound:
# TODO(mriedem): Make this a failure in Stein when we drop
# compatibility for missing request specs.
pass
# TODO(melwitt): We're not rechecking for strict quota here to guard
# against going over quota during a race at this time because the
# resource consumption for this operation is written to the database
# by compute.
self.compute_rpcapi.revert_resize(context, instance,
migration,
migration.dest_compute)
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.RESIZED])
def confirm_resize(self, context, instance, migration=None):
"""Confirms a migration/resize and deletes the 'old' instance."""
elevated = context.elevated()
# NOTE(melwitt): We're not checking quota here because there isn't a
# change in resource usage when confirming a resize. Resource
# consumption for resizes are written to the database by compute, so
# a confirm resize is just a clean up of the migration objects and a
# state change in compute.
if migration is None:
migration = objects.Migration.get_by_instance_and_status(
elevated, instance.uuid, 'finished')
migration.status = 'confirming'
migration.save()
self._record_action_start(context, instance,
instance_actions.CONFIRM_RESIZE)
self.compute_rpcapi.confirm_resize(context,
instance,
migration,
migration.source_compute)
@staticmethod
def _resize_cells_support(context, instance,
current_instance_type, new_instance_type):
"""Special API cell logic for resize."""
# NOTE(johannes/comstud): The API cell needs a local migration
# record for later resize_confirm and resize_reverts.
# We don't need source and/or destination
# information, just the old and new flavors. Status is set to
# 'finished' since nothing else will update the status along
# the way.
mig = objects.Migration(context=context.elevated())
mig.instance_uuid = instance.uuid
mig.old_instance_type_id = current_instance_type['id']
mig.new_instance_type_id = new_instance_type['id']
mig.status = 'finished'
mig.migration_type = (
mig.old_instance_type_id != mig.new_instance_type_id and
'resize' or 'migration')
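        # The and/or expression above is an old-style conditional: the type
        # is 'resize' when the flavor ids differ, otherwise 'migration'.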
mig.create()
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def resize(self, context, instance, flavor_id=None, clean_shutdown=True,
host_name=None, **extra_instance_updates):
"""Resize (ie, migrate) a running instance.
If flavor_id is None, the process is considered a migration, keeping
the original flavor_id. If flavor_id is not None, the instance should
be migrated to a new host and resized to the new flavor_id.
        host_name is always None in the resize case; it can be set only in
        the cold migration case.
"""
if host_name is not None:
            # Migrating to the host where the instance already exists is
            # pointless, so reject it.
if host_name == instance.host:
raise exception.CannotMigrateToSameHost()
# Check whether host exists or not.
node = objects.ComputeNode.get_first_node_by_host_for_old_compat(
context, host_name, use_slave=True)
self._check_auto_disk_config(instance, **extra_instance_updates)
current_instance_type = instance.get_flavor()
# If flavor_id is not provided, only migrate the instance.
if not flavor_id:
LOG.debug("flavor_id is None. Assuming migration.",
instance=instance)
new_instance_type = current_instance_type
else:
new_instance_type = flavors.get_flavor_by_flavor_id(
flavor_id, read_deleted="no")
if (new_instance_type.get('root_gb') == 0 and
current_instance_type.get('root_gb') != 0 and
not compute_utils.is_volume_backed_instance(context,
instance)):
reason = _('Resize to zero disk flavor is not allowed.')
raise exception.CannotResizeDisk(reason=reason)
if not new_instance_type:
raise exception.FlavorNotFound(flavor_id=flavor_id)
current_instance_type_name = current_instance_type['name']
new_instance_type_name = new_instance_type['name']
LOG.debug("Old instance type %(current_instance_type_name)s, "
"new instance type %(new_instance_type_name)s",
{'current_instance_type_name': current_instance_type_name,
'new_instance_type_name': new_instance_type_name},
instance=instance)
same_instance_type = (current_instance_type['id'] ==
new_instance_type['id'])
# NOTE(sirp): We don't want to force a customer to change their flavor
# when Ops is migrating off of a failed host.
if not same_instance_type and new_instance_type.get('disabled'):
raise exception.FlavorNotFound(flavor_id=flavor_id)
if same_instance_type and flavor_id and self.cell_type != 'compute':
raise exception.CannotResizeToSameFlavor()
# ensure there is sufficient headroom for upsizes
if flavor_id:
self._check_quota_for_upsize(context, instance,
current_instance_type,
new_instance_type)
instance.task_state = task_states.RESIZE_PREP
instance.progress = 0
instance.update(extra_instance_updates)
instance.save(expected_task_state=[None])
filter_properties = {'ignore_hosts': []}
if not CONF.allow_resize_to_same_host:
filter_properties['ignore_hosts'].append(instance.host)
if self.cell_type == 'api':
# Create migration record.
self._resize_cells_support(context, instance,
current_instance_type,
new_instance_type)
if not flavor_id:
self._record_action_start(context, instance,
instance_actions.MIGRATE)
else:
self._record_action_start(context, instance,
instance_actions.RESIZE)
# NOTE(sbauza): The migration script we provided in Newton should make
# sure that all our instances are currently migrated to have an
# attached RequestSpec object but let's consider that the operator only
# half migrated all their instances in the meantime.
try:
request_spec = objects.RequestSpec.get_by_instance_uuid(
context, instance.uuid)
request_spec.ignore_hosts = filter_properties['ignore_hosts']
except exception.RequestSpecNotFound:
# Some old instances can still have no RequestSpec object attached
# to them, we need to support the old way
if host_name is not None:
# If there is no request spec we cannot honor the request
# and we need to fail.
raise exception.CannotMigrateWithTargetHost()
request_spec = None
# TODO(melwitt): We're not rechecking for strict quota here to guard
# against going over quota during a race at this time because the
# resource consumption for this operation is written to the database
# by compute.
scheduler_hint = {'filter_properties': filter_properties}
if request_spec:
if host_name is None:
# If 'host_name' is not specified,
# clear the 'requested_destination' field of the RequestSpec.
request_spec.requested_destination = None
else:
# Set the host and the node so that the scheduler will
# validate them.
request_spec.requested_destination = objects.Destination(
host=node.host, node=node.hypervisor_hostname)
self.compute_task_api.resize_instance(context, instance,
extra_instance_updates, scheduler_hint=scheduler_hint,
flavor=new_instance_type,
clean_shutdown=clean_shutdown,
request_spec=request_spec)
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED])
def shelve(self, context, instance, clean_shutdown=True):
"""Shelve an instance.
Shuts down an instance and frees it up to be removed from the
hypervisor.
"""
instance.task_state = task_states.SHELVING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.SHELVE)
if not compute_utils.is_volume_backed_instance(context, instance):
name = '%s-shelved' % instance.display_name
image_meta = self._create_image(context, instance, name,
'snapshot')
image_id = image_meta['id']
self.compute_rpcapi.shelve_instance(context, instance=instance,
image_id=image_id, clean_shutdown=clean_shutdown)
else:
self.compute_rpcapi.shelve_offload_instance(context,
instance=instance, clean_shutdown=clean_shutdown)
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED])
def shelve_offload(self, context, instance, clean_shutdown=True):
"""Remove a shelved instance from the hypervisor."""
instance.task_state = task_states.SHELVING_OFFLOADING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance,
instance_actions.SHELVE_OFFLOAD)
self.compute_rpcapi.shelve_offload_instance(context, instance=instance,
clean_shutdown=clean_shutdown)
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED])
def unshelve(self, context, instance):
"""Restore a shelved instance."""
instance.task_state = task_states.UNSHELVING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.UNSHELVE)
try:
request_spec = objects.RequestSpec.get_by_instance_uuid(
context, instance.uuid)
except exception.RequestSpecNotFound:
# Some old instances can still have no RequestSpec object attached
# to them, we need to support the old way
request_spec = None
self.compute_task_api.unshelve_instance(context, instance,
request_spec)
@check_instance_lock
def add_fixed_ip(self, context, instance, network_id):
"""Add fixed_ip from specified network to given instance."""
self.compute_rpcapi.add_fixed_ip_to_instance(context,
instance=instance, network_id=network_id)
@check_instance_lock
def remove_fixed_ip(self, context, instance, address):
"""Remove fixed_ip from specified network to given instance."""
self.compute_rpcapi.remove_fixed_ip_from_instance(context,
instance=instance, address=address)
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE])
def pause(self, context, instance):
"""Pause the given instance."""
instance.task_state = task_states.PAUSING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.PAUSE)
self.compute_rpcapi.pause_instance(context, instance)
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.PAUSED])
def unpause(self, context, instance):
"""Unpause the given instance."""
instance.task_state = task_states.UNPAUSING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.UNPAUSE)
self.compute_rpcapi.unpause_instance(context, instance)
@check_instance_host
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for the given instance."""
return self.compute_rpcapi.get_diagnostics(context, instance=instance)
@check_instance_host
def get_instance_diagnostics(self, context, instance):
"""Retrieve diagnostics for the given instance."""
return self.compute_rpcapi.get_instance_diagnostics(context,
instance=instance)
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE])
def suspend(self, context, instance):
"""Suspend the given instance."""
instance.task_state = task_states.SUSPENDING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.SUSPEND)
self.compute_rpcapi.suspend_instance(context, instance)
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.SUSPENDED])
def resume(self, context, instance):
"""Resume the given instance."""
instance.task_state = task_states.RESUMING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.RESUME)
self.compute_rpcapi.resume_instance(context, instance)
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR])
def rescue(self, context, instance, rescue_password=None,
rescue_image_ref=None, clean_shutdown=True):
"""Rescue the given instance."""
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
for bdm in bdms:
if bdm.volume_id:
vol = self.volume_api.get(context, bdm.volume_id)
self.volume_api.check_attached(context, vol)
if compute_utils.is_volume_backed_instance(context, instance, bdms):
reason = _("Cannot rescue a volume-backed instance")
raise exception.InstanceNotRescuable(instance_id=instance.uuid,
reason=reason)
instance.task_state = task_states.RESCUING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.RESCUE)
self.compute_rpcapi.rescue_instance(context, instance=instance,
rescue_password=rescue_password, rescue_image_ref=rescue_image_ref,
clean_shutdown=clean_shutdown)
@check_instance_lock
@check_instance_state(vm_state=[vm_states.RESCUED])
def unrescue(self, context, instance):
"""Unrescue the given instance."""
instance.task_state = task_states.UNRESCUING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.UNRESCUE)
self.compute_rpcapi.unrescue_instance(context, instance=instance)
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE])
def set_admin_password(self, context, instance, password=None):
"""Set the root/admin password for the given instance.
@param context: Nova auth context.
@param instance: Nova instance object.
@param password: The admin password for the instance.
"""
instance.task_state = task_states.UPDATING_PASSWORD
instance.save(expected_task_state=[None])
self._record_action_start(context, instance,
instance_actions.CHANGE_PASSWORD)
self.compute_rpcapi.set_admin_password(context,
instance=instance,
new_pass=password)
@check_instance_host
@reject_instance_state(
task_state=[task_states.DELETING, task_states.MIGRATING])
def get_vnc_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
# TODO(melwitt): In Rocky, the compute manager puts the
# console authorization in the database in the above method.
# The following will be removed when everything has been
# converted to use the database, in Stein.
if CONF.workarounds.enable_consoleauth:
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance.uuid,
access_url=connect_info['access_url'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_vnc_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
return connect_info
@check_instance_host
@reject_instance_state(
task_state=[task_states.DELETING, task_states.MIGRATING])
def get_spice_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
# TODO(melwitt): In Rocky, the compute manager puts the
# console authorization in the database in the above method.
# The following will be removed when everything has been
# converted to use the database, in Stein.
if CONF.workarounds.enable_consoleauth:
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance.uuid,
access_url=connect_info['access_url'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_spice_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
return connect_info
@check_instance_host
@reject_instance_state(
task_state=[task_states.DELETING, task_states.MIGRATING])
def get_rdp_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_rdp_console(context,
instance=instance, console_type=console_type)
# TODO(melwitt): In Rocky, the compute manager puts the
# console authorization in the database in the above method.
# The following will be removed when everything has been
# converted to use the database, in Stein.
if CONF.workarounds.enable_consoleauth:
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance.uuid,
access_url=connect_info['access_url'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_rdp_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_rdp_console(context,
instance=instance, console_type=console_type)
return connect_info
@check_instance_host
@reject_instance_state(
task_state=[task_states.DELETING, task_states.MIGRATING])
def get_serial_console(self, context, instance, console_type):
"""Get a url to a serial console."""
connect_info = self.compute_rpcapi.get_serial_console(context,
instance=instance, console_type=console_type)
# TODO(melwitt): In Rocky, the compute manager puts the
# console authorization in the database in the above method.
# The following will be removed when everything has been
# converted to use the database, in Stein.
if CONF.workarounds.enable_consoleauth:
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance.uuid,
access_url=connect_info['access_url'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_serial_console_connect_info(self, context, instance, console_type):
"""Used in a child cell to get serial console."""
connect_info = self.compute_rpcapi.get_serial_console(context,
instance=instance, console_type=console_type)
return connect_info
@check_instance_host
@reject_instance_state(
task_state=[task_states.DELETING, task_states.MIGRATING])
def get_mks_console(self, context, instance, console_type):
"""Get a url to a MKS console."""
connect_info = self.compute_rpcapi.get_mks_console(context,
instance=instance, console_type=console_type)
# TODO(melwitt): In Rocky, the compute manager puts the
# console authorization in the database in the above method.
# The following will be removed when everything has been
# converted to use the database, in Stein.
if CONF.workarounds.enable_consoleauth:
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance.uuid,
access_url=connect_info['access_url'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_console_output(self, context, instance, tail_length=None):
"""Get console output for an instance."""
return self.compute_rpcapi.get_console_output(context,
instance=instance, tail_length=tail_length)
def lock(self, context, instance):
"""Lock the given instance."""
# Only update the lock if we are an admin (non-owner)
is_owner = instance.project_id == context.project_id
if instance.locked and is_owner:
return
context = context.elevated()
self._record_action_start(context, instance,
instance_actions.LOCK)
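        # The nested, decorated helper below appears to exist so that the
        # 'api' instance-action event recorded by @wrap_instance_event wraps
        # only the lock state change rather than the whole public method.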
@wrap_instance_event(prefix='api')
def lock(self, context, instance):
LOG.debug('Locking', instance=instance)
instance.locked = True
instance.locked_by = 'owner' if is_owner else 'admin'
instance.save()
lock(self, context, instance)
compute_utils.notify_about_instance_action(
context, instance, CONF.host,
action=fields_obj.NotificationAction.LOCK,
source=fields_obj.NotificationSource.API)
def is_expected_locked_by(self, context, instance):
is_owner = instance.project_id == context.project_id
expect_locked_by = 'owner' if is_owner else 'admin'
locked_by = instance.locked_by
if locked_by and locked_by != expect_locked_by:
return False
return True
def unlock(self, context, instance):
"""Unlock the given instance."""
context = context.elevated()
self._record_action_start(context, instance,
instance_actions.UNLOCK)
@wrap_instance_event(prefix='api')
def unlock(self, context, instance):
LOG.debug('Unlocking', instance=instance)
instance.locked = False
instance.locked_by = None
instance.save()
unlock(self, context, instance)
compute_utils.notify_about_instance_action(
context, instance, CONF.host,
action=fields_obj.NotificationAction.UNLOCK,
source=fields_obj.NotificationSource.API)
@check_instance_lock
@check_instance_cell
def reset_network(self, context, instance):
"""Reset networking on the instance."""
self.compute_rpcapi.reset_network(context, instance=instance)
@check_instance_lock
@check_instance_cell
def inject_network_info(self, context, instance):
"""Inject network info for the instance."""
self.compute_rpcapi.inject_network_info(context, instance=instance)
def _create_volume_bdm(self, context, instance, device, volume,
disk_bus, device_type, is_local_creation=False,
tag=None):
volume_id = volume['id']
if is_local_creation:
            # When the creation is done locally we can't specify the device
            # name, as we do not have a way to check that the specified name
            # is a valid one. We leave setting that value to when the actual
            # attach happens on the compute manager.
            # NOTE(artom) Local attach (to a shelved-offloaded instance)
            # cannot support device tagging because we have no way to call
            # the compute manager to check that it supports device tagging.
            # In fact, we don't even know which compute manager the instance
            # will eventually end up on when it's unshelved.
volume_bdm = objects.BlockDeviceMapping(
context=context,
source_type='volume', destination_type='volume',
instance_uuid=instance.uuid, boot_index=None,
volume_id=volume_id,
device_name=None, guest_format=None,
disk_bus=disk_bus, device_type=device_type)
volume_bdm.create()
else:
# NOTE(vish): This is done on the compute host because we want
# to avoid a race where two devices are requested at
# the same time. When db access is removed from
# compute, the bdm will be created here and we will
# have to make sure that they are assigned atomically.
volume_bdm = self.compute_rpcapi.reserve_block_device_name(
context, instance, device, volume_id, disk_bus=disk_bus,
device_type=device_type, tag=tag,
multiattach=volume['multiattach'])
return volume_bdm
def _check_volume_already_attached_to_instance(self, context, instance,
volume_id):
"""Avoid attaching the same volume to the same instance twice.
        The new Cinder flow (microversion 3.44) handles the checks
        differently and allows attaching the same volume to the same
        instance twice in order to enable live migration, so for the new
        flow we check whether a BDM already exists for this combination
        and fail if it does.
"""
try:
objects.BlockDeviceMapping.get_by_volume_and_instance(
context, volume_id, instance.uuid)
msg = _("volume %s already attached") % volume_id
raise exception.InvalidVolume(reason=msg)
except exception.VolumeBDMNotFound:
pass
def _check_attach_and_reserve_volume(self, context, volume, instance,
bdm, supports_multiattach=False):
volume_id = volume['id']
self.volume_api.check_availability_zone(context, volume,
instance=instance)
# If volume.multiattach=True and the microversion to
# support multiattach is not used, fail the request.
if volume['multiattach'] and not supports_multiattach:
raise exception.MultiattachNotSupportedOldMicroversion()
if 'id' in instance:
# This is a volume attach to an existing instance, so
# we only care about the cell the instance is in.
min_compute_version = objects.Service.get_minimum_version(
context, 'nova-compute')
else:
# The instance is being created and we don't know which
# cell it's going to land in, so check all cells.
# NOTE(danms): We don't require all cells to report here since
# we're really concerned about the new-ness of cells that the
# instance may be scheduled into. If a cell doesn't respond here,
# then it won't be a candidate for the instance and thus doesn't
# matter.
min_compute_version = \
objects.service.get_minimum_version_all_cells(
context, ['nova-compute'])
# Check to see if the computes have been upgraded to support
# booting from a multiattach volume.
if (volume['multiattach'] and
min_compute_version < MIN_COMPUTE_MULTIATTACH):
raise exception.MultiattachSupportNotYetAvailable()
if min_compute_version >= CINDER_V3_ATTACH_MIN_COMPUTE_VERSION:
# Attempt a new style volume attachment, but fallback to old-style
# in case Cinder API 3.44 isn't available.
try:
attachment_id = self.volume_api.attachment_create(
context, volume_id, instance.uuid)['id']
bdm.attachment_id = attachment_id
# NOTE(ildikov): In case of boot from volume the BDM at this
# point is not yet created in a cell database, so we can't
# call save(). When attaching a volume to an existing
# instance, the instance is already in a cell and the BDM has
# been created in that same cell so updating here in that case
# is "ok".
if bdm.obj_attr_is_set('id'):
bdm.save()
except exception.CinderAPIVersionNotAvailable:
LOG.debug('The available Cinder microversion is not high '
'enough to create new style volume attachment.')
self.volume_api.reserve_volume(context, volume_id)
else:
LOG.debug('The compute service version is not high enough to '
'create a new style volume attachment.')
self.volume_api.reserve_volume(context, volume_id)
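# Rough decision matrix for _check_attach_and_reserve_volume above: a
# new-style (Cinder 3.44) attachment is only attempted when the minimum
# nova-compute service version is high enough; if the compute services
# or the Cinder API are too old, the code falls back to the old-style
# reserve_volume call instead.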
def _attach_volume(self, context, instance, volume, device,
disk_bus, device_type, tag=None,
supports_multiattach=False):
"""Attach an existing volume to an existing instance.
This method is separated to make it possible for cells version
to override it.
"""
volume_bdm = self._create_volume_bdm(
context, instance, device, volume, disk_bus=disk_bus,
device_type=device_type, tag=tag)
try:
self._check_attach_and_reserve_volume(context, volume, instance,
volume_bdm,
supports_multiattach)
self._record_action_start(
context, instance, instance_actions.ATTACH_VOLUME)
self.compute_rpcapi.attach_volume(context, instance, volume_bdm)
except Exception:
with excutils.save_and_reraise_exception():
volume_bdm.destroy()
return volume_bdm.device_name
def _attach_volume_shelved_offloaded(self, context, instance, volume,
device, disk_bus, device_type):
"""Attach an existing volume to an instance in shelved offloaded state.
Attaching a volume to an instance in shelved offloaded state requires
performing the regular checks to see if we can attach and reserve the
volume, and then calling the attach method on the volume API to mark
the volume as 'in-use'.
The instance at this stage is not managed by a compute manager,
therefore the actual attachment will be performed once the instance
is unshelved.
"""
volume_id = volume['id']
@wrap_instance_event(prefix='api')
def attach_volume(self, context, v_id, instance, dev, attachment_id):
if attachment_id:
# Normally we wouldn't complete an attachment without a host
# connector, but we do this to make the volume status change
# to "in-use" to maintain the API semantics with the old flow.
# When unshelving the instance, the compute service will deal
# with this disconnected attachment.
self.volume_api.attachment_complete(context, attachment_id)
else:
self.volume_api.attach(context,
v_id,
instance.uuid,
dev)
volume_bdm = self._create_volume_bdm(
context, instance, device, volume, disk_bus=disk_bus,
device_type=device_type, is_local_creation=True)
try:
self._check_attach_and_reserve_volume(context, volume, instance,
volume_bdm)
self._record_action_start(
context, instance,
instance_actions.ATTACH_VOLUME)
attach_volume(self, context, volume_id, instance, device,
volume_bdm.attachment_id)
except Exception:
with excutils.save_and_reraise_exception():
volume_bdm.destroy()
return volume_bdm.device_name
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.STOPPED, vm_states.RESIZED,
vm_states.SOFT_DELETED, vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED])
def attach_volume(self, context, instance, volume_id, device=None,
disk_bus=None, device_type=None, tag=None,
supports_multiattach=False):
"""Attach an existing volume to an existing instance."""
# NOTE(vish): Fail fast if the device is not going to pass. This
# will need to be removed along with the test if we
# change the logic in the manager for what constitutes
# a valid device.
if device and not block_device.match_device(device):
raise exception.InvalidDevicePath(path=device)
# Check to see if the computes in this cell can support new-style
# volume attachments.
min_compute_version = objects.Service.get_minimum_version(
context, 'nova-compute')
if min_compute_version >= CINDER_V3_ATTACH_MIN_COMPUTE_VERSION:
try:
# Check to see if Cinder is new enough to create new-style
# attachments.
cinder.is_microversion_supported(context, '3.44')
except exception.CinderAPIVersionNotAvailable:
pass
else:
# Make sure the volume isn't already attached to this instance
# because based on the above checks, we'll use the new style
# attachment flow in _check_attach_and_reserve_volume and
# Cinder will allow multiple attachments between the same
# volume and instance but the old flow API semantics don't
# allow that so we enforce it here.
self._check_volume_already_attached_to_instance(context,
instance,
volume_id)
volume = self.volume_api.get(context, volume_id)
is_shelved_offloaded = instance.vm_state == vm_states.SHELVED_OFFLOADED
if is_shelved_offloaded:
if tag:
# NOTE(artom) Local attach (to a shelved-offload instance)
# cannot support device tagging because we have no way to call
# the compute manager to check that it supports device tagging.
# In fact, we don't even know which compute manager the
# instance will eventually end up on when it's unshelved.
raise exception.VolumeTaggedAttachToShelvedNotSupported()
if volume['multiattach']:
# NOTE(mriedem): Similar to tagged attach, we don't support
# attaching a multiattach volume to shelved offloaded instances
# because we can't tell if the compute host (since there isn't
# one) supports it. This could possibly be supported in the
# future if the scheduler was made aware of which computes
# support multiattach volumes.
raise exception.MultiattachToShelvedNotSupported()
return self._attach_volume_shelved_offloaded(context,
instance,
volume,
device,
disk_bus,
device_type)
return self._attach_volume(context, instance, volume, device,
disk_bus, device_type, tag=tag,
supports_multiattach=supports_multiattach)
def _detach_volume(self, context, instance, volume):
"""Detach volume from instance.
This method is separated to make it easier for cells version
to override.
"""
try:
self.volume_api.begin_detaching(context, volume['id'])
except exception.InvalidInput as exc:
raise exception.InvalidVolume(reason=exc.format_message())
attachments = volume.get('attachments', {})
attachment_id = None
if attachments and instance.uuid in attachments:
attachment_id = attachments[instance.uuid]['attachment_id']
self._record_action_start(
context, instance, instance_actions.DETACH_VOLUME)
self.compute_rpcapi.detach_volume(context, instance=instance,
volume_id=volume['id'], attachment_id=attachment_id)
def _detach_volume_shelved_offloaded(self, context, instance, volume):
"""Detach a volume from an instance in shelved offloaded state.
If the instance is shelved offloaded we just need to cleanup volume
calling the volume api detach, the volume api terminate_connection
and delete the bdm record.
If the volume has delete_on_termination option set then we call the
volume api delete as well.
"""
@wrap_instance_event(prefix='api')
def detach_volume(self, context, instance, bdms):
self._local_cleanup_bdm_volumes(bdms, instance, context)
try:
self.volume_api.begin_detaching(context, volume['id'])
except exception.InvalidInput as exc:
raise exception.InvalidVolume(reason=exc.format_message())
bdms = [objects.BlockDeviceMapping.get_by_volume_id(
context, volume['id'], instance.uuid)]
self._record_action_start(
context, instance,
instance_actions.DETACH_VOLUME)
detach_volume(self, context, instance, bdms)
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.STOPPED, vm_states.RESIZED,
vm_states.SOFT_DELETED, vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED])
def detach_volume(self, context, instance, volume):
"""Detach a volume from an instance."""
if instance.vm_state == vm_states.SHELVED_OFFLOADED:
self._detach_volume_shelved_offloaded(context, instance, volume)
else:
self._detach_volume(context, instance, volume)
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.RESIZED])
def swap_volume(self, context, instance, old_volume, new_volume):
"""Swap volume attached to an instance."""
# The caller likely got the instance from volume['attachments']
# in the first place, but let's sanity check.
if not old_volume.get('attachments', {}).get(instance.uuid):
msg = _("Old volume is attached to a different instance.")
raise exception.InvalidVolume(reason=msg)
if new_volume['attach_status'] == 'attached':
msg = _("New volume must be detached in order to swap.")
raise exception.InvalidVolume(reason=msg)
if int(new_volume['size']) < int(old_volume['size']):
msg = _("New volume must be the same size or larger.")
raise exception.InvalidVolume(reason=msg)
self.volume_api.check_availability_zone(context, new_volume,
instance=instance)
try:
self.volume_api.begin_detaching(context, old_volume['id'])
except exception.InvalidInput as exc:
raise exception.InvalidVolume(reason=exc.format_message())
# Get the BDM for the attached (old) volume so we can tell if it was
# attached with the new-style Cinder 3.44 API.
bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
context, old_volume['id'], instance.uuid)
new_attachment_id = None
if bdm.attachment_id is None:
# This is an old-style attachment so reserve the new volume before
# we cast to the compute host.
self.volume_api.reserve_volume(context, new_volume['id'])
else:
try:
self._check_volume_already_attached_to_instance(
context, instance, new_volume['id'])
except exception.InvalidVolume:
with excutils.save_and_reraise_exception():
self.volume_api.roll_detaching(context, old_volume['id'])
# This is a new-style attachment so for the volume that we are
# going to swap to, create a new volume attachment.
new_attachment_id = self.volume_api.attachment_create(
context, new_volume['id'], instance.uuid)['id']
self._record_action_start(
context, instance, instance_actions.SWAP_VOLUME)
try:
self.compute_rpcapi.swap_volume(
context, instance=instance,
old_volume_id=old_volume['id'],
new_volume_id=new_volume['id'],
new_attachment_id=new_attachment_id)
except Exception:
with excutils.save_and_reraise_exception():
self.volume_api.roll_detaching(context, old_volume['id'])
if new_attachment_id is None:
self.volume_api.unreserve_volume(context, new_volume['id'])
else:
self.volume_api.attachment_delete(
context, new_attachment_id)
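# Rollback sketch for the swap above: with an old-style attachment the
# new volume was merely reserved, so a failed cast unreserves it; with a
# new-style attachment a volume attachment record was created, so a
# failed cast deletes that attachment. In both cases the old volume is
# rolled back out of 'detaching'.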
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.STOPPED],
task_state=[None])
def attach_interface(self, context, instance, network_id, port_id,
requested_ip, tag=None):
"""Use hotplug to add an network adapter to an instance."""
self._record_action_start(
context, instance, instance_actions.ATTACH_INTERFACE)
return self.compute_rpcapi.attach_interface(context,
instance=instance, network_id=network_id, port_id=port_id,
requested_ip=requested_ip, tag=tag)
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.STOPPED],
task_state=[None])
def detach_interface(self, context, instance, port_id):
"""Detach an network adapter from an instance."""
self._record_action_start(
context, instance, instance_actions.DETACH_INTERFACE)
self.compute_rpcapi.detach_interface(context, instance=instance,
port_id=port_id)
def get_instance_metadata(self, context, instance):
"""Get all metadata associated with an instance."""
return self.db.instance_metadata_get(context, instance.uuid)
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED],
task_state=None)
def delete_instance_metadata(self, context, instance, key):
"""Delete the given metadata item from an instance."""
instance.delete_metadata_key(key)
self.compute_rpcapi.change_instance_metadata(context,
instance=instance,
diff={key: ['-']})
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED],
task_state=None)
def update_instance_metadata(self, context, instance,
metadata, delete=False):
"""Updates or creates instance metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
orig = dict(instance.metadata)
if delete:
_metadata = metadata
else:
_metadata = dict(instance.metadata)
_metadata.update(metadata)
self._check_metadata_properties_quota(context, _metadata)
instance.metadata = _metadata
instance.save()
diff = _diff_dict(orig, instance.metadata)
self.compute_rpcapi.change_instance_metadata(context,
instance=instance,
diff=diff)
return _metadata
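# The diff passed to change_instance_metadata is keyed by metadata key:
# removed keys map to ['-'] (as in delete_instance_metadata above) and,
# roughly, added or changed keys map to ['+', <new value>]. Illustrative
# example only:
#   _diff_dict({'a': '1'}, {'b': '2'}) -> {'a': ['-'], 'b': ['+', '2']}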
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED])
def live_migrate(self, context, instance, block_migration,
disk_over_commit, host_name, force=None, async_=False):
"""Migrate a server lively to a new host."""
LOG.debug("Going to try to live migrate instance to %s",
host_name or "another host", instance=instance)
if host_name:
# Validate the specified host before changing the instance task
# state.
nodes = objects.ComputeNodeList.get_all_by_host(context, host_name)
instance.task_state = task_states.MIGRATING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance,
instance_actions.LIVE_MIGRATION)
# TODO(melwitt): In Rocky, we optionally store console authorizations
# in both the consoleauth service and the database while
# we convert to using the database. Remove the condition for running
# this line with cells v2, when consoleauth is no longer being used by
# cells v2, in Stein.
if CONF.cells.enable or CONF.workarounds.enable_consoleauth:
self.consoleauth_rpcapi.delete_tokens_for_instance(
context, instance.uuid)
try:
request_spec = objects.RequestSpec.get_by_instance_uuid(
context, instance.uuid)
except exception.RequestSpecNotFound:
# Some old instances can still have no RequestSpec object attached
# to them, we need to support the old way
request_spec = None
# NOTE(sbauza): Force is a boolean by the new related API version
if force is False and host_name:
# Unset the host to make sure we call the scheduler
# from the conductor LiveMigrationTask. Yes this is tightly-coupled
# to behavior in conductor and not great.
host_name = None
# FIXME(sbauza): Since only Ironic driver uses more than one
# compute per service but doesn't support live migrations,
# let's provide the first one.
target = nodes[0]
if request_spec:
# TODO(sbauza): Hydrate a fake spec for old instances not yet
# having a request spec attached to them (particularly true for
# cells v1). For the moment, let's keep the same behaviour for
# all the instances but provide the destination only if a spec
# is found.
destination = objects.Destination(
host=target.host,
node=target.hypervisor_hostname
)
# This is essentially a hint to the scheduler to only consider
# the specified host but still run it through the filters.
request_spec.requested_destination = destination
try:
self.compute_task_api.live_migrate_instance(context, instance,
host_name, block_migration=block_migration,
disk_over_commit=disk_over_commit,
request_spec=request_spec, async_=async_)
except oslo_exceptions.MessagingTimeout as messaging_timeout:
with excutils.save_and_reraise_exception():
# NOTE(pkoniszewski): It is possible that MessagingTimeout
# occurs, but LM will still be in progress, so write
# instance fault to database
compute_utils.add_instance_fault_from_exc(context,
instance,
messaging_timeout)
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE],
task_state=[task_states.MIGRATING])
def live_migrate_force_complete(self, context, instance, migration_id):
"""Force live migration to complete.
:param context: Security context
:param instance: The instance that is being migrated
:param migration_id: ID of ongoing migration
"""
LOG.debug("Going to try to force live migration to complete",
instance=instance)
# NOTE(pkoniszewski): Get migration object to check if there is ongoing
# live migration for particular instance. Also pass migration id to
# compute to double check and avoid possible race condition.
migration = objects.Migration.get_by_id_and_instance(
context, migration_id, instance.uuid)
if migration.status != 'running':
raise exception.InvalidMigrationState(migration_id=migration_id,
instance_uuid=instance.uuid,
state=migration.status,
method='force complete')
self._record_action_start(
context, instance, instance_actions.LIVE_MIGRATION_FORCE_COMPLETE)
self.compute_rpcapi.live_migration_force_complete(
context, instance, migration)
@check_instance_lock
@check_instance_cell
@check_instance_state(task_state=[task_states.MIGRATING])
def live_migrate_abort(self, context, instance, migration_id,
support_abort_in_queue=False):
"""Abort an in-progress live migration.
:param context: Security context
:param instance: The instance that is being migrated
:param migration_id: ID of in-progress live migration
:param support_abort_in_queue: Flag indicating whether we can support
abort migrations in "queued" or "preparing" status.
"""
migration = objects.Migration.get_by_id_and_instance(context,
migration_id, instance.uuid)
LOG.debug("Going to cancel live migration %s",
migration.id, instance=instance)
# If the microversion does not support abort migration in queue,
# we are only able to abort migrations with `running` status;
# if it is supported, we are able to also abort migrations in
# `queued` and `preparing` status.
allowed_states = ['running']
queued_states = ['queued', 'preparing']
if support_abort_in_queue:
# The user requested a microversion that supports aborting a queued
# or preparing live migration. But we need to check that the
# compute service hosting the instance is new enough to support
# aborting a queued/preparing live migration, so we check the
# service version here.
# TODO(Kevin_Zheng): This service version check can be removed in
# Stein (at the earliest) when the API only supports Rocky or
# newer computes.
if migration.status in queued_states:
service = objects.Service.get_by_compute_host(
context, instance.host)
if service.version < MIN_COMPUTE_ABORT_QUEUED_LIVE_MIGRATION:
raise exception.AbortQueuedLiveMigrationNotYetSupported(
migration_id=migration_id, status=migration.status)
allowed_states.extend(queued_states)
if migration.status not in allowed_states:
raise exception.InvalidMigrationState(migration_id=migration_id,
instance_uuid=instance.uuid,
state=migration.status,
method='abort live migration')
self._record_action_start(context, instance,
instance_actions.LIVE_MIGRATION_CANCEL)
self.compute_rpcapi.live_migration_abort(context,
instance, migration.id)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR])
def evacuate(self, context, instance, host, on_shared_storage,
admin_password=None, force=None):
"""Running evacuate to target host.
Checking vm compute host state, if the host not in expected_state,
raising an exception.
:param instance: The instance to evacuate
:param host: Target host. If not set, the scheduler will pick one
:param on_shared_storage: True if the instance files are on shared storage
:param admin_password: password to set on rebuilt instance
:param force: Force the evacuation to the specific host target
"""
LOG.debug('vm evacuation scheduled', instance=instance)
inst_host = instance.host
service = objects.Service.get_by_compute_host(context, inst_host)
if self.servicegroup_api.service_is_up(service):
LOG.error('Instance compute service state on %s '
'expected to be down, but it was up.', inst_host)
raise exception.ComputeServiceInUse(host=inst_host)
instance.task_state = task_states.REBUILDING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.EVACUATE)
# NOTE(danms): Create this as a tombstone for the source compute
# to find and cleanup. No need to pass it anywhere else.
migration = objects.Migration(context,
source_compute=instance.host,
source_node=instance.node,
instance_uuid=instance.uuid,
status='accepted',
migration_type='evacuation')
if host:
migration.dest_compute = host
migration.create()
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "evacuate")
compute_utils.notify_about_instance_action(
context, instance, CONF.host,
action=fields_obj.NotificationAction.EVACUATE,
source=fields_obj.NotificationSource.API)
try:
request_spec = objects.RequestSpec.get_by_instance_uuid(
context, instance.uuid)
except exception.RequestSpecNotFound:
# Some old instances can still have no RequestSpec object attached
# to them, we need to support the old way
request_spec = None
# NOTE(sbauza): Force is a boolean by the new related API version
if force is False and host:
nodes = objects.ComputeNodeList.get_all_by_host(context, host)
# NOTE(sbauza): Unset the host to make sure we call the scheduler
host = None
# FIXME(sbauza): Since only Ironic driver uses more than one
# compute per service but doesn't support evacuations,
# let's provide the first one.
target = nodes[0]
if request_spec:
# TODO(sbauza): Hydrate a fake spec for old instances not yet
# having a request spec attached to them (particularly true for
# cells v1). For the moment, let's keep the same behaviour for
# all the instances but provide the destination only if a spec
# is found.
destination = objects.Destination(
host=target.host,
node=target.hypervisor_hostname
)
request_spec.requested_destination = destination
return self.compute_task_api.rebuild_instance(context,
instance=instance,
new_pass=admin_password,
injected_files=None,
image_ref=None,
orig_image_ref=None,
orig_sys_metadata=None,
bdms=None,
recreate=True,
on_shared_storage=on_shared_storage,
host=host,
request_spec=request_spec,
)
def get_migrations(self, context, filters):
"""Get all migrations for the given filters."""
load_cells()
migrations = []
for cell in CELLS:
if cell.uuid == objects.CellMapping.CELL0_UUID:
continue
with nova_context.target_cell(context, cell) as cctxt:
migrations.extend(objects.MigrationList.get_by_filters(
cctxt, filters).objects)
return objects.MigrationList(objects=migrations)
def get_migrations_sorted(self, context, filters, sort_dirs=None,
sort_keys=None, limit=None, marker=None):
"""Get all migrations for the given parameters."""
mig_objs = migration_list.get_migration_objects_sorted(
context, filters, limit, marker, sort_keys, sort_dirs)
return mig_objs
def get_migrations_in_progress_by_instance(self, context, instance_uuid,
migration_type=None):
"""Get all migrations of an instance in progress."""
return objects.MigrationList.get_in_progress_by_instance(
context, instance_uuid, migration_type)
def get_migration_by_id_and_instance(self, context,
migration_id, instance_uuid):
"""Get the migration of an instance by id."""
return objects.Migration.get_by_id_and_instance(
context, migration_id, instance_uuid)
def _get_bdm_by_volume_id(self, context, volume_id, expected_attrs=None):
"""Retrieve a BDM without knowing its cell.
.. note:: The context will be targeted to the cell in which the
BDM is found, if any.
:param context: The API request context.
:param volume_id: The ID of the volume.
:param expected_attrs: list of any additional attributes that should
be joined when the BDM is loaded from the database.
:raises: nova.exception.VolumeBDMNotFound if not found in any cell
"""
load_cells()
for cell in CELLS:
nova_context.set_target_cell(context, cell)
try:
return objects.BlockDeviceMapping.get_by_volume(
context, volume_id, expected_attrs=expected_attrs)
except exception.NotFound:
continue
raise exception.VolumeBDMNotFound(volume_id=volume_id)
def volume_snapshot_create(self, context, volume_id, create_info):
bdm = self._get_bdm_by_volume_id(
context, volume_id, expected_attrs=['instance'])
# We allow creating the snapshot in any vm_state as long as there is
# no task being performed on the instance and it has a host.
@check_instance_host
@check_instance_state(vm_state=None)
def do_volume_snapshot_create(self, context, instance):
self.compute_rpcapi.volume_snapshot_create(context, instance,
volume_id, create_info)
snapshot = {
'snapshot': {
'id': create_info.get('id'),
'volumeId': volume_id
}
}
return snapshot
return do_volume_snapshot_create(self, context, bdm.instance)
def volume_snapshot_delete(self, context, volume_id, snapshot_id,
delete_info):
bdm = self._get_bdm_by_volume_id(
context, volume_id, expected_attrs=['instance'])
# We allow deleting the snapshot in any vm_state as long as there is
# no task being performed on the instance and it has a host.
@check_instance_host
@check_instance_state(vm_state=None)
def do_volume_snapshot_delete(self, context, instance):
self.compute_rpcapi.volume_snapshot_delete(context, instance,
volume_id, snapshot_id, delete_info)
do_volume_snapshot_delete(self, context, bdm.instance)
def external_instance_event(self, api_context, instances, events):
# NOTE(danms): The external API consumer just provides events,
# but doesn't know where they go. We need to collate lists
# by the host the affected instance is on and dispatch them
# according to host
instances_by_host = collections.defaultdict(list)
events_by_host = collections.defaultdict(list)
hosts_by_instance = collections.defaultdict(list)
cell_contexts_by_host = {}
for instance in instances:
# instance._context is used here since it's already targeted to
# the cell that the instance lives in, and we need to use that
# cell context to lookup any migrations associated to the instance.
for host in self._get_relevant_hosts(instance._context, instance):
# NOTE(danms): All instances on a host must have the same
# mapping, so just use that
# NOTE(mdbooth): We don't currently support migrations between
# cells, and given that the Migration record is hosted in the
# cell _get_relevant_hosts will likely have to change before we
# do. Consequently we can currently assume that the context for
# both the source and destination hosts of a migration is the
# same.
if host not in cell_contexts_by_host:
cell_contexts_by_host[host] = instance._context
instances_by_host[host].append(instance)
hosts_by_instance[instance.uuid].append(host)
for event in events:
if event.name == 'volume-extended':
# Volume extend is a user-initiated operation starting in the
# Block Storage service API. We record an instance action so
# the user can monitor the operation to completion.
host = hosts_by_instance[event.instance_uuid][0]
cell_context = cell_contexts_by_host[host]
objects.InstanceAction.action_start(
cell_context, event.instance_uuid,
instance_actions.EXTEND_VOLUME, want_result=False)
for host in hosts_by_instance[event.instance_uuid]:
events_by_host[host].append(event)
for host in instances_by_host:
cell_context = cell_contexts_by_host[host]
# TODO(salv-orlando): Handle exceptions raised by the rpc api layer
# in order to ensure that a failure in processing events on a host
# will not prevent processing events on other hosts
self.compute_rpcapi.external_instance_event(
cell_context, instances_by_host[host], events_by_host[host],
host=host)
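# The collation above groups both instances and events per host (with
# the matching cell-targeted context) so that each compute host receives
# one RPC containing only the events for instances it hosts, or is a
# migration source/target for, per _get_relevant_hosts below.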
def _get_relevant_hosts(self, context, instance):
hosts = set()
hosts.add(instance.host)
if instance.migration_context is not None:
migration_id = instance.migration_context.migration_id
migration = objects.Migration.get_by_id(context, migration_id)
hosts.add(migration.dest_compute)
hosts.add(migration.source_compute)
LOG.debug('Instance %(instance)s is migrating, '
'copying events to all relevant hosts: '
'%(hosts)s', {'instance': instance.uuid,
'hosts': hosts})
return hosts
def get_instance_host_status(self, instance):
if instance.host:
try:
service = [service for service in instance.services if
service.binary == 'nova-compute'][0]
if service.forced_down:
host_status = fields_obj.HostStatus.DOWN
elif service.disabled:
host_status = fields_obj.HostStatus.MAINTENANCE
else:
alive = self.servicegroup_api.service_is_up(service)
host_status = ((alive and fields_obj.HostStatus.UP) or
fields_obj.HostStatus.UNKNOWN)
except IndexError:
host_status = fields_obj.HostStatus.NONE
else:
host_status = fields_obj.HostStatus.NONE
return host_status
def get_instances_host_statuses(self, instance_list):
host_status_dict = dict()
host_statuses = dict()
for instance in instance_list:
if instance.host:
if instance.host not in host_status_dict:
host_status = self.get_instance_host_status(instance)
host_status_dict[instance.host] = host_status
else:
host_status = host_status_dict[instance.host]
else:
host_status = fields_obj.HostStatus.NONE
host_statuses[instance.uuid] = host_status
return host_statuses
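# The host_status_dict above is a small per-call memoization: instances
# sharing a host reuse one get_instance_host_status lookup, so e.g.
# fifty instances spread over two hosts need only two service checks
# (illustrative numbers).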
def target_host_cell(fn):
"""Target a host-based function to a cell.
Expects to wrap a function of signature:
func(self, context, host, ...)
"""
@functools.wraps(fn)
def targeted(self, context, host, *args, **kwargs):
mapping = objects.HostMapping.get_by_host(context, host)
nova_context.set_target_cell(context, mapping.cell_mapping)
return fn(self, context, host, *args, **kwargs)
return targeted
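# target_host_cell is applied below to HostAPI methods such as
# set_host_enabled and get_host_uptime, so a host name known to the API
# is transparently resolved to the cell database that owns that host
# before the wrapped method runs.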
def _find_service_in_cell(context, service_id=None, service_host=None):
"""Find a service by id or hostname by searching all cells.
If one matching service is found, return it. If none or multiple
are found, raise an exception.
:param context: A context.RequestContext
:param service_id: If not none, the DB ID of the service to find
:param service_host: If not None, the hostname of the service to find
:returns: An objects.Service
:raises: ServiceNotUnique if multiple matching IDs are found
:raises: NotFound if no matches are found
:raises: NovaException if called with neither search option
"""
load_cells()
service = None
found_in_cell = None
is_uuid = False
if service_id is not None:
is_uuid = uuidutils.is_uuid_like(service_id)
if is_uuid:
lookup_fn = lambda c: objects.Service.get_by_uuid(c, service_id)
else:
lookup_fn = lambda c: objects.Service.get_by_id(c, service_id)
elif service_host is not None:
lookup_fn = lambda c: (
objects.Service.get_by_compute_host(c, service_host))
else:
LOG.exception('_find_service_in_cell called with no search parameters')
# This is intentionally cryptic so we don't leak implementation details
# out of the API.
raise exception.NovaException()
for cell in CELLS:
# NOTE(danms): Services can be in cell0, so don't skip it here
try:
with nova_context.target_cell(context, cell) as cctxt:
cell_service = lookup_fn(cctxt)
except exception.NotFound:
# NOTE(danms): Keep looking in other cells
continue
if service and cell_service:
raise exception.ServiceNotUnique()
service = cell_service
found_in_cell = cell
if service and is_uuid:
break
if service:
# NOTE(danms): Set the cell on the context so it remains
# when we return to our caller
nova_context.set_target_cell(context, found_in_cell)
return service
else:
raise exception.NotFound()
class HostAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host operations."""
def __init__(self, rpcapi=None):
self.rpcapi = rpcapi or compute_rpcapi.ComputeAPI()
self.servicegroup_api = servicegroup.API()
super(HostAPI, self).__init__()
def _assert_host_exists(self, context, host_name, must_be_up=False):
"""Raise HostNotFound if compute host doesn't exist."""
service = objects.Service.get_by_compute_host(context, host_name)
if not service:
raise exception.HostNotFound(host=host_name)
if must_be_up and not self.servicegroup_api.service_is_up(service):
raise exception.ComputeServiceUnavailable(host=host_name)
return service['host']
@wrap_exception()
@target_host_cell
def set_host_enabled(self, context, host_name, enabled):
"""Sets the specified host's ability to accept new instances."""
host_name = self._assert_host_exists(context, host_name)
payload = {'host_name': host_name, 'enabled': enabled}
compute_utils.notify_about_host_update(context,
'set_enabled.start',
payload)
result = self.rpcapi.set_host_enabled(context, enabled=enabled,
host=host_name)
compute_utils.notify_about_host_update(context,
'set_enabled.end',
payload)
return result
@target_host_cell
def get_host_uptime(self, context, host_name):
"""Returns the result of calling "uptime" on the target host."""
host_name = self._assert_host_exists(context, host_name,
must_be_up=True)
return self.rpcapi.get_host_uptime(context, host=host_name)
@wrap_exception()
@target_host_cell
def host_power_action(self, context, host_name, action):
"""Reboots, shuts down or powers up the host."""
host_name = self._assert_host_exists(context, host_name)
payload = {'host_name': host_name, 'action': action}
compute_utils.notify_about_host_update(context,
'power_action.start',
payload)
result = self.rpcapi.host_power_action(context, action=action,
host=host_name)
compute_utils.notify_about_host_update(context,
'power_action.end',
payload)
return result
@wrap_exception()
@target_host_cell
def set_host_maintenance(self, context, host_name, mode):
"""Start/Stop host maintenance window. On start, it triggers
the evacuation of guest VMs.
"""
host_name = self._assert_host_exists(context, host_name)
payload = {'host_name': host_name, 'mode': mode}
compute_utils.notify_about_host_update(context,
'set_maintenance.start',
payload)
result = self.rpcapi.host_maintenance_mode(context,
host_param=host_name, mode=mode, host=host_name)
compute_utils.notify_about_host_update(context,
'set_maintenance.end',
payload)
return result
def service_get_all(self, context, filters=None, set_zones=False,
all_cells=False):
"""Returns a list of services, optionally filtering the results.
If specified, 'filters' should be a dictionary containing services
attributes and matching values. For example, to get a list of services
for the 'compute' topic, use filters={'topic': 'compute'}.
If all_cells=True, then scan all cells and merge the results.
"""
if filters is None:
filters = {}
disabled = filters.pop('disabled', None)
if 'availability_zone' in filters:
set_zones = True
# NOTE(danms): Eventually this all_cells nonsense should go away
# and we should always iterate over the cells. However, certain
# callers need the legacy behavior for now.
if all_cells:
services = []
service_dict = nova_context.scatter_gather_all_cells(context,
objects.ServiceList.get_all, disabled, set_zones=set_zones)
for service in service_dict.values():
if service not in (nova_context.did_not_respond_sentinel,
nova_context.raised_exception_sentinel):
services.extend(service)
else:
services = objects.ServiceList.get_all(context, disabled,
set_zones=set_zones)
ret_services = []
for service in services:
for key, val in filters.items():
if service[key] != val:
break
else:
# All filters matched.
ret_services.append(service)
return ret_services
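# Illustrative call, assuming an admin request context named ctxt:
#   services = self.service_get_all(ctxt, filters={'topic': 'compute'},
#                                   all_cells=True)
# This scatter-gathers every cell and then applies the 'topic' filter
# client-side in the loop above.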
def service_get_by_id(self, context, service_id):
"""Get service entry for the given service id or uuid."""
try:
return _find_service_in_cell(context, service_id=service_id)
except exception.NotFound:
raise exception.ServiceNotFound(service_id=service_id)
@target_host_cell
def service_get_by_compute_host(self, context, host_name):
"""Get service entry for the given compute hostname."""
return objects.Service.get_by_compute_host(context, host_name)
def _service_update(self, context, host_name, binary, params_to_update):
"""Performs the actual service update operation."""
service = objects.Service.get_by_args(context, host_name, binary)
service.update(params_to_update)
service.save()
return service
@target_host_cell
def service_update(self, context, host_name, binary, params_to_update):
"""Enable / Disable a service.
For compute services, this stops new builds and migrations going to
the host.
"""
return self._service_update(context, host_name, binary,
params_to_update)
def _service_delete(self, context, service_id):
"""Performs the actual Service deletion operation."""
try:
service = _find_service_in_cell(context, service_id=service_id)
except exception.NotFound:
raise exception.ServiceNotFound(service_id=service_id)
service.destroy()
# TODO(mriedem): Nothing outside of tests is using this now so we should
# be able to remove it.
def service_delete(self, context, service_id):
"""Deletes the specified service found via id or uuid."""
self._service_delete(context, service_id)
@target_host_cell
def instance_get_all_by_host(self, context, host_name):
"""Return all instances on the given host."""
return objects.InstanceList.get_by_host(context, host_name)
def task_log_get_all(self, context, task_name, period_beginning,
period_ending, host=None, state=None):
"""Return the task logs within a given range, optionally
filtering by host and/or state.
"""
return self.db.task_log_get_all(context, task_name,
period_beginning,
period_ending,
host=host,
state=state)
def compute_node_get(self, context, compute_id):
"""Return compute node entry for particular integer ID or UUID."""
load_cells()
# NOTE(danms): Unfortunately this API exposes database identifiers
# which means we really can't do something efficient here
is_uuid = uuidutils.is_uuid_like(compute_id)
for cell in CELLS:
if cell.uuid == objects.CellMapping.CELL0_UUID:
continue
with nova_context.target_cell(context, cell) as cctxt:
try:
if is_uuid:
return objects.ComputeNode.get_by_uuid(cctxt,
compute_id)
return objects.ComputeNode.get_by_id(cctxt,
int(compute_id))
except exception.ComputeHostNotFound:
# NOTE(danms): Keep looking in other cells
continue
raise exception.ComputeHostNotFound(host=compute_id)
def compute_node_get_all(self, context, limit=None, marker=None):
load_cells()
computes = []
uuid_marker = marker and uuidutils.is_uuid_like(marker)
for cell in CELLS:
if cell.uuid == objects.CellMapping.CELL0_UUID:
continue
with nova_context.target_cell(context, cell) as cctxt:
# If we have a marker and it's a uuid, see if the compute node
# is in this cell.
if marker and uuid_marker:
try:
compute_marker = objects.ComputeNode.get_by_uuid(
cctxt, marker)
# we found the marker compute node, so use its id
# for the actual marker for paging in this cell's db
marker = compute_marker.id
except exception.ComputeHostNotFound:
# The marker node isn't in this cell so keep looking.
continue
try:
cell_computes = objects.ComputeNodeList.get_by_pagination(
cctxt, limit=limit, marker=marker)
except exception.MarkerNotFound:
# NOTE(danms): Keep looking through cells
continue
computes.extend(cell_computes)
# NOTE(danms): We must have found the marker, so continue on
# without one
marker = None
if limit:
limit -= len(cell_computes)
if limit <= 0:
break
if marker is not None and len(computes) == 0:
# NOTE(danms): If we did not find the marker in any cell,
# mimic the db_api behavior here.
raise exception.MarkerNotFound(marker=marker)
return objects.ComputeNodeList(objects=computes)
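# Paging sketch for the loop above: a uuid marker is first located in
# its owning cell and translated to that cell's integer id; once that
# cell has been paged the marker is cleared and any remaining limit
# carries over to the following cells until it is exhausted.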
def compute_node_search_by_hypervisor(self, context, hypervisor_match):
load_cells()
computes = []
for cell in CELLS:
if cell.uuid == objects.CellMapping.CELL0_UUID:
continue
with nova_context.target_cell(context, cell) as cctxt:
cell_computes = objects.ComputeNodeList.get_by_hypervisor(
cctxt, hypervisor_match)
computes.extend(cell_computes)
return objects.ComputeNodeList(objects=computes)
def compute_node_statistics(self, context):
load_cells()
cell_stats = []
for cell in CELLS:
if cell.uuid == objects.CellMapping.CELL0_UUID:
continue
with nova_context.target_cell(context, cell) as cctxt:
cell_stats.append(self.db.compute_node_statistics(cctxt))
if cell_stats:
keys = cell_stats[0].keys()
return {k: sum(stats[k] for stats in cell_stats)
for k in keys}
else:
return {}
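# The per-cell statistics dicts are merged by summing each key, e.g. two
# cells reporting {'count': 3, ...} and {'count': 2, ...} yield
# {'count': 5, ...} (illustrative values only).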
class InstanceActionAPI(base.Base):
"""Sub-set of the Compute Manager API for managing instance actions."""
def actions_get(self, context, instance, limit=None, marker=None,
filters=None):
return objects.InstanceActionList.get_by_instance_uuid(
context, instance.uuid, limit, marker, filters)
def action_get_by_request_id(self, context, instance, request_id):
return objects.InstanceAction.get_by_request_id(
context, instance.uuid, request_id)
def action_events_get(self, context, instance, action_id):
return objects.InstanceActionEventList.get_by_action(
context, action_id)
class AggregateAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host aggregates."""
def __init__(self, **kwargs):
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.scheduler_client = scheduler_client.SchedulerClient()
self.placement_client = self.scheduler_client.reportclient
super(AggregateAPI, self).__init__(**kwargs)
@wrap_exception()
def create_aggregate(self, context, aggregate_name, availability_zone):
"""Creates the model for the aggregate."""
aggregate = objects.Aggregate(context=context)
aggregate.name = aggregate_name
if availability_zone:
aggregate.metadata = {'availability_zone': availability_zone}
aggregate.create()
self.scheduler_client.update_aggregates(context, [aggregate])
return aggregate
def get_aggregate(self, context, aggregate_id):
"""Get an aggregate by id."""
return objects.Aggregate.get_by_id(context, aggregate_id)
def get_aggregate_list(self, context):
"""Get all the aggregates."""
return objects.AggregateList.get_all(context)
def get_aggregates_by_host(self, context, compute_host):
"""Get all the aggregates where the given host is presented."""
return objects.AggregateList.get_by_host(context, compute_host)
@wrap_exception()
def update_aggregate(self, context, aggregate_id, values):
"""Update the properties of an aggregate."""
aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
if 'name' in values:
aggregate.name = values.pop('name')
aggregate.save()
self.is_safe_to_update_az(context, values, aggregate=aggregate,
action_name=AGGREGATE_ACTION_UPDATE)
if values:
aggregate.update_metadata(values)
aggregate.updated_at = timeutils.utcnow()
self.scheduler_client.update_aggregates(context, [aggregate])
# If updated values include availability_zones, then the cache
# which stored availability_zones and host need to be reset
if values.get('availability_zone'):
availability_zones.reset_cache()
return aggregate
@wrap_exception()
def update_aggregate_metadata(self, context, aggregate_id, metadata):
"""Updates the aggregate metadata."""
aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
self.is_safe_to_update_az(context, metadata, aggregate=aggregate,
action_name=AGGREGATE_ACTION_UPDATE_META)
aggregate.update_metadata(metadata)
self.scheduler_client.update_aggregates(context, [aggregate])
# If updated metadata include availability_zones, then the cache
# which stored availability_zones and host need to be reset
if metadata and metadata.get('availability_zone'):
availability_zones.reset_cache()
aggregate.updated_at = timeutils.utcnow()
return aggregate
@wrap_exception()
def delete_aggregate(self, context, aggregate_id):
"""Deletes the aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id}
compute_utils.notify_about_aggregate_update(context,
"delete.start",
aggregate_payload)
aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
compute_utils.notify_about_aggregate_action(
context=context,
aggregate=aggregate,
action=fields_obj.NotificationAction.DELETE,
phase=fields_obj.NotificationPhase.START)
if len(aggregate.hosts) > 0:
msg = _("Host aggregate is not empty")
raise exception.InvalidAggregateActionDelete(
aggregate_id=aggregate_id, reason=msg)
aggregate.destroy()
self.scheduler_client.delete_aggregate(context, aggregate)
compute_utils.notify_about_aggregate_update(context,
"delete.end",
aggregate_payload)
compute_utils.notify_about_aggregate_action(
context=context,
aggregate=aggregate,
action=fields_obj.NotificationAction.DELETE,
phase=fields_obj.NotificationPhase.END)
def is_safe_to_update_az(self, context, metadata, aggregate,
hosts=None,
action_name=AGGREGATE_ACTION_ADD):
"""Determine if updates alter an aggregate's availability zone.
:param context: local context
:param metadata: Target metadata for updating aggregate
:param aggregate: Aggregate to update
:param hosts: Hosts to check. If None, aggregate.hosts is used
:type hosts: list
:param action_name: Calling method for logging purposes
"""
if 'availability_zone' in metadata:
if not metadata['availability_zone']:
msg = _("Aggregate %s does not support empty named "
"availability zone") % aggregate.name
self._raise_invalid_aggregate_exc(action_name, aggregate.id,
msg)
_hosts = hosts or aggregate.hosts
host_aggregates = objects.AggregateList.get_by_metadata_key(
context, 'availability_zone', hosts=_hosts)
conflicting_azs = [
agg.availability_zone for agg in host_aggregates
if agg.availability_zone != metadata['availability_zone']
and agg.id != aggregate.id]
if conflicting_azs:
msg = _("One or more hosts already in availability zone(s) "
"%s") % conflicting_azs
self._raise_invalid_aggregate_exc(action_name, aggregate.id,
msg)
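# Example of the conflict check above (hypothetical names): adding a
# host that already belongs to an aggregate with availability_zone='az1'
# to an aggregate whose metadata sets availability_zone='az2' is
# rejected, because a host cannot sit in two different availability
# zones.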
def _raise_invalid_aggregate_exc(self, action_name, aggregate_id, reason):
if action_name == AGGREGATE_ACTION_ADD:
raise exception.InvalidAggregateActionAdd(
aggregate_id=aggregate_id, reason=reason)
elif action_name == AGGREGATE_ACTION_UPDATE:
raise exception.InvalidAggregateActionUpdate(
aggregate_id=aggregate_id, reason=reason)
elif action_name == AGGREGATE_ACTION_UPDATE_META:
raise exception.InvalidAggregateActionUpdateMeta(
aggregate_id=aggregate_id, reason=reason)
elif action_name == AGGREGATE_ACTION_DELETE:
raise exception.InvalidAggregateActionDelete(
aggregate_id=aggregate_id, reason=reason)
raise exception.NovaException(
_("Unexpected aggregate action %s") % action_name)
def _update_az_cache_for_host(self, context, host_name, aggregate_meta):
# Update the availability_zone cache to avoid returning a stale
# availability_zone during the cache retention time when a host is
# added to or removed from an aggregate.
if aggregate_meta and aggregate_meta.get('availability_zone'):
availability_zones.update_host_availability_zone_cache(context,
host_name)
@wrap_exception()
def add_host_to_aggregate(self, context, aggregate_id, host_name):
"""Adds the host to an aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id,
'host_name': host_name}
compute_utils.notify_about_aggregate_update(context,
"addhost.start",
aggregate_payload)
# validates the host; HostMappingNotFound or ComputeHostNotFound
# is raised if invalid
try:
mapping = objects.HostMapping.get_by_host(context, host_name)
nova_context.set_target_cell(context, mapping.cell_mapping)
service = objects.Service.get_by_compute_host(context, host_name)
except exception.HostMappingNotFound:
try:
# NOTE(danms): This targets our cell
service = _find_service_in_cell(context,
service_host=host_name)
except exception.NotFound:
raise exception.ComputeHostNotFound(host=host_name)
if service.host != host_name:
# NOTE(danms): If we found a service but it is not an
# exact match, we may have a case-insensitive backend
# database (like mysql) which will end up with us
# adding the host-aggregate mapping with a
# non-matching hostname.
raise exception.ComputeHostNotFound(host=host_name)
aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
compute_utils.notify_about_aggregate_action(
context=context,
aggregate=aggregate,
action=fields_obj.NotificationAction.ADD_HOST,
phase=fields_obj.NotificationPhase.START)
self.is_safe_to_update_az(context, aggregate.metadata,
hosts=[host_name], aggregate=aggregate)
aggregate.add_host(host_name)
self.scheduler_client.update_aggregates(context, [aggregate])
try:
self.placement_client.aggregate_add_host(
context, aggregate.uuid, host_name)
except exception.PlacementAPIConnectFailure:
# NOTE(jaypipes): Rocky should be able to tolerate the nova-api
# service not communicating with the Placement API, so just log a
# warning here.
# TODO(jaypipes): Remove this in Stein, when placement must be able
# to be contacted from the nova-api service.
LOG.warning("Failed to associate %s with a placement "
"aggregate: %s. There was a failure to communicate "
"with the placement service.",
host_name, aggregate.uuid)
except (exception.ResourceProviderNotFound,
exception.ResourceProviderAggregateRetrievalFailed,
exception.ResourceProviderUpdateFailed,
exception.ResourceProviderUpdateConflict) as err:
# NOTE(jaypipes): We don't want a failure to perform the mirroring
# action in the placement service to be returned to the user (they
# probably don't know anything about the placement service and
# would just be confused). So, we just log a warning here, noting
# that on the next run of nova-manage placement sync_aggregates
# things will go back to normal
LOG.warning("Failed to associate %s with a placement "
"aggregate: %s. This may be corrected after running "
"nova-manage placement sync_aggregates.",
host_name, err)
self._update_az_cache_for_host(context, host_name, aggregate.metadata)
# NOTE(jogo): Send message to host to support resource pools
self.compute_rpcapi.add_aggregate_host(context,
aggregate=aggregate, host_param=host_name, host=host_name)
aggregate_payload.update({'name': aggregate.name})
compute_utils.notify_about_aggregate_update(context,
"addhost.end",
aggregate_payload)
compute_utils.notify_about_aggregate_action(
context=context,
aggregate=aggregate,
action=fields_obj.NotificationAction.ADD_HOST,
phase=fields_obj.NotificationPhase.END)
return aggregate
@wrap_exception()
def remove_host_from_aggregate(self, context, aggregate_id, host_name):
"""Removes host from the aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id,
'host_name': host_name}
compute_utils.notify_about_aggregate_update(context,
"removehost.start",
aggregate_payload)
# validates the host; HostMappingNotFound or ComputeHostNotFound
# is raised if invalid
mapping = objects.HostMapping.get_by_host(context, host_name)
nova_context.set_target_cell(context, mapping.cell_mapping)
objects.Service.get_by_compute_host(context, host_name)
aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
compute_utils.notify_about_aggregate_action(
context=context,
aggregate=aggregate,
action=fields_obj.NotificationAction.REMOVE_HOST,
phase=fields_obj.NotificationPhase.START)
aggregate.delete_host(host_name)
self.scheduler_client.update_aggregates(context, [aggregate])
try:
self.placement_client.aggregate_remove_host(
context, aggregate.uuid, host_name)
except exception.PlacementAPIConnectFailure:
# NOTE(jaypipes): Rocky should be able to tolerate the nova-api
# service not communicating with the Placement API, so just log a
# warning here.
# TODO(jaypipes): Remove this in Stein, when placement must be able
# to be contacted from the nova-api service.
LOG.warning("Failed to remove association of %s with a placement "
"aggregate: %s. There was a failure to communicate "
"with the placement service.",
host_name, aggregate.uuid)
except (exception.ResourceProviderNotFound,
exception.ResourceProviderAggregateRetrievalFailed,
exception.ResourceProviderUpdateFailed,
exception.ResourceProviderUpdateConflict) as err:
# NOTE(jaypipes): We don't want a failure to perform the mirroring
# action in the placement service to be returned to the user (they
# probably don't know anything about the placement service and
# would just be confused). So, we just log a warning here, noting
# that on the next run of nova-manage placement sync_aggregates
# things will go back to normal
LOG.warning("Failed to remove association of %s with a placement "
"aggregate: %s. This may be corrected after running "
"nova-manage placement sync_aggregates.",
host_name, err)
self._update_az_cache_for_host(context, host_name, aggregate.metadata)
self.compute_rpcapi.remove_aggregate_host(context,
aggregate=aggregate, host_param=host_name, host=host_name)
compute_utils.notify_about_aggregate_update(context,
"removehost.end",
aggregate_payload)
compute_utils.notify_about_aggregate_action(
context=context,
aggregate=aggregate,
action=fields_obj.NotificationAction.REMOVE_HOST,
phase=fields_obj.NotificationPhase.END)
return aggregate
class KeypairAPI(base.Base):
"""Subset of the Compute Manager API for managing key pairs."""
get_notifier = functools.partial(rpc.get_notifier, service='api')
wrap_exception = functools.partial(exception_wrapper.wrap_exception,
get_notifier=get_notifier,
binary='nova-api')
def _notify(self, context, event_suffix, keypair_name):
payload = {
'tenant_id': context.project_id,
'user_id': context.user_id,
'key_name': keypair_name,
}
notify = self.get_notifier()
notify.info(context, 'keypair.%s' % event_suffix, payload)
def _validate_new_key_pair(self, context, user_id, key_name, key_type):
safe_chars = "_- " + string.digits + string.ascii_letters
clean_value = "".join(x for x in key_name if x in safe_chars)
if clean_value != key_name:
raise exception.InvalidKeypair(
reason=_("Keypair name contains unsafe characters"))
try:
utils.check_string_length(key_name, min_length=1, max_length=255)
except exception.InvalidInput:
raise exception.InvalidKeypair(
reason=_('Keypair name must be a string and between '
'1 and 255 characters long'))
try:
objects.Quotas.check_deltas(context, {'key_pairs': 1}, user_id)
except exception.OverQuota:
raise exception.KeypairLimitExceeded()
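# The safe-character check above keeps only letters, digits, '_', '-'
# and spaces; if stripping anything else changes the name, the keypair
# is rejected. For example (hypothetical names) 'my-key 1' passes while
# 'my key!' fails because of the '!'.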
@wrap_exception()
def import_key_pair(self, context, user_id, key_name, public_key,
key_type=keypair_obj.KEYPAIR_TYPE_SSH):
"""Import a key pair using an existing public key."""
self._validate_new_key_pair(context, user_id, key_name, key_type)
self._notify(context, 'import.start', key_name)
keypair = objects.KeyPair(context)
keypair.user_id = user_id
keypair.name = key_name
keypair.type = key_type
keypair.fingerprint = None
keypair.public_key = public_key
compute_utils.notify_about_keypair_action(
context=context,
keypair=keypair,
action=fields_obj.NotificationAction.IMPORT,
phase=fields_obj.NotificationPhase.START)
fingerprint = self._generate_fingerprint(public_key, key_type)
keypair.fingerprint = fingerprint
keypair.create()
compute_utils.notify_about_keypair_action(
context=context,
keypair=keypair,
action=fields_obj.NotificationAction.IMPORT,
phase=fields_obj.NotificationPhase.END)
self._notify(context, 'import.end', key_name)
return keypair
@wrap_exception()
def create_key_pair(self, context, user_id, key_name,
key_type=keypair_obj.KEYPAIR_TYPE_SSH):
"""Create a new key pair."""
self._validate_new_key_pair(context, user_id, key_name, key_type)
keypair = objects.KeyPair(context)
keypair.user_id = user_id
keypair.name = key_name
keypair.type = key_type
keypair.fingerprint = None
keypair.public_key = None
self._notify(context, 'create.start', key_name)
compute_utils.notify_about_keypair_action(
context=context,
keypair=keypair,
action=fields_obj.NotificationAction.CREATE,
phase=fields_obj.NotificationPhase.START)
private_key, public_key, fingerprint = self._generate_key_pair(
user_id, key_type)
keypair.fingerprint = fingerprint
keypair.public_key = public_key
keypair.create()
# NOTE(melwitt): We recheck the quota after creating the object to
# prevent users from allocating more resources than their allowed quota
# in the event of a race. This is configurable because it can be
# expensive if strict quota limits are not required in a deployment.
if CONF.quota.recheck_quota:
try:
objects.Quotas.check_deltas(context, {'key_pairs': 0}, user_id)
except exception.OverQuota:
keypair.destroy()
raise exception.KeypairLimitExceeded()
compute_utils.notify_about_keypair_action(
context=context,
keypair=keypair,
action=fields_obj.NotificationAction.CREATE,
phase=fields_obj.NotificationPhase.END)
self._notify(context, 'create.end', key_name)
return keypair, private_key
def _generate_fingerprint(self, public_key, key_type):
if key_type == keypair_obj.KEYPAIR_TYPE_SSH:
return crypto.generate_fingerprint(public_key)
elif key_type == keypair_obj.KEYPAIR_TYPE_X509:
return crypto.generate_x509_fingerprint(public_key)
def _generate_key_pair(self, user_id, key_type):
if key_type == keypair_obj.KEYPAIR_TYPE_SSH:
return crypto.generate_key_pair()
elif key_type == keypair_obj.KEYPAIR_TYPE_X509:
return crypto.generate_winrm_x509_cert(user_id)
@wrap_exception()
def delete_key_pair(self, context, user_id, key_name):
"""Delete a keypair by name."""
self._notify(context, 'delete.start', key_name)
keypair = self.get_key_pair(context, user_id, key_name)
compute_utils.notify_about_keypair_action(
context=context,
keypair=keypair,
action=fields_obj.NotificationAction.DELETE,
phase=fields_obj.NotificationPhase.START)
objects.KeyPair.destroy_by_name(context, user_id, key_name)
compute_utils.notify_about_keypair_action(
context=context,
keypair=keypair,
action=fields_obj.NotificationAction.DELETE,
phase=fields_obj.NotificationPhase.END)
self._notify(context, 'delete.end', key_name)
def get_key_pairs(self, context, user_id, limit=None, marker=None):
"""List key pairs."""
return objects.KeyPairList.get_by_user(
context, user_id, limit=limit, marker=marker)
def get_key_pair(self, context, user_id, key_name):
"""Get a keypair by name."""
return objects.KeyPair.get_by_name(context, user_id, key_name)
class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase):
"""Sub-set of the Compute API related to managing security groups
and security group rules
"""
# The nova security group api does not use a uuid for the id.
id_is_uuid = False
def __init__(self, **kwargs):
super(SecurityGroupAPI, self).__init__(**kwargs)
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
def validate_property(self, value, property, allowed):
"""Validate given security group property.
:param value: the value to validate, as a string or unicode
:param property: the property, either 'name' or 'description'
:param allowed: the range of characters allowed
"""
try:
val = value.strip()
except AttributeError:
msg = _("Security group %s is not a string or unicode") % property
self.raise_invalid_property(msg)
utils.check_string_length(val, name=property, min_length=1,
max_length=255)
if allowed and not re.match(allowed, val):
# Some validation to ensure that values match API spec.
# - Alphanumeric characters, spaces, dashes, and underscores.
# TODO(Daviey): LP: #813685 extend beyond group_name checking, and
# probably create a param validator that can be used elsewhere.
msg = (_("Value (%(value)s) for parameter Group%(property)s is "
"invalid. Content limited to '%(allowed)s'.") %
{'value': value, 'allowed': allowed,
'property': property.capitalize()})
self.raise_invalid_property(msg)
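    # Illustrative call, not part of the original code; the value and the
    # allowed-characters pattern below are hypothetical:
    #
    #   self.validate_property('web servers', 'name', r'^[\w\- ]+$')
    #
    # The helper strips the value, enforces a 1-255 character length and, if
    # a pattern is given, rejects values that do not match it via
    # raise_invalid_property.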
def ensure_default(self, context):
"""Ensure that a context has a security group.
Creates a security group for the security context if it does not
already exist.
:param context: the security context
"""
self.db.security_group_ensure_default(context)
def create_security_group(self, context, name, description):
try:
objects.Quotas.check_deltas(context, {'security_groups': 1},
context.project_id,
user_id=context.user_id)
except exception.OverQuota:
msg = _("Quota exceeded, too many security groups.")
self.raise_over_quota(msg)
LOG.info("Create Security Group %s", name)
self.ensure_default(context)
group = {'user_id': context.user_id,
'project_id': context.project_id,
'name': name,
'description': description}
try:
group_ref = self.db.security_group_create(context, group)
except exception.SecurityGroupExists:
msg = _('Security group %s already exists') % name
self.raise_group_already_exists(msg)
# NOTE(melwitt): We recheck the quota after creating the object to
# prevent users from allocating more resources than their allowed quota
# in the event of a race. This is configurable because it can be
# expensive if strict quota limits are not required in a deployment.
if CONF.quota.recheck_quota:
try:
objects.Quotas.check_deltas(context, {'security_groups': 0},
context.project_id,
user_id=context.user_id)
except exception.OverQuota:
self.db.security_group_destroy(context, group_ref['id'])
msg = _("Quota exceeded, too many security groups.")
self.raise_over_quota(msg)
return group_ref
def update_security_group(self, context, security_group,
name, description):
if security_group['name'] in RO_SECURITY_GROUPS:
msg = (_("Unable to update system group '%s'") %
security_group['name'])
self.raise_invalid_group(msg)
group = {'name': name,
'description': description}
columns_to_join = ['rules.grantee_group']
group_ref = self.db.security_group_update(context,
security_group['id'],
group,
columns_to_join=columns_to_join)
return group_ref
def get(self, context, name=None, id=None, map_exception=False):
self.ensure_default(context)
cols = ['rules']
try:
if name:
return self.db.security_group_get_by_name(context,
context.project_id,
name,
columns_to_join=cols)
elif id:
return self.db.security_group_get(context, id,
columns_to_join=cols)
except exception.NotFound as exp:
if map_exception:
msg = exp.format_message()
self.raise_not_found(msg)
else:
raise
def list(self, context, names=None, ids=None, project=None,
search_opts=None):
self.ensure_default(context)
groups = []
if names or ids:
if names:
for name in names:
groups.append(self.db.security_group_get_by_name(context,
project,
name))
if ids:
for id in ids:
groups.append(self.db.security_group_get(context, id))
elif context.is_admin:
# TODO(eglynn): support a wider set of search options than just
# all_tenants, at least include the standard filters defined for
# the EC2 DescribeSecurityGroups API for the non-admin case also
if (search_opts and 'all_tenants' in search_opts):
groups = self.db.security_group_get_all(context)
else:
groups = self.db.security_group_get_by_project(context,
project)
elif project:
groups = self.db.security_group_get_by_project(context, project)
return groups
def destroy(self, context, security_group):
if security_group['name'] in RO_SECURITY_GROUPS:
msg = _("Unable to delete system group '%s'") % \
security_group['name']
self.raise_invalid_group(msg)
if self.db.security_group_in_use(context, security_group['id']):
msg = _("Security group is still in use")
self.raise_invalid_group(msg)
LOG.info("Delete security group %s", security_group['name'])
self.db.security_group_destroy(context, security_group['id'])
def is_associated_with_server(self, security_group, instance_uuid):
"""Check if the security group is already associated
with the instance. If Yes, return True.
"""
if not security_group:
return False
instances = security_group.get('instances')
if not instances:
return False
for inst in instances:
            if instance_uuid == inst['uuid']:
return True
return False
def add_to_instance(self, context, instance, security_group_name):
"""Add security group to the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
instance_uuid = instance.uuid
# check if the security group is associated with the server
if self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupExistsForInstance(
security_group_id=security_group['id'],
instance_id=instance_uuid)
self.db.instance_add_security_group(context.elevated(),
instance_uuid,
security_group['id'])
if instance.host:
self.compute_rpcapi.refresh_instance_security_rules(
context, instance, instance.host)
def remove_from_instance(self, context, instance, security_group_name):
"""Remove the security group associated with the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
instance_uuid = instance.uuid
# check if the security group is associated with the server
if not self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupNotExistsForInstance(
security_group_id=security_group['id'],
instance_id=instance_uuid)
self.db.instance_remove_security_group(context.elevated(),
instance_uuid,
security_group['id'])
if instance.host:
self.compute_rpcapi.refresh_instance_security_rules(
context, instance, instance.host)
def get_rule(self, context, id):
self.ensure_default(context)
try:
return self.db.security_group_rule_get(context, id)
except exception.NotFound:
msg = _("Rule (%s) not found") % id
self.raise_not_found(msg)
def add_rules(self, context, id, name, vals):
"""Add security group rule(s) to security group.
Note: the Nova security group API doesn't support adding multiple
security group rules at once but the EC2 one does. Therefore,
this function is written to support both.
"""
try:
objects.Quotas.check_deltas(context,
{'security_group_rules': len(vals)},
id)
except exception.OverQuota:
msg = _("Quota exceeded, too many security group rules.")
self.raise_over_quota(msg)
msg = ("Security group %(name)s added %(protocol)s ingress "
"(%(from_port)s:%(to_port)s)")
rules = []
for v in vals:
rule = self.db.security_group_rule_create(context, v)
# NOTE(melwitt): We recheck the quota after creating the object to
# prevent users from allocating more resources than their allowed
# quota in the event of a race. This is configurable because it can
# be expensive if strict quota limits are not required in a
# deployment.
if CONF.quota.recheck_quota:
try:
objects.Quotas.check_deltas(context,
{'security_group_rules': 0},
id)
except exception.OverQuota:
self.db.security_group_rule_destroy(context, rule['id'])
msg = _("Quota exceeded, too many security group rules.")
self.raise_over_quota(msg)
rules.append(rule)
LOG.info(msg, {'name': name,
'protocol': rule.protocol,
'from_port': rule.from_port,
'to_port': rule.to_port})
self.trigger_rules_refresh(context, id=id)
return rules
def remove_rules(self, context, security_group, rule_ids):
msg = ("Security group %(name)s removed %(protocol)s ingress "
"(%(from_port)s:%(to_port)s)")
for rule_id in rule_ids:
rule = self.get_rule(context, rule_id)
LOG.info(msg, {'name': security_group['name'],
'protocol': rule.protocol,
'from_port': rule.from_port,
'to_port': rule.to_port})
self.db.security_group_rule_destroy(context, rule_id)
# NOTE(vish): we removed some rules, so refresh
self.trigger_rules_refresh(context, id=security_group['id'])
def remove_default_rules(self, context, rule_ids):
for rule_id in rule_ids:
self.db.security_group_default_rule_destroy(context, rule_id)
def add_default_rules(self, context, vals):
rules = [self.db.security_group_default_rule_create(context, v)
for v in vals]
return rules
def default_rule_exists(self, context, values):
"""Indicates whether the specified rule values are already
defined in the default security group rules.
"""
for rule in self.db.security_group_default_rule_list(context):
keys = ('cidr', 'from_port', 'to_port', 'protocol')
for key in keys:
if rule.get(key) != values.get(key):
break
else:
return rule.get('id') or True
return False
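    # Note on the for/else above: the else branch runs only when the inner
    # loop completes without a break, i.e. every compared key matched.
    # Illustrative input, values hypothetical:
    #
    #   values = {'cidr': '0.0.0.0/0', 'from_port': 22,
    #             'to_port': 22, 'protocol': 'tcp'}
    #   # default_rule_exists(context, values) -> matching rule id, or False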
def get_all_default_rules(self, context):
try:
rules = self.db.security_group_default_rule_list(context)
except Exception:
msg = 'cannot get default security group rules'
raise exception.SecurityGroupDefaultRuleNotFound(msg)
return rules
def get_default_rule(self, context, id):
return self.db.security_group_default_rule_get(context, id)
def validate_id(self, id):
try:
return int(id)
except ValueError:
msg = _("Security group id should be integer")
self.raise_invalid_property(msg)
def _refresh_instance_security_rules(self, context, instances):
for instance in instances:
if instance.host is not None:
self.compute_rpcapi.refresh_instance_security_rules(
context, instance, instance.host)
def trigger_rules_refresh(self, context, id):
"""Called when a rule is added to or removed from a security_group."""
instances = objects.InstanceList.get_by_security_group_id(context, id)
self._refresh_instance_security_rules(context, instances)
def trigger_members_refresh(self, context, group_ids):
"""Called when a security group gains a new or loses a member.
Sends an update request to each compute node for each instance for
which this is relevant.
"""
instances = objects.InstanceList.get_by_grantee_security_group_ids(
context, group_ids)
self._refresh_instance_security_rules(context, instances)
def get_instance_security_groups(self, context, instance, detailed=False):
if detailed:
return self.db.security_group_get_by_instance(context,
instance.uuid)
return [{'name': group.name} for group in instance.security_groups]
| apache-2.0 | -3,930,178,248,232,551,000 | 45.794142 | 79 | 0.579275 | false |
sebMathieu/dsima | server/lgiinterface.py | 1 | 1067 | ##@package lgiinterface
# Interface to list the generated instances.
#@author Sebastien MATHIEU
import os
import xml.etree.ElementTree as ElementTree
import asyncio, websockets
# List the generated instances and send them to the client.
# @param client Client we are interacting with.
@asyncio.coroutine
def interact(client):
instances=[]
# Create instances directory if it does not exist
if not os.path.isdir(client.instancesFolder):
os.makedirs(client.instancesFolder)
# List instances in directory
for dir in os.listdir(client.instancesFolder):
xmlFile="%s/%s/%s.xml"%(client.instancesFolder,dir,dir)
if not os.path.isdir("%s/%s"%(client.instancesFolder,dir)) or not os.path.isfile("%s/%s/%s.xml"%(client.instancesFolder,dir,dir)):
continue
root=ElementTree.parse(xmlFile).getroot()
hash=root.find('hash').text
title=root.find('title').text
if hash == dir:
if title == None:
title=""
instances.append("%s;%s"%(hash,title))
yield from client.socket.send("\n".join(instances))
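# Sketch of the per-instance XML this coroutine expects; only <hash> and
# <title> are read above, and <hash> must equal the directory name. The root
# element name and the hash value are illustrative:
#
#   <instance>
#       <hash>2b1a6453d0f9f1e6</hash>
#       <title>Example instance</title>
#   </instance>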
| bsd-3-clause | -6,826,134,603,561,917,000 | 30.333333 | 132 | 0.712277 | false |
telefonicaid/orchestrator | src/orchestrator/api/ldap_view.py | 1 | 21117 | #
# Copyright 2018 Telefonica Espana
#
# This file is part of IoT orchestrator
#
# IoT orchestrator is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# IoT orchestrator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with IoT orchestrator. If not, see http://www.gnu.org/licenses/.
#
# For those usages not covered by this license please contact with
# iot_support at tid dot es
#
# Author: IoT team
#
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework.exceptions import ParseError
from django.conf import settings
from orchestrator.core.flow.LdapUserHelper import LdapUserHelper
from orchestrator.core.flow.LdapGroupHelper import LdapGroupHelper
from orchestrator.api import parsers
from orchestrator.api.iotconf import IoTConf
from orchestrator.api.stats import Stats
class LdapUser_RESTView(APIView, IoTConf):
"""
{ Create, Read, Update, Delete } LDAP Users
"""
schema_name = "LdapUser"
parser_classes = (parsers.JSONSchemaParser,)
def __init__(self):
IoTConf.__init__(self)
def post(self, request):
response = flow = None
CORRELATOR_ID = self.getCorrelatorIdHeader(request)
try:
request.data # json validation
flow = LdapUserHelper(
None, None, None,
LDAP_HOST=self.LDAP_HOST,
LDAP_PORT=self.LDAP_PORT,
LDAP_BASEDN=self.LDAP_BASEDN,
MAILER_HOST=self.MAILER_HOST,
MAILER_PORT=self.MAILER_PORT,
MAILER_TLS=self.MAILER_TLS,
MAILER_USER=self.MAILER_USER,
MAILER_PASSWORD=self.MAILER_PASSWORD,
MAILER_FROM=self.MAILER_FROM,
MAILER_TO=self.MAILER_TO,
CORRELATOR_ID=CORRELATOR_ID)
CORRELATOR_ID = self.getCorrelatorId(flow, CORRELATOR_ID)
if (request.data.get("LDAP_ADMIN_USER", None) and
request.data.get("LDAP_ADMIN_PASSWORD", None)):
result = flow.createNewUser(
request.data.get("LDAP_ADMIN_USER", None),
request.data.get("LDAP_ADMIN_PASSWORD", None),
request.data.get("NEW_USER_NAME", None),
request.data.get("NEW_USER_PASSWORD", None),
request.data.get("NEW_USER_EMAIL", None),
request.data.get("NEW_USER_DESCRIPTION", None),
request.data.get("GROUP_NAMES", None))
else:
result = flow.askForCreateNewUser(
request.data.get("NEW_USER_NAME", None),
request.data.get("NEW_USER_PASSWORD", None),
request.data.get("NEW_USER_EMAIL", None),
request.data.get("NEW_USER_DESCRIPTION", None),
request.data.get("GROUP_NAMES", None))
if 'error' not in result:
Stats.num_post_ldap += 1
response = Response(result, status=status.HTTP_201_CREATED,
headers={"Fiware-Correlator": CORRELATOR_ID})
else:
Stats.num_flow_errors += 1
response = Response(result['error'],
status=self.getStatusFromCode(result['code']),
headers={"Fiware-Correlator": CORRELATOR_ID})
except ParseError as error:
Stats.num_api_errors += 1
response = Response(
'Input validation error - {0} {1}'.format(error.message,
error.detail),
status=status.HTTP_400_BAD_REQUEST,
headers={"Fiware-Correlator": CORRELATOR_ID}
)
return response
def get(self, request):
response = flow = None
CORRELATOR_ID = self.getCorrelatorIdHeader(request)
try:
request.data # json validation
flow = LdapUserHelper(
None, None, None,
LDAP_HOST=self.LDAP_HOST,
LDAP_PORT=self.LDAP_PORT,
LDAP_BASEDN=self.LDAP_BASEDN,
CORRELATOR_ID=CORRELATOR_ID)
CORRELATOR_ID = self.getCorrelatorId(flow, CORRELATOR_ID)
if ( request.data.get("LDAP_ADMIN_USER", None) and
request.data.get("LDAP_ADMIN_PASSWORD", None) and
request.data.get("FILTER", None)):
result = flow.listUsers(
request.data.get("LDAP_ADMIN_USER", None),
request.data.get("LDAP_ADMIN_PASSWORD", None),
request.data.get("FILTER", None))
elif ( request.data.get("LDAP_ADMIN_USER", None) and
request.data.get("LDAP_ADMIN_PASSWORD", None) and
request.data.get("USER_NAME", None)):
result = flow.getUserDetailByAdmin(
request.data.get("LDAP_ADMIN_USER", None),
request.data.get("LDAP_ADMIN_PASSWORD", None),
request.data.get("USER_NAME", None))
elif (request.data.get("USER_NAME", None) and
request.data.get("USER_PASSWORD", None)):
result = flow.getUserDetail(
request.data.get("USER_NAME", None),
request.data.get("USER_PASSWORD", None))
else:
result = { "error": "not valid parameters", "code": 400 }
if 'error' not in result:
Stats.num_get_ldap += 1
response = Response(result, status=status.HTTP_200_OK,
headers={"Fiware-Correlator": CORRELATOR_ID})
else:
Stats.num_flow_errors += 1
response = Response(result['error'],
status=self.getStatusFromCode(result['code']),
headers={"Fiware-Correlator": CORRELATOR_ID})
except ParseError as error:
Stats.num_api_errors += 1
response = Response(
'Input validation error - {0} {1}'.format(error.message,
error.detail),
status=status.HTTP_400_BAD_REQUEST,
headers={"Fiware-Correlator": CORRELATOR_ID}
)
return response
def put(self, request):
response = flow = None
CORRELATOR_ID = self.getCorrelatorIdHeader(request)
try:
request.data # json validation
flow = LdapUserHelper(
None, None, None,
LDAP_HOST=self.LDAP_HOST,
LDAP_PORT=self.LDAP_PORT,
LDAP_BASEDN=self.LDAP_BASEDN,
CORRELATOR_ID=CORRELATOR_ID)
CORRELATOR_ID = self.getCorrelatorId(flow, CORRELATOR_ID)
result = flow.updateUser(
request.data.get("LDAP_ADMIN_USER", None),
request.data.get("LDAP_ADMIN_PASSWORD", None),
request.data.get("USER_NAME", None),
request.data.get("USER_PASSWORD", None),
request.data.get("USER_DATA", None))
if 'error' not in result:
Stats.num_put_ldap += 1
response = Response(result, status=status.HTTP_200_OK,
headers={"Fiware-Correlator": CORRELATOR_ID})
else:
Stats.num_flow_errors += 1
response = Response(result['error'],
status=self.getStatusFromCode(result['code']),
headers={"Fiware-Correlator": CORRELATOR_ID})
except ParseError as error:
Stats.num_api_errors += 1
response = Response(
'Input validation error - {0} {1}'.format(error.message,
error.detail),
status=status.HTTP_400_BAD_REQUEST,
headers={"Fiware-Correlator": CORRELATOR_ID}
)
return response
def delete(self, request):
response = flow = None
CORRELATOR_ID = self.getCorrelatorIdHeader(request)
try:
request.data # json validation
flow = LdapUserHelper(
None, None, None,
LDAP_HOST=self.LDAP_HOST,
LDAP_PORT=self.LDAP_PORT,
LDAP_BASEDN=self.LDAP_BASEDN,
CORRELATOR_ID=CORRELATOR_ID)
CORRELATOR_ID = self.getCorrelatorId(flow, CORRELATOR_ID)
result = flow.deleteUser(
request.data.get("LDAP_ADMIN_USER", None),
request.data.get("LDAP_ADMIN_PASSWORD", None),
request.data.get("USER_NAME", None),
request.data.get("USER_PASSWORD", None))
if 'error' not in result:
Stats.num_delete_ldap += 1
response = Response(result, status=status.HTTP_204_NO_CONTENT,
headers={"Fiware-Correlator": CORRELATOR_ID})
else:
Stats.num_flow_errors += 1
response = Response(result['error'],
status=self.getStatusFromCode(result['code']),
headers={"Fiware-Correlator": CORRELATOR_ID})
except ParseError as error:
Stats.num_api_errors += 1
response = Response(
'Input validation error - {0} {1}'.format(error.message,
error.detail),
status=status.HTTP_400_BAD_REQUEST,
headers={"Fiware-Correlator": CORRELATOR_ID}
)
return response
class LdapAuth_RESTView(APIView, IoTConf):
"""
{ post } LDAP Auth
"""
schema_name = "LdapUser"
parser_classes = (parsers.JSONSchemaParser,)
def __init__(self):
IoTConf.__init__(self)
def post(self, request):
response = flow = None
CORRELATOR_ID = self.getCorrelatorIdHeader(request)
try:
request.data # json validation
flow = LdapUserHelper(
None, None, None,
LDAP_HOST=self.LDAP_HOST,
LDAP_PORT=self.LDAP_PORT,
LDAP_BASEDN=self.LDAP_BASEDN,
CORRELATOR_ID=CORRELATOR_ID)
CORRELATOR_ID = self.getCorrelatorId(flow, CORRELATOR_ID)
result = flow.authUser(
request.data.get("USER_NAME", None),
request.data.get("USER_PASSWORD", None))
if 'error' not in result:
Stats.num_post_ldap += 1
response = Response(result, status=status.HTTP_201_CREATED,
headers={"Fiware-Correlator": CORRELATOR_ID})
else:
Stats.num_flow_errors += 1
response = Response(result['error'],
status=self.getStatusFromCode(result['code']),
headers={"Fiware-Correlator": CORRELATOR_ID})
except ParseError as error:
Stats.num_api_errors += 1
response = Response(
'Input validation error - {0} {1}'.format(error.message,
error.detail),
status=status.HTTP_400_BAD_REQUEST,
headers={"Fiware-Correlator": CORRELATOR_ID}
)
return response
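# Illustrative request body for the authentication endpoint above; the field
# names come from the request.data lookups in LdapAuth_RESTView.post and the
# values are placeholders:
#
#   {
#       "USER_NAME": "alice",
#       "USER_PASSWORD": "secret"
#   }
#
# On success the view answers HTTP 201 with the flow result and a
# Fiware-Correlator header, as the other LDAP views in this module do.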
class LdapGroup_RESTView(APIView, IoTConf):
"""
{ Create, Read, Update, Delete } LDAP Groups
"""
schema_name = "LdapGroup"
parser_classes = (parsers.JSONSchemaParser,)
def __init__(self):
IoTConf.__init__(self)
def post(self, request):
response = flow = None
CORRELATOR_ID = self.getCorrelatorIdHeader(request)
try:
request.data # json validation
flow = LdapGroupHelper(
None, None, None,
LDAP_HOST=self.LDAP_HOST,
LDAP_PORT=self.LDAP_PORT,
LDAP_BASEDN=self.LDAP_BASEDN,
MAILER_HOST=self.MAILER_HOST,
MAILER_PORT=self.MAILER_PORT,
MAILER_TLS=self.MAILER_TLS,
MAILER_USER=self.MAILER_USER,
MAILER_PASSWORD=self.MAILER_PASSWORD,
MAILER_FROM=self.MAILER_FROM,
MAILER_TO=self.MAILER_TO,
CORRELATOR_ID=CORRELATOR_ID)
CORRELATOR_ID = self.getCorrelatorId(flow, CORRELATOR_ID)
result = flow.createNewGroup(
request.data.get("LDAP_ADMIN_USER", None),
request.data.get("LDAP_ADMIN_PASSWORD", None),
request.data.get("NEW_GROUP_NAME", None),
request.data.get("NEW_GROUP_DESCRIPTION", None))
if 'error' not in result:
Stats.num_post_ldap += 1
response = Response(result, status=status.HTTP_201_CREATED,
headers={"Fiware-Correlator": CORRELATOR_ID})
else:
Stats.num_flow_errors += 1
response = Response(result['error'],
status=self.getStatusFromCode(result['code']),
headers={"Fiware-Correlator": CORRELATOR_ID})
except ParseError as error:
Stats.num_api_errors += 1
response = Response(
'Input validation error - {0} {1}'.format(error.message,
error.detail),
status=status.HTTP_400_BAD_REQUEST,
headers={"Fiware-Correlator": CORRELATOR_ID}
)
return response
def get(self, request):
response = flow = None
CORRELATOR_ID = self.getCorrelatorIdHeader(request)
try:
request.data # json validation
flow = LdapGroupHelper(
None, None, None,
LDAP_HOST=self.LDAP_HOST,
LDAP_PORT=self.LDAP_PORT,
LDAP_BASEDN=self.LDAP_BASEDN,
CORRELATOR_ID=CORRELATOR_ID)
CORRELATOR_ID = self.getCorrelatorId(flow, CORRELATOR_ID)
if ( request.data.get("LDAP_ADMIN_USER", None) and
request.data.get("LDAP_ADMIN_PASSWORD", None) and
request.data.get("FILTER", None)):
result = flow.listGroups(
request.data.get("LDAP_ADMIN_USER", None),
request.data.get("LDAP_ADMIN_PASSWORD", None),
request.data.get("FILTER", None))
elif ( request.data.get("LDAP_ADMIN_USER", None) and
request.data.get("LDAP_ADMIN_PASSWORD", None) and
request.data.get("GROUP_NAME", None)):
result = flow.getGroupDetailByAdmin(
request.data.get("LDAP_ADMIN_USER", None),
request.data.get("LDAP_ADMIN_PASSWORD", None),
request.data.get("GROUP_NAME", None))
else:
result = { "error": "not valid parameters", "code": 400 }
if 'error' not in result:
Stats.num_get_ldap += 1
response = Response(result, status=status.HTTP_200_OK,
headers={"Fiware-Correlator": CORRELATOR_ID})
else:
Stats.num_flow_errors += 1
response = Response(result['error'],
status=self.getStatusFromCode(result['code']),
headers={"Fiware-Correlator": CORRELATOR_ID})
except ParseError as error:
Stats.num_api_errors += 1
response = Response(
'Input validation error - {0} {1}'.format(error.message,
error.detail),
status=status.HTTP_400_BAD_REQUEST,
headers={"Fiware-Correlator": CORRELATOR_ID}
)
return response
def put(self, request):
response = flow = None
CORRELATOR_ID = self.getCorrelatorIdHeader(request)
try:
request.data # json validation
flow = LdapGroupHelper(
None, None, None,
LDAP_HOST=self.LDAP_HOST,
LDAP_PORT=self.LDAP_PORT,
LDAP_BASEDN=self.LDAP_BASEDN,
CORRELATOR_ID=CORRELATOR_ID)
CORRELATOR_ID = self.getCorrelatorId(flow, CORRELATOR_ID)
result = flow.updateGroup(
request.data.get("LDAP_ADMIN_USER", None),
request.data.get("LDAP_ADMIN_PASSWORD", None),
request.data.get("GROUP_NAME", None),
request.data.get("GROUP_DESCRIPTION", None))
if 'error' not in result:
Stats.num_put_ldap += 1
response = Response(result, status=status.HTTP_200_OK,
headers={"Fiware-Correlator": CORRELATOR_ID})
else:
Stats.num_flow_errors += 1
response = Response(result['error'],
status=self.getStatusFromCode(result['code']),
headers={"Fiware-Correlator": CORRELATOR_ID})
except ParseError as error:
Stats.num_api_errors += 1
response = Response(
'Input validation error - {0} {1}'.format(error.message,
error.detail),
status=status.HTTP_400_BAD_REQUEST,
headers={"Fiware-Correlator": CORRELATOR_ID}
)
return response
def delete(self, request):
response = flow = None
CORRELATOR_ID = self.getCorrelatorIdHeader(request)
try:
request.data # json validation
flow = LdapGroupHelper(
None, None, None,
LDAP_HOST=self.LDAP_HOST,
LDAP_PORT=self.LDAP_PORT,
LDAP_BASEDN=self.LDAP_BASEDN,
CORRELATOR_ID=CORRELATOR_ID)
CORRELATOR_ID = self.getCorrelatorId(flow, CORRELATOR_ID)
result = flow.deleteGroup(
request.data.get("LDAP_ADMIN_USER", None),
request.data.get("LDAP_ADMIN_PASSWORD", None),
request.data.get("GROUP_NAME", None))
if 'error' not in result:
Stats.num_delete_ldap += 1
response = Response(result, status=status.HTTP_204_NO_CONTENT,
headers={"Fiware-Correlator": CORRELATOR_ID})
else:
Stats.num_flow_errors += 1
response = Response(result['error'],
status=self.getStatusFromCode(result['code']),
headers={"Fiware-Correlator": CORRELATOR_ID})
except ParseError as error:
Stats.num_api_errors += 1
response = Response(
'Input validation error - {0} {1}'.format(error.message,
error.detail),
status=status.HTTP_400_BAD_REQUEST,
headers={"Fiware-Correlator": CORRELATOR_ID}
)
return response
| agpl-3.0 | -5,715,640,797,791,896,000 | 45.006536 | 78 | 0.496283 | false |
AlanJAS/iknowAmerica | recursos/0bolivia/datos/0bolivia.py | 1 | 4528 | # -*- coding: utf-8 -*-
from gettext import gettext as _
NAME = _('Bolivia')
STATES = [
(_('Pando'), 254, 136, 124, 0),
(_('Beni'), 253, 283, 292, 0),
(_('La Paz'), 252, 124, 431, 0),
(_('Oruro'), 251, 146, 609, 0),
(_('Potosí'), 250, 204, 745, 0),
(_('Cochabamba'), 249, 279, 503, 0),
(_('Chuquisaca'), 248, 377, 730, 0),
(_('Tarija'), 247, 374, 788, 0),
(_('Santa Cruz'), 246, 529, 513, 0),
(_('Brazil'), 245, 552, 109, 0),
(_('Perú'), 244, 23, 271, 90),
(_('Chile'), 243, 50, 763, 90),
(_('Paraguay'), 242, 616, 800, 0),
(_('Argentina'), 241, 331, 884, 0)
]
CAPITALS = [
(_('La Paz'), 118, 464, 0, 0, -14),
(_('Cobija'), 66, 104, 1, 0, 14),
(_('Trinidad'), 318, 352, 1, 0, -14),
(_('Cochabamba'), 244, 519, 1, 0, -14),
(_('Oruro'), 193, 557, 1, -10, 14),
(_('Potosí'), 272, 662, 1, 0, 14),
(_('Sucre'), 305, 626, 1, 0, -14),
(_('Tarija'), 342, 789, 1, 0, 14),
(_('Santa Cruz'), 430, 544, 1, 0, 14)
]
CITIES = [
(_('Apolo'), 86, 356, 2, 0, -14),
(_('Reyes'), 159, 323, 2, 10, -14),
(_('Santa Ana'), 277, 280, 2, 0, -14),
(_('San Borja'), 202, 355, 2, 0, -14),
(_('Puerto Heath'), 79, 204, 2, 0, 14),
(_('Asunción'), 124, 160, 2, 0, -14),
(_('Riberalta'), 238, 104, 2, -15, -14),
(_('Magdalena'), 366, 255, 2, 0, -14),
(_('Loreto'), 330, 376, 2, 0, 14),
(_('Puerto Acosta'), 48, 403, 2, 30, -14),
(_('Caranavi'), 155, 419, 2, 0, 14),
(_('Guaqui'), 77, 475, 2, -15, 14),
(_('Ascención'), 435, 405, 2, 0, -14),
(_('Concepción'), 500, 434, 2, 0, -14),
(_('San Ignacio'), 563, 443, 2, 0, 14),
(_('Tarabuco'), 324, 634, 2, 0, 14),
(_('Aiquile'), 307, 569, 2, 0, -14),
(_('Villazón'), 289, 816, 2, 15, 14),
(_('Uyuni'), 209, 723, 2, 0, -14),
(_('Yucuiba'), 407, 809, 2, 0, 14),
(_('Villa Montes'), 421, 762, 2, 0, -14),
(_('Camiri'), 409, 694, 2, 20, -14),
(_('Santa Rosa del Sara'), 402, 497, 2, 15, -14),
(_('Montero'), 425, 513, 2, 0, 14),
(_('Las Petas'), 680, 449, 2, 0, 14),
(_('San José de Chiquitos'), 583, 544, 2, 0, -14),
(_('Roboré'), 643, 576, 2, 0, -14),
(_('Puerto Suárez'), 758, 614, 2, -30, -14)
]
RIVERS = [
(_('Pilcomayo River'), 254, 462, 796, -45),
(_('Parapetí River'), 253, 444, 690, 30),
(_('Sécure River'), 252, 260, 407, 30),
(_('Ichoa River'), 251, 296, 434, 40),
(_('Piray River'), 250, 406, 520, 90),
(_('Ichilo River'), 249, 311, 470, 90),
(_('Grande River'), 248, 461, 526, -80),
(_('Yacuma River'), 247, 204, 302, 30),
(_('Madre de Dios River'), 246, 133, 158, 40),
(_('Desaguadero River'), 245, 96, 538, -40),
(_('Grande de Lípez River'), 244, 171, 773, 90),
(_('San Miguel River'), 243, 400, 392, -45),
(_('San Martín River'), 242, 505, 332, -45),
(_('Abuná River'), 241, 176, 41, 30),
(_('Orton River'), 240, 188, 88, 20),
(_('Madeira River'), 239, 209, 54, 30),
(_('Madidi River'), 238, 123, 238, 30),
(_('Tequeje River'), 237, 118, 275, 20),
(_('Beni River'), 236, 166, 299, 60),
(_('Viata River'), 235, 207, 213, 70),
(_('Apere River'), 234, 260, 338, 30),
(_('Mamoré River'), 233, 338, 346, -80),
(_('Blanco River'), 232, 474, 366, -50),
(_('Paraguá River'), 231, 575, 351, -70),
(_('Guaporé River'), 230, 524, 244, -25),
(_('Tucavaca River'), 229, 682, 563, -40),
(_('Lateriquique River'), 228, 613, 610, -40),
(_('Lake Titicaca River'), 227, 47, 424, -45),
(_('Lake Poopo River'), 226, 180, 610, 0)
]
ROUTES = []
STATS = [
(_('Capital:'), _('Sucre') + _("(19º2' S - 65º15' W)")),
(_('Language:'), _('Spanish') + ' , ' + _('Quechua') + ' , ' + _('Guarani')),
(_('Government:'), _('Presidential republic')),
(_('President:'), _('Evo Morales Ayma')),
(_('Vice President:'), _('Álvaro García Linera')),
(_('Independence:'), _('from Spain')),
('', _('declared: %s') % _('August 6, 1825')),
('', _('recognized: %s') % _('July 21, 1847')),
(_('Area:'), '%(sup)s %(u)s (%(p)s)' % {'sup': _('1.098.581'), 'u': _('km²'), 'p': _('27th')}),
(_('Population:'), '%(v)s (%(p)s)' % {'v': _('11.410.651'), 'p': _('83rd')}),
(_('GDP:'), '%(c)s %(v)s %(u)s (%(p)s)' % {'c': _('USD'), 'v': _('33.537'), 'u': _('billion'), 'p': _('96th')}),
(_('HDI:'), '%(l)s - %(v)s (%(p)s)' % {'l': _('Medium'), 'v': _('0,662'), 'p': _('119th')}),
(_('Currency:'), _('Boliviano')),
(_('Updated:'), _('April 5, 2016'))
]
| gpl-3.0 | 7,551,089,572,770,961,000 | 37.504274 | 116 | 0.447947 | false |
nok/sklearn-porter | sklearn_porter/estimator/classifier/MLPClassifier/__init__.py | 1 | 9667 | # -*- coding: utf-8 -*-
import os
from json import encoder
from json import dumps
from sklearn_porter.estimator.classifier.Classifier import Classifier
class MLPClassifier(Classifier):
"""
See also
--------
sklearn.neural_network.MLPClassifier
http://scikit-learn.org/stable/modules/generated/
sklearn.neural_network.MLPClassifier.html
"""
SUPPORTED_METHODS = ['predict']
# @formatter:off
TEMPLATES = {
'java': {
'type': '{0}',
'arr': '{{{0}}}',
'new_arr': 'new {type}[{values}]',
'arr[]': '{type}[] {name} = {{{values}}};',
'arr[][]': '{type}[][] {name} = {{{values}}};',
'arr[][][]': '{type}[][][] {name} = {{{values}}};',
'indent': ' ',
},
'js': {
'type': '{0}',
'arr': '[{0}]',
'new_arr': 'new Array({values}).fill({fill_with})',
'arr[]': '{name} = [{values}];',
'arr[][]': '{name} = [{values}];',
'arr[][][]': '{name} = [{values}];',
'indent': ' ',
}
}
# @formatter:on
def __init__(self, estimator, target_language='java',
target_method='predict', **kwargs):
"""
Port a trained estimator to the syntax of a chosen programming
language.
Parameters
----------
:param estimator : MLPClassifier
An instance of a trained MLPClassifier estimator.
:param target_language : string, default: 'java'
The target programming language.
:param target_method : string, default: 'predict'
The target method of the estimator.
"""
super(MLPClassifier, self).__init__(
estimator, target_language=target_language,
target_method=target_method, **kwargs)
# Activation function ('identity', 'logistic', 'tanh' or 'relu'):
hidden_activation = estimator.activation
if hidden_activation not in self.hidden_activation_functions:
raise ValueError(("The activation function '%s' of the estimator "
"is not supported.") % hidden_activation)
# Output activation function ('softmax' or 'logistic'):
output_activation = estimator.out_activation_
if output_activation not in self.output_activation_functions:
raise ValueError(("The activation function '%s' of the estimator "
"is not supported.") % output_activation)
self.estimator = estimator
@property
def hidden_activation_functions(self):
"""Get list of supported activation functions for the hidden layers."""
return ['relu', 'identity', 'tanh', 'logistic']
@property
def output_activation_functions(self):
"""Get list of supported activation functions for the output layer."""
return ['softmax', 'logistic']
def export(self, class_name, method_name, export_data=False,
export_dir='.', export_filename='data.json',
export_append_checksum=False, **kwargs):
"""
Port a trained estimator to the syntax of a chosen programming language.
Parameters
----------
:param class_name : string
The name of the class in the returned result.
:param method_name : string
The name of the method in the returned result.
:param export_data : bool, default: False
Whether the model data should be saved or not.
:param export_dir : string, default: '.' (current directory)
The directory where the model data should be saved.
:param export_filename : string, default: 'data.json'
The filename of the exported model data.
:param export_append_checksum : bool, default: False
Whether to append the checksum to the filename or not.
Returns
-------
:return : string
The transpiled algorithm with the defined placeholders.
"""
# Arguments:
self.class_name = class_name
self.method_name = method_name
# Estimator:
est = self.estimator
self.output_activation = est.out_activation_
self.hidden_activation = est.activation
self.n_layers = est.n_layers_
self.n_hidden_layers = est.n_layers_ - 2
self.n_inputs = len(est.coefs_[0])
self.n_outputs = est.n_outputs_
self.hidden_layer_sizes = est.hidden_layer_sizes
if isinstance(self.hidden_layer_sizes, int):
self.hidden_layer_sizes = [self.hidden_layer_sizes]
self.hidden_layer_sizes = list(self.hidden_layer_sizes)
self.layer_units = \
[self.n_inputs] + self.hidden_layer_sizes + [est.n_outputs_]
# Weights:
self.coefficients = est.coefs_
# Bias:
self.intercepts = est.intercepts_
# Binary or multiclass classifier?
self.is_binary = self.n_outputs == 1
self.prefix = 'binary' if self.is_binary else 'multi'
if self.target_method == 'predict':
# Exported:
if export_data and os.path.isdir(export_dir):
self.export_data(export_dir, export_filename,
export_append_checksum)
return self.predict('exported')
# Separated:
return self.predict('separated')
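    # Illustrative usage sketch, not part of the original module. Assuming a
    # fitted sklearn.neural_network.MLPClassifier instance `clf`, this
    # exporter could be driven directly as:
    #
    #   porter = MLPClassifier(clf, target_language='js')
    #   transpiled = porter.export(class_name='MLPClassifier',
    #                              method_name='predict')
    #
    # The class_name/method_name values are placeholders; the returned string
    # is the transpiled estimator in the chosen target language.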
def predict(self, temp_type):
"""
Transpile the predict method.
Parameters
----------
:param temp_type : string
The kind of export type (embedded, separated, exported).
Returns
-------
:return : string
The transpiled predict method as string.
"""
# Exported:
if temp_type == 'exported':
temp = self.temp('exported.class')
return temp.format(class_name=self.class_name,
method_name=self.method_name)
# Separated:
temp_arr = self.temp('arr')
temp_arr_ = self.temp('arr[]')
temp_arr__ = self.temp('arr[][]')
temp_arr___ = self.temp('arr[][][]')
# Activations:
layers = list(self._get_activations())
layers = ', '.join(layers)
layers = temp_arr_.format(type='int', name='layers', values=layers)
# Coefficients (weights):
coefficients = []
for layer in self.coefficients:
layer_weights = []
for weights in layer:
weights = ', '.join([self.repr(w) for w in weights])
layer_weights.append(temp_arr.format(weights))
layer_weights = ', '.join(layer_weights)
coefficients.append(temp_arr.format(layer_weights))
coefficients = ', '.join(coefficients)
coefficients = temp_arr___.format(type='double',
name='weights',
values=coefficients)
# Intercepts (biases):
intercepts = list(self._get_intercepts())
intercepts = ', '.join(intercepts)
intercepts = temp_arr__.format(type='double',
name='bias',
values=intercepts)
temp_class = self.temp('separated.class')
file_name = '{}.js'.format(self.class_name.lower())
return temp_class.format(class_name=self.class_name,
method_name=self.method_name,
hidden_activation=self.hidden_activation,
output_activation=self.output_activation,
n_features=self.n_inputs,
weights=coefficients,
bias=intercepts,
layers=layers,
file_name=file_name)
def export_data(self, directory, filename, with_md5_hash=False):
"""
Save model data in a JSON file.
Parameters
----------
:param directory : string
The directory.
:param filename : string
The filename.
:param with_md5_hash : bool, default: False
Whether to append the checksum to the filename or not.
"""
model_data = {
'layers': [int(l) for l in list(self._get_activations())],
'weights': [c.tolist() for c in self.coefficients],
'bias': [i.tolist() for i in self.intercepts],
'hidden_activation': self.hidden_activation,
'output_activation': self.output_activation
}
encoder.FLOAT_REPR = lambda o: self.repr(o)
json_data = dumps(model_data, sort_keys=True)
if with_md5_hash:
import hashlib
            json_hash = hashlib.md5(json_data.encode('utf-8')).hexdigest()
filename = filename.split('.json')[0] + '_' + json_hash + '.json'
path = os.path.join(directory, filename)
with open(path, 'w') as fp:
fp.write(json_data)
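    # For reference, the JSON written above has the shape sketched below
    # (values are placeholders; the keys mirror the model_data dict):
    #
    #   {
    #       "layers": [15, 3],
    #       "weights": [[[...]], [[...]]],
    #       "bias": [[...], [...]],
    #       "hidden_activation": "relu",
    #       "output_activation": "softmax"
    #   }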
def _get_intercepts(self):
"""
Concatenate all intercepts of the classifier.
"""
temp_arr = self.temp('arr')
for layer in self.intercepts:
inter = ', '.join([self.repr(b) for b in layer])
yield temp_arr.format(inter)
def _get_activations(self):
"""
Concatenate the layers sizes of the classifier except the input layer.
"""
return [str(x) for x in self.layer_units[1:]]
| mit | 8,164,837,116,001,158,000 | 35.617424 | 80 | 0.53574 | false |
tmt514/mtsa-dishes-translator | app/data/__init__.py | 1 | 1599 | from .parsepokemon import add_pokemons
from .parsejokes import add_jokes
from .parsecategories import add_categories
from app.models import db, Term, Photo, Location, User, Joke, Category, Termcategories, Description
import os
import pickle
import json
if not os.path.exists('app/data/pokemon_names_mapping'):
print("\033[1;34mDownloading Pokemon Data\033[m")
os.system("wget \"https://drive.google.com/uc?id=0BzTEEqZlZigxU2RQdHp4MmFYX00&export=download\" -O app/data/pokemon_names_mapping")
os.system("wget \"https://drive.google.com/uc?id=0BzTEEqZlZigxVGZhQ0pEeFNkN1E&export=download\" -O app/data/pokemon_reverse_index")
os.system("wget \"https://drive.google.com/uc?id=0BzTEEqZlZigxbE90eHFmUDY0VEE&export=download\" -O app/data/pokemon_doc_freq")
if not os.path.exists('app/data/dict.txt.big'):
os.system("wget \"https://raw.githubusercontent.com/fxsjy/jiebademo/master/jiebademo/jieba/dict.txt.big\" -O app/data/dict.txt.big")
POKEMON_REVERSE_INDEX = pickle.load(open('app/data/pokemon_reverse_index', 'rb'))
POKEMON_NAMES_MAPPING = pickle.load(open('app/data/pokemon_names_mapping', 'rb'))
def add_data():
if Category.query.count() == 0:
print("\033[1;34mAdding Categories\033[m")
add_categories()
print(Category.query.count())
    if Term.query.filter_by(english='pikachu').first() is None:
print("\033[1;34mAdding Pokemons\033[m")
add_pokemons()
print(Term.query.count())
if Joke.query.count() == 0:
print("\033[1;34mAdding Jokes\033[m")
add_jokes()
print(Joke.query.count())
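# Minimal sketch of wiring this seeding hook into application startup; the
# create_app factory name is an assumption, add_data itself only needs an
# active application context so the model queries above can reach the
# database:
#
#   from app import create_app
#   from app.data import add_data
#
#   with create_app().app_context():
#       add_data()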
| mit | 6,186,210,763,107,593,000 | 46.029412 | 136 | 0.706692 | false |
Azure/azure-sdk-for-python | sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2015_05_01/operations/_proactive_detection_configurations_operations.py | 1 | 13054 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ProactiveDetectionConfigurationsOperations(object):
"""ProactiveDetectionConfigurationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.applicationinsights.v2015_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> List["_models.ApplicationInsightsComponentProactiveDetectionConfiguration"]
"""Gets a list of ProactiveDetection configurations of an Application Insights component.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of ApplicationInsightsComponentProactiveDetectionConfiguration, or the result of cls(response)
:rtype: list[~azure.mgmt.applicationinsights.v2015_05_01.models.ApplicationInsightsComponentProactiveDetectionConfiguration]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ApplicationInsightsComponentProactiveDetectionConfiguration"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-05-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('[ApplicationInsightsComponentProactiveDetectionConfiguration]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/components/{resourceName}/ProactiveDetectionConfigs'} # type: ignore
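    # Illustrative sketch, not from the generated file: with a configured
    # management client (the operations attribute name below is an
    # assumption), the operation above is reached roughly as:
    #
    #   configs = client.proactive_detection_configurations.list(
    #       resource_group_name='my-rg', resource_name='my-component')
    #
    # 'my-rg' and 'my-component' are placeholders; the call returns the list
    # of configurations documented in the docstring above.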
def get(
self,
resource_group_name, # type: str
resource_name, # type: str
configuration_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ApplicationInsightsComponentProactiveDetectionConfiguration"
"""Get the ProactiveDetection configuration for this configuration id.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource.
:type resource_name: str
        :param configuration_id: The ProactiveDetection configuration ID. This is unique within an
Application Insights component.
:type configuration_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationInsightsComponentProactiveDetectionConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.applicationinsights.v2015_05_01.models.ApplicationInsightsComponentProactiveDetectionConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationInsightsComponentProactiveDetectionConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'ConfigurationId': self._serialize.url("configuration_id", configuration_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationInsightsComponentProactiveDetectionConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/components/{resourceName}/ProactiveDetectionConfigs/{ConfigurationId}'} # type: ignore
def update(
self,
resource_group_name, # type: str
resource_name, # type: str
configuration_id, # type: str
proactive_detection_properties, # type: "_models.ApplicationInsightsComponentProactiveDetectionConfiguration"
**kwargs # type: Any
):
# type: (...) -> "_models.ApplicationInsightsComponentProactiveDetectionConfiguration"
"""Update the ProactiveDetection configuration for this configuration id.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource.
:type resource_name: str
        :param configuration_id: The ProactiveDetection configuration ID. This is unique within an
Application Insights component.
:type configuration_id: str
:param proactive_detection_properties: Properties that need to be specified to update the
ProactiveDetection configuration.
:type proactive_detection_properties: ~azure.mgmt.applicationinsights.v2015_05_01.models.ApplicationInsightsComponentProactiveDetectionConfiguration
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplicationInsightsComponentProactiveDetectionConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.applicationinsights.v2015_05_01.models.ApplicationInsightsComponentProactiveDetectionConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationInsightsComponentProactiveDetectionConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'ConfigurationId': self._serialize.url("configuration_id", configuration_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(proactive_detection_properties, 'ApplicationInsightsComponentProactiveDetectionConfiguration')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplicationInsightsComponentProactiveDetectionConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/components/{resourceName}/ProactiveDetectionConfigs/{ConfigurationId}'} # type: ignore
| mit | 4,413,926,440,169,966,000 | 53.165975 | 214 | 0.684541 | false |
community-ssu/telepathy-gabble | tests/twisted/vcard/update-rejected.py | 1 | 1263 | """
Regression test for fd.o #20442, where the XMPP error returned by a server that
doesn't like the avatar you tried to set was not mapped to a TP error before
being sent over the bus.
"""
from twisted.words.xish import domish
from servicetest import call_async
from gabbletest import exec_test, expect_and_handle_get_vcard, send_error_reply, sync_stream
import ns
import constants as cs
def test(q, bus, conn, stream):
conn.Connect()
expect_and_handle_get_vcard(q, stream)
sync_stream(q, stream)
call_async(q, conn.Avatars, 'SetAvatar', 'william shatner',
'image/x-actor-name')
# Gabble request the last version of the vCard before changing it
expect_and_handle_get_vcard(q, stream)
set_vcard_event = q.expect('stream-iq', query_ns=ns.VCARD_TEMP,
query_name='vCard', iq_type='set')
iq = set_vcard_event.stanza
error = domish.Element((None, 'error'))
error['code'] = '400'
error['type'] = 'modify'
error.addElement((ns.STANZA, 'bad-request'))
send_error_reply(stream, iq, error)
event = q.expect('dbus-error', method='SetAvatar')
assert event.error.get_dbus_name() == cs.INVALID_ARGUMENT, \
event.error.get_dbus_name()
if __name__ == '__main__':
exec_test(test)
| lgpl-2.1 | 3,823,451,831,685,819,000 | 28.372093 | 92 | 0.676168 | false |
googleapis/googleapis-gen | google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/services/services/ad_group_criterion_simulation_service/transports/grpc.py | 1 | 10538 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v6.resources.types import ad_group_criterion_simulation
from google.ads.googleads.v6.services.types import ad_group_criterion_simulation_service
from .base import AdGroupCriterionSimulationServiceTransport, DEFAULT_CLIENT_INFO
class AdGroupCriterionSimulationServiceGrpcTransport(AdGroupCriterionSimulationServiceTransport):
"""gRPC backend transport for AdGroupCriterionSimulationService.
Service to fetch ad group criterion simulations.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(self, *,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn("api_mtls_endpoint and client_cert_source are deprecated", DeprecationWarning)
host = api_mtls_endpoint if ":" in api_mtls_endpoint else api_mtls_endpoint + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host,
credentials=credentials,
client_info=client_info,
)
@classmethod
def create_channel(cls,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_ad_group_criterion_simulation(self) -> Callable[
[ad_group_criterion_simulation_service.GetAdGroupCriterionSimulationRequest],
ad_group_criterion_simulation.AdGroupCriterionSimulation]:
r"""Return a callable for the get ad group criterion
simulation method over gRPC.
Returns the requested ad group criterion simulation
in full detail.
Returns:
Callable[[~.GetAdGroupCriterionSimulationRequest],
~.AdGroupCriterionSimulation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_ad_group_criterion_simulation' not in self._stubs:
self._stubs['get_ad_group_criterion_simulation'] = self.grpc_channel.unary_unary(
'/google.ads.googleads.v6.services.AdGroupCriterionSimulationService/GetAdGroupCriterionSimulation',
request_serializer=ad_group_criterion_simulation_service.GetAdGroupCriterionSimulationRequest.serialize,
response_deserializer=ad_group_criterion_simulation.AdGroupCriterionSimulation.deserialize,
)
return self._stubs['get_ad_group_criterion_simulation']
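# --- Illustrative usage sketch (not part of the generated transport) ---------
# The transport is normally constructed indirectly by the service client, but a
# minimal direct use could look like the commented sketch below. The resource
# name is a hypothetical placeholder.
#
#   transport = AdGroupCriterionSimulationServiceGrpcTransport()
#   request = ad_group_criterion_simulation_service.GetAdGroupCriterionSimulationRequest(
#       resource_name="customers/1234567890/adGroupCriterionSimulations/PLACEHOLDER",
#   )
#   simulation = transport.get_ad_group_criterion_simulation(request)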
__all__ = (
'AdGroupCriterionSimulationServiceGrpcTransport',
)
| apache-2.0 | 3,827,448,597,332,809,000 | 44.619048 | 120 | 0.619283 | false |
jamespcole/home-assistant | homeassistant/components/synology_chat/notify.py | 1 | 2008 | """
SynologyChat platform for notify component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.synology_chat/
"""
import json
import logging
import requests
import voluptuous as vol
from homeassistant.const import CONF_RESOURCE, CONF_VERIFY_SSL
import homeassistant.helpers.config_validation as cv
from homeassistant.components.notify import (ATTR_DATA, PLATFORM_SCHEMA,
BaseNotificationService)
ATTR_FILE_URL = 'file_url'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_RESOURCE): cv.url,
vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
})
_LOGGER = logging.getLogger(__name__)
def get_service(hass, config, discovery_info=None):
"""Get the Synology Chat notification service."""
resource = config.get(CONF_RESOURCE)
verify_ssl = config.get(CONF_VERIFY_SSL)
return SynologyChatNotificationService(resource, verify_ssl)
class SynologyChatNotificationService(BaseNotificationService):
"""Implementation of a notification service for Synology Chat."""
def __init__(self, resource, verify_ssl):
"""Initialize the service."""
self._resource = resource
self._verify_ssl = verify_ssl
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
data = {
'text': message
}
extended_data = kwargs.get(ATTR_DATA)
file_url = extended_data.get(ATTR_FILE_URL) if extended_data else None
if file_url:
data['file_url'] = file_url
to_send = 'payload={}'.format(json.dumps(data))
response = requests.post(self._resource, data=to_send, timeout=10,
verify=self._verify_ssl)
if response.status_code not in (200, 201):
_LOGGER.exception(
"Error sending message. Response %d: %s:",
response.status_code, response.reason)
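# Example configuration.yaml entry (illustrative only; the resource value is a
# placeholder for the incoming-webhook URL generated by Synology Chat):
#
#   notify:
#     - platform: synology_chat
#       name: synology_chat
#       resource: https://SYNOLOGY_HOST:PORT/webapi/entry.cgi?api=SYNO.Chat.External&method=incoming&version=2&token=%22YOUR_TOKEN%22
#       verify_ssl: false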
| apache-2.0 | 4,965,785,065,355,764,000 | 29.892308 | 78 | 0.65488 | false |
ua-snap/downscale | snap_scripts/downscaling_v2/OLD_downscaling_v2/wrap_downscaler_cru_ts324_01_slurm_tem.py | 1 | 2369 | # # # EXAMPLE RUN SINGLE MODEL
def run_model( fn, command ):
import os, subprocess
head = '#!/bin/sh\n' + \
'#SBATCH --ntasks=32\n' + \
'#SBATCH --nodes=1\n' + \
'#SBATCH --ntasks-per-node=32\n' + \
'#SBATCH --account=snap\n' + \
'#SBATCH --mail-type=FAIL\n' + \
           '#SBATCH --mail-user=[email protected]\n' + \
'#SBATCH -p main\n'
with open( fn, 'w' ) as f:
f.writelines( head + '\n' + command + '\n' )
subprocess.call([ 'sbatch', fn ])
return 1
if __name__ == '__main__':
import os, subprocess
# # args setup
base_dir = '/workspace/Shared/Tech_Projects/DeltaDownscaling/project_data'
ncores = '32'
model = 'ts324_01'
scenario = 'historical'
variables = ['hur','cld'] #['tmp','hur','pre','cld']
out_varnames = ['hur','clt'] #['tas','hur','pr','clt']
slurm_path = os.path.join( base_dir, 'downscaled','slurm_log' )
if not os.path.exists( slurm_path ):
os.makedirs( slurm_path )
os.chdir( slurm_path )
for variable, out_varname in zip( variables, out_varnames ):
if variable == 'pre':
metric = 'total'
units = 'mm'
elif variable == 'tmp':
metric = 'mean'
units = 'C'
elif variable in ['hur','cld','clt']:
metric = 'mean'
units = 'pct'
clim_path = os.path.join( base_dir, 'cru', 'akcan_2km_extent', 'cru_cl20', out_varname )
output_path = os.path.join( os.path.join( base_dir, 'downscaled', model, scenario, out_varname ) )
if not os.path.exists( output_path ):
os.makedirs( output_path )
cru_ts = '/Data/Base_Data/Climate/World/CRU_grids/CRU_TS324_01/cru_ts3.24.01.1901.2015.' + variable + '.dat.nc.gz'
if variable == 'hur': # since we made this variable and it lives with the raw files with a slightly diff name
cru_ts = '/Data/Base_Data/Climate/World/CRU_grids/CRU_TS324_01/cru_ts3.24.01.1901.2015.' + variable + '.dat_snap_conversion.nc'
# # make a command to pass to slurm
script_path = '/workspace/UA/malindgren/repos/downscale/snap_scripts/downscaling_v2/downscale_cru_tem.py'
command = ' '.join([ 'ipython', script_path, '--',
'-ts', cru_ts,
'-cl', clim_path,
'-o', output_path,
'-m', model,
'-v', variable,
'-u', units,
'-met', metric,
'-nc', ncores,
'-ov', out_varname ])
fn = os.path.join( slurm_path, '_'.join(['downscale', model, variable]) + '.slurm' )
_ = run_model( fn, command )
| mit | 3,552,470,111,626,347,500 | 32.842857 | 130 | 0.60363 | false |
kingtaurus/cs224d | assignment2/q2_NER.py | 1 | 17828 | import os
import getpass
import sys
import time
import numpy as np
import tensorflow as tf
from q2_initialization import xavier_weight_init
import data_utils.utils as du
import data_utils.ner as ner
from utils import data_iterator
from model import LanguageModel
def variable_summaries(variable, name):
with tf.name_scope("summaries"):
mean = tf.reduce_mean(variable)
tf.scalar_summary('mean/' + name, mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_sum(tf.square(variable - mean)))
tf.scalar_summary('stddev/' + name, stddev)
tf.scalar_summary('max/' + name, tf.reduce_max(variable))
tf.scalar_summary('min/' + name, tf.reduce_min(variable))
tf.histogram_summary(name, variable)
class Config(object):
"""Holds model hyperparams and data information.
The config class is used to store various hyperparameters and dataset
information parameters. Model objects are passed a Config() object at
instantiation.
"""
embed_size = 50
batch_size = 64
label_size = 5
hidden_size = 100
max_epochs = 24
early_stopping = 5
dropout = 0.9
lr = 0.001
l2 = 0.001
window_size = 3
class NERModel(LanguageModel):
"""Implements a NER (Named Entity Recognition) model.
This class implements a deep network for named entity recognition. It
inherits from LanguageModel, which has an add_embedding method in addition to
the standard Model method.
"""
def load_data(self, debug=False):
"""Loads starter word-vectors and train/dev/test data."""
# Load the starter word vectors
self.wv, word_to_num, num_to_word = ner.load_wv(
'data/ner/vocab.txt', 'data/ner/wordVectors.txt')
tagnames = ['O', 'LOC', 'MISC', 'ORG', 'PER']
self.num_to_tag = dict(enumerate(tagnames))
tag_to_num = {v:k for k,v in self.num_to_tag.items()}
# Load the training set
docs = du.load_dataset('data/ner/train')
self.X_train, self.y_train = du.docs_to_windows(
docs, word_to_num, tag_to_num, wsize=self.config.window_size)
if debug:
self.X_train = self.X_train[:1024]
self.y_train = self.y_train[:1024]
# Load the dev set (for tuning hyperparameters)
docs = du.load_dataset('data/ner/dev')
self.X_dev, self.y_dev = du.docs_to_windows(
docs, word_to_num, tag_to_num, wsize=self.config.window_size)
if debug:
self.X_dev = self.X_dev[:1024]
self.y_dev = self.y_dev[:1024]
# Load the test set (dummy labels only)
docs = du.load_dataset('data/ner/test.masked')
self.X_test, self.y_test = du.docs_to_windows(
docs, word_to_num, tag_to_num, wsize=self.config.window_size)
def add_placeholders(self):
"""Generate placeholder variables to represent the input tensors
These placeholders are used as inputs by the rest of the model building
code and will be fed data during training. Note that when "None" is in a
placeholder's shape, it's flexible
Adds following nodes to the computational graph
input_placeholder: Input placeholder tensor of shape
(None, window_size), type tf.int32
labels_placeholder: Labels placeholder tensor of shape
(None, label_size), type tf.float32
dropout_placeholder: Dropout value placeholder (scalar),
type tf.float32
Add these placeholders to self as the instance variables
self.input_placeholder
self.labels_placeholder
self.dropout_placeholder
(Don't change the variable names)
"""
### YOUR CODE HERE
self.input_placeholder = tf.placeholder(tf.int32, shape=[None, self.config.window_size])
self.labels_placeholder = tf.placeholder(tf.float32, shape=[None, self.config.label_size])
self.dropout_placeholder = tf.placeholder(tf.float32, name="dropout_keep_prob")
### END YOUR CODE
def create_feed_dict(self, input_batch, dropout, label_batch=None):
"""Creates the feed_dict for softmax classifier.
A feed_dict takes the form of:
feed_dict = {
<placeholder>: <tensor of values to be passed for placeholder>,
....
}
Hint: The keys for the feed_dict should be a subset of the placeholder
tensors created in add_placeholders.
Hint: When label_batch is None, don't add a labels entry to the feed_dict.
Args:
input_batch: A batch of input data.
label_batch: A batch of label data.
Returns:
feed_dict: The feed dictionary mapping from placeholders to values.
"""
### YOUR CODE HERE
feed_dict = {self.input_placeholder: input_batch,
self.dropout_placeholder: dropout}
if label_batch is not None:
feed_dict[self.labels_placeholder] = label_batch
### END YOUR CODE
return feed_dict
def add_embedding(self):
"""Add embedding layer that maps from vocabulary to vectors.
Creates an embedding tensor (of shape (len(self.wv), embed_size). Use the
input_placeholder to retrieve the embeddings for words in the current batch.
(Words are discrete entities. They need to be transformed into vectors for use
in deep-learning. Although we won't do so in this problem, in practice it's
useful to initialize the embedding with pre-trained word-vectors. For this
problem, using the default initializer is sufficient.)
Hint: This layer should use the input_placeholder to index into the
embedding.
Hint: You might find tf.nn.embedding_lookup useful.
Hint: See following link to understand what -1 in a shape means.
https://www.tensorflow.org/versions/r0.8/api_docs/python/array_ops.html#reshape
Hint: Check the last slide from the TensorFlow lecture.
Hint: Here are the dimensions of the variables you will need to create:
L: (len(self.wv), embed_size)
Returns:
window: tf.Tensor of shape (-1, window_size*embed_size)
"""
# The embedding lookup is currently only implemented for the CPU
with tf.device('/cpu:0'):
### YOUR CODE HERE
with tf.variable_scope("embedding_layer") as scope:
embedding = tf.get_variable("embedding",
[len(self.wv), self.config.embed_size],
initializer=xavier_weight_init())
window = tf.nn.embedding_lookup(params=embedding, ids=self.input_placeholder)
window = tf.reshape(window, shape=[-1, self.config.window_size * self.config.embed_size], name="window")
variable_summaries(window, window.name)
### END YOUR CODE
return window
def add_model(self, window):
"""Adds the 1-hidden-layer NN.
Hint: Use a variable_scope (e.g. "Layer") for the first hidden layer, and
another variable_scope (e.g. "Softmax") for the linear transformation
preceding the softmax. Make sure to use the xavier_weight_init you
defined in the previous part to initialize weights.
Hint: Make sure to add in regularization and dropout to this network.
Regularization should be an addition to the cost function, while
dropout should be added after both variable scopes.
Hint: You might consider using a tensorflow Graph Collection (e.g
"total_loss") to collect the regularization and loss terms (which you
will add in add_loss_op below).
Hint: Here are the dimensions of the various variables you will need to
create
W: (window_size*embed_size, hidden_size)
b1: (hidden_size,)
U: (hidden_size, label_size)
b2: (label_size)
https://www.tensorflow.org/versions/r0.7/api_docs/python/framework.html#graph-collections
Args:
window: tf.Tensor of shape (-1, window_size*embed_size)
Returns:
output: tf.Tensor of shape (batch_size, label_size)
"""
### YOUR CODE HERE
with tf.variable_scope("layer") as layer_scope:
W = tf.get_variable("W_l",
shape=[self.config.window_size * self.config.embed_size, self.config.hidden_size],
initializer=xavier_weight_init())
b1 = tf.get_variable("b1",
shape=[self.config.hidden_size],
initializer=tf.constant_initializer(0.0))
variable_summaries(W, W.name)
variable_summaries(b1, b1.name)
with tf.variable_scope("hidden_layer") as hidden_layer:
U = tf.get_variable("U_h",
shape=[self.config.hidden_size, self.config.label_size],
initializer=xavier_weight_init())
b2 = tf.get_variable("b2",
shape=[self.config.label_size],
initializer=tf.constant_initializer(0.0))
variable_summaries(U, U.name)
variable_summaries(b2, b2.name)
h_fc1 = tf.nn.relu(tf.matmul(window, W) + b1)
h_fc1 = tf.nn.dropout(h_fc1, self.dropout_placeholder)
h_fc2 = tf.matmul(h_fc1, U) + b2
h_fc2 = tf.nn.dropout(h_fc2, self.dropout_placeholder)
l2_loss = tf.nn.l2_loss(W) + tf.nn.l2_loss(b1) + tf.nn.l2_loss(U) + tf.nn.l2_loss(b2)
tf.add_to_collection(name="l2_loss", value=l2_loss)
output = h_fc2
### END YOUR CODE
return output
def add_loss_op(self, y):
"""Adds cross_entropy_loss ops to the computational graph.
Hint: You can use tf.nn.softmax_cross_entropy_with_logits to simplify your
implementation. You might find tf.reduce_mean useful.
Args:
y (pred): A tensor of shape (batch_size, n_classes)
Returns:
loss: A 0-d tensor (scalar)
"""
### YOUR CODE HERE
# regularization = tf.nn.l2_loss(W) + tf.nn.l2_loss(b1) +
# tf.nn.l2_loss(U) + tf.nn.l2_loss(b2)
# loss += self.config.reg * regularization
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y, self.labels_placeholder))
loss += self.config.l2 * tf.get_collection("l2_loss")[0]
### END YOUR CODE
return loss
def add_training_op(self, loss):
"""Sets up the training Ops.
Creates an optimizer and applies the gradients to all trainable variables.
The Op returned by this function is what must be passed to the
`sess.run()` call to cause the model to train. See
https://www.tensorflow.org/versions/r0.7/api_docs/python/train.html#Optimizer
for more information.
Hint: Use tf.train.AdamOptimizer for this model.
Calling optimizer.minimize() will return a train_op object.
Args:
loss: Loss tensor, from cross_entropy_loss.
Returns:
train_op: The Op for training.
"""
### YOUR CODE HERE
train_op = tf.train.AdamOptimizer(self.config.lr).minimize(loss)
tf.scalar_summary('loss', loss)
#alternate:
# global_step = tf.Variable(0, name="global_step", trainable=False)
# optimizer = tf.train.AdamOptimizer(1e-4)
# gradients = optimizer.compute_gradients(loss)
# train_op = optimizer.apply_gradients(gradients, global_step=global_step)
### END YOUR CODE
return train_op
def __init__(self, config):
"""Constructs the network using the helper functions defined above."""
self.config = config
self.load_data(debug=False)
self.add_placeholders()
window = self.add_embedding()
y = self.add_model(window)
self.loss = self.add_loss_op(y)
self.predictions = tf.nn.softmax(y)
one_hot_prediction = tf.argmax(self.predictions, 1)
correct_prediction = tf.equal(
tf.argmax(self.labels_placeholder, 1), one_hot_prediction)
self.correct_predictions = tf.reduce_sum(tf.cast(correct_prediction, 'int32'))
self.train_op = self.add_training_op(self.loss)
self.merged_summaries = tf.merge_all_summaries()
self.summary_writer = None
def run_epoch(self, session, input_data, input_labels,
shuffle=True, verbose=True, epoch=0):
orig_X, orig_y = input_data, input_labels
dp = self.config.dropout
# We're interested in keeping track of the loss and accuracy during training
total_loss = []
total_correct_examples = 0
total_processed_examples = 0
    total_steps = len(orig_X) // self.config.batch_size
for step, (x, y) in enumerate(
data_iterator(orig_X, orig_y, batch_size=self.config.batch_size,
label_size=self.config.label_size, shuffle=shuffle)):
feed = self.create_feed_dict(input_batch=x, dropout=dp, label_batch=y)
loss, total_correct, _, merged = session.run(
[self.loss, self.correct_predictions, self.train_op, self.merged_summaries],
feed_dict=feed)
if step % 50 == 0:
self.summary_writer.add_summary(merged, epoch * total_steps + step)
total_processed_examples += len(x)
total_correct_examples += total_correct
total_loss.append(loss)
##
if verbose and step % verbose == 0:
sys.stdout.write('\r{} / {} : loss = {}'.format(
step, total_steps, np.mean(total_loss)))
sys.stdout.flush()
if verbose:
sys.stdout.write('\r')
sys.stdout.flush()
return np.mean(total_loss), total_correct_examples / float(total_processed_examples)
def predict(self, session, X, y=None):
"""Make predictions from the provided model."""
# If y is given, the loss is also calculated
# We deactivate dropout by setting it to 1
dp = 1
losses = []
results = []
if np.any(y):
data = data_iterator(X, y, batch_size=self.config.batch_size,
label_size=self.config.label_size, shuffle=False)
else:
data = data_iterator(X, batch_size=self.config.batch_size,
label_size=self.config.label_size, shuffle=False)
for step, (x, y) in enumerate(data):
feed = self.create_feed_dict(input_batch=x, dropout=dp)
if np.any(y):
feed[self.labels_placeholder] = y
loss, preds = session.run(
[self.loss, self.predictions], feed_dict=feed)
losses.append(loss)
else:
preds = session.run(self.predictions, feed_dict=feed)
predicted_indices = preds.argmax(axis=1)
results.extend(predicted_indices)
if len(losses) == 0:
return 0, results
return np.mean(losses), results
def print_confusion(confusion, num_to_tag):
"""Helper method that prints confusion matrix."""
# Summing top to bottom gets the total number of tags guessed as T
total_guessed_tags = confusion.sum(axis=0)
# Summing left to right gets the total number of true tags
total_true_tags = confusion.sum(axis=1)
print()
print(confusion)
for i, tag in sorted(num_to_tag.items()):
prec = confusion[i, i] / float(total_guessed_tags[i])
recall = confusion[i, i] / float(total_true_tags[i])
print('Tag: {} - P {:2.4f} / R {:2.4f}'.format(tag, prec, recall))
def calculate_confusion(config, predicted_indices, y_indices):
"""Helper method that calculates confusion matrix."""
confusion = np.zeros((config.label_size, config.label_size), dtype=np.int32)
for i in range(len(y_indices)):
correct_label = y_indices[i]
guessed_label = predicted_indices[i]
confusion[correct_label, guessed_label] += 1
return confusion
def save_predictions(predictions, filename):
"""Saves predictions to provided file."""
  # Write one predicted label index per line.
  with open(filename, "w") as f:
    for prediction in predictions:
      f.write(str(prediction) + "\n")
def test_NER():
"""Test NER model implementation.
You can use this function to test your implementation of the Named Entity
Recognition network. When debugging, set max_epochs in the Config object to 1
so you can rapidly iterate.
"""
config = Config()
with tf.Graph().as_default():
model = NERModel(config)
init = tf.initialize_all_variables()
saver = tf.train.Saver()
with tf.Session() as session:
best_val_loss = float('inf')
best_val_epoch = 0
model.summary_writer = tf.train.SummaryWriter("NER_train_log/", session.graph)
session.run(init)
for epoch in range(config.max_epochs):
print('Epoch {}'.format(epoch))
start = time.time()
###
train_loss, train_acc = model.run_epoch(session, model.X_train,
model.y_train, epoch=epoch)
val_loss, predictions = model.predict(session, model.X_dev, model.y_dev)
print('Training loss: {}'.format(train_loss))
print('Training acc: {}'.format(train_acc))
print('Validation loss: {}'.format(val_loss))
if val_loss < best_val_loss:
best_val_loss = val_loss
best_val_epoch = epoch
if not os.path.exists("./weights"):
os.makedirs("./weights")
saver.save(session, './weights/ner.weights')
if epoch - best_val_epoch > config.early_stopping:
break
###
confusion = calculate_confusion(config, predictions, model.y_dev)
cm = confusion.copy()
cm = cm.astype(np.float32) / cm.sum(axis=1, keepdims=True)
cm = cm[np.newaxis, :, :, np.newaxis].astype(np.float32)
cm_tf_image = tf.convert_to_tensor(cm)
cm_is = tf.image_summary("confusion_matrix", cm_tf_image)
cm_current_epoch = session.run(cm_is)
model.summary_writer.add_summary(cm_current_epoch, epoch)
print_confusion(confusion, model.num_to_tag)
print('Total time: {}'.format(time.time() - start))
saver.restore(session, './weights/ner.weights')
print('Test')
print('=-=-=')
print('Writing predictions to q2_test.predicted')
_, predictions = model.predict(session, model.X_test, model.y_test)
save_predictions(predictions, "q2_test.predicted")
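# Training writes TensorBoard summaries (loss, parameter statistics and the
# confusion-matrix image) to the "NER_train_log/" directory used above; they can
# be inspected with, for example:  tensorboard --logdir=NER_train_log/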
if __name__ == "__main__":
test_NER()
| mit | -5,546,587,094,945,920,000 | 38.096491 | 112 | 0.644604 | false |
dutwfk/pytest | base/queue.py | 1 | 1032 | __author__ = 'kris'
class Queue(object):
    def __init__(self, size=16):
        self.queue = []
        self.size = size
    def isEmpty(self):
        return len(self.queue) == 0
    def isFull(self):
        return len(self.queue) == self.size
    def first(self):
        # Oldest element (front of the queue).
        if self.isEmpty():
            raise Exception('QueueIsEmpty')
        else:
            return self.queue[0]
    def last(self):
        # Newest element (rear of the queue).
        if self.isEmpty():
            raise Exception('QueueIsEmpty')
        else:
            return self.queue[-1]
    def add(self, value):
        if self.isFull():
            raise Exception('QueueIsFull')
        else:
            self.queue.append(value)
    def delete(self):
        # FIFO removal from the front of the queue.
        if self.isEmpty():
            raise Exception('QueueIsEmpty')
        else:
            return self.queue.pop(0)
def show(self):
print self.queue
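# Minimal usage sketch (assumes the corrected Queue above); expected output is
# shown in the trailing comments.
if __name__ == '__main__':
    q = Queue(size=4)
    for item in (1, 2, 3):
        q.add(item)
    q.show()           # [1, 2, 3]
    print q.first()    # 1 (oldest element)
    print q.last()     # 3 (newest element)
    print q.delete()   # 1 (FIFO removal)
    q.show()           # [2, 3]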
| mit | 3,347,516,269,478,357,500 | 21.434783 | 53 | 0.506783 | false |
eudoxos/sphinxcontrib-embedly | sphinxcontrib/embedly.py | 1 | 5637 | # -*- coding: utf-8 -*-
"""
sphinxcontrib.embedly
~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2013-2014 by the contributors (see AUTHORS file)
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function, unicode_literals, absolute_import
import os
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.parsers.rst import Directive
from sphinx.errors import ExtensionError
# from sphinx.util.compat import Directive
from embedly import Embedly as EmbedlyClient
__version__ = '0.1'
DEFAULT_TIMEOUT = 60
USER_AGENT = ('Mozilla/5.0 (compatible; sphinxcontrib-embedly/%s;)' %
__version__)
class EmbedlyError(ExtensionError):
category = 'Embedly error'
class EmbedlyRenderer(object):
def photo(self, node, obj):
params = {
'width': obj['width'],
'height': obj['height'],
'title': obj.get('title', ''),
'url': obj['url'],
}
return """<img width="{width}"
height="{height}"
alt="{title}"
src="{url}" />""".format(**params)
def html(self, node, obj):
return obj['html']
video = html
# Sites like slideshare.com use the type "rich"
rich = html
def link(self, node, obj):
parts = []
obj['title'] = obj.get('title') or obj['url']
obj['description'] = obj.get('description') or ''
parts.append("""<a href="{url}" title="{description}">{title}</a>""")
return "\n".join([part.format(**obj) for part in parts])
def error(self, node, obj):
print(obj.__dict__)
raise EmbedlyError('code %s' % str(obj.get('error_code', 'unknown')))
def render(self, client, node):
response = client.oembed(**node.attributes)
return getattr(self, response['type'], self.error)(node, response)
renderer = EmbedlyRenderer()
class embedly(nodes.General, nodes.Element):
pass
def wmode(argument):
"""Conversion function for the "wmode" option."""
return directives.choice(argument, ('window', 'opaque', 'transparent'))
class EmbedlyDirective(Directive):
"""Directive for embedding using Embedly"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'maxwidth': directives.nonnegative_int,
'maxheight': directives.nonnegative_int,
'width': directives.nonnegative_int,
'wmode': wmode,
'nostyle': directives.flag,
'autoplay': directives.flag,
'videosrc': directives.flag,
'words': directives.nonnegative_int,
'chars': directives.nonnegative_int,
'frame': directives.flag,
'secure': directives.flag,
}
def run(self):
node = embedly()
node['url_or_urls'] = directives.uri(self.arguments[0].strip())
for option in ['maxwidth', 'maxheight', 'width', 'wmode',
'words', 'chars']:
if option in self.options:
node[option] = self.options[option]
for option in ['nostyle', 'autoplay', 'videosrc', 'frame', 'secure']:
if option in self.options:
node[option] = True
return [node]
def html_visit_embedly_docutils(self, node):
embedly_key = os.environ.get('EMBEDLY_KEY', None)
if embedly_key is None:
raise EmbedlyError('The environment variable EMBEDLY_KEY is not set')
client = EmbedlyClient(key=embedly_key,
user_agent=USER_AGENT,
timeout=os.environ.get('EMBEDLY_TIMEOUT',
DEFAULT_TIMEOUT))
try:
content = renderer.render(client, node)
except Exception as e:
msg = 'embedly "%s" error: %s' % (node['url_or_urls'], e)
raise EmbedlyError(msg)
else:
self.body.append(content)
raise nodes.SkipNode
def html_visit_embedly_sphinx(self, node):
if self.builder.config.embedly_key is None:
raise ValueError('The Sphinx config variable embedly_key must be set')
client = EmbedlyClient(key=self.builder.config.embedly_key,
user_agent=USER_AGENT,
timeout=self.builder.config.embedly_timeout)
try:
content = renderer.render(client, node)
except Exception as e:
msg = 'embedly "%s" error: %s' % (node['url_or_urls'], e)
self.builder.warn(msg)
else:
self.body.append(content)
raise nodes.SkipNode
def latex_visit_embedly(self,node):
    'Show hyperlink in a centered box in LaTeX output'
# move \url away from the raw string via %s, see http://stackoverflow.com/questions/7602171/unicode-error-unicodeescape-codec-cant-decode-bytes-string-with-u
self.body.append(r'\begin{quote}\begin{center}\fbox{%s{%s}}\end{center}\end{quote}'%('\\url',node['url_or_urls']))
def latex_depart_embedly(self,node):
pass
def setup(app):
app.add_node(embedly,
html=(html_visit_embedly_sphinx, None),
latex=(latex_visit_embedly,latex_depart_embedly)
)
app.add_config_value('embedly_key', None, 'env')
app.add_config_value('embedly_timeout', DEFAULT_TIMEOUT, 'env')
app.add_directive('embedly', EmbedlyDirective)
def setup_docutils():
directives.register_directive('embedly', EmbedlyDirective)
from docutils.writers.html4css1 import HTMLTranslator
setattr(HTMLTranslator, 'visit_embedly', html_visit_embedly_docutils)
setattr(HTMLTranslator, 'depart_embedly', lambda self, node: None)
| bsd-2-clause | -739,285,607,462,079,500 | 31.773256 | 161 | 0.612915 | false |
bytescout/ByteScout-SDK-SourceCode | Cloud API Server/PDF To Text API/Python/Convert PDF To Text From Uploaded File/ConvertPdfToTextFromUploadedFile.py | 1 | 3217 | import os
import requests # pip install requests
# Please NOTE: In this sample we're assuming Cloud Api Server is hosted at "https://localhost".
# If it's not then please replace this with your hosting URL.
# Base URL for PDF.co Web API requests
BASE_URL = "https://localhost"
# Source PDF file
SourceFile = ".\\sample.pdf"
# Comma-separated list of page indices (or ranges) to process. Leave empty for all pages. Example: '0,2-5,7-'.
Pages = ""
# PDF document password. Leave empty for unprotected documents.
Password = ""
# Destination TXT file name
DestinationFile = ".\\result.txt"
def main(args = None):
uploadedFileUrl = uploadFile(SourceFile)
if (uploadedFileUrl != None):
convertPdfToText(uploadedFileUrl, DestinationFile)
def convertPdfToText(uploadedFileUrl, destinationFile):
"""Converts PDF To Text using PDF.co Web API"""
# Prepare URL for 'PDF To Text' API request
url = "{}/pdf/convert/to/text?name={}&password={}&pages={}&url={}".format(
BASE_URL,
os.path.basename(destinationFile),
Password,
Pages,
uploadedFileUrl
)
# Execute request and get response as JSON
response = requests.get(url, headers={ "content-type": "application/octet-stream" })
if (response.status_code == 200):
json = response.json()
if json["error"] == False:
# Get URL of result file
resultFileUrl = json["url"]
# Download result file
r = requests.get(resultFileUrl, stream=True)
if (r.status_code == 200):
with open(destinationFile, 'wb') as file:
for chunk in r:
file.write(chunk)
print(f"Result file saved as \"{destinationFile}\" file.")
else:
print(f"Request error: {response.status_code} {response.reason}")
else:
# Show service reported error
print(json["message"])
else:
print(f"Request error: {response.status_code} {response.reason}")
def uploadFile(fileName):
"""Uploads file to the cloud"""
# 1. RETRIEVE PRESIGNED URL TO UPLOAD FILE.
# Prepare URL for 'Get Presigned URL' API request
url = "{}/file/upload/get-presigned-url?contenttype=application/octet-stream&name={}".format(
BASE_URL, os.path.basename(fileName))
# Execute request and get response as JSON
response = requests.get(url)
if (response.status_code == 200):
json = response.json()
if json["error"] == False:
# URL to use for file upload
uploadUrl = json["presignedUrl"]
# URL for future reference
uploadedFileUrl = json["url"]
# 2. UPLOAD FILE TO CLOUD.
with open(fileName, 'rb') as file:
requests.put(uploadUrl, data=file, headers={ "content-type": "application/octet-stream" })
return uploadedFileUrl
else:
# Show service reported error
print(json["message"])
else:
print(f"Request error: {response.status_code} {response.reason}")
return None
if __name__ == '__main__':
main() | apache-2.0 | -4,017,771,427,214,986,000 | 32.175258 | 110 | 0.606155 | false |
ChatAI/dumbots | bots/SynBot/SynBot.py | 1 | 3930 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Simple Bot to reply to Telegram messages
from telegram.ext import Updater
from telegram import ReplyKeyboardMarkup
import logging
import os
from nltk.corpus import wordnet as wn
from lib.wn import *
gWord = 'internet'
# Enable logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
ERROR_STR = 'Only a meaningful WORD is accepted!'
DUMB_STR = 'I am too dumb to answer that!'
Means, Syno, Anto, Eg = ('Meaning','Synonyms','Antonyms','Usage')
# Define a few command handlers. These usually take the two arguments bot and
# update. Error handlers also receive the raised TelegramError object in error.
def start(bot, update):
bot.sendMessage(update.message.chat_id, text='Hi!')
def help(bot, update):
bot.sendMessage(update.message.chat_id, text='Help!')
def echo(bot, update):
bot.sendMessage(update.message.chat_id, text=update.message.text)
def reply(bot, update):
global gWord
# get word
word = update.message.text
print 'Message : ',word
if word == Means or word == Syno or word == Anto or word == Eg:
print 'Selected', word
if word == Means:
reply_msg = synsets(gWord)[0].definition()
elif word == Syno:
reply_msg = ', '.join(synonymsOf(synsets(gWord)))
elif word == Anto:
reply_msg = ', '.join(antonymsOf(synsets(gWord)))
else:
reply_msg = '\n'.join(wordEg(synsets(gWord)))
if reply_msg:
print 'Reply : ', reply_msg
bot.sendMessage(update.message.chat_id, text=reply_msg)
else:
print 'Reply : Something went wrong!'
bot.sendMessage(update.message.chat_id, text='Something went wrong!')
else:
gWord = word
reply_markup = ReplyKeyboardMarkup([[Means, Syno, Anto, Eg]], one_time_keyboard=True)
bot.sendMessage(update.message.chat_id, text="What do you want?",reply_markup=reply_markup)
'''
if ' ' not in word:
synms = wn.synsets(word)
if synms:
reply_msg = synms[0].definition()
print 'Reply : ',reply_msg
bot.sendMessage(update.message.chat_id, text=reply_msg)
else:
bot.sendMessage(update.message.chat_id, text=ERROR_STR)
else:
words = word.split(' ')
if len(words) == 2 and words[0].lower() == 'like':
synonyms = synonymsOf(synsets(words[1]))
_synonyms = [y for y in synonyms if y != synonyms[0]]
reply_msg = ', '.join(_synonyms)
print 'Reply : ',reply_msg
if reply_msg:
bot.sendMessage(update.message.chat_id, text=reply_msg)
else:
bot.sendMessage(update.message.chat_id, text=DUMB_STR)
else:
bot.sendMessage(update.message.chat_id, text=ERROR_STR)
'''
def error(bot, update, error):
logger.warn('Update "%s" caused error "%s"' % (update, error))
def main():
# Create the EventHandler and pass it your bot's token.
updater = Updater(os.environ['SYN_BOT_TOKEN'])
# Get the dispatcher to register handlers
dp = updater.dispatcher
# on different commands - answer in Telegram
dp.addTelegramCommandHandler("start", start)
dp.addTelegramCommandHandler("help", help)
# on noncommand i.e message - echo the message on Telegram
dp.addTelegramMessageHandler(reply)
# log all errors
dp.addErrorHandler(error)
# Start the Bot
updater.start_polling()
print 'Bot Started!!\n'
# Run the bot until the you presses Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
updater.idle()
if __name__ == '__main__':
main()
| gpl-3.0 | -2,590,771,391,261,764,000 | 30.190476 | 99 | 0.626463 | false |