repo_name (stringlengths 5-92) | path (stringlengths 4-232) | copies (stringclasses 19 values) | size (stringlengths 4-7) | content (stringlengths 721-1.04M) | license (stringclasses 15 values) | hash (int64 -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64 6.51-99.9) | line_max (int64 15-997) | alpha_frac (float64 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---
tensorflow/lingvo | lingvo/compat.py | 1 | 17541 | # Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The compatible tensorflow library."""
import os
# pylint: disable=g-bad-import-order, unused-import, g-import-not-at-top
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf2
from tensorflow.compat.v2 import * # pylint: disable=wildcard-import
# Import absl.flags and absl.logging to overwrite the Tensorflow ones.
# This is the intended behavior in TF 2.0.
from absl import flags
from absl import logging
# pylint: disable=g-direct-tensorflow-import
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import random_seed
from tensorflow.python.framework import function as _function_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import inplace_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.tf2 import enabled as tf2_enabled
from tensorflow.python.util import module_wrapper as _module_wrapper
# For determining if we are running with --define=tf_api_version=1 or 2.
from tensorflow import _major_api_version
# pylint: enable=g-direct-tensorflow-import
# pylint: enable=unused-import, g-bad-import-order, g-import-not-at-top
if tf1.executing_eagerly():
logging.warning("Lingvo with eager execution is not well tested. Consider "
"disabling eager with tf.compat.v1.disable_eager_execution() "
"or proceed at your own risk.")
def _clone_module(m):
"""Shallow clone of module `m`."""
if isinstance(m, _module_wrapper.TFModuleWrapper):
# pylint: disable=protected-access
return _module_wrapper.TFModuleWrapper(
wrapped=_clone_module(m._tfmw_wrapped_module),
module_name=m._tfmw_module_name,
public_apis=m._tfmw_public_apis,
deprecation=m._tfmw_print_deprecation_warnings,
has_lite=m._tfmw_has_lite)
# pylint: enable=protected-access
out = type(m)(m.__name__, m.__doc__)
out.__dict__.update(m.__dict__)
return out
def summarize_tf2_status():
"""Summarize the TF version environment."""
tf2_behavior_env = os.environ.get("TF2_BEHAVIOR")
return "; ".join([
f"tf._major_api_version: {_major_api_version}",
f"tf2_enabled() == {tf2_enabled()}",
f"TF2_BEHAVIOR == {tf2_behavior_env}",
])
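# Illustrative usage (not part of the original module): the summary string can
# be logged at process startup, e.g.
#   logging.info("TF environment: %s", summarize_tf2_status())
# which reports _major_api_version, tf2_enabled() and the TF2_BEHAVIOR env var.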
# Aliases to a few routines lingvo libraries use often.
Defun = _function_lib.Defun
While = functional_ops.While
If = functional_ops.If
InplaceUpdate = inplace_ops.alias_inplace_update
Empty = inplace_ops.empty
EmptyLike = inplace_ops.empty_like
# pylint: disable=undefined-variable, used-before-assignment
# Move this V2 symbol here to avoid being overwritten by its following V1
# version.
where_v2 = where
# Import the local V2 module to make sure the following V1 overwriting never
# applies to the global module and symbol.
data = _clone_module(data)
graph_util = _clone_module(graph_util)
image = _clone_module(image)
io = _clone_module(io)
losses = _clone_module(keras.losses)
metrics = _clone_module(keras.metrics)
nn = _clone_module(nn)
saved_model = _clone_module(saved_model)
strings = _clone_module(strings)
summary = _clone_module(summary)
test = _clone_module(test)
train = _clone_module(train)
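# Illustrative note (not from the original source): because the V2 submodules
# are shallow-cloned above, the V1 overrides applied later in this file (for
# example `io.tf_record_iterator = tf1.io.tf_record_iterator`) only affect this
# local compatibility module, e.g.
#
#   from lingvo import compat as tf
#   tf.io.tf_record_iterator       # patched V1 symbol on the cloned module
#   import tensorflow as real_tf
#   real_tf.io                     # the global TensorFlow module is untouched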
# By default, with TF2 enabled and (eager execution or tf.function),
# `tf.data` API will choose the stateful implementation for methods
# `tf.data.Dataset.shuffle()`, `tf.data.Dataset.cache()` and
# `tf.data.Dataset.list_files()`. which is not compatible with
# `tf.data.make_one_shot_iterator` in TF2 (see b/162270607).
# Here is a stateless implementation of `shuffle`, `cache` and
# `list_files` to resolve the TF2 incompatibility issue.
# Note that these methods are meant for internal use only. Please don't use
# them unless you know exactly what you are doing.
class _CacheDataset(dataset_ops.UnaryUnchangedStructureDataset):
"""A `Dataset` that caches elements of its input."""
def __init__(self, input_dataset, filename):
"""Caches the elements in the dataset."""
self._input_dataset = input_dataset
self._filename = ops.convert_to_tensor(
filename, dtype=string, name="filename")
variant_tensor = gen_dataset_ops.cache_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
filename=self._filename,
**self._flat_structure)
super().__init__(input_dataset, variant_tensor)
class _ShuffleDataset(dataset_ops.UnaryUnchangedStructureDataset):
"""A `Dataset` that randomly shuffles the elements of its input."""
def __init__(self,
input_dataset,
buffer_size,
seed=None,
reshuffle_each_iteration=None):
"""Randomly shuffles the elements of this dataset."""
self._input_dataset = input_dataset
self._buffer_size = ops.convert_to_tensor(
buffer_size, dtype=int64, name="buffer_size")
self._seed, self._seed2 = random_seed.get_seed(seed)
if reshuffle_each_iteration is None:
reshuffle_each_iteration = True
self._reshuffle_each_iteration = reshuffle_each_iteration
variant_tensor = gen_dataset_ops.shuffle_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
buffer_size=self._buffer_size,
seed=self._seed,
seed2=self._seed2,
reshuffle_each_iteration=self._reshuffle_each_iteration,
**self._flat_structure)
super().__init__(input_dataset, variant_tensor)
def stateless_shuffle_dataset(buffer_size,
seed=None,
reshuffle_each_iteration=None):
"""Randomly shuffles the elements of the dataset based on a stateless shuffle implementation.
This method returns a stateless ShuffleDataset unconditionally. It can be
used with `dataset.apply()` to obtain a stateless shuffled dataset, which
supports the TF1 compatibility API `tf.data.make_one_shot_iterator()` in TF2.
Example:
>>> dataset = tf.data.Dataset.range(3)
>>> dataset = dataset.apply(
... stateless_shuffle_dataset(3, reshuffle_each_iteration=True))
Args:
buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
elements from this dataset from which the new dataset will sample.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random
seed that will be used to create the distribution. See
`tf.random.set_seed` for behavior.
reshuffle_each_iteration: (Optional.) A boolean, which if true indicates
that the dataset should be pseudorandomly reshuffled each time it is
iterated over. (Defaults to `True`.)
Returns:
Dataset: A `Dataset`.
"""
def _apply_fn(dataset):
out_dataset = dataset_ops.DatasetV1Adapter(
_ShuffleDataset(dataset, buffer_size, seed, reshuffle_each_iteration))
return out_dataset
return _apply_fn
def stateless_cache_dataset(filename=""):
"""Caches the elements in the dataset based on a stateless cache implementation.
This method returns a stateless CacheDataset unconditionally. It can be
used with `dataset.apply()` to obtain a stateless cached dataset, which
supports the TF1 compatibility API `tf.data.make_one_shot_iterator()` in TF2.
Example:
>>> dataset = tf.data.Dataset.range(3)
>>> dataset = dataset.apply(stateless_cache_dataset())
Args:
filename: A `tf.string` scalar `tf.Tensor`, representing the name of a
directory on the filesystem to use for caching elements in this Dataset.
If a filename is not provided, the dataset will be cached in memory.
Returns:
Dataset: A `Dataset`.
"""
def _apply_fn(dataset):
out_dataset = dataset_ops.DatasetV1Adapter(_CacheDataset(dataset, filename))
return out_dataset
return _apply_fn
def stateless_list_files(file_pattern, shuffle=None, seed=None):
"""A dataset of all files matching one or more glob patterns.
Note that, if `shuffle` is not None, it will use a stateless shuffle
implementation. Then the returned dataset supports the TF1 compatibility API
`tf.data.make_one_shot_iterator()` in TF2.
Example:
>>> dataset = tf.stateless_list_files("some_file_pattern")
Args:
file_pattern: A string, a list of strings, or a `tf.Tensor` of string type
(scalar or vector), representing the filename glob (i.e. shell wildcard)
pattern(s) that will be matched.
shuffle: (Optional.) If `True`, the file names will be shuffled randomly
based on a stateless implementation. Defaults to `True`.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random
seed that will be used to create the distribution. See
`tf.random.set_seed` for behavior.
Returns:
Dataset: A `Dataset` of strings corresponding to file names.
"""
with ops.name_scope("list_files"):
if shuffle is None:
shuffle = True
file_pattern = ops.convert_to_tensor(
file_pattern, dtype=string, name="file_pattern")
matching_files = gen_io_ops.matching_files(file_pattern)
# Raise an exception if `file_pattern` does not match any files.
condition = math_ops.greater(
array_ops.shape(matching_files)[0], 0, name="match_not_empty")
message = math_ops.add(
"No files matched pattern: ",
strings.reduce_join(file_pattern, separator=", "),
name="message")
assert_not_empty = debugging.Assert(
condition, [message], summarize=1, name="assert_not_empty")
with control_dependencies([assert_not_empty]):
matching_files = identity(matching_files)
dataset = data.Dataset.from_tensor_slices(matching_files)
if shuffle:
buffer_size = math_ops.maximum(
shape(matching_files, out_type=dtypes.int64)[0], 1)
# Use stateless shuffled dataset
dataset = dataset.apply(stateless_shuffle_dataset(buffer_size, seed=seed))
return dataset
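# Illustrative end-to-end sketch (not from the original source; the file
# pattern and seed are made-up placeholder values): chaining the stateless
# helpers so the result still works with the TF1-compatible one-shot iterator.
#
#   ds = stateless_list_files("/tmp/data/*.tfrecord", seed=42)  # stateless shuffle
#   ds = ds.apply(stateless_cache_dataset())
#   it = dataset_ops.make_one_shot_iterator(ds)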
# pylint: enable=undefined-variable, used-before-assignment
# TF 1.x symbols used in the codebase.
# To keep this list short, please use TF 2.x API whenever applicable.
# Only use TF 1.x API if it has no 2.x equivalent.
# pylint: disable=undefined-variable
add_to_collection = tf1.add_to_collection
all_variables = tf1.global_variables
# The following asserts can be directly replaced with TF2 `tf.debugging.*`
# after TF2/eager is enabled.
app = tf1.app
assert_integer = tf1.assert_integer
assert_positive = tf1.assert_positive
assert_type = tf1.assert_type
assert_scalar = tf1.assert_scalar
assign = tf1.assign
assign_add = tf1.assign_add
assign_sub = tf1.assign_sub
AUTO_REUSE = tf1.AUTO_REUSE
container = tf1.container
data.Dataset = tf1.data.Dataset
data.TFRecordDataset = tf1.data.TFRecordDataset
device = tf1.device
Dimension = tf1.Dimension
disable_eager_execution = tf1.disable_eager_execution
div = tf1.div
enable_eager_execution = tf1.enable_eager_execution
executing_eagerly_outside_functions = tf1.executing_eagerly_outside_functions
floor_div = tf1.floor_div
get_collection = tf1.get_collection
get_collection_ref = tf1.get_collection_ref
get_default_graph = tf1.get_default_graph
get_local_variable = tf1.get_local_variable
get_seed = tf1.get_seed
get_variable = tf1.get_variable
get_variable_scope = tf1.get_variable_scope
global_variables = tf1.global_variables
global_variables_initializer = tf1.global_variables_initializer
gradients = tf1.gradients
graph_util.convert_variables_to_constants = (
tf1.graph_util.convert_variables_to_constants)
graph_util.extract_sub_graph = tf1.graph_util.extract_sub_graph
GraphDef = tf1.GraphDef
GraphKeys = tf1.GraphKeys
GraphOptions = tf1.GraphOptions
group = tf1.group
image.resize_bilinear = tf1.image.resize_bilinear
image.resize_images = tf1.image.resize_images
image.resize_nearest_neighbor = tf1.image.resize_nearest_neighbor
initialize_all_tables = tf1.initialize_all_tables
InteractiveSession = tf1.InteractiveSession
io.tf_record_iterator = tf1.io.tf_record_iterator
is_variable_initialized = tf1.is_variable_initialized
layers = tf1.layers
local_variables_initializer = tf1.local_variables_initializer
losses.absolute_difference = tf1.losses.absolute_difference
losses.add_loss = tf1.losses.add_loss
losses.compute_weighted_loss = tf1.losses.compute_weighted_loss
losses.get_regularization_loss = tf1.losses.get_regularization_loss
losses.huber_loss = tf1.losses.huber_loss
losses.mean_squared_error = tf1.losses.mean_squared_error
losses.Reduction.MEAN = tf1.losses.Reduction.MEAN
losses.Reduction.SUM = tf1.losses.Reduction.SUM
losses.sigmoid_cross_entropy = tf1.losses.sigmoid_cross_entropy
losses.softmax_cross_entropy = tf1.losses.softmax_cross_entropy
losses.sparse_softmax_cross_entropy = (tf1.losses.sparse_softmax_cross_entropy)
make_template = tf1.make_template
metrics.accuracy = tf1.metrics.accuracy
metrics.auc = tf1.metrics.auc
metrics.precision = tf1.metrics.precision
metrics.recall = tf1.metrics.recall
moving_average_variables = tf1.moving_average_variables
multinomial = tf1.multinomial
name_scope = tf1.name_scope
OptimizerOptions = tf1.OptimizerOptions
placeholder = tf1.placeholder
placeholder_with_default = tf1.placeholder_with_default
Print = tf1.Print
py_func = tf1.py_func
python_io = tf1.python_io
report_uninitialized_variables = tf1.report_uninitialized_variables
reset_default_graph = tf1.reset_default_graph
resource_loader = tf1.resource_loader
RunMetadata = tf1.RunMetadata
RunOptions = tf1.RunOptions
saved_model.build_signature_def = tf1.saved_model.build_signature_def
saved_model.Builder = tf1.saved_model.Builder
saved_model.load_v2 = saved_model.load
saved_model.load = tf1.saved_model.load
saved_model.loader = tf1.saved_model.loader
saved_model.signature_constants = tf1.saved_model.signature_constants
saved_model.simple_save = tf1.saved_model.simple_save
saved_model.tag_constants = tf1.saved_model.tag_constants
saved_model.utils = tf1.saved_model.utils
Session = tf1.Session
sparse_to_dense = tf1.sparse_to_dense
string_split = tf1.string_split
strings.reduce_join = tf1.reduce_join
strings.split = tf1.strings.split
Summary = tf1.Summary
if tf1.summary is not None:
# tf.summary is not supported on TPU so we sometimes set tf.summary to None
# to prohibit its direct use.
# It is safe to skip copying tf.summary members in such cases.
summary.audio = tf1.summary.audio
summary.FileWriter = tf1.summary.FileWriter
summary.histogram = tf1.summary.histogram
summary.image = tf1.summary.image
summary.merge = tf1.summary.merge
summary.merge_all = tf1.summary.merge_all
summary.scalar = tf1.summary.scalar
summary.text = tf1.summary.text
summary.Summary = tf1.summary.Summary
summary.Summary.FromString = tf1.summary.Summary.FromString
tables_initializer = tf1.tables_initializer
test.compute_gradient_error = tf1.test.compute_gradient_error
test.get_temp_dir = tf1.test.get_temp_dir
test.mock = tf1.test.mock
tpu = tf1.tpu
train.AdadeltaOptimizer = tf1.train.AdadeltaOptimizer
train.AdagradOptimizer = tf1.train.AdagradOptimizer
train.AdamOptimizer = tf1.train.AdamOptimizer
train.export_meta_graph = tf1.train.export_meta_graph
train.get_or_create_global_step = tf1.train.get_or_create_global_step
train.get_global_step = tf1.train.get_global_step
train.GradientDescentOptimizer = tf1.train.GradientDescentOptimizer
train.MomentumOptimizer = tf1.train.MomentumOptimizer
train.MonitoredTrainingSession = tf1.train.MonitoredTrainingSession
train.NewCheckpointReader = tf1.train.NewCheckpointReader
train.Optimizer = tf1.train.Optimizer
train.RMSPropOptimizer = tf1.train.RMSPropOptimizer
train.Saver = tf1.train.Saver
train.SaverDef = tf1.train.SaverDef
train.summary_iterator = tf1.train.summary_iterator
trainable_variables = tf1.trainable_variables
Variable = tf1.Variable
variables_initializer = tf1.variables_initializer
VariableScope = tf1.VariableScope
variance_scaling_initializer = tf1.variance_scaling_initializer
variable_scope = tf1.variable_scope
where = tf1.where
while_loop = tf1.while_loop
wrap_function = tf1.wrap_function
convert_to_tensor_or_indexed_slices = tf1.convert_to_tensor_or_indexed_slices
# Explicit 1.x symbol import.
data.make_initializable_iterator = dataset_ops.make_initializable_iterator
data.make_one_shot_iterator = dataset_ops.make_one_shot_iterator
# For `nn.embedding_lookup` and `nn.embedding_lookup_sparse`, v2 doesn't have
# the arg 'partition_strategy' in the API, and uses 'partition_strategy="div"'
# by default; while v1 uses 'partition_strategy="mod"' by default.
# Keep this for now.
nn.embedding_lookup = embedding_ops.embedding_lookup
nn.embedding_lookup_sparse = embedding_ops.embedding_lookup_sparse
# pylint: enable=undefined-variable
| apache-2.0 | 2,149,067,392,667,626,000 | 39.698376 | 95 | 0.743002 | false |
flavour/eden | modules/templates/SAFIRE/Urgences-Sante/menus.py | 9 | 4745 | # -*- coding: utf-8 -*-
from gluon import current
from s3 import *
from s3layouts import *
try:
from .layouts import *
except ImportError:
pass
import s3menus as default
# =============================================================================
class S3MainMenu(default.S3MainMenu):
""" Custom Application Main Menu """
# -------------------------------------------------------------------------
@classmethod
def menu_modules(cls):
""" Custom Modules Menu """
menu= [MM("Call Logs", c="event", f="incident_report"),
MM("Incidents", c="event", f="incident", m="summary"),
MM("Scenarios", c="event", f="scenario"),
MM("more", link=False)(
MM("Documents", c="doc", f="document"),
MM("Events", c="event", f="event"),
MM("Staff", c="hrm", f="staff"),
MM("Volunteers", c="vol", f="volunteer"),
MM("Assets", c="asset", f="asset"),
MM("Organizations", c="org", f="organisation"),
MM("Facilities", c="org", f="facility"),
#MM("Hospitals", c="hms", f="hospital", m="summary"),
MM("Shelters", c="cr", f="shelter"),
MM("Warehouses", c="inv", f="warehouse"),
MM("Item Catalog", c="supply", f="catalog_item"),
),
]
return menu
# -------------------------------------------------------------------------
@classmethod
def menu_auth(cls, **attr):
"""
Auth Menu
- switch Login to use OpenID Connect
"""
auth = current.auth
logged_in = auth.is_logged_in()
settings = current.deployment_settings
if not logged_in:
request = current.request
login_next = URL(args=request.args, vars=request.vars)
if request.controller == "default" and \
request.function == "user" and \
"_next" in request.get_vars:
login_next = request.get_vars["_next"]
self_registration = settings.get_security_registration_visible()
if self_registration == "index":
register = MM("Register", c="default", f="index", m="register",
vars=dict(_next=login_next),
check=self_registration)
else:
register = MM("Register", m="register",
vars=dict(_next=login_next),
check=self_registration)
if settings.get_auth_password_changes() and \
settings.get_auth_password_retrieval():
lost_pw = MM("Lost Password", m="retrieve_password")
else:
lost_pw = None
menu_auth = MM("Login", c="default", f="openid_connect", m="login",
_id="auth_menu_login",
vars=dict(_next=login_next), **attr)(
MM("Login", m="login",
vars=dict(_next=login_next)),
register,
lost_pw,
)
else:
# Logged-in
if settings.get_auth_password_changes():
change_pw = MM("Change Password", m="change_password")
else:
change_pw = None
menu_auth = MM(auth.user.email, c="default", f="user",
translate=False, link=False, _id="auth_menu_email",
**attr)(
MM("Logout", m="logout", _id="auth_menu_logout"),
MM("User Profile", m="profile"),
MM("Personal Data", c="default", f="person", m="update"),
MM("Contact Details", c="pr", f="person",
args="contact",
vars={"person.pe_id" : auth.user.pe_id}),
#MM("Subscriptions", c="pr", f="person",
# args="pe_subscription",
# vars={"person.pe_id" : auth.user.pe_id}),
change_pw,
SEP(),
MM({"name": current.T("Rapid Data Entry"),
"id": "rapid_toggle",
"value": current.session.s3.rapid_data_entry is True},
f="rapid"),
)
return menu_auth
# END =========================================================================
| mit | 6,823,988,331,201,383,000 | 40.26087 | 85 | 0.406322 | false |
brian-rose/climlab | climlab/radiation/aplusbt.py | 1 | 11588 | from __future__ import division
from climlab.process.energy_budget import EnergyBudget
from climlab.utils import constants as const
import numpy as np
class AplusBT(EnergyBudget):
r"""The simplest linear longwave radiation module.
Calculates the Outgoing Longwave Radation (OLR) :math:`R\uparrow` as
.. math::
R\uparrow = A + B \cdot T
where :math:`T` is the state variable.
Should be invoked with a single temperature state variable only.
**Initialization parameters** \n
An instance of ``AplusBT`` is initialized with the following
arguments:
:param float A: parameter for linear OLR parametrization \n
- unit: :math:`\frac{\textrm{W}}{\textrm{m}^2}` \n
- default value: ``200.0``
:param float B: parameter for linear OLR parametrization \n
- unit: :math:`\frac{\textrm{W}}{\textrm{m}^2 \, {}^{\circ}\textrm{C}}` \n
- default value: ``2.0``
**Object attributes** \n
In addition to those of the parent class :class:`~climlab.process.energy_budget.EnergyBudget`,
the following object attributes are generated or modified during initialization:
:ivar float A: calls the setter function of :func:`A`
:ivar float B: calls the setter function of :func:`B`
:ivar dict diagnostics: key ``'OLR'`` initialized with value:
:class:`~climlab.domain.field.Field` of zeros
in size of ``self.Ts``
:ivar Field OLR: the subprocess attribute ``self.OLR`` is
created with correct dimensions
.. warning::
This module currently works only for a single state variable!
:Example:
Simple linear radiation module (stand alone)::
>>> import climlab
>>> # create a column atmosphere and scalar surface
>>> sfc, atm = climlab.domain.single_column()
>>> # Create a state variable
>>> Ts = climlab.Field(15., domain=sfc)
>>> # Make a dictionary of state variables
>>> s = {'Ts': Ts}
>>> # create process
>>> olr = climlab.radiation.AplusBT(state=s)
>>> print olr
climlab Process of type <class 'climlab.radiation.AplusBT.AplusBT'>.
State variables and domain shapes:
Ts: (1,)
The subprocess tree:
top: <class 'climlab.radiation.AplusBT.AplusBT'>
>>> # to compute tendencies and diagnostics
>>> olr.compute()
>>> # or to actually update the temperature
>>> olr.step_forward()
>>> print olr.state
{'Ts': Field([ 5.69123176])}
"""
def __init__(self, A=200., B=2., **kwargs):
super(AplusBT, self).__init__(**kwargs)
self.A = A
self.B = B
self.add_diagnostic('OLR', 0. * self.Ts)
@property
def A(self):
"""Property of AplusBT parameter A.
:getter: Returns the parameter A which is stored in attribute
``self._A``
:setter: * sets parameter A which is addressed as ``self._A``
to the new value
* updates the parameter dictionary ``self.param['A']``
:type: float
:Example:
::
>>> import climlab
>>> model = climlab.EBM()
>>> # getter
>>> model.subprocess['LW'].A
210.0
>>> # setter
>>> model.subprocess['LW'].A = 220
>>> # getter again
>>> model.subprocess['LW'].A
220
>>> # subprocess parameter dictionary
>>> model.subprocess['LW'].param['A']
220
"""
return self._A
@A.setter
def A(self, value):
self._A = value
self.param['A'] = value
@property
def B(self):
"""Property of AplusBT parameter B.
:getter: Returns the parameter B which is stored in attribute
``self._B``
:setter: * sets parameter B which is addressed as ``self._B``
to the new value
* updates the parameter dictionary ``self.param['B']``
:type: float
"""
return self._B
@B.setter
def B(self, value):
self._B = value
self.param['B'] = value
def _compute_emission(self):
for varname, value in self.state.items():
self.OLR[:] = self.A + self.B * value
def _compute_heating_rates(self):
'''Compute energy flux convergences to get heating rates in :math:`W/m^2`,'''
self._compute_emission()
for varname, value in self.state.items():
self.heating_rate[varname] = -self.OLR
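# Illustrative check (not part of the original module): with the default
# parameters A = 200 W/m2 and B = 2 W/(m2 degC), a state temperature of
# 15 degC gives OLR = 200 + 2 * 15 = 230 W/m2, i.e. the linear
# parametrization R_up = A + B * T used above.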
class AplusBT_CO2(EnergyBudget):
"""Linear longwave radiation module considering CO2 concentration.
This radiation subprocess is based in the idea to linearize the Outgoing
Longwave Radiation (OLR) emitted to space according to the surface temperature
(see :class:`AplusBT`).
To account for the change of the greenhouse effect over a range of
:math:`CO_2` concentrations in the atmosphere, the parameters A and B are
computed as follows:
.. math::
A(c) = -326.4 + 9.161 c - 3.164 c^2 + 0.5468 c^3 \n
B(c) = 1.953 - 0.04866 c + 0.01309 c^2 - 0.002577 c^3
where :math:`c=\\log \\frac{p}{300}` and :math:`p` represents
the concentration of :math:`CO_2` in the atmosphere.
For further reading see :cite:`Caldeira_1992`.
**Initialization parameters** \n
An instance of ``AplusBT_CO2`` is initialized with the following
argument:
:param float CO2: The concentration of :math:`CO_2` in the atmosphere.
Referred to as :math:`p` in the above given formulas.\n
- unit: :math:`\\textrm{ppm}` (parts per million) \n
- default value: ``300.0``
**Object attributes** \n
In addition to those of the parent class :class:`~climlab.process.energy_budget.EnergyBudget`,
the following object attributes are generated or updated during initialization:
:ivar float CO2: calls the setter function of :func:`CO2`
:ivar dict diagnostics: the subprocess's diagnostic dictionary
``self.diagnostic`` is initialized
through calling
``self.add_diagnostic('OLR', 0. * self.Ts)``
:ivar Field OLR: the subprocess attribute ``self.OLR`` is
created with correct dimensions
:Example:
Replacing an the regular AplusBT subprocess in an energy balance model::
>>> import climlab
>>> from climlab.radiation.AplusBT import AplusBT_CO2
>>> # creating EBM model
>>> model = climlab.EBM()
>>> print model
.. code-block:: none
:emphasize-lines: 7
climlab Process of type <class 'climlab.model.ebm.EBM'>.
State variables and domain shapes:
Ts: (90, 1)
The subprocess tree:
top: <class 'climlab.model.ebm.EBM'>
diffusion: <class 'climlab.dynamics.diffusion.MeridionalDiffusion'>
LW: <class 'climlab.radiation.AplusBT.AplusBT'>
albedo: <class 'climlab.surface.albedo.StepFunctionAlbedo'>
iceline: <class 'climlab.surface.albedo.Iceline'>
cold_albedo: <class 'climlab.surface.albedo.ConstantAlbedo'>
warm_albedo: <class 'climlab.surface.albedo.P2Albedo'>
insolation: <class 'climlab.radiation.insolation.P2Insolation'>
::
>>> # creating and adding albedo feedback subprocess
>>> LW_CO2 = AplusBT_CO2(CO2=400, state=model.state, **model.param)
>>> # overwriting old 'LW' subprocess with same name
>>> model.add_subprocess('LW', LW_CO2)
>>> print model
.. code-block:: none
:emphasize-lines: 7
climlab Process of type <class 'climlab.model.ebm.EBM'>.
State variables and domain shapes:
Ts: (90, 1)
The subprocess tree:
top: <class 'climlab.model.ebm.EBM'>
diffusion: <class 'climlab.dynamics.diffusion.MeridionalDiffusion'>
LW: <class 'climlab.radiation.AplusBT.AplusBT_CO2'>
albedo: <class 'climlab.surface.albedo.StepFunctionAlbedo'>
iceline: <class 'climlab.surface.albedo.Iceline'>
cold_albedo: <class 'climlab.surface.albedo.ConstantAlbedo'>
warm_albedo: <class 'climlab.surface.albedo.P2Albedo'>
insolation: <class 'climlab.radiation.insolation.P2Insolation'>
"""
# implemented by m-kreuzer
def __init__(self, CO2=300., **kwargs):
super(AplusBT_CO2, self).__init__(**kwargs)
self.CO2 = CO2
#newdiags = ['OLR',]
#self.add_diagnostics(newdiags)
self.add_diagnostic('OLR', 0. * self.Ts)
@property
def CO2(self):
"""Property of AplusBT_CO2 parameter CO2.
:getter: Returns the CO2 concentration which is stored in attribute
``self._CO2``
:setter: * sets the CO2 concentration which is addressed as ``self._CO2``
to the new value
* updates the parameter dictionary ``self.param['CO2']``
:type: float
"""
return self._CO2
@CO2.setter
def CO2(self, value):
self._CO2 = value
self.param['CO2'] = value
# def emission(self):
# """Calculates the Outgoing Longwave Radiation (OLR) of the AplusBT_CO2
# subprocess.
#
# **Object attributes** \n
#
# During method execution following object attribute is modified:
#
# :ivar float OLR: the described formula is calculated and the
# result stored in the project attribute ``self.OLR``
# :ivar dict diagnostics: the same result is written in ``diagnostics``
# dictionary with the key ``'OLR'``
#
# .. warning::
#
# This method currently works only for a single state variable!
#
# """
# l = np.log(self.CO2/300.)
# A = -326.400 + 9.16100*l - 3.16400*l**2 + 0.546800*l**3
# B = 1.953 - 0.04866*l + 0.01309*l**2 - 0.002577*l**3
# for varname, value in self.state.iteritems():
# flux = A + B * (value + const.tempCtoK)
# self.OLR = flux
# self.diagnostics['OLR'] = self.OLR
def _compute_emission(self):
l = np.log(self.CO2/300.)
self.A = -326.400 + 9.16100*l - 3.16400*l**2 + 0.546800*l**3
self.B = 1.953 - 0.04866*l + 0.01309*l**2 - 0.002577*l**3
for varname, value in self.state.items():
self.OLR[:] = self.A + self.B * (value + const.tempCtoK)
def _compute_heating_rates(self):
"""Computes energy flux convergences to get heating rates in :math:`W/m^2`."""
self._compute_emission()
for varname, value in self.state.items():
self.heating_rate[varname] = -self.OLR
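# Illustrative check (not part of the original module): at the reference
# concentration CO2 = 300 ppm we get l = log(300/300) = 0, so the
# parametrization above reduces to A = -326.400 and B = 1.953.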
| mit | 5,655,771,523,168,865,000 | 34.987578 | 103 | 0.547722 | false |
tind/invenio-communities | invenio_communities/proxies.py | 1 | 2085 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Proxy definitions."""
from __future__ import absolute_import, print_function
from flask import current_app
from werkzeug.local import LocalProxy
from .permissions import (CommunityAdminActionNeed,
CommunityReadActionNeed,
CommunityManageActionNeed,
CommunityCurateActionNeed)
current_permission_factory = {
"communities-admin": LocalProxy(lambda:
current_app.extensions["invenio-communities"].admin_permission_factory),
"communities-read": LocalProxy(lambda:
current_app.extensions["invenio-communities"].read_permission_factory),
"communities-manage": LocalProxy(lambda:
current_app.extensions["invenio-communities"].manage_permission_factory),
"communities-curate": LocalProxy(lambda:
current_app.extensions["invenio-communities"].curate_permission_factory)
}
needs = {
"communities-admin": CommunityAdminActionNeed,
"communities-read": CommunityReadActionNeed,
"communities-manage": CommunityManageActionNeed,
"communities-curate": CommunityCurateActionNeed
}
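# Illustrative usage sketch (assumption, not from the original module; the
# factory call signature and the `community` object are placeholders): a view
# could look up the configured factory and check the permission, e.g.
#
#   permission = current_permission_factory["communities-read"](community)
#   if permission.can():
#       ...  # serve the community page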
| gpl-2.0 | 2,148,515,659,503,525,600 | 38.339623 | 81 | 0.733813 | false |
goblinhack/MundusMeus | python/things/road_snow.py | 1 | 1511 | import tp
import mm
def thing_init(t):
return
def road_snow_init(name, short_name, tiles=[]):
x = tp.Tp(name)
x.set_short_name(short_name)
x.set_is_road_snow(True)
x.set_is_world_item(True)
x.set_z_depth(mm.Z_DEPTH_ROAD)
if tiles is not None:
for t in tiles:
x.set_tile(t)
else:
x.set_tile(tile=name)
x.thing_init = thing_init
def init():
road_snow_init(name="road_snow",
short_name="Snowy road",
tiles=[
"road_snow.1",
"road_snow.2",
"road_snow.3",
"road_snow.4",
"road_snow.5",
"road_snow.6",
"road_snow.7",
"road_snow.8",
"road_snow.9",
"road_snow.10",
"road_snow.11",
"road_snow.12",
"road_snow.13",
"road_snow.14",
"road_snow.15",
"road_snow.16",
"road_snow.17",
"road_snow.18",
"road_snow.19",
"road_snow.20",
"road_snow.21",
"road_snow.22",
"road_snow.23",
])
init()
| lgpl-3.0 | -3,589,036,232,447,633,400 | 26.981481 | 47 | 0.338848 | false |
yosshy/bergenholm | tests/views/test_hosts.py | 1 | 3286 | import copy
from flask import json
from tests import base
from bergenholm.views import hosts
API = "/api/1.0/hosts/"
class HostsViewTestCase(base.TestCase):
def test_get_hosts(self):
expected = {u"hosts": [self.host_id]}
result = self.client.get(API)
self.assertEqual(result.status_code, 200)
self.assertEqual(json.loads(result.data), expected)
def test_get_host(self):
result = self.client.get(API + self.host_id)
self.assertEqual(result.status_code, 200)
self.assertEqual(json.loads(result.data), self.host_params)
def test_create_host(self):
result = self.client.post(API + self.host_id2,
data=json.dumps(self.host_params2),
headers=self.headers)
self.assertEqual(result.status_code, 201)
result = self.client.get(API + self.host_id2)
self.assertEqual(result.status_code, 200)
self.assertEqual(json.loads(result.data), self.host_params2)
def test_update_host(self):
result = self.client.put(API + self.host_id,
data=json.dumps(self.host_params2),
headers=self.headers)
self.assertEqual(result.status_code, 202)
result = self.client.get(API + self.host_id)
self.assertEqual(result.status_code, 200)
self.assertEqual(json.loads(result.data), self.host_params2)
def test_delete_host(self):
result = self.client.delete(API + self.host_id)
self.assertEqual(result.status_code, 204)
result = self.client.get(API + self.host_id)
self.assertEqual(result.status_code, 404)
def test_get_host_params(self):
expected = {
u'base_url': u'http://127.0.0.1',
u'groups': [u'ubuntu', u'default'],
u'hostname': u'test-200',
u'image_base_url': u'http://127.0.0.1/images',
u'ipaddr': u'192.168.10.200',
u'ipxe_script': u'ubuntu.temp',
u'kernel': u'http://127.0.0.1/images/linux',
u'kernel_opts': u'quiet',
u'mirror_host': u'jp.archive.ubuntu.com',
u'mirror_path': u'/ubuntu',
u'mirror_scheme': u'http',
u'module': u'http://127.0.0.1/images/initrd.gz',
u'module1': u'http://127.0.0.1/images/initrd1.gz',
u'power_driver': 'dummy',
u'test': u'test',
u'uuid': self.host_id}
result = self.client.get(API + self.host_id + "?params=all")
self.assertEqual(result.status_code, 200)
self.assertEqual(json.loads(result.data), expected)
def test_mark_host_installed(self):
params = copy.deepcopy(self.host_params)
params["groups"].append("installed")
result = self.client.get(API + self.host_id + "?installed=mark")
self.assertEqual(result.status_code, 200)
result = self.client.get(API + self.host_id)
self.assertEqual(json.loads(result.data), params)
result = self.client.get(API + self.host_id + "?installed=unmark")
self.assertEqual(result.status_code, 200)
result = self.client.get(API + self.host_id)
self.assertEqual(json.loads(result.data), self.host_params)
| apache-2.0 | -1,911,109,451,051,491,000 | 38.590361 | 74 | 0.593427 | false |
ow2-compatibleone/accords-platform | pyaccordsSDK/pycompdev/setup.py | 1 | 1286 | from distutils.core import setup, Extension
module1 = Extension('pycompdev',
sources = ['pycompdev.c','list.c','../../pyaccords/pysrc/ctools.c','../../pyaccords/pysrc/pytools.c'],
depends = ['component.c','list.c','../../pyaccords/pysrc/listoccibuilder.h','../../pyaccords/pysrc/*'],
include_dirs=['../../occi/src/','../../cocarrier/src/','../../cocci/src/','../../coes/src/','../../command/src/','../../copabr/src','../../cords/src/','../../corest/src','../../coxml/src/','../../pubocci/src/','../../pyaccords/pysrc/'],
libraries=['occi','pubocci','coxml','corest','cords','cocci','cocarrier','copabr'],
library_dirs=['../../occi/src/.libs/','../../pubocci/src/.libs/','../../coxml/src/.libs/','../../corest/src/.libs/',
'../../cords/src/.libs/','../../cocci/src/.libs/','../../cocarrier/src/.libs/','../../copabr/src/.libs/'
]
)
setup (name = 'pycompdev',
version = '1.0',
author = 'Hamid Medjahed',
author_email = '[email protected]',
url = 'www.compatibleone.org',
description = 'This is the pycompdev package of pyaccordsSDK',
ext_modules = [module1])
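# Typical build/install invocations for this distutils extension (illustrative,
# not part of the original file):
#
#   python setup.py build_ext --inplace
#   python setup.py install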
| apache-2.0 | 8,552,627,209,542,532,000 | 66.684211 | 257 | 0.498445 | false |
mtury/scapy | scapy/arch/__init__.py | 1 | 2813 | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <[email protected]>
# This program is published under a GPLv2 license
"""
Operating system specific functionality.
"""
from __future__ import absolute_import
import socket
import scapy.consts
from scapy.consts import LINUX, SOLARIS, WINDOWS, BSD
from scapy.error import Scapy_Exception
from scapy.config import conf, _set_conf_sockets
from scapy.pton_ntop import inet_pton, inet_ntop
from scapy.data import ARPHDR_ETHER, ARPHDR_LOOPBACK, IPV6_ADDR_GLOBAL
from scapy.compat import orb
def str2mac(s):
return ("%02x:" * 6)[:-1] % tuple(orb(x) for x in s)
if not WINDOWS:
if not conf.use_pcap and not conf.use_dnet:
from scapy.arch.bpf.core import get_if_raw_addr
def get_if_addr(iff):
return inet_ntop(socket.AF_INET, get_if_raw_addr(iff))
def get_if_hwaddr(iff):
addrfamily, mac = get_if_raw_hwaddr(iff) # noqa: F405
if addrfamily in [ARPHDR_ETHER, ARPHDR_LOOPBACK]:
return str2mac(mac)
else:
raise Scapy_Exception("Unsupported address family (%i) for interface [%s]" % (addrfamily, iff)) # noqa: E501
# Next step is to import following architecture specific functions:
# def get_if_raw_hwaddr(iff)
# def get_if_raw_addr(iff):
# def get_if_list():
# def get_working_if():
# def attach_filter(s, filter, iface):
# def set_promisc(s,iff,val=1):
# def read_routes():
# def read_routes6():
# def get_if(iff,cmd):
# def get_if_index(iff):
if LINUX:
from scapy.arch.linux import * # noqa F403
elif BSD:
from scapy.arch.unix import read_routes, read_routes6, in6_getifaddr # noqa: F401, E501
from scapy.arch.bpf.core import * # noqa F403
if not (conf.use_pcap or conf.use_dnet):
# Native
from scapy.arch.bpf.supersocket import * # noqa F403
conf.use_bpf = True
elif SOLARIS:
from scapy.arch.solaris import * # noqa F403
elif WINDOWS:
from scapy.arch.windows import * # noqa F403
from scapy.arch.windows.native import * # noqa F403
if conf.iface is None:
conf.iface = scapy.consts.LOOPBACK_INTERFACE
_set_conf_sockets() # Apply config
def get_if_addr6(iff):
"""
Returns the main global unicast address associated with provided
interface, in human readable form. If no global address is found,
None is returned.
"""
return next((x[0] for x in in6_getifaddr()
if x[2] == iff and x[1] == IPV6_ADDR_GLOBAL), None)
def get_if_raw_addr6(iff):
"""
Returns the main global unicast address associated with provided
interface, in network format. If no global address is found, None
is returned.
"""
ip6 = get_if_addr6(iff)
if ip6 is not None:
return inet_pton(socket.AF_INET6, ip6)
return None
| gpl-2.0 | -7,706,871,854,291,476,000 | 28.302083 | 117 | 0.685034 | false |
sergeneren/anima | anima/rig/curve.py | 1 | 2784 | # -*- coding: utf-8 -*-
# Copyright (c) 2012-2015, Anima Istanbul
#
# This module is part of anima-tools and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
import pymel.core as pm
class Curve(object):
def __init__(self, name_in, curve):
self._curveNode = pm.nt.Transform(pm.rename(curve, name_in))
self._curveInfo = self._create_curveInfo()
self._degree = None
self._spans = None
self._arcLen = None
self._numCVs = None
self._cvPositions = []
for j in range (0, self.numCVs):
self._cvPositions.append(pm.pointPosition(self.curveNode.cv[j], w = 1))
@property
# Curve Node Getter
def curveNode(self):
return self._curveNode
@property
# Curve Degree : self._degree Setter - Getter
def degree(self):
self._degree = pm.getAttr(self.curveNode.degree)
return self._degree
@degree.setter
def degree(self, degree):
self._degree = degree
@property
# Curve Spans : self._spans Setter - Getter
def spans(self):
self._spans = pm.getAttr(self.curveNode.spans)
return self._spans
@spans.setter
def spans(self, span):
self._spans = span
@property
# Number of CVs : self._numCVs Setter - Getter
def numCVs(self):
self._numCVs = self.degree + self.spans
return self._numCVs
@property
# CV Positions : Gets the positions of cvs
def cvPositions(self):
return self._cvPositions
@property
# CurveInfo Getter - Setter
def curveInfo(self):
return self._curveInfo
@curveInfo.setter
def curveInfo(self, infoNode):
self._curveInfo = infoNode
pm.connectAttr(self.curveNode.worldSpace, infoNode.inputCurve)
@property
# ArcLength of the Curve Getter
def arclen(self):
self._arcLen = pm.getAttr(self.curveInfo.arcLength)
return self._arcLen
def rebuildCurve(self, spans):
# Rebuild the curveNode
pm.rebuildCurve(self.curveNode, rpo = 1, rt = 0, end = 1, kr = 0, kcp = 0,
kep = 1, kt = 0, s = spans, d = 3, tol = 0.01)
del self._cvPositions[:]
for j in range (0, self.numCVs):
self._cvPositions.append(pm.pointPosition(self.curveNode.cv[j], w = 1))
def _create_curveInfo(self):
#create a new CurveInfo Node
self._curveInfo = pm.createNode("curveInfo", n= self._curveNode +
"_curveInfo")
pm.connectAttr(self._curveNode.worldSpace, self._curveInfo.inputCurve)
return self._curveInfo
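# Illustrative usage sketch (assumption, not from the original module; the
# curve name and CV positions are made up):
#
#   crv = Curve('spine_crv', pm.curve(p=[(0, 0, 0), (0, 2, 0), (0, 4, 0), (0, 6, 0)]))
#   print(crv.arclen)          # arc length read from the curveInfo node
#   crv.rebuildCurve(spans=8)  # rebuild and refresh the cached CV positions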
| bsd-2-clause | 7,706,875,388,871,063,000 | 26.701031 | 83 | 0.58046 | false |
vayw/stuffy | archivate.py | 1 | 1303 | #!/usr/bin/env python3
import time
import os
import shutil
import tarfile
import logging
HOWOLD = 1
basepath = 'E:/1CBufferDirectory/1CUTAxelot'
logging.basicConfig(filename=basepath + '/archivate.log', format='%(levelname)s:%(message)s', level=logging.INFO)
try:
if os.path.getsize(basepath + '/archivate.log') > 52428800:
os.remove(basepath + '/archivate.log')
except IOError:
logging.info('previous log not found')
logging.info('%s | starting..', time.strftime('%d.%m.%Y %H:%M', time.localtime()))
arch_dir = basepath + '/lucky_' + time.strftime('%d_%m_%Y-%H%M%S', time.localtime())
logging.info('creating temporary dir %s..', arch_dir)
os.mkdir(arch_dir)
now = time.time()
logging.info('moving qualified files to temporary dir..')
for i in os.listdir(basepath + '/lucky'):
cr_time = os.path.getctime(basepath + '/lucky/' + i)
days_old = (now - cr_time)/60/60/24
if days_old > HOWOLD:
logging.debug('moving', i, '..')
shutil.move(basepath + '/lucky/' + i, arch_dir)
logging.info('compressing directory..')
with tarfile.open(arch_dir + '.tar.bz2', 'w:bz2', compresslevel=9) as tar:
tar.add(arch_dir)
logging.info('removing temporary dir..')
shutil.rmtree(arch_dir)
logging.info('done at %s', time.strftime('%d.%m.%Y %H:%M', time.localtime()))
| gpl-3.0 | -90,586,186,703,380,210 | 30.02381 | 113 | 0.66769 | false |
tylerclair/py3canvas | py3canvas/apis/calendar_events.py | 1 | 51704 | """CalendarEvents API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from .base import BaseCanvasAPI
from .base import BaseModel
class CalendarEventsAPI(BaseCanvasAPI):
"""CalendarEvents API Version 1.0."""
def __init__(self, *args, **kwargs):
"""Init method for CalendarEventsAPI."""
super(CalendarEventsAPI, self).__init__(*args, **kwargs)
self.logger = logging.getLogger("py3canvas.CalendarEventsAPI")
def list_calendar_events(self, all_events=None, context_codes=None, end_date=None, excludes=None, start_date=None, type=None, undated=None):
"""
List calendar events.
Retrieve the list of calendar events or assignments for the current user
"""
path = {}
data = {}
params = {}
# OPTIONAL - type
"""Defaults to 'event'"""
if type is not None:
self._validate_enum(type, ["event", "assignment"])
params["type"] = type
# OPTIONAL - start_date
"""Only return events since the start_date (inclusive).
Defaults to today. The value should be formatted as: yyyy-mm-dd or ISO 8601 YYYY-MM-DDTHH:MM:SSZ."""
if start_date is not None:
params["start_date"] = start_date
# OPTIONAL - end_date
"""Only return events before the end_date (inclusive).
Defaults to start_date. The value should be formatted as: yyyy-mm-dd or ISO 8601 YYYY-MM-DDTHH:MM:SSZ.
If end_date is the same as start_date, then only events on that day are
returned."""
if end_date is not None:
params["end_date"] = end_date
# OPTIONAL - undated
"""Defaults to false (dated events only).
If true, only return undated events and ignore start_date and end_date."""
if undated is not None:
params["undated"] = undated
# OPTIONAL - all_events
"""Defaults to false (uses start_date, end_date, and undated criteria).
If true, all events are returned, ignoring start_date, end_date, and undated criteria."""
if all_events is not None:
params["all_events"] = all_events
# OPTIONAL - context_codes
"""List of context codes of courses/groups/users whose events you want to see.
If not specified, defaults to the current user (i.e personal calendar,
no course/group events). Limited to 10 context codes, additional ones are
ignored. The format of this field is the context type, followed by an
underscore, followed by the context id. For example: course_42"""
if context_codes is not None:
params["context_codes"] = context_codes
# OPTIONAL - excludes
"""Array of attributes to exclude. Possible values are "description", "child_events" and 'assignment'"""
if excludes is not None:
params["excludes"] = excludes
self.logger.debug("GET /api/v1/calendar_events with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/calendar_events".format(**path), data=data, params=params, all_pages=True)
def list_calendar_events_for_user(self, user_id, all_events=None, context_codes=None, end_date=None, excludes=None, start_date=None, type=None, undated=None):
"""
List calendar events for a user.
Retrieve the list of calendar events or assignments for the specified user.
To view calendar events for a user other than yourself,
you must either be an observer of that user or an administrator.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - user_id
"""ID"""
path["user_id"] = user_id
# OPTIONAL - type
"""Defaults to 'event'"""
if type is not None:
self._validate_enum(type, ["event", "assignment"])
params["type"] = type
# OPTIONAL - start_date
"""Only return events since the start_date (inclusive).
Defaults to today. The value should be formatted as: yyyy-mm-dd or ISO 8601 YYYY-MM-DDTHH:MM:SSZ."""
if start_date is not None:
params["start_date"] = start_date
# OPTIONAL - end_date
"""Only return events before the end_date (inclusive).
Defaults to start_date. The value should be formatted as: yyyy-mm-dd or ISO 8601 YYYY-MM-DDTHH:MM:SSZ.
If end_date is the same as start_date, then only events on that day are
returned."""
if end_date is not None:
params["end_date"] = end_date
# OPTIONAL - undated
"""Defaults to false (dated events only).
If true, only return undated events and ignore start_date and end_date."""
if undated is not None:
params["undated"] = undated
# OPTIONAL - all_events
"""Defaults to false (uses start_date, end_date, and undated criteria).
If true, all events are returned, ignoring start_date, end_date, and undated criteria."""
if all_events is not None:
params["all_events"] = all_events
# OPTIONAL - context_codes
"""List of context codes of courses/groups/users whose events you want to see.
If not specified, defaults to the current user (i.e personal calendar,
no course/group events). Limited to 10 context codes, additional ones are
ignored. The format of this field is the context type, followed by an
underscore, followed by the context id. For example: course_42"""
if context_codes is not None:
params["context_codes"] = context_codes
# OPTIONAL - excludes
"""Array of attributes to exclude. Possible values are "description", "child_events" and 'assignment'"""
if excludes is not None:
params["excludes"] = excludes
self.logger.debug("GET /api/v1/users/{user_id}/calendar_events with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/users/{user_id}/calendar_events".format(**path), data=data, params=params, all_pages=True)
def create_calendar_event(self, calendar_event_context_code, calendar_event_child_event_data_X_context_code=None, calendar_event_child_event_data_X_end_at=None, calendar_event_child_event_data_X_start_at=None, calendar_event_description=None, calendar_event_duplicate_append_iterator=None, calendar_event_duplicate_count=None, calendar_event_duplicate_frequency=None, calendar_event_duplicate_interval=None, calendar_event_end_at=None, calendar_event_location_address=None, calendar_event_location_name=None, calendar_event_start_at=None, calendar_event_time_zone_edited=None, calendar_event_title=None):
"""
Create a calendar event.
Create and return a new calendar event
"""
path = {}
data = {}
params = {}
# REQUIRED - calendar_event[context_code]
"""Context code of the course/group/user whose calendar this event should be
added to."""
data["calendar_event[context_code]"] = calendar_event_context_code
# OPTIONAL - calendar_event[title]
"""Short title for the calendar event."""
if calendar_event_title is not None:
data["calendar_event[title]"] = calendar_event_title
# OPTIONAL - calendar_event[description]
"""Longer HTML description of the event."""
if calendar_event_description is not None:
data["calendar_event[description]"] = calendar_event_description
# OPTIONAL - calendar_event[start_at]
"""Start date/time of the event."""
if calendar_event_start_at is not None:
if issubclass(calendar_event_start_at.__class__, str):
calendar_event_start_at = self._validate_iso8601_string(calendar_event_start_at)
elif issubclass(calendar_event_start_at.__class__, date) or issubclass(calendar_event_start_at.__class__, datetime):
calendar_event_start_at = calendar_event_start_at.strftime('%Y-%m-%dT%H:%M:%S+00:00')
data["calendar_event[start_at]"] = calendar_event_start_at
# OPTIONAL - calendar_event[end_at]
"""End date/time of the event."""
if calendar_event_end_at is not None:
if issubclass(calendar_event_end_at.__class__, str):
calendar_event_end_at = self._validate_iso8601_string(calendar_event_end_at)
elif issubclass(calendar_event_end_at.__class__, date) or issubclass(calendar_event_end_at.__class__, datetime):
calendar_event_end_at = calendar_event_end_at.strftime('%Y-%m-%dT%H:%M:%S+00:00')
data["calendar_event[end_at]"] = calendar_event_end_at
# OPTIONAL - calendar_event[location_name]
"""Location name of the event."""
if calendar_event_location_name is not None:
data["calendar_event[location_name]"] = calendar_event_location_name
# OPTIONAL - calendar_event[location_address]
"""Location address"""
if calendar_event_location_address is not None:
data["calendar_event[location_address]"] = calendar_event_location_address
# OPTIONAL - calendar_event[time_zone_edited]
"""Time zone of the user editing the event. Allowed time zones are
{http://www.iana.org/time-zones IANA time zones} or friendlier
{http://api.rubyonrails.org/classes/ActiveSupport/TimeZone.html Ruby on Rails time zones}."""
if calendar_event_time_zone_edited is not None:
data["calendar_event[time_zone_edited]"] = calendar_event_time_zone_edited
# OPTIONAL - calendar_event[child_event_data][X][start_at]
"""Section-level start time(s) if this is a course event. X can be any
identifier, provided that it is consistent across the start_at, end_at
and context_code"""
if calendar_event_child_event_data_X_start_at is not None:
if issubclass(calendar_event_child_event_data_X_start_at.__class__, str):
calendar_event_child_event_data_X_start_at = self._validate_iso8601_string(calendar_event_child_event_data_X_start_at)
elif issubclass(calendar_event_child_event_data_X_start_at.__class__, date) or issubclass(calendar_event_child_event_data_X_start_at.__class__, datetime):
calendar_event_child_event_data_X_start_at = calendar_event_child_event_data_X_start_at.strftime('%Y-%m-%dT%H:%M:%S+00:00')
data["calendar_event[child_event_data][X][start_at]"] = calendar_event_child_event_data_X_start_at
# OPTIONAL - calendar_event[child_event_data][X][end_at]
"""Section-level end time(s) if this is a course event."""
if calendar_event_child_event_data_X_end_at is not None:
if issubclass(calendar_event_child_event_data_X_end_at.__class__, str):
calendar_event_child_event_data_X_end_at = self._validate_iso8601_string(calendar_event_child_event_data_X_end_at)
elif issubclass(calendar_event_child_event_data_X_end_at.__class__, date) or issubclass(calendar_event_child_event_data_X_end_at.__class__, datetime):
calendar_event_child_event_data_X_end_at = calendar_event_child_event_data_X_end_at.strftime('%Y-%m-%dT%H:%M:%S+00:00')
data["calendar_event[child_event_data][X][end_at]"] = calendar_event_child_event_data_X_end_at
# OPTIONAL - calendar_event[child_event_data][X][context_code]
"""Context code(s) corresponding to the section-level start and end time(s)."""
if calendar_event_child_event_data_X_context_code is not None:
data["calendar_event[child_event_data][X][context_code]"] = calendar_event_child_event_data_X_context_code
# OPTIONAL - calendar_event[duplicate][count]
"""Number of times to copy/duplicate the event."""
if calendar_event_duplicate_count is not None:
data["calendar_event[duplicate][count]"] = calendar_event_duplicate_count
# OPTIONAL - calendar_event[duplicate][interval]
"""Defaults to 1 if duplicate `count` is set. The interval between the duplicated events."""
if calendar_event_duplicate_interval is not None:
data["calendar_event[duplicate][interval]"] = calendar_event_duplicate_interval
# OPTIONAL - calendar_event[duplicate][frequency]
"""Defaults to "weekly". The frequency at which to duplicate the event"""
if calendar_event_duplicate_frequency is not None:
self._validate_enum(calendar_event_duplicate_frequency, ["daily", "weekly", "monthly"])
data["calendar_event[duplicate][frequency]"] = calendar_event_duplicate_frequency
# OPTIONAL - calendar_event[duplicate][append_iterator]
"""Defaults to false. If set to `true`, an increasing counter number will be appended to the event title
when the event is duplicated. (e.g. Event 1, Event 2, Event 3, etc)"""
if calendar_event_duplicate_append_iterator is not None:
data["calendar_event[duplicate][append_iterator]"] = calendar_event_duplicate_append_iterator
self.logger.debug("POST /api/v1/calendar_events with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/calendar_events".format(**path), data=data, params=params, no_data=True)
def get_single_calendar_event_or_assignment(self, id):
"""
Get a single calendar event or assignment.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("GET /api/v1/calendar_events/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/calendar_events/{id}".format(**path), data=data, params=params, single_item=True)
def reserve_time_slot(self, id, cancel_existing=None, comments=None, participant_id=None):
"""
Reserve a time slot.
Reserves a particular time slot and return the new reservation
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - participant_id
"""User or group id for whom you are making the reservation (depends on the
participant type). Defaults to the current user (or user's candidate group)."""
if participant_id is not None:
data["participant_id"] = participant_id
# OPTIONAL - comments
"""Comments to associate with this reservation"""
if comments is not None:
data["comments"] = comments
# OPTIONAL - cancel_existing
"""Defaults to false. If true, cancel any previous reservation(s) for this
participant and appointment group."""
if cancel_existing is not None:
data["cancel_existing"] = cancel_existing
self.logger.debug("POST /api/v1/calendar_events/{id}/reservations with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/calendar_events/{id}/reservations".format(**path), data=data, params=params, no_data=True)
def reserve_time_slot_participant_id(self, id, participant_id, cancel_existing=None, comments=None):
"""
Reserve a time slot.
Reserves a particular time slot and return the new reservation
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# REQUIRED - PATH - participant_id
"""User or group id for whom you are making the reservation (depends on the
participant type). Defaults to the current user (or user's candidate group)."""
path["participant_id"] = participant_id
# OPTIONAL - comments
"""Comments to associate with this reservation"""
if comments is not None:
data["comments"] = comments
# OPTIONAL - cancel_existing
"""Defaults to false. If true, cancel any previous reservation(s) for this
participant and appointment group."""
if cancel_existing is not None:
data["cancel_existing"] = cancel_existing
self.logger.debug("POST /api/v1/calendar_events/{id}/reservations/{participant_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/calendar_events/{id}/reservations/{participant_id}".format(**path), data=data, params=params, no_data=True)
def update_calendar_event(self, id, calendar_event_child_event_data_X_context_code=None, calendar_event_child_event_data_X_end_at=None, calendar_event_child_event_data_X_start_at=None, calendar_event_context_code=None, calendar_event_description=None, calendar_event_end_at=None, calendar_event_location_address=None, calendar_event_location_name=None, calendar_event_start_at=None, calendar_event_time_zone_edited=None, calendar_event_title=None):
"""
Update a calendar event.
Update and return a calendar event
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - calendar_event[context_code]
"""Context code of the course/group/user to move this event to.
Scheduler appointments and events with section-specific times cannot be moved between calendars."""
if calendar_event_context_code is not None:
data["calendar_event[context_code]"] = calendar_event_context_code
# OPTIONAL - calendar_event[title]
"""Short title for the calendar event."""
if calendar_event_title is not None:
data["calendar_event[title]"] = calendar_event_title
# OPTIONAL - calendar_event[description]
"""Longer HTML description of the event."""
if calendar_event_description is not None:
data["calendar_event[description]"] = calendar_event_description
# OPTIONAL - calendar_event[start_at]
"""Start date/time of the event."""
if calendar_event_start_at is not None:
if issubclass(calendar_event_start_at.__class__, str):
calendar_event_start_at = self._validate_iso8601_string(calendar_event_start_at)
elif issubclass(calendar_event_start_at.__class__, date) or issubclass(calendar_event_start_at.__class__, datetime):
calendar_event_start_at = calendar_event_start_at.strftime('%Y-%m-%dT%H:%M:%S+00:00')
data["calendar_event[start_at]"] = calendar_event_start_at
# OPTIONAL - calendar_event[end_at]
"""End date/time of the event."""
if calendar_event_end_at is not None:
if issubclass(calendar_event_end_at.__class__, str):
calendar_event_end_at = self._validate_iso8601_string(calendar_event_end_at)
elif issubclass(calendar_event_end_at.__class__, date) or issubclass(calendar_event_end_at.__class__, datetime):
calendar_event_end_at = calendar_event_end_at.strftime('%Y-%m-%dT%H:%M:%S+00:00')
data["calendar_event[end_at]"] = calendar_event_end_at
# OPTIONAL - calendar_event[location_name]
"""Location name of the event."""
if calendar_event_location_name is not None:
data["calendar_event[location_name]"] = calendar_event_location_name
# OPTIONAL - calendar_event[location_address]
"""Location address"""
if calendar_event_location_address is not None:
data["calendar_event[location_address]"] = calendar_event_location_address
# OPTIONAL - calendar_event[time_zone_edited]
"""Time zone of the user editing the event. Allowed time zones are
{http://www.iana.org/time-zones IANA time zones} or friendlier
{http://api.rubyonrails.org/classes/ActiveSupport/TimeZone.html Ruby on Rails time zones}."""
if calendar_event_time_zone_edited is not None:
data["calendar_event[time_zone_edited]"] = calendar_event_time_zone_edited
# OPTIONAL - calendar_event[child_event_data][X][start_at]
"""Section-level start time(s) if this is a course event. X can be any
identifier, provided that it is consistent across the start_at, end_at
and context_code"""
if calendar_event_child_event_data_X_start_at is not None:
if issubclass(calendar_event_child_event_data_X_start_at.__class__, str):
calendar_event_child_event_data_X_start_at = self._validate_iso8601_string(calendar_event_child_event_data_X_start_at)
elif issubclass(calendar_event_child_event_data_X_start_at.__class__, date) or issubclass(calendar_event_child_event_data_X_start_at.__class__, datetime):
calendar_event_child_event_data_X_start_at = calendar_event_child_event_data_X_start_at.strftime('%Y-%m-%dT%H:%M:%S+00:00')
data["calendar_event[child_event_data][X][start_at]"] = calendar_event_child_event_data_X_start_at
# OPTIONAL - calendar_event[child_event_data][X][end_at]
"""Section-level end time(s) if this is a course event."""
if calendar_event_child_event_data_X_end_at is not None:
if issubclass(calendar_event_child_event_data_X_end_at.__class__, str):
calendar_event_child_event_data_X_end_at = self._validate_iso8601_string(calendar_event_child_event_data_X_end_at)
elif issubclass(calendar_event_child_event_data_X_end_at.__class__, date) or issubclass(calendar_event_child_event_data_X_end_at.__class__, datetime):
calendar_event_child_event_data_X_end_at = calendar_event_child_event_data_X_end_at.strftime('%Y-%m-%dT%H:%M:%S+00:00')
data["calendar_event[child_event_data][X][end_at]"] = calendar_event_child_event_data_X_end_at
# OPTIONAL - calendar_event[child_event_data][X][context_code]
"""Context code(s) corresponding to the section-level start and end time(s)."""
if calendar_event_child_event_data_X_context_code is not None:
data["calendar_event[child_event_data][X][context_code]"] = calendar_event_child_event_data_X_context_code
self.logger.debug("PUT /api/v1/calendar_events/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/calendar_events/{id}".format(**path), data=data, params=params, no_data=True)
def delete_calendar_event(self, id, cancel_reason=None):
"""
Delete a calendar event.
Delete an event from the calendar and return the deleted event
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - cancel_reason
"""Reason for deleting/canceling the event."""
if cancel_reason is not None:
params["cancel_reason"] = cancel_reason
self.logger.debug("DELETE /api/v1/calendar_events/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("DELETE", "/api/v1/calendar_events/{id}".format(**path), data=data, params=params, no_data=True)
def set_course_timetable(self, course_id, timetables_course_section_id=None, timetables_course_section_id_end_time=None, timetables_course_section_id_location_name=None, timetables_course_section_id_start_time=None, timetables_course_section_id_weekdays=None):
"""
Set a course timetable.
Creates and updates "timetable" events for a course.
        Can automatically generate a series of calendar events based on simple schedules
(e.g. "Monday and Wednesday at 2:00pm" )
Existing timetable events for the course and course sections
will be updated if they still are part of the timetable.
Otherwise, they will be deleted.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# OPTIONAL - timetables[course_section_id]
"""An array of timetable objects for the course section specified by course_section_id.
If course_section_id is set to "all", events will be created for the entire course."""
if timetables_course_section_id is not None:
data["timetables[course_section_id]"] = timetables_course_section_id
# OPTIONAL - timetables[course_section_id][weekdays]
"""A comma-separated list of abbreviated weekdays
(Mon-Monday, Tue-Tuesday, Wed-Wednesday, Thu-Thursday, Fri-Friday, Sat-Saturday, Sun-Sunday)"""
if timetables_course_section_id_weekdays is not None:
data["timetables[course_section_id][weekdays]"] = timetables_course_section_id_weekdays
# OPTIONAL - timetables[course_section_id][start_time]
"""Time to start each event at (e.g. "9:00 am")"""
if timetables_course_section_id_start_time is not None:
data["timetables[course_section_id][start_time]"] = timetables_course_section_id_start_time
# OPTIONAL - timetables[course_section_id][end_time]
"""Time to end each event at (e.g. "9:00 am")"""
if timetables_course_section_id_end_time is not None:
data["timetables[course_section_id][end_time]"] = timetables_course_section_id_end_time
# OPTIONAL - timetables[course_section_id][location_name]
"""A location name to set for each event"""
if timetables_course_section_id_location_name is not None:
data["timetables[course_section_id][location_name]"] = timetables_course_section_id_location_name
self.logger.debug("POST /api/v1/courses/{course_id}/calendar_events/timetable with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/calendar_events/timetable".format(**path), data=data, params=params, no_data=True)
def get_course_timetable(self, course_id):
"""
Get course timetable.
Returns the last timetable set by the
{api:CalendarEventsApiController#set_course_timetable Set a course timetable} endpoint
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
self.logger.debug("GET /api/v1/courses/{course_id}/calendar_events/timetable with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/calendar_events/timetable".format(**path), data=data, params=params, no_data=True)
def create_or_update_events_directly_for_course_timetable(self, course_id, course_section_id=None, events=None, events_code=None, events_end_at=None, events_location_name=None, events_start_at=None):
"""
Create or update events directly for a course timetable.
Creates and updates "timetable" events for a course or course section.
Similar to {api:CalendarEventsApiController#set_course_timetable setting a course timetable},
but instead of generating a list of events based on a timetable schedule,
this endpoint expects a complete list of events.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# OPTIONAL - course_section_id
"""Events will be created for the course section specified by course_section_id.
If not present, events will be created for the entire course."""
if course_section_id is not None:
data["course_section_id"] = course_section_id
# OPTIONAL - events
"""An array of event objects to use."""
if events is not None:
data["events"] = events
# OPTIONAL - events[start_at]
"""Start time for the event"""
if events_start_at is not None:
data["events[start_at]"] = events_start_at
# OPTIONAL - events[end_at]
"""End time for the event"""
if events_end_at is not None:
data["events[end_at]"] = events_end_at
# OPTIONAL - events[location_name]
"""Location name for the event"""
if events_location_name is not None:
data["events[location_name]"] = events_location_name
# OPTIONAL - events[code]
"""A unique identifier that can be used to update the event at a later time
If one is not specified, an identifier will be generated based on the start and end times"""
if events_code is not None:
data["events[code]"] = events_code
self.logger.debug("POST /api/v1/courses/{course_id}/calendar_events/timetable_events with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/calendar_events/timetable_events".format(**path), data=data, params=params, no_data=True)
class Calendarevent(BaseModel):
"""Calendarevent Model."""
def __init__(self, reserved=None, updated_at=None, group=None, participant_type=None, child_events_count=None, available_slots=None, id=None, reserve_url=None, location_name=None, title=None, end_at=None, appointment_group_id=None, context_code=None, hidden=None, start_at=None, description=None, child_events=None, workflow_state=None, effective_context_code=None, html_url=None, all_day_date=None, user=None, participants_per_appointment=None, parent_event_id=None, created_at=None, all_day=None, url=None, location_address=None, own_reservation=None, appointment_group_url=None, all_context_codes=None):
"""Init method for Calendarevent class."""
self._reserved = reserved
self._updated_at = updated_at
self._group = group
self._participant_type = participant_type
self._child_events_count = child_events_count
self._available_slots = available_slots
self._id = id
self._reserve_url = reserve_url
self._location_name = location_name
self._title = title
self._end_at = end_at
self._appointment_group_id = appointment_group_id
self._context_code = context_code
self._hidden = hidden
self._start_at = start_at
self._description = description
self._child_events = child_events
self._workflow_state = workflow_state
self._effective_context_code = effective_context_code
self._html_url = html_url
self._all_day_date = all_day_date
self._user = user
self._participants_per_appointment = participants_per_appointment
self._parent_event_id = parent_event_id
self._created_at = created_at
self._all_day = all_day
self._url = url
self._location_address = location_address
self._own_reservation = own_reservation
self._appointment_group_url = appointment_group_url
self._all_context_codes = all_context_codes
self.logger = logging.getLogger('py3canvas.Calendarevent')
@property
def reserved(self):
"""If the event is a time slot, a boolean indicating whether the user has already made a reservation for it."""
return self._reserved
@reserved.setter
def reserved(self, value):
"""Setter for reserved property."""
self.logger.warn("Setting values on reserved will NOT update the remote Canvas instance.")
self._reserved = value
@property
def updated_at(self):
"""When the calendar event was last updated."""
return self._updated_at
@updated_at.setter
def updated_at(self, value):
"""Setter for updated_at property."""
self.logger.warn("Setting values on updated_at will NOT update the remote Canvas instance.")
self._updated_at = value
@property
def group(self):
"""If the event is a group-level reservation, this will contain the group participant JSON (refer to the Groups API)."""
return self._group
@group.setter
def group(self, value):
"""Setter for group property."""
self.logger.warn("Setting values on group will NOT update the remote Canvas instance.")
self._group = value
@property
def participant_type(self):
"""The type of participant to sign up for a slot: 'User' or 'Group'."""
return self._participant_type
@participant_type.setter
def participant_type(self, value):
"""Setter for participant_type property."""
self.logger.warn("Setting values on participant_type will NOT update the remote Canvas instance.")
self._participant_type = value
@property
def child_events_count(self):
"""The number of child_events. See child_events (and parent_event_id)."""
return self._child_events_count
@child_events_count.setter
def child_events_count(self, value):
"""Setter for child_events_count property."""
self.logger.warn("Setting values on child_events_count will NOT update the remote Canvas instance.")
self._child_events_count = value
@property
def available_slots(self):
"""If the event is a time slot and it has a participant limit, an integer indicating how many slots are available."""
return self._available_slots
@available_slots.setter
def available_slots(self, value):
"""Setter for available_slots property."""
self.logger.warn("Setting values on available_slots will NOT update the remote Canvas instance.")
self._available_slots = value
@property
def id(self):
"""The ID of the calendar event."""
return self._id
@id.setter
def id(self, value):
"""Setter for id property."""
self.logger.warn("Setting values on id will NOT update the remote Canvas instance.")
self._id = value
@property
def reserve_url(self):
"""If the event is a time slot, the API URL for reserving it."""
return self._reserve_url
@reserve_url.setter
def reserve_url(self, value):
"""Setter for reserve_url property."""
self.logger.warn("Setting values on reserve_url will NOT update the remote Canvas instance.")
self._reserve_url = value
@property
def location_name(self):
"""The location name of the event."""
return self._location_name
@location_name.setter
def location_name(self, value):
"""Setter for location_name property."""
self.logger.warn("Setting values on location_name will NOT update the remote Canvas instance.")
self._location_name = value
@property
def title(self):
"""The title of the calendar event."""
return self._title
@title.setter
def title(self, value):
"""Setter for title property."""
self.logger.warn("Setting values on title will NOT update the remote Canvas instance.")
self._title = value
@property
def end_at(self):
"""The end timestamp of the event."""
return self._end_at
@end_at.setter
def end_at(self, value):
"""Setter for end_at property."""
self.logger.warn("Setting values on end_at will NOT update the remote Canvas instance.")
self._end_at = value
@property
def appointment_group_id(self):
"""Various Appointment-Group-related fields.These fields are only pertinent to time slots (appointments) and reservations of those time slots. See the Appointment Groups API. The id of the appointment group."""
return self._appointment_group_id
@appointment_group_id.setter
def appointment_group_id(self, value):
"""Setter for appointment_group_id property."""
self.logger.warn("Setting values on appointment_group_id will NOT update the remote Canvas instance.")
self._appointment_group_id = value
@property
def context_code(self):
"""the context code of the calendar this event belongs to (course, user or group)."""
return self._context_code
@context_code.setter
def context_code(self, value):
"""Setter for context_code property."""
self.logger.warn("Setting values on context_code will NOT update the remote Canvas instance.")
self._context_code = value
@property
def hidden(self):
"""Whether this event should be displayed on the calendar. Only true for course-level events with section-level child events."""
return self._hidden
@hidden.setter
def hidden(self, value):
"""Setter for hidden property."""
self.logger.warn("Setting values on hidden will NOT update the remote Canvas instance.")
self._hidden = value
@property
def start_at(self):
"""The start timestamp of the event."""
return self._start_at
@start_at.setter
def start_at(self, value):
"""Setter for start_at property."""
self.logger.warn("Setting values on start_at will NOT update the remote Canvas instance.")
self._start_at = value
@property
def description(self):
"""The HTML description of the event."""
return self._description
@description.setter
def description(self, value):
"""Setter for description property."""
self.logger.warn("Setting values on description will NOT update the remote Canvas instance.")
self._description = value
@property
def child_events(self):
"""Included by default, but may be excluded (see include[] option). If this is a time slot (see the Appointment Groups API) this will be a list of any reservations. If this is a course-level event, this will be a list of section-level events (if any)."""
return self._child_events
@child_events.setter
def child_events(self, value):
"""Setter for child_events property."""
self.logger.warn("Setting values on child_events will NOT update the remote Canvas instance.")
self._child_events = value
@property
def workflow_state(self):
"""Current state of the event ('active', 'locked' or 'deleted') 'locked' indicates that start_at/end_at cannot be changed (though the event could be deleted). Normally only reservations or time slots with reservations are locked (see the Appointment Groups API)."""
return self._workflow_state
@workflow_state.setter
def workflow_state(self, value):
"""Setter for workflow_state property."""
self.logger.warn("Setting values on workflow_state will NOT update the remote Canvas instance.")
self._workflow_state = value
@property
def effective_context_code(self):
"""if specified, it indicates which calendar this event should be displayed on. for example, a section-level event would have the course's context code here, while the section's context code would be returned above)."""
return self._effective_context_code
@effective_context_code.setter
def effective_context_code(self, value):
"""Setter for effective_context_code property."""
self.logger.warn("Setting values on effective_context_code will NOT update the remote Canvas instance.")
self._effective_context_code = value
@property
def html_url(self):
"""URL for a user to view this event."""
return self._html_url
@html_url.setter
def html_url(self, value):
"""Setter for html_url property."""
self.logger.warn("Setting values on html_url will NOT update the remote Canvas instance.")
self._html_url = value
@property
def all_day_date(self):
"""The date of this event."""
return self._all_day_date
@all_day_date.setter
def all_day_date(self, value):
"""Setter for all_day_date property."""
self.logger.warn("Setting values on all_day_date will NOT update the remote Canvas instance.")
self._all_day_date = value
@property
def user(self):
"""If the event is a user-level reservation, this will contain the user participant JSON (refer to the Users API)."""
return self._user
@user.setter
def user(self, value):
"""Setter for user property."""
self.logger.warn("Setting values on user will NOT update the remote Canvas instance.")
self._user = value
@property
def participants_per_appointment(self):
"""If the event is a time slot, this is the participant limit."""
return self._participants_per_appointment
@participants_per_appointment.setter
def participants_per_appointment(self, value):
"""Setter for participants_per_appointment property."""
self.logger.warn("Setting values on participants_per_appointment will NOT update the remote Canvas instance.")
self._participants_per_appointment = value
@property
def parent_event_id(self):
"""Normally null. If this is a reservation (see the Appointment Groups API), the id will indicate the time slot it is for. If this is a section-level event, this will be the course-level parent event."""
return self._parent_event_id
@parent_event_id.setter
def parent_event_id(self, value):
"""Setter for parent_event_id property."""
self.logger.warn("Setting values on parent_event_id will NOT update the remote Canvas instance.")
self._parent_event_id = value
@property
def created_at(self):
"""When the calendar event was created."""
return self._created_at
@created_at.setter
def created_at(self, value):
"""Setter for created_at property."""
self.logger.warn("Setting values on created_at will NOT update the remote Canvas instance.")
self._created_at = value
@property
def all_day(self):
"""Boolean indicating whether this is an all-day event (midnight to midnight)."""
return self._all_day
@all_day.setter
def all_day(self, value):
"""Setter for all_day property."""
self.logger.warn("Setting values on all_day will NOT update the remote Canvas instance.")
self._all_day = value
@property
def url(self):
"""URL for this calendar event (to update, delete, etc.)."""
return self._url
@url.setter
def url(self, value):
"""Setter for url property."""
self.logger.warn("Setting values on url will NOT update the remote Canvas instance.")
self._url = value
@property
def location_address(self):
"""The address where the event is taking place."""
return self._location_address
@location_address.setter
def location_address(self, value):
"""Setter for location_address property."""
self.logger.warn("Setting values on location_address will NOT update the remote Canvas instance.")
self._location_address = value
@property
def own_reservation(self):
"""If the event is a reservation, this a boolean indicating whether it is the current user's reservation, or someone else's."""
return self._own_reservation
@own_reservation.setter
def own_reservation(self, value):
"""Setter for own_reservation property."""
self.logger.warn("Setting values on own_reservation will NOT update the remote Canvas instance.")
self._own_reservation = value
@property
def appointment_group_url(self):
"""The API URL of the appointment group."""
return self._appointment_group_url
@appointment_group_url.setter
def appointment_group_url(self, value):
"""Setter for appointment_group_url property."""
self.logger.warn("Setting values on appointment_group_url will NOT update the remote Canvas instance.")
self._appointment_group_url = value
@property
def all_context_codes(self):
"""a comma-separated list of all calendar contexts this event is part of."""
return self._all_context_codes
@all_context_codes.setter
def all_context_codes(self, value):
"""Setter for all_context_codes property."""
self.logger.warn("Setting values on all_context_codes will NOT update the remote Canvas instance.")
self._all_context_codes = value
class Assignmentevent(BaseModel):
"""Assignmentevent Model."""
def __init__(self, start_at=None, description=None, title=None, url=None, assignment=None, created_at=None, workflow_state=None, html_url=None, end_at=None, updated_at=None, all_day_date=None, context_code=None, assignment_overrides=None, id=None, all_day=None):
"""Init method for Assignmentevent class."""
self._start_at = start_at
self._description = description
self._title = title
self._url = url
self._assignment = assignment
self._created_at = created_at
self._workflow_state = workflow_state
self._html_url = html_url
self._end_at = end_at
self._updated_at = updated_at
self._all_day_date = all_day_date
self._context_code = context_code
self._assignment_overrides = assignment_overrides
self._id = id
self._all_day = all_day
self.logger = logging.getLogger('py3canvas.Assignmentevent')
@property
def start_at(self):
"""The due_at timestamp of the assignment."""
return self._start_at
@start_at.setter
def start_at(self, value):
"""Setter for start_at property."""
self.logger.warn("Setting values on start_at will NOT update the remote Canvas instance.")
self._start_at = value
@property
def description(self):
"""The HTML description of the assignment."""
return self._description
@description.setter
def description(self, value):
"""Setter for description property."""
self.logger.warn("Setting values on description will NOT update the remote Canvas instance.")
self._description = value
@property
def title(self):
"""The title of the assignment."""
return self._title
@title.setter
def title(self, value):
"""Setter for title property."""
self.logger.warn("Setting values on title will NOT update the remote Canvas instance.")
self._title = value
@property
def url(self):
"""URL for this assignment (note that updating/deleting should be done via the Assignments API)."""
return self._url
@url.setter
def url(self, value):
"""Setter for url property."""
self.logger.warn("Setting values on url will NOT update the remote Canvas instance.")
self._url = value
@property
def assignment(self):
"""The full assignment JSON data (See the Assignments API)."""
return self._assignment
@assignment.setter
def assignment(self, value):
"""Setter for assignment property."""
self.logger.warn("Setting values on assignment will NOT update the remote Canvas instance.")
self._assignment = value
@property
def created_at(self):
"""When the assignment was created."""
return self._created_at
@created_at.setter
def created_at(self, value):
"""Setter for created_at property."""
self.logger.warn("Setting values on created_at will NOT update the remote Canvas instance.")
self._created_at = value
@property
def workflow_state(self):
"""Current state of the assignment ('published' or 'deleted')."""
return self._workflow_state
@workflow_state.setter
def workflow_state(self, value):
"""Setter for workflow_state property."""
self.logger.warn("Setting values on workflow_state will NOT update the remote Canvas instance.")
self._workflow_state = value
@property
def html_url(self):
"""URL for a user to view this assignment."""
return self._html_url
@html_url.setter
def html_url(self, value):
"""Setter for html_url property."""
self.logger.warn("Setting values on html_url will NOT update the remote Canvas instance.")
self._html_url = value
@property
def end_at(self):
"""The due_at timestamp of the assignment."""
return self._end_at
@end_at.setter
def end_at(self, value):
"""Setter for end_at property."""
self.logger.warn("Setting values on end_at will NOT update the remote Canvas instance.")
self._end_at = value
@property
def updated_at(self):
"""When the assignment was last updated."""
return self._updated_at
@updated_at.setter
def updated_at(self, value):
"""Setter for updated_at property."""
self.logger.warn("Setting values on updated_at will NOT update the remote Canvas instance.")
self._updated_at = value
@property
def all_day_date(self):
"""The due date of this assignment."""
return self._all_day_date
@all_day_date.setter
def all_day_date(self, value):
"""Setter for all_day_date property."""
self.logger.warn("Setting values on all_day_date will NOT update the remote Canvas instance.")
self._all_day_date = value
@property
def context_code(self):
"""the context code of the (course) calendar this assignment belongs to."""
return self._context_code
@context_code.setter
def context_code(self, value):
"""Setter for context_code property."""
self.logger.warn("Setting values on context_code will NOT update the remote Canvas instance.")
self._context_code = value
@property
def assignment_overrides(self):
"""The list of AssignmentOverrides that apply to this event (See the Assignments API). This information is useful for determining which students or sections this assignment-due event applies to."""
return self._assignment_overrides
@assignment_overrides.setter
def assignment_overrides(self, value):
"""Setter for assignment_overrides property."""
self.logger.warn("Setting values on assignment_overrides will NOT update the remote Canvas instance.")
self._assignment_overrides = value
@property
def id(self):
"""A synthetic ID for the assignment."""
return self._id
@id.setter
def id(self, value):
"""Setter for id property."""
self.logger.warn("Setting values on id will NOT update the remote Canvas instance.")
self._id = value
@property
def all_day(self):
"""Boolean indicating whether this is an all-day event (e.g. assignment due at midnight)."""
return self._all_day
@all_day.setter
def all_day(self, value):
"""Setter for all_day property."""
self.logger.warn("Setting values on all_day will NOT update the remote Canvas instance.")
self._all_day = value
| mit | 8,638,054,460,111,715,000 | 44.314636 | 610 | 0.64701 | false |
cuauv/software | mission/missions/old/2018/track.py | 1 | 1649 | #!/usr/bin/env python3
import math
from mission.framework.task import Task
from mission.framework.combinators import Sequential, Concurrent, MasterConcurrent, Retry, Conditional, While, Either
from mission.framework.movement import Heading, RelativeToInitialHeading, VelocityX, VelocityY, Depth, RelativeToCurrentHeading, RelativeToCurrentDepth
from mission.framework.primitive import Log, NoOp, Zero, Succeed, Fail, FunctionTask
from mission.framework.helpers import ConsistencyCheck, call_if_function
import shm
FLIP = True
last_sub_heading = 0
last_pinger_heading = 0
def shm_heading():
'''
SHM heading output by hydrophones in the range [-pi,pi].
If FLIP, then phase shift by pi radians.
'''
global last_sub_heading, last_pinger_heading
h = shm.hydrophones_results_track.tracked_ping_heading_radians.get()
print('raw: ' + str(h))
if FLIP:
h += math.pi
if h > math.pi:
h -= math.pi * 2
print('flipped: ' + str(h))
if h != last_pinger_heading:
last_sub_heading = math.radians(shm.kalman.heading.get())
last_pinger_heading = h
return h + last_sub_heading
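# Worked example of the flip-and-wrap above (illustrative numbers only): a raw
# ping heading of 2.5 rad with FLIP becomes 2.5 + pi ~= 5.64 rad, which exceeds
# pi and is wrapped down by 2*pi to ~= -0.64 rad, keeping the value in
# [-pi, pi] before the sub's heading at ping time is added back in.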
def get_desired_heading():
h = shm_heading()
if h < 0:
h += math.pi * 2
return math.degrees(h)
def get_desired_vel_x():
h = shm_heading()
if -math.pi/4 < h < math.pi/4:
return math.cos(h) * 0.3 # maybe change this function later?
else:
return 0
track = Sequential(
Depth(1.0),
While(
lambda: Sequential(
Heading(get_desired_heading),
VelocityX(get_desired_vel_x),
),
lambda: True
)
)
| bsd-3-clause | -2,502,780,342,940,534,000 | 25.596774 | 151 | 0.653123 | false |
turbokongen/home-assistant | homeassistant/components/harmony/remote.py | 1 | 9818 | """Support for Harmony Hub devices."""
import json
import logging
import voluptuous as vol
from homeassistant.components import remote
from homeassistant.components.remote import (
ATTR_ACTIVITY,
ATTR_DELAY_SECS,
ATTR_DEVICE,
ATTR_HOLD_SECS,
ATTR_NUM_REPEATS,
DEFAULT_DELAY_SECS,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_platform
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.restore_state import RestoreEntity
from .connection_state import ConnectionStateMixin
from .const import (
ACTIVITY_POWER_OFF,
ATTR_ACTIVITY_LIST,
ATTR_ACTIVITY_STARTING,
ATTR_CURRENT_ACTIVITY,
ATTR_DEVICES_LIST,
ATTR_LAST_ACTIVITY,
DOMAIN,
HARMONY_OPTIONS_UPDATE,
PREVIOUS_ACTIVE_ACTIVITY,
SERVICE_CHANGE_CHANNEL,
SERVICE_SYNC,
)
from .subscriber import HarmonyCallback
_LOGGER = logging.getLogger(__name__)
# We want to fire remote commands right away
PARALLEL_UPDATES = 0
ATTR_CHANNEL = "channel"
HARMONY_SYNC_SCHEMA = vol.Schema({vol.Optional(ATTR_ENTITY_ID): cv.entity_ids})
HARMONY_CHANGE_CHANNEL_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_CHANNEL): cv.positive_int,
}
)
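# Hedged usage sketch (not part of this module): a call to the change_channel
# service validated by the schema above would carry data shaped like
#   {"entity_id": "remote.living_room_hub", "channel": 505}
# where the entity id and channel number are purely illustrative.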
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities
):
"""Set up the Harmony config entry."""
data = hass.data[DOMAIN][entry.entry_id]
_LOGGER.debug("HarmonyData : %s", data)
default_activity = entry.options.get(ATTR_ACTIVITY)
delay_secs = entry.options.get(ATTR_DELAY_SECS, DEFAULT_DELAY_SECS)
harmony_conf_file = hass.config.path(f"harmony_{entry.unique_id}.conf")
device = HarmonyRemote(data, default_activity, delay_secs, harmony_conf_file)
async_add_entities([device])
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_SYNC,
HARMONY_SYNC_SCHEMA,
"sync",
)
platform.async_register_entity_service(
SERVICE_CHANGE_CHANNEL, HARMONY_CHANGE_CHANNEL_SCHEMA, "change_channel"
)
class HarmonyRemote(ConnectionStateMixin, remote.RemoteEntity, RestoreEntity):
"""Remote representation used to control a Harmony device."""
def __init__(self, data, activity, delay_secs, out_path):
"""Initialize HarmonyRemote class."""
super().__init__()
self._data = data
self._name = data.name
self._state = None
self._current_activity = ACTIVITY_POWER_OFF
self.default_activity = activity
self._activity_starting = None
self._is_initial_update = True
self.delay_secs = delay_secs
self._unique_id = data.unique_id
self._last_activity = None
self._config_path = out_path
async def _async_update_options(self, data):
"""Change options when the options flow does."""
if ATTR_DELAY_SECS in data:
self.delay_secs = data[ATTR_DELAY_SECS]
if ATTR_ACTIVITY in data:
self.default_activity = data[ATTR_ACTIVITY]
def _setup_callbacks(self):
callbacks = {
"connected": self.got_connected,
"disconnected": self.got_disconnected,
"config_updated": self.new_config,
"activity_starting": self.new_activity,
"activity_started": self._new_activity_finished,
}
self.async_on_remove(self._data.async_subscribe(HarmonyCallback(**callbacks)))
def _new_activity_finished(self, activity_info: tuple) -> None:
"""Call for finished updated current activity."""
self._activity_starting = None
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Complete the initialization."""
await super().async_added_to_hass()
_LOGGER.debug("%s: Harmony Hub added", self._name)
self.async_on_remove(self._clear_disconnection_delay)
self._setup_callbacks()
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"{HARMONY_OPTIONS_UPDATE}-{self.unique_id}",
self._async_update_options,
)
)
# Store Harmony HUB config, this will also update our current
# activity
await self.new_config()
# Restore the last activity so we know
        # what to turn on if nothing
# is specified
last_state = await self.async_get_last_state()
if not last_state:
return
if ATTR_LAST_ACTIVITY not in last_state.attributes:
return
if self.is_on:
return
self._last_activity = last_state.attributes[ATTR_LAST_ACTIVITY]
@property
def device_info(self):
"""Return device info."""
return self._data.device_info(DOMAIN)
@property
def unique_id(self):
"""Return the unique id."""
return self._unique_id
@property
def name(self):
"""Return the Harmony device's name."""
return self._name
@property
def should_poll(self):
"""Return the fact that we should not be polled."""
return False
@property
def device_state_attributes(self):
"""Add platform specific attributes."""
return {
ATTR_ACTIVITY_STARTING: self._activity_starting,
ATTR_CURRENT_ACTIVITY: self._current_activity,
ATTR_ACTIVITY_LIST: self._data.activity_names,
ATTR_DEVICES_LIST: self._data.device_names,
ATTR_LAST_ACTIVITY: self._last_activity,
}
@property
def is_on(self):
"""Return False if PowerOff is the current activity, otherwise True."""
return self._current_activity not in [None, "PowerOff"]
@property
def available(self):
"""Return True if connected to Hub, otherwise False."""
return self._data.available
def new_activity(self, activity_info: tuple) -> None:
"""Call for updating the current activity."""
activity_id, activity_name = activity_info
_LOGGER.debug("%s: activity reported as: %s", self._name, activity_name)
self._current_activity = activity_name
if self._is_initial_update:
self._is_initial_update = False
else:
self._activity_starting = activity_name
if activity_id != -1:
# Save the activity so we can restore
# to that activity if none is specified
# when turning on
self._last_activity = activity_name
self._state = bool(activity_id != -1)
self.async_write_ha_state()
async def new_config(self, _=None):
"""Call for updating the current activity."""
_LOGGER.debug("%s: configuration has been updated", self._name)
self.new_activity(self._data.current_activity)
await self.hass.async_add_executor_job(self.write_config_file)
async def async_turn_on(self, **kwargs):
"""Start an activity from the Harmony device."""
_LOGGER.debug("%s: Turn On", self.name)
activity = kwargs.get(ATTR_ACTIVITY, self.default_activity)
if not activity or activity == PREVIOUS_ACTIVE_ACTIVITY:
if self._last_activity:
activity = self._last_activity
else:
all_activities = self._data.activity_names
if all_activities:
activity = all_activities[0]
if activity:
await self._data.async_start_activity(activity)
else:
_LOGGER.error("%s: No activity specified with turn_on service", self.name)
async def async_turn_off(self, **kwargs):
"""Start the PowerOff activity."""
await self._data.async_power_off()
async def async_send_command(self, command, **kwargs):
"""Send a list of commands to one device."""
_LOGGER.debug("%s: Send Command", self.name)
device = kwargs.get(ATTR_DEVICE)
if device is None:
_LOGGER.error("%s: Missing required argument: device", self.name)
return
num_repeats = kwargs[ATTR_NUM_REPEATS]
delay_secs = kwargs.get(ATTR_DELAY_SECS, self.delay_secs)
hold_secs = kwargs[ATTR_HOLD_SECS]
await self._data.async_send_command(
command, device, num_repeats, delay_secs, hold_secs
)
async def change_channel(self, channel):
"""Change the channel using Harmony remote."""
await self._data.change_channel(channel)
async def sync(self):
"""Sync the Harmony device with the web service."""
if await self._data.sync():
await self.hass.async_add_executor_job(self.write_config_file)
def write_config_file(self):
"""Write Harmony configuration file.
This is a handy way for users to figure out the available commands for automations.
"""
_LOGGER.debug(
"%s: Writing hub configuration to file: %s", self.name, self._config_path
)
json_config = self._data.json_config
if json_config is None:
_LOGGER.warning("%s: No configuration received from hub", self.name)
return
try:
with open(self._config_path, "w+", encoding="utf-8") as file_out:
json.dump(json_config, file_out, sort_keys=True, indent=4)
except OSError as exc:
_LOGGER.error(
"%s: Unable to write HUB configuration to %s: %s",
self.name,
self._config_path,
exc,
)
| apache-2.0 | -8,381,512,501,906,454,000 | 32.394558 | 91 | 0.623956 | false |
briancurtin/python-openstacksdk | openstack/tests/examples/test_compute.py | 1 | 1689 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from examples.compute import create
from examples.compute import delete
from examples.compute import find as compute_find
from examples.compute import list as compute_list
from examples import connect
from examples.network import find as network_find
from examples.network import list as network_list
class TestCompute(unittest.TestCase):
"""Test the compute examples
The purpose of these tests is to ensure the examples run without erring
out.
"""
@classmethod
def setUpClass(cls):
cls.conn = connect.create_connection_from_config()
def test_compute(self):
compute_list.list_servers(self.conn)
compute_list.list_images(self.conn)
compute_list.list_flavors(self.conn)
compute_list.list_keypairs(self.conn)
network_list.list_networks(self.conn)
compute_find.find_image(self.conn)
compute_find.find_flavor(self.conn)
compute_find.find_keypair(self.conn)
network_find.find_network(self.conn)
create.create_server(self.conn)
delete.delete_keypair(self.conn)
delete.delete_server(self.conn)
| apache-2.0 | 8,154,398,348,642,976,000 | 32.78 | 75 | 0.730018 | false |
alabs/petateca | petateca/apps/serie/migrations/0002_auto__add_field_episode_title_en__add_field_episode_title_es__add_fiel.py | 1 | 9856 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Episode.title_en'
db.add_column('serie_episode', 'title_en', self.gf('django.db.models.fields.CharField')(null=True, max_length=255), keep_default=False)
# Adding field 'Episode.title_es'
db.add_column('serie_episode', 'title_es', self.gf('django.db.models.fields.CharField')(null=True, max_length=255), keep_default=False)
# Adding field 'Episode.description_en'
db.add_column('serie_episode', 'description_en', self.gf('django.db.models.fields.TextField')(null=True), keep_default=False)
# Adding field 'Episode.description_es'
db.add_column('serie_episode', 'description_es', self.gf('django.db.models.fields.TextField')(null=True), keep_default=False)
# Adding field 'Serie.name_en'
db.add_column('serie_serie', 'name_en', self.gf('django.db.models.fields.CharField')(null=True, max_length=255), keep_default=False)
# Adding field 'Serie.name_es'
db.add_column('serie_serie', 'name_es', self.gf('django.db.models.fields.CharField')(null=True, max_length=255), keep_default=False)
# Adding field 'Serie.description_en'
db.add_column('serie_serie', 'description_en', self.gf('django.db.models.fields.TextField')(null=True), keep_default=False)
# Adding field 'Serie.description_es'
db.add_column('serie_serie', 'description_es', self.gf('django.db.models.fields.TextField')(null=True), keep_default=False)
# Adding field 'Genre.name_en'
db.add_column('serie_genre', 'name_en', self.gf('django.db.models.fields.CharField')(null=True, max_length=25), keep_default=False)
# Adding field 'Genre.name_es'
db.add_column('serie_genre', 'name_es', self.gf('django.db.models.fields.CharField')(null=True, max_length=25), keep_default=False)
def backwards(self, orm):
# Deleting field 'Episode.title_en'
db.delete_column('serie_episode', 'title_en')
# Deleting field 'Episode.title_es'
db.delete_column('serie_episode', 'title_es')
# Deleting field 'Episode.description_en'
db.delete_column('serie_episode', 'description_en')
# Deleting field 'Episode.description_es'
db.delete_column('serie_episode', 'description_es')
# Deleting field 'Serie.name_en'
db.delete_column('serie_serie', 'name_en')
# Deleting field 'Serie.name_es'
db.delete_column('serie_serie', 'name_es')
# Deleting field 'Serie.description_en'
db.delete_column('serie_serie', 'description_en')
# Deleting field 'Serie.description_es'
db.delete_column('serie_serie', 'description_es')
# Deleting field 'Genre.name_en'
db.delete_column('serie_genre', 'name_en')
# Deleting field 'Genre.name_es'
db.delete_column('serie_genre', 'name_es')
models = {
'serie.actor': {
'Meta': {'object_name': 'Actor'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'serie.episode': {
'Meta': {'object_name': 'Episode'},
'air_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'created_time': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'description_en': ('django.db.models.fields.TextField', [], {}),
'description_es': ('django.db.models.fields.TextField', [], {}),
'episode': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_time': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'season': ('django.db.models.fields.IntegerField', [], {}),
'serie': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'episodes'", 'to': "orm['serie.Serie']"}),
'slug_title': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'title_en': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'title_es': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'serie.genre': {
'Meta': {'object_name': 'Genre'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'name_es': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'slug_name': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'serie.imageactor': {
'Meta': {'object_name': 'ImageActor'},
'actor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['serie.Actor']"}),
'creator': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_poster': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'src': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'serie.imageserie': {
'Meta': {'object_name': 'ImageSerie'},
'creator': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_poster': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'serie': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['serie.Serie']"}),
'src': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'serie.languages': {
'Meta': {'object_name': 'Languages'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iso_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2'})
},
'serie.link': {
'Meta': {'object_name': 'Link'},
'audio_lang': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'audio_langs'", 'to': "orm['serie.Languages']"}),
'episode': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'links'", 'to': "orm['serie.Episode']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'subtitle': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sub_langs'", 'null': 'True', 'to': "orm['serie.Languages']"}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'serie.network': {
'Meta': {'object_name': 'Network'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'serie.serie': {
'Meta': {'object_name': 'Serie'},
'actors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['serie.Actor']", 'symmetrical': 'False'}),
'description': ('django.db.models.fields.TextField', [], {}),
'description_en': ('django.db.models.fields.TextField', [], {}),
'description_es': ('django.db.models.fields.TextField', [], {}),
'genres': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'series'", 'symmetrical': 'False', 'to': "orm['serie.Genre']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name_es': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'series'", 'to': "orm['serie.Network']"}),
'runtime': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug_name': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'serie.subtitlelink': {
'Meta': {'object_name': 'SubtitleLink'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lang': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['serie.Languages']"}),
'link': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subtitles'", 'to': "orm['serie.Link']"}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['serie']
| agpl-3.0 | -8,935,656,780,571,769,000 | 58.017964 | 173 | 0.564834 | false |
Stanford-Online/edx-analytics-pipeline | edx/analytics/tasks/warehouse/tests/test_run_vertica_sql_script.py | 1 | 3109 | """
Ensure we can write to Vertica data sources.
"""
from __future__ import absolute_import
import textwrap
from unittest import TestCase
import luigi
import luigi.task
from mock import MagicMock, patch, sentinel
from edx.analytics.tasks.util.tests.target import FakeTarget
from edx.analytics.tasks.warehouse.run_vertica_sql_script import RunVerticaSqlScriptTask
class RunVerticaSqlScriptTaskTest(TestCase):
"""
Ensure we can connect to and write data to Vertica data sources.
"""
def setUp(self):
patcher = patch('edx.analytics.tasks.util.vertica_target.vertica_python.vertica')
self.mock_vertica_connector = patcher.start()
self.addCleanup(patcher.stop)
def create_task(self, credentials=None):
"""
Emulate execution of a generic RunVerticaSqlScriptTask.
"""
# Make sure to flush the instance cache so we create a new task object.
luigi.task.Register.clear_instance_cache()
task = RunVerticaSqlScriptTask(
credentials=sentinel.ignored,
script_name='my simple script',
source_script=sentinel.ignored,
)
if not credentials:
credentials = '''\
{
"host": "db.example.com",
"port": 5433,
"user": "exampleuser",
"password": "example password"
}'''
# This SQL doesn't actually run, but I've used real SQL to provide context. :)
source = '''
DELETE TABLE my_schema.my_table;
CREATE TABLE my_schema.my_table AS SELECT foo, bar, baz FROM my_schema.another_table;
'''
fake_input = {
'credentials': FakeTarget(value=textwrap.dedent(credentials)),
'source_script': FakeTarget(value=textwrap.dedent(source))
}
fake_output = MagicMock(return_value=self.mock_vertica_connector)
self.mock_vertica_connector.marker_schema = "name_of_marker_schema"
self.mock_vertica_connector.marker_table = "name_of_marker_table"
task.input = MagicMock(return_value=fake_input)
task.output = fake_output
return task
def test_run_with_default_credentials(self):
self.create_task(credentials='{}').run()
def test_run(self):
self.create_task().run()
mock_conn = self.mock_vertica_connector.connect()
self.assertTrue(mock_conn.cursor().execute.called)
self.assertFalse(mock_conn.rollback.called)
self.assertTrue(mock_conn.commit.called)
self.assertTrue(mock_conn.close.called)
def test_run_with_failure(self):
task = self.create_task()
task.output().touch = MagicMock(side_effect=Exception("Failed to update marker"))
with self.assertRaises(Exception):
task.run()
mock_conn = self.mock_vertica_connector.connect()
self.assertTrue(mock_conn.cursor().execute.called)
self.assertTrue(mock_conn.rollback.called)
self.assertFalse(mock_conn.commit.called)
self.assertTrue(mock_conn.close.called)
| agpl-3.0 | 1,487,603,360,284,520,700 | 34.735632 | 93 | 0.639756 | false |
im-liang/word-frequency-counter | app.py | 1 | 2361 | import sys
from os import listdir
from os.path import isfile, join, isdir, exists
from multiprocessing import Pool
import support
import const
def wordFrequency(fname):
"""
Analysis of word usage for the file
@param fname: file name
@return: word frequency dict
"""
counter = {}
content = support.convertFile(fname)
sentences = support.split2Centences(content)
for sentence in sentences:
words = support.split2Word(sentence)
for word in words:
if word in counter:
counter[word].append(sentence)
else:
counter[word] = [sentence]
return counter
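# Illustrative sketch of the returned structure (values are hypothetical):
# wordFrequency("sample.txt") could yield
#   {"the": ["The cat sat.", "The dog ran."], "cat": ["The cat sat."]}
# i.e. each word maps to the sentences containing it, so a word's frequency is
# simply len(counter[word]).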
def mostFrequentWordinFile(counter):
"""
    Print the most frequent word(s), their frequency, and the sentences where they are used, given a single word-frequency dict.
@param counter: word Frequency dict
"""
max_count = 0
result = {}
for word in counter:
if (len(counter[word]) > max_count):
result.clear()
result[word] = counter[word]
max_count = len(counter[word])
elif (len(counter[word]) == max_count):
result[word] = counter[word]
print "{:<15} {:<20} {:<20}".format('Word','Frequency','Sentences')
for current_word in result:
print "{:<15} {:<20} {:<20}".format(current_word, max_count, result[current_word])
def mostFrequentWordinDir(counters):
"""
    Print the most frequent word(s), their frequency, and the sentences where they are used, aggregated across multiple word-frequency dicts.
@param counters: word Frequency dicts
"""
total_counter = {}
for counter in counters:
for word in counter:
if word in total_counter:
total_counter[word] = total_counter[word] + counter[word]
else:
total_counter[word] = counter[word]
mostFrequentWordinFile(total_counter)
def main():
if isdir(sys.argv[1]):
files = [sys.argv[1] +'/' + f for f in listdir(sys.argv[1]) if isfile(join(sys.argv[1], f))]
pool = Pool(len(files))
counter = pool.map(wordFrequency, files)
mostFrequentWordinDir(counter)
elif exists(sys.argv[1]):
counter = wordFrequency(sys.argv[1])
mostFrequentWordinFile(counter)
else:
print const.INPUT_ERR_MSG
return
if __name__ == "__main__":
main()
| mit | 4,527,895,484,852,742,000 | 29.662338 | 116 | 0.615841 | false |
rahulunair/nova | nova/policies/server_topology.py | 1 | 1484 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
BASE_POLICY_NAME = 'compute:server:topology:%s'
server_topology_policies = [
policy.DocumentedRuleDefault(
BASE_POLICY_NAME % 'index',
base.RULE_ADMIN_OR_OWNER,
"Show the NUMA topology data for a server",
[
{
'method': 'GET',
'path': '/servers/{server_id}/topology'
}
]),
policy.DocumentedRuleDefault(
# Control host NUMA node and cpu pinning information
BASE_POLICY_NAME % 'host:index',
base.RULE_ADMIN_API,
"Show the NUMA topology data for a server with host NUMA ID and CPU "
"pinning information",
[
{
'method': 'GET',
'path': '/servers/{server_id}/topology'
}
]),
]
def list_rules():
return server_topology_policies
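# Hedged example (not part of this module): an operator wanting to expose the
# host NUMA/CPU-pinning view to server owners could override the rule composed
# from BASE_POLICY_NAME above in policy.yaml, e.g.
#   "compute:server:topology:host:index": "rule:admin_or_owner"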
| apache-2.0 | 9,100,945,390,354,771,000 | 29.916667 | 78 | 0.617925 | false |
3dfxsoftware/cbss-addons | account_bank_balance_report/report/account_bank_balance_report.py | 1 | 13133 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from tools.translate import _
import pooler
from openerp.addons.account_report_lib.account_report_base import accountReportbase
class Parser(accountReportbase):
def __init__(self, cursor, uid, name, context):
super(Parser, self).__init__(cursor, uid, name, context=context)
self.pool = pooler.get_pool(self.cr.dbname)
self.cursor = self.cr
self.localcontext.update({
'storage':{},
'cumul_balance': 0.0,
'get_bank_account': self.get_bank_account,
'get_period': self.get_period,
'display_account_name': self.display_account_name,
'account_has_move_lines': self.account_has_move_lines,
'messages': self.messages,
'return_balance_account':self.return_balance_account,
'display_symbol_account': self.display_symbol_account,
'update_cumul_balance': self.update_cumul_balance,
'reset_data': self.reset_data,
'get_cumul_balance':self.get_cumul_balance,
})
#=================== DISPLAY DATA ===================================
def messages(self):
message = _("For this account, doesn't exist move lines")
return message
def account_has_move_lines(self, account_id):
if account_id in self.localcontext['storage']['result'].keys():
if len(self.localcontext['storage']['result'][account_id]) > 0:
return True
else:
return False
def display_account_name(self, data, account_id):
str_name = ''
bank_account = self.get_bank_account(data)
if bank_account.default_credit_account_id and bank_account.default_debit_account_id:
if bank_account.default_credit_account_id.id == bank_account.default_debit_account_id.id:
str_name = bank_account.default_credit_account_id.code + ' - ' + bank_account.default_credit_account_id.name + ' - ' + bank_account.default_credit_account_id.currency_id.name
else:
if bank_account.default_credit_account_id:
if bank_account.default_credit_account_id.id == account_id:
str_name = _('Default credit account: ') + bank_account.default_credit_account_id.code + ' - ' + bank_account.default_credit_account_id.name + ' - ' + bank_account.default_credit_account_id.currency_id.name
elif bank_account.default_debit_account_id:
if bank_account.default_debit_account_id.id == account_id:
str_name = _('Default debit account: ') + bank_account.default_debit_account_id.code + ' - ' + bank_account.default_debit_account_id.name + ' - ' + bank_account.default_debit_account_id.currency_id.name
else:
if bank_account.default_credit_account_id:
if bank_account.default_credit_account_id.id == account_id:
str_name = _('Default credit account: ') + bank_account.default_credit_account_id.code + ' - ' + bank_account.default_credit_account_id.name + ' - ' + bank_account.default_credit_account_id.currency_id.name
elif bank_account.default_debit_account_id:
if bank_account.default_debit_account_id.id == account_id:
str_name = _('Default debit account: ') + bank_account.default_debit_account_id.code + ' - ' + bank_account.default_debit_account_id.name + ' - ' + bank_account.default_debit_account_id.currency_id.name
return str_name
def display_symbol_account(self, account_id):
account = self.pool.get('account.account').browse(self.cr, self.uid, account_id)
if account.currency_id:
return account.currency_id.symbol
else:
return ''
#=============== SET AND GET DATA ====================================#
def reset_data(self):
self.localcontext['storage']['cumul_balance'] = 0.0
return False
def get_cumul_balance(self):
return self.localcontext['storage']['cumul_balance']
def get_bank_account(self, data):
return self._get_info(data, 'res_partner_bank_ids', 'res.partner.bank')
def get_period(self, data):
return self._get_info(data, 'period_ids', 'account.period')
def get_currency_company(self):
return self.pool.get('res.users').browse(self.cr, self.uid, [self.uid])[0].company_id.currency_id
def different_currency(self, currency_id):
currency_company = self.get_currency_company()
if currency_company != currency_id:
return True
else:
return False
#Update cumul_balance when moving to the next line
def update_cumul_balance(self, line):
cumul_balance = self.localcontext['storage']['cumul_balance']
if line.currency_id:
if line.currency_id.id == self.get_currency_company().id:
cumul_balance = self.localcontext['storage']['cumul_balance'] + line.debit - line.credit
dict_update = {'cumul_balance': cumul_balance}
self.localcontext['storage'].update(dict_update)
else:
cumul_balance = self.localcontext['storage']['cumul_balance'] + line.amount_currency
dict_update = {'cumul_balance': cumul_balance}
self.localcontext['storage'].update(dict_update)
return cumul_balance
def set_data_template(self, data):
#Main dictionary
res = self.classified_move_lines(data)
dict_update = {'result': res,}
self.localcontext['storage'].update(dict_update)
return False
def return_balance_account(self, data, account_id):
#Depending on the account currency, return balance or foreign_balance
balance = self.get_initial_balance(data, account_id)
account = self.pool.get('account.account').browse(self.cr, self.uid, account_id)
currency_company = self.get_currency_company()
if account.currency_id:
if account.currency_id == currency_company:
#initialize cum_balance
dict_update = {'cumul_balance': balance[account_id]['balance']}
self.localcontext['storage'].update(dict_update)
return balance[account_id]['balance']
else:
#initialize cum_balance
dict_update = {'cumul_balance': balance[account_id]['foreign_balance']}
self.localcontext['storage'].update(dict_update)
return balance[account_id]['foreign_balance']
#=====================================================================#
#===================================================================
# Find move_lines that match default_credit_account_id or
# default_debit_account_id, are in state 'valid', and belong to the
# period selected in the wizard
#===================================================================
def process_move_lines(self, data):
account_ids = []
period = self.get_period(data)
bank_account = self.get_bank_account(data)
if bank_account.default_credit_account_id and bank_account.default_debit_account_id:
if bank_account.default_credit_account_id.id == bank_account.default_debit_account_id.id:
account_ids.append(bank_account.default_debit_account_id.id)
else:
account_ids.append(bank_account.default_credit_account_id.id)
account_ids.append(bank_account.default_debit_account_id.id)
elif bank_account.default_credit_account_id:
account_ids.append(bank_account.default_credit_account_id.id)
elif bank_account.default_debit_account_id:
account_ids.append(bank_account.default_debit_account_id.id)
move_lines_ids = self.pool.get('account.move.line').search(self.cr, self.uid, [('account_id','in',account_ids),('state', '=', 'valid'),('period_id','=',period.id)])
move_lines = self.pool.get('account.move.line').browse(self.cr, self.uid, move_lines_ids)
return move_lines
#=======================================================================
# Create a dictionary keyed by account id, where each account maps to
# its list of move lines
#=======================================================================
def classified_move_lines(self, data):
res = {}
#Get move_lines
move_lines = self.process_move_lines(data)
for line in move_lines:
#lines must have a account if they are included in list
#It is not necessary included a check with account
if line.account_id.id not in res:
res[line.account_id.id] = []
res[line.account_id.id].append(line)
return res
#=======================================================================
# Create a dictionary keyed by account id, where each account maps to
# its initial balance
#=======================================================================
def get_initial_balance(self, data, account_id):
account_balance = 0.0
library_obj = self.pool.get('account.webkit.report.library')
fiscal_year = self.get_fiscalyear(data)
account = self.pool.get('account.account').browse(self.cr, self.uid, account_id)
period = self.get_period(data)
currency_company = self.get_currency_company()
#Get the initial balance using the period before the selected one
previous_period = self.pool.get('account.period').get_start_previous_period(self.cr, self.uid, start_period=period, fiscal_year=fiscal_year)
if account.currency_id:
#Compare currencies: if the account currency differs from the company currency, get foreign_balance
if account.currency_id.id == currency_company.id:
account_balance = library_obj.get_account_balance(self.cr, self.uid,
[account_id],
['balance'],
initial_balance=True,
fiscal_year_id=fiscal_year.id,
start_period_id=previous_period,
end_period_id=previous_period,
filter_type='filter_period')
else:
account_balance = library_obj.get_account_balance(self.cr, self.uid,
[account_id],
['foreign_balance'],
initial_balance=True,
fiscal_year_id=fiscal_year.id,
start_period_id=previous_period,
end_period_id=previous_period,
filter_type='filter_period')
else:
account_balance = 0.0
return account_balance | gpl-2.0 | 6,034,979,615,341,734,000 | 51.536 | 230 | 0.521587 | false |
stepank/pyws | src/pyws/functions/managers.py | 1 | 2107 | from pyws.errors import BadFunction, FunctionNotFound,\
FunctionAlreadyRegistered
from pyws.functions import Function
class FunctionManager(object):
def get_one(self, context, name):
"""
Returns a function by its name if it is accessible in the context. If
it is not accessible or does not exist, raises
``pyws.errors.FunctionNotFound``. Read more about context in chaper
:doc:`context` and about functions in chapter :doc:`function`.
"""
raise NotImplementedError('FunctionManager.get_one')
def get_all(self, context):
"""
Returns a list of functions accessible in the context. Read more about
context in chapter :doc:`context` and about functions in chapter
:doc:`function`.
"""
raise NotImplementedError('FunctionManager.get_all')
class FixedFunctionManager(FunctionManager):
"""
A fixed function manager, it has a fixed set of functions.
"""
def __init__(self, *functions):
"""
``functions`` is a list of functions to be registered.
"""
self.functions = {}
for function in functions:
self.add_function(function)
def build_function(self, function):
if not isinstance(function, Function):
raise BadFunction(function)
return function
def add_function(self, function):
"""
Adds the function to the list of registered functions.
"""
function = self.build_function(function)
if function.name in self.functions:
raise FunctionAlreadyRegistered(function.name)
self.functions[function.name] = function
def get_one(self, context, name):
"""
Returns a function if it is registered, the context is ignored.
"""
try:
return self.functions[name]
except KeyError:
raise FunctionNotFound(name)
def get_all(self, context):
"""
Returns a list of registered functions, the context is ignored.
"""
return self.functions.values()
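# Minimal usage sketch (not part of the original module): `function` is
# assumed to be an instance of pyws.functions.Function; the fixed manager
# ignores the context argument, as documented above.
def _example_fixed_manager_usage(function):
    manager = FixedFunctionManager(function)
    return manager.get_one(context=None, name=function.name)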
| mit | -6,759,665,174,721,057,000 | 30.924242 | 78 | 0.627907 | false |
rbuffat/pyidf | tests/test_generatormicrochp.py | 1 | 3012 | import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.electric_load_center import GeneratorMicroChp
log = logging.getLogger(__name__)
class TestGeneratorMicroChp(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_generatormicrochp(self):
pyidf.validation_level = ValidationLevel.error
obj = GeneratorMicroChp()
# alpha
var_name = "Name"
obj.name = var_name
# object-list
var_performance_parameters_name = "object-list|Performance Parameters Name"
obj.performance_parameters_name = var_performance_parameters_name
# object-list
var_zone_name = "object-list|Zone Name"
obj.zone_name = var_zone_name
# node
var_cooling_water_inlet_node_name = "node|Cooling Water Inlet Node Name"
obj.cooling_water_inlet_node_name = var_cooling_water_inlet_node_name
# node
var_cooling_water_outlet_node_name = "node|Cooling Water Outlet Node Name"
obj.cooling_water_outlet_node_name = var_cooling_water_outlet_node_name
# node
var_air_inlet_node_name = "node|Air Inlet Node Name"
obj.air_inlet_node_name = var_air_inlet_node_name
# node
var_air_outlet_node_name = "node|Air Outlet Node Name"
obj.air_outlet_node_name = var_air_outlet_node_name
# object-list
var_generator_fuel_supply_name = "object-list|Generator Fuel Supply Name"
obj.generator_fuel_supply_name = var_generator_fuel_supply_name
# object-list
var_availability_schedule_name = "object-list|Availability Schedule Name"
obj.availability_schedule_name = var_availability_schedule_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.generatormicrochps[0].name, var_name)
self.assertEqual(idf2.generatormicrochps[0].performance_parameters_name, var_performance_parameters_name)
self.assertEqual(idf2.generatormicrochps[0].zone_name, var_zone_name)
self.assertEqual(idf2.generatormicrochps[0].cooling_water_inlet_node_name, var_cooling_water_inlet_node_name)
self.assertEqual(idf2.generatormicrochps[0].cooling_water_outlet_node_name, var_cooling_water_outlet_node_name)
self.assertEqual(idf2.generatormicrochps[0].air_inlet_node_name, var_air_inlet_node_name)
self.assertEqual(idf2.generatormicrochps[0].air_outlet_node_name, var_air_outlet_node_name)
self.assertEqual(idf2.generatormicrochps[0].generator_fuel_supply_name, var_generator_fuel_supply_name)
self.assertEqual(idf2.generatormicrochps[0].availability_schedule_name, var_availability_schedule_name) | apache-2.0 | 8,094,510,328,616,789,000 | 42.042857 | 119 | 0.688247 | false |
sopoforic/cgrr-gamecube | graphics.py | 1 | 2996 | from PIL import Image
def parse_rgb5a3(data, width):
if ((len(data))/2) % width != 0:
raise ValueError("Invalid number of tiles for width {}".format(width))
height = (len(data))//(width * 2)
img = Image.new("RGBA", size=(width, height))
imgdata = reorder_tiles(data, (4, 4), width, 16)
pixmap = []
for a, b in (imgdata[i:i+2] for i in range(0, len(imgdata), 2)):
try:
color = (a << 8) + b
except TypeError:
# python 2
color = (ord(a) << 8) + ord(b)
pixmap.append(rgb5a3_to_rgba(color))
img.putdata(pixmap)
return img
def parse_ci8(data, width):
if ((len(data) - 512)/4) % width != 0:
raise ValueError("Invalid number of tiles for width {}".format(width))
height = (len(data) - 512)//width
palette = parse_rgb5a3_palette(data[-512:])
img = Image.new("RGBA", size=(width, height))
imgdata = reorder_tiles(data[:-512], (8, 4), width, 8)
try:
pixmap = [palette[pixel] for pixel in imgdata]
except TypeError:
# python 2
pixmap = [palette[ord(pixel)] for pixel in imgdata]
img.putdata(pixmap)
return img
def parse_rgb5a3_palette(data):
if len(data) != 512:
raise ValueError("Palette length must be 512 bytes.")
palette = []
for a, b in (data[i:i+2] for i in range(0, 512, 2)):
try:
color = (a << 8) + b
except TypeError:
# python 2
color = (ord(a) << 8) + ord(b)
palette.append(rgb5a3_to_rgba(color))
return palette
def reorder_tiles(data, dims, width, bpp=8):
# This is an awful monstrosity, but all it's doing is reordering the data so
# that instead of being ordered in tiles, it's all just in a line, one row
# at a time.
tile_width, tile_height = dims
newdata = b''
tile_row_size = width*tile_height*bpp//8
tile_size = tile_width*tile_height*bpp//8
for tile_row in range(0, len(data), tile_row_size):
for row in range(0, tile_height*tile_width*bpp//8, tile_width*bpp//8):
for tile in range(0, tile_row_size, tile_size):
newdata += data[tile_row + row + tile:tile_row + row + tile + tile_width*bpp//8]
return newdata
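# Hedged sketch (not part of the original module): RGB5A3 textures use 4x4
# tiles at 16 bits per pixel, so an 8-pixel-wide, two-tile image linearises
# as below. The zero-filled byte string is a stand-in for real texture data.
def _example_reorder():
    raw = b'\x00' * (2 * 4 * 4 * 2)           # two 4x4 tiles, 2 bytes per pixel
    return reorder_tiles(raw, (4, 4), 8, 16)  # rows of an 8-pixel-wide image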
# 1 -> True, 0 -> False
def int_to_bitfield(s):
return lambda r: [bool((r >> ((s-1) - n) & 1)) for n in range(s)]
# True -> 1, False -> 0
def bitfield_to_int(s):
return lambda r: sum([2 ** n for n in range(s) if r[(s-1)-n]])
def rgb5a3_to_rgba(color):
bf = int_to_bitfield(16)(color)
if bf[0]:
# no transparency
alpha = 0
red = bitfield_to_int(5)(bf[1:6]) * 0x8
green = bitfield_to_int(5)(bf[6:11]) * 0x8
blue = bitfield_to_int(5)(bf[11:]) * 0x8
else:
alpha = bitfield_to_int(3)(bf[1:4]) * 0x20
red = bitfield_to_int(4)(bf[4:8]) * 0x11
green = bitfield_to_int(4)(bf[8:12]) * 0x11
blue = bitfield_to_int(4)(bf[12:]) * 0x11
return (red, green, blue, alpha)
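# Illustrative sketch (not part of the original module): two hand-picked
# RGB5A3 values. 0x801F has the top bit set (5-bit channels, blue saturated)
# and decodes to (0, 0, 248, 0) with this helper; 0x4FFF has the top bit
# clear (4-bit channels plus 3-bit alpha) and decodes to (255, 255, 255, 128).
def _example_rgb5a3_decode():
    opaque_blue = rgb5a3_to_rgba(0x801F)
    translucent_white = rgb5a3_to_rgba(0x4FFF)
    return opaque_blue, translucent_white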
| gpl-3.0 | -5,450,840,273,704,599,000 | 35.536585 | 96 | 0.570761 | false |
RobLoach/lutris | lutris/gui/cellrenderers.py | 1 | 1642 | from gi.repository import Gtk, Pango, GObject
class GridViewCellRendererText(Gtk.CellRendererText):
"""CellRendererText adjusted for grid view display, removes extra padding"""
def __init__(self, width, *args, **kwargs):
super(GridViewCellRendererText, self).__init__(*args, **kwargs)
self.props.alignment = Pango.Alignment.CENTER
self.props.wrap_mode = Pango.WrapMode.WORD
self.props.xalign = 0.5
self.props.yalign = 0
self.props.width = width
self.props.wrap_width = width
class CellRendererButton(Gtk.CellRenderer):
value = GObject.Property(
type=str,
nick='value',
blurb='what data to render',
flags=(GObject.PARAM_READWRITE | GObject.PARAM_CONSTRUCT))
def __init__(self, layout):
Gtk.CellRenderer.__init__(self)
self.layout = layout
def do_get_size(self, widget, cell_area=None):
height = 20
max_width = 100
if cell_area:
return (cell_area.x, cell_area.y,
max(cell_area.width, max_width), cell_area.height)
return (0, 0, max_width, height)
def do_render(self, cr, widget, bg_area, cell_area, flags):
context = widget.get_style_context()
context.save()
context.add_class(Gtk.STYLE_CLASS_BUTTON)
self.layout.set_markup("Install")
(x, y, w, h) = self.do_get_size(widget, cell_area)
h -= 4
# Gtk.render_background(context, cr, x, y, w, h)
Gtk.render_frame(context, cr, x, y, w - 2, h + 4)
Gtk.render_layout(context, cr, x + 10, y, self.layout)
context.restore()
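# Illustrative sketch (not part of the original file): packing the grid-view
# renderer into a Gtk.IconView whose model stores markup text in column 0.
# The 184-pixel width is an arbitrary assumption.
def _example_pack_renderer(icon_view):
    renderer = GridViewCellRendererText(184)
    icon_view.pack_start(renderer, False)
    icon_view.add_attribute(renderer, 'markup', 0)
    return renderer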
| gpl-3.0 | -6,833,111,031,784,157,000 | 35.488889 | 80 | 0.607795 | false |
rhelmer/socorro-webapp | crashstats/crashstats/views.py | 1 | 85774 | import os
import json
import datetime
import logging
import math
import isodate
import urllib
from collections import defaultdict
from operator import itemgetter
from django import http
from django.contrib.auth.models import Permission
from django.shortcuts import render, redirect
from django.conf import settings
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required, permission_required
from django.core.cache import cache
from django.utils.http import urlquote
from session_csrf import anonymous_csrf
from . import forms, models, utils
from .decorators import check_days_parameter, pass_default_context
from crashstats.supersearch.models import SuperSearchUnredacted
# To prevent running into a known Python bug
# (http://bugs.python.org/issue7980)
# we make use of time.strptime at least once at "import time"
# (as opposed to run time)
datetime.datetime.strptime('2013-07-15 10:00:00', '%Y-%m-%d %H:%M:%S')
def robots_txt(request):
return http.HttpResponse(
'User-agent: *\n'
'%s: /' % ('Allow' if settings.ENGAGE_ROBOTS else 'Disallow'),
mimetype='text/plain',
)
def favicon_ico(request):
"""return the favicon with the content type forced so we don't have to
rely on `mimetypes` to guess it non-deterministically per OS.
The reason for doing /favicon.ico in django instead of setting up
an Apache rewrite rule is to reduce complexity. Having it here means
it's predictable and means fewer things to go wrong outside just getting
this up and running.
"""
filename = os.path.join(settings.STATIC_ROOT, 'img', 'favicon.ico')
return http.HttpResponse(open(filename).read(), mimetype='image/x-icon')
def has_builds(product, versions):
contains_builds = False
prod_versions = []
values_separator = '+'
combinator = ':'
# Ensure we have versions before proceeding. If there are
# no versions, simply return the default of False.
if versions:
if isinstance(versions, list):
for version in versions:
prod_versions.append(product + combinator + version)
versions = values_separator.join(prod_versions)
else:
versions = product + combinator + versions
api = models.CurrentProducts()
products = api.get(versions=versions)
for product in products['hits']:
if product['has_builds']:
contains_builds = True
break
return contains_builds
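# Hedged example (not part of the original view code): has_builds accepts a
# single version string or a list of versions; the product and version values
# below are made up.
def _example_has_builds():
    single = has_builds('WaterWolf', '19.0')
    multiple = has_builds('WaterWolf', ['19.0', '20.0a2'])
    return single, multiple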
def build_data_object_for_adu_graphs(start_date, end_date, response_items,
report_type='by_version',
code_to_name=None):
count = len(response_items)
graph_data = {
'startDate': start_date,
'endDate': end_date,
'count': count,
'labels': [],
}
for count, product_version in enumerate(sorted(response_items,
reverse=True),
start=1):
graph_data['ratio%s' % count] = []
label = product_version.split(':')[-1]
# the `product_version` can be something like `firefox:23.0:win`
# so use code_to_name so we can turn it into a nice looking label
if code_to_name:
label = code_to_name.get(label, label)
graph_data['labels'].append(label)
for day in sorted(response_items[product_version]):
ratio = response_items[product_version][day]['crash_hadu']
t = utils.unixtime(day, millis=True)
graph_data['ratio%s' % count].append([t, ratio])
return graph_data
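# Illustrative sketch (not part of the original view code): the expected shape
# of `response_items` is {"product:version": {"YYYY-MM-DD": {"crash_hadu": ratio}}},
# mirroring the loop above. All values below are made up.
def _example_adu_graph_data():
    sample = {'WaterWolf:1.0': {'2013-07-15': {'crash_hadu': 2.5}}}
    return build_data_object_for_adu_graphs('2013-07-15', '2013-07-16', sample)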
def build_data_object_for_crash_reports(response_items):
crash_reports = []
for count, product_version in enumerate(sorted(response_items,
reverse=True)):
prod_ver = {}
prod_ver['product'] = product_version.split(':')[0]
prod_ver['version'] = product_version.split(':')[1]
crash_reports.append(prod_ver)
return crash_reports
def get_all_nightlies(context):
nightlies_only = settings.NIGHTLY_RELEASE_TYPES
return [
x for x in context['currentversions']
if x['release'].lower() in [rel.lower() for rel in nightlies_only]
]
def get_all_nightlies_for_product(context, product):
nightlies_only = settings.NIGHTLY_RELEASE_TYPES
versions = []
for release in context['currentversions']:
rel_product = release['product']
rel_release = release['release'].lower()
if rel_product == product:
if rel_release in [x.lower() for x in nightlies_only]:
versions.append(release['version'])
return versions
def get_latest_nightly(context, product):
version = None
for release in context['currentversions']:
if release['product'] == product:
rel = release['release']
if rel.lower() == 'nightly' and release['featured']:
version = release['version']
break
if version is None:
# We did not find a featured Nightly, let's simply use the latest
for release in context['currentversions']:
if release['product'] == product:
if release['release'].lower() == 'nightly':
version = release['version']
break
return version
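# Sketch only (not from the original code): `context` is assumed to carry the
# 'currentversions' list used throughout this module; the product name is a
# stand-in.
def _example_nightlies(context):
    latest = get_latest_nightly(context, 'WaterWolf')
    all_nightlies = get_all_nightlies_for_product(context, 'WaterWolf')
    return latest, all_nightlies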
def get_channel_for_release(version):
api = models.CurrentProducts()
version_info = api.get(
versions=version
)
return version_info['hits'][0]['build_type']
def get_timedelta_from_value_and_unit(value, unit):
if unit == 'weeks':
date_delta = datetime.timedelta(weeks=value)
elif unit == 'days':
date_delta = datetime.timedelta(days=value)
elif unit == 'hours':
date_delta = datetime.timedelta(hours=value)
else:
date_delta = datetime.timedelta(weeks=1)
return date_delta
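# Quick sketch (not from the original module): unknown units fall back to one
# week, matching the final else branch above.
def _example_range_to_timedelta():
    assert get_timedelta_from_value_and_unit(3, 'days') == datetime.timedelta(days=3)
    assert get_timedelta_from_value_and_unit(5, 'fortnights') == datetime.timedelta(weeks=1)
    return get_timedelta_from_value_and_unit(12, 'hours')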
@pass_default_context
@check_days_parameter([3, 7, 14], default=7)
def home(request, product, versions=None,
days=None, possible_days=None,
default_context=None):
context = default_context or {}
contains_builds = False
product = context['product']
if versions is None:
versions = []
for release in default_context['currentversions']:
if release['product'] == product and release['featured']:
versions.append(release['version'])
contains_builds = has_builds(product, versions)
else:
versions = versions.split(';')
contains_builds = has_builds(product, versions)
context['versions'] = versions
if len(versions) == 1:
context['version'] = versions[0]
context['has_builds'] = contains_builds
context['days'] = days
context['possible_days'] = possible_days
default_date_range_type = request.session.get('date_range_type', 'report')
context['default_date_range_type'] = default_date_range_type
return render(request, 'crashstats/home.html', context)
@utils.json_view
@pass_default_context
def frontpage_json(request, default_context=None):
date_range_types = ['report', 'build']
form = forms.FrontpageJSONForm(
default_context['currentversions'],
data=request.GET,
date_range_types=date_range_types,
)
if not form.is_valid():
return http.HttpResponseBadRequest(str(form.errors))
product = form.cleaned_data['product']
versions = form.cleaned_data['versions']
days = form.cleaned_data['duration']
assert isinstance(days, int) and days > 0, days
end_date = datetime.datetime.utcnow()
start_date = end_date - datetime.timedelta(days=days + 1)
if not versions:
versions = []
for release in default_context['currentversions']:
if release['product'] == product and release['featured']:
current_end_date = (
datetime.datetime.strptime(release['end_date'], '%Y-%m-%d')
)
if end_date.date() <= current_end_date.date():
versions.append(release['version'])
default = request.session.get('date_range_type', 'report')
date_range_type = form.cleaned_data['date_range_type'] or default
assert date_range_type in date_range_types
request.session['date_range_type'] = date_range_type
api = models.CrashesPerAdu()
crashes = api.get(
product=product,
versions=versions,
from_date=start_date.date(),
to_date=end_date.date(),
date_range_type=date_range_type
)
data = {}
data = build_data_object_for_adu_graphs(
start_date.strftime('%Y-%m-%d'),
end_date.strftime('%Y-%m-%d'),
crashes['hits']
)
# Because we need to always display the links at the bottom of
# the frontpage, even when there is no data to plot, get the
# list of prod/versions from the selected list and not from
# the returned crashes object.
data['product_versions'] = [
{'product': product, 'version': x}
for x in sorted(versions, reverse=True)
]
data['duration'] = days
data['date_range_type'] = date_range_type
return data
@pass_default_context
def products_list(request, default_context=None):
context = default_context or {}
context['products'] = context['currentproducts']['products']
return render(request, 'crashstats/products_list.html', context)
@pass_default_context
def explosive(request, product=None, versions=None, default_context=None):
context = default_context or {}
# TODO: allow querying other periods
days = 5
start = datetime.datetime.utcnow() - datetime.timedelta(days)
start = start.date()
context['explosives'] = models.ExplosiveCrashes().get(start_date=start)
context['explosives'] = context['explosives']['hits']
context['tomorrow'] = {}
for expl in context['explosives']:
t = expl['date']
d = datetime.datetime.strptime(t, '%Y-%m-%d')
d += datetime.timedelta(1)
context['tomorrow'][t] = d.strftime('%Y-%m-%d')
return render(request, 'crashstats/explosive_crashes.html', context)
@pass_default_context
@utils.json_view
def explosive_data(request, signature, date, default_context=None):
explosive_date = datetime.datetime.strptime(date, '%Y-%m-%d')
# This is today plus one day because the middleware behaves like range():
# it doesn't include the last day.
now = datetime.datetime.utcnow().date() + datetime.timedelta(1)
# if we are a couple of days ahead, we only want to draw the days surrounding
# the explosive crash.
days_ahead = min(max((now - explosive_date.date()).days, 0), 3)
end = explosive_date + datetime.timedelta(days_ahead)
start = (explosive_date -
datetime.timedelta(settings.EXPLOSIVE_REPORT_DAYS - days_ahead))
start = start.strftime('%Y-%m-%d')
end = end.strftime('%Y-%m-%d')
hits = models.CrashesCountByDay().get(signature=signature,
start_date=start,
end_date=end)['hits']
hits = sorted(hits.items())
return {'counts': hits}
@pass_default_context
@anonymous_csrf
@check_days_parameter([3, 7], default=7)
def topcrasher_ranks_bybug(request, days=None, possible_days=None,
default_context=None):
context = default_context or {}
if request.GET.get('bug_number'):
try:
bug_number = int(request.GET.get('bug_number'))
except ValueError:
return http.HttpResponseBadRequest('invalid bug number')
# bug IDs are stored as 32-bit int in Postgres
if len(bin(bug_number)[2:]) > 32:
return http.HttpResponseBadRequest('invalid bug number')
sig_by_bugs_api = models.SignaturesByBugs()
signatures = sig_by_bugs_api.get(bug_ids=bug_number)['hits']
context['signatures'] = signatures
context['bug_number'] = bug_number
end_date = datetime.datetime.utcnow()
start_date = end_date - datetime.timedelta(days=days)
top_crashes = defaultdict(dict)
for signature in signatures:
signature_summary_api = models.SignatureSummary()
result = signature_summary_api.get(
report_types=['products'],
signature=signature['signature'],
start_date=start_date,
end_date=end_date,
)
releases = result['reports']['products']
active = []
for release in releases:
for current in context['currentversions']:
if (
release['product_name'] == current['product']
and release['version_string'] == current['version']
):
current_end_date = (
datetime.datetime.strptime(current['end_date'],
'%Y-%m-%d')
)
if end_date.date() <= current_end_date.date():
active.append(current)
signame = signature['signature']
top_crashes[signame] = defaultdict(dict)
for release in active:
product = release['product']
version = release['version']
tcbs_api = models.TCBS()
tcbs = tcbs_api.get(
product=product,
version=version,
end_date=end_date.date(),
duration=days * 24,
limit=100
)['crashes']
for crash in tcbs:
if crash['signature'] == signame:
top_crashes[signame][product][version] = crash
context['top_crashes'] = top_crashes
return render(request, 'crashstats/topcrasher_ranks_bybug.html', context)
@pass_default_context
@anonymous_csrf
@check_days_parameter([1, 3, 7, 14, 28], default=7)
def topcrasher(request, product=None, versions=None, date_range_type=None,
crash_type=None, os_name=None, result_count='50', days=None,
possible_days=None, default_context=None):
context = default_context or {}
if product not in context['releases']:
raise http.Http404('Unrecognized product')
if date_range_type is None:
date_range_type = request.session.get('date_range_type', 'report')
if not versions:
# :(
# simulate what the nav.js does which is to take the latest version
# for this product.
for release in context['currentversions']:
if release['product'] == product and release['featured']:
url = reverse('crashstats:topcrasher',
kwargs=dict(product=product,
versions=release['version']))
return redirect(url)
else:
versions = versions.split(';')
if len(versions) == 1:
context['version'] = versions[0]
release_versions = [x['version'] for x in context['releases'][product]]
if context['version'] not in release_versions:
raise http.Http404('Unrecognized version')
context['has_builds'] = has_builds(product, context['version'])
end_date = datetime.datetime.utcnow()
if crash_type not in ['all', 'browser', 'plugin', 'content']:
crash_type = 'browser'
context['crash_type'] = crash_type
os_api = models.Platforms()
operating_systems = os_api.get()
if os_name not in (os['name'] for os in operating_systems):
os_name = None
context['os_name'] = os_name
# set the result counts filter in the context to use in
# the template. This way we avoid hardcoding it twice and
# have it defined in one common location.
context['result_counts'] = settings.TCBS_RESULT_COUNTS
if result_count not in context['result_counts']:
result_count = settings.TCBS_RESULT_COUNTS[0]
context['result_count'] = result_count
api = models.TCBS()
tcbs = api.get(
product=product,
version=context['version'],
crash_type=crash_type,
end_date=end_date.date(),
date_range_type=date_range_type,
duration=(days * 24),
limit=result_count,
os=os_name
)
context['numberOfCrashes'] = 0
signatures = []
for crash in tcbs['crashes'][:int(result_count)]:
signatures.append(crash['signature'])
context['numberOfCrashes'] += crash['count']
bugs = defaultdict(list)
api = models.Bugs()
if signatures:
for b in api.get(signatures=signatures)['hits']:
bugs[b['signature']].append(b['id'])
for crash in tcbs['crashes']:
crash_counts = []
# Due to the inconsistencies of OS usage and naming of
# codes and props for operating systems the hacky bit below
# is required. Socorro and the world will be a better place
# once https://bugzilla.mozilla.org/show_bug.cgi?id=790642 lands.
os_short_name_binding = {'lin': 'linux'}
for operating_system in operating_systems:
if operating_system['name'] == 'Unknown':
# not applicable in this context
continue
os_code = operating_system['code'][0:3].lower()
key = '%s_count' % os_short_name_binding.get(os_code, os_code)
crash_counts.append([crash[key], operating_system['name']])
crash['correlation_os'] = max(crash_counts)[1]
sig = crash['signature']
if sig in bugs:
if 'bugs' in crash:
crash['bugs'].extend(bugs[sig])
else:
crash['bugs'] = bugs[sig]
if 'bugs' in crash:
crash['bugs'].sort(reverse=True)
context['tcbs'] = tcbs
context['report'] = 'topcrasher'
context['days'] = days
context['possible_days'] = possible_days
context['total_crashing_signatures'] = len(signatures)
context['date_range_type'] = date_range_type
request.session['date_range_type'] = date_range_type
if request.GET.get('format') == 'csv':
return _render_topcrasher_csv(request, context, product)
return render(request, 'crashstats/topcrasher.html', context)
def _render_topcrasher_csv(request, context, product):
response = http.HttpResponse(mimetype='text/csv', content_type='text/csv')
filedate = datetime.datetime.utcnow().strftime('%Y-%m-%d')
response['Content-Disposition'] = ('attachment; filename="%s_%s_%s.csv"' %
(product, context['version'], filedate))
writer = utils.UnicodeWriter(response)
writer.writerow(['Rank',
'Change in Rank',
'Percentage of All Crashes',
'Previous Percentage',
'Signature',
'Total',
'Win',
'Mac',
'Linux',
'Is Garbage Collecting',
'Version Count',
'Versions'])
for crash in context['tcbs']['crashes']:
writer.writerow([crash.get('currentRank', '') + 1,
crash.get('changeInRank', ''),
crash.get('percentOfTotal', ''),
crash.get('previousPercentOfTotal', ''),
crash.get('signature', ''),
crash.get('count', ''),
crash.get('win_count', ''),
crash.get('mac_count', ''),
crash.get('linux_count', ''),
crash.get('is_gc_count', ''),
crash.get('versions_count', ''),
crash.get('versions', '')])
return response
@pass_default_context
def daily(request, default_context=None):
context = default_context or {}
# legacy fix
if 'v[]' in request.GET or 'os[]' in request.GET:
new_url = (request.build_absolute_uri()
.replace('v[]', 'v')
.replace('os[]', 'os'))
return redirect(new_url, permanent=True)
context['products'] = context['currentproducts']['products']
form_selection = request.GET.get('form_selection')
platforms_api = models.Platforms()
platforms = platforms_api.get()
if form_selection == 'by_os':
form_class = forms.DailyFormByOS
else:
form_selection = 'by_version'
form_class = forms.DailyFormByVersion
date_range_types = ['report', 'build']
hang_types = ['any', 'crash', 'hang-p']
form = form_class(
context['currentversions'],
platforms,
data=request.GET,
date_range_types=date_range_types,
hang_types=hang_types,
)
if form.is_valid():
params = form.cleaned_data
params['product'] = params.pop('p')
params['versions'] = params.pop('v')
try:
params['os_names'] = params.pop('os')
except KeyError:
params['os_names'] = None
else:
return http.HttpResponseBadRequest(str(form.errors))
if len(params['versions']) > 0:
context['version'] = params['versions'][0]
context['form_selection'] = form_selection
context['product'] = params['product']
if not params['versions']:
# need to pick the default featured ones
params['versions'] = [
version['version']
for version in context['currentversions']
if version['product'] == params['product'] and version['featured']
]
context['available_versions'] = []
now = datetime.datetime.utcnow().date()
for version in context['currentversions']:
start_date = isodate.parse_date(version['start_date'])
end_date = isodate.parse_date(version['end_date'])
if (
params['product'] == version['product'] and
start_date <= now and
end_date >= now
):
context['available_versions'].append(version['version'])
if not params.get('os_names'):
params['os_names'] = [x['name'] for x in platforms if x.get('display')]
context['os_names'] = params.get('os_names')
end_date = params.get('date_end') or datetime.datetime.utcnow()
if isinstance(end_date, datetime.datetime):
end_date = end_date.date()
start_date = (params.get('date_start') or
end_date - datetime.timedelta(weeks=2))
if isinstance(start_date, datetime.datetime):
start_date = start_date.date()
context['start_date'] = start_date.strftime('%Y-%m-%d')
context['end_date'] = end_date.strftime('%Y-%m-%d')
context['duration'] = abs((start_date - end_date).days)
context['dates'] = utils.daterange(start_date, end_date)
context['hang_type'] = params.get('hang_type') or 'any'
default = request.session.get('date_range_type', 'report')
context['date_range_type'] = params.get('date_range_type') or default
if params.get('hang_type') == 'any':
hang_type = None
else:
hang_type = params.get('hang_type')
api = models.CrashesPerAdu()
crashes = api.get(
product=params['product'],
versions=params['versions'],
from_date=start_date,
to_date=end_date,
date_range_type=params['date_range_type'],
os=params['os_names'],
form_selection=form_selection,
report_type=hang_type
)
code_to_name = dict(
(x['code'], x['name']) for x in platforms if x.get('display')
)
cadu = {}
cadu = build_data_object_for_adu_graphs(
context['start_date'],
context['end_date'],
crashes['hits'],
code_to_name=code_to_name
)
cadu['product_versions'] = build_data_object_for_crash_reports(
crashes['hits'],
)
data_table = {
'totals': {},
'dates': {}
}
has_data_versions = set()
for product_version in crashes['hits']:
data_table['totals'][product_version] = {
'crashes': 0,
'adu': 0,
'throttle': 0,
'crash_hadu': 0,
'ratio': 0,
}
for date in crashes['hits'][product_version]:
crash_info = crashes['hits'][product_version][date]
has_data_versions.add(crash_info['version'])
if date not in data_table['dates']:
data_table['dates'][date] = []
data_table['dates'][date].append(crash_info)
if params['date_range_type'] == 'build':
# for the Date Range = "Build Date" report, we only want to
# include versions that had data.
context['versions'] = list(has_data_versions)
else:
context['versions'] = params['versions']
for date in data_table['dates']:
if form_selection == 'by_version':
data_table['dates'][date] = sorted(data_table['dates'][date],
key=itemgetter('version'),
reverse=True)
else:
data_table['dates'][date] = sorted(data_table['dates'][date],
key=itemgetter('os'),
reverse=True)
if request.GET.get('format') == 'csv':
return _render_daily_csv(
request,
data_table,
params['product'],
params['versions'],
platforms,
context['os_names'],
form_selection
)
context['data_table'] = data_table
context['graph_data'] = json.dumps(cadu)
context['report'] = 'daily'
return render(request, 'crashstats/daily.html', context)
def _render_daily_csv(request, data, product, versions, platforms, os_names,
form_selection):
response = http.HttpResponse(mimetype='text/csv', content_type='text/csv')
title = 'ADI_' + product + '_' + '_'.join(versions) + '_' + form_selection
response['Content-Disposition'] = (
'attachment; filename="%s.csv"' % title
)
writer = utils.UnicodeWriter(response)
head_row = ['Date']
labels = (
('report_count', 'Crashes'),
('adu', 'ADI'),
('throttle', 'Throttle'),
('crash_hadu', 'Ratio'),
)
if form_selection == 'by_version':
for version in versions:
for __, label in labels:
head_row.append('%s %s %s' % (product, version, label))
elif form_selection == 'by_os':
for os_name in os_names:
for version in versions:
for __, label in labels:
head_row.append(
'%s %s on %s %s' %
(product, version, os_name, label)
)
writer.writerow(head_row)
def append_row_blob(blob, labels):
for key, __ in labels:
value = blob[key]
if key == 'throttle':
value = '%.1f%%' % (100.0 * value)
elif key in ('crash_hadu', 'ratio'):
value = '%.3f%%' % value
else:
value = str(value)
row.append(value)
# reverse so that recent dates appear first
for date in sorted(data['dates'].keys(), reverse=True):
crash_info = data['dates'][date]
"""
`crash_info` is a list that looks something like this:
[{'adu': 4500,
'crash_hadu': 43.0,
'date': u'2012-10-13',
'product': u'WaterWolf',
'report_count': 1935,
'throttle': 1.0,
'version': u'4.0a2'}]
Or, if form_selection=='by_os' it would look like this:
[{'os': 'Linux',
'adu': 4500,
'crash_hadu': 43.0,
'date': u'2012-10-13',
'product': u'WaterWolf',
'report_count': 1935,
'throttle': 1.0,
'version': u'4.0a2'},
{'os': 'Windows',
'adu': 4500,
'crash_hadu': 43.0,
'date': u'2012-10-13',
'product': u'WaterWolf',
'report_count': 1935,
'throttle': 1.0,
'version': u'4.0a2'},
]
"""
row = [date]
info_by_version = dict((x['version'], x) for x in crash_info)
if form_selection == 'by_version':
# Turn each of them into a dict where the keys is the version
for version in versions:
if version in info_by_version:
blob = info_by_version[version]
append_row_blob(blob, labels)
else:
for __ in labels:
row.append('-')
elif form_selection == 'by_os':
info_by_os = dict((x['os'], x) for x in crash_info)
for os_name in os_names:
blob = info_by_os[os_name]
append_row_blob(blob, labels)
else:
raise NotImplementedError(form_selection) # pragma: no cover
assert len(row) == len(head_row), (len(row), len(head_row))
writer.writerow(row)
# add the totals
totals_labels = (
('crashes', 'Crashes'),
('adu', 'ADI'),
('throttle', 'Throttle'),
('ratio', 'Ratio'),
)
row = ['Total']
for version in versions:
if form_selection == 'by_os':
for platform in platforms:
product_version_platform = '%s:%s:%s' % (product, version,
platform['code'])
try:
blob = data['totals'][product_version_platform]
except KeyError:
continue
append_row_blob(blob, totals_labels)
else:
product_version = '%s:%s' % (product, version)
try:
blob = data['totals'][product_version]
except KeyError:
continue
append_row_blob(blob, totals_labels)
writer.writerow(row)
return response
@pass_default_context
@check_days_parameter([3, 7, 14, 28], 7)
def topchangers(request, product=None, versions=None,
days=None, possible_days=None,
default_context=None):
context = default_context or {}
if not versions:
versions = []
# select all current versions, if none are chosen
for release in context['currentversions']:
if release['product'] == product and release['featured']:
versions.append(release['version'])
else:
versions = versions.split(';')
context['days'] = days
context['possible_days'] = possible_days
context['versions'] = versions
if len(versions) == 1:
context['version'] = versions[0]
context['product_versions'] = []
for version in versions:
context['product_versions'].append('%s:%s' % (product, version))
end_date = datetime.datetime.utcnow()
# FIXME hardcoded crash_type
crash_type = 'browser'
changers = defaultdict(list)
api = models.TCBS()
for v in versions:
tcbs = api.get(
product=product,
version=v,
crash_type=crash_type,
end_date=end_date.date(),
date_range_type='report',
duration=days * 24,
limit='300'
)
for crash in tcbs['crashes']:
if crash['changeInRank'] != 'new' and crash['signature']:
change = int(crash['changeInRank'])
changers[change].append(crash)
context['topchangers'] = changers
context['report'] = 'topchangers'
return render(request, 'crashstats/topchangers.html', context)
@pass_default_context
@permission_required('crashstats.view_exploitability')
def exploitable_crashes(
request,
product=None,
versions=None,
default_context=None
):
context = default_context or {}
if product is None:
return redirect(
'crashstats:exploitable_crashes',
settings.DEFAULT_PRODUCT,
permanent=True
)
try:
page = max(1, int(request.GET.get('page', 1)))
except ValueError:
page = 1
context['current_page'] = page
results_per_page = settings.EXPLOITABILITY_BATCH_SIZE
exploitable_crashes = models.CrashesByExploitability()
exploitable = exploitable_crashes.get(
product=product,
version=versions,
page=page,
batch=results_per_page
)
crashes = []
bugs = defaultdict(list)
signatures = [x['signature'] for x in exploitable['hits']]
if signatures:
api = models.Bugs()
for b in api.get(signatures=signatures)['hits']:
bugs[b['signature']].append(b['id'])
for crash in exploitable['hits']:
crash['bugs'] = sorted(bugs.get(crash['signature'], []), reverse=True)
crashes.append(crash)
context['crashes'] = crashes
context['pages'] = int(math.ceil(
1.0 * exploitable['total'] / results_per_page
))
context['version'] = versions
context['report'] = 'exploitable'
return render(request, 'crashstats/exploitability.html', context)
@pass_default_context
def report_index(request, crash_id, default_context=None):
if not crash_id:
raise http.Http404('Crash id is missing')
valid_crash_id = utils.find_crash_id(crash_id)
if not valid_crash_id:
return http.HttpResponseBadRequest('Invalid crash ID')
# Sometimes, in Socorro we use a prefix on the crash ID. Usually it's
# 'bp-' but this is configurable.
# If you try to use this to reach the perma link for a crash, it should
# redirect to the report index with the correct crash ID.
if valid_crash_id != crash_id:
return redirect(reverse(
'crashstats:report_index',
args=(valid_crash_id,)
))
context = default_context or {}
context['crash_id'] = crash_id
api = models.UnredactedCrash()
def handle_middleware_404(crash_id, error_code):
if error_code == 404:
# if crash was submitted today, send to pending screen
crash_date = datetime.datetime.strptime(crash_id[-6:], '%y%m%d')
crash_age = datetime.datetime.utcnow() - crash_date
if crash_age < datetime.timedelta(days=1):
tmpl = 'crashstats/report_index_pending.html'
else:
tmpl = 'crashstats/report_index_not_found.html'
return render(request, tmpl, context)
elif error_code == 408:
return render(request,
'crashstats/report_index_pending.html', context)
elif error_code == 410:
return render(request,
'crashstats/report_index_too_old.html', context)
# this is OK because this function is expected to be called within
# an exception stack frame
raise
try:
context['report'] = api.get(crash_id=crash_id)
except models.BadStatusCodeError as e:
return handle_middleware_404(crash_id, e.status)
if 'json_dump' in context['report']:
json_dump = context['report']['json_dump']
if 'sensitive' in json_dump and \
not request.user.has_perm('crashstats.view_pii'):
del json_dump['sensitive']
context['raw_stackwalker_output'] = json.dumps(
json_dump,
sort_keys=True,
indent=4,
separators=(',', ': ')
)
utils.enhance_json_dump(json_dump, settings.VCS_MAPPINGS)
parsed_dump = json_dump
elif 'dump' in context['report']:
context['raw_stackwalker_output'] = context['report']['dump']
parsed_dump = utils.parse_dump(
context['report']['dump'],
settings.VCS_MAPPINGS
)
else:
context['raw_stackwalker_output'] = 'No dump available'
parsed_dump = {}
# If the parsed_dump lacks a `parsed_dump.crash_info.crashing_thread`
# we can't loop over the frames :(
if parsed_dump.get('crash_info', {}).get('crashing_thread') is None:
# the template does a big `{% if parsed_dump.threads %}`
parsed_dump['threads'] = None
context['parsed_dump'] = parsed_dump
context['bug_product_map'] = settings.BUG_PRODUCT_MAP
process_type = 'unknown'
if context['report']['process_type'] is None:
process_type = 'browser'
elif context['report']['process_type'] == 'plugin':
process_type = 'plugin'
elif context['report']['process_type'] == 'content':
process_type = 'content'
context['process_type'] = process_type
bugs_api = models.Bugs()
hits = bugs_api.get(signatures=[context['report']['signature']])['hits']
# bugs_api.get(signatures=...) will return all signatures associated
# with the bugs found, but we only want those with matching signature
context['bug_associations'] = [
x for x in hits
if x['signature'] == context['report']['signature']
]
context['bug_associations'].sort(
key=lambda x: x['id'],
reverse=True
)
raw_api = models.RawCrash()
try:
context['raw'] = raw_api.get(crash_id=crash_id)
except models.BadStatusCodeError as e:
return handle_middleware_404(crash_id, e.status)
context['raw_keys'] = []
if request.user.has_perm('crashstats.view_pii'):
# hold nothing back
context['raw_keys'] = context['raw'].keys()
else:
context['raw_keys'] = [
x for x in context['raw']
if x in models.RawCrash.API_WHITELIST
]
context['raw_keys'].sort(key=unicode.lower)
if 'InstallTime' in context['raw']:
try:
install_time = datetime.datetime.fromtimestamp(
int(context['raw']['InstallTime'])
)
context['install_time'] = (
install_time.strftime('%Y-%m-%d %H:%M:%S')
)
except ValueError:
# that means the `InstallTime` value was not valid.
# that's just as good or bad as it not being in the raw crash
logging.debug(
'Raw crash contains invalid `InstallTime`: %r',
context['raw']['InstallTime']
)
if 'HangID' in context['raw']:
context['hang_id'] = context['raw']['HangID']
crash_pair_api = models.CrashPairsByCrashId()
context['crash_pairs'] = crash_pair_api.get(
uuid=context['report']['uuid'],
hang_id=context['hang_id']
)
if request.user.has_perm('crashstats.view_rawdump'):
context['raw_dump_urls'] = [
reverse('crashstats:raw_data', args=(crash_id, 'dmp')),
reverse('crashstats:raw_data', args=(crash_id, 'json'))
]
if context['raw'].get('additional_minidumps'):
suffixes = [
x.strip()
for x in context['raw']['additional_minidumps'].split(',')
if x.strip()
]
for suffix in suffixes:
name = 'upload_file_minidump_%s' % (suffix,)
context['raw_dump_urls'].append(
reverse(
'crashstats:raw_data_named',
args=(crash_id, name, 'dmp')
)
)
correlations_api = models.CorrelationsSignatures()
total_correlations = 0
if 'os_name' in context['report']:
platform = context['report']['os_name']
for report_type in settings.CORRELATION_REPORT_TYPES:
try:
correlations = correlations_api.get(
report_type=report_type,
product=context['report']['product'],
version=context['report']['version'],
platforms=platform)
hits = correlations['hits'] if correlations else []
if context['report']['signature'] in hits:
total_correlations += 1
except models.BadStatusCodeError:
# correlations failure should not block view
# bug 1005224 will move this to an asynchronous client
# request instead.
pass
context['total_correlations'] = total_correlations
context['BUG_PRODUCT_MAP'] = settings.BUG_PRODUCT_MAP
return render(request, 'crashstats/report_index.html', context)
@utils.json_view
def report_pending(request, crash_id):
if not crash_id:
raise http.Http404("Crash id is missing")
data = {}
url = reverse('crashstats:report_index', kwargs=dict(crash_id=crash_id))
api = models.UnredactedCrash()
try:
data['report'] = api.get(crash_id=crash_id)
status = 'ready'
status_message = 'The report for %s is now available.' % crash_id
url_redirect = "%s" % url
except models.BadStatusCodeError as e:
if str(e).startswith('5'):
raise
status = 'error'
status_message = 'The report for %s is not available yet.' % crash_id
url_redirect = ''
data = {
"status": status,
"status_message": status_message,
"url_redirect": url_redirect
}
return data
@pass_default_context
def report_list(request, partial=None, default_context=None):
context = default_context or {}
form = forms.ReportListForm(
models.ProductsVersions().get(),
models.CurrentVersions().get(),
models.Platforms().get(),
request.GET
)
if not form.is_valid():
return http.HttpResponseBadRequest(str(form.errors))
try:
page = int(request.GET.get('page', 1))
if page < 1:
page = 1
except ValueError:
return http.HttpResponseBadRequest('Invalid page')
context['current_page'] = page
context['signature'] = form.cleaned_data['signature']
context['product_versions'] = form.cleaned_data['version']
end_date = form.cleaned_data['date'] or datetime.datetime.utcnow()
if form.cleaned_data['range_unit']:
range_unit = form.cleaned_data['range_unit']
else:
range_unit = settings.RANGE_UNITS[0]
if form.cleaned_data['process_type']:
process_type = form.cleaned_data['process_type']
else:
process_type = settings.PROCESS_TYPES[0]
if form.cleaned_data['hang_type']:
hang_type = form.cleaned_data['hang_type']
else:
hang_type = settings.HANG_TYPES[0]
if form.cleaned_data['plugin_field']:
plugin_field = form.cleaned_data['plugin_field']
else:
plugin_field = settings.PLUGIN_FIELDS[0]
if form.cleaned_data['plugin_query_type']:
plugin_query_type = form.cleaned_data['plugin_query_type']
if plugin_query_type in settings.QUERY_TYPES_MAP:
plugin_query_type = settings.QUERY_TYPES_MAP[plugin_query_type]
else:
plugin_query_type = settings.QUERY_TYPES[0]
duration = get_timedelta_from_value_and_unit(
int(form.cleaned_data['range_value']),
range_unit
)
if request.user.has_perm('crashstats.run_long_queries'):
# The user is an admin and is allowed to perform bigger queries
max_query_range = settings.QUERY_RANGE_MAXIMUM_DAYS_ADMIN
else:
max_query_range = settings.QUERY_RANGE_MAXIMUM_DAYS
# Check whether the user tries to run a big query, and limit it
if duration.days > max_query_range:
return http.HttpResponseBadRequest('range duration too long')
context['current_day'] = duration.days
start_date = end_date - duration
context['start_date'] = start_date.strftime('%Y-%m-%d')
context['end_date'] = end_date.strftime('%Y-%m-%d')
if form.cleaned_data['product']:
context['selected_products'] = form.cleaned_data['product']
context['product'] = form.cleaned_data['product'][0]
else:
context['selected_products'] = None
context['product'] = settings.DEFAULT_PRODUCT
results_per_page = 250
result_offset = results_per_page * (page - 1)
ALL_REPORTS_COLUMNS = (
# key, label, on by default?
('date_processed', 'Date', True),
('duplicate_of', 'Dup', True),
('product', 'Product', True),
('version', 'Version', True),
('build', 'Build', True),
('os_and_version', 'OS', True),
('cpu_name', 'Build Arch', True),
('reason', 'Reason', True),
('address', 'Address', True),
('uptime', 'Uptime', True),
('install_time', 'Install Time', True),
('user_comments', 'Comments', True),
)
# columns that should, by default, start in descending order
DEFAULT_REVERSE_COLUMNS = (
'date_processed',
)
_default_column_keys = [x[0] for x in ALL_REPORTS_COLUMNS if x[2]]
raw_crash_fields = models.RawCrash.API_WHITELIST
if request.user.has_perm('crashstats.view_pii'):
# add to raw_crash_fields any extra fields that signed-in
# people are allowed to see.
raw_crash_fields += ('URL',)
RAW_CRASH_FIELDS = sorted(
raw_crash_fields,
key=lambda x: x.lower()
)
all_reports_columns_keys = [x[0] for x in ALL_REPORTS_COLUMNS]
ALL_REPORTS_COLUMNS = tuple(
list(ALL_REPORTS_COLUMNS) +
[(x, '%s*' % x, False) for x in RAW_CRASH_FIELDS
if x not in all_reports_columns_keys]
)
if partial == 'reports' or partial == 'correlations':
# This is an optimization.
# The primary use of the "Reports" tab is to load data on the
# models.ReportList() model. However, the models.Correlations() model
# is also going to need to do this to figure out all the OSs and
# versions that it needs.
# By calling the models.ReportList().get(...), independent of
# sorting requirements for both partials, we can take advantage
# of the fact that the ReportList() data gets cached.
context['sort'] = request.GET.get('sort', 'date_processed')
context['reverse'] = request.GET.get('reverse', 'false').lower()
context['reverse'] = context['reverse'] != 'false'
columns = request.GET.getlist('c')
# these are the columns used to render the table in reports.html
context['columns'] = []
for key, label, default in ALL_REPORTS_COLUMNS:
if (not columns and default) or key in columns:
reverse_ = None
if key == context['sort']:
reverse_ = not context['reverse']
else:
if key in DEFAULT_REVERSE_COLUMNS:
reverse_ = True
context['columns'].append({
'key': key,
'label': label,
'reverse': reverse_
})
context['columns_values_joined'] = ','.join(
x['key'] for x in context['columns']
)
include_raw_crash = False
for each in context['columns']:
key = each['key']
if key in raw_crash_fields and key not in _default_column_keys:
include_raw_crash = True
break
context['include_raw_crash'] = include_raw_crash
# some column keys have ids that aren't real fields,
# so transform those before sending to the middleware
sort_ = context['sort']
if sort_ == 'os_and_version':
sort_ = 'os_name'
assert start_date and end_date
api = models.ReportList()
context['report_list'] = api.get(
signature=context['signature'],
products=context['selected_products'],
versions=context['product_versions'],
os=form.cleaned_data['platform'],
start_date=start_date,
end_date=end_date,
build_ids=form.cleaned_data['build_id'],
reasons=form.cleaned_data['reason'],
release_channels=form.cleaned_data['release_channels'],
report_process=process_type,
report_type=hang_type,
plugin_in=plugin_field,
plugin_search_mode=plugin_query_type,
plugin_terms=form.cleaned_data['plugin_query'],
include_raw_crash=include_raw_crash,
result_number=results_per_page,
result_offset=result_offset,
sort=sort_,
reverse=context['reverse'],
)
if partial == 'reports':
current_query = request.GET.copy()
if 'page' in current_query:
del current_query['page']
context['current_url'] = '%s?%s' % (reverse('crashstats:report_list'),
current_query.urlencode())
if not context['report_list']['hits']:
return render(
request,
'crashstats/partials/no_data.html',
context
)
context['signature'] = context['report_list']['hits'][0]['signature']
context['report_list']['total_pages'] = int(math.ceil(
context['report_list']['total'] / float(results_per_page)))
context['report_list']['total_count'] = context['report_list']['total']
if partial == 'correlations':
os_count = defaultdict(int)
version_count = defaultdict(int)
for report in context['report_list']['hits']:
os_name = report['os_name']
version = report['version']
# report_list does not contain beta identifier, but the correlation
# form needs it for validation
if report['release_channel'] == 'beta':
version = version + 'b'
os_count[os_name] += 1
version_count[version] += 1
report['date_processed'] = isodate.parse_datetime(
report['date_processed']
).strftime('%b %d, %Y %H:%M')
report['install_time'] = isodate.parse_datetime(
report['install_time']
).strftime('%Y-%m-%d %H:%M:%S')
if os_count:
correlation_os = max(os_count.iterkeys(),
key=lambda k: os_count[k])
else:
correlation_os = None
context['correlation_os'] = correlation_os
if version_count:
correlation_version = max(version_count.iterkeys(),
key=lambda k: version_count[k])
else:
correlation_version = None
if correlation_version is None:
correlation_version = ''
context['correlation_version'] = correlation_version
correlations_api = models.CorrelationsSignatures()
total_correlations = 0
if correlation_version and correlation_os:
for report_type in settings.CORRELATION_REPORT_TYPES:
correlations = correlations_api.get(
report_type=report_type,
product=context['product'],
version=correlation_version,
platforms=correlation_os
)
hits = correlations['hits'] if correlations else []
if context['signature'] in hits:
total_correlations += 1
context['total_correlations'] = total_correlations
versions = []
for product_version in context['product_versions']:
versions.append(product_version.split(':')[1])
if partial == 'table':
context['table'] = {}
crashes_frequency_api = models.CrashesFrequency()
params = {
'signature': context['signature'],
'products': [context['product']],
'versions': versions,
'from': start_date.date(),
'to': end_date.date(),
}
builds = crashes_frequency_api.get(**params)['hits']
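# group builds by their build date truncated to the hour (YYYYMMDDHH);
# fall back to the raw value, or a placeholder, when the date is malformed
# or missing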
for i, build in enumerate(builds):
try:
build_date = datetime.datetime.strptime(build['build_date'],
'%Y%m%d%H%M%S')
buildid = build_date.strftime('%Y%m%d%H')
except ValueError:
# ValueError happens when build['build_date'] isn't really
# a date
buildid = build['build_date']
except TypeError:
# TypeError happens when build['build_date'] is None
buildid = "(no build ID found)"
context['table'][buildid] = build
# signature URLs only if you're logged in
if partial == 'sigurls':
if context['selected_products']:
products = [context['product']]
else:
products = 'ALL'
assert not context['product_versions'], context['product_versions']
if request.user.has_perm('crashstats.view_pii'):
signatureurls_api = models.SignatureURLs()
sigurls = signatureurls_api.get(
signature=context['signature'],
products=products,
versions=context['product_versions'],
start_date=start_date,
end_date=end_date
)
context['signature_urls'] = sigurls['hits']
else:
context['signature_urls'] = None
if partial == 'comments':
context['comments'] = []
comments_api = models.CommentsBySignature()
context['comments'] = comments_api.get(
signature=context['signature'],
products=form.cleaned_data['product'],
versions=context['product_versions'],
os=form.cleaned_data['platform'],
start_date=start_date,
end_date=end_date,
build_ids=form.cleaned_data['build_id'],
reasons=form.cleaned_data['reason'],
release_channels=form.cleaned_data['release_channels'],
report_process=form.cleaned_data['process_type'],
report_type=form.cleaned_data['hang_type'],
plugin_in=form.cleaned_data['plugin_field'],
plugin_search_mode=form.cleaned_data['plugin_query_type'],
plugin_terms=form.cleaned_data['plugin_query'],
result_number=results_per_page,
result_offset=result_offset
)
current_query = request.GET.copy()
if 'page' in current_query:
del current_query['page']
context['current_url'] = '%s?%s' % (reverse('crashstats:report_list'),
current_query.urlencode())
if not context['comments']['hits']:
return render(
request,
'crashstats/partials/no_data.html',
context
)
context['comments']['total_pages'] = int(math.ceil(
context['comments']['total'] / float(results_per_page)))
context['comments']['total_count'] = context['comments']['total']
if partial == 'bugzilla':
bugs_api = models.Bugs()
context['bug_associations'] = bugs_api.get(
signatures=[context['signature']]
)['hits']
context['bug_associations'].sort(key=lambda x: x['id'], reverse=True)
match_total = 0
for bug in context['bug_associations']:
# Only add up bugs where it matches the signature exactly.
if bug['signature'] == context['signature']:
match_total += 1
context['bugsig_match_total'] = match_total
if partial == 'graph':
# if we have a version, expose the channel for the current
# release for use in the adu graph
if context['product_versions']:
context['channel'] = get_channel_for_release(
context['product_versions']
)
else:
# if no version was provided fallback to nightly
context['channel'] = 'nightly'
# the ui is going to need access to all channels
context['channels'] = ','.join(settings.CHANNELS)
# set initial form data
data = {
'product_name': context['product'],
'signature': context['signature'],
'channel': context['channel'],
'start_date': context['start_date'],
'end_date': context['end_date']
}
context['form'] = forms.ADUBySignatureJSONForm(
settings.CHANNELS,
models.ProductsVersions().get(),
data,
auto_id=True
)
if not partial:
# prep it so it's nicer to work with in the template
context['all_reports_columns'] = [
{'value': x[0], 'label': x[1], 'default': x[2]}
for x in ALL_REPORTS_COLUMNS
]
if partial == 'graph':
tmpl = 'crashstats/partials/graph.html'
elif partial == 'reports':
tmpl = 'crashstats/partials/reports.html'
elif partial == 'comments':
tmpl = 'crashstats/partials/comments.html'
elif partial == 'sigurls':
tmpl = 'crashstats/partials/sigurls.html'
elif partial == 'bugzilla':
tmpl = 'crashstats/partials/bugzilla.html'
elif partial == 'table':
tmpl = 'crashstats/partials/table.html'
elif partial == 'correlations':
tmpl = 'crashstats/partials/correlations.html'
elif partial:
raise NotImplementedError('Unknown template for %s' % partial)
else:
tmpl = 'crashstats/report_list.html'
return render(request, tmpl, context)
@utils.json_view
@pass_default_context
def adu_by_signature_json(request, default_context=None):
form = forms.ADUBySignatureJSONForm(
settings.CHANNELS,
models.ProductsVersions().get(),
data=request.GET,
)
if not form.is_valid():
return http.HttpResponseBadRequest(str(form.errors))
product = form.cleaned_data['product_name']
signature = form.cleaned_data['signature']
channel = form.cleaned_data['channel']
start_date = form.cleaned_data['start_date']
end_date = form.cleaned_data['end_date']
api = models.AduBySignature()
adu_by_sig_data = api.get(
product_name=product,
start_date=start_date,
end_date=end_date,
signature=signature,
channel=channel
)
return adu_by_sig_data
@pass_default_context
def status(request, default_context=None):
response = models.Status().get()
stats = response['hits']
# transform some of the data to be plotted, store it separately
plot_data = {}
attributes = [
'avg_process_sec',
'avg_wait_sec',
'waiting_job_count',
'processors_count',
'date_created'
]
for a in attributes:
plucked = list(reversed([x.get(a) for x in stats]))
if a == 'date_created':
plucked = map(lambda x: utils.parse_isodate(x, "%H:%M"), plucked)
plot_data[a] = [list(x) for x in enumerate(plucked)]
# format the dates in place for display in the table
attributes = ['date_created',
'date_recently_completed',
'date_oldest_job_queued']
for stat in stats:
for attribute in attributes:
stat[attribute] = utils.parse_isodate(stat[attribute])
if stats:
first_stat = stats[0]
else:
first_stat = None
context = default_context or {}
context.update({
'data': stats,
'stat': first_stat,
'plot_data': plot_data,
'socorro_revision': response['socorro_revision'],
'breakpad_revision': response['breakpad_revision'],
'schema_revision': response['schema_revision'],
})
return render(request, 'crashstats/status.html', context)
def status_json(request):
response = http.HttpResponse(
models.Status().get(decode_json=False),
content_type='application/json; charset=UTF-8'
)
response['Access-Control-Allow-Origin'] = '*'
return response
def status_revision(request):
return http.HttpResponse(
models.Status().get()['socorro_revision'],
content_type='text/plain'
)
@pass_default_context
def crontabber_state(request, default_context=None):
context = default_context or {}
return render(request, 'crashstats/crontabber_state.html', context)
@pass_default_context
@login_required
def your_crashes(request, default_context=None):
"""Shows a logged in user a list of his or her recent crash reports. """
context = default_context or {}
one_month_ago = (
datetime.datetime.utcnow() - datetime.timedelta(weeks=4)
).isoformat()
api = SuperSearchUnredacted()
results = api.get(
email=request.user.email,
date='>%s' % one_month_ago,
)
context['crashes_list'] = [
dict(zip(('crash_id', 'date'), (x['uuid'], x['date'])))
for x in results['hits']
]
return render(request, 'crashstats/your_crashes.html', context)
@pass_default_context
def login(request, default_context=None):
context = default_context or {}
return render(request, 'crashstats/login.html', context)
@pass_default_context
@login_required
def permissions(request, default_context=None):
context = default_context or {}
context['permissions'] = (
Permission.objects.filter(content_type__model='')
.order_by('name')
)
return render(request, 'crashstats/permissions.html', context)
def quick_search(request):
query = request.GET.get('query', '').strip()
crash_id = utils.find_crash_id(query)
if crash_id:
url = reverse(
'crashstats:report_index',
kwargs=dict(crash_id=crash_id)
)
elif query:
url = '%s?signature=%s' % (
reverse('supersearch.search'),
urlquote('~%s' % query)
)
else:
url = reverse('supersearch.search')
return redirect(url)
@pass_default_context
def query(request, default_context=None):
context = default_context or {}
products = models.ProductsVersions().get()
versions = models.CurrentVersions().get()
platforms = models.Platforms().get()
form = forms.QueryForm(products, versions, platforms, request.GET)
if not form.is_valid():
return http.HttpResponseBadRequest(str(form.errors))
if form.cleaned_data['query_type']:
query_type = form.cleaned_data['query_type']
if query_type in settings.QUERY_TYPES_MAP:
query_type = settings.QUERY_TYPES_MAP[query_type]
else:
query_type = settings.QUERY_TYPES[0]
results_per_page = 100
if form.cleaned_data['version']:
# We need to extract just the version number for use with the
# navigation version select drop-down.
selected_version = form.cleaned_data['version'][0].split(':')[1]
context['version'] = selected_version
if form.cleaned_data['product']:
selected_products = form.cleaned_data['product']
else:
selected_products = [settings.DEFAULT_PRODUCT]
context['product'] = selected_products[0]
if not form.cleaned_data['date']:
date = datetime.datetime.utcnow()
# This is an optimization for elasticsearch.
# If the user supplies a value for 'date', we just use that but
# if no value is sent, then the default one is less precise,
# which means users will often use the same value for date
# (assuming they don't change that value).
# In the backend, we end up with more common date filters
# thus improving performance.
date = date.replace(minute=0, second=0, microsecond=0)
else:
date = form.cleaned_data['date']
try:
context['current_page'] = int(request.GET.get('page', 1))
except ValueError:
return http.HttpResponseBadRequest('Invalid page')
previous_page = context['current_page'] - 1
context['results_offset'] = results_per_page * previous_page
current_query = request.GET.copy()
if 'page' in current_query:
del current_query['page']
context['current_url'] = '%s?%s' % (reverse('crashstats:query'),
current_query.urlencode())
context['products'] = products
if form.cleaned_data['range_unit']:
range_unit = form.cleaned_data['range_unit']
else:
range_unit = settings.RANGE_UNITS[0]
# 'all' is here for backwards compatibility; it's an alias of 'any'
process_type = form.cleaned_data['process_type']
if not process_type or process_type == 'all':
process_type = settings.PROCESS_TYPES[0]
hang_type = form.cleaned_data['hang_type']
if not hang_type or hang_type == 'all':
hang_type = settings.HANG_TYPES[0]
if form.cleaned_data['plugin_field']:
plugin_field = form.cleaned_data['plugin_field']
else:
plugin_field = settings.PLUGIN_FIELDS[0]
if form.cleaned_data['plugin_query_type']:
plugin_query_type = form.cleaned_data['plugin_query_type']
if plugin_query_type in settings.QUERY_TYPES_MAP:
plugin_query_type = settings.QUERY_TYPES_MAP[plugin_query_type]
else:
plugin_query_type = settings.QUERY_TYPES[0]
current_products = defaultdict(list)
now = datetime.datetime.utcnow().date()
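# only keep releases whose start_date..end_date window includes today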
for product in products:
for release in products[product]:
start_date = isodate.parse_date(release['start_date'])
end_date = isodate.parse_date(release['end_date'])
if start_date <= now <= end_date:
current_products[product].append(release)
context['products_json'] = json.dumps(current_products)
context['platforms'] = platforms
params = {
'signature': form.cleaned_data['signature'],
'query': form.cleaned_data['query'],
'products': selected_products,
'versions': form.cleaned_data['version'],
'platforms': form.cleaned_data['platform'],
'end_date': date,
'date_range_unit': range_unit,
'date_range_value': form.cleaned_data['range_value'],
'query_type': query_type,
'reason': form.cleaned_data['reason'],
'release_channels': form.cleaned_data['release_channels'],
'build_id': form.cleaned_data['build_id'],
'process_type': process_type,
'hang_type': hang_type,
'plugin_field': plugin_field,
'plugin_query_type': plugin_query_type,
'plugin_query': form.cleaned_data['plugin_query']
}
if params['build_id']:
params['build_id'] = [unicode(x) for x in params['build_id']]
params['platforms_names'] = [
p['name'] for p in platforms
if p['code'] in params['platforms']
]
context['params'] = params
context['params_json'] = json.dumps({'versions': params['versions'],
'products': params['products']})
context['query'] = {
'total': 0,
'total_count': 0,
'total_pages': 0
}
if (request.GET.get('do_query') or
request.GET.get('date') or
request.GET.get('query')):
api = models.Search()
date_delta = get_timedelta_from_value_and_unit(
int(params['date_range_value']),
params['date_range_unit']
)
if request.user.has_perm('crashstats.run_long_queries'):
# The user is an admin and is allowed to perform bigger queries
max_query_range = settings.QUERY_RANGE_MAXIMUM_DAYS_ADMIN
error_type = 'exceeded_maximum_date_range_admin'
else:
max_query_range = settings.QUERY_RANGE_MAXIMUM_DAYS
error_type = 'exceeded_maximum_date_range'
# Check whether the user tries to run a big query, and limit it
if date_delta.days > max_query_range:
# Display an error
context['error'] = {
'type': error_type,
'data': {
'maximum': max_query_range,
'default': settings.QUERY_RANGE_DEFAULT_DAYS
}
}
# And change the date range to its default value
params['date_range_unit'] = 'days'
params['date_range_value'] = settings.QUERY_RANGE_DEFAULT_DAYS
date_delta = datetime.timedelta(days=params['date_range_value'])
start_date = params['end_date'] - date_delta
force_api_impl = request.GET.get(
'_force_api_impl',
settings.SEARCH_MIDDLEWARE_IMPL
)
search_results = api.get(
terms=params['query'],
products=params['products'],
versions=params['versions'],
os=params['platforms'],
start_date=start_date,
end_date=params['end_date'],
search_mode=params['query_type'],
reasons=params['reason'],
release_channels=params['release_channels'],
build_ids=params['build_id'],
report_process=params['process_type'],
report_type=params['hang_type'],
plugin_in=params['plugin_field'],
plugin_search_mode=params['plugin_query_type'],
plugin_terms=params['plugin_query'],
result_number=results_per_page,
result_offset=context['results_offset'],
_force_api_impl=force_api_impl
)
search_results['total_pages'] = int(math.ceil(
search_results['total'] / float(results_per_page)))
search_results['total_count'] = search_results['total']
# Bugs for each signature
signatures = [h['signature'] for h in search_results['hits']]
if signatures:
bugs = defaultdict(list)
bugs_api = models.Bugs()
for b in bugs_api.get(signatures=signatures)['hits']:
bugs[b['signature']].append(b['id'])
for hit in search_results['hits']:
sig = hit['signature']
if sig in bugs:
if 'bugs' in hit:
hit['bugs'].extend(bugs[sig])
else:
hit['bugs'] = bugs[sig]
context['query'] = search_results
# Building the query_string for links to report/list
query_params = {
'product': params['products'],
'version': params['versions'],
'platform': params['platforms'],
'query_type': params['query_type'],
'date': params['end_date'].strftime('%Y-%m-%d %H:%M:%S'),
'range_value': params['date_range_value'],
'range_unit': params['date_range_unit'],
'reason': params['reason'],
'release_channels': params['release_channels'],
'build_id': params['build_id'],
'hang_type': params['hang_type'],
'process_type': params['process_type']
}
if params['hang_type'] == 'plugin':
query_params.update({
'plugin_field': params['plugin_field'],
'plugin_query_type': params['plugin_query_type'],
'plugin_query': params['plugin_query']
})
context['report_list_query_string'] = (
urllib.urlencode(utils.sanitize_dict(query_params), True))
return render(request, 'crashstats/query.html', context)
@utils.json_view
def buginfo(request, signatures=None):
form = forms.BugInfoForm(request.GET)
if not form.is_valid():
return http.HttpResponseBadRequest(str(form.errors))
bugs = form.cleaned_data['bug_ids']
fields = form.cleaned_data['include_fields']
bzapi = models.BugzillaBugInfo()
result = bzapi.get(bugs, fields)
# store all of these in a cache
for bug in result['bugs']:
if 'id' in bug:
cache_key = 'buginfo:%s' % bug['id']
cache.set(cache_key, bug, 60 * 60) # one hour
return result
@utils.json_view
def plot_signature(request, product, versions, start_date, end_date,
signature):
date_format = '%Y-%m-%d'
try:
start_date = datetime.datetime.strptime(start_date, date_format)
end_date = datetime.datetime.strptime(end_date, date_format)
except ValueError, msg:
return http.HttpResponseBadRequest(str(msg))
if not signature:
return http.HttpResponseBadRequest('signature is required')
api = models.SignatureTrend()
sigtrend = api.get(
product=product,
version=versions,
signature=signature,
end_date=end_date,
start_date=start_date,
)
graph_data = {
'startDate': start_date,
'signature': signature,
'endDate': end_date,
'counts': [],
'percents': [],
}
for s in sigtrend['hits']:
t = utils.unixtime(s['date'], millis=True)
graph_data['counts'].append([t, s['count']])
graph_data['percents'].append([t, (s['percent_of_total'])])
return graph_data
@utils.json_view
def signature_summary(request):
form = forms.SignatureSummaryForm(
models.ProductsVersions().get(),
models.CurrentVersions().get(),
request.GET
)
if not form.is_valid():
return http.HttpResponseBadRequest(str(form.errors))
range_value = form.cleaned_data['range_value'] or 1
end_date = form.cleaned_data['date'] or datetime.datetime.utcnow()
signature = form.cleaned_data['signature']
version = form.cleaned_data['version']
start_date = end_date - datetime.timedelta(days=range_value)
report_types = {
'architecture': 'architectures',
'flash_version': 'flashVersions',
'os': 'percentageByOs',
'process_type': 'processTypes',
'products': 'productVersions',
'uptime': 'uptimeRange',
'distinct_install': 'distinctInstall',
'devices': 'devices',
'graphics': 'graphics',
'exploitability': 'exploitabilityScore',
}
api = models.SignatureSummary()
result = {}
signature_summary = {}
results = api.get(
report_types=report_types.keys(),
signature=signature,
start_date=start_date,
end_date=end_date,
versions=version,
)
for r, name in report_types.items():
result[name] = results['reports'][r]
signature_summary[name] = []
# whether you can view the exploitability stuff depends on several
# logical steps...
can_view_exploitability = False
if request.user.has_perm('crashstats.view_exploitability'):
# definitely!
can_view_exploitability = True
elif request.user.has_perm('crashstats.view_flash_exploitability'):
# then it better be only Flash versions
flash_versions = [
x['category'] for x in result['flashVersions']
]
# This business logic is very specific.
# For more information see
# https://bugzilla.mozilla.org/show_bug.cgi?id=946429
if flash_versions and '[blank]' not in flash_versions:
can_view_exploitability = True
if can_view_exploitability:
for r in result['exploitabilityScore']:
signature_summary['exploitabilityScore'].append({
'report_date': r['report_date'],
'null_count': r['null_count'],
'low_count': r['low_count'],
'medium_count': r['medium_count'],
'high_count': r['high_count'],
})
else:
result.pop('exploitabilityScore')
signature_summary.pop('exploitabilityScore')
# because in Python we use PEP 8 under_scored style, while in JS we use camelCase
signature_summary['canViewExploitability'] = can_view_exploitability
def format_float(number):
return '%.2f' % float(number)
for r in result['architectures']:
signature_summary['architectures'].append({
'architecture': r['category'],
'percentage': format_float(r['percentage']),
'numberOfCrashes': r['report_count']})
for r in result['percentageByOs']:
signature_summary['percentageByOs'].append({
'os': r['category'],
'percentage': format_float(r['percentage']),
'numberOfCrashes': r['report_count']})
for r in result['productVersions']:
signature_summary['productVersions'].append({
'product': r['product_name'],
'version': r['version_string'],
'percentage': format_float(r['percentage']),
'numberOfCrashes': r['report_count']})
for r in result['uptimeRange']:
signature_summary['uptimeRange'].append({
'range': r['category'],
'percentage': format_float(r['percentage']),
'numberOfCrashes': r['report_count']})
for r in result['processTypes']:
signature_summary['processTypes'].append({
'processType': r['category'],
'percentage': format_float(r['percentage']),
'numberOfCrashes': r['report_count']})
for r in result['flashVersions']:
signature_summary['flashVersions'].append({
'flashVersion': r['category'],
'percentage': format_float(r['percentage']),
'numberOfCrashes': r['report_count']})
for r in result['distinctInstall']:
signature_summary['distinctInstall'].append({
'product': r['product_name'],
'version': r['version_string'],
'crashes': r['crashes'],
'installations': r['installations']})
for r in result['devices']:
signature_summary['devices'].append({
'cpu_abi': r['cpu_abi'],
'manufacturer': r['manufacturer'],
'model': r['model'],
'version': r['version'],
'report_count': r['report_count'],
'percentage': r['percentage'],
})
for r in result['graphics']:
vendor_name = r['vendor_name'] or r['vendor_hex']
adapter_name = r['adapter_name'] or r['adapter_hex']
signature_summary['graphics'].append({
'vendor': vendor_name,
'adapter': adapter_name,
'report_count': r['report_count'],
'percentage': r['percentage'],
})
return signature_summary
@pass_default_context
@anonymous_csrf
def gccrashes(request, product, version=None, default_context=None):
context = default_context or {}
versions = get_all_nightlies_for_product(context, product)
if version is None:
# No version was passed, so get the latest nightly
version = get_latest_nightly(context, product)
current_products = context['currentproducts']['products']
context['report'] = 'gccrashes'
context['versions'] = versions
context['products'] = current_products
context['selected_version'] = version
context['selected_product'] = product
start_date = None
end_date = None
date_today = datetime.datetime.utcnow()
week_ago = date_today - datetime.timedelta(days=7)
# Check whether dates were passed, but only use them if both
# the start and end date were provided; otherwise fall back to the defaults.
if 'start_date' in request.GET and 'end_date' in request.GET:
start_date = request.GET['start_date']
end_date = request.GET['end_date']
context['start_date'] = start_date if start_date else week_ago
context['end_date'] = end_date if end_date else date_today
nightly_versions = get_all_nightlies(context)
data = {
'product': product,
'version': version,
'start_date': context['start_date'],
'end_date': context['end_date']
}
context['form'] = forms.GCCrashesForm(
data,
nightly_versions=nightly_versions,
auto_id=True
)
return render(request, 'crashstats/gccrashes.html', context)
@utils.json_view
@pass_default_context
def gccrashes_json(request, default_context=None):
nightly_versions = get_all_nightlies(default_context)
form = forms.GCCrashesForm(request.GET, nightly_versions=nightly_versions)
if not form.is_valid():
return http.HttpResponseBadRequest(str(form.errors))
product = form.cleaned_data['product']
version = form.cleaned_data['version']
start_date = form.cleaned_data['start_date']
end_date = form.cleaned_data['end_date']
api = models.GCCrashes()
result = api.get(
product=product,
version=version,
from_date=start_date,
to=end_date,
)
return result
@pass_default_context
def crash_trends(request, product, versions=None, default_context=None):
context = default_context or {}
context['product'] = product
context['report'] = 'crash_trends'
version = get_latest_nightly(context, product)
context['version'] = version
context['end_date'] = datetime.datetime.utcnow()
context['start_date'] = context['end_date'] - datetime.timedelta(days=7)
context['products'] = context['currentproducts']
url = reverse('crashstats:crashtrends_json')
params = {
'product': product,
'version': version,
'start_date': context['start_date'].strftime('%Y-%m-%d'),
'end_date': context['end_date'].strftime('%Y-%m-%d')
}
url += '?' + urllib.urlencode(params)
context['data_url'] = url
return render(request, 'crashstats/crash_trends.html', context)
@utils.json_view
@pass_default_context
def get_nightlies_for_product_json(request, default_context=None):
return get_all_nightlies_for_product(
default_context,
request.GET.get('product')
)
@utils.json_view
@pass_default_context
def crashtrends_json(request, default_context=None):
nightly_versions = get_all_nightlies(default_context)
form = forms.CrashTrendsForm(nightly_versions, request.GET)
if not form.is_valid():
return http.HttpResponseBadRequest(str(form.errors))
product = form.cleaned_data['product']
version = form.cleaned_data['version']
start_date = form.cleaned_data['start_date']
end_date = form.cleaned_data['end_date']
api = models.CrashTrends()
response = api.get(
product=product,
version=version,
start_date=start_date.date(),
end_date=end_date.date()
)
formatted = {}
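# bucket report counts per report_date into nine slots: days_out 0 through 7,
# plus a single overflow slot for 8 days or more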
for report in response['crashtrends']:
report_date = report['report_date']
if report_date not in formatted:
formatted[report_date] = [0] * 9
if report['days_out'] >= 8:
formatted[report_date][8] += report['report_count']
else:
days_out = int(report['days_out'])
formatted[report_date][days_out] += report['report_count']
json_response = {
'crashtrends': formatted,
'total': len(formatted)
}
return json_response
@permission_required('crashstats.view_rawdump')
def raw_data(request, crash_id, extension, name=None):
api = models.RawCrash()
if extension == 'json':
format = 'meta'
content_type = 'application/json'
elif extension == 'dmp':
format = 'raw'
content_type = 'application/octet-stream'
else:
raise NotImplementedError(extension)
data = api.get(crash_id=crash_id, format=format, name=name)
response = http.HttpResponse(content_type=content_type)
if extension == 'json':
response.write(json.dumps(data))
else:
response.write(data)
return response
@utils.json_view
def correlations_json(request):
form = forms.CorrelationsJSONForm(
models.ProductsVersions().get(),
models.CurrentVersions().get(),
models.Platforms().get(),
request.GET
)
if not form.is_valid():
return http.HttpResponseBadRequest(str(form.errors))
report_type = form.cleaned_data['correlation_report_type']
product = form.cleaned_data['product']
version = form.cleaned_data['version']
# correlations does not differentiate betas since it works on raw data
if version.endswith('b'):
version = version.split('b')[0]
platform = form.cleaned_data['platform']
signature = form.cleaned_data['signature']
api = models.Correlations()
return api.get(report_type=report_type, product=product, version=version,
platform=platform, signature=signature)
@utils.json_view
def correlations_signatures_json(request):
form = forms.CorrelationsSignaturesJSONForm(
models.ProductsVersions().get(),
models.CurrentVersions().get(),
models.Platforms().get(),
request.GET
)
if not form.is_valid():
return http.HttpResponseBadRequest(str(form.errors))
report_type = form.cleaned_data['correlation_report_type']
product = form.cleaned_data['product']
version = form.cleaned_data['version']
platforms = form.cleaned_data['platforms']
api = models.CorrelationsSignatures()
result = api.get(
report_type=report_type,
product=product,
version=version,
platforms=platforms
)
# if the product and/or version is completely unrecognized, you
# don't get an error or an empty list, you get NULL
if result is None:
result = {'hits': [], 'total': 0}
return result
| mpl-2.0 | 9,182,683,778,932,783,000 | 33.405937 | 79 | 0.578497 | false |
shanot/imp | modules/multifit/pyext/src/models.py | 1 | 1750 | #!/usr/bin/env python
from __future__ import print_function
import IMP.multifit
from IMP import OptionParser
__doc__ = "Write output models as PDB files."
# analyse the ensemble; first we will do the RMSD computations
def parse_args():
usage = """%prog [options] <asmb.input> <proteomics.input>
<mapping.input> <combinations> <model prefix>
Write output models.
"""
parser = OptionParser(usage)
parser.add_option("-m", "--max", type="int", dest="max", default=None,
help="maximum number of models to write")
(options, args) = parser.parse_args()
if len(args) != 5:
parser.error("incorrect number of arguments")
return options, args
def run(asmb_fn, proteomics_fn, mapping_fn, combs_fn, model_output, max_comb):
# get rmsd for subunits
mdl = IMP.Model()
combs = IMP.multifit.read_paths(combs_fn)
sd = IMP.multifit.read_settings(asmb_fn)
sd.set_was_used(True)
prot_data = IMP.multifit.read_proteomics_data(proteomics_fn)
mapping_data = IMP.multifit.read_protein_anchors_mapping(prot_data,
mapping_fn)
ensmb = IMP.multifit.load_ensemble(sd, mdl, mapping_data)
mhs = ensmb.get_molecules()
print("number of combinations:", len(combs), max_comb)
for i, comb in enumerate(combs[:max_comb]):
if i % 500 == 0:
print(i)
ensmb.load_combination(comb)
print(model_output + "." + str(i) + ".pdb")
IMP.atom.write_pdb(mhs, model_output + "." + str(i) + ".pdb")
ensmb.unload_combination(comb)
def main():
options, args = parse_args()
run(args[0], args[1], args[2], args[3], args[4], options.max)
if __name__ == "__main__":
main()
| gpl-3.0 | 749,032,697,762,741,500 | 32.653846 | 78 | 0.609714 | false |
nyamairi/TracToChatwork | tochatwork/admin_panel.py | 1 | 1782 | import pkg_resources
from trac.core import Component, implements
from trac.admin import IAdminPanelProvider
from trac.web.chrome import add_notice, add_warning, ITemplateProvider
from trac.util.text import exception_to_unicode
SECTION_NAME = 'tochatwork'
class AdminPanel(Component):
implements(IAdminPanelProvider, ITemplateProvider)
def get_admin_panels(self, req):
if 'TRAC_ADMIN' in req.perm:
yield ('tochatwork', 'ToChatwork', 'settings', 'Settings')
def render_admin_panel(self, req, cat, page, version):
self.log.debug("cat: %s page: %s", cat, page)
req.perm.require('TRAC_ADMIN')
options = (
'api_base_url',
'api_token',
'room_id',
'only_owner_changed',
'notify_symbol',
'api_token_field_name')
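# on POST each option is copied from the request into the [tochatwork]
# section of trac.ini; on GET the current values are rendered into the
# settings template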
self.log.debug("method: %s", req.method)
if req.method == 'POST':
for option in options:
self.config.set(SECTION_NAME, option, req.args.get(option))
try:
self.config.save()
self.log.debug('config saved.')
add_notice(req, 'Your changes have been saved.')
except Exception, e:
self.log.error("Error writing to trac.ini: %s", exception_to_unicode(e))
add_warning(req, 'Error writing to trac.ini.')
req.redirect(req.href.admin(cat, page))
params = dict([(option, self.config.get(SECTION_NAME, option)) for option in options])
return 'settings.html', params
def get_htdocs_dirs(self):
return []
def get_templates_dirs(self):
return [pkg_resources.resource_filename('tochatwork', 'templates')] | mit | 6,070,210,907,413,598,000 | 32.307692 | 94 | 0.584175 | false |
academicsystems/Qengine | pkg/config/qconfig.py | 1 | 3956 | import os.path
from random import choice
from string import ascii_uppercase
import sys
import yaml # pyyaml
from ..blocks import loadblocks as lb
class Singleton(type):
_instances = {}
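# one shared instance per class: __call__ constructs it on first use and
# returns the cached object on every later instantiation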
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class Config(object):
__metaclass__ = Singleton
initialized = False
# static config variables
QENGINE_CACHE_DIR = ''
QENGINE_ENABLE_REMOTE = False
QENGINE_SALT = ''
QENGINE_IV = ''
QENGINE_LOG_REQUESTS = False
QENGINE_LOG_RESPONSE = False
QENGINE_MOODLE_HACKS = False
QENGINE_NO_CACHE = False
QENGINE_PASSKEY = None
QENGINE_QUESTION_LOCATION = 'filesystem'
ENGINEINFO = {}
BLOCKS = {}
def __init__(self):
pass
def _loadConfigOption(self,key,default,configuration):
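# missing keys and empty string values both fall back to the supplied default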
try:
value = configuration[key]
if type(value) is str:
if len(value) == 0:
value = default
except:
value = default
return value
def init(self,inconfig=None):
if inconfig != None:
configuration = inconfig
else:
if os.path.isfile('./config/configuration.yaml'):
config_file = './config/configuration.yaml';
elif os.path.isfile('./config/default-configuration.yaml'):
config_file = './config/default-configuration.yaml';
print 'WARNING: Using default-configuration.yaml. You should supply your own configuration.yaml file'
else:
print 'FATAL ERROR: Could not find configuration.yaml file!'
sys.exit()
with open(config_file,'r') as f:
configuration = yaml.load(f.read())
#! set location of questions
#
# 1 - local files
# 2 - mongodb
# 3 - mysql
# 4 - receive questions
# mandatory environment variables
try:
self.QENGINE_SALT = configuration['QENGINE_SALT']
except Exception as e:
print e
sys.exit()
validSaltLengths = [16,24,32]
if len(self.QENGINE_SALT) not in validSaltLengths:
print 'QENGINE_SALT must be 16, 24, or 32 bytes long'
sys.exit()
# optional environment variables that require validation
try:
self.QENGINE_IV = configuration['QENGINE_IV']
if len(self.QENGINE_IV) != 16:
print 'QENGINE_IV must be 16 bytes long'
sys.exit()
except:
self.QENGINE_IV = ''.join(choice(ascii_uppercase) for i in range(16))
try:
self.QENGINE_QUESTION_LOCATION = configuration['QENGINE_QUESTION_LOCATION']
if self.QENGINE_QUESTION_LOCATION == 'filesystem':
pass
elif self.QENGINE_QUESTION_LOCATION == 'mysql':
pass
elif self.QENGINE_QUESTION_LOCATION == 'mongodb':
pass
elif self.QENGINE_QUESTION_LOCATION == None:
self.QENGINE_QUESTION_LOCATION = 'filesystem'
else:
print "QENGINE_QUESTION_LOCATION must be 'filesystem', 'mysql', or 'mongodb'"
sys.exit()
except:
self.QENGINE_QUESTION_LOCATION = 'filesystem'
# optional environment variables
self.QENGINE_ENABLE_REMOTE = self._loadConfigOption('QENGINE_ENABLE_REMOTE',False,configuration)
self.QENGINE_LOG_REQUESTS = self._loadConfigOption('QENGINE_LOG_REQUESTS',False,configuration)
self.QENGINE_LOG_RESPONSE = self._loadConfigOption('QENGINE_LOG_RESPONSE',False,configuration)
self.QENGINE_MOODLE_HACKS = self._loadConfigOption('QENGINE_MOODLE_HACKS',False,configuration)
self.QENGINE_NO_CACHE = self._loadConfigOption('QENGINE_NO_CACHE',False,configuration)
self.QENGINE_PASSKEY = self._loadConfigOption('QENGINE_PASSKEY',None,configuration)
# only required if no cache is false
if not self.QENGINE_NO_CACHE:
try:
self.QENGINE_CACHE_DIR = configuration['QENGINE_CACHE_DIR']
except Exception as e:
print e
sys.exit()
self.ENGINEINFO = {}
# engine info
for key in configuration:
if 'ENGINEINFO_' in key:
ekey = key.split('_',1)[1]
if len(ekey) > 0:
self.ENGINEINFO[ekey] = configuration[key]
self.BLOCKS = lb.loadblocks(configuration)
self.initialized = True
| apache-2.0 | -6,168,134,766,040,976,000 | 27.666667 | 105 | 0.69818 | false |
JiangKlijna/design-pattern | AbstractFactoryPattern/AbstractFactory.py | 1 | 1135 | # encoding = utf-8
import random
class PetShop:
"""A pet shop"""
def __init__(self, animal_factory=None):
self.pet_factory = animal_factory
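# the factory can be any object that provides get_pet() and get_food();
# the shop never references a concrete animal class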
def show_pet(self):
pet = self.pet_factory.get_pet()
print("This is a lovely", pet)
print("It says", pet.speak())
print("It eats", self.pet_factory.get_food())
# Stuff that our factory makes
class Dog:
def speak(self):
return "woof"
def __str__(self):
return "Dog"
class Cat:
def speak(self):
return "meow"
def __str__(self):
return "Cat"
# Factory classes
class DogFactory:
def get_pet(self):
return Dog()
def get_food(self):
return "dog food"
class CatFactory:
def get_pet(self):
return Cat()
def get_food(self):
return "cat food"
# Create the proper family
def get_factory():
return random.choice([DogFactory, CatFactory])()
# Show pets with various factories
if __name__ == "__main__":
shop = PetShop()
for i in range(3):
shop.pet_factory = get_factory()
shop.show_pet()
print("=" * 20)
| apache-2.0 | 3,855,028,213,898,458,600 | 17.015873 | 53 | 0.570044 | false |
guorendong/iridium-browser-ubuntu | third_party/chromite/cli/cros/lint.py | 1 | 16717 | # Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This module is not automatically loaded by the `cros` helper. The filename
# would need a "cros_" prefix to make that happen. It lives here so that it
# is alongside the cros_lint.py file.
#
# For msg namespaces, the 9xxx should generally be reserved for our own use.
"""Additional lint modules loaded by pylint.
This is loaded by pylint directly via its pylintrc file:
load-plugins=chromite.cli.cros.lint
Then pylint will import the register function and call it. So we can have
as many/few checkers as we want in this one module.
"""
from __future__ import print_function
import os
import sys
from pylint.checkers import BaseChecker
from pylint.interfaces import IAstroidChecker
# pylint: disable=too-few-public-methods
class DocStringChecker(BaseChecker):
"""PyLint AST based checker to verify PEP 257 compliance
See our style guide for more info:
http://dev.chromium.org/chromium-os/python-style-guidelines#TOC-Describing-arguments-in-docstrings
"""
# TODO: See about merging with the pep257 project:
# https://github.com/GreenSteam/pep257
__implements__ = IAstroidChecker
# pylint: disable=class-missing-docstring,multiple-statements
class _MessageCP001(object): pass
class _MessageCP002(object): pass
class _MessageCP003(object): pass
class _MessageCP004(object): pass
class _MessageCP005(object): pass
class _MessageCP006(object): pass
class _MessageCP007(object): pass
class _MessageCP008(object): pass
class _MessageCP009(object): pass
class _MessageCP010(object): pass
class _MessageCP011(object): pass
class _MessageCP012(object): pass
class _MessageCP013(object): pass
class _MessageCP014(object): pass
class _MessageCP015(object): pass
# pylint: enable=class-missing-docstring,multiple-statements
name = 'doc_string_checker'
priority = -1
MSG_ARGS = 'offset:%(offset)i: {%(line)s}'
msgs = {
'C9001': ('Modules should have docstrings (even a one liner)',
('module-missing-docstring'), _MessageCP001),
'C9002': ('Classes should have docstrings (even a one liner)',
('class-missing-docstring'), _MessageCP002),
'C9003': ('Trailing whitespace in docstring'
': %s' % MSG_ARGS,
('docstring-trailing-whitespace'), _MessageCP003),
'C9004': ('Leading whitespace in docstring (excess or missing)'
': %s' % MSG_ARGS,
('docstring-leading-whitespace'), _MessageCP004),
'C9005': ('Closing triple quotes should not be cuddled',
('docstring-cuddled-quotes'), _MessageCP005),
'C9006': ('Section names should be preceded by one blank line'
': %s' % MSG_ARGS,
('docstring-section-newline'), _MessageCP006),
'C9007': ('Section names should be "Args:", "Returns:", "Yields:", '
'and "Raises:": %s' % MSG_ARGS,
('docstring-section-name'), _MessageCP007),
'C9008': ('Sections should be in the order: Args, Returns/Yields, Raises',
('docstring-section-order'), _MessageCP008),
'C9009': ('First line should be a short summary',
('docstring-first-line'), _MessageCP009),
'C9010': ('Not all args mentioned in doc string: |%(arg)s|',
('docstring-missing-args'), _MessageCP010),
'C9011': ('Variable args/keywords are named *args/**kwargs, not %(arg)s',
('docstring-misnamed-args'), _MessageCP011),
'C9012': ('Incorrectly formatted Args section: %(arg)s',
('docstring-arg-spacing'), _MessageCP012),
'C9013': ('Too many blank lines in a row: %s' % MSG_ARGS,
('docstring-too-many-newlines'), _MessageCP013),
'C9014': ('Second line should be blank',
('docstring-second-line-blank'), _MessageCP014),
'C9015': ('Section indentation is incorrect: %s' % MSG_ARGS,
('docstring-section-indent'), _MessageCP015),
}
options = ()
# TODO: Should we enforce Examples?
VALID_SECTIONS = ('Args', 'Returns', 'Yields', 'Raises',)
def visit_function(self, node):
"""Verify function docstrings"""
if node.doc:
lines = node.doc.split('\n')
self._check_common(node, lines)
self._check_last_line_function(node, lines)
self._check_section_lines(node, lines)
self._check_all_args_in_doc(node, lines)
self._check_func_signature(node)
else:
# This is what C0111 already does for us, so ignore.
pass
def visit_module(self, node):
"""Verify module docstrings"""
if node.doc:
self._check_common(node)
else:
# Ignore stub __init__.py files.
if os.path.basename(node.file) == '__init__.py':
return
self.add_message('C9001', node=node)
def visit_class(self, node):
"""Verify class docstrings"""
if node.doc:
self._check_common(node)
else:
self.add_message('C9002', node=node, line=node.fromlineno)
def _check_common(self, node, lines=None):
"""Common checks we enforce on all docstrings"""
if lines is None:
lines = node.doc.split('\n')
funcs = (
self._check_first_line,
self._check_second_line_blank,
self._check_whitespace,
self._check_last_line,
)
for f in funcs:
f(node, lines)
def _check_first_line(self, node, lines):
"""Make sure first line is a short summary by itself"""
if lines[0] == '':
self.add_message('C9009', node=node, line=node.fromlineno)
def _check_second_line_blank(self, node, lines):
"""Make sure the second line is blank"""
if len(lines) > 1 and lines[1] != '':
self.add_message('C9014', node=node, line=node.fromlineno)
def _check_whitespace(self, node, lines):
"""Verify whitespace is sane"""
# Make sure first line doesn't have leading whitespace.
if lines[0].lstrip() != lines[0]:
margs = {'offset': 0, 'line': lines[0]}
self.add_message('C9004', node=node, line=node.fromlineno, args=margs)
# Verify no trailing whitespace.
# We skip the last line since it's supposed to be pure whitespace.
#
# Also check for multiple blank lines in a row.
last_blank = False
for i, l in enumerate(lines[:-1]):
margs = {'offset': i, 'line': l}
if l.rstrip() != l:
self.add_message('C9003', node=node, line=node.fromlineno, args=margs)
curr_blank = l == ''
if last_blank and curr_blank:
self.add_message('C9013', node=node, line=node.fromlineno, args=margs)
last_blank = curr_blank
# Now specially handle the last line.
l = lines[-1]
if l.strip() != '' and l.rstrip() != l:
margs = {'offset': len(lines), 'line': l}
self.add_message('C9003', node=node, line=node.fromlineno, args=margs)
def _check_last_line(self, node, lines):
"""Make sure last line is all by itself"""
if len(lines) > 1:
if lines[-1].strip() != '':
self.add_message('C9005', node=node, line=node.fromlineno)
def _check_last_line_function(self, node, lines):
"""Make sure last line is indented"""
if len(lines) > 1:
# The -1 line holds the """ itself and that should be indented.
if lines[-1] == '':
margs = {'offset': len(lines) - 1, 'line': lines[-1]}
self.add_message('C9005', node=node, line=node.fromlineno, args=margs)
# The last line should not be blank.
if lines[-2] == '':
margs = {'offset': len(lines) - 2, 'line': lines[-2]}
self.add_message('C9003', node=node, line=node.fromlineno, args=margs)
def _check_section_lines(self, node, lines):
"""Verify each section (Args/Returns/Yields/Raises) is sane"""
lineno_sections = [-1] * len(self.VALID_SECTIONS)
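# records the line offset where each valid section header appears (-1 when
# absent) so the ordering check below can compare their positions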
invalid_sections = (
# Handle common misnamings.
'arg', 'argument', 'arguments',
'ret', 'rets', 'return',
'yield', 'yeild', 'yeilds',
'raise', 'throw', 'throws',
)
last = lines[0].strip()
for i, line in enumerate(lines[1:]):
margs = {'offset': i + 1, 'line': line}
l = line.strip()
# Catch semi-common javadoc style.
if l.startswith('@param') or l.startswith('@return'):
self.add_message('C9007', node=node, line=node.fromlineno, args=margs)
# See if we can detect incorrect behavior.
section = l.split(':', 1)[0]
if section in self.VALID_SECTIONS or section.lower() in invalid_sections:
# Make sure it has some number of leading whitespace.
if not line.startswith(' '):
self.add_message('C9004', node=node, line=node.fromlineno, args=margs)
# Make sure it has a single trailing colon.
if l != '%s:' % section:
self.add_message('C9007', node=node, line=node.fromlineno, args=margs)
# Make sure it's valid.
if section.lower() in invalid_sections:
self.add_message('C9007', node=node, line=node.fromlineno, args=margs)
else:
# Gather the order of the sections.
lineno_sections[self.VALID_SECTIONS.index(section)] = i
# Verify blank line before it.
if last != '':
self.add_message('C9006', node=node, line=node.fromlineno, args=margs)
last = l
# Make sure the sections are in the right order.
valid_lineno = lambda x: x >= 0
lineno_sections = filter(valid_lineno, lineno_sections)
if lineno_sections != sorted(lineno_sections):
self.add_message('C9008', node=node, line=node.fromlineno)
# Check the indentation level on all the sections.
# The -1 line holds the trailing """ itself and that should be indented to
# the correct number of spaces. All checks below are relative to this. If
# it is off, then these checks might report weird errors, but that's ok as
# ultimately the docstring is still wrong :).
indent_len = len(lines[-1])
for lineno in lineno_sections:
# First the section header (e.g. Args:).
lineno += 1
line = lines[lineno]
if len(line) - len(line.lstrip(' ')) != indent_len:
margs = {'offset': lineno, 'line': line}
self.add_message('C9015', node=node, line=node.fromlineno, args=margs)
def _check_all_args_in_doc(self, node, lines):
"""All function arguments are mentioned in doc"""
if not hasattr(node, 'argnames'):
return
# Locate the start of the args section.
arg_lines = []
for l in lines:
if arg_lines:
if l.strip() in [''] + ['%s:' % x for x in self.VALID_SECTIONS]:
break
elif l.strip() != 'Args:':
continue
arg_lines.append(l)
else:
# If they don't have an Args section, then give it a pass.
return
# Now verify all args exist.
# TODO: Should we verify arg order matches doc order ?
# TODO: Should we check indentation of wrapped docs ?
missing_args = []
for arg in node.args.args:
# Ignore class related args.
if arg.name in ('cls', 'self'):
continue
# Ignore ignored args.
if arg.name.startswith('_'):
continue
for l in arg_lines:
aline = l.lstrip()
if aline.startswith('%s:' % arg.name):
amsg = aline[len(arg.name) + 1:]
if len(amsg) and len(amsg) - len(amsg.lstrip()) != 1:
margs = {'arg': l}
self.add_message('C9012', node=node, line=node.fromlineno,
args=margs)
break
else:
missing_args.append(arg.name)
if missing_args:
margs = {'arg': '|, |'.join(missing_args)}
self.add_message('C9010', node=node, line=node.fromlineno, args=margs)
def _check_func_signature(self, node):
"""Require *args to be named args, and **kwargs kwargs"""
vararg = node.args.vararg
if vararg and vararg != 'args' and vararg != '_args':
margs = {'arg': vararg}
self.add_message('C9011', node=node, line=node.fromlineno, args=margs)
kwarg = node.args.kwarg
if kwarg and kwarg != 'kwargs' and kwarg != '_kwargs':
margs = {'arg': kwarg}
self.add_message('C9011', node=node, line=node.fromlineno, args=margs)
class Py3kCompatChecker(BaseChecker):
"""Make sure we enforce py3k compatible features"""
__implements__ = IAstroidChecker
# pylint: disable=class-missing-docstring,multiple-statements
class _MessageR9100(object): pass
# pylint: enable=class-missing-docstring,multiple-statements
name = 'py3k_compat_checker'
priority = -1
MSG_ARGS = 'offset:%(offset)i: {%(line)s}'
msgs = {
'R9100': ('Missing "from __future__ import print_function" line',
('missing-print-function'), _MessageR9100),
}
options = ()
def __init__(self, *args, **kwargs):
super(Py3kCompatChecker, self).__init__(*args, **kwargs)
self.seen_print_func = False
self.saw_imports = False
def close(self):
"""Called when done processing module"""
if not self.seen_print_func:
# Do not warn if the module doesn't import anything at all (like
# empty __init__.py files).
if self.saw_imports:
self.add_message('R9100')
def _check_print_function(self, node):
"""Verify print_function is imported"""
if node.modname == '__future__':
for name, _ in node.names:
if name == 'print_function':
self.seen_print_func = True
def visit_from(self, node):
"""Process 'from' statements"""
self.saw_imports = True
self._check_print_function(node)
def visit_import(self, _node):
"""Process 'import' statements"""
self.saw_imports = True
class SourceChecker(BaseChecker):
"""Make sure we enforce rules on the source."""
__implements__ = IAstroidChecker
# pylint: disable=class-missing-docstring,multiple-statements
class _MessageR9200(object): pass
class _MessageR9201(object): pass
class _MessageR9202(object): pass
# pylint: enable=class-missing-docstring,multiple-statements
name = 'source_checker'
priority = -1
MSG_ARGS = 'offset:%(offset)i: {%(line)s}'
msgs = {
'R9200': ('Shebang should be #!/usr/bin/python2 or #!/usr/bin/python3',
('bad-shebang'), _MessageR9200),
'R9201': ('Shebang is missing, but file is executable',
('missing-shebang'), _MessageR9201),
'R9202': ('Shebang is set, but file is not executable',
('spurious-shebang'), _MessageR9202),
}
options = ()
def visit_module(self, node):
"""Called when the whole file has been read"""
stream = node.file_stream
stream.seek(0)
self._check_shebang(node, stream)
def _check_shebang(self, _node, stream):
"""Verify the shebang is version specific"""
st = os.fstat(stream.fileno())
mode = st.st_mode
executable = bool(mode & 0o0111)
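# 0o0111 covers the user, group and other execute bits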
shebang = stream.readline()
if shebang[0:2] != '#!':
if executable:
self.add_message('R9201')
return
elif not executable:
self.add_message('R9202')
parts = shebang.split()
if parts[0] not in ('#!/usr/bin/python2', '#!/usr/bin/python3'):
self.add_message('R9200')
class ChromiteLoggingChecker(BaseChecker):
"""Make sure we enforce rules on importing logging."""
__implements__ = IAstroidChecker
# pylint: disable=class-missing-docstring,multiple-statements
class _MessageR9301(object): pass
# pylint: enable=class-missing-docstring,multiple-statements
name = 'chromite_logging_checker'
priority = -1
MSG_ARGS = 'offset:%(offset)i: {%(line)s}'
msgs = {
'R9301': ('logging is deprecated. Use "from chromite.lib import '
'cros_logging as logging" to import chromite/lib/cros_logging',
('cros-logging-import'), _MessageR9301),
}
options = ()
# This checker is disabled by default because we only want to disallow "import
# logging" in chromite and not in other places cros lint is used. To enable
# this checker, modify the pylintrc file.
enabled = False
def visit_import(self, node):
"""Called when node is an import statement."""
for name, _ in node.names:
if name == 'logging':
self.add_message('R9301', line=node.lineno)
def register(linter):
"""pylint will call this func to register all our checkers"""
# Walk all the classes in this module and register ours.
this_module = sys.modules[__name__]
for member in dir(this_module):
if (not member.endswith('Checker') or
member in ('BaseChecker', 'IAstroidChecker')):
continue
cls = getattr(this_module, member)
linter.register_checker(cls(linter))
| bsd-3-clause | -7,882,174,848,410,504,000 | 34.796574 | 100 | 0.627864 | false |
deepmind/mathematics_dataset | mathematics_dataset/sample/number.py | 1 | 5071 | # Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate random integers and rationals with minimum guarantees on entropy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
# Dependency imports
from mathematics_dataset.util import display
import numpy as np
import six
import sympy
def _coprime_density(value):
"""Returns float > 0; asymptotic density of integers coprime to `value`."""
factors = sympy.factorint(value)
density = 1.0
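# Euler product: the density of integers coprime to `value` is the product
# of (1 - 1/p) over value's distinct prime factors p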
for prime in six.iterkeys(factors):
density *= 1 - 1 / prime
return density
def integer(entropy, signed, min_abs=0, coprime_to=1):
"""Returns an integer from a set of size ceil(10**entropy).
If `signed` is True, then includes negative integers, otherwise includes just
positive integers.
Args:
entropy: Float >= 0.
signed: Boolean. Whether to also return negative numbers.
min_abs: Integer >= 0. The minimum absolute value.
coprime_to: Optional integer >= 1. The returned integer is guaranteed to be
coprime to `coprime_to`, with entropy still accounted for.
Returns:
Integer.
"""
assert isinstance(min_abs, int) and not isinstance(min_abs, bool)
coprime_to = abs(coprime_to)
assert min_abs >= 0
max_ = math.pow(10, entropy)
max_ += min_abs
if coprime_to >= 2:
max_ = max_ / _coprime_density(coprime_to) + 1
if signed:
max_ = int(math.ceil(max_ / 2))
range_ = [-max_, max_]
else:
max_ = int(math.ceil(max_))
range_ = [min_abs, max_]
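# rejection sampling: redraw until the value honours both the min_abs and
# coprimality constraints; the range was widened above so that roughly
# 10**entropy candidates survive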
while True:
value = random.randint(*range_)
if abs(value) >= min_abs and sympy.gcd(value, coprime_to) == 1:
break
return sympy.Integer(value)
def non_integer_rational(entropy, signed):
"""Similar args to `integer`. Entropy split between denom and numer."""
numer_entropy = random.uniform(0, entropy)
denom_entropy = entropy - numer_entropy
numer = integer(numer_entropy, signed, min_abs=1)
denom = integer(denom_entropy, False, min_abs=2, coprime_to=numer)
return sympy.Rational(numer, denom)
def integer_or_rational(entropy, signed, min_abs=0):
"""Returns a rational, with 50% probability of it being an integer."""
if random.choice([False, True]):
return integer(entropy, signed, min_abs=min_abs)
else:
return non_integer_rational(entropy, signed)
def non_integer_decimal(entropy, signed):
"""Returns a random decimal; integer divided by random power of ten.
Guaranteed to be non-integer (i.e., numbers after the decimal point).
Args:
entropy: Float.
signed: Boolean. Whether to also return negative numbers.
Returns:
Non-integer decimal.
"""
while True:
base = integer(entropy, signed)
shift = random.randint(1, int(math.ceil(entropy)))
divisor = 10**shift
if base % divisor != 0:
return display.Decimal(sympy.Rational(base, divisor))
def integer_or_decimal(entropy, signed):
"""Returns integer or non-integer decimal; 50% probability of each."""
if random.choice([False, True]):
# Represent it as a decimal so that arithmetic operations are supported:
return display.Decimal(integer(entropy, signed))
else:
return non_integer_decimal(entropy, signed)
def entropy_of_value(value):
"""Returns "min entropy" that would give probability of getting this value."""
if isinstance(value, display.Decimal):
return entropy_of_value(sympy.numer(value))
if is_non_integer_rational(value):
numer = sympy.numer(value)
denom = sympy.denom(value)
return entropy_of_value(numer) + entropy_of_value(denom)
elif not is_integer(value):
raise ValueError('Unhandled value: {}'.format(value))
# Note: we sample integers in a range of size approx 10**entropy about zero,
# so assume that `abs(value)` is about half of the upper range.
return math.log10(5 * abs(value) + 1)
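# For example, entropy_of_value(20) = log10(101), roughly 2.0, matching the
# magnitude of values produced by integer(2, ...).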
def is_integer(value):
return isinstance(value, (int, np.int64, np.int32, sympy.Integer))
def is_positive_integer(value):
"""Filter for: value is a strictly positive integer."""
return is_integer(value) and value > 0
def is_integer_or_rational(value):
return is_integer(value) or isinstance(value, sympy.Rational)
def is_integer_or_decimal(value):
return is_integer(value) or isinstance(value, display.Decimal)
def is_integer_or_rational_or_decimal(value):
return is_integer_or_rational(value) or is_integer_or_decimal(value)
def is_non_integer_rational(value):
return is_integer_or_rational(value) and not is_integer(value)
| apache-2.0 | 1,212,512,588,697,373,000 | 29.733333 | 80 | 0.709525 | false |
rackspace/pyrax | tests/unit/test_autoscale.py | 1 | 69694 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import random
import unittest
from mock import patch
from mock import MagicMock as Mock
import pyrax
import pyrax.autoscale
from pyrax.autoscale import AutoScaleClient
from pyrax.autoscale import AutoScalePolicy
from pyrax.autoscale import AutoScaleWebhook
from pyrax.autoscale import ScalingGroup
from pyrax.autoscale import ScalingGroupManager
import pyrax.exceptions as exc
import pyrax.utils as utils
from pyrax import fakes
class AutoscaleTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(AutoscaleTest, self).__init__(*args, **kwargs)
def setUp(self):
self.identity = fakes.FakeIdentity()
self.scaling_group = fakes.FakeScalingGroup(self.identity)
def tearDown(self):
pass
def test_make_policies(self):
sg = self.scaling_group
p1 = utils.random_unicode()
p2 = utils.random_unicode()
sg.scalingPolicies = [{"name": p1}, {"name": p2}]
sg._make_policies()
self.assertEqual(len(sg.policies), 2)
polnames = [pol.name for pol in sg.policies]
self.assert_(p1 in polnames)
self.assert_(p2 in polnames)
def test_get_state(self):
sg = self.scaling_group
mgr = sg.manager
mgr.get_state = Mock()
sg.get_state()
mgr.get_state.assert_called_once_with(sg)
def test_pause(self):
sg = self.scaling_group
mgr = sg.manager
mgr.pause = Mock()
sg.pause()
mgr.pause.assert_called_once_with(sg)
def test_resume(self):
sg = self.scaling_group
mgr = sg.manager
mgr.resume = Mock()
sg.resume()
mgr.resume.assert_called_once_with(sg)
def test_update(self):
sg = self.scaling_group
mgr = sg.manager
mgr.update = Mock()
name = utils.random_unicode()
cooldown = utils.random_unicode()
min_entities = utils.random_unicode()
max_entities = utils.random_unicode()
metadata = utils.random_unicode()
sg.update(name=name, cooldown=cooldown, min_entities=min_entities,
max_entities=max_entities, metadata=metadata)
mgr.update.assert_called_once_with(sg, name=name, cooldown=cooldown,
min_entities=min_entities, max_entities=max_entities,
metadata=metadata)
def test_update_metadata(self):
sg = self.scaling_group
mgr = sg.manager
mgr.update_metadata = Mock()
metadata = utils.random_unicode()
sg.update_metadata(metadata)
mgr.update_metadata.assert_called_once_with(sg, metadata=metadata)
def test_get_configuration(self):
sg = self.scaling_group
mgr = sg.manager
mgr.get_configuration = Mock()
sg.get_configuration()
mgr.get_configuration.assert_called_once_with(sg)
def test_get_launch_config(self):
sg = self.scaling_group
mgr = sg.manager
mgr.get_launch_config = Mock()
sg.get_launch_config()
mgr.get_launch_config.assert_called_once_with(sg)
def test_update_launch_config(self):
sg = self.scaling_group
mgr = sg.manager
mgr.update_launch_config = Mock()
server_name = utils.random_unicode()
flavor = utils.random_unicode()
image = utils.random_unicode()
disk_config = utils.random_unicode()
metadata = utils.random_unicode()
personality = utils.random_unicode().encode("utf-8") # Must be bytes
networks = utils.random_unicode()
load_balancers = utils.random_unicode()
key_name = utils.random_unicode()
config_drive = utils.random_unicode()
user_data = utils.random_unicode()
sg.update_launch_config(server_name=server_name, flavor=flavor,
image=image, disk_config=disk_config, metadata=metadata,
personality=personality, networks=networks,
load_balancers=load_balancers, key_name=key_name,
config_drive=config_drive, user_data=user_data)
mgr.update_launch_config.assert_called_once_with(sg,
server_name=server_name, flavor=flavor, image=image,
disk_config=disk_config, metadata=metadata,
personality=personality, networks=networks,
load_balancers=load_balancers, key_name=key_name,
config_drive=config_drive, user_data=user_data)
def test_update_launch_metadata(self):
sg = self.scaling_group
mgr = sg.manager
mgr.update_launch_metadata = Mock()
metadata = utils.random_unicode()
sg.update_launch_metadata(metadata)
mgr.update_launch_metadata.assert_called_once_with(sg, metadata)
def test_add_policy(self):
sg = self.scaling_group
mgr = sg.manager
name = utils.random_unicode()
policy_type = utils.random_unicode()
cooldown = utils.random_unicode()
change = utils.random_unicode()
is_percent = utils.random_unicode()
desired_capacity = utils.random_unicode()
args = utils.random_unicode()
mgr.add_policy = Mock()
sg.add_policy(name, policy_type, cooldown, change,
is_percent=is_percent, desired_capacity=desired_capacity,
args=args)
mgr.add_policy.assert_called_once_with(sg, name, policy_type, cooldown,
change=change, is_percent=is_percent,
desired_capacity=desired_capacity, args=args)
def test_list_policies(self):
sg = self.scaling_group
mgr = sg.manager
mgr.list_policies = Mock()
sg.list_policies()
mgr.list_policies.assert_called_once_with(sg)
def test_get_policy(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
mgr.get_policy = Mock()
sg.get_policy(pol)
mgr.get_policy.assert_called_once_with(sg, pol)
def test_update_policy(self):
sg = self.scaling_group
mgr = sg.manager
policy = utils.random_unicode()
name = utils.random_unicode()
policy_type = utils.random_unicode()
cooldown = utils.random_unicode()
change = utils.random_unicode()
desired_capacity = utils.random_unicode()
is_percent = utils.random_unicode()
args = utils.random_unicode()
mgr.update_policy = Mock()
sg.update_policy(policy, name=name, policy_type=policy_type,
cooldown=cooldown, change=change, is_percent=is_percent,
desired_capacity=desired_capacity, args=args)
mgr.update_policy.assert_called_once_with(scaling_group=sg,
policy=policy, name=name, policy_type=policy_type,
cooldown=cooldown, change=change, is_percent=is_percent,
desired_capacity=desired_capacity, args=args)
def test_execute_policy(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
mgr.execute_policy = Mock()
sg.execute_policy(pol)
mgr.execute_policy.assert_called_once_with(scaling_group=sg,
policy=pol)
def test_delete_policy(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
mgr.delete_policy = Mock()
sg.delete_policy(pol)
mgr.delete_policy.assert_called_once_with(scaling_group=sg,
policy=pol)
def test_add_webhook(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
name = utils.random_unicode()
metadata = utils.random_unicode()
mgr.add_webhook = Mock()
sg.add_webhook(pol, name, metadata=metadata)
mgr.add_webhook.assert_called_once_with(sg, pol, name,
metadata=metadata)
def test_list_webhooks(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
mgr.list_webhooks = Mock()
sg.list_webhooks(pol)
mgr.list_webhooks.assert_called_once_with(sg, pol)
def test_update_webhook(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
hook = utils.random_unicode()
name = utils.random_unicode()
metadata = utils.random_unicode()
mgr.update_webhook = Mock()
sg.update_webhook(pol, hook, name=name, metadata=metadata)
mgr.update_webhook.assert_called_once_with(scaling_group=sg, policy=pol,
webhook=hook, name=name, metadata=metadata)
def test_update_webhook_metadata(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
hook = utils.random_unicode()
metadata = utils.random_unicode()
mgr.update_webhook_metadata = Mock()
sg.update_webhook_metadata(pol, hook, metadata=metadata)
mgr.update_webhook_metadata.assert_called_once_with(sg, pol, hook,
metadata)
def test_delete_webhook(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
hook = utils.random_unicode()
mgr.delete_webhook = Mock()
sg.delete_webhook(pol, hook)
mgr.delete_webhook.assert_called_once_with(sg, pol, hook)
def test_policy_count(self):
sg = self.scaling_group
num = random.randint(1, 100)
sg.policies = ["x"] * num
self.assertEqual(sg.policy_count, num)
def test_name(self):
sg = self.scaling_group
name = utils.random_unicode()
newname = utils.random_unicode()
sg.groupConfiguration = {"name": name}
self.assertEqual(sg.name, name)
sg.name = newname
self.assertEqual(sg.name, newname)
def test_cooldown(self):
sg = self.scaling_group
cooldown = utils.random_unicode()
newcooldown = utils.random_unicode()
sg.groupConfiguration = {"cooldown": cooldown}
self.assertEqual(sg.cooldown, cooldown)
sg.cooldown = newcooldown
self.assertEqual(sg.cooldown, newcooldown)
def test_metadata(self):
sg = self.scaling_group
metadata = utils.random_unicode()
newmetadata = utils.random_unicode()
sg.groupConfiguration = {"metadata": metadata}
self.assertEqual(sg.metadata, metadata)
sg.metadata = newmetadata
self.assertEqual(sg.metadata, newmetadata)
def test_min_entities(self):
sg = self.scaling_group
min_entities = utils.random_unicode()
newmin_entities = utils.random_unicode()
sg.groupConfiguration = {"minEntities": min_entities}
self.assertEqual(sg.min_entities, min_entities)
sg.min_entities = newmin_entities
self.assertEqual(sg.min_entities, newmin_entities)
def test_max_entities(self):
sg = self.scaling_group
max_entities = utils.random_unicode()
newmax_entities = utils.random_unicode()
sg.groupConfiguration = {"maxEntities": max_entities}
self.assertEqual(sg.max_entities, max_entities)
sg.max_entities = newmax_entities
self.assertEqual(sg.max_entities, newmax_entities)
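    # The next test pins down how get_state() reshapes the API response:
    # camelCase keys become snake_case and the active server dicts collapse
    # to bare ids.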
def test_mgr_get_state(self):
sg = self.scaling_group
mgr = sg.manager
id1 = utils.random_unicode()
id2 = utils.random_unicode()
ac = utils.random_unicode()
dc = utils.random_unicode()
pc = utils.random_unicode()
paused = utils.random_unicode()
statedict = {"group": {
"active": [{"id": id1}, {"id": id2}],
"activeCapacity": ac,
"desiredCapacity": dc,
"pendingCapacity": pc,
"paused": paused,
}}
expected = {
"active": [id1, id2],
"active_capacity": ac,
"desired_capacity": dc,
"pending_capacity": pc,
"paused": paused,
}
mgr.api.method_get = Mock(return_value=(None, statedict))
ret = mgr.get_state(sg)
self.assertEqual(ret, expected)
def test_mgr_pause(self):
sg = self.scaling_group
mgr = sg.manager
uri = "/%s/%s/pause" % (mgr.uri_base, sg.id)
mgr.api.method_post = Mock(return_value=(None, None))
mgr.pause(sg)
mgr.api.method_post.assert_called_once_with(uri)
def test_mgr_resume(self):
sg = self.scaling_group
mgr = sg.manager
uri = "/%s/%s/resume" % (mgr.uri_base, sg.id)
mgr.api.method_post = Mock(return_value=(None, None))
mgr.resume(sg)
mgr.api.method_post.assert_called_once_with(uri)
def test_mgr_get_configuration(self):
sg = self.scaling_group
mgr = sg.manager
uri = "/%s/%s/config" % (mgr.uri_base, sg.id)
conf = utils.random_unicode()
resp_body = {"groupConfiguration": conf}
mgr.api.method_get = Mock(return_value=(None, resp_body))
ret = mgr.get_configuration(sg)
mgr.api.method_get.assert_called_once_with(uri)
self.assertEqual(ret, conf)
def test_mgr_update(self):
sg = self.scaling_group
mgr = sg.manager
mgr.get = Mock(return_value=sg)
uri = "/%s/%s/config" % (mgr.uri_base, sg.id)
sg.name = utils.random_unicode()
sg.cooldown = utils.random_unicode()
sg.min_entities = utils.random_unicode()
sg.max_entities = utils.random_unicode()
metadata = utils.random_unicode()
mgr.api.method_put = Mock(return_value=(None, None))
expected_body = {"name": sg.name,
"cooldown": sg.cooldown,
"minEntities": sg.min_entities,
"maxEntities": sg.max_entities,
"metadata": metadata,
}
mgr.update(sg.id, metadata=metadata)
mgr.api.method_put.assert_called_once_with(uri, body=expected_body)
def test_mgr_replace(self):
sg = self.scaling_group
mgr = sg.manager
mgr.get = Mock(return_value=sg)
uri = "/%s/%s/config" % (mgr.uri_base, sg.id)
sg.name = utils.random_unicode()
sg.cooldown = utils.random_unicode()
sg.min_entities = utils.random_unicode()
sg.max_entities = utils.random_unicode()
metadata = utils.random_unicode()
new_name = utils.random_unicode()
new_cooldown = utils.random_unicode()
new_min = utils.random_unicode()
new_max = utils.random_unicode()
mgr.api.method_put = Mock(return_value=(None, None))
expected_body = {
"name": new_name,
"cooldown": new_cooldown,
"minEntities": new_min,
"maxEntities": new_max,
"metadata": {}
}
mgr.replace(sg.id, new_name, new_cooldown, new_min, new_max)
mgr.api.method_put.assert_called_once_with(uri, body=expected_body)
def test_mgr_update_metadata(self):
sg = self.scaling_group
mgr = sg.manager
mgr.get = Mock(return_value=sg)
sg.metadata = {"orig": "orig"}
metadata = {"new": "new"}
expected = sg.metadata.copy()
expected.update(metadata)
mgr.update = Mock()
mgr.update_metadata(sg.id, metadata)
mgr.update.assert_called_once_with(sg, metadata=expected)
def test_mgr_get_launch_config(self):
sg = self.scaling_group
mgr = sg.manager
typ = utils.random_unicode()
lbs = utils.random_unicode()
name = utils.random_unicode()
flv = utils.random_unicode()
img = utils.random_unicode()
dconfig = utils.random_unicode()
metadata = utils.random_unicode()
personality = utils.random_unicode()
networks = utils.random_unicode()
key_name = utils.random_unicode()
launchdict = {"launchConfiguration":
{"type": typ,
"args": {
"loadBalancers": lbs,
"server": {
"name": name,
"flavorRef": flv,
"imageRef": img,
"OS-DCF:diskConfig": dconfig,
"metadata": metadata,
"personality": personality,
"networks": networks,
"key_name": key_name,
},
},
},
}
expected = {
"type": typ,
"load_balancers": lbs,
"name": name,
"flavor": flv,
"image": img,
"disk_config": dconfig,
"metadata": metadata,
"personality": personality,
"networks": networks,
"key_name": key_name,
}
mgr.api.method_get = Mock(return_value=(None, launchdict))
uri = "/%s/%s/launch" % (mgr.uri_base, sg.id)
ret = mgr.get_launch_config(sg)
mgr.api.method_get.assert_called_once_with(uri)
self.assertEqual(ret, expected)
def test_mgr_update_launch_config(self):
sg = self.scaling_group
mgr = sg.manager
mgr.get = Mock(return_value=sg)
typ = utils.random_unicode()
lbs = utils.random_unicode()
name = utils.random_unicode()
flv = utils.random_unicode()
img = utils.random_unicode()
dconfig = utils.random_unicode()
metadata = utils.random_unicode()
personality = utils.random_unicode()
networks = utils.random_unicode()
sg.launchConfiguration = {}
body = {"type": "launch_server",
"args": {
"server": {
"name": name,
"imageRef": img,
"flavorRef": flv,
"OS-DCF:diskConfig": dconfig,
"personality": mgr._encode_personality(personality),
"networks": networks,
"metadata": metadata,
},
"loadBalancers": lbs,
},
}
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/launch" % (mgr.uri_base, sg.id)
mgr.update_launch_config(sg.id, server_name=name, flavor=flv, image=img,
disk_config=dconfig, metadata=metadata,
personality=personality, networks=networks, load_balancers=lbs)
mgr.api.method_put.assert_called_once_with(uri, body=body)
def test_mgr_update_launch_config_unset_personality(self):
sg = self.scaling_group
mgr = sg.manager
mgr.get = Mock(return_value=sg)
typ = utils.random_unicode()
lbs = utils.random_unicode()
name = utils.random_unicode()
flv = utils.random_unicode()
img = utils.random_unicode()
dconfig = utils.random_unicode()
metadata = utils.random_unicode()
personality = [{
"path": "/foo/bar",
"contents": "cHlyYXg="
}]
networks = utils.random_unicode()
sg.launchConfiguration = {
"type": "launch_server",
"args": {
"server": {
"name": name,
"imageRef": img,
"flavorRef": flv,
"OS-DCF:diskConfig": dconfig,
"personality": personality,
"networks": networks,
"metadata": metadata,
},
"loadBalancers": lbs,
},
}
body = {
"type": "launch_server",
"args": {
"server": {
"name": name,
"imageRef": img,
"flavorRef": flv,
"OS-DCF:diskConfig": dconfig,
"networks": networks,
"metadata": metadata,
},
"loadBalancers": lbs,
},
}
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/launch" % (mgr.uri_base, sg.id)
mgr.update_launch_config(sg.id, server_name=name, flavor=flv, image=img,
disk_config=dconfig, metadata=metadata,
personality=[], networks=networks, load_balancers=lbs)
mgr.api.method_put.assert_called_once_with(uri, body=body)
def test_mgr_update_launch_config_no_personality(self):
sg = self.scaling_group
mgr = sg.manager
mgr.get = Mock(return_value=sg)
typ = utils.random_unicode()
lbs = utils.random_unicode()
name = utils.random_unicode()
flv = utils.random_unicode()
img = utils.random_unicode()
dconfig = utils.random_unicode()
metadata = utils.random_unicode()
networks = utils.random_unicode()
sg.launchConfiguration = {}
body = {"type": "launch_server",
"args": {
"server": {
"name": name,
"imageRef": img,
"flavorRef": flv,
"OS-DCF:diskConfig": dconfig,
"networks": networks,
"metadata": metadata,
},
"loadBalancers": lbs,
},
}
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/launch" % (mgr.uri_base, sg.id)
mgr.update_launch_config(sg.id, server_name=name, flavor=flv, image=img,
disk_config=dconfig, metadata=metadata,
networks=networks, load_balancers=lbs)
mgr.api.method_put.assert_called_once_with(uri, body=body)
def test_mgr_update_launch_config_no_metadata(self):
sg = self.scaling_group
mgr = sg.manager
mgr.get = Mock(return_value=sg)
typ = utils.random_unicode()
lbs = utils.random_unicode()
name = utils.random_unicode()
flv = utils.random_unicode()
img = utils.random_unicode()
dconfig = utils.random_unicode()
networks = utils.random_unicode()
sg.launchConfiguration = {}
body = {"type": "launch_server",
"args": {
"server": {
"name": name,
"imageRef": img,
"flavorRef": flv,
"OS-DCF:diskConfig": dconfig,
"networks": networks,
},
"loadBalancers": lbs,
},
}
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/launch" % (mgr.uri_base, sg.id)
mgr.update_launch_config(sg.id, server_name=name, flavor=flv, image=img,
disk_config=dconfig, networks=networks, load_balancers=lbs)
mgr.api.method_put.assert_called_once_with(uri, body=body)
def test_mgr_update_launch_config_key_name(self):
sg = self.scaling_group
mgr = sg.manager
mgr.get = Mock(return_value=sg)
typ = utils.random_unicode()
lbs = utils.random_unicode()
name = utils.random_unicode()
flv = utils.random_unicode()
img = utils.random_unicode()
dconfig = utils.random_unicode()
metadata = utils.random_unicode()
personality = utils.random_unicode()
networks = utils.random_unicode()
key_name = utils.random_unicode()
sg.launchConfiguration = {}
body = {"type": "launch_server",
"args": {
"server": {
"name": name,
"imageRef": img,
"flavorRef": flv,
"OS-DCF:diskConfig": dconfig,
"networks": networks,
"metadata": metadata,
"key_name": key_name,
"personality": mgr._encode_personality(personality),
},
"loadBalancers": lbs,
},
}
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/launch" % (mgr.uri_base, sg.id)
mgr.update_launch_config(sg.id, server_name=name, flavor=flv, image=img,
disk_config=dconfig, metadata=metadata,
personality=personality, networks=networks, load_balancers=lbs,
key_name=key_name)
mgr.api.method_put.assert_called_once_with(uri, body=body)
def test_mgr_replace_launch_config(self):
sg = self.scaling_group
mgr = sg.manager
mgr.get = Mock(return_value=sg)
typ = utils.random_unicode()
lbs = utils.random_unicode()
name = utils.random_unicode()
flv = utils.random_unicode()
img = utils.random_unicode()
dconfig = utils.random_unicode()
metadata = utils.random_unicode()
personality = utils.random_unicode()
networks = utils.random_unicode()
sg.launchConfiguration = {
"type": typ,
"args": {
"server": {
"name": name,
"imageRef": img,
"flavorRef": flv,
"OS-DCF:diskConfig": dconfig,
"personality": personality,
"networks": networks,
"metadata": metadata,
},
"loadBalancers": lbs,
},
}
new_typ = utils.random_unicode()
new_name = utils.random_unicode()
new_flv = utils.random_unicode()
new_img = utils.random_unicode()
expected = {
"type": new_typ,
"args": {
"server": {
"name": new_name,
"imageRef": new_img,
"flavorRef": new_flv,
},
"loadBalancers": []
}
}
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/launch" % (mgr.uri_base, sg.id)
mgr.replace_launch_config(sg.id, launch_config_type=new_typ,
server_name=new_name, flavor=new_flv, image=new_img)
mgr.api.method_put.assert_called_once_with(uri, body=expected)
def test_mgr_update_launch_metadata(self):
sg = self.scaling_group
mgr = sg.manager
mgr.get = Mock(return_value=sg)
orig_meta = {"orig": "orig"}
new_meta = {"new": "new"}
sg.launchConfiguration = {"args": {"server": {"metadata": orig_meta}}}
expected = orig_meta.copy()
expected.update(new_meta)
mgr.update_launch_config = Mock()
mgr.update_launch_metadata(sg.id, new_meta)
mgr.update_launch_config.assert_called_once_with(sg, metadata=expected)
def test_mgr_add_policy(self):
sg = self.scaling_group
mgr = sg.manager
ret_body = {"policies": [{}]}
mgr.api.method_post = Mock(return_value=(None, ret_body))
uri = "/%s/%s/policies" % (mgr.uri_base, sg.id)
name = utils.random_unicode()
ptype = utils.random_unicode()
cooldown = utils.random_unicode()
change = utils.random_unicode()
for is_percent in (True, False):
post_body = {"name": name, "cooldown": cooldown, "type": ptype}
if is_percent:
post_body["changePercent"] = change
else:
post_body["change"] = change
ret = mgr.add_policy(sg, name, ptype, cooldown, change,
is_percent=is_percent)
mgr.api.method_post.assert_called_with(uri, body=[post_body])
self.assert_(isinstance(ret, AutoScalePolicy))
def test_mgr_create_policy_body(self):
sg = self.scaling_group
mgr = sg.manager
name = utils.random_unicode()
ptype = utils.random_unicode()
cooldown = utils.random_unicode()
desired_capacity = utils.random_unicode()
args = utils.random_unicode()
change = utils.random_unicode()
expected_pct = {"name": name,
"cooldown": cooldown,
"type": ptype,
"desiredCapacity": desired_capacity,
"args": args
}
expected_nopct = expected_pct.copy()
expected_pct["changePercent"] = change
expected_nopct["change"] = change
ret_pct = mgr._create_policy_body(name, ptype, cooldown, change=change,
is_percent=True, desired_capacity=desired_capacity, args=args)
ret_nopct = mgr._create_policy_body(name, ptype, cooldown,
change=change, is_percent=False,
desired_capacity=desired_capacity, args=args)
self.assertEqual(ret_nopct, expected_nopct)
self.assertEqual(ret_pct, expected_pct)
def test_mgr_add_policy_desired_capacity(self):
sg = self.scaling_group
mgr = sg.manager
ret_body = {"policies": [{}]}
mgr.api.method_post = Mock(return_value=(None, ret_body))
uri = "/%s/%s/policies" % (mgr.uri_base, sg.id)
name = utils.random_unicode()
ptype = utils.random_unicode()
cooldown = utils.random_unicode()
desired_capacity = utils.random_unicode()
post_body = {
"name": name,
"cooldown": cooldown,
"type": ptype,
"desiredCapacity": desired_capacity,
}
ret = mgr.add_policy(sg, name, ptype, cooldown,
desired_capacity=desired_capacity)
mgr.api.method_post.assert_called_with(uri, body=[post_body])
self.assert_(isinstance(ret, AutoScalePolicy))
def test_mgr_list_policies(self):
sg = self.scaling_group
mgr = sg.manager
ret_body = {"policies": [{}]}
mgr.api.method_get = Mock(return_value=(None, ret_body))
uri = "/%s/%s/policies" % (mgr.uri_base, sg.id)
ret = mgr.list_policies(sg)
mgr.api.method_get.assert_called_once_with(uri)
def test_mgr_get_policy(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
ret_body = {"policy": {}}
uri = "/%s/%s/policies/%s" % (mgr.uri_base, sg.id, pol)
mgr.api.method_get = Mock(return_value=(None, ret_body))
ret = mgr.get_policy(sg, pol)
self.assert_(isinstance(ret, AutoScalePolicy))
mgr.api.method_get.assert_called_once_with(uri)
def test_mgr_replace_policy(self):
sg = self.scaling_group
mgr = sg.manager
pol_id = utils.random_unicode()
info = {
"name": utils.random_unicode(),
"type": utils.random_unicode(),
"cooldown": utils.random_unicode(),
"change": utils.random_unicode(),
"args": utils.random_unicode(),
}
policy = fakes.FakeAutoScalePolicy(mgr, info, sg)
mgr.get_policy = Mock(return_value=policy)
new_name = utils.random_unicode()
new_type = utils.random_unicode()
new_cooldown = utils.random_unicode()
new_change_percent = utils.random_unicode()
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/policies/%s" % (mgr.uri_base, sg.id, pol_id)
expected = {
"name": new_name,
"type": new_type,
"cooldown": new_cooldown,
"changePercent": new_change_percent,
}
ret = mgr.replace_policy(sg, pol_id, name=new_name,
policy_type=new_type, cooldown=new_cooldown,
change=new_change_percent, is_percent=True)
mgr.api.method_put.assert_called_with(uri, body=expected)
def test_mgr_update_policy(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
name = utils.random_unicode()
ptype = utils.random_unicode()
cooldown = utils.random_unicode()
change = utils.random_unicode()
args = utils.random_unicode()
mgr.get_policy = Mock(return_value=fakes.FakeAutoScalePolicy(mgr, {},
sg))
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/policies/%s" % (mgr.uri_base, sg.id, pol)
for is_percent in (True, False):
put_body = {"name": name, "cooldown": cooldown, "type": ptype,
"args": args}
if is_percent:
put_body["changePercent"] = change
else:
put_body["change"] = change
ret = mgr.update_policy(sg, pol, name=name, policy_type=ptype,
cooldown=cooldown, change=change, is_percent=is_percent,
args=args)
mgr.api.method_put.assert_called_with(uri, body=put_body)
def test_mgr_update_policy_desired_to_desired(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
name = utils.random_unicode()
ptype = utils.random_unicode()
cooldown = utils.random_unicode()
change = utils.random_unicode()
args = utils.random_unicode()
new_desired_capacity = 10
old_info = {"desiredCapacity": 0}
mgr.get_policy = Mock(
return_value=fakes.FakeAutoScalePolicy(mgr, old_info, sg))
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/policies/%s" % (mgr.uri_base, sg.id, pol)
put_body = {"name": name, "cooldown": cooldown, "type": ptype,
"desiredCapacity": new_desired_capacity}
ret = mgr.update_policy(sg, pol, name=name, policy_type=ptype,
cooldown=cooldown, desired_capacity=new_desired_capacity)
mgr.api.method_put.assert_called_with(uri, body=put_body)
def test_mgr_update_policy_change_to_desired(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
name = utils.random_unicode()
ptype = utils.random_unicode()
cooldown = utils.random_unicode()
change = utils.random_unicode()
args = utils.random_unicode()
new_desired_capacity = 10
old_info = {"change": -1}
mgr.get_policy = Mock(
return_value=fakes.FakeAutoScalePolicy(mgr, old_info, sg))
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/policies/%s" % (mgr.uri_base, sg.id, pol)
put_body = {"name": name, "cooldown": cooldown, "type": ptype,
"desiredCapacity": new_desired_capacity}
ret = mgr.update_policy(sg, pol, name=name, policy_type=ptype,
cooldown=cooldown, desired_capacity=new_desired_capacity)
mgr.api.method_put.assert_called_with(uri, body=put_body)
def test_mgr_update_policy_desired_to_change(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
name = utils.random_unicode()
ptype = utils.random_unicode()
cooldown = utils.random_unicode()
change = utils.random_unicode()
args = utils.random_unicode()
new_change = 1
old_info = {"desiredCapacity": 0}
mgr.get_policy = Mock(
return_value=fakes.FakeAutoScalePolicy(mgr, old_info, sg))
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/policies/%s" % (mgr.uri_base, sg.id, pol)
put_body = {"name": name, "cooldown": cooldown, "type": ptype,
"change": new_change}
ret = mgr.update_policy(sg, pol, name=name, policy_type=ptype,
cooldown=cooldown, change=new_change)
mgr.api.method_put.assert_called_with(uri, body=put_body)
def test_mgr_update_policy_maintain_desired_capacity(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
name = utils.random_unicode()
ptype = utils.random_unicode()
cooldown = utils.random_unicode()
change = utils.random_unicode()
args = utils.random_unicode()
new_name = utils.random_unicode()
old_capacity = 0
old_info = {
"type": ptype,
"desiredCapacity": old_capacity,
"cooldown": cooldown,
}
mgr.get_policy = Mock(
return_value=fakes.FakeAutoScalePolicy(mgr, old_info, sg))
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/policies/%s" % (mgr.uri_base, sg.id, pol)
put_body = {"name": new_name, "cooldown": cooldown, "type": ptype,
"desiredCapacity": old_capacity}
ret = mgr.update_policy(sg, pol, name=new_name)
mgr.api.method_put.assert_called_with(uri, body=put_body)
def test_mgr_update_policy_maintain_is_percent(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
name = utils.random_unicode()
ptype = utils.random_unicode()
cooldown = utils.random_unicode()
new_name = utils.random_unicode()
old_percent = 10
old_info = {
"type": ptype,
"changePercent": old_percent,
"cooldown": cooldown,
}
mgr.get_policy = Mock(
return_value=fakes.FakeAutoScalePolicy(mgr, old_info, sg))
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/policies/%s" % (mgr.uri_base, sg.id, pol)
put_body = {"name": new_name, "cooldown": cooldown, "type": ptype,
"changePercent": old_percent}
ret = mgr.update_policy(sg, pol, name=new_name)
mgr.api.method_put.assert_called_with(uri, body=put_body)
def test_mgr_update_policy_maintain_is_absolute(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
name = utils.random_unicode()
ptype = utils.random_unicode()
cooldown = utils.random_unicode()
change = utils.random_unicode()
new_name = utils.random_unicode()
old_change = 10
old_info = {
"type": ptype,
"change": old_change,
"cooldown": cooldown,
}
mgr.get_policy = Mock(
return_value=fakes.FakeAutoScalePolicy(mgr, old_info, sg))
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/policies/%s" % (mgr.uri_base, sg.id, pol)
put_body = {"name": new_name, "cooldown": cooldown, "type": ptype,
"change": old_change}
ret = mgr.update_policy(sg, pol, name=new_name)
mgr.api.method_put.assert_called_with(uri, body=put_body)
def test_mgr_execute_policy(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
uri = "/%s/%s/policies/%s/execute" % (mgr.uri_base, sg.id, pol)
mgr.api.method_post = Mock(return_value=(None, None))
mgr.execute_policy(sg, pol)
mgr.api.method_post.assert_called_once_with(uri)
def test_mgr_delete_policy(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
uri = "/%s/%s/policies/%s" % (mgr.uri_base, sg.id, pol)
mgr.api.method_delete = Mock(return_value=(None, None))
mgr.delete_policy(sg, pol)
mgr.api.method_delete.assert_called_once_with(uri)
def test_mgr_add_webhook(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
ret_body = {"webhooks": [{}]}
mgr.api.method_post = Mock(return_value=(None, ret_body))
uri = "/%s/%s/policies/%s/webhooks" % (mgr.uri_base, sg.id, pol)
mgr.get_policy = Mock(return_value=fakes.FakeAutoScalePolicy(mgr, {},
sg))
name = utils.random_unicode()
metadata = utils.random_unicode()
post_body = {"name": name, "metadata": metadata}
ret = mgr.add_webhook(sg, pol, name, metadata=metadata)
mgr.api.method_post.assert_called_with(uri, body=[post_body])
self.assert_(isinstance(ret, AutoScaleWebhook))
def test_mgr_list_webhooks(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
ret_body = {"webhooks": [{}]}
mgr.api.method_get = Mock(return_value=(None, ret_body))
mgr.get_policy = Mock(return_value=fakes.FakeAutoScalePolicy(mgr, {},
sg))
uri = "/%s/%s/policies/%s/webhooks" % (mgr.uri_base, sg.id, pol.id)
ret = mgr.list_webhooks(sg, pol)
mgr.api.method_get.assert_called_once_with(uri)
def test_mgr_get_webhook(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
hook = utils.random_unicode()
ret_body = {"webhook": {}}
uri = "/%s/%s/policies/%s/webhooks/%s" % (mgr.uri_base, sg.id, pol.id,
hook)
mgr.api.method_get = Mock(return_value=(None, ret_body))
ret = mgr.get_webhook(sg, pol, hook)
self.assert_(isinstance(ret, AutoScaleWebhook))
mgr.api.method_get.assert_called_once_with(uri)
def test_mgr_replace_webhook(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
hook = utils.random_unicode()
info = {"name": utils.random_unicode(),
"metadata": utils.random_unicode()}
hook_obj = fakes.FakeAutoScaleWebhook(mgr, info, pol, sg)
new_name = utils.random_unicode()
new_metadata = utils.random_unicode()
mgr.get_webhook = Mock(return_value=hook_obj)
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/policies/%s/webhooks/%s" % (mgr.uri_base, sg.id, pol.id,
hook)
expected = {"name": new_name, "metadata": {}}
ret = mgr.replace_webhook(sg, pol, hook, name=new_name)
mgr.api.method_put.assert_called_with(uri, body=expected)
def test_mgr_update_webhook(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
hook = utils.random_unicode()
hook_obj = fakes.FakeAutoScaleWebhook(mgr, {}, pol, sg)
name = utils.random_unicode()
metadata = utils.random_unicode()
mgr.get_webhook = Mock(return_value=hook_obj)
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/policies/%s/webhooks/%s" % (mgr.uri_base, sg.id, pol.id,
hook)
put_body = {"name": name, "metadata": metadata}
ret = mgr.update_webhook(sg, pol, hook, name=name, metadata=metadata)
mgr.api.method_put.assert_called_with(uri, body=put_body)
def test_mgr_update_webhook_metadata(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
hook = utils.random_unicode()
hook_obj = fakes.FakeAutoScaleWebhook(mgr, {}, pol, sg)
hook_obj.metadata = {"orig": "orig"}
metadata = {"new": "new"}
expected = hook_obj.metadata.copy()
expected.update(metadata)
uri = "/%s/%s/policies/%s/webhooks/%s" % (mgr.uri_base, sg.id, pol.id,
hook)
mgr.update_webhook = Mock()
mgr.get_webhook = Mock(return_value=hook_obj)
mgr.update_webhook_metadata(sg, pol, hook, metadata)
mgr.update_webhook.assert_called_once_with(sg, pol, hook_obj,
metadata=expected)
def test_mgr_delete_webhook(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
hook = utils.random_unicode()
hook_obj = fakes.FakeAutoScaleWebhook(mgr, {}, pol, sg)
uri = "/%s/%s/policies/%s/webhooks/%s" % (mgr.uri_base, sg.id, pol.id,
hook)
mgr.api.method_delete = Mock(return_value=(None, None))
mgr.get_webhook = Mock(return_value=hook_obj)
mgr.delete_webhook(sg, pol, hook)
mgr.api.method_delete.assert_called_once_with(uri)
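    # The _resolve_lbs tests below cover each accepted load balancer spec:
    # a raw dict, a CloudLoadBalancer object, an (id, port) tuple, and a
    # bare id looked up through pyrax.cloud_loadbalancers.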
def test_mgr_resolve_lbs_dict(self):
sg = self.scaling_group
mgr = sg.manager
key = utils.random_unicode()
val = utils.random_unicode()
lb_dict = {key: val}
ret = mgr._resolve_lbs(lb_dict)
self.assertEqual(ret, [lb_dict])
def test_mgr_resolve_lbs_clb(self):
sg = self.scaling_group
mgr = sg.manager
clb = fakes.FakeLoadBalancer(None, {})
ret = mgr._resolve_lbs(clb)
expected = {"loadBalancerId": clb.id, "port": clb.port}
self.assertEqual(ret, [expected])
def test_mgr_resolve_lbs_tuple(self):
sg = self.scaling_group
mgr = sg.manager
fake_id = utils.random_unicode()
fake_port = utils.random_unicode()
lbs = (fake_id, fake_port)
ret = mgr._resolve_lbs(lbs)
expected = {"loadBalancerId": fake_id, "port": fake_port}
self.assertEqual(ret, [expected])
def test_mgr_resolve_lbs_id(self):
sg = self.scaling_group
mgr = sg.manager
clb = fakes.FakeLoadBalancer(None, {})
sav = pyrax.cloud_loadbalancers
class PyrCLB(object):
def get(self, *args, **kwargs):
return clb
pyrax.cloud_loadbalancers = PyrCLB()
ret = mgr._resolve_lbs("fakeid")
expected = {"loadBalancerId": clb.id, "port": clb.port}
self.assertEqual(ret, [expected])
pyrax.cloud_loadbalancers = sav
def test_mgr_resolve_lbs_id_fail(self):
sg = self.scaling_group
mgr = sg.manager
pyclb = pyrax.cloudloadbalancers
pyclb.get = Mock(side_effect=Exception())
self.assertRaises(exc.InvalidLoadBalancer, mgr._resolve_lbs, "bogus")
def test_mgr_create_body(self):
sg = self.scaling_group
mgr = sg.manager
name = utils.random_unicode()
cooldown = utils.random_unicode()
min_entities = utils.random_unicode()
max_entities = utils.random_unicode()
launch_config_type = utils.random_unicode()
flavor = utils.random_unicode()
disk_config = None
metadata = None
personality = [{"path": "/tmp/testing", "contents": b"testtest"}]
scaling_policies = None
networks = utils.random_unicode()
lb = fakes.FakeLoadBalancer()
load_balancers = (lb.id, lb.port)
server_name = utils.random_unicode()
image = utils.random_unicode()
group_metadata = utils.random_unicode()
key_name = utils.random_unicode()
expected = {
"groupConfiguration": {
"cooldown": cooldown,
"maxEntities": max_entities,
"minEntities": min_entities,
"name": name,
"metadata": group_metadata},
"launchConfiguration": {
"args": {
"loadBalancers": [{"loadBalancerId": lb.id,
"port": lb.port}],
"server": {
"flavorRef": flavor,
"imageRef": image,
"metadata": {},
"name": server_name,
"personality": [{"path": "/tmp/testing",
"contents": b"dGVzdHRlc3Q="}],
"networks": networks,
"key_name": key_name}
},
"type": launch_config_type},
"scalingPolicies": []}
self.maxDiff = 1000000
ret = mgr._create_body(name, cooldown, min_entities, max_entities,
launch_config_type, server_name, image, flavor,
disk_config=disk_config, metadata=metadata,
personality=personality, networks=networks,
load_balancers=load_balancers,
scaling_policies=scaling_policies,
group_metadata=group_metadata, key_name=key_name)
self.assertEqual(ret, expected)
def test_mgr_create_body_disk_config(self):
sg = self.scaling_group
mgr = sg.manager
name = utils.random_unicode()
cooldown = utils.random_unicode()
min_entities = utils.random_unicode()
max_entities = utils.random_unicode()
launch_config_type = utils.random_unicode()
flavor = utils.random_unicode()
disk_config = utils.random_unicode()
metadata = None
personality = None
scaling_policies = None
networks = utils.random_unicode()
lb = fakes.FakeLoadBalancer()
load_balancers = (lb.id, lb.port)
server_name = utils.random_unicode()
image = utils.random_unicode()
group_metadata = utils.random_unicode()
key_name = utils.random_unicode()
expected = {
"groupConfiguration": {
"cooldown": cooldown,
"maxEntities": max_entities,
"minEntities": min_entities,
"name": name,
"metadata": group_metadata},
"launchConfiguration": {
"args": {
"loadBalancers": [{"loadBalancerId": lb.id,
"port": lb.port}],
"server": {
"OS-DCF:diskConfig": disk_config,
"flavorRef": flavor,
"imageRef": image,
"metadata": {},
"name": server_name,
"networks": networks,
"key_name": key_name}
},
"type": launch_config_type},
"scalingPolicies": []}
self.maxDiff = 1000000
ret = mgr._create_body(name, cooldown, min_entities, max_entities,
launch_config_type, server_name, image, flavor,
disk_config=disk_config, metadata=metadata,
personality=personality, networks=networks,
load_balancers=load_balancers,
scaling_policies=scaling_policies,
group_metadata=group_metadata, key_name=key_name)
self.assertEqual(ret, expected)
def test_policy_init(self):
sg = self.scaling_group
mgr = sg.manager
mgr.get = Mock(return_value=sg)
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg.id)
self.assert_(pol.scaling_group is sg)
def test_policy_get(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
mgr.get_policy = Mock(return_value=pol)
pol.get()
mgr.get_policy.assert_called_once_with(sg, pol)
def test_policy_delete(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
mgr.delete_policy = Mock()
pol.delete()
mgr.delete_policy.assert_called_once_with(sg, pol)
def test_policy_update(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
name = utils.random_unicode()
policy_type = utils.random_unicode()
cooldown = utils.random_unicode()
change = utils.random_unicode()
is_percent = utils.random_unicode()
desired_capacity = utils.random_unicode()
args = utils.random_unicode()
mgr.update_policy = Mock()
pol.update(name=name, policy_type=policy_type, cooldown=cooldown,
change=change, is_percent=is_percent,
desired_capacity=desired_capacity, args=args)
mgr.update_policy.assert_called_once_with(scaling_group=sg,
policy=pol, name=name, policy_type=policy_type,
cooldown=cooldown, change=change, is_percent=is_percent,
desired_capacity=desired_capacity, args=args)
def test_policy_execute(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
mgr.execute_policy = Mock()
pol.execute()
mgr.execute_policy.assert_called_once_with(sg, pol)
def test_policy_add_webhook(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
mgr.add_webhook = Mock()
name = utils.random_unicode()
metadata = utils.random_unicode()
pol.add_webhook(name, metadata=metadata)
mgr.add_webhook.assert_called_once_with(sg, pol, name,
metadata=metadata)
def test_policy_list_webhooks(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
mgr.list_webhooks = Mock()
pol.list_webhooks()
mgr.list_webhooks.assert_called_once_with(sg, pol)
def test_policy_get_webhook(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
hook = utils.random_unicode()
mgr.get_webhook = Mock()
pol.get_webhook(hook)
mgr.get_webhook.assert_called_once_with(sg, pol, hook)
def test_policy_update_webhook(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
hook = utils.random_unicode()
name = utils.random_unicode()
metadata = utils.random_unicode()
mgr.update_webhook = Mock()
pol.update_webhook(hook, name=name, metadata=metadata)
mgr.update_webhook.assert_called_once_with(sg, policy=pol, webhook=hook,
name=name, metadata=metadata)
def test_policy_update_webhook_metadata(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
hook = utils.random_unicode()
metadata = utils.random_unicode()
mgr.update_webhook_metadata = Mock()
pol.update_webhook_metadata(hook, metadata=metadata)
mgr.update_webhook_metadata.assert_called_once_with(sg, pol, hook,
metadata)
def test_policy_delete_webhook(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
hook = utils.random_unicode()
mgr.delete_webhook = Mock()
pol.delete_webhook(hook)
mgr.delete_webhook.assert_called_once_with(sg, pol, hook)
def test_webhook_get(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
hook = fakes.FakeAutoScaleWebhook(mgr, {}, pol, sg)
pol.get_webhook = Mock()
hook.get()
pol.get_webhook.assert_called_once_with(hook)
def test_webhook_update(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
hook = fakes.FakeAutoScaleWebhook(mgr, {}, pol, sg)
name = utils.random_unicode()
metadata = utils.random_unicode()
pol.update_webhook = Mock()
hook.update(name=name, metadata=metadata)
pol.update_webhook.assert_called_once_with(hook, name=name,
metadata=metadata)
def test_webhook_update_metadata(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
hook = fakes.FakeAutoScaleWebhook(mgr, {}, pol, sg)
metadata = utils.random_unicode()
pol.update_webhook_metadata = Mock()
hook.update_metadata(metadata=metadata)
pol.update_webhook_metadata.assert_called_once_with(hook,
metadata)
def test_webhook_delete(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
hook = fakes.FakeAutoScaleWebhook(mgr, {}, pol, sg)
pol.delete_webhook = Mock()
hook.delete()
pol.delete_webhook.assert_called_once_with(hook)
def test_clt_get_state(self):
clt = fakes.FakeAutoScaleClient()
sg = self.scaling_group
mgr = clt._manager
mgr.get_state = Mock()
clt.get_state(sg)
mgr.get_state.assert_called_once_with(sg)
def test_clt_pause(self):
clt = fakes.FakeAutoScaleClient()
sg = self.scaling_group
mgr = clt._manager
mgr.pause = Mock()
clt.pause(sg)
mgr.pause.assert_called_once_with(sg)
def test_clt_resume(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
mgr.resume = Mock()
clt.resume(sg)
mgr.resume.assert_called_once_with(sg)
def test_clt_replace(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
name = utils.random_unicode()
cooldown = utils.random_unicode()
min_entities = utils.random_unicode()
max_entities = utils.random_unicode()
metadata = utils.random_unicode()
mgr.replace = Mock()
clt.replace(sg, name, cooldown, min_entities, max_entities,
metadata=metadata)
mgr.replace.assert_called_once_with(sg, name, cooldown, min_entities,
max_entities, metadata=metadata)
def test_clt_update(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
name = utils.random_unicode()
cooldown = utils.random_unicode()
min_entities = utils.random_unicode()
max_entities = utils.random_unicode()
metadata = utils.random_unicode()
mgr.update = Mock()
clt.update(sg, name=name, cooldown=cooldown, min_entities=min_entities,
max_entities=max_entities, metadata=metadata)
mgr.update.assert_called_once_with(sg, name=name, cooldown=cooldown,
min_entities=min_entities, max_entities=max_entities,
metadata=metadata)
def test_clt_update_metadata(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
metadata = utils.random_unicode()
mgr.update_metadata = Mock()
clt.update_metadata(sg, metadata)
mgr.update_metadata.assert_called_once_with(sg, metadata)
def test_clt_get_configuration(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
mgr.get_configuration = Mock()
clt.get_configuration(sg)
mgr.get_configuration.assert_called_once_with(sg)
def test_clt_get_launch_config(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
mgr.get_launch_config = Mock()
clt.get_launch_config(sg)
mgr.get_launch_config.assert_called_once_with(sg)
def test_clt_replace_launch_config(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
mgr.replace_launch_config = Mock()
launch_config_type = utils.random_unicode()
server_name = utils.random_unicode()
image = utils.random_unicode()
flavor = utils.random_unicode()
disk_config = utils.random_unicode()
metadata = utils.random_unicode()
personality = utils.random_unicode()
networks = utils.random_unicode()
load_balancers = utils.random_unicode()
key_name = utils.random_unicode()
clt.replace_launch_config(sg, launch_config_type, server_name, image,
flavor, disk_config=disk_config, metadata=metadata,
personality=personality, networks=networks,
load_balancers=load_balancers, key_name=key_name)
mgr.replace_launch_config.assert_called_once_with(sg,
launch_config_type, server_name, image, flavor,
disk_config=disk_config, metadata=metadata,
personality=personality, networks=networks,
load_balancers=load_balancers, key_name=key_name)
def test_clt_update_launch_config(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
mgr.update_launch_config = Mock()
server_name = utils.random_unicode()
flavor = utils.random_unicode()
image = utils.random_unicode()
disk_config = utils.random_unicode()
metadata = utils.random_unicode()
personality = utils.random_unicode()
networks = utils.random_unicode()
load_balancers = utils.random_unicode()
key_name = utils.random_unicode()
user_data = utils.random_unicode()
config_drive = utils.random_unicode()
clt.update_launch_config(sg, server_name=server_name, flavor=flavor,
image=image, disk_config=disk_config, metadata=metadata,
personality=personality, networks=networks,
load_balancers=load_balancers, key_name=key_name,
config_drive=config_drive, user_data=user_data)
mgr.update_launch_config.assert_called_once_with(sg,
server_name=server_name, flavor=flavor, image=image,
disk_config=disk_config, metadata=metadata,
personality=personality, networks=networks,
load_balancers=load_balancers, key_name=key_name,
config_drive=config_drive, user_data=user_data)
def test_clt_update_launch_metadata(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
mgr.update_launch_metadata = Mock()
metadata = utils.random_unicode()
clt.update_launch_metadata(sg, metadata)
mgr.update_launch_metadata.assert_called_once_with(sg, metadata)
def test_clt_add_policy(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
name = utils.random_unicode()
policy_type = utils.random_unicode()
cooldown = utils.random_unicode()
change = utils.random_unicode()
is_percent = utils.random_unicode()
desired_capacity = utils.random_unicode()
args = utils.random_unicode()
mgr.add_policy = Mock()
clt.add_policy(sg, name, policy_type, cooldown, change,
is_percent=is_percent, desired_capacity=desired_capacity,
args=args)
mgr.add_policy.assert_called_once_with(sg, name, policy_type, cooldown,
change=change, is_percent=is_percent,
desired_capacity=desired_capacity, args=args)
def test_clt_list_policies(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
mgr.list_policies = Mock()
clt.list_policies(sg)
mgr.list_policies.assert_called_once_with(sg)
def test_clt_get_policy(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
pol = utils.random_unicode()
mgr.get_policy = Mock()
clt.get_policy(sg, pol)
mgr.get_policy.assert_called_once_with(sg, pol)
def test_clt_replace_policy(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
pol = utils.random_unicode()
name = utils.random_unicode()
policy_type = utils.random_unicode()
cooldown = utils.random_unicode()
change = utils.random_unicode()
is_percent = utils.random_unicode()
desired_capacity = utils.random_unicode()
args = utils.random_unicode()
mgr.replace_policy = Mock()
clt.replace_policy(sg, pol, name, policy_type, cooldown, change=change,
is_percent=is_percent, desired_capacity=desired_capacity,
args=args)
mgr.replace_policy.assert_called_once_with(sg, pol, name, policy_type,
cooldown, change=change, is_percent=is_percent,
desired_capacity=desired_capacity, args=args)
def test_clt_update_policy(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
pol = utils.random_unicode()
name = utils.random_unicode()
policy_type = utils.random_unicode()
cooldown = utils.random_unicode()
change = utils.random_unicode()
is_percent = utils.random_unicode()
desired_capacity = utils.random_unicode()
args = utils.random_unicode()
mgr.update_policy = Mock()
clt.update_policy(sg, pol, name=name, policy_type=policy_type,
cooldown=cooldown, change=change, is_percent=is_percent,
desired_capacity=desired_capacity, args=args)
mgr.update_policy.assert_called_once_with(sg, pol, name=name,
policy_type=policy_type, cooldown=cooldown, change=change,
is_percent=is_percent, desired_capacity=desired_capacity,
args=args)
def test_clt_execute_policy(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
pol = utils.random_unicode()
mgr.execute_policy = Mock()
clt.execute_policy(sg, pol)
mgr.execute_policy.assert_called_once_with(scaling_group=sg, policy=pol)
def test_clt_delete_policy(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
pol = utils.random_unicode()
mgr.delete_policy = Mock()
clt.delete_policy(sg, pol)
mgr.delete_policy.assert_called_once_with(scaling_group=sg, policy=pol)
def test_clt_add_webhook(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
pol = utils.random_unicode()
name = utils.random_unicode()
metadata = utils.random_unicode()
mgr.add_webhook = Mock()
clt.add_webhook(sg, pol, name, metadata=metadata)
mgr.add_webhook.assert_called_once_with(sg, pol, name,
metadata=metadata)
def test_clt_list_webhooks(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
pol = utils.random_unicode()
mgr.list_webhooks = Mock()
clt.list_webhooks(sg, pol)
mgr.list_webhooks.assert_called_once_with(sg, pol)
def test_clt_get_webhook(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
pol = utils.random_unicode()
hook = utils.random_unicode()
mgr.get_webhook = Mock()
clt.get_webhook(sg, pol, hook)
mgr.get_webhook.assert_called_once_with(sg, pol, hook)
def test_clt_replace_webhook(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
pol = utils.random_unicode()
hook = utils.random_unicode()
name = utils.random_unicode()
metadata = utils.random_unicode()
mgr.replace_webhook = Mock()
clt.replace_webhook(sg, pol, hook, name, metadata=metadata)
mgr.replace_webhook.assert_called_once_with(sg, pol, hook, name,
metadata=metadata)
def test_clt_update_webhook(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
pol = utils.random_unicode()
hook = utils.random_unicode()
name = utils.random_unicode()
metadata = utils.random_unicode()
mgr.update_webhook = Mock()
clt.update_webhook(sg, pol, hook, name=name, metadata=metadata)
mgr.update_webhook.assert_called_once_with(scaling_group=sg, policy=pol,
webhook=hook, name=name, metadata=metadata)
def test_clt_update_webhook_metadata(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
pol = utils.random_unicode()
hook = utils.random_unicode()
metadata = utils.random_unicode()
mgr.update_webhook_metadata = Mock()
clt.update_webhook_metadata(sg, pol, hook, metadata)
mgr.update_webhook_metadata.assert_called_once_with(sg, pol, hook,
metadata)
def test_clt_delete_webhook(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
pol = utils.random_unicode()
hook = utils.random_unicode()
mgr.delete_webhook = Mock()
clt.delete_webhook(sg, pol, hook)
mgr.delete_webhook.assert_called_once_with(sg, pol, hook)
if __name__ == "__main__":
unittest.main()
| apache-2.0 | 5,893,022,643,775,277,000 | 38.576377 | 80 | 0.567151 | false |
shaun-h/PGame | Games/Snake/snake.py | 1 | 5131 | from scene import *
import ui
from ui import get_screen_size
import random
import time
A = Action
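# This game appears to target Pythonista's scene module, which provides the
# Scene, ShapeNode, Action and run names used below.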
class snake(Scene):
def setup(self):
self.movement = 1
self.movementStep = 0.1
self.lastStep = 0
self.grid = []
self.snakePos = []
self.food = -1
self.snakeColour = '#000000'
self.emptyColour = '#ffffff'
self.foodColour = '#c6c6c6'
self.wallColour = '#4d4d4d'
self.rowSize = 0
self.columnSize = 0
self.sizeToUse = 1
self.canChangeMovement = True
self.score = 0
self.allowMove = True
self.setupGrid(self.sizeToUse,self.emptyColour,self.emptyColour )
for i in self.grid:
self.add_child(i[0])
self.setupMaze()
self.newGame()
def newGame(self):
self.resetBoard()
self.allowMove = False
self.canChangeMovement = True
self.score = 0
self.food = -1
#self.lastStep = 0
mid = int((len(self.grid)/2)-((self.rowSize/2)*((self.columnSize-1)%2)))
self.snakePos = [mid-1,mid,mid+1]
self.food = 0
self.movement = 1
for i in self.snakePos:
self.grid[i][1] = 'snake'
self.grid[i][0].color = self.snakeColour
self.pickFoodPosition()
self.run_action(A.sequence(A.wait(2), A.call(self.startGame)))
def startGame(self):
self.allowMove = True
def setupGrid(self, size, colour, lineColour, gap=0):
w,h = get_screen_size()
use = 0
useSize = 20*size
ysize = 0
xsize = 0
border = 1
if w > h:
use = h
ysize = useSize
xsize = int(w / (use / (useSize+border)))
else:
use = w
xsize = useSize
ysize = int(h / (use / (useSize+border)))
ws = (use / (useSize+border))
last_height = None
control_height = None
for i in range(0,ysize):
for j in range(0,xsize):
pr = ui.Path.rect(0,0,ws,ws)
if w > h:
x = ((ws+gap)*j)+(ws*border)-ws/2#-(gap*(xsize/2))
y = h - (((ws+gap)*i)+(ws*border))
else:
x = ((ws+gap)*j)+(((ws*border)+ws)/2)#-(gap*(xsize/2))
y = h - (((ws+gap)*i)+(((ws*border)-ws/2)))
self.grid.append([ShapeNode(pr, position=(x, y), fill_color = colour, stroke_color = lineColour),'empty'])
last_height = y
self.rowSize = xsize
self.columnSize = ysize
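	# self.grid is a flat, row-major list of [ShapeNode, state] pairs, where state is
	# 'empty', 'snake', 'food' or 'wall'; index +/-1 moves right/left, +/- rowSize moves down/up.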
def touch_moved(self, touch):
loc = touch.location
prev = touch.prev_location
w, y = ui.get_screen_size()
diffRequiredx = 0.03
diffRequiredy = 0.015
diffx = w * diffRequiredx
diffy = y * diffRequiredy
xdiff = (loc.x - prev.x)
ydiff = (loc.y - prev.y)
if abs(xdiff) > abs(ydiff):
if xdiff < -diffx and not self.movement == 1 and self.canChangeMovement:
self.movement = -1
self.canChangeMovement = False
elif xdiff > diffx and not self.movement == -1 and self.canChangeMovement:
self.movement = 1
self.canChangeMovement = False
else:
if ydiff < -diffy and not self.movement == -self.rowSize and self.canChangeMovement:
self.movement = self.rowSize
self.canChangeMovement = False
elif ydiff > diffy and not self.movement == self.rowSize and self.canChangeMovement:
self.movement = -self.rowSize
self.canChangeMovement = False
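    # Fixed-timestep loop: advance the snake by one cell every movementStep
    # seconds, independently of the scene frame rate.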
def update(self):
if self.t >= self.lastStep + self.movementStep:
self.lastStep = self.lastStep + self.movementStep
self.move()
def move(self):
if not self.allowMove:
return
hi = self.snakePos[-1]+self.movement
head = self.grid[hi]
if head[1] == 'wall' or head[1] == 'snake':
self.newGame()
self.allowMove = False
return
elif hi == self.food:
head[1] = 'snake'
head[0].color = self.snakeColour
self.score = self.score + 1
if self.checkWinningCondition():
self.allowMove = False
self.winner()
else:
self.pickFoodPosition()
else:
tail = self.grid[self.snakePos[0]]
tail[1] = 'empty'
tail[0].color = self.emptyColour
head[1] = 'snake'
head[0].color = self.snakeColour
self.snakePos.pop(0)
self.snakePos.append(hi)
self.canChangeMovement = True
def winner(self):
print('winner')
def checkWinningCondition(self):
full = True
for i in self.grid:
if i[1]=='empty':
full = False
break
return full
def pickFoodPosition(self):
found =False
while not found:
ppos = random.randint(0, len(self.grid)-1)
if self.grid[ppos][1] == 'empty':
found = True
self.food = ppos
food = self.grid[self.food]
food[0].color = self.foodColour
food[1] = 'food'
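    # Mark the outermost rows and columns as walls so the snake dies on contact
    # with the border of the board.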
def setupMaze(self):
for i in range(0,self.rowSize):
self.grid[i][0].color = self.wallColour
self.grid[i][1] = 'wall'
for i in range(len(self.grid)-self.rowSize,len(self.grid)):
self.grid[i][0].color = self.wallColour
self.grid[i][1] = 'wall'
for i in range(0,self.columnSize):
self.grid[i*self.rowSize][0].color = self.wallColour
self.grid[i*self.rowSize][1] = 'wall'
self.grid[(i+1)*self.rowSize-1][0].color = self.wallColour
self.grid[(i+1)*self.rowSize-1][1] = 'wall'
def resetBoard(self):
for i in self.snakePos:
snake = self.grid[i]
snake[1] = 'empty'
snake[0].color = self.emptyColour
if self.food > -1:
food = self.grid[self.food]
food[1] = 'empty'
food[0].color = self.emptyColour
def RunGame():
run(snake(), PORTRAIT, show_fps=True)
if __name__ == '__main__':
RunGame()
| mit | 7,900,173,201,529,440,000 | 25.448454 | 110 | 0.638277 | false |
Javier-AG/SMC_thesis | extraction_and_annotation/high_level_description.py | 1 | 1476 | import Timbral_Brightness as bright
import Timbral_Depth as depth
import Timbral_Hardness as hard
import Timbral_Roughness as rough
import os
import numpy as np
import pandas as pd
# High-level descriptors calculation
# Set folders: change source directory
pardir = 'DATASET_PATH'
folder = 'FOLDER_NAME'
# Initialize arrays
b = []
d = []
h = []
r = []
# Timbral models
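# Each timbral model returns a single scalar per track; the values are collected
# in the lists initialised above.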
tracks = np.array([track for track in os.listdir(os.path.join(pardir,folder)) if track[-3:] == 'wav'])
for track in tracks:
b.append(bright.timbral_brightness(os.path.join(pardir,folder,track)))
d.append(depth.timbral_depth(os.path.join(pardir,folder,track)))
h.append(hard.timbral_hardness(os.path.join(pardir,folder,track)))
r.append(rough.timbral_roughness(os.path.join(pardir,folder,track)))
# Min-max normalization to the 0-1 range. The lists are converted to numpy
# arrays first, since plain Python lists do not support element-wise arithmetic.
# Brightness
b1 = np.array(b) - min(b)
b_norm = b1 / max(b1)
# Depth
d1 = np.array(d) - min(d)
d_norm = d1 / max(d1)
# Hardness
h1 = np.array(h) - min(h)
h_norm = h1 / max(h1)
# Roughness
r1 = np.array(r) - min(r)
r_norm = r1 / max(r1)
#print "Brightness: \n", b, "\n", b_norm, "\n", "Depth: \n", d, "\n", d_norm, "\n", "Hard: \n", h, "\n", h_norm, "\n", "Roughness: \n", r, "\n", r_norm, "\n"
pardir_csv = 'DATASET_PATH'
path_csv = os.path.join(pardir_csv,folder+"_features.csv")
df = pd.read_csv(path_csv,index_col=0)
df['brightness'] = b_norm
df['depth'] = d_norm
df['hardness'] = h_norm
df['roughness'] = r_norm
df.to_csv('OUT_DATASET_PATH'+folder+'_descriptors.csv')
| gpl-3.0 | -5,852,060,324,948,805,000 | 29.122449 | 157 | 0.663957 | false |
0atman/flask-admin | flask_admin/tests/test_base.py | 1 | 7277 | from nose.tools import ok_, eq_, raises
from flask import Flask, request
from flask.views import MethodView
from flask.ext.admin import base
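# MockView exercises the admin view plumbing: allow_call short-circuits dispatch
# in _handle_view and allow_access drives is_accessible, so the tests can
# simulate authorization failures without a real authentication backend.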
class MockView(base.BaseView):
# Various properties
allow_call = True
allow_access = True
@base.expose('/')
def index(self):
return 'Success!'
@base.expose('/test/')
def test(self):
return self.render('mock.html')
def _handle_view(self, name, **kwargs):
if self.allow_call:
return super(MockView, self)._handle_view(name, **kwargs)
else:
return 'Failure!'
def is_accessible(self):
if self.allow_access:
return super(MockView, self).is_accessible()
else:
return False
class MockMethodView(base.BaseView):
@base.expose('/')
def index(self):
return 'Success!'
@base.expose_plugview('/_api/1')
class API1(MethodView):
def get(self, cls):
return cls.render('method.html', request=request, name='API1')
def post(self, cls):
return cls.render('method.html', request=request, name='API1')
def put(self, cls):
return cls.render('method.html', request=request, name='API1')
def delete(self, cls):
return cls.render('method.html', request=request, name='API1')
@base.expose_plugview('/_api/2')
class API2(MethodView):
def get(self, cls):
return cls.render('method.html', request=request, name='API2')
def post(self, cls):
return cls.render('method.html', request=request, name='API2')
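# The method.html test template is expected to render "<HTTP method> - <name>";
# the assertions in test_nested_flask_views below rely on that output format.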
def test_baseview_defaults():
view = MockView()
eq_(view.name, None)
eq_(view.category, None)
eq_(view.endpoint, None)
eq_(view.url, None)
eq_(view.static_folder, None)
eq_(view.admin, None)
eq_(view.blueprint, None)
def test_base_defaults():
admin = base.Admin()
eq_(admin.name, 'Admin')
eq_(admin.url, '/admin')
eq_(admin.endpoint, 'admin')
eq_(admin.app, None)
ok_(admin.index_view is not None)
eq_(admin.index_view._template, 'admin/index.html')
# Check if default view was added
eq_(len(admin._views), 1)
eq_(admin._views[0], admin.index_view)
def test_custom_index_view():
view = base.AdminIndexView(name='a', category='b', endpoint='c',
url='/d', template='e')
admin = base.Admin(index_view=view)
eq_(admin.endpoint, 'c')
eq_(admin.url, '/d')
ok_(admin.index_view is view)
eq_(view.name, 'a')
eq_(view.category, 'b')
eq_(view._template, 'e')
# Check if view was added
eq_(len(admin._views), 1)
eq_(admin._views[0], view)
def test_base_registration():
app = Flask(__name__)
admin = base.Admin(app)
eq_(admin.app, app)
ok_(admin.index_view.blueprint is not None)
def test_admin_customizations():
app = Flask(__name__)
admin = base.Admin(app, name='Test', url='/foobar')
eq_(admin.name, 'Test')
eq_(admin.url, '/foobar')
client = app.test_client()
rv = client.get('/foobar/')
eq_(rv.status_code, 200)
def test_baseview_registration():
admin = base.Admin()
view = MockView()
bp = view.create_blueprint(admin)
# Base properties
eq_(view.admin, admin)
ok_(view.blueprint is not None)
# Calculated properties
eq_(view.endpoint, 'mockview')
eq_(view.url, '/admin/mockview')
eq_(view.name, 'Mock View')
# Verify generated blueprint properties
eq_(bp.name, view.endpoint)
eq_(bp.url_prefix, view.url)
eq_(bp.template_folder, 'templates')
eq_(bp.static_folder, view.static_folder)
# Verify customizations
view = MockView(name='Test', endpoint='foobar')
view.create_blueprint(base.Admin())
eq_(view.name, 'Test')
eq_(view.endpoint, 'foobar')
eq_(view.url, '/admin/foobar')
view = MockView(url='test')
view.create_blueprint(base.Admin())
eq_(view.url, '/admin/test')
view = MockView(url='/test/test')
view.create_blueprint(base.Admin())
eq_(view.url, '/test/test')
def test_baseview_urls():
app = Flask(__name__)
admin = base.Admin(app)
view = MockView()
admin.add_view(view)
eq_(len(view._urls), 2)
@raises(Exception)
def test_no_default():
app = Flask(__name__)
admin = base.Admin(app)
admin.add_view(base.BaseView())
def test_call():
app = Flask(__name__)
admin = base.Admin(app)
view = MockView()
admin.add_view(view)
client = app.test_client()
rv = client.get('/admin/')
eq_(rv.status_code, 200)
rv = client.get('/admin/mockview/')
eq_(rv.data, 'Success!')
rv = client.get('/admin/mockview/test/')
eq_(rv.data, 'Success!')
# Check authentication failure
view.allow_call = False
rv = client.get('/admin/mockview/')
eq_(rv.data, 'Failure!')
def test_permissions():
app = Flask(__name__)
admin = base.Admin(app)
view = MockView()
admin.add_view(view)
client = app.test_client()
view.allow_access = False
rv = client.get('/admin/mockview/')
eq_(rv.status_code, 404)
def test_submenu():
app = Flask(__name__)
admin = base.Admin(app)
admin.add_view(MockView(name='Test 1', category='Test', endpoint='test1'))
# Second view is not normally accessible
view = MockView(name='Test 2', category='Test', endpoint='test2')
view.allow_access = False
admin.add_view(view)
ok_('Test' in admin._menu_categories)
eq_(len(admin._menu), 2)
eq_(admin._menu[1].name, 'Test')
eq_(len(admin._menu[1]._children), 2)
# Categories don't have URLs and they're not accessible
eq_(admin._menu[1].get_url(), None)
eq_(admin._menu[1].is_accessible(), False)
eq_(len(admin._menu[1].get_children()), 1)
def test_delayed_init():
app = Flask(__name__)
admin = base.Admin()
admin.add_view(MockView())
admin.init_app(app)
client = app.test_client()
rv = client.get('/admin/mockview/')
eq_(rv.data, 'Success!')
def test_multi_instances_init():
app = Flask(__name__)
admin = base.Admin(app)
class ManageIndex(base.AdminIndexView):
pass
manage = base.Admin(app, index_view=ManageIndex(url='/manage', endpoint='manage'))
@raises(Exception)
def test_double_init():
app = Flask(__name__)
admin = base.Admin(app)
admin.init_app(app)
def test_nested_flask_views():
app = Flask(__name__)
admin = base.Admin(app)
view = MockMethodView()
admin.add_view(view)
client = app.test_client()
rv = client.get('/admin/mockmethodview/_api/1')
assert rv.data == 'GET - API1'
rv = client.put('/admin/mockmethodview/_api/1')
assert rv.data == 'PUT - API1'
rv = client.post('/admin/mockmethodview/_api/1')
assert rv.data == 'POST - API1'
rv = client.delete('/admin/mockmethodview/_api/1')
assert rv.data == 'DELETE - API1'
rv = client.get('/admin/mockmethodview/_api/2')
assert rv.data == 'GET - API2'
rv = client.post('/admin/mockmethodview/_api/2')
assert rv.data == 'POST - API2'
rv = client.delete('/admin/mockmethodview/_api/2')
assert rv.status_code == 405
rv = client.put('/admin/mockmethodview/_api/2')
assert rv.status_code == 405
| bsd-3-clause | 7,890,585,517,632,265,000 | 24.896797 | 86 | 0.607256 | false |
PeteAndersen/swarfarm | data_log/migrations/0005_auto_20190322_1821.py | 1 | 10182 | # Generated by Django 2.1.7 on 2019-03-23 01:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('data_log', '0004_riftdungeonlog_clear_time'),
]
operations = [
migrations.RemoveField(
model_name='riftraiditemdrop',
name='item',
),
migrations.RemoveField(
model_name='riftraiditemdrop',
name='log',
),
migrations.RemoveField(
model_name='riftraidmonsterdrop',
name='item',
),
migrations.RemoveField(
model_name='riftraidmonsterdrop',
name='log',
),
migrations.RemoveField(
model_name='riftraidrunecraftdrop',
name='item',
),
migrations.RemoveField(
model_name='riftraidrunecraftdrop',
name='log',
),
migrations.RemoveField(
model_name='worldbosslog',
name='summoner',
),
migrations.RemoveField(
model_name='worldbosslogitemdrop',
name='item',
),
migrations.RemoveField(
model_name='worldbosslogitemdrop',
name='log',
),
migrations.RemoveField(
model_name='worldbosslogmonsterdrop',
name='log',
),
migrations.RemoveField(
model_name='worldbosslogmonsterdrop',
name='monster',
),
migrations.RemoveField(
model_name='worldbosslogrunedrop',
name='log',
),
migrations.AlterField(
model_name='dungeonitemdrop',
name='item',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.GameItem'),
),
migrations.AlterField(
model_name='dungeonitemdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='data_log.DungeonLog'),
),
migrations.AlterField(
model_name='dungeonlog',
name='level',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Level'),
),
migrations.AlterField(
model_name='dungeonmonsterdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='monsters', to='data_log.DungeonLog'),
),
migrations.AlterField(
model_name='dungeonmonsterdrop',
name='monster',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Monster'),
),
migrations.AlterField(
model_name='dungeonmonsterpiecedrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='monster_pieces', to='data_log.DungeonLog'),
),
migrations.AlterField(
model_name='dungeonmonsterpiecedrop',
name='monster',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Monster'),
),
migrations.AlterField(
model_name='dungeonrunedrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='runes', to='data_log.DungeonLog'),
),
migrations.AlterField(
model_name='dungeonsecretdungeondrop',
name='level',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Level'),
),
migrations.AlterField(
model_name='dungeonsecretdungeondrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='secret_dungeons', to='data_log.DungeonLog'),
),
migrations.AlterField(
model_name='magicboxcraftitemdrop',
name='item',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.GameItem'),
),
migrations.AlterField(
model_name='magicboxcraftitemdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='data_log.MagicBoxCraft'),
),
migrations.AlterField(
model_name='magicboxcraftrunecraftdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rune_crafts', to='data_log.MagicBoxCraft'),
),
migrations.AlterField(
model_name='magicboxcraftrunedrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='runes', to='data_log.MagicBoxCraft'),
),
migrations.AlterField(
model_name='riftdungeonitemdrop',
name='item',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.GameItem'),
),
migrations.AlterField(
model_name='riftdungeonitemdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='data_log.RiftDungeonLog'),
),
migrations.AlterField(
model_name='riftdungeonlog',
name='level',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Level'),
),
migrations.AlterField(
model_name='riftdungeonmonsterdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='monsters', to='data_log.RiftDungeonLog'),
),
migrations.AlterField(
model_name='riftdungeonmonsterdrop',
name='monster',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Monster'),
),
migrations.AlterField(
model_name='riftdungeonrunecraftdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rune_crafts', to='data_log.RiftDungeonLog'),
),
migrations.AlterField(
model_name='riftdungeonrunedrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='runes', to='data_log.RiftDungeonLog'),
),
migrations.AlterField(
model_name='riftraidlog',
name='level',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Level'),
),
migrations.AlterField(
model_name='shoprefreshitemdrop',
name='item',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.GameItem'),
),
migrations.AlterField(
model_name='shoprefreshitemdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='data_log.ShopRefreshLog'),
),
migrations.AlterField(
model_name='shoprefreshmonsterdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='monsters', to='data_log.ShopRefreshLog'),
),
migrations.AlterField(
model_name='shoprefreshmonsterdrop',
name='monster',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Monster'),
),
migrations.AlterField(
model_name='shoprefreshrunedrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='runes', to='data_log.ShopRefreshLog'),
),
migrations.AlterField(
model_name='summonlog',
name='item',
field=models.ForeignKey(help_text='Item or currency used to summon', on_delete=django.db.models.deletion.PROTECT, to='bestiary.GameItem'),
),
migrations.AlterField(
model_name='summonlog',
name='monster',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Monster'),
),
migrations.AlterField(
model_name='wishlogitemdrop',
name='item',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.GameItem'),
),
migrations.AlterField(
model_name='wishlogitemdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='data_log.WishLog'),
),
migrations.AlterField(
model_name='wishlogmonsterdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='monsters', to='data_log.WishLog'),
),
migrations.AlterField(
model_name='wishlogmonsterdrop',
name='monster',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Monster'),
),
migrations.AlterField(
model_name='wishlogrunedrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='runes', to='data_log.WishLog'),
),
migrations.DeleteModel(
name='RiftRaidItemDrop',
),
migrations.DeleteModel(
name='RiftRaidMonsterDrop',
),
migrations.DeleteModel(
name='RiftRaidRuneCraftDrop',
),
migrations.DeleteModel(
name='WorldBossLog',
),
migrations.DeleteModel(
name='WorldBossLogItemDrop',
),
migrations.DeleteModel(
name='WorldBossLogMonsterDrop',
),
migrations.DeleteModel(
name='WorldBossLogRuneDrop',
),
]
| apache-2.0 | -5,502,780,763,330,571,000 | 39.245059 | 150 | 0.592909 | false |
ULHPC/easybuild-easyblocks | easybuild/easyblocks/r/__init__.py | 1 | 1219 | ##
# Copyright 2009-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Special handling of R easyblocks, due to clash with easybuild.easyblocks.r namespace.
author: Kenneth Hoste (Ghent University)
"""
from easybuild.easyblocks.r.r import *
| gpl-2.0 | -5,102,146,674,657,587,000 | 39.633333 | 96 | 0.753076 | false |
codingjoe/django-s3file | s3file/views.py | 1 | 1865 | import base64
import hashlib
import hmac
import logging
from django import http
from django.conf import settings
from django.core.files.storage import default_storage
from django.views import generic
logger = logging.getLogger("s3file")
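# Development stand-in for the S3 browser-upload endpoint: it verifies the
# signed POST policy against the project's SECRET_KEY, stores the file through
# default_storage and answers with the XML document S3 would return.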
class S3MockView(generic.View):
def post(self, request):
success_action_status = request.POST.get("success_action_status", 201)
try:
file = request.FILES["file"]
key = request.POST["key"]
date = request.POST["x-amz-date"]
signature = request.POST["x-amz-signature"]
policy = request.POST["policy"]
except KeyError:
logger.exception("bad request")
return http.HttpResponseBadRequest()
try:
signature = base64.b64decode(signature.encode())
policy = base64.b64decode(policy.encode())
calc_sign = hmac.new(
settings.SECRET_KEY.encode(), policy + date.encode(), "sha256"
).digest()
except ValueError:
logger.exception("bad request")
return http.HttpResponseBadRequest()
if not hmac.compare_digest(signature, calc_sign):
logger.warning("bad signature")
return http.HttpResponseForbidden()
key = key.replace("${filename}", file.name)
etag = hashlib.md5(file.read()).hexdigest() # nosec
file.seek(0)
key = default_storage.save(key, file)
return http.HttpResponse(
'<?xml version="1.0" encoding="UTF-8"?>'
"<PostResponse>"
f"<Location>{settings.MEDIA_URL}{key}</Location>"
f"<Bucket>{getattr(settings, 'AWS_STORAGE_BUCKET_NAME')}</Bucket>"
f"<Key>{key}</Key>"
f'<ETag>"{etag}"</ETag>'
"</PostResponse>",
status=success_action_status,
)
| mit | -3,465,388,422,501,639,700 | 32.909091 | 78 | 0.589812 | false |
Kaosumaru/conan-freetype | conanfile.py | 1 | 1449 | from conans import ConanFile, CMake
from conans import tools
import os
class freetypeConan(ConanFile):
name = "freetype"
version = "2.6.2"
url = "https://github.com/Kaosumaru/conan-freetype"
settings = "os", "compiler", "build_type", "arch"
exports = "freetype/*"
freetype_name = "freetype-%s" % version
source_tgz = "http://download.savannah.gnu.org/releases/freetype/%s.tar.gz" % freetype_name
def source(self):
self.output.info("Downloading %s" % self.source_tgz)
tools.download(self.source_tgz, "freetype.tar.gz")
tools.unzip("freetype.tar.gz", ".")
os.unlink("freetype.tar.gz")
def config(self):
pass
#self.requires.add("zlib/1.2.8@lasote/stable", private=False)
#self.options["zlib"].shared = False
def build(self):
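        # Out-of-source CMake build installing into a local ./install folder,
        # which package() later copies headers and libraries from.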
cmake = CMake(self.settings)
self.run('cd %s && mkdir build' % self.freetype_name)
self.run('cd %s/build && cmake -DCMAKE_INSTALL_PREFIX:PATH=../../install .. %s' % (self.freetype_name, cmake.command_line))
self.run("cd %s/build && cmake --build . --target install %s" % (self.freetype_name, cmake.build_config))
def package(self):
self.copy("*.h", dst="include", src="install/include/freetype2")
self.copy("*.lib", dst="lib", src="install/lib")
self.copy("*.a", dst="lib", src="install/lib")
def package_info(self):
self.cpp_info.libs = ["freetype"]
| mit | -8,372,275,207,622,794,000 | 36.153846 | 131 | 0.618357 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/models/os_profile.py | 1 | 6448 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class OSProfile(Model):
"""Specifies the operating system settings for the virtual machine.
:param computer_name: Specifies the host OS name of the virtual machine.
<br><br> **Max-length (Windows):** 15 characters <br><br> **Max-length
(Linux):** 64 characters. <br><br> For naming conventions and restrictions
see [Azure infrastructure services implementation
guidelines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-infrastructure-subscription-accounts-guidelines?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json#1-naming-conventions).
:type computer_name: str
:param admin_username: Specifies the name of the administrator account.
<br><br> **Windows-only restriction:** Cannot end in "." <br><br>
**Disallowed values:** "administrator", "admin", "user", "user1", "test",
"user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm",
"admin2", "aspnet", "backup", "console", "david", "guest", "john",
"owner", "root", "server", "sql", "support", "support_388945a0", "sys",
"test2", "test3", "user4", "user5". <br><br> **Minimum-length (Linux):** 1
character <br><br> **Max-length (Linux):** 64 characters <br><br>
**Max-length (Windows):** 20 characters <br><br><li> For root access to
the Linux VM, see [Using root privileges on Linux virtual machines in
Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-use-root-privileges?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)<br><li>
For a list of built-in system users on Linux that should not be used in
this field, see [Selecting User Names for Linux on
Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-usernames?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)
:type admin_username: str
:param admin_password: Specifies the password of the administrator
account. <br><br> **Minimum-length (Windows):** 8 characters <br><br>
**Minimum-length (Linux):** 6 characters <br><br> **Max-length
(Windows):** 123 characters <br><br> **Max-length (Linux):** 72 characters
<br><br> **Complexity requirements:** 3 out of 4 conditions below need to
be fulfilled <br> Has lower characters <br>Has upper characters <br> Has a
digit <br> Has a special character (Regex match [\\W_]) <br><br>
**Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd", "P@ssword123",
"Pa$$word", "pass@word1", "Password!", "Password1", "Password22",
"iloveyou!" <br><br> For resetting the password, see [How to reset the
Remote Desktop service or its login password in a Windows
VM](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-reset-rdp?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
<br><br> For resetting root password, see [Manage users, SSH, and check or
repair disks on Azure Linux VMs using the VMAccess
Extension](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-vmaccess-extension?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json#reset-root-password)
:type admin_password: str
:param custom_data: Specifies a base-64 encoded string of custom data. The
base-64 encoded string is decoded to a binary array that is saved as a
file on the Virtual Machine. The maximum length of the binary array is
65535 bytes. <br><br> For using cloud-init for your VM, see [Using
cloud-init to customize a Linux VM during
creation](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-cloud-init?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)
:type custom_data: str
:param windows_configuration: Specifies Windows operating system settings
on the virtual machine.
:type windows_configuration:
~azure.mgmt.compute.v2016_04_30_preview.models.WindowsConfiguration
:param linux_configuration: Specifies the Linux operating system settings
on the virtual machine. <br><br>For a list of supported Linux
distributions, see [Linux on Azure-Endorsed
Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-endorsed-distros?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)
<br><br> For running non-endorsed distributions, see [Information for
Non-Endorsed
Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-create-upload-generic?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json).
:type linux_configuration:
~azure.mgmt.compute.v2016_04_30_preview.models.LinuxConfiguration
:param secrets: Specifies set of certificates that should be installed
onto the virtual machine.
:type secrets:
list[~azure.mgmt.compute.v2016_04_30_preview.models.VaultSecretGroup]
"""
_attribute_map = {
'computer_name': {'key': 'computerName', 'type': 'str'},
'admin_username': {'key': 'adminUsername', 'type': 'str'},
'admin_password': {'key': 'adminPassword', 'type': 'str'},
'custom_data': {'key': 'customData', 'type': 'str'},
'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'},
'linux_configuration': {'key': 'linuxConfiguration', 'type': 'LinuxConfiguration'},
'secrets': {'key': 'secrets', 'type': '[VaultSecretGroup]'},
}
def __init__(self, **kwargs):
super(OSProfile, self).__init__(**kwargs)
self.computer_name = kwargs.get('computer_name', None)
self.admin_username = kwargs.get('admin_username', None)
self.admin_password = kwargs.get('admin_password', None)
self.custom_data = kwargs.get('custom_data', None)
self.windows_configuration = kwargs.get('windows_configuration', None)
self.linux_configuration = kwargs.get('linux_configuration', None)
self.secrets = kwargs.get('secrets', None)
| mit | -8,916,829,913,839,380,000 | 64.131313 | 211 | 0.685949 | false |
car3oon/saleor | saleor/product/templatetags/product_images.py | 1 | 1695 | import logging
import warnings
from django import template
from django.conf import settings
from django.contrib.staticfiles.templatetags.staticfiles import static
logger = logging.getLogger(__name__)
register = template.Library()
# cache available sizes at module level
def get_available_sizes():
all_sizes = set()
keys = settings.VERSATILEIMAGEFIELD_RENDITION_KEY_SETS
for size_group, sizes in keys.items():
for size_name, size in sizes:
all_sizes.add(size)
return all_sizes
AVAILABLE_SIZES = get_available_sizes()
@register.simple_tag()
def get_thumbnail(instance, size, method='crop'):
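    # Return the URL of the requested rendition (crop or thumbnail) for a
    # VersatileImageField, falling back to a static placeholder on failure.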
if instance:
size_name = '%s__%s' % (method, size)
if (size_name not in AVAILABLE_SIZES and not
settings.VERSATILEIMAGEFIELD_SETTINGS['create_images_on_demand']):
msg = ('Thumbnail size %s is not defined in settings '
'and it won\'t be generated automatically' % size_name)
warnings.warn(msg)
try:
if method == 'crop':
thumbnail = instance.crop[size]
else:
thumbnail = instance.thumbnail[size]
        except Exception:
logger.exception('Thumbnail fetch failed',
extra={'instance': instance, 'size': size})
else:
return thumbnail.url
return static('images/product-image-placeholder.png')
@register.simple_tag()
def product_first_image(product, size, method='crop'):
"""
Returns main product image
"""
all_images = product.images.all()
main_image = all_images[0].image if all_images else None
return get_thumbnail(main_image, size, method)
| bsd-3-clause | -8,253,396,041,086,579,000 | 30.981132 | 82 | 0.634808 | false |
citronneur/rdpy | bin/rdpy-rdpmitm.py | 1 | 11898 | #!/usr/bin/python
#
# Copyright (c) 2014-2015 Sylvain Peyrefitte
#
# This file is part of rdpy.
#
# rdpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
RDP proxy with Man in the middle capabilities
Save RDP events in output RSR file format
RSR file format can be read by rdpy-rsrplayer.py
----------------------------
Client RDP -> | ProxyServer | ProxyClient | -> Server RDP
----------------------------
| Record Session |
-----------------
"""
import sys
import os
import argparse
import time
from rdpy.core import log, error, rss
from rdpy.protocol.rdp import rdp
from twisted.internet import reactor
log._LOG_LEVEL = log.Level.INFO
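# Example invocation (addresses and output path are illustrative only):
#   rdpy-rdpmitm.py -t 192.168.1.10:3389 -o /tmp/rss_out -l 0.0.0.0:3389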
class ProxyServer(rdp.RDPServerObserver):
"""
@summary: Server side of proxy
"""
def __init__(self, controller, target, clientSecurityLevel, rssRecorder):
"""
@param controller: {RDPServerController}
@param target: {tuple(ip, port)}
        @param rssRecorder: {rss.FileRecorder} used to record the session
"""
rdp.RDPServerObserver.__init__(self, controller)
self._target = target
self._client = None
self._rss = rssRecorder
self._clientSecurityLevel = clientSecurityLevel
def setClient(self, client):
"""
        @summary: Event thrown by the client when it is ready
@param client: {ProxyClient}
"""
self._client = client
def onReady(self):
"""
        @summary: Event used to report the state of the server stack.
        It is first called when the human client connects, and a second time
        after the color depth negotiation, because that negotiation restarts
        the connection sequence.
@see: rdp.RDPServerObserver.onReady
"""
if self._client is None:
# try a connection
domain, username, password = self._controller.getCredentials()
self._rss.credentials(username, password,
domain, self._controller.getHostname())
width, height = self._controller.getScreen()
self._rss.screen(width, height, self._controller.getColorDepth())
reactor.connectTCP(self._target[0], int(self._target[1]), ProxyClientFactory(self, width, height,
domain, username, password, self._clientSecurityLevel))
def onClose(self):
"""
        @summary: Called when the human client closes the connection
@see: rdp.RDPServerObserver.onClose
"""
# end scenario
self._rss.close()
# close network stack
if self._client is None:
return
self._client._controller.close()
def onKeyEventScancode(self, code, isPressed, isExtended):
"""
        @summary: Called when a keyboard event is caught in scan code format
@param code: {integer} scan code of key
@param isPressed: {boolean} True if key is down
@param isExtended: {boolean} True if a special key
@see: rdp.RDPServerObserver.onKeyEventScancode
"""
if self._client is None:
return
self._client._controller.sendKeyEventScancode(
code, isPressed, isExtended)
self._rss.keyScancode(code, isPressed)
def onKeyEventUnicode(self, code, isPressed):
"""
        @summary: Called when a keyboard event is caught in unicode format
@param code: unicode of key
@param isPressed: True if key is down
@see: rdp.RDPServerObserver.onKeyEventUnicode
"""
if self._client is None:
return
self._client._controller.sendKeyEventUnicode(code, isPressed)
self._rss.keyUnicode(code, isPressed)
def onPointerEvent(self, x, y, button, isPressed):
"""
        @summary: Called on mouse events
@param x: {int} x position
@param y: {int} y position
@param button: {int} 1, 2, 3, 4 or 5 button
@param isPressed: {bool} True if mouse button is pressed
@see: rdp.RDPServerObserver.onPointerEvent
"""
if self._client is None:
return
self._client._controller.sendPointerEvent(x, y, button, isPressed)
class ProxyServerFactory(rdp.ServerFactory):
"""
@summary: Factory on listening events
"""
def __init__(self, target, ouputDir, privateKeyFilePath, certificateFilePath, clientSecurity):
"""
        @param target: {tuple(ip, port)}
        @param ouputDir: {str} directory where the RSS session recordings are written
        @param privateKeyFilePath: {str} file containing the server private key (if None -> fall back to standard RDP security)
        @param certificateFilePath: {str} file containing the server certificate (if None -> fall back to standard RDP security)
        @param clientSecurity: {str(ssl|rdp)} security layer used on the client connection side
"""
rdp.ServerFactory.__init__(
self, 16, privateKeyFilePath, certificateFilePath)
self._target = target
self._ouputDir = ouputDir
self._clientSecurity = clientSecurity
# use produce unique file by connection
self._uniqueId = 0
def buildObserver(self, controller, addr):
"""
@param controller: {rdp.RDPServerController}
@param addr: destination address
@see: rdp.ServerFactory.buildObserver
"""
self._uniqueId += 1
return ProxyServer(controller, self._target, self._clientSecurity, rss.createRecorder(os.path.join(self._ouputDir, "%s_%s_%s.rss" % (time.strftime('%Y%m%d%H%M%S'), addr.host, self._uniqueId))))
class ProxyClient(rdp.RDPClientObserver):
"""
@summary: Client side of proxy
"""
def __init__(self, controller, server):
"""
@param controller: {rdp.RDPClientController}
@param server: {ProxyServer}
"""
rdp.RDPClientObserver.__init__(self, controller)
self._server = server
def onReady(self):
"""
        @summary: Event used to signal that the RDP stack is ready.
        Informs the ProxyServer that the client side is connected.
@see: rdp.RDPClientObserver.onReady
"""
self._server.setClient(self)
# maybe color depth change
self._server._controller.setColorDepth(
self._controller.getColorDepth())
def onSessionReady(self):
"""
@summary: Windows session is ready
@see: rdp.RDPClientObserver.onSessionReady
"""
pass
def onClose(self):
"""
        @summary: Event informing that the stack has closed
@see: rdp.RDPClientObserver.onClose
"""
# end scenario
self._server._rss.close()
self._server._controller.close()
def onUpdate(self, destLeft, destTop, destRight, destBottom, width, height, bitsPerPixel, isCompress, data):
"""
        @summary: Event used to deliver a bitmap update
@param destLeft: {int} xmin position
@param destTop: {int} ymin position
@param destRight: {int} xmax position because RDP can send bitmap with padding
@param destBottom: {int} ymax position because RDP can send bitmap with padding
@param width: {int} width of bitmap
@param height: {int} height of bitmap
@param bitsPerPixel: {int} number of bit per pixel
@param isCompress: {bool} use RLE compression
@param data: {str} bitmap data
@see: rdp.RDPClientObserver.onUpdate
"""
self._server._rss.update(destLeft, destTop, destRight, destBottom, width, height,
bitsPerPixel, rss.UpdateFormat.BMP if isCompress else rss.UpdateFormat.RAW, data)
self._server._controller.sendUpdate(
destLeft, destTop, destRight, destBottom, width, height, bitsPerPixel, isCompress, data)
class ProxyClientFactory(rdp.ClientFactory):
"""
@summary: Factory for proxy client
"""
def __init__(self, server, width, height, domain, username, password, security):
"""
@param server: {ProxyServer}
@param width: {int} screen width
@param height: {int} screen height
@param domain: {str} domain session
@param username: {str} username session
@param password: {str} password session
@param security: {str(ssl|rdp)} security level
"""
self._server = server
self._width = width
self._height = height
self._domain = domain
self._username = username
self._password = password
self._security = security
def buildObserver(self, controller, addr):
"""
@summary: Build observer
@param controller: rdp.RDPClientController
@param addr: destination address
@see: rdp.ClientFactory.buildObserver
@return: ProxyClient
"""
# set screen resolution
controller.setScreen(self._width, self._height)
# set credential
controller.setDomain(self._domain)
controller.setUsername(self._username)
controller.setPassword(self._password)
controller.setSecurityLevel(self._security)
controller.setPerformanceSession()
return ProxyClient(controller, self._server)
def parseIpPort(interface, defaultPort="3389"):
if ':' in interface:
s = interface.split(':')
return s[0], int(s[1])
else:
return interface, int(defaultPort)
def isDirectory(outputDirectory):
    if outputDirectory is None or not os.path.isdir(outputDirectory):
        log.error("{} is an invalid output directory or directory doesn't exist".format(
            outputDirectory))
    return outputDirectory
def mapSecurityLayer(layer):
return {
"rdp": rdp.SecurityLevel.RDP_LEVEL_RDP,
"tls": rdp.SecurityLevel.RDP_LEVEL_SSL,
"nla": rdp.SecurityLevel.RDP_LEVEL_NLA
}[layer]
if __name__ == '__main__':
p = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
p.add_argument('-l', '--listen', type=parseIpPort, default="0.0.0.0:3389",
help="<addr>[:<port>] to bind the server")
p.add_argument('-t', '--target', type=parseIpPort, required=True,
help="<addr>[:<port>] of the target you want to connect to via proxy")
p.add_argument('-o', '--output', type=isDirectory,
help="output directory", required=True)
p.add_argument('-s', '--sec', choices=["rdp", "tls", "nla"],
default="rdp", help="set protocol security layer")
ssl = p.add_argument_group()
ssl.add_argument('-c', '--certificate', help="certificate for TLS connections")
ssl.add_argument('-k', '--key', help="private key of the given certificate for TLS connections")
args = p.parse_args()
if args.certificate and args.key and not args.sec == "nla":
args.sec = "tls"
log.info("running server on {addr}, using {sec} security layer, proxying to {target}".format(
addr=args.listen, sec=args.sec.upper(), target=args.target))
reactor.listenTCP(args.listen[1], ProxyServerFactory(
args.target, args.output, args.key, args.certificate, mapSecurityLayer(args.sec)),
interface=args.listen[0])
reactor.run()
| gpl-3.0 | -8,752,646,454,044,498,000 | 35.835913 | 201 | 0.620188 | false |
mit-ll/LO-PHI | lophi-automation/lophi_automation/master.py | 1 | 17891 | """
This connects to multiple controllers running across numerous physical
servers to control large scale experiments.
(c) 2015 Massachusetts Institute of Technology
"""
# Native
import multiprocessing
import random
import logging
logger = logging.getLogger(__name__)
# LO-PHI
import lophi.globals as G
# LO-PHI Automation
import lophi_automation.configs.helper as Configs
import lophi_automation.ext_interface.rabbitmq as rabbitmq
import lophi_automation.database.db as DB
from lophi_automation.network.command import LophiCommand
CMD_PREFIX = G.bcolors.HEADER + "lophi-controller$ " + G.bcolors.ENDC
LISTS = {'machines',
'controllers',
'analysis'}
class LoPhiMaster:
"""
This class will connect to all of our controllers and relay messages
amongst them. Potentially will have load-balancing etc. in the future.
"""
def __init__(self, options, positionals):
"""
Initialize our master with the config of controllers
"""
print "* Starting up LOPHI Master Process"
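        # Dispatch table mapping control-message verbs to handlers; verbs
        # without a dedicated handler fall through to command_abstract, which
        # forwards the command to every connected controller.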
self.COMMANDS = {G.CTRL_CMD_START: self.command_start,
G.CTRL_CMD_LIST: self.command_list,
G.CTRL_CMD_PAUSE: self.command_abstract,
G.CTRL_CMD_UNPAUSE: self.command_abstract,
G.CTRL_CMD_SPLASH: self.command_splash,
G.CTRL_CMD_UPDATE_HW: self.command_update_hw,
G.CTRL_CMD_STOP: self.command_abstract,
G.CTRL_CMD_DIE: self.command_abstract,
G.CTRL_CMD_ATTACH: self.command_abstract,
G.CTRL_CMD_EXECUTE: self.command_abstract}
self.MSG_TYPES = set([G.CTRL_TYPE, G.REG_TYPE])
# response header
self.RESP_HEADER = "[LOPHI Master] "
logger.debug("Importing config files...")
# Save our config file
self.master_config_file = options.config_file
# Save our config file
self.analysis_directory = options.analysis_directory
# Read our config into an internal structure
self.config_list = Configs.import_from_config(self.master_config_file,
"controller")
# Read our analysis scripts into an internal structure
self.update_analysis()
# Connect to our database
self.DB_analysis = DB.DatastoreAnalysis(options.services_host)
# Set our RabbitMQ host
self.amqp_host = options.services_host
def update_analysis(self):
"""
        Read our directory and update our list of discovered analyses to reflect
the current state of the file system
"""
# Read our analysis scripts into an internal structure
self.analysis_list = Configs.import_analysis_scripts(
self.analysis_directory)
def command_list(self, command):
"""
Generic command to list statuses of the server
"""
# See if the list exists and return results
if len(command.args) > 0 and command.args[0] in LISTS:
resp = []
# Print out our available machines
if command.args[0] == "machines":
# Loop over controllers
for c in self.config_list:
                # Get updated list of machines
self.config_list[c].get_machines()
# Print output
machines_tmp = self.config_list[c].machines
resp.append("--- %s" % c)
for x in machines_tmp:
name = machines_tmp[x].config.name
m_type = machines_tmp[x].type
profile = machines_tmp[x].config.volatility_profile
resp.append(" [%s] Type: %s, Profile: %s" % (
name, m_type, profile))
resp.append("--- %s" % c)
# Print out our LO-PHI configs
if command.args[0] == "controllers":
if len(self.config_list) == 0:
resp.append("No controllers are configured.")
else:
resp.append("--- Available Controllers")
for x in self.config_list:
resp.append(str(self.config_list[x]))
resp.append("--- Available Controllers")
# Print out our running analyses
if command.args[0] == "analysis":
# Ensure our list
self.update_analysis()
# Loop over controllers
for c in self.analysis_list:
analysis, filename = self.analysis_list[c]
resp.append("\n[%s] %s" % (c, filename))
if len(resp) == 0:
resp.append(
"No analysis scripts found in %s." % self.analysis_directory)
return '\n'.join(resp)
else:
return self.RESP_HEADER + "ERROR: No such list.\n Available lists are: %s\n" % LISTS
def get_machines(self, config):
"""
Given a config, this will find all of the matching machines on our
controllers. It returns a list of controllers and machines for
each.
@todo: Code is repeated on controller... Fix this!
"""
machines = {}
for c in self.config_list:
# Get most recent status
self.config_list[c].get_machines()
# Does a machine exist to run the profile on?
machines_c = []
for m in self.config_list[c].machines:
tmp_machine = self.config_list[c].machines[m]
if config == None:
machines_c.append(m)
else:
# Profiles Match?
m_profile = tmp_machine.config.volatility_profile
a_profile = config.volatility_profile
# Same type of machines?
m_type = tmp_machine.type
a_type = config.machine_type
# Break when we find a match
if m_profile == a_profile and tmp_machine.ALLOCATED < 0 and m_type == a_type:
machines_c.append(m)
if len(machines_c) > 0:
machines[c] = len(machines_c)
if len(machines) > 0:
return machines
else:
# No Match?
return None
def _send_analysis(self, analysis_name):
"""
Send analysis to our controller and start it.
"""
def command_start(self, command):
"""
Search through all of our machines, calculate how many we want to
start at each remote controller, issue the start command and push
the config that we want to execute
"""
# Update our analysis list
self.update_analysis()
# Figure out which analysis they want to run
if command.analysis is None:
logger.error("Must name analysis to start.")
return self.RESP_HEADER + "ERROR: No analysis name provided."
if command.analysis not in self.analysis_list.keys():
return self.RESP_HEADER + "ERROR: Analysis does not exist. Options are: %s" % self.analysis_list.keys()
else:
analysis_file = self.analysis_list[command.analysis][1]
analysis_class = self.analysis_list[command.analysis][0]
# Did they define a controller?
if command.controller is not None and command.controller not in self.config_list.keys():
return self.RESP_HEADER + "ERROR: Controller does not exist. Options are: %s" % self.config_list.keys()
# Does this analysis involve a binary sample?
if command.sample_doc_id is not None:
# Add this analysis to our database
db_analysis_id = self.DB_analysis.create_analysis(
command.sample_doc_id, command)
# update our command
command.db_analysis_id = db_analysis_id
# Start all of our analysis on the remote controller
if command.controller is not None:
self.config_list[command.controller].send_analysis(analysis_file,
command)
else:
# What machine type are we looking for?
if command.machine_type is not None:
machine_type = command.machine_type
elif analysis_class.MACHINE_TYPE is not None:
machine_type = analysis_class.MACHINE_TYPE
else:
machine_type = None
# Find a list of controllers with these types of machines
controllers = []
# Loop over all of our controllers
for config in self.config_list:
controller = self.config_list[config]
controller.get_machines()
# Loop over all of the machines on that controller
for m_name in controller.machines:
if controller.machines[m_name].type == machine_type:
controllers.append(controller)
# Did we find any acceptable controllers?
if len(controllers) == 0:
logger.error("No controllers found with machines of type %d" %
machine_type)
return self.RESP_HEADER + "No controllers found with machines of type %d" % machine_type
# Pick a random controller in the list
rand_idx = random.randint(0, len(controllers) - 1)
rand_controller = controllers[rand_idx]
print " * Sending analysis to random controller. (%s)" % rand_controller.name
rand_controller.send_analysis(analysis_file, command)
return self.RESP_HEADER + "Machines started successfully."
def command_abstract(self, command):
"""
This function processes abstract commands and passes them to the
appropriate analysis engine (or all)
"""
for c in self.config_list:
self.config_list[c].send_cmd(command)
logger.debug("Sent abstract command.")
return self.RESP_HEADER + "Command %s sent successfully." % command.cmd
def command_splash(self, cmd):
""" Print splash screen for the CLI """
ret = []
# Get our machines
machines = {} # self.get_machines(None)
# Calculate the total number of machines
total_machines = 0
for c in machines:
total_machines += machines[c]
ret.append(".--------------------------------------------.")
ret.append("| |")
ret.append("| LO-PHI Master Controller |")
ret.append("| |")
ret.append("+--------------------------------------------+")
ret.append("| - - |")
ret.append("| Remote Servers: %-3d |" % (
len(self.config_list)))
ret.append(
"| Remote Machines: %-3d |" % total_machines)
ret.append("| Analyses: %-3d |" % (
len(self.analysis_list)))
ret.append("| - - |")
ret.append("| - - |")
ret.append("| Type 'help' for a list of commands. |")
ret.append("| - - |")
ret.append("`--------------------------------------------'")
print "Sending Splash Screen"
# return "Splash!"
return '\n'.join(ret)
def command_update_hw(self, cmd):
""" Update our HW config info, e.g. update the IP for a physical machine sensor """
# TODO
pass
# def process_cmd(self, type, cmd, corr_id, routing_key):
def process_cmd(self, cmd):
""" Generic function to process commands received from amqp and send a response """
resp = self.COMMANDS[cmd.cmd](cmd)
logger.debug("Resp: %s" % resp)
# send to resp_queue
# if type == G.CTRL_TYPE:
#
# response = json.dumps((corr_id, routing_key, resp))
# logger.debug("Sending response: %s" % response)
# self.out_queue.put(response)
response = cmd.make_response(resp)
logger.debug("Sending response: %s" % response)
self.out_queue.put(str(response))
def start(self):
"""
Main function to just loop forever while waiting for input over amqp
"""
quit_commands = ['q', 'quit', 'exit']
# Setup our handler to close gracefully
G.set_exit_handler(self.cleanup)
# Setup or queues
self.manager = multiprocessing.Manager()
self.INPUT_QUEUE = self.manager.Queue()
# set of comm processes (rabbitmq, etc.) - for cleanup later
self.comm_processes = set([])
# Set up communication queue with all of our consumers, processes, and producers
self.in_queue = multiprocessing.Queue()
self.out_queue = multiprocessing.Queue()
# Listen for physical cards registering
# HOST = ''
# PORT = G.CARD_REG_PORT
#
# self.reg_consumer = Card_Reg_Server((HOST, PORT), UDPHandler, self.in_queue)
# self.reg_consumer.start()
# self.comm_processes.add(self.reg_consumer)
# Setup RabbitMQ consumers and queues
logger.debug("Starting up LOPHI RabbitmQ Producers...")
# self.ctrl_producer = rabbitmq.LOPHI_RPC_Producer(self.amqp_host,
# self.out_queue,
# G.RabbitMQ.CTRL_OUT,
# G.RabbitMQ.CTRL_IN,
# exchange_type=G.RabbitMQ.TYPE_FANOUT,
# exchange=G.RabbitMQ.EXCHANGE_FANOUT)
self.ctrl_producer = rabbitmq.LOPHI_RabbitMQ_Producer(self.amqp_host,
self.out_queue,
G.RabbitMQ.CTRL_OUT,
exchange_type=G.RabbitMQ.TYPE_FANOUT,
routing_key='',
exchange=G.RabbitMQ.EXCHANGE_FANOUT)
self.ctrl_producer.start()
self.comm_processes.add(self.ctrl_producer)
logger.debug("Starting up LOPHI RabbitMQ Consumers...")
# Listen for control messages, e.g. from a CLI
# self.ctrl_consumer = rabbitmq.LOPHI_RPC_Consumer(self.amqp_host,
# self.in_queue,
# G.RabbitMQ.CTRL_IN)
self.ctrl_consumer = rabbitmq.LOPHI_RabbitMQ_Consumer(self.amqp_host,
self.in_queue,
G.RabbitMQ.CTRL_IN)
self.ctrl_consumer.start()
self.comm_processes.add(self.ctrl_consumer)
# Connect to all of our controllers
for c in self.config_list:
self.config_list[c].connect()
print "Waiting for input from queues."
# Just loop forever taking input from rabbitmq
while 1:
user_input = self.in_queue.get()
# Decode input from rabbitmq format
try:
# (corr_id, routing_key, msg) = json.loads(user_input)
# type is type of message
# command
# (type, cmd_data) = msg
cmd = LophiCommand.from_data(user_input)
except:
print "Unknown command: ", user_input
continue
logger.debug("Received msg %s" % cmd)
# check if type is valid
# if msg.type not in self.MSG_TYPES:
# print "Invalid message type: %s\n" % type
# See if it's valid command
if cmd.cmd not in self.COMMANDS.keys():
resp = "Invalid Command: %s\n" % cmd.cmd
logger.debug(resp)
response = cmd.make_response(resp)
self.out_queue.put(response)
else:
# self.process_cmd(type, cmd, corr_id, routing_key)
self.process_cmd(cmd)
"""
@todo: add command to kill master
"""
# Call our cleanup function and shutdown nicely
self.cleanup(None)
def cleanup(self, sig, func=None):
"""
        Simple function to close everything up nicely
"""
print "Closing up shop..."
# Disconnect all of our remote controllers
for c in self.config_list:
self.config_list[c].disconnect()
# Kill our data handler
self.INPUT_QUEUE.put(G.CTRL_CMD_KILL)
# Terminate the consumers and producers
self.in_queue.put(G.CTRL_CMD_KILL)
self.out_queue.put(G.CTRL_CMD_KILL)
for child in self.comm_processes:
child.stop()
| bsd-3-clause | -743,557,954,228,598,300 | 37.641469 | 115 | 0.511989 | false |
akhmadMizkat/odoo | addons/base_setup/res_config.py | 1 | 3628 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import fields, osv
class base_config_settings(osv.osv_memory):
_name = 'base.config.settings'
_inherit = 'res.config.settings'
_columns = {
'group_multi_company': fields.boolean('Manage multiple companies',
help='Work in multi-company environments, with appropriate security access between companies.',
implied_group='base.group_multi_company'),
'module_share': fields.boolean('Allow documents sharing',
            help="""Share or embed any screen of Odoo."""),
'module_portal': fields.boolean('Activate the customer portal',
help="""Give your customers access to their documents."""),
'module_auth_oauth': fields.boolean('Use external authentication providers (OAuth)'),
'module_base_import': fields.boolean("Allow users to import data from CSV/XLS/XLSX/ODS files"),
'module_google_drive': fields.boolean('Attach Google documents to any record',
help="""This installs the module google_docs."""),
'module_google_calendar': fields.boolean('Allow the users to synchronize their calendar with Google Calendar',
help="""This installs the module google_calendar."""),
'module_inter_company_rules': fields.boolean('Manage Inter Company',
help="""This installs the module inter_company_rules.\n Configure company rules to automatically create SO/PO when one of your company sells/buys to another of your company."""),
'company_share_partner': fields.boolean('Share partners to all companies',
help="Share your partners to all companies defined in your instance.\n"
" * Checked : Partners are visible for every companies, even if a company is defined on the partner.\n"
" * Unchecked : Each company can see only its partner (partners where company is defined). Partners not related to a company are visible for all companies."),
'group_multi_currency': fields.boolean('Allow multi currencies',
implied_group='base.group_multi_currency',
help="Allows to work in a multi currency environment"),
}
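    # Returns an ir.actions.act_window descriptor that opens the current
    # user's company record in form view.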
def open_company(self, cr, uid, ids, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context)
return {
'type': 'ir.actions.act_window',
'name': 'My Company',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'res.company',
'res_id': user.company_id.id,
'target': 'current',
}
def get_default_company_share_partner(self, cr, uid, fields, context=None):
partner_rule = self.pool['ir.model.data'].xmlid_to_object(cr, uid, 'base.res_partner_rule', context=context)
return {
'company_share_partner': not bool(partner_rule.active)
}
def set_default_company_share_partner(self, cr, uid, ids, context=None):
partner_rule = self.pool['ir.model.data'].xmlid_to_object(cr, uid, 'base.res_partner_rule', context=context)
for wizard in self.browse(cr, uid, ids, context=context):
self.pool['ir.rule'].write(cr, uid, [partner_rule.id], {'active': not bool(wizard.company_share_partner)}, context=context)
# Empty class but required since it's overridden by sale & crm
class sale_config_settings(osv.osv_memory):
_name = 'sale.config.settings'
_inherit = 'res.config.settings'
_columns = {
}
| gpl-3.0 | -6,858,198,202,764,836,000 | 53.149254 | 190 | 0.63699 | false |
netsamir/dotfiles | files/vim/bundle/YouCompleteMe/third_party/ycmd/ycmd/completers/typescript/typescript_completer.py | 1 | 18871 | # Copyright (C) 2015 - 2016 Google Inc.
# 2016 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import * # noqa
import json
import logging
import os
import re
import subprocess
import itertools
from threading import Thread
from threading import Event
from threading import Lock
from tempfile import NamedTemporaryFile
from ycmd import responses
from ycmd import utils
from ycmd.completers.completer import Completer
from ycmd.completers.completer_utils import GetFileContents
BINARY_NOT_FOUND_MESSAGE = ( 'tsserver not found. '
'TypeScript 1.5 or higher is required' )
MAX_DETAILED_COMPLETIONS = 100
RESPONSE_TIMEOUT_SECONDS = 10
_logger = logging.getLogger( __name__ )
class DeferredResponse( object ):
"""
A deferred that resolves to a response from TSServer.
"""
def __init__( self, timeout = RESPONSE_TIMEOUT_SECONDS ):
self._event = Event()
self._message = None
self._timeout = timeout
def resolve( self, message ):
self._message = message
self._event.set()
def result( self ):
self._event.wait( timeout = self._timeout )
if not self._event.isSet():
raise RuntimeError( 'Response Timeout' )
message = self._message
if not message[ 'success' ]:
raise RuntimeError( message[ 'message' ] )
if 'body' in message:
return self._message[ 'body' ]
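# Usage sketch (comments only, mirroring how TypeScriptCompleter uses this
# class below): the requesting thread registers the deferred before writing to
# tsserver, and the reader thread resolves it when the matching response
# arrives.
#
#   deferred = DeferredResponse()
#   self._pending[ seq ] = deferred   # under self._pendinglock
#   ...                               # request written to tsserver's stdin
#   body = deferred.result()          # blocks up to RESPONSE_TIMEOUT_SECONDS
#   # _ReaderLoop later calls deferred.resolve( message ) for this request_seq.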
class TypeScriptCompleter( Completer ):
"""
Completer for TypeScript.
It uses TSServer which is bundled with TypeScript 1.5
See the protocol here:
https://github.com/Microsoft/TypeScript/blob/2cb0dfd99dc2896958b75e44303d8a7a32e5dc33/src/server/protocol.d.ts
"""
def __init__( self, user_options ):
super( TypeScriptCompleter, self ).__init__( user_options )
# Used to prevent threads from concurrently writing to
# the tsserver process' stdin
self._writelock = Lock()
binarypath = utils.PathToFirstExistingExecutable( [ 'tsserver' ] )
if not binarypath:
_logger.error( BINARY_NOT_FOUND_MESSAGE )
raise RuntimeError( BINARY_NOT_FOUND_MESSAGE )
self._logfile = _LogFileName()
tsserver_log = '-file {path} -level {level}'.format( path = self._logfile,
level = _LogLevel() )
    # TSServer gets the configuration for the log file through the environment
# variable 'TSS_LOG'. This seems to be undocumented but looking at the
# source code it seems like this is the way:
# https://github.com/Microsoft/TypeScript/blob/8a93b489454fdcbdf544edef05f73a913449be1d/src/server/server.ts#L136
self._environ = os.environ.copy()
utils.SetEnviron( self._environ, 'TSS_LOG', tsserver_log )
_logger.info( 'TSServer log file: {0}'.format( self._logfile ) )
# Each request sent to tsserver must have a sequence id.
# Responses contain the id sent in the corresponding request.
self._sequenceid = itertools.count()
# Used to prevent threads from concurrently accessing the sequence counter
self._sequenceid_lock = Lock()
# We need to redirect the error stream to the output one on Windows.
self._tsserver_handle = utils.SafePopen( binarypath,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT,
env = self._environ )
# Used to map sequence id's to their corresponding DeferredResponse
# objects. The reader loop uses this to hand out responses.
self._pending = {}
# Used to prevent threads from concurrently reading and writing to
# the pending response dictionary
self._pendinglock = Lock()
# Start a thread to read response from TSServer.
self._thread = Thread( target = self._ReaderLoop, args = () )
self._thread.daemon = True
self._thread.start()
_logger.info( 'Enabling typescript completion' )
def _ReaderLoop( self ):
"""
Read responses from TSServer and use them to resolve
the DeferredResponse instances.
"""
while True:
try:
message = self._ReadMessage()
# We ignore events for now since we don't have a use for them.
msgtype = message[ 'type' ]
if msgtype == 'event':
eventname = message[ 'event' ]
          _logger.info( 'Received {0} event from tsserver'.format( eventname ) )
continue
if msgtype != 'response':
          _logger.error( 'Unsupported message type {0}'.format( msgtype ) )
continue
seq = message[ 'request_seq' ]
with self._pendinglock:
if seq in self._pending:
self._pending[ seq ].resolve( message )
del self._pending[ seq ]
except Exception as e:
_logger.exception( e )
def _ReadMessage( self ):
"""Read a response message from TSServer."""
# The headers are pretty similar to HTTP.
# At the time of writing, 'Content-Length' is the only supplied header.
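    # Illustrative wire format (hypothetical values, not a captured response):
    #
    #   Content-Length: 76\r\n
    #   \r\n
    #   {"seq":0,"type":"response","command":"open","request_seq":1,"success":true}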
headers = {}
while True:
headerline = self._tsserver_handle.stdout.readline().strip()
if not headerline:
break
key, value = utils.ToUnicode( headerline ).split( ':', 1 )
headers[ key.strip() ] = value.strip()
# The response message is a JSON object which comes back on one line.
# Since this might change in the future, we use the 'Content-Length'
# header.
if 'Content-Length' not in headers:
raise RuntimeError( "Missing 'Content-Length' header" )
contentlength = int( headers[ 'Content-Length' ] )
# TSServer adds a newline at the end of the response message and counts it
# as one character (\n) towards the content length. However, newlines are
# two characters on Windows (\r\n), so we need to take care of that. See
# issue https://github.com/Microsoft/TypeScript/issues/3403
content = self._tsserver_handle.stdout.read( contentlength )
if utils.OnWindows() and content.endswith( b'\r' ):
content += self._tsserver_handle.stdout.read( 1 )
return json.loads( utils.ToUnicode( content ) )
def _BuildRequest( self, command, arguments = None ):
"""Build TSServer request object."""
with self._sequenceid_lock:
seq = next( self._sequenceid )
request = {
'seq': seq,
'type': 'request',
'command': command
}
if arguments:
request[ 'arguments' ] = arguments
return request
def _SendCommand( self, command, arguments = None ):
"""
Send a request message to TSServer but don't wait for the response.
This function is to be used when we don't care about the response
to the message that is sent.
"""
request = json.dumps( self._BuildRequest( command, arguments ) ) + '\n'
with self._writelock:
self._tsserver_handle.stdin.write( utils.ToBytes( request ) )
self._tsserver_handle.stdin.flush()
def _SendRequest( self, command, arguments = None ):
"""
Send a request message to TSServer and wait
for the response.
"""
request = self._BuildRequest( command, arguments )
json_request = json.dumps( request ) + '\n'
deferred = DeferredResponse()
with self._pendinglock:
seq = request[ 'seq' ]
self._pending[ seq ] = deferred
with self._writelock:
self._tsserver_handle.stdin.write( utils.ToBytes( json_request ) )
self._tsserver_handle.stdin.flush()
return deferred.result()
def _Reload( self, request_data ):
"""
    Synchronize TSServer's view of the file to
the contents of the unsaved buffer.
"""
filename = request_data[ 'filepath' ]
contents = request_data[ 'file_data' ][ filename ][ 'contents' ]
tmpfile = NamedTemporaryFile( delete = False )
tmpfile.write( utils.ToBytes( contents ) )
tmpfile.close()
self._SendRequest( 'reload', {
'file': filename,
'tmpfile': tmpfile.name
} )
os.unlink( tmpfile.name )
def SupportedFiletypes( self ):
return [ 'typescript' ]
def ComputeCandidatesInner( self, request_data ):
self._Reload( request_data )
entries = self._SendRequest( 'completions', {
'file': request_data[ 'filepath' ],
'line': request_data[ 'line_num' ],
'offset': request_data[ 'start_codepoint' ]
} )
# A less detailed version of the completion data is returned
# if there are too many entries. This improves responsiveness.
if len( entries ) > MAX_DETAILED_COMPLETIONS:
return [ _ConvertCompletionData(e) for e in entries ]
names = []
namelength = 0
for e in entries:
name = e[ 'name' ]
namelength = max( namelength, len( name ) )
names.append( name )
detailed_entries = self._SendRequest( 'completionEntryDetails', {
'file': request_data[ 'filepath' ],
'line': request_data[ 'line_num' ],
'offset': request_data[ 'start_codepoint' ],
'entryNames': names
} )
return [ _ConvertDetailedCompletionData( e, namelength )
for e in detailed_entries ]
def GetSubcommandsMap( self ):
return {
'GoToDefinition' : ( lambda self, request_data, args:
self._GoToDefinition( request_data ) ),
'GoToReferences' : ( lambda self, request_data, args:
self._GoToReferences( request_data ) ),
'GoToType' : ( lambda self, request_data, args:
self._GoToType( request_data ) ),
'GetType' : ( lambda self, request_data, args:
self._GetType( request_data ) ),
'GetDoc' : ( lambda self, request_data, args:
self._GetDoc( request_data ) ),
'RefactorRename' : ( lambda self, request_data, args:
self._RefactorRename( request_data, args ) ),
}
def OnBufferVisit( self, request_data ):
filename = request_data[ 'filepath' ]
self._SendCommand( 'open', { 'file': filename } )
def OnBufferUnload( self, request_data ):
filename = request_data[ 'filepath' ]
self._SendCommand( 'close', { 'file': filename } )
def OnFileReadyToParse( self, request_data ):
self._Reload( request_data )
def _GoToDefinition( self, request_data ):
self._Reload( request_data )
try:
filespans = self._SendRequest( 'definition', {
'file': request_data[ 'filepath' ],
'line': request_data[ 'line_num' ],
'offset': request_data[ 'column_codepoint' ]
} )
span = filespans[ 0 ]
return responses.BuildGoToResponseFromLocation(
_BuildLocation( utils.SplitLines( GetFileContents( request_data,
span[ 'file' ] ) ),
span[ 'file' ],
span[ 'start' ][ 'line' ],
span[ 'start' ][ 'offset' ] ) )
except RuntimeError:
raise RuntimeError( 'Could not find definition' )
def _GoToReferences( self, request_data ):
self._Reload( request_data )
response = self._SendRequest( 'references', {
'file': request_data[ 'filepath' ],
'line': request_data[ 'line_num' ],
'offset': request_data[ 'column_codepoint' ]
} )
return [
responses.BuildGoToResponseFromLocation(
_BuildLocation( utils.SplitLines( GetFileContents( request_data,
ref[ 'file' ] ) ),
ref[ 'file' ],
ref[ 'start' ][ 'line' ],
ref[ 'start' ][ 'offset' ] ),
ref[ 'lineText' ] )
for ref in response[ 'refs' ]
]
def _GoToType( self, request_data ):
self._Reload( request_data )
try:
filespans = self._SendRequest( 'typeDefinition', {
'file': request_data[ 'filepath' ],
'line': request_data[ 'line_num' ],
'offset': request_data[ 'column_num' ]
} )
span = filespans[ 0 ]
return responses.BuildGoToResponse(
filepath = span[ 'file' ],
line_num = span[ 'start' ][ 'line' ],
column_num = span[ 'start' ][ 'offset' ]
)
except RuntimeError:
raise RuntimeError( 'Could not find type definition' )
def _GetType( self, request_data ):
self._Reload( request_data )
info = self._SendRequest( 'quickinfo', {
'file': request_data[ 'filepath' ],
'line': request_data[ 'line_num' ],
'offset': request_data[ 'column_codepoint' ]
} )
return responses.BuildDisplayMessageResponse( info[ 'displayString' ] )
def _GetDoc( self, request_data ):
self._Reload( request_data )
info = self._SendRequest( 'quickinfo', {
'file': request_data[ 'filepath' ],
'line': request_data[ 'line_num' ],
'offset': request_data[ 'column_codepoint' ]
} )
message = '{0}\n\n{1}'.format( info[ 'displayString' ],
info[ 'documentation' ] )
return responses.BuildDetailedInfoResponse( message )
def _RefactorRename( self, request_data, args ):
if len( args ) != 1:
raise ValueError( 'Please specify a new name to rename it to.\n'
'Usage: RefactorRename <new name>' )
self._Reload( request_data )
response = self._SendRequest( 'rename', {
'file': request_data[ 'filepath' ],
'line': request_data[ 'line_num' ],
'offset': request_data[ 'column_codepoint' ],
'findInComments': False,
'findInStrings': False,
} )
if not response[ 'info' ][ 'canRename' ]:
raise RuntimeError( 'Value cannot be renamed: {0}'.format(
response[ 'info' ][ 'localizedErrorMessage' ] ) )
# The format of the response is:
#
# body {
# info {
# ...
# triggerSpan: {
# length: original_length
# }
# }
#
# locs [ {
# file: file_path
# locs: [
# start: {
# line: line_num
# offset: offset
# }
# end {
# line: line_num
# offset: offset
# }
# ] }
# ]
# }
#
new_name = args[ 0 ]
location = responses.Location( request_data[ 'line_num' ],
request_data[ 'column_num' ],
request_data[ 'filepath' ] )
chunks = []
for file_replacement in response[ 'locs' ]:
chunks.extend( _BuildFixItChunksForFile( request_data,
new_name,
file_replacement ) )
return responses.BuildFixItResponse( [
responses.FixIt( location, chunks )
] )
def Shutdown( self ):
self._SendCommand( 'exit' )
if not self.user_options[ 'server_keep_logfiles' ]:
os.unlink( self._logfile )
self._logfile = None
def DebugInfo( self, request_data ):
return ( 'TSServer logfile:\n {0}' ).format( self._logfile )
def _LogFileName():
with NamedTemporaryFile( dir = utils.PathToCreatedTempDir(),
prefix = 'tsserver_',
suffix = '.log',
delete = False ) as logfile:
return logfile.name
def _LogLevel():
return 'verbose' if _logger.isEnabledFor( logging.DEBUG ) else 'normal'
def _ConvertCompletionData( completion_data ):
return responses.BuildCompletionData(
insertion_text = completion_data[ 'name' ],
menu_text = completion_data[ 'name' ],
kind = completion_data[ 'kind' ],
extra_data = completion_data[ 'kind' ]
)
def _ConvertDetailedCompletionData( completion_data, padding = 0 ):
name = completion_data[ 'name' ]
display_parts = completion_data[ 'displayParts' ]
signature = ''.join( [ p[ 'text' ] for p in display_parts ] )
  # Needed to strip newlines and indentation from the signature.
  signature = re.sub( r'\s+', ' ', signature )
menu_text = '{0} {1}'.format( name.ljust( padding ), signature )
return responses.BuildCompletionData(
insertion_text = name,
menu_text = menu_text,
kind = completion_data[ 'kind' ]
)
def _BuildFixItChunkForRange( new_name,
file_contents,
file_name,
source_range ):
""" returns list FixItChunk for a tsserver source range """
return responses.FixItChunk(
new_name,
responses.Range(
start = _BuildLocation( file_contents,
file_name,
source_range[ 'start' ][ 'line' ],
source_range[ 'start' ][ 'offset' ] ),
end = _BuildLocation( file_contents,
file_name,
source_range[ 'end' ][ 'line' ],
source_range[ 'end' ][ 'offset' ] ) ) )
def _BuildFixItChunksForFile( request_data, new_name, file_replacement ):
""" returns a list of FixItChunk for each replacement range for the
supplied file"""
# On windows, tsserver annoyingly returns file path as C:/blah/blah,
# whereas all other paths in Python are of the C:\\blah\\blah form. We use
# normpath to have python do the conversion for us.
file_path = os.path.normpath( file_replacement[ 'file' ] )
file_contents = utils.SplitLines( GetFileContents( request_data, file_path ) )
return [ _BuildFixItChunkForRange( new_name, file_contents, file_path, r )
for r in file_replacement[ 'locs' ] ]
def _BuildLocation( file_contents, filename, line, offset ):
return responses.Location(
line = line,
# tsserver returns codepoint offsets, but we need byte offsets, so we must
# convert
column = utils.CodepointOffsetToByteOffset( file_contents[ line - 1 ],
offset ),
filename = filename )
| unlicense | 175,188,819,222,903,420 | 32.818996 | 117 | 0.597213 | false |
google-research/ssl_detection | third_party/FasterRCNN/FasterRCNN/modeling/model_fpn.py | 1 | 8940 | # -*- coding: utf-8 -*-
import itertools
import numpy as np
import tensorflow as tf
from tensorpack.models import Conv2D, FixedUnPooling, MaxPooling, layer_register
from tensorpack.tfutils.argscope import argscope
from tensorpack.tfutils.scope_utils import under_name_scope
from tensorpack.tfutils.summary import add_moving_summary
from tensorpack.tfutils.tower import get_current_tower_context
from tensorpack.utils.argtools import memoized
from config import config as cfg
from ..utils.box_ops import area as tf_area
from .backbone import GroupNorm
from .model_box import roi_align
from .model_rpn import generate_rpn_proposals, rpn_losses, get_all_anchors
@layer_register(log_shape=True)
def fpn_model(features):
"""
Args:
features ([tf.Tensor]): ResNet features c2-c5
Returns:
[tf.Tensor]: FPN features p2-p6
"""
assert len(features) == 4, features
num_channel = cfg.FPN.NUM_CHANNEL
use_gn = cfg.FPN.NORM == 'GN'
def upsample2x(name, x):
try:
resize = tf.compat.v2.image.resize_images
with tf.name_scope(name):
shp2d = tf.shape(x)[2:]
x = tf.transpose(x, [0, 2, 3, 1])
x = resize(x, shp2d * 2, 'nearest')
x = tf.transpose(x, [0, 3, 1, 2])
return x
except AttributeError:
return FixedUnPooling(
name,
x,
2,
unpool_mat=np.ones((2, 2), dtype='float32'),
data_format='channels_first')
with argscope(
Conv2D,
data_format='channels_first',
activation=tf.identity,
use_bias=True,
kernel_initializer=tf.variance_scaling_initializer(scale=1.)):
lat_2345 = [
Conv2D('lateral_1x1_c{}'.format(i + 2), c, num_channel, 1)
for i, c in enumerate(features)
]
if use_gn:
lat_2345 = [
GroupNorm('gn_c{}'.format(i + 2), c) for i, c in enumerate(lat_2345)
]
lat_sum_5432 = []
for idx, lat in enumerate(lat_2345[::-1]):
if idx == 0:
lat_sum_5432.append(lat)
else:
lat = lat + upsample2x('upsample_lat{}'.format(6 - idx),
lat_sum_5432[-1])
lat_sum_5432.append(lat)
p2345 = [
Conv2D('posthoc_3x3_p{}'.format(i + 2), c, num_channel, 3)
for i, c in enumerate(lat_sum_5432[::-1])
]
if use_gn:
p2345 = [
GroupNorm('gn_p{}'.format(i + 2), c) for i, c in enumerate(p2345)
]
p6 = MaxPooling(
'maxpool_p6',
p2345[-1],
pool_size=1,
strides=2,
data_format='channels_first',
padding='VALID')
return p2345 + [p6]
@under_name_scope()
def fpn_map_rois_to_levels(boxes):
"""
Assign boxes to level 2~5.
Args:
boxes (nx4):
Returns:
[tf.Tensor]: 4 tensors for level 2-5. Each tensor is a vector of indices
of boxes in its level.
[tf.Tensor]: 4 tensors, the gathered boxes in each level.
Be careful that the returned tensor could be empty.
"""
sqrtarea = tf.sqrt(tf_area(boxes))
level = tf.cast(
tf.floor(4 + tf.log(sqrtarea * (1. / 224) + 1e-6) * (1.0 / np.log(2))),
tf.int32)
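  # Worked example (illustrative): a 224x224 RoI has sqrtarea == 224, so
  # level == floor(4 + log2(224 / 224)) == 4; a 112x112 RoI gives
  # floor(4 + log2(0.5)) == 3.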
# RoI levels range from 2~5 (not 6)
level_ids = [
tf.where(level <= 2),
tf.where(tf.equal(level, 3)), # == is not supported
tf.where(tf.equal(level, 4)),
tf.where(level >= 5)
]
level_ids = [
tf.reshape(x, [-1], name='roi_level{}_id'.format(i + 2))
for i, x in enumerate(level_ids)
]
num_in_levels = [
tf.size(x, name='num_roi_level{}'.format(i + 2))
for i, x in enumerate(level_ids)
]
add_moving_summary(*num_in_levels)
level_boxes = [tf.gather(boxes, ids) for ids in level_ids]
return level_ids, level_boxes
@under_name_scope()
def multilevel_roi_align(features, rcnn_boxes, resolution):
"""
Args:
features ([tf.Tensor]): 4 FPN feature level 2-5
rcnn_boxes (tf.Tensor): nx4 boxes
resolution (int): output spatial resolution
Returns:
NxC x res x res
"""
assert len(features) == 4, features
# Reassign rcnn_boxes to levels
level_ids, level_boxes = fpn_map_rois_to_levels(rcnn_boxes)
all_rois = []
# Crop patches from corresponding levels
for i, boxes, featuremap in zip(itertools.count(), level_boxes, features):
with tf.name_scope('roi_level{}'.format(i + 2)):
boxes_on_featuremap = boxes * (1.0 / cfg.FPN.ANCHOR_STRIDES[i])
all_rois.append(roi_align(featuremap, boxes_on_featuremap, resolution))
# this can fail if using TF<=1.8 with MKL build
all_rois = tf.concat(all_rois, axis=0) # NCHW
# Unshuffle to the original order, to match the original samples
level_id_perm = tf.concat(level_ids, axis=0) # A permutation of 1~N
level_id_invert_perm = tf.invert_permutation(level_id_perm)
all_rois = tf.gather(all_rois, level_id_invert_perm, name='output')
return all_rois
def multilevel_rpn_losses(multilevel_anchors, multilevel_label_logits,
multilevel_box_logits):
"""
Args:
multilevel_anchors: #lvl RPNAnchors
multilevel_label_logits: #lvl tensors of shape HxWxA
multilevel_box_logits: #lvl tensors of shape HxWxAx4
Returns:
label_loss, box_loss
"""
num_lvl = len(cfg.FPN.ANCHOR_STRIDES)
assert len(multilevel_anchors) == num_lvl
assert len(multilevel_label_logits) == num_lvl
assert len(multilevel_box_logits) == num_lvl
losses = []
with tf.name_scope('rpn_losses'):
for lvl in range(num_lvl):
anchors = multilevel_anchors[lvl]
label_loss, box_loss = rpn_losses(
anchors.gt_labels,
anchors.encoded_gt_boxes(),
multilevel_label_logits[lvl],
multilevel_box_logits[lvl],
name_scope='level{}'.format(lvl + 2))
losses.extend([label_loss, box_loss])
total_label_loss = tf.add_n(losses[::2], name='label_loss')
total_box_loss = tf.add_n(losses[1::2], name='box_loss')
add_moving_summary(total_label_loss, total_box_loss)
return [total_label_loss, total_box_loss]
@under_name_scope()
def generate_fpn_proposals(multilevel_pred_boxes, multilevel_label_logits,
image_shape2d):
"""
Args:
multilevel_pred_boxes: #lvl HxWxAx4 boxes
multilevel_label_logits: #lvl tensors of shape HxWxA
Returns:
boxes: kx4 float
scores: k logits
"""
num_lvl = len(cfg.FPN.ANCHOR_STRIDES)
assert len(multilevel_pred_boxes) == num_lvl
assert len(multilevel_label_logits) == num_lvl
training = get_current_tower_context().is_training
all_boxes = []
all_scores = []
if cfg.FPN.PROPOSAL_MODE == 'Level':
fpn_nms_topk = cfg.RPN.TRAIN_PER_LEVEL_NMS_TOPK if training else cfg.RPN.TEST_PER_LEVEL_NMS_TOPK
for lvl in range(num_lvl):
with tf.name_scope('Lvl{}'.format(lvl + 2)):
pred_boxes_decoded = multilevel_pred_boxes[lvl]
proposal_boxes, proposal_scores = generate_rpn_proposals(
tf.reshape(pred_boxes_decoded, [-1, 4]),
tf.reshape(multilevel_label_logits[lvl], [-1]), image_shape2d,
fpn_nms_topk)
all_boxes.append(proposal_boxes)
all_scores.append(proposal_scores)
proposal_boxes = tf.concat(all_boxes, axis=0) # nx4
proposal_scores = tf.concat(all_scores, axis=0) # n
# Here we are different from Detectron.
# Detectron picks top-k within the batch, rather than within an image. However we do not have a batch.
proposal_topk = tf.minimum(tf.size(proposal_scores), fpn_nms_topk)
proposal_scores, topk_indices = tf.nn.top_k(
proposal_scores, k=proposal_topk, sorted=False)
proposal_boxes = tf.gather(
proposal_boxes, topk_indices, name='all_proposals')
else:
for lvl in range(num_lvl):
with tf.name_scope('Lvl{}'.format(lvl + 2)):
pred_boxes_decoded = multilevel_pred_boxes[lvl]
all_boxes.append(tf.reshape(pred_boxes_decoded, [-1, 4]))
all_scores.append(tf.reshape(multilevel_label_logits[lvl], [-1]))
all_boxes = tf.concat(all_boxes, axis=0)
all_scores = tf.concat(all_scores, axis=0)
proposal_boxes, proposal_scores = generate_rpn_proposals(
all_boxes, all_scores, image_shape2d,
cfg.RPN.TRAIN_PRE_NMS_TOPK if training else cfg.RPN.TEST_PRE_NMS_TOPK,
cfg.RPN.TRAIN_POST_NMS_TOPK if training else cfg.RPN.TEST_POST_NMS_TOPK)
tf.sigmoid(proposal_scores, name='probs') # for visualization
return tf.stop_gradient(proposal_boxes, name='boxes'), \
tf.stop_gradient(proposal_scores, name='scores')
@memoized
def get_all_anchors_fpn(*, strides, sizes, ratios, max_size):
"""
Returns:
    [anchors]: each element is a SxSx NUM_ANCHOR_RATIOS x4 array.
"""
assert len(strides) == len(sizes)
foas = []
for stride, size in zip(strides, sizes):
foa = get_all_anchors(
stride=stride, sizes=(size,), ratios=ratios, max_size=max_size)
foas.append(foa)
return foas
| apache-2.0 | 4,639,428,613,423,541,000 | 32.234201 | 106 | 0.630201 | false |
belcaid/last-slack-msg-on-raspberrypi | slack.py | 1 | 1465 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
from slackclient import SlackClient
import yaml
import datetime
import pytz
import json
message_string = ""  # module-level result shared with reception_message()
token = "ADD-YOUR-TOKEN"# found at https://api.slack.com/web#authentication
sc = SlackClient(token)
users = sc.api_call("users.list")
users_dict = users['members']
def nom_utilisateur(id):
for item in users_dict:
if item['id'] == id:
nom_user = item['name']
return nom_user
def conversion_date(ts):
ma_date = datetime.datetime.fromtimestamp(ts, tz=pytz.timezone('America/Montreal')).strftime('%d-%m-%Y %H:%M:%S')
return ma_date
def reception_message():
global message_string
if sc.rtm_connect():
while True:
contenu_recu = sc.rtm_read()
            # Verify that the list is not empty
if contenu_recu:
mon_dict = contenu_recu[0]
# Change this line by adding the channel id that you want to select
if mon_dict['type'] == "message" and mon_dict['channel'] == "YOUR-CHANNEL-ID" and mon_dict['user']!="USLACKBOT":
message_string = nom_utilisateur(mon_dict["user"]) + "%,%" + mon_dict['text'] + "%,%" + conversion_date(float(mon_dict['ts']))
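                    # Resulting format (illustrative values): "alice%,%hello world%,%25-12-2016 14:30:59"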
return message_string
time.sleep(1)
else:
return "Erreur de connexion"
if __name__ == "__main__":
reception_message()
| mit | -8,217,405,894,988,625,000 | 28.897959 | 146 | 0.588396 | false |
ddofer/ProFET | ProFET/feat_extract/FastaFilter.py | 1 | 4999 | '''
Go through a given fasta file (later - mod for sets of fastas),
and output a new fasta file, with sequences containing unknown or non-standard AA
removed, and sequences that are too short removed.
Later, can be used to filter sequences whose ID is a classname; (keeping those
with a minimum amount of examples, e.g. 30+ samples per CATH classs/domain/..)
Look for:
5.1.2 Iterating over the records in a sequence file - BioPy Cookbook
16.1.1 Filtering a sequence file (BioPython cookbook)
Filtering biopython seqio.
5.5 Writing Sequence Files - BioPy cookbook
REGEX to remove last letter:
([a-z]+[.][0-9]{1,3}[.][0-9]{1,3})[.][0-9]{1,3}
$1
'''
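# Sketch of applying the regex noted above (not used by this script; the
# identifier 'a.1.10.8' is a made-up example):
#   import re
#   re.sub(r'([a-z]+[.][0-9]{1,3}[.][0-9]{1,3})[.][0-9]{1,3}', r'\1', 'a.1.10.8')
#   # -> 'a.1.10'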
from sys import argv
import os
from Bio import SeqIO
from collections import Counter
from Bio.Seq import Seq
ILLEGALS = ['B', 'J', 'Z', 'X', 'U', 'O']  # ambiguous / non-standard amino acid codes
def contain_illegals(seq, illegals=ILLEGALS):
'''
    Used to filter U, B, Z, X... (non-standard AA) from a given sequence.
Returns True if illegals in sequence.
>>>print(contain_illegals('XSC'))
True
'''
for c in illegals:
if c in seq:
return True
return False
"http://nbviewer.ipython.org/urls/bitbucket.org/hrojas/learn-pandas/raw/master/lessons/11%20-%20Lesson.ipynb"
def Get_Dirr_All_Fasta (Dirr = '.'):
'''
Get all FASTA (*.fasta) files from current working directory,
returns a list of files.
    If no additional param is given, the default is the current dir.
CURRENTLY - Does not get files from subdirectories.
'''
'''old - for f in os.listdir(sys.argv[1]) :
We could also do:
for file in glob("*.fasta"):
'''
if Dirr != '.':
os.chdir(str(Dirr))
print ("dirr change to: %s" %(Dirr))
    files = [f for f in os.listdir(os.curdir) if (os.path.isfile(f) and f.endswith(".fasta"))]
    return files
def FilterFastaSeqGroups (Dir,fName,minGroupCount = 45,minLength= 27,FilterGroups=True):
# minGroupCount = 40 #Minimum amount of samples per class.
# minLength= 29 #Minimum length of a protein sequence.
    KeywordCount = Counter() # Holds counts of encountered classes/KW = second word in fasta description. Used to filter
os.chdir(str(Dir))
# testHandle=Dir+fName
# testHandle=fName
# handle = open(sys.argv[1])
# handle = testHandle
handle=fName
modHandle = 'FILT'+fName
'5.1.2 Iterating over the records in a sequence file - BioPy Cookbook; Modified for Python 3'
def FiltSequences (handle,minLength=27):
'''
        Filter the sequences in the given fasta file and write a new multifasta
        file containing only sequences of at least the minimum length that do
        not contain illegal AA.
        Also counts + updates occurrences of KW/Group-ID (second word in descriptor).
'''
it = SeqIO.parse(handle, "fasta")
i=0
filteredRecords = []
for seq in it:
i +=1
if contain_illegals(seq=seq.seq)==False:
if len(seq.seq)>minLength:
filteredRecords.append(seq)
KW = seq.description.split()[1]
KeywordCount[KW] +=1
print(i)
print('KeywordCount', KeywordCount)
SeqIO.write(filteredRecords, modHandle, "fasta")
def FiltSeqGroups (handle,Classes):
'''
Keeps sequences whose class is in pre-approved list (min. amount of counts);
Class/KW is second word of seq's description.
'''
it = SeqIO.parse(handle, "fasta")
filteredRecords = []
i=0
for seq in it:
KW = seq.description.split()[1]
seq.description=KW #remove extra info. LOSSY
if KW in Classes:
filteredRecords.append(seq)
i += 1
SeqIO.write(filteredRecords, modHandle, "fasta")
print('Filtered left:', i)
FiltSequences(handle=handle,minLength=minLength)
'Keep only those classes with a minimal amount of samples'
if FilterGroups==True:
FiltGroup = set()
for k,v in KeywordCount.items():
if v>=minGroupCount:
FiltGroup.add(k)
print(FiltGroup)
FiltSeqGroups(handle=modHandle,Classes=FiltGroup)
if __name__=="__main__":
# Dir = r"D:\SkyDrive\Dropbox\BioInformatics Lab\AA_Information\CODE\Feature_Extract\test_seq\TestFilt"
#minCounts = int(input('Input Min Count'))
#Dir = str(input('Input target directory with fastas to filter. \n'))
# print(len(argv))
print ("Input should be: Dirr with target fasta, MinCountPerClass , minLengthPerSeq")
Dir = str(argv[1])
if argv[2] is None:
minCounts=60
else:
minCounts=int(argv[2])
if argv[3] is None:
minLength=40
else:
minLength=int(argv[3])
# minLength=input('Enter minimum protein length')
FastaFiles = Get_Dirr_All_Fasta(Dirr=Dir)
for f in FastaFiles:
FilterFastaSeqGroups (Dir,f,minGroupCount = minCounts,minLength= minLength,FilterGroups=True)
| gpl-3.0 | -2,050,795,477,758,331,600 | 32.066225 | 119 | 0.638093 | false |
Ircam-Web/mezzanine-organization | organization/formats/fr/formats.py | 1 | 2414 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016-2017 Ircam
# Copyright (c) 2016-2017 Guillaume Pellerin
# Copyright (c) 2016-2017 Emilie Zawadzki
# This file is part of mezzanine-organization.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
DATE_EVENT_FORMAT = 'D j F'
DATE_EVENT_FORMAT_Y = 'D j F Y'
WEEK_DAY_FORMAT = 'D j'
WEEK_DAY_FORMAT_Y = 'D j Y'
TIME_FORMAT = 'H\hi'
DATETIME_FORMAT = 'j F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j N Y'
SHORT_DATETIME_FORMAT = 'j N Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
    '%d.%m.%Y', '%d.%m.%y',  # Swiss (fr_CH), '25.10.2006', '25.10.06'
# '%d %B %Y', '%d %b %Y', # '25 octobre 2006', '25 oct. 2006'
]
DATETIME_INPUT_FORMATS = [
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
    '%d.%m.%Y %H:%M:%S',  # Swiss (fr_CH), '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # Swiss (fr_CH), '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # Swiss (fr_CH), '25.10.2006 14:30'
'%d.%m.%Y', # Swiss (fr_CH), '25.10.2006'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
| agpl-3.0 | 1,685,152,173,706,029,000 | 38.57377 | 77 | 0.636288 | false |
joefutrelle/pyifcb | ifcb/data/bins.py | 1 | 3657 | """
Bin API. Provides consistent access to IFCB raw data stored
in various formats.
"""
from functools import lru_cache
from .adc import SCHEMA
from .hdr import TEMPERATURE, HUMIDITY
from .utils import BaseDictlike
from ..metrics.ml_analyzed import compute_ml_analyzed
class BaseBin(BaseDictlike):
"""
    Base class for Bin implementations, providing common features.
The bin PID is available as a Pid object via the "pid" property.
Subclasses must implement this.
Bins are dict-like. Keys are target numbers, values are ADC records.
ADC records are tuples.
Also supports an "adc" property that is a Pandas DataFrame containing
ADC data. Subclasses are required to provide this. The default dictlike
implementation uses that property.
Context manager support is provided for implementations
that must open files or other data streams.
"""
@property
def lid(self):
"""
:returns str: the bin's LID.
"""
return self.pid.bin_lid
@property
@lru_cache()
def images_adc(self):
"""
:returns pandas.DataFrame: the ADC data, minus targets that
are not associated with images
"""
return self.adc[self.adc[self.schema.ROI_WIDTH] > 0]
@property
def timestamp(self):
"""
:returns datetime: the bin's timestamp.
"""
return self.pid.timestamp
@property
def schema(self):
return SCHEMA[self.pid.schema_version]
# context manager default implementation
def __enter__(self):
return self
def __exit__(self, *args):
pass
# dictlike interface
def keys(self):
yield from self.adc.index
def has_key(self, k):
return k in self.adc.index
def __len__(self):
return len(self.adc.index)
def get_target(self, target_number):
"""
Retrieve a target record by target number
:param target_number: the target number
"""
d = tuple(self.adc[c][target_number] for c in self.adc.columns)
return d
def __getitem__(self, target_number):
return self.get_target(target_number)
# metrics
@lru_cache()
def _get_ml_analyzed(self):
return compute_ml_analyzed(self)
@property
def ml_analyzed(self):
ma, _, _ = self._get_ml_analyzed()
return ma
@property
def look_time(self):
_, lt, _ = self._get_ml_analyzed()
return lt
@property
def run_time(self):
_, _, rt = self._get_ml_analyzed()
return rt
@property
def inhibit_time(self):
return self.run_time - self.look_time
@property
def trigger_rate(self):
"""return trigger rate in triggers / s"""
return 1.0 * len(self) / self.run_time
@property
def temperature(self):
return self.header(TEMPERATURE)
@property
def humidity(self):
return self.header(HUMIDITY)
# convenience APIs for writing in different formats
def read(self):
with self:
new_bin = BaseBin()
new_bin.pid = self.pid.copy()
new_bin.headers = self.headers.copy()
new_bin.adc = self.adc
new_bin.images = { k:v for k,v in self.images.items() }
return new_bin
def to_hdf(self, hdf_file, group=None, replace=True):
from .hdf import bin2hdf
bin2hdf(self, hdf_file, group=group, replace=replace)
def to_zip(self, zip_path):
from .zip import bin2zip
bin2zip(self, zip_path)
def to_mat(self, mat_path):
from .matlab import bin2mat
bin2mat(self, mat_path)
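# Minimal usage sketch (illustrative; the concrete file-backed Bin subclasses
# and an open_raw() entry point are assumed to live elsewhere in this package,
# and the sample path is made up):
#
#   import ifcb
#   with ifcb.open_raw('D20150101T000000_IFCB001') as b:
#       print(b.lid, len(b), b.ml_analyzed)
#       for target_number, adc_record in b.items():
#           pass
#       b.to_zip(b.lid + '.zip')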
| mit | 4,120,801,862,766,967,300 | 28.97541 | 75 | 0.615532 | false |
plotly/python-api | packages/python/plotly/plotly/graph_objs/barpolar/_marker.py | 1 | 43687 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "barpolar"
_path_str = "barpolar.marker"
_valid_props = {
"autocolorscale",
"cauto",
"cmax",
"cmid",
"cmin",
"color",
"coloraxis",
"colorbar",
"colorscale",
"colorsrc",
"line",
"opacity",
"opacitysrc",
"reversescale",
"showscale",
}
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in `marker.color`is
set to a numerical array. In case `colorscale` is unspecified
or `autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color` array are
all positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
# cauto
# -----
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `marker.color`) or the
bounds set in `marker.cmin` and `marker.cmax` Has an effect
only if in `marker.color`is set to a numerical array. Defaults
to `false` when `marker.cmin` and `marker.cmax` are set by the
user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
# cmax
# ----
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
in `marker.color`is set to a numerical array. Value should have
the same units as in `marker.color` and if set, `marker.cmin`
must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
# cmid
# ----
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling `marker.cmin`
and/or `marker.cmax` to be equidistant to this point. Has an
effect only if in `marker.color`is set to a numerical array.
Value should have the same units as in `marker.color`. Has no
effect when `marker.cauto` is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
# cmin
# ----
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
in `marker.color`is set to a numerical array. Value should have
the same units as in `marker.color` and if set, `marker.cmax`
must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
# color
# -----
@property
def color(self):
"""
        Sets the marker color. It accepts either a specific color or an
array of numbers that are mapped to the colorscale relative to
the max and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A number that will be interpreted as a color
according to barpolar.marker.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# coloraxis
# ---------
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
# colorbar
# --------
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of :class:`plotly.graph_objs.barpolar.marker.ColorBar`
- A dict of string/value properties that will be passed
to the ColorBar constructor
Supported dict properties:
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
or in *pixels. Use `len` to set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format
We add one item to d3's date formatter: "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.barpola
r.marker.colorbar.Tickformatstop` instances or
dicts with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.barpolar.marker.colorbar.tickformatstopdefaul
ts), sets the default property values to use
for elements of
barpolar.marker.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.barpolar.marker.co
lorbar.Title` instance or dict with compatible
properties
titlefont
Deprecated: Please use
barpolar.marker.colorbar.title.font instead.
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
titleside
Deprecated: Please use
barpolar.marker.colorbar.title.side instead.
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
plotly.graph_objs.barpolar.marker.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
# colorscale
# ----------
@property
def colorscale(self):
"""
Sets the colorscale. Has an effect only if in `marker.color`is
set to a numerical array. The colorscale must be an array
containing arrays mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At minimum, a mapping for
the lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To
control the bounds of the colorscale in color space,
use`marker.cmin` and `marker.cmax`. Alternatively, `colorscale`
may be a palette name string of the following list: Greys,YlGnB
u,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland
,Jet,Hot,Blackbody,Earth,Electric,Viridis,Cividis.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'peach', 'phase', 'picnic', 'pinkyl', 'piyg',
'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn', 'puor',
'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu', 'rdgy',
'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar', 'spectral',
'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn', 'tealrose',
'tempo', 'temps', 'thermal', 'tropic', 'turbid', 'twilight',
'viridis', 'ylgn', 'ylgnbu', 'ylorbr', 'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.barpolar.marker.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.line.colorscale`. Has an
effect only if in `marker.line.color`is set to
a numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.line.color`) or the bounds set in
`marker.line.cmin` and `marker.line.cmax` Has
an effect only if in `marker.line.color`is set
to a numerical array. Defaults to `false` when
`marker.line.cmin` and `marker.line.cmax` are
set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if in `marker.line.color`is set
to a numerical array. Value should have the
same units as in `marker.line.color` and if
set, `marker.line.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.line.cmin` and/or
`marker.line.cmax` to be equidistant to this
point. Has an effect only if in
`marker.line.color`is set to a numerical array.
Value should have the same units as in
`marker.line.color`. Has no effect when
`marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if in `marker.line.color`is set
to a numerical array. Value should have the
same units as in `marker.line.color` and if
set, `marker.line.cmax` must be set as well.
color
                    Sets the marker.line color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if
set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorscale
Sets the colorscale. Has an effect only if in
`marker.line.color`is set to a numerical array.
The colorscale must be an array containing
arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in
color space, use`marker.line.cmin` and
`marker.line.cmax`. Alternatively, `colorscale`
may be a palette name string of the following
list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,R
eds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Black
body,Earth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
reversescale
Reverses the color mapping if true. Has an
effect only if in `marker.line.color`is set to
a numerical array. If true, `marker.line.cmin`
will correspond to the last color in the array
and `marker.line.cmax` will correspond to the
first color.
width
Sets the width (in px) of the lines bounding
the marker points.
widthsrc
Sets the source reference on Chart Studio Cloud
for width .
Returns
-------
plotly.graph_objs.barpolar.marker.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the bars.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# opacitysrc
# ----------
@property
def opacitysrc(self):
"""
Sets the source reference on Chart Studio Cloud for opacity .
The 'opacitysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["opacitysrc"]
@opacitysrc.setter
def opacitysrc(self, val):
self["opacitysrc"] = val
# reversescale
# ------------
@property
def reversescale(self):
"""
Reverses the color mapping if true. Has an effect only if in
`marker.color`is set to a numerical array. If true,
`marker.cmin` will correspond to the last color in the array
and `marker.cmax` will correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
# showscale
# ---------
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace. Has an effect only if in `marker.color`is set to a
numerical array.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in
`marker.color`is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
or the bounds set in `marker.cmin` and `marker.cmax`
Has an effect only if in `marker.color`is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.color`is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
            this point. Has an effect only if `marker.color` is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
            only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
            Sets the marker color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.barpolar.marker.ColorBar`
instance or dict with compatible properties
colorscale
            Sets the colorscale. Has an effect only if
            `marker.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
            use `marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
            following list: Greys, YlGnBu, Greens, YlOrRd, Bluered,
            RdBu, Reds, Blues, Picnic, Rainbow, Portland, Jet, Hot,
            Blackbody, Earth, Electric, Viridis, Cividis.
colorsrc
Sets the source reference on Chart Studio Cloud for
            color.
line
:class:`plotly.graph_objects.barpolar.marker.Line`
instance or dict with compatible properties
opacity
Sets the opacity of the bars.
opacitysrc
Sets the source reference on Chart Studio Cloud for
            opacity.
reversescale
Reverses the color mapping if true. Has an effect only
            if `marker.color` is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
            this trace. Has an effect only if `marker.color` is
set to a numerical array.
"""
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorbar=None,
colorscale=None,
colorsrc=None,
line=None,
opacity=None,
opacitysrc=None,
reversescale=None,
showscale=None,
**kwargs
):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.barpolar.Marker`
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
            `marker.colorscale`. Has an effect only if
            `marker.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
            or the bounds set in `marker.cmin` and `marker.cmax`.
            Has an effect only if `marker.color` is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
            only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
            this point. Has an effect only if `marker.color` is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
            only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
            Sets the marker color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.barpolar.marker.ColorBar`
instance or dict with compatible properties
colorscale
            Sets the colorscale. Has an effect only if
            `marker.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
            use `marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
            following list: Greys, YlGnBu, Greens, YlOrRd, Bluered,
            RdBu, Reds, Blues, Picnic, Rainbow, Portland, Jet, Hot,
            Blackbody, Earth, Electric, Viridis, Cividis.
colorsrc
Sets the source reference on Chart Studio Cloud for
            color.
line
:class:`plotly.graph_objects.barpolar.marker.Line`
instance or dict with compatible properties
opacity
Sets the opacity of the bars.
opacitysrc
Sets the source reference on Chart Studio Cloud for
            opacity.
reversescale
Reverses the color mapping if true. Has an effect only
            if `marker.color` is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
            this trace. Has an effect only if `marker.color` is
set to a numerical array.
Returns
-------
Marker
"""
super(Marker, self).__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.barpolar.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.barpolar.Marker`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("autocolorscale", None)
_v = autocolorscale if autocolorscale is not None else _v
if _v is not None:
self["autocolorscale"] = _v
_v = arg.pop("cauto", None)
_v = cauto if cauto is not None else _v
if _v is not None:
self["cauto"] = _v
_v = arg.pop("cmax", None)
_v = cmax if cmax is not None else _v
if _v is not None:
self["cmax"] = _v
_v = arg.pop("cmid", None)
_v = cmid if cmid is not None else _v
if _v is not None:
self["cmid"] = _v
_v = arg.pop("cmin", None)
_v = cmin if cmin is not None else _v
if _v is not None:
self["cmin"] = _v
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("coloraxis", None)
_v = coloraxis if coloraxis is not None else _v
if _v is not None:
self["coloraxis"] = _v
_v = arg.pop("colorbar", None)
_v = colorbar if colorbar is not None else _v
if _v is not None:
self["colorbar"] = _v
_v = arg.pop("colorscale", None)
_v = colorscale if colorscale is not None else _v
if _v is not None:
self["colorscale"] = _v
_v = arg.pop("colorsrc", None)
_v = colorsrc if colorsrc is not None else _v
if _v is not None:
self["colorsrc"] = _v
_v = arg.pop("line", None)
_v = line if line is not None else _v
if _v is not None:
self["line"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("opacitysrc", None)
_v = opacitysrc if opacitysrc is not None else _v
if _v is not None:
self["opacitysrc"] = _v
_v = arg.pop("reversescale", None)
_v = reversescale if reversescale is not None else _v
if _v is not None:
self["reversescale"] = _v
_v = arg.pop("showscale", None)
_v = showscale if showscale is not None else _v
if _v is not None:
self["showscale"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
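# Illustrative sketch (not part of the generated module): two equivalent ways of
# building this Marker, either from keyword arguments or from a plain dict.  The
# colour values below are arbitrary example data, not values taken from this file.
#
#     import plotly.graph_objects as go
#
#     marker = go.barpolar.Marker(color=[1, 2, 3], colorscale="Viridis", showscale=True)
#     same_marker = go.barpolar.Marker(
#         {"color": [1, 2, 3], "colorscale": "Viridis", "showscale": True}
#     )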
| mit | 5,252,691,283,202,661,000 | 40.448767 | 87 | 0.536338 | false |
summerAI/serapis | serapis/config.py | 1 | 2921 | #!/usr/bin/env python2
# coding=utf-8
"""
Config Handler
"""
__author__ = "Manuel Ebert"
__copyright__ = "Copyright 2015, summer.ai"
__date__ = "2015-11-09"
__email__ = "[email protected]"
import boto3
import os
from util import AttrDict
path = os.path.dirname(os.path.abspath(__file__))
def load_yaml(filename):
"""
    This is a shitty YAML parser. If we were grown-ups, we'd use PyYAML, of course.
    But since PyYAML refuses to run on AWS Lambda, we'll do this instead.
Args:
filename - filename to load
Returns:
dict
"""
def parse_value(value):
if "#" in value:
value = value[:value.index("#")]
value = value.strip(" \n")
if not value:
return None
if value.lower() == "true":
return True
if value.lower() == "false":
return False
        try:
            return int(value)
        except ValueError:
            try:
                return float(value)
            except ValueError:
                return value
result = {}
current_key = None
with open(filename) as f:
for line in f.readlines():
if ":" in line:
key, value = line.split(":", 1)
key = key.strip()
current_key = key
result[key] = parse_value(value)
elif line.strip().startswith("-"):
value = line.strip(" -\n")
if not isinstance(result[current_key], list):
result[current_key] = [parse_value(value)]
else:
result[current_key].append(parse_value(value))
return result
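# Illustrative sketch (not part of the original module): the small YAML subset this
# parser understands -- flat "key: value" pairs plus simple "- item" lists.  The keys
# below are hypothetical examples, not real config entries.
#
#     region: us-east-1        # parsed as a string
#     retries: 3               # parsed as an int
#     verbose: true            # parsed as a bool
#     buckets:
#       - words
#       - results              # parsed as {'buckets': ['words', 'results']}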
def abs_path(filename):
return os.path.join(path, "config", "{}.yaml".format(filename))
def load_config(config):
keys = load_yaml(abs_path("default"))
keys['credentials'] = {}
if os.path.exists(abs_path("credentials")):
keys['credentials'] = load_yaml(abs_path("credentials"))
if config != 'default':
keys.update(load_yaml(abs_path(config)))
if "aws_access_key" in keys['credentials']:
keys['s3'] = boto3.resource(
's3', region_name=keys['region'],
aws_access_key_id=keys['credentials']['aws_access_key'],
aws_secret_access_key=keys['credentials']['aws_access_secret']
)
keys['s3_client'] = boto3.client(
's3', region_name=keys['region'],
aws_access_key_id=keys['credentials']['aws_access_key'],
aws_secret_access_key=keys['credentials']['aws_access_secret']
)
else:
keys['s3'] = boto3.resource('s3', region_name=keys['region'])
keys['s3_client'] = boto3.client('s3', region_name=keys['region'])
return AttrDict(keys)
config = load_config(os.environ.get('WORDNIK_CONFIG', 'default'))
def update_config(config_name):
global config
config.__data.update(load_yaml(abs_path(config_name)))
| mit | -5,043,766,123,198,381,000 | 27.920792 | 82 | 0.553235 | false |
hip-odoo/odoo | odoo/addons/base/ir/ir_model.py | 2 | 66977 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import datetime
import dateutil
import logging
import time
from collections import defaultdict
from odoo import api, fields, models, SUPERUSER_ID, tools, _
from odoo.exceptions import AccessError, UserError, ValidationError
from odoo.modules.registry import Registry
from odoo.osv import expression
from odoo.tools.safe_eval import safe_eval
_logger = logging.getLogger(__name__)
MODULE_UNINSTALL_FLAG = '_force_unlink'
def encode(s):
""" Return an UTF8-encoded version of ``s``. """
return s.encode('utf8') if isinstance(s, unicode) else s
# base environment for doing a safe_eval
SAFE_EVAL_BASE = {
'datetime': datetime,
'dateutil': dateutil,
'time': time,
}
def make_compute(text, deps):
""" Return a compute function from its code body and dependencies. """
func = lambda self: safe_eval(text, SAFE_EVAL_BASE, {'self': self}, mode="exec")
deps = [arg.strip() for arg in (deps or "").split(",")]
return api.depends(*deps)(func)
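# Illustrative sketch (assumption, not original code): the kind of compute body and
# dependency string stored on a custom ir.model.fields record and turned into a
# method by make_compute().  The model 'x_book' and fields 'x_name'/'x_name_size'
# are hypothetical.
#
#     text = "for record in self:\n    record['x_name_size'] = len(record.x_name or '')"
#     deps = "x_name"
#     compute_x_name_size = make_compute(text, deps)  # decorated with @api.depends('x_name')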
#
# IMPORTANT: this must be the first model declared in the module
#
class Base(models.AbstractModel):
""" The base model, which is implicitly inherited by all models. """
_name = 'base'
class Unknown(models.AbstractModel):
"""
Abstract model used as a substitute for relational fields with an unknown
comodel.
"""
_name = '_unknown'
class IrModel(models.Model):
_name = 'ir.model'
_description = "Models"
_order = 'model'
def _default_field_id(self):
if self.env.context.get('install_mode'):
return [] # no default field when importing
return [(0, 0, {'name': 'x_name', 'field_description': 'Name', 'ttype': 'char'})]
name = fields.Char(string='Model Description', translate=True, required=True)
model = fields.Char(default='x_', required=True, index=True)
info = fields.Text(string='Information')
field_id = fields.One2many('ir.model.fields', 'model_id', string='Fields', required=True, copy=True,
default=_default_field_id)
inherited_model_ids = fields.Many2many('ir.model', compute='_inherited_models', string="Inherited models",
help="The list of models that extends the current model.")
state = fields.Selection([('manual', 'Custom Object'), ('base', 'Base Object')], string='Type', default='manual', readonly=True)
access_ids = fields.One2many('ir.model.access', 'model_id', string='Access')
transient = fields.Boolean(string="Transient Model")
modules = fields.Char(compute='_in_modules', string='In Apps', help='List of modules in which the object is defined or inherited')
view_ids = fields.One2many('ir.ui.view', compute='_view_ids', string='Views')
@api.depends()
def _inherited_models(self):
for model in self:
parent_names = list(self.env[model.model]._inherits)
if parent_names:
model.inherited_model_ids = self.search([('model', 'in', parent_names)])
@api.depends()
def _in_modules(self):
installed_modules = self.env['ir.module.module'].search([('state', '=', 'installed')])
installed_names = set(installed_modules.mapped('name'))
xml_ids = models.Model._get_external_ids(self)
for model in self:
module_names = set(xml_id.split('.')[0] for xml_id in xml_ids[model.id])
model.modules = ", ".join(sorted(installed_names & module_names))
@api.depends()
def _view_ids(self):
for model in self:
model.view_ids = self.env['ir.ui.view'].search([('model', '=', model.model)])
@api.constrains('model')
def _check_model_name(self):
for model in self:
if model.state == 'manual':
if not model.model.startswith('x_'):
raise ValidationError(_("The model name must start with 'x_'."))
if not models.check_object_name(model.model):
raise ValidationError(_("The model name can only contain lowercase characters, digits, underscores and dots."))
_sql_constraints = [
('obj_name_uniq', 'unique (model)', 'Each model must be unique!'),
]
# overridden to allow searching both on model name (field 'model') and model
# description (field 'name')
@api.model
def _name_search(self, name='', args=None, operator='ilike', limit=100):
if args is None:
args = []
domain = args + ['|', ('model', operator, name), ('name', operator, name)]
return super(IrModel, self).search(domain, limit=limit).name_get()
def _drop_table(self):
for model in self:
table = self.env[model.model]._table
self._cr.execute('select relkind from pg_class where relname=%s', (table,))
result = self._cr.fetchone()
if result and result[0] == 'v':
self._cr.execute('DROP view %s' % table)
elif result and result[0] == 'r':
self._cr.execute('DROP TABLE %s CASCADE' % table)
return True
@api.multi
def unlink(self):
# Prevent manual deletion of module tables
if not self._context.get(MODULE_UNINSTALL_FLAG):
for model in self:
if model.state != 'manual':
raise UserError(_("Model '%s' contains module data and cannot be removed!") % model.name)
# prevent screwing up fields that depend on these models' fields
model.field_id._prepare_update()
imc = self.env['ir.model.constraint'].search([('model', 'in', self.ids)])
imc.unlink()
self._drop_table()
res = super(IrModel, self).unlink()
# Reload registry for normal unlink only. For module uninstall, the
# reload is done independently in odoo.modules.loading.
if not self._context.get(MODULE_UNINSTALL_FLAG):
self._cr.commit() # must be committed before reloading registry in new cursor
api.Environment.reset()
registry = Registry.new(self._cr.dbname)
registry.signal_registry_change()
return res
@api.multi
def write(self, vals):
if '__last_update' in self._context:
self = self.with_context({k: v for k, v in self._context.iteritems() if k != '__last_update'})
if 'model' in vals and any(rec.model != vals['model'] for rec in self):
raise UserError(_('Field "Model" cannot be modified on models.'))
if 'state' in vals and any(rec.state != vals['state'] for rec in self):
raise UserError(_('Field "Type" cannot be modified on models.'))
if 'transient' in vals and any(rec.transient != vals['transient'] for rec in self):
raise UserError(_('Field "Transient Model" cannot be modified on models.'))
# Filter out operations 4 from field id, because the web client always
# writes (4,id,False) even for non dirty items.
if 'field_id' in vals:
vals['field_id'] = [op for op in vals['field_id'] if op[0] != 4]
return super(IrModel, self).write(vals)
@api.model
def create(self, vals):
res = super(IrModel, self).create(vals)
if vals.get('state', 'manual') == 'manual':
# setup models; this automatically adds model in registry
self.pool.setup_models(self._cr, partial=(not self.pool.ready))
# update database schema
self.pool.init_models(self._cr, [vals['model']], dict(self._context, update_custom_fields=True))
self.pool.signal_registry_change()
return res
@api.model
def name_create(self, name):
""" Infer the model from the name. E.g.: 'My New Model' should become 'x_my_new_model'. """
vals = {
'name': name,
'model': 'x_' + '_'.join(name.lower().split(' ')),
}
return self.create(vals).name_get()[0]
@api.model
def _instanciate(self, model_data):
""" Return a class for the custom model given by parameters ``model_data``. """
class CustomModel(models.Model):
_name = encode(model_data['model'])
_description = model_data['name']
_module = False
_custom = True
_transient = bool(model_data['transient'])
__doc__ = model_data['info']
return CustomModel
class IrModelFields(models.Model):
_name = 'ir.model.fields'
_description = "Fields"
_order = "name"
_rec_name = 'field_description'
name = fields.Char(string='Field Name', default='x_', required=True, index=True)
complete_name = fields.Char(index=True)
model = fields.Char(string='Object Name', required=True, index=True,
help="The technical name of the model this field belongs to")
relation = fields.Char(string='Object Relation',
help="For relationship fields, the technical name of the target model")
relation_field = fields.Char(help="For one2many fields, the field on the target model that implement the opposite many2one relationship")
model_id = fields.Many2one('ir.model', string='Model', required=True, index=True, ondelete='cascade',
help="The model this field belongs to")
field_description = fields.Char(string='Field Label', default='', required=True, translate=True)
help = fields.Text(string='Field Help', translate=True)
ttype = fields.Selection(selection='_get_field_types', string='Field Type', required=True)
selection = fields.Char(string='Selection Options', default="",
help="List of options for a selection field, "
"specified as a Python expression defining a list of (key, label) pairs. "
"For example: [('blue','Blue'),('yellow','Yellow')]")
copy = fields.Boolean(string='Copied', help="Whether the value is copied when duplicating a record.")
related = fields.Char(string='Related Field', help="The corresponding related field, if any. This must be a dot-separated list of field names.")
required = fields.Boolean()
readonly = fields.Boolean()
index = fields.Boolean(string='Indexed')
translate = fields.Boolean(string='Translatable', help="Whether values for this field can be translated (enables the translation mechanism for that field)")
size = fields.Integer()
state = fields.Selection([('manual', 'Custom Field'), ('base', 'Base Field')], string='Type', default='manual', required=True, readonly=True, index=True)
on_delete = fields.Selection([('cascade', 'Cascade'), ('set null', 'Set NULL'), ('restrict', 'Restrict')],
string='On Delete', default='set null', help='On delete property for many2one fields')
domain = fields.Char(default="[]", help="The optional domain to restrict possible values for relationship fields, "
"specified as a Python expression defining a list of triplets. "
"For example: [('color','=','red')]")
groups = fields.Many2many('res.groups', 'ir_model_fields_group_rel', 'field_id', 'group_id')
selectable = fields.Boolean(default=True)
modules = fields.Char(compute='_in_modules', string='In Apps', help='List of modules in which the field is defined')
serialization_field_id = fields.Many2one('ir.model.fields', 'Serialization Field', domain="[('ttype','=','serialized')]",
ondelete='cascade', help="If set, this field will be stored in the sparse "
"structure of the serialization field, instead "
"of having its own database column. This cannot be "
"changed after creation.")
relation_table = fields.Char(help="Used for custom many2many fields to define a custom relation table name")
column1 = fields.Char(string='Column 1', help="Column referring to the record in the model table")
column2 = fields.Char(string="Column 2", help="Column referring to the record in the comodel table")
compute = fields.Text(help="Code to compute the value of the field.\n"
"Iterate on the recordset 'self' and assign the field's value:\n\n"
" for record in self:\n"
" record['size'] = len(record.name)\n\n"
"Modules time, datetime, dateutil are available.")
depends = fields.Char(string='Dependencies', help="Dependencies of compute method; "
"a list of comma-separated field names, like\n\n"
" name, partner_id.name")
store = fields.Boolean(string='Stored', default=True, help="Whether the value is stored in the database.")
@api.model
def _get_field_types(self):
# retrieve the possible field types from the field classes' metaclass
return sorted((key, key) for key in fields.MetaField.by_type)
@api.depends()
def _in_modules(self):
installed_modules = self.env['ir.module.module'].search([('state', '=', 'installed')])
installed_names = set(installed_modules.mapped('name'))
xml_ids = models.Model._get_external_ids(self)
for field in self:
module_names = set(xml_id.split('.')[0] for xml_id in xml_ids[field.id])
field.modules = ", ".join(sorted(installed_names & module_names))
@api.model
def _check_selection(self, selection):
try:
items = safe_eval(selection)
if not (isinstance(items, (tuple, list)) and
all(isinstance(item, (tuple, list)) and len(item) == 2 for item in items)):
raise ValueError(selection)
except Exception:
_logger.info('Invalid selection list definition for fields.selection', exc_info=True)
raise UserError(_("The Selection Options expression is not a valid Pythonic expression."
"Please provide an expression in the [('key','Label'), ...] format."))
@api.constrains('name', 'state')
def _check_name(self):
for field in self:
if field.state == 'manual' and not field.name.startswith('x_'):
raise ValidationError(_("Custom fields must have a name that starts with 'x_' !"))
try:
models.check_pg_name(field.name)
except ValidationError:
msg = _("Field names can only contain characters, digits and underscores (up to 63).")
raise ValidationError(msg)
@api.constrains('model', 'name')
def _unique_name(self):
# fix on stable branch (to be converted into an SQL constraint)
for field in self:
count = self.search_count([('model', '=', field.model), ('name', '=', field.name)])
if count > 1:
raise ValidationError(_("Field names must be unique per model."))
_sql_constraints = [
('size_gt_zero', 'CHECK (size>=0)', 'Size of the field cannot be negative.'),
]
def _related_field(self):
""" Return the ``Field`` instance corresponding to ``self.related``. """
names = self.related.split(".")
last = len(names) - 1
model = self.env[self.model or self.model_id.model]
for index, name in enumerate(names):
field = model._fields.get(name)
if field is None:
raise UserError(_("Unknown field name '%s' in related field '%s'") % (name, self.related))
if index < last and not field.relational:
raise UserError(_("Non-relational field name '%s' in related field '%s'") % (name, self.related))
model = model[name]
return field
@api.one
@api.constrains('related')
def _check_related(self):
if self.state == 'manual' and self.related:
field = self._related_field()
if field.type != self.ttype:
raise ValidationError(_("Related field '%s' does not have type '%s'") % (self.related, self.ttype))
if field.relational and field.comodel_name != self.relation:
raise ValidationError(_("Related field '%s' does not have comodel '%s'") % (self.related, self.relation))
@api.onchange('related')
def _onchange_related(self):
if self.related:
try:
field = self._related_field()
except UserError as e:
return {'warning': {'title': _("Warning"), 'message': e.message}}
self.ttype = field.type
self.relation = field.comodel_name
self.readonly = True
self.copy = False
@api.constrains('depends')
def _check_depends(self):
""" Check whether all fields in dependencies are valid. """
for record in self:
if not record.depends:
continue
for seq in record.depends.split(","):
if not seq.strip():
raise UserError(_("Empty dependency in %r") % (record.depends))
model = self.env[record.model]
names = seq.strip().split(".")
last = len(names) - 1
for index, name in enumerate(names):
field = model._fields.get(name)
if field is None:
raise UserError(_("Unknown field %r in dependency %r") % (name, seq.strip()))
if index < last and not field.relational:
raise UserError(_("Non-relational field %r in dependency %r") % (name, seq.strip()))
model = model[name]
@api.onchange('compute')
def _onchange_compute(self):
if self.compute:
self.readonly = True
self.copy = False
@api.one
@api.constrains('relation_table')
def _check_relation_table(self):
if self.relation_table:
models.check_pg_name(self.relation_table)
@api.model
def _custom_many2many_names(self, model_name, comodel_name):
""" Return default names for the table and columns of a custom many2many field. """
rel1 = self.env[model_name]._table
rel2 = self.env[comodel_name]._table
table = 'x_%s_%s_rel' % tuple(sorted([rel1, rel2]))
if rel1 == rel2:
return (table, 'id1', 'id2')
else:
return (table, '%s_id' % rel1, '%s_id' % rel2)
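    # Illustrative sketch (not part of the original code): for hypothetical custom
    # models 'x_book' (table x_book) and 'x_author' (table x_author), the helper
    # above returns
    #     ('x_x_author_x_book_rel', 'x_book_id', 'x_author_id')
    # and for a self-referencing many2many on 'x_book' it returns
    #     ('x_x_book_x_book_rel', 'id1', 'id2').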
@api.onchange('ttype', 'model_id', 'relation')
def _onchange_ttype(self):
self.copy = (self.ttype != 'one2many')
if self.ttype == 'many2many' and self.model_id and self.relation:
names = self._custom_many2many_names(self.model_id.model, self.relation)
self.relation_table, self.column1, self.column2 = names
else:
self.relation_table = False
self.column1 = False
self.column2 = False
@api.onchange('relation_table')
def _onchange_relation_table(self):
if self.relation_table:
# check whether other fields use the same table
others = self.search([('ttype', '=', 'many2many'),
('relation_table', '=', self.relation_table),
('id', 'not in', self._origin.ids)])
if others:
for other in others:
if (other.model, other.relation) == (self.relation, self.model):
# other is a candidate inverse field
self.column1 = other.column2
self.column2 = other.column1
return
return {'warning': {
'title': _("Warning"),
'message': _("The table %r if used for other, possibly incompatible fields.") % self.relation_table,
}}
@api.multi
def _drop_column(self):
tables_to_drop = set()
for field in self:
if field.name in models.MAGIC_COLUMNS:
continue
model = self.env[field.model]
self._cr.execute('SELECT relkind FROM pg_class WHERE relname=%s', (model._table,))
relkind = self._cr.fetchone()
self._cr.execute("""SELECT column_name FROM information_schema.columns
WHERE table_name=%s AND column_name=%s""",
(model._table, field.name))
column_name = self._cr.fetchone()
if column_name and (relkind and relkind[0] == 'r'):
self._cr.execute('ALTER table "%s" DROP column "%s" cascade' % (model._table, field.name))
if field.state == 'manual' and field.ttype == 'many2many':
rel_name = field.relation_table or model._fields[field.name].relation
tables_to_drop.add(rel_name)
model._pop_field(field.name)
if tables_to_drop:
# drop the relation tables that are not used by other fields
self._cr.execute("""SELECT relation_table FROM ir_model_fields
WHERE relation_table IN %s AND id NOT IN %s""",
(tuple(tables_to_drop), tuple(self.ids)))
tables_to_keep = set(row[0] for row in self._cr.fetchall())
for rel_name in tables_to_drop - tables_to_keep:
self._cr.execute('DROP TABLE "%s"' % rel_name)
return True
@api.multi
def _prepare_update(self):
""" Check whether the fields in ``self`` may be modified or removed.
This method prevents the modification/deletion of many2one fields
that have an inverse one2many, for instance.
"""
self = self.filtered(lambda record: record.state == 'manual')
if not self:
return
for record in self:
model = self.env[record.model]
field = model._fields[record.name]
if field.type == 'many2one' and model._field_inverses.get(field):
if self._context.get(MODULE_UNINSTALL_FLAG):
# automatically unlink the corresponding one2many field(s)
inverses = self.search([('relation', '=', field.model_name),
('relation_field', '=', field.name)])
inverses.unlink()
continue
msg = _("The field '%s' cannot be removed because the field '%s' depends on it.")
raise UserError(msg % (field, model._field_inverses[field][0]))
# remove fields from registry, and check that views are not broken
fields = [self.env[record.model]._pop_field(record.name) for record in self]
domain = expression.OR([('arch_db', 'like', record.name)] for record in self)
views = self.env['ir.ui.view'].search(domain)
try:
for view in views:
view._check_xml()
except Exception:
raise UserError("\n".join([
_("Cannot rename/delete fields that are still present in views:"),
_("Fields:") + " " + ", ".join(map(str, fields)),
_("View:") + " " + view.name,
]))
finally:
# the registry has been modified, restore it
self.pool.setup_models(self._cr)
@api.multi
def unlink(self):
if not self:
return True
# Prevent manual deletion of module columns
if not self._context.get(MODULE_UNINSTALL_FLAG) and \
any(field.state != 'manual' for field in self):
raise UserError(_("This column contains module data and cannot be removed!"))
# prevent screwing up fields that depend on these fields
self._prepare_update()
model_names = self.mapped('model')
self._drop_column()
res = super(IrModelFields, self).unlink()
# The field we just deleted might be inherited, and the registry is
# inconsistent in this case; therefore we reload the registry.
if not self._context.get(MODULE_UNINSTALL_FLAG):
self._cr.commit()
api.Environment.reset()
registry = Registry.new(self._cr.dbname)
models = registry.descendants(model_names, '_inherits')
registry.init_models(self._cr, models, dict(self._context, update_custom_fields=True))
registry.signal_registry_change()
return res
@api.model
def create(self, vals):
if 'model_id' in vals:
model_data = self.env['ir.model'].browse(vals['model_id'])
vals['model'] = model_data.model
if vals.get('ttype') == 'selection':
if not vals.get('selection'):
raise UserError(_('For selection fields, the Selection Options must be given!'))
self._check_selection(vals['selection'])
res = super(IrModelFields, self).create(vals)
if vals.get('state', 'manual') == 'manual':
if vals.get('relation') and not self.env['ir.model'].search([('model', '=', vals['relation'])]):
raise UserError(_("Model %s does not exist!") % vals['relation'])
if vals.get('ttype') == 'one2many':
if not self.search([('model_id', '=', vals['relation']), ('name', '=', vals['relation_field']), ('ttype', '=', 'many2one')]):
raise UserError(_("Many2one %s on model %s does not exist!") % (vals['relation_field'], vals['relation']))
self.pool.clear_manual_fields()
if vals['model'] in self.pool:
# setup models; this re-initializes model in registry
self.pool.setup_models(self._cr, partial=(not self.pool.ready))
# update database schema of model and its descendant models
models = self.pool.descendants([vals['model']], '_inherits')
self.pool.init_models(self._cr, models, dict(self._context, update_custom_fields=True))
self.pool.signal_registry_change()
return res
@api.multi
def write(self, vals):
# For the moment renaming a sparse field or changing the storing system
# is not allowed. This may be done later
if 'serialization_field_id' in vals or 'name' in vals:
for field in self:
if 'serialization_field_id' in vals and field.serialization_field_id.id != vals['serialization_field_id']:
raise UserError(_('Changing the storing system for field "%s" is not allowed.') % field.name)
if field.serialization_field_id and (field.name != vals['name']):
raise UserError(_('Renaming sparse field "%s" is not allowed') % field.name)
# if set, *one* column can be renamed here
column_rename = None
# names of the models to patch
patched_models = set()
if vals and self:
# check selection if given
if vals.get('selection'):
self._check_selection(vals['selection'])
for item in self:
if item.state != 'manual':
raise UserError(_('Properties of base fields cannot be altered in this manner! '
'Please modify them through Python code, '
'preferably through a custom addon!'))
if vals.get('model_id', item.model_id.id) != item.model_id.id:
raise UserError(_("Changing the model of a field is forbidden!"))
if vals.get('ttype', item.ttype) != item.ttype:
raise UserError(_("Changing the type of a field is not yet supported. "
"Please drop it and create it again!"))
obj = self.pool.get(item.model)
field = getattr(obj, '_fields', {}).get(item.name)
if vals.get('name', item.name) != item.name:
# We need to rename the column
item._prepare_update()
if column_rename:
raise UserError(_('Can only rename one field at a time!'))
column_rename = (obj._table, item.name, vals['name'], item.index)
# We don't check the 'state', because it might come from the context
# (thus be set for multiple fields) and will be ignored anyway.
if obj is not None and field is not None:
patched_models.add(obj._name)
# These shall never be written (modified)
for column_name in ('model_id', 'model', 'state'):
if column_name in vals:
del vals[column_name]
res = super(IrModelFields, self).write(vals)
self.pool.clear_manual_fields()
if column_rename:
# rename column in database, and its corresponding index if present
table, oldname, newname, index = column_rename
self._cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (table, oldname, newname))
if index:
self._cr.execute('ALTER INDEX "%s_%s_index" RENAME TO "%s_%s_index"' % (table, oldname, table, newname))
if column_rename or patched_models:
# setup models, this will reload all manual fields in registry
self.pool.setup_models(self._cr, partial=(not self.pool.ready))
if patched_models:
# update the database schema of the models to patch
models = self.pool.descendants(patched_models, '_inherits')
self.pool.init_models(self._cr, models, dict(self._context, update_custom_fields=True))
if column_rename or patched_models:
self.pool.signal_registry_change()
return res
@api.multi
def name_get(self):
res = []
for field in self:
res.append((field.id, '%s (%s)' % (field.field_description, field.model)))
return res
@api.model
def _instanciate(self, field_data, partial):
""" Return a field instance corresponding to parameters ``field_data``. """
attrs = {
'manual': True,
'string': field_data['field_description'],
'help': field_data['help'],
'index': bool(field_data['index']),
'copy': bool(field_data['copy']),
'related': field_data['related'],
'required': bool(field_data['required']),
'readonly': bool(field_data['readonly']),
'store': bool(field_data['store']),
}
# FIXME: ignore field_data['serialization_field_id']
if field_data['ttype'] in ('char', 'text', 'html'):
attrs['translate'] = bool(field_data['translate'])
attrs['size'] = field_data['size'] or None
elif field_data['ttype'] in ('selection', 'reference'):
attrs['selection'] = safe_eval(field_data['selection'])
elif field_data['ttype'] == 'many2one':
if partial and field_data['relation'] not in self.env:
return
attrs['comodel_name'] = field_data['relation']
attrs['ondelete'] = field_data['on_delete']
attrs['domain'] = safe_eval(field_data['domain']) if field_data['domain'] else None
elif field_data['ttype'] == 'one2many':
if partial and not (
field_data['relation'] in self.env and (
field_data['relation_field'] in self.env[field_data['relation']]._fields or
field_data['relation_field'] in self.pool.get_manual_fields(self._cr, field_data['relation'])
)):
return
attrs['comodel_name'] = field_data['relation']
attrs['inverse_name'] = field_data['relation_field']
attrs['domain'] = safe_eval(field_data['domain']) if field_data['domain'] else None
elif field_data['ttype'] == 'many2many':
if partial and field_data['relation'] not in self.env:
return
attrs['comodel_name'] = field_data['relation']
rel, col1, col2 = self._custom_many2many_names(field_data['model'], field_data['relation'])
attrs['relation'] = field_data['relation_table'] or rel
attrs['column1'] = field_data['column1'] or col1
attrs['column2'] = field_data['column2'] or col2
attrs['domain'] = safe_eval(field_data['domain']) if field_data['domain'] else None
# add compute function if given
if field_data['compute']:
attrs['compute'] = make_compute(field_data['compute'], field_data['depends'])
return fields.Field.by_type[field_data['ttype']](**attrs)
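    # Illustrative sketch (assumption, not original code): a minimal manual char
    # field definition, as read from ir_model_fields, and the field object that
    # _instanciate() builds from it.  The values are hypothetical.
    #
    #     field_data = {
    #         'ttype': 'char', 'field_description': 'Nickname', 'help': '',
    #         'index': False, 'copy': True, 'related': False, 'required': False,
    #         'readonly': False, 'store': True, 'translate': False, 'size': 64,
    #         'compute': False,
    #     }
    #     self._instanciate(field_data, partial=False)
    #     # -> fields.Char(manual=True, string='Nickname', size=64, store=True, ...)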
class IrModelConstraint(models.Model):
"""
This model tracks PostgreSQL foreign keys and constraints used by Odoo
models.
"""
_name = 'ir.model.constraint'
name = fields.Char(string='Constraint', required=True, index=True,
help="PostgreSQL constraint or foreign key name.")
definition = fields.Char(help="PostgreSQL constraint definition")
model = fields.Many2one('ir.model', required=True, index=True)
module = fields.Many2one('ir.module.module', required=True, index=True)
type = fields.Char(string='Constraint Type', required=True, size=1, index=True,
help="Type of the constraint: `f` for a foreign key, "
"`u` for other constraints.")
date_update = fields.Datetime(string='Update Date')
date_init = fields.Datetime(string='Initialization Date')
_sql_constraints = [
('module_name_uniq', 'unique(name, module)',
'Constraints with the same name are unique per module.'),
]
@api.multi
def _module_data_uninstall(self):
"""
Delete PostgreSQL foreign keys and constraints tracked by this model.
"""
if not (self._uid == SUPERUSER_ID or self.env.user.has_group('base.group_system')):
raise AccessError(_('Administrator access is required to uninstall a module'))
ids_set = set(self.ids)
for data in self.sorted(key='id', reverse=True):
name = tools.ustr(data.name)
if data.model.model in self.env:
table = self.env[data.model.model]._table
else:
table = data.model.model.replace('.', '_')
typ = data.type
# double-check we are really going to delete all the owners of this schema element
self._cr.execute("""SELECT id from ir_model_constraint where name=%s""", (data.name,))
external_ids = set(x[0] for x in self._cr.fetchall())
if external_ids - ids_set:
# as installed modules have defined this element we must not delete it!
continue
if typ == 'f':
# test if FK exists on this table (it could be on a related m2m table, in which case we ignore it)
self._cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""",
('f', name, table))
if self._cr.fetchone():
self._cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (table, name),)
_logger.info('Dropped FK CONSTRAINT %s@%s', name, data.model.model)
if typ == 'u':
# test if constraint exists
self._cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""",
('u', name, table))
if self._cr.fetchone():
self._cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (table, name),)
_logger.info('Dropped CONSTRAINT %s@%s', name, data.model.model)
self.unlink()
@api.multi
def copy(self, default=None):
default = dict(default or {})
default['name'] = self.name + '_copy'
return super(IrModelConstraint, self).copy(default)
class IrModelRelation(models.Model):
"""
This model tracks PostgreSQL tables used to implement Odoo many2many
relations.
"""
_name = 'ir.model.relation'
name = fields.Char(string='Relation Name', required=True, index=True,
help="PostgreSQL table name implementing a many2many relation.")
model = fields.Many2one('ir.model', required=True, index=True)
module = fields.Many2one('ir.module.module', required=True, index=True)
date_update = fields.Datetime(string='Update Date')
date_init = fields.Datetime(string='Initialization Date')
@api.multi
def _module_data_uninstall(self):
"""
Delete PostgreSQL many2many relations tracked by this model.
"""
if not (self._uid == SUPERUSER_ID or self.env.user.has_group('base.group_system')):
raise AccessError(_('Administrator access is required to uninstall a module'))
ids_set = set(self.ids)
to_drop = tools.OrderedSet()
for data in self.sorted(key='id', reverse=True):
name = tools.ustr(data.name)
# double-check we are really going to delete all the owners of this schema element
self._cr.execute("""SELECT id from ir_model_relation where name = %s""", (data.name,))
external_ids = set(x[0] for x in self._cr.fetchall())
if external_ids - ids_set:
# as installed modules have defined this element we must not delete it!
continue
self._cr.execute("SELECT 1 FROM information_schema.tables WHERE table_name=%s", (name,))
if self._cr.fetchone():
to_drop.add(name)
self.unlink()
# drop m2m relation tables
for table in to_drop:
self._cr.execute('DROP TABLE %s CASCADE' % table,)
_logger.info('Dropped table %s', table)
class IrModelAccess(models.Model):
_name = 'ir.model.access'
name = fields.Char(required=True, index=True)
active = fields.Boolean(default=True, help='If you uncheck the active field, it will disable the ACL without deleting it (if you delete a native ACL, it will be re-created when you reload the module).')
model_id = fields.Many2one('ir.model', string='Object', required=True, domain=[('transient', '=', False)], index=True, ondelete='cascade')
group_id = fields.Many2one('res.groups', string='Group', ondelete='cascade', index=True)
perm_read = fields.Boolean(string='Read Access')
perm_write = fields.Boolean(string='Write Access')
perm_create = fields.Boolean(string='Create Access')
perm_unlink = fields.Boolean(string='Delete Access')
@api.model
def check_groups(self, group):
""" Check whether the current user has the given group. """
grouparr = group.split('.')
if not grouparr:
return False
self._cr.execute("""SELECT 1 FROM res_groups_users_rel
WHERE uid=%s AND gid IN (
SELECT res_id FROM ir_model_data WHERE module=%s AND name=%s)""",
(self._uid, grouparr[0], grouparr[1],))
return bool(self._cr.fetchone())
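    # Illustrative sketch (not part of the original code): the expected argument is a
    # fully qualified group external ID, e.g.
    #     self.env['ir.model.access'].check_groups('base.group_system')
    # which checks whether the current user belongs to the administration Settings group.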
@api.model
def check_group(self, model, mode, group_ids):
""" Check if a specific group has the access mode to the specified model"""
assert mode in ('read', 'write', 'create', 'unlink'), 'Invalid access mode'
if isinstance(model, models.BaseModel):
assert model._name == 'ir.model', 'Invalid model object'
model_name = model.name
else:
model_name = model
if isinstance(group_ids, (int, long)):
group_ids = [group_ids]
query = """ SELECT 1 FROM ir_model_access a
JOIN ir_model m ON (m.id = a.model_id)
WHERE a.active AND a.perm_{mode} AND
m.model=%s AND (a.group_id IN %s OR a.group_id IS NULL)
""".format(mode=mode)
self._cr.execute(query, (model_name, tuple(group_ids)))
return bool(self._cr.rowcount)
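    # Illustrative sketch (assumption, not original code): checking whether any of a
    # set of group ids grants write access on a model; the ids below are hypothetical.
    #     self.env['ir.model.access'].check_group('res.partner', 'write', [42, 43])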
@api.model_cr
def group_names_with_access(self, model_name, access_mode):
""" Return the names of visible groups which have been granted
``access_mode`` on the model ``model_name``.
:rtype: list
"""
assert access_mode in ('read', 'write', 'create', 'unlink'), 'Invalid access mode'
self._cr.execute("""SELECT c.name, g.name
FROM ir_model_access a
JOIN ir_model m ON (a.model_id=m.id)
JOIN res_groups g ON (a.group_id=g.id)
LEFT JOIN ir_module_category c ON (c.id=g.category_id)
WHERE m.model=%s AND a.active IS TRUE AND a.perm_""" + access_mode,
(model_name,))
return [('%s/%s' % x) if x[0] else x[1] for x in self._cr.fetchall()]
# The context parameter is useful when the method translates error messages.
# But as the method raises an exception in that case, the key 'lang' might
# not be really necessary as a cache key, unless the `ormcache_context`
# decorator catches the exception (it does not at the moment.)
@api.model
@tools.ormcache_context('self._uid', 'model', 'mode', 'raise_exception', keys=('lang',))
def check(self, model, mode='read', raise_exception=True):
if self._uid == 1:
            # the root user has all access rights
return True
assert mode in ('read', 'write', 'create', 'unlink'), 'Invalid access mode'
if isinstance(model, models.BaseModel):
assert model._name == 'ir.model', 'Invalid model object'
model_name = model.model
else:
model_name = model
# TransientModel records have no access rights, only an implicit access rule
if model_name not in self.env:
_logger.error('Missing model %s', model_name)
elif self.env[model_name].is_transient():
return True
# We check if a specific rule exists
self._cr.execute("""SELECT MAX(CASE WHEN perm_{mode} THEN 1 ELSE 0 END)
FROM ir_model_access a
JOIN ir_model m ON (m.id = a.model_id)
JOIN res_groups_users_rel gu ON (gu.gid = a.group_id)
WHERE m.model = %s
AND gu.uid = %s
AND a.active IS TRUE""".format(mode=mode),
(model_name, self._uid,))
r = self._cr.fetchone()[0]
if not r:
# there is no specific rule. We check the generic rule
self._cr.execute("""SELECT MAX(CASE WHEN perm_{mode} THEN 1 ELSE 0 END)
FROM ir_model_access a
JOIN ir_model m ON (m.id = a.model_id)
WHERE a.group_id IS NULL
AND m.model = %s
AND a.active IS TRUE""".format(mode=mode),
(model_name,))
r = self._cr.fetchone()[0]
if not r and raise_exception:
groups = '\n\t'.join('- %s' % g for g in self.group_names_with_access(model_name, mode))
msg_heads = {
# Messages are declared in extenso so they are properly exported in translation terms
'read': _("Sorry, you are not allowed to access this document."),
'write': _("Sorry, you are not allowed to modify this document."),
'create': _("Sorry, you are not allowed to create this kind of document."),
'unlink': _("Sorry, you are not allowed to delete this document."),
}
if groups:
msg_tail = _("Only users with the following access level are currently allowed to do that") + ":\n%s\n\n(" + _("Document model") + ": %s)"
msg_params = (groups, model_name)
else:
msg_tail = _("Please contact your system administrator if you think this is an error.") + "\n\n(" + _("Document model") + ": %s)"
msg_params = (model_name,)
_logger.info('Access Denied by ACLs for operation: %s, uid: %s, model: %s', mode, self._uid, model_name)
msg = '%s %s' % (msg_heads[mode], msg_tail)
raise AccessError(msg % msg_params)
return bool(r)
__cache_clearing_methods = set()
@classmethod
def register_cache_clearing_method(cls, model, method):
cls.__cache_clearing_methods.add((model, method))
@classmethod
def unregister_cache_clearing_method(cls, model, method):
cls.__cache_clearing_methods.discard((model, method))
@api.model_cr
def call_cache_clearing_methods(self):
self.invalidate_cache()
self.check.clear_cache(self) # clear the cache of check function
for model, method in self.__cache_clearing_methods:
if model in self.env:
getattr(self.env[model], method)()
#
# Check rights on actions
#
@api.model
def create(self, values):
self.call_cache_clearing_methods()
return super(IrModelAccess, self).create(values)
@api.multi
def write(self, values):
self.call_cache_clearing_methods()
return super(IrModelAccess, self).write(values)
@api.multi
def unlink(self):
self.call_cache_clearing_methods()
return super(IrModelAccess, self).unlink()
class IrModelData(models.Model):
"""Holds external identifier keys for records in the database.
This has two main uses:
* allows easy data integration with third-party systems,
making import/export/sync of data possible, as records
can be uniquely identified across multiple systems
* allows tracking the origin of data installed by Odoo
modules themselves, thus making it possible to later
update them seamlessly.
"""
_name = 'ir.model.data'
_order = 'module, model, name'
name = fields.Char(string='External Identifier', required=True,
help="External Key/Identifier that can be used for "
"data integration with third-party systems")
complete_name = fields.Char(compute='_compute_complete_name', string='Complete ID')
model = fields.Char(string='Model Name', required=True)
module = fields.Char(default='', required=True)
res_id = fields.Integer(string='Record ID', help="ID of the target record in the database")
noupdate = fields.Boolean(string='Non Updatable', default=False)
date_update = fields.Datetime(string='Update Date', default=fields.Datetime.now)
date_init = fields.Datetime(string='Init Date', default=fields.Datetime.now)
reference = fields.Char(string='Reference', compute='_compute_reference', readonly=True, store=False)
@api.depends('module', 'name')
def _compute_complete_name(self):
for res in self:
res.complete_name = ".".join(filter(None, [res.module, res.name]))
@api.depends('model', 'res_id')
def _compute_reference(self):
for res in self:
res.reference = "%s,%s" % (res.model, res.res_id)
def __init__(self, pool, cr):
models.Model.__init__(self, pool, cr)
# also stored in pool to avoid being discarded along with this osv instance
if getattr(pool, 'model_data_reference_ids', None) is None:
self.pool.model_data_reference_ids = {}
# put loads on the class, in order to share it among all instances
type(self).loads = self.pool.model_data_reference_ids
@api.model_cr_context
def _auto_init(self):
res = super(IrModelData, self)._auto_init()
self._cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = 'ir_model_data_module_name_uniq_index'")
if not self._cr.fetchone():
self._cr.execute('CREATE UNIQUE INDEX ir_model_data_module_name_uniq_index ON ir_model_data (module, name)')
self._cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = 'ir_model_data_model_res_id_index'")
if not self._cr.fetchone():
self._cr.execute('CREATE INDEX ir_model_data_model_res_id_index ON ir_model_data (model, res_id)')
return res
@api.multi
def name_get(self):
model_id_name = defaultdict(dict) # {res_model: {res_id: name}}
for xid in self:
model_id_name[xid.model][xid.res_id] = None
# fill in model_id_name with name_get() of corresponding records
for model, id_name in model_id_name.iteritems():
try:
ng = self.env[model].browse(id_name).name_get()
id_name.update(ng)
except Exception:
pass
# return results, falling back on complete_name
return [(xid.id, model_id_name[xid.model][xid.res_id] or xid.complete_name)
for xid in self]
# NEW V8 API
@api.model
@tools.ormcache('xmlid')
def xmlid_lookup(self, xmlid):
"""Low level xmlid lookup
Return (id, res_model, res_id) or raise ValueError if not found
"""
module, name = xmlid.split('.', 1)
xid = self.search([('module', '=', module), ('name', '=', name)])
if not xid:
raise ValueError('External ID not found in the system: %s' % xmlid)
# the sql constraints ensure us we have only one result
res = xid.read(['model', 'res_id'])[0]
if not res['res_id']:
raise ValueError('External ID not found in the system: %s' % xmlid)
return res['id'], res['model'], res['res_id']
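    # Illustrative sketch (not part of the original code): resolving an external ID to
    # its database identity.  'base.main_company' is a record shipped with the base
    # module; the returned ids depend on the database.
    #     imd = self.env['ir.model.data']
    #     imd.xmlid_lookup('base.main_company')               # -> (imd_id, 'res.company', res_id)
    #     imd.xmlid_to_res_model_res_id('base.main_company')  # -> ('res.company', res_id)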
@api.model
def xmlid_to_res_model_res_id(self, xmlid, raise_if_not_found=False):
""" Return (res_model, res_id)"""
try:
return self.xmlid_lookup(xmlid)[1:3]
except ValueError:
if raise_if_not_found:
raise
return (False, False)
@api.model
def xmlid_to_res_id(self, xmlid, raise_if_not_found=False):
""" Returns res_id """
return self.xmlid_to_res_model_res_id(xmlid, raise_if_not_found)[1]
@api.model
def xmlid_to_object(self, xmlid, raise_if_not_found=False):
""" Return a browse_record
if not found and raise_if_not_found is False return None
"""
t = self.xmlid_to_res_model_res_id(xmlid, raise_if_not_found)
res_model, res_id = t
if res_model and res_id:
record = self.env[res_model].browse(res_id)
if record.exists():
return record
if raise_if_not_found:
raise ValueError('No record found for unique ID %s. It may have been deleted.' % (xmlid))
return None
@api.model
def _get_id(self, module, xml_id):
"""Returns the id of the ir.model.data record corresponding to a given module and xml_id (cached) or raise a ValueError if not found"""
return self.xmlid_lookup("%s.%s" % (module, xml_id))[0]
@api.model
def get_object_reference(self, module, xml_id):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached) or raise ValueError if not found"""
return self.xmlid_lookup("%s.%s" % (module, xml_id))[1:3]
@api.model
def check_object_reference(self, module, xml_id, raise_on_access_error=False):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached), if and only if the user has the necessary access rights
to see that object, otherwise raise a ValueError if raise_on_access_error is True or returns a tuple (model found, False)"""
model, res_id = self.get_object_reference(module, xml_id)
#search on id found in result to check if current user has read access right
if self.env[model].search([('id', '=', res_id)]):
return model, res_id
if raise_on_access_error:
raise AccessError('Not enough access rights on the external ID: %s.%s' % (module, xml_id))
return model, False
@api.model
def get_object(self, module, xml_id):
""" Returns a browsable record for the given module name and xml_id.
            Raises a ValueError if no record is found for the given
            module name and xml_id.
"""
return self.xmlid_to_object("%s.%s" % (module, xml_id), raise_if_not_found=True)
@api.model
def _update_dummy(self, model, module, xml_id=False, store=True):
if xml_id:
try:
# One step to check the ID is defined and the record actually exists
record = self.get_object(module, xml_id)
if record:
self.loads[(module, xml_id)] = (model, record.id)
for parent_model, parent_field in self.env[model]._inherits.iteritems():
parent = record[parent_field]
parent_xid = '%s_%s' % (xml_id, parent_model.replace('.', '_'))
self.loads[(module, parent_xid)] = (parent_model, parent.id)
return record.id
except Exception:
pass
return False
@api.multi
def unlink(self):
""" Regular unlink method, but make sure to clear the caches. """
self.clear_caches()
return super(IrModelData, self).unlink()
@api.model
def _update(self, model, module, values, xml_id=False, store=True, noupdate=False, mode='init', res_id=False):
# records created during module install should not display the messages of OpenChatter
self = self.with_context(install_mode=True)
current_module = module
if xml_id and ('.' in xml_id):
            assert len(xml_id.split('.')) == 2, _("'%s' contains too many dots. XML ids should not contain dots! Dots are used to refer to data of other modules, as in module.reference_id") % xml_id
module, xml_id = xml_id.split('.')
action = self.browse()
record = self.env[model].browse(res_id)
if xml_id:
self._cr.execute("""SELECT imd.id, imd.res_id, md.id, imd.model, imd.noupdate
FROM ir_model_data imd LEFT JOIN %s md ON (imd.res_id = md.id)
WHERE imd.module=%%s AND imd.name=%%s""" % record._table,
(module, xml_id))
results = self._cr.fetchall()
for imd_id, imd_res_id, real_id, imd_model, imd_noupdate in results:
# In update mode, do not update a record if it's ir.model.data is flagged as noupdate
if mode == 'update' and imd_noupdate:
return imd_res_id
if not real_id:
self.clear_caches()
self._cr.execute('DELETE FROM ir_model_data WHERE id=%s', (imd_id,))
record = record.browse()
else:
assert model == imd_model, "External ID conflict, %s already refers to a `%s` record,"\
" you can't define a `%s` record with this ID." % (xml_id, imd_model, model)
action = self.browse(imd_id)
record = record.browse(imd_res_id)
if action and record:
record.write(values)
action.sudo().write({'date_update': fields.Datetime.now()})
elif record:
record.write(values)
if xml_id:
for parent_model, parent_field in record._inherits.iteritems():
self.sudo().create({
'name': xml_id + '_' + parent_model.replace('.', '_'),
'model': parent_model,
'module': module,
'res_id': record[parent_field].id,
'noupdate': noupdate,
})
self.sudo().create({
'name': xml_id,
'model': model,
'module': module,
'res_id': record.id,
'noupdate': noupdate,
})
elif mode == 'init' or (mode == 'update' and xml_id):
existing_parents = set() # {parent_model, ...}
if xml_id:
for parent_model, parent_field in record._inherits.iteritems():
xid = self.search([
('module', '=', module),
('name', '=', xml_id + '_' + parent_model.replace('.', '_')),
])
# XML ID found in the database, try to recover an existing record
if xid:
parent = self.env[xid.model].browse(xid.res_id)
if parent.exists():
existing_parents.add(xid.model)
values[parent_field] = parent.id
else:
xid.unlink()
record = record.create(values)
if xml_id:
                # Add an external identifier for every model in the _inherits chain
inherit_models = [record]
while inherit_models:
current_model = inherit_models.pop()
for parent_model_name, parent_field in current_model._inherits.iteritems():
inherit_models.append(self.env[parent_model_name])
if parent_model_name in existing_parents:
continue
self.sudo().create({
'name': xml_id + '_' + parent_model_name.replace('.', '_'),
'model': parent_model_name,
'module': module,
'res_id': record[parent_field].id,
'noupdate': noupdate,
})
existing_parents.add(parent_model_name)
self.sudo().create({
'name': xml_id,
'model': model,
'module': module,
'res_id': record.id,
'noupdate': noupdate
})
if current_module and module != current_module:
_logger.warning("Creating the ir.model.data %s in module %s instead of %s.",
xml_id, module, current_module)
if xml_id and record:
self.loads[(module, xml_id)] = (model, record.id)
for parent_model, parent_field in record._inherits.iteritems():
parent_xml_id = xml_id + '_' + parent_model.replace('.', '_')
self.loads[(module, parent_xml_id)] = (parent_model, record[parent_field].id)
return record.id
@api.model
def _module_data_uninstall(self, modules_to_remove):
"""Deletes all the records referenced by the ir.model.data entries
``ids`` along with their corresponding database backed (including
dropping tables, columns, FKs, etc, as long as there is no other
ir.model.data entry holding a reference to them (which indicates that
they are still owned by another module).
Attempts to perform the deletion in an appropriate order to maximize
the chance of gracefully deleting all records.
This step is performed as part of the full uninstallation of a module.
"""
if not (self._uid == SUPERUSER_ID or self.env.user.has_group('base.group_system')):
raise AccessError(_('Administrator access is required to uninstall a module'))
# enable model/field deletion
self = self.with_context(**{MODULE_UNINSTALL_FLAG: True})
datas = self.search([('module', 'in', modules_to_remove)])
wkf_todo = []
to_unlink = tools.OrderedSet()
undeletable = self.browse([])
for data in datas.sorted(key='id', reverse=True):
model = data.model
res_id = data.res_id
to_unlink.add((model, res_id))
if model == 'workflow.activity':
# Special treatment for workflow activities: temporarily revert their
# incoming transition and trigger an update to force all workflow items
# to move out before deleting them
self._cr.execute('SELECT res_type, res_id FROM wkf_instance WHERE id IN (SELECT inst_id FROM wkf_workitem WHERE act_id=%s)', (res_id,))
wkf_todo.extend(self._cr.fetchall())
self._cr.execute("UPDATE wkf_transition SET condition='True', group_id=NULL, signal=NULL, act_to=act_from, act_from=%s WHERE act_to=%s", (res_id, res_id))
self.invalidate_cache()
for model, res_id in wkf_todo:
try:
record = self.env[model].browse(res_id)
record.step_workflow()
except Exception:
_logger.info('Unable to force processing of workflow for item %s@%s in order to leave activity to be deleted', res_id, model, exc_info=True)
def unlink_if_refcount(to_unlink):
undeletable = self.browse()
for model, res_id in to_unlink:
external_ids = self.search([('model', '=', model), ('res_id', '=', res_id)])
if external_ids - datas:
# if other modules have defined this record, we must not delete it
continue
if model == 'ir.model.fields':
# Don't remove the LOG_ACCESS_COLUMNS unless _log_access
# has been turned off on the model.
field = self.env[model].browse(res_id)
if not field.exists():
_logger.info('Deleting orphan external_ids %s', external_ids)
external_ids.unlink()
continue
if field.name in models.LOG_ACCESS_COLUMNS and field.model in self.env and self.env[field.model]._log_access:
continue
if field.name == 'id':
continue
_logger.info('Deleting %s@%s', res_id, model)
try:
self._cr.execute('SAVEPOINT record_unlink_save')
self.env[model].browse(res_id).unlink()
except Exception:
_logger.info('Unable to delete %s@%s', res_id, model, exc_info=True)
undeletable += external_ids
self._cr.execute('ROLLBACK TO SAVEPOINT record_unlink_save')
else:
self._cr.execute('RELEASE SAVEPOINT record_unlink_save')
return undeletable
# Remove non-model records first, then model fields, and finish with models
undeletable += unlink_if_refcount(item for item in to_unlink if item[0] not in ('ir.model', 'ir.model.fields', 'ir.model.constraint'))
undeletable += unlink_if_refcount(item for item in to_unlink if item[0] == 'ir.model.constraint')
modules = self.env['ir.module.module'].search([('name', 'in', modules_to_remove)])
constraints = self.env['ir.model.constraint'].search([('module', 'in', modules.ids)])
constraints._module_data_uninstall()
undeletable += unlink_if_refcount(item for item in to_unlink if item[0] == 'ir.model.fields')
relations = self.env['ir.model.relation'].search([('module', 'in', modules.ids)])
relations._module_data_uninstall()
undeletable += unlink_if_refcount(item for item in to_unlink if item[0] == 'ir.model')
(datas - undeletable).unlink()
@api.model
def _process_end(self, modules):
""" Clear records removed from updated module data.
This method is called at the end of the module loading process.
        It is meant to remove records that are no longer present in the
        updated data. Such records are recognised as the ones with an xml id
        and a module in ir_model_data, with noupdate set to false, but not
        present in self.loads.
"""
if not modules or tools.config.get('import_partial'):
return True
bad_imd_ids = []
self = self.with_context({MODULE_UNINSTALL_FLAG: True})
query = """ SELECT id, name, model, res_id, module FROM ir_model_data
WHERE module IN %s AND res_id IS NOT NULL AND noupdate=%s ORDER BY id DESC
"""
self._cr.execute(query, (tuple(modules), False))
for (id, name, model, res_id, module) in self._cr.fetchall():
if (module, name) not in self.loads:
if model in self.env:
_logger.info('Deleting %s@%s (%s.%s)', res_id, model, module, name)
record = self.env[model].browse(res_id)
if record.exists():
record.unlink()
else:
bad_imd_ids.append(id)
if bad_imd_ids:
self.browse(bad_imd_ids).unlink()
self.loads.clear()
class WizardModelMenu(models.TransientModel):
_name = 'wizard.ir.model.menu.create'
menu_id = fields.Many2one('ir.ui.menu', string='Parent Menu', required=True, ondelete='cascade')
name = fields.Char(string='Menu Name', required=True)
@api.multi
def menu_create(self):
for menu in self:
model = self.env['ir.model'].browse(self._context.get('model_id'))
vals = {
'name': menu.name,
'res_model': model.model,
'view_mode': 'tree,form',
}
action_id = self.env['ir.actions.act_window'].create(vals)
self.env['ir.ui.menu'].create({
'name': menu.name,
'parent_id': menu.menu_id.id,
'action': 'ir.actions.act_window,%d' % (action_id,)
})
return {'type': 'ir.actions.act_window_close'}
| agpl-3.0 | -8,925,995,035,553,623,000 | 46.400566 | 206 | 0.565881 | false |
openstack/taskflow | taskflow/utils/misc.py | 1 | 18584 | # -*- coding: utf-8 -*-
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
# Copyright (C) 2013 Rackspace Hosting All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import contextlib
import datetime
import inspect
import os
import re
import socket
import sys
import threading
import types
import enum
from oslo_serialization import jsonutils
from oslo_serialization import msgpackutils
from oslo_utils import encodeutils
from oslo_utils import importutils
from oslo_utils import netutils
from oslo_utils import reflection
import six
from taskflow.types import failure
UNKNOWN_HOSTNAME = "<unknown>"
NUMERIC_TYPES = six.integer_types + (float,)
# NOTE(imelnikov): regular expression to get scheme from URI,
# see RFC 3986 section 3.1
_SCHEME_REGEX = re.compile(r"^([A-Za-z][A-Za-z0-9+.-]*):")
class StrEnum(str, enum.Enum):
"""An enumeration that is also a string and can be compared to strings."""
def __new__(cls, *args, **kwargs):
for a in args:
if not isinstance(a, str):
raise TypeError("Enumeration '%s' (%s) is not"
" a string" % (a, type(a).__name__))
return super(StrEnum, cls).__new__(cls, *args, **kwargs)
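# Illustrative usage sketch (not part of the original module); the state names
# below are made up.  Members of a StrEnum are real strings, so they compare
# equal to their plain string values.
class _ExampleStates(StrEnum):
    RUNNING = 'RUNNING'
    SUSPENDED = 'SUSPENDED'
# e.g. _ExampleStates.RUNNING == 'RUNNING' evaluates to True.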
class StringIO(six.StringIO):
"""String buffer with some small additions."""
def write_nl(self, value, linesep=os.linesep):
self.write(value)
self.write(linesep)
class BytesIO(six.BytesIO):
"""Byte buffer with some small additions."""
def reset(self):
self.seek(0)
self.truncate()
def get_hostname(unknown_hostname=UNKNOWN_HOSTNAME):
"""Gets the machines hostname; if not able to returns an invalid one."""
try:
hostname = socket.getfqdn()
if not hostname:
return unknown_hostname
else:
return hostname
except socket.error:
return unknown_hostname
def match_type(obj, matchers):
"""Matches a given object using the given matchers list/iterable.
NOTE(harlowja): each element of the provided list/iterable must be
tuple of (valid types, result).
Returns the result (the second element of the provided tuple) if a type
match occurs, otherwise none if no matches are found.
"""
for (match_types, match_result) in matchers:
if isinstance(obj, match_types):
return match_result
else:
return None
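def _example_match_type():
    # Illustrative sketch only (not part of the original module): the first
    # matcher whose type tuple matches wins.
    matchers = [
        ((int, float), 'number'),
        (six.string_types, 'text'),
    ]
    return match_type(3.5, matchers)  # -> 'number'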
def countdown_iter(start_at, decr=1):
"""Generator that decrements after each generation until <= zero.
    NOTE(harlowja): we can likely remove this when we can use an
    ``itertools.count`` that takes a step (on py2.6, which we still support,
    that step parameter does **not** exist and therefore can't be used).
"""
if decr <= 0:
raise ValueError("Decrement value must be greater"
" than zero and not %s" % decr)
while start_at > 0:
yield start_at
start_at -= decr
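def _example_countdown_iter():
    # Illustrative sketch only: yields 5, 3, 1 and then stops, since the next
    # value would not be greater than zero.
    return list(countdown_iter(5, decr=2))  # -> [5, 3, 1]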
def extract_driver_and_conf(conf, conf_key):
"""Common function to get a driver name and its configuration."""
if isinstance(conf, six.string_types):
conf = {conf_key: conf}
maybe_uri = conf[conf_key]
try:
uri = parse_uri(maybe_uri)
except (TypeError, ValueError):
return (maybe_uri, conf)
else:
return (uri.scheme, merge_uri(uri, conf.copy()))
def reverse_enumerate(items):
"""Like reversed(enumerate(items)) but with less copying/cloning..."""
for i in countdown_iter(len(items)):
yield i - 1, items[i - 1]
def merge_uri(uri, conf):
"""Merges a parsed uri into the given configuration dictionary.
Merges the username, password, hostname, port, and query parameters of
a URI into the given configuration dictionary (it does **not** overwrite
existing configuration keys if they already exist) and returns the merged
configuration.
NOTE(harlowja): does not merge the path, scheme or fragment.
"""
uri_port = uri.port
specials = [
('username', uri.username, lambda v: bool(v)),
('password', uri.password, lambda v: bool(v)),
# NOTE(harlowja): A different check function is used since 0 is
# false (when bool(v) is applied), and that is a valid port...
('port', uri_port, lambda v: v is not None),
]
hostname = uri.hostname
if hostname:
if uri_port is not None:
hostname += ":%s" % (uri_port)
specials.append(('hostname', hostname, lambda v: bool(v)))
for (k, v, is_not_empty_value_func) in specials:
if is_not_empty_value_func(v):
conf.setdefault(k, v)
for (k, v) in six.iteritems(uri.params()):
conf.setdefault(k, v)
return conf
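def _example_merge_uri():
    # Illustrative sketch only; the URI below is made up for demonstration.
    # Keys already present in the configuration are kept (setdefault
    # semantics), so the explicit timeout wins over the one in the query string.
    uri = parse_uri("mysql://scott:tiger@localhost:3306/taskflow?timeout=5")
    conf = merge_uri(uri, {'timeout': 10})
    # conf now also carries 'username', 'password', 'port' and
    # 'hostname' ("localhost:3306") taken from the URI.
    return conf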
def find_subclasses(locations, base_cls, exclude_hidden=True):
"""Finds subclass types in the given locations.
    This will examine the given locations for types which are subclasses of
the base class type provided and returns the found subclasses (or fails
with exceptions if this introspection can not be accomplished).
If a string is provided as one of the locations it will be imported and
examined if it is a subclass of the base class. If a module is given,
all of its members will be examined for attributes which are subclasses of
the base class. If a type itself is given it will be examined for being a
subclass of the base class.
"""
derived = set()
for item in locations:
module = None
if isinstance(item, six.string_types):
try:
pkg, cls = item.split(':')
except ValueError:
module = importutils.import_module(item)
else:
obj = importutils.import_class('%s.%s' % (pkg, cls))
if not reflection.is_subclass(obj, base_cls):
raise TypeError("Object '%s' (%s) is not a '%s' subclass"
% (item, type(item), base_cls))
derived.add(obj)
elif isinstance(item, types.ModuleType):
module = item
elif reflection.is_subclass(item, base_cls):
derived.add(item)
else:
raise TypeError("Object '%s' (%s) is an unexpected type" %
(item, type(item)))
# If it's a module derive objects from it if we can.
if module is not None:
for (name, obj) in inspect.getmembers(module):
if name.startswith("_") and exclude_hidden:
continue
if reflection.is_subclass(obj, base_cls):
derived.add(obj)
return derived
def pick_first_not_none(*values):
"""Returns first of values that is *not* None (or None if all are/were)."""
for val in values:
if val is not None:
return val
return None
def parse_uri(uri):
"""Parses a uri into its components."""
# Do some basic validation before continuing...
if not isinstance(uri, six.string_types):
raise TypeError("Can only parse string types to uri data, "
"and not '%s' (%s)" % (uri, type(uri)))
match = _SCHEME_REGEX.match(uri)
if not match:
raise ValueError("Uri '%s' does not start with a RFC 3986 compliant"
" scheme" % (uri))
return netutils.urlsplit(uri)
def disallow_when_frozen(excp_cls):
"""Frozen checking/raising method decorator."""
def decorator(f):
@six.wraps(f)
def wrapper(self, *args, **kwargs):
if self.frozen:
raise excp_cls()
else:
return f(self, *args, **kwargs)
return wrapper
return decorator
def clamp(value, minimum, maximum, on_clamped=None):
"""Clamps a value to ensure its >= minimum and <= maximum."""
if minimum > maximum:
raise ValueError("Provided minimum '%s' must be less than or equal to"
" the provided maximum '%s'" % (minimum, maximum))
if value > maximum:
value = maximum
if on_clamped is not None:
on_clamped()
if value < minimum:
value = minimum
if on_clamped is not None:
on_clamped()
return value
def fix_newlines(text, replacement=os.linesep):
"""Fixes text that *may* end with wrong nl by replacing with right nl."""
return replacement.join(text.splitlines())
def binary_encode(text, encoding='utf-8', errors='strict'):
"""Encodes a text string into a binary string using given encoding.
Does nothing if data is already a binary string (raises on unknown types).
"""
if isinstance(text, six.binary_type):
return text
else:
return encodeutils.safe_encode(text, encoding=encoding,
errors=errors)
def binary_decode(data, encoding='utf-8', errors='strict'):
"""Decodes a binary string into a text string using given encoding.
Does nothing if data is already a text string (raises on unknown types).
"""
if isinstance(data, six.text_type):
return data
else:
return encodeutils.safe_decode(data, incoming=encoding,
errors=errors)
def _check_decoded_type(data, root_types=(dict,)):
if root_types:
if not isinstance(root_types, tuple):
root_types = tuple(root_types)
if not isinstance(data, root_types):
if len(root_types) == 1:
root_type = root_types[0]
raise ValueError("Expected '%s' root type not '%s'"
% (root_type, type(data)))
else:
raise ValueError("Expected %s root types not '%s'"
% (list(root_types), type(data)))
return data
def decode_msgpack(raw_data, root_types=(dict,)):
"""Parse raw data to get decoded object.
    Decodes a msgpack encoded 'blob' from a given raw data binary string and
checks that the root type of that decoded object is in the allowed set of
types (by default a dict should be the root type).
"""
try:
data = msgpackutils.loads(raw_data)
except Exception as e:
# TODO(harlowja): fix this when msgpackutils exposes the msgpack
# exceptions so that we can avoid catching just exception...
raise ValueError("Expected msgpack decodable data: %s" % e)
else:
return _check_decoded_type(data, root_types=root_types)
def decode_json(raw_data, root_types=(dict,)):
"""Parse raw data to get decoded object.
Decodes a JSON encoded 'blob' from a given raw data binary string and
checks that the root type of that decoded object is in the allowed set of
types (by default a dict should be the root type).
"""
try:
data = jsonutils.loads(binary_decode(raw_data))
except UnicodeDecodeError as e:
raise ValueError("Expected UTF-8 decodable data: %s" % e)
except ValueError as e:
raise ValueError("Expected JSON decodable data: %s" % e)
else:
return _check_decoded_type(data, root_types=root_types)
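def _example_decode_json():
    # Illustrative sketch only: the default root type is a dict, so decoding a
    # JSON list requires widening root_types explicitly.
    flow_doc = decode_json(b'{"name": "flow-1"}')
    task_names = decode_json(b'["fetch", "transform"]', root_types=(list,))
    return flow_doc, task_names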
class cachedproperty(object):
"""A *thread-safe* descriptor property that is only evaluated once.
This caching descriptor can be placed on instance methods to translate
those methods into properties that will be cached in the instance (avoiding
repeated attribute checking logic to do the equivalent).
NOTE(harlowja): by default the property that will be saved will be under
the decorated methods name prefixed with an underscore. For example if we
were to attach this descriptor to an instance method 'get_thing(self)' the
cached property would be stored under '_get_thing' in the self object
after the first call to 'get_thing' occurs.
"""
def __init__(self, fget=None, require_lock=True):
if require_lock:
self._lock = threading.RLock()
else:
self._lock = None
        # If a name is provided (as an argument) then this will be the string
        # to place the cached attribute under; if not, then it will be the
        # function itself to be wrapped into a property.
if inspect.isfunction(fget):
self._fget = fget
self._attr_name = "_%s" % (fget.__name__)
self.__doc__ = getattr(fget, '__doc__', None)
else:
self._attr_name = fget
self._fget = None
self.__doc__ = None
def __call__(self, fget):
# If __init__ received a string or a lock boolean then this will be
# the function to be wrapped as a property (if __init__ got a
# function then this will not be called).
self._fget = fget
if not self._attr_name:
self._attr_name = "_%s" % (fget.__name__)
self.__doc__ = getattr(fget, '__doc__', None)
return self
def __set__(self, instance, value):
raise AttributeError("can't set attribute")
def __delete__(self, instance):
raise AttributeError("can't delete attribute")
def __get__(self, instance, owner):
if instance is None:
return self
# Quick check to see if this already has been made (before acquiring
# the lock). This is safe to do since we don't allow deletion after
# being created.
if hasattr(instance, self._attr_name):
return getattr(instance, self._attr_name)
else:
if self._lock is not None:
self._lock.acquire()
try:
return getattr(instance, self._attr_name)
except AttributeError:
value = self._fget(instance)
setattr(instance, self._attr_name, value)
return value
finally:
if self._lock is not None:
self._lock.release()
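class _ExampleLazyConfig(object):
    # Illustrative usage sketch (not part of the original module): 'parsed' is
    # computed once on first access and then cached on the instance under the
    # '_parsed' attribute.
    def __init__(self, raw_blob):
        self._raw_blob = raw_blob
    @cachedproperty
    def parsed(self):
        return decode_json(self._raw_blob)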
def millis_to_datetime(milliseconds):
"""Converts number of milliseconds (from epoch) into a datetime object."""
return datetime.datetime.fromtimestamp(float(milliseconds) / 1000)
def get_version_string(obj):
"""Gets a object's version as a string.
Returns string representation of object's version taken from
    its 'version' attribute, or None if the object does not have such an
    attribute or its version is None.
"""
obj_version = getattr(obj, 'version', None)
if isinstance(obj_version, (list, tuple)):
obj_version = '.'.join(str(item) for item in obj_version)
if obj_version is not None and not isinstance(obj_version,
six.string_types):
obj_version = str(obj_version)
return obj_version
def sequence_minus(seq1, seq2):
"""Calculate difference of two sequences.
Result contains the elements from first sequence that are not
present in second sequence, in original order. Works even
if sequence elements are not hashable.
"""
result = list(seq1)
for item in seq2:
try:
result.remove(item)
except ValueError:
pass
return result
def as_int(obj, quiet=False):
"""Converts an arbitrary value into a integer."""
# Try "2" -> 2
try:
return int(obj)
except (ValueError, TypeError):
pass
# Try "2.5" -> 2
try:
return int(float(obj))
except (ValueError, TypeError):
pass
# Eck, not sure what this is then.
if not quiet:
raise TypeError("Can not translate '%s' (%s) to an integer"
% (obj, type(obj)))
return obj
@contextlib.contextmanager
def capture_failure():
"""Captures the occurring exception and provides a failure object back.
This will save the current exception information and yield back a
failure object for the caller to use (it will raise a runtime error if
no active exception is being handled).
This is useful since in some cases the exception context can be cleared,
resulting in None being attempted to be saved after an exception handler is
run. This can happen when eventlet switches greenthreads or when running an
exception handler, code raises and catches an exception. In both
cases the exception context will be cleared.
To work around this, we save the exception state, yield a failure and
then run other code.
For example::
>>> from taskflow.utils import misc
>>>
>>> def cleanup():
... pass
...
>>>
>>> def save_failure(f):
... print("Saving %s" % f)
...
>>>
>>> try:
... raise IOError("Broken")
... except Exception:
... with misc.capture_failure() as fail:
... print("Activating cleanup")
... cleanup()
... save_failure(fail)
...
Activating cleanup
Saving Failure: IOError: Broken
"""
exc_info = sys.exc_info()
if not any(exc_info):
raise RuntimeError("No active exception is being handled")
else:
yield failure.Failure(exc_info=exc_info)
def is_iterable(obj):
"""Tests an object to to determine whether it is iterable.
This function will test the specified object to determine whether it is
iterable. String types (both ``str`` and ``unicode``) are ignored and will
return False.
:param obj: object to be tested for iterable
:return: True if object is iterable and is not a string
"""
return (not isinstance(obj, six.string_types) and
isinstance(obj, collections.Iterable))
def safe_copy_dict(obj):
"""Copy an existing dictionary or default to empty dict...
    This will return an empty dict if the given object is falsey, otherwise it
    will create a dict from the given object (if a dictionary object is
    provided, a shallow copy of it is made).
"""
if not obj:
return {}
# default to a shallow copy to avoid most ownership issues
return dict(obj)
| apache-2.0 | -3,695,374,384,073,142,300 | 33.351201 | 79 | 0.619511 | false |
dungtn/baby-vqa | model.py | 1 | 3686 | from keras.models import Sequential
from keras.layers import Bidirectional, BatchNormalization
from keras.layers.embeddings import Embedding
from keras.layers.core import Dense, Activation, Merge, Dropout, Flatten, Reshape, Masking
from keras.layers.convolutional import MaxPooling2D
from keras.layers.recurrent import LSTM, GRU
class LSTMModel(object):
def __init__(self, vocab_size = 10000, img_dim=4096, word_dim=300, max_sent_len=26, nb_classes=1000, lstm_hidden_dim=512, fc_hidden_dim=2014, bidirect=True, dropout=0.5):
self.vocab_size = vocab_size
self.img_dim = img_dim
self.word_dim = word_dim
self.max_sent_len = max_sent_len
self.nb_classes = nb_classes
self.lstm_hidden_dim = lstm_hidden_dim
self.fc_hidden_dim = fc_hidden_dim
self.bidirect = bidirect
self.dropout = dropout
def build(self):
self.img_model = Sequential()
self.img_model.add(MaxPooling2D(input_shape=(14, 14, 512)))
self.img_model.add(Flatten())
for i in xrange(3):
self.img_model.add(Dense(self.img_dim, activation='tanh'))
self.img_model.add(BatchNormalization())
self.txt_model = Sequential()
# self.txt_model.add(Embedding(self.vocab_size, self.word_dim, input_length=self.max_sent_len, mask_zero = True))
if self.bidirect:
self.txt_model.add(Bidirectional(LSTM(output_dim=self.lstm_hidden_dim), input_shape=(self.max_sent_len, self.word_dim)))
# self.txt_model.add(Bidirectional(GRU(output_dim=self.lstm_hidden_dim), input_shape=(self.max_sent_len, self.word_dim)))
else:
M = Masking(mask_value=0., input_shape=(self.max_sent_len, self.word_dim))
self.txt_model.add(M)
self.txt_model.add(LSTM(output_dim=self.lstm_hidden_dim, input_shape=(self.max_sent_len, self.word_dim)))
# self.txt_model.add(GRU(output_dim=self.lstm_hidden_dim, input_shape=(self.max_sent_len, self.word_dim)))
self.model = Sequential()
self.model.add(Merge([self.txt_model, self.img_model], mode='concat', concat_axis=1))
self.model.add(BatchNormalization())
for i in xrange(2):
self.model.add(Dense(self.fc_hidden_dim, init='he_normal', activation='relu'))
self.model.add(BatchNormalization())
self.model.add(Dropout(self.dropout))
self.model.add(Dense(self.nb_classes, activation='softmax'))
self.model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
self.model.summary()
def fit(self, X_ques, X_img, y, nb_epoch=50, batch_size=50, shuffle=True):
return self.model.fit([X_ques, X_img], y, nb_epoch=nb_epoch, batch_size=batch_size, shuffle=shuffle)
def evaluate(self, X_ques_test, X_im_test, y_test, batch_size=50):
return self.model.evaluate([X_ques_test, X_im_test], y_test, batch_size=batch_size)
def train_on_batch(self, X_ques, X_img, y):
return self.model.train_on_batch([X_ques, X_img], y)
def test_on_batch(self, X_ques_test, X_img_test, y_test):
return self.model.test_on_batch([X_ques_test, X_img_test], y_test)
def save(self):
params = {
'img_dim': self.img_dim,
'word_dim': self.word_dim,
'max_sent_len': self.max_sent_len,
'nb_classes': self.nb_classes,
'lstm_hidden_dim': self.lstm_hidden_dim,
'fc_hidden_dim': self.fc_hidden_dim,
'bidirect': self.bidirect,
'dropout': self.dropout
}
fn = '../models/'+"".join(["{0}={1},".format(k,v) for k,v in params.iteritems()])
open(fn+'.json', 'w').write(self.model.to_json())
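# Illustrative usage sketch (not part of the original repo); the zero-filled
# arrays below only stand in for real question embeddings, conv feature maps
# and one-hot answers, and their shapes follow the defaults above.
def _example_train_step():
    import numpy as np
    vqa = LSTMModel(nb_classes=1000)
    vqa.build()
    X_ques = np.zeros((50, 26, 300), dtype='float32')     # (batch, max_sent_len, word_dim)
    X_img = np.zeros((50, 14, 14, 512), dtype='float32')  # feature maps fed to img_model
    y = np.zeros((50, 1000), dtype='float32')             # one-hot encoded answers
    return vqa.train_on_batch(X_ques, X_img, y)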
| mit | 3,799,223,031,472,505,000 | 46.25641 | 174 | 0.643516 | false |
jlumpe/pyalign | pyalign/matrix/matrices/create_matrices.py | 1 | 5544 | """Script that formats substitution matrices obtained from NCBI
This folder contains substitution matrices (mostly PAM and BLOSUM) used by
NCBI's BLAST tool. Data was obtained from
ftp://ftp.ncbi.nih.gov/blast/matrices/ on 2/14/16.
* PAM matrices - in two formats: PAM{n} and PAM{n}.cdi. N ranges from 10 to
500 in increments of 10 for the first. Scales range from ln(2)/2 for
PAM10 to ln(2)/7 for PAM500. The .cdi variants only exist for a few N
values and seem to be in units of 10ths of a bit.
* BLOSUM matrices - from BLOSUM30 to 100 in increments of 5. There's also a
BLOSUMN matrix which I am unsure about. Each has a .50 variant which is
the exact same file (by sha1 checksum).
* Nucleotide - NUC.4.4 looks useful, NUC.4.2 is non-rectangular?
* Identity - IDENTITY and MATCH can be replaced by
pyalign.matrix.identity_matrix.
* Others - DAYHOFF and GONNET seem to be slightly different PAM250 matrices.
Don't see these being useful.
"""
import os
from cStringIO import StringIO
from cPickle import Pickler, HIGHEST_PROTOCOL
import numpy as np
import pandas as pd
from pyalign import SubstitutionMatrix
ln2 = float(np.log(2))
# Names of all PAM matrix files along with their mutation distances and scales
# in inverse bits
pam_files = [
('PAM10', 10, 2),
('PAM20', 20, 2),
('PAM30', 30, 2),
('PAM40', 40, 2),
('PAM50', 50, 2),
('PAM60', 60, 2),
('PAM70', 70, 2),
('PAM80', 80, 2),
('PAM90', 90, 2),
('PAM100', 100, 2),
('PAM110', 110, 2),
('PAM120', 120, 2),
('PAM130', 130, 2),
('PAM140', 140, 2),
('PAM150', 150, 2),
('PAM160', 160, 2),
('PAM170', 170, 3),
('PAM180', 180, 3),
('PAM190', 190, 3),
('PAM200', 200, 3),
('PAM210', 210, 3),
('PAM220', 220, 3),
('PAM230', 230, 3),
('PAM240', 240, 3),
('PAM250', 250, 3),
('PAM260', 260, 3),
('PAM270', 270, 4),
('PAM280', 280, 4),
('PAM290', 290, 4),
('PAM300', 300, 4),
('PAM310', 310, 4),
('PAM320', 320, 4),
('PAM330', 330, 4),
('PAM340', 340, 4),
('PAM350', 350, 5),
('PAM360', 360, 5),
('PAM370', 370, 5),
('PAM380', 380, 5),
('PAM390', 390, 5),
('PAM400', 400, 5),
('PAM410', 410, 6),
('PAM420', 420, 6),
('PAM430', 430, 6),
('PAM440', 440, 6),
('PAM450', 450, 6),
('PAM460', 460, 6),
('PAM470', 470, 7),
('PAM480', 480, 7),
('PAM490', 490, 7),
('PAM500', 500, 7),
('PAM40.cdi', 40, 10),
('PAM80.cdi', 80, 10),
('PAM120.cdi', 120, 10),
('PAM160.cdi', 160, 10),
('PAM200.cdi', 200, 10),
('PAM250.cdi', 250, 10)
]
# Names of all BLOSUM matrix files along with their percent values and scales in
# inverse bits
blosum_files = [
('BLOSUM30', 30, 5),
('BLOSUM35', 35, 4),
('BLOSUM40', 40, 4),
('BLOSUM45', 45, 3),
('BLOSUM50', 50, 3),
('BLOSUM55', 55, 3),
('BLOSUM60', 60, 2),
('BLOSUM62', 62, 2),
('BLOSUM65', 65, 2),
('BLOSUM70', 70, 2),
('BLOSUM75', 75, 2),
('BLOSUM80', 80, 3),
('BLOSUM85', 85, 2),
('BLOSUM90', 90, 2),
('BLOSUM100', 100, 3),
('BLOSUMN', None, 2)
]
# Names of additional matrix files in format (file_name, new_name, attrs)
addl_files = [
('NUC', 'NUC.4.4', {'type': 'NUC'})
]
def parse_ncbi_matrix(lines):
"""Parses a matrix file in the format obtained from NCBI
returns a tuple of (matrix, symbols, description)
"""
# Buffer to store uncommented lines
table_buffer = StringIO()
# Store description as well
desc_lines = []
# Split lines into description/non-description
for line in lines:
if line.startswith('#'):
desc_line = line[1:]
if desc_line.startswith(' '):
desc_line = desc_line[1:]
if desc_line:
desc_lines.append(desc_line)
elif line.strip():
table_buffer.write(line)
# Parse table
table_buffer.seek(0)
table = pd.read_table(table_buffer, sep=r'\s+')
# Should have identical row/column labels
assert table.columns.equals(table.index)
return table.values, list(table.columns), ''.join(desc_lines).strip()
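def _example_parse_ncbi_matrix():
    # Illustrative sketch only; the toy two-symbol matrix below is made up but
    # follows the whitespace-separated NCBI layout handled above.
    text = ("# Toy matrix\n"
            "   A  G\n"
            "A  1 -1\n"
            "G -1  1\n")
    values, symbols, description = parse_ncbi_matrix(text.splitlines(True))
    # symbols == ['A', 'G'], values is a 2x2 numpy array, description == 'Toy matrix'
    return values, symbols, description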
def get_matrices():
"""(name, ncbi_file, extra_attrs) for each matrix to be formatted"""
matrices = list(addl_files)
# PAM matrices
for fname, dist, bits in pam_files:
name = 'PAM{}_{}'.format(dist, bits)
attrs = {
'type': 'PAM',
'scale': ln2 / bits,
'scale_invbits': bits,
'dist': dist
}
matrices.append((name, fname, attrs))
# BLOSUM matrices
for fname, pct, bits in blosum_files:
attrs = {
'type': 'BLOSUM',
'scale': ln2 / bits,
'scale_invbits': bits,
'percent': pct
}
matrices.append((fname, fname, attrs))
return matrices
def create_matrices(indir='.'):
"""Creates SubstitutionMatrix instances from NCBI matrices in directory.
Also gives dict of matrix attributes by name.
Returns:
dict (matrices), dict (attributes)
"""
matrices = dict()
# Stores attributes for each matrix
matrix_attrs = dict()
# For each matrix
for name, ncbi_file, extra_attrs in get_matrices():
# Parse the file
fpath = os.path.join(indir, ncbi_file)
with open(fpath) as fh:
values, symbols, description = parse_ncbi_matrix(fh)
# Create the matrix object
matrix = SubstitutionMatrix(symbols, values)
matrices[name] = matrix
# Attributes
attrs = {
'ncbi_file': ncbi_file,
'description': description,
'range': (np.min(values), np.max(values))
}
attrs.update(extra_attrs)
matrix_attrs[name] = attrs
return matrices, matrix_attrs
def pickle_matrices(matrices, outdir='.'):
"""Pickles dictionary of matrices output by create_matrices"""
for name, matrix in matrices.iteritems():
fpath = os.path.join(outdir, name + '.pickle')
with open(fpath, 'wb') as fh:
pickler = Pickler(fh, HIGHEST_PROTOCOL)
pickler.dump(matrix)
| mit | 8,317,513,757,221,103,000 | 22.793991 | 78 | 0.640693 | false |
antonc42/taiga-contrib-ldap-auth | taiga_contrib_ldap_auth/connector.py | 1 | 4779 | # Copyright (C) 2014 Andrey Antukh <[email protected]>
# Copyright (C) 2014 Jesús Espino <[email protected]>
# Copyright (C) 2014 David Barragán <[email protected]>
# Copyright (C) 2015 Ensky Lin <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ldap3 import Server, Connection, ANONYMOUS, SIMPLE, SYNC, SUBTREE, NONE
from django.conf import settings
from taiga.base.connectors.exceptions import ConnectorBaseException
class LDAPError(ConnectorBaseException):
pass
class LDAPConnectionError(LDAPError):
pass
class LDAPUserLoginError(LDAPError):
pass
SERVER = getattr(settings, "LDAP_SERVER", "")
PORT = getattr(settings, "LDAP_PORT", "")
SEARCH_BASE = getattr(settings, "LDAP_SEARCH_BASE", "")
SEARCH_FILTER_ADDITIONAL = getattr(settings, "LDAP_SEARCH_FILTER_ADDITIONAL", "")
BIND_DN = getattr(settings, "LDAP_BIND_DN", "")
BIND_PASSWORD = getattr(settings, "LDAP_BIND_PASSWORD", "")
USERNAME_ATTRIBUTE = getattr(settings, "LDAP_USERNAME_ATTRIBUTE", "")
EMAIL_ATTRIBUTE = getattr(settings, "LDAP_EMAIL_ATTRIBUTE", "")
FULL_NAME_ATTRIBUTE = getattr(settings, "LDAP_FULL_NAME_ATTRIBUTE", "")
def login(login: str, password: str) -> tuple:
"""
Connect to LDAP server, perform a search and attempt a bind.
    Can raise `LDAPConnectionError` or `LDAPUserLoginError` if any of the
    operations fail.
:returns: tuple (username, email, full_name)
"""
# connect to the LDAP server
if SERVER.lower().startswith("ldaps://"):
use_ssl = True
else:
use_ssl = False
try:
server = Server(SERVER, port = PORT, get_info = NONE, use_ssl = use_ssl)
except Exception as e:
error = "Error connecting to LDAP server: %s" % e
raise LDAPConnectionError({"error_message": error})
# authenticate as service if credentials provided, anonymously otherwise
if BIND_DN is not None and BIND_DN != '':
service_user = BIND_DN
service_pass = BIND_PASSWORD
service_auth = SIMPLE
else:
service_user = None
service_pass = None
service_auth = ANONYMOUS
try:
c = Connection(server, auto_bind = True, client_strategy = SYNC, check_names = True,
user = service_user, password = service_pass, authentication = service_auth)
except Exception as e:
error = "Error connecting to LDAP server: %s" % e
raise LDAPConnectionError({"error_message": error})
# search for user-provided login
search_filter = '(|(%s=%s)(%s=%s))' % (USERNAME_ATTRIBUTE, login, EMAIL_ATTRIBUTE, login)
if SEARCH_FILTER_ADDITIONAL:
search_filter = '(&%s%s)' % (search_filter, SEARCH_FILTER_ADDITIONAL)
try:
c.search(search_base = SEARCH_BASE,
search_filter = search_filter,
search_scope = SUBTREE,
attributes = [USERNAME_ATTRIBUTE, EMAIL_ATTRIBUTE, FULL_NAME_ATTRIBUTE],
paged_size = 5)
except Exception as e:
error = "LDAP login incorrect: %s" % e
raise LDAPUserLoginError({"error_message": error})
# stop if no search results
# TODO: handle multiple matches
if (len(c.response) == 0) or (c.response[0].get('type') != 'searchResEntry'):
raise LDAPUserLoginError({"error_message": "LDAP login not found"})
# attempt LDAP bind
username = c.response[0].get('raw_attributes').get(USERNAME_ATTRIBUTE)[0].decode('utf-8')
email = c.response[0].get('raw_attributes').get(EMAIL_ATTRIBUTE)[0].decode('utf-8')
full_name = c.response[0].get('raw_attributes').get(FULL_NAME_ATTRIBUTE)[0].decode('utf-8')
try:
dn = c.response[0].get('dn')
user_conn = Connection(server, auto_bind = True, client_strategy = SYNC,
check_names = True, authentication = SIMPLE,
user = dn, password = password)
except Exception as e:
error = "LDAP bind failed: %s" % e
raise LDAPUserLoginError({"error_message": error})
# LDAP binding successful, but some values might have changed, or
# this is the user's first login, so return them
return (username, email, full_name)
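def _example_login():
    # Illustrative sketch only (not part of the original module); the
    # credentials below are made up.  A failed connection, search or bind
    # surfaces as an LDAPConnectionError or LDAPUserLoginError.
    try:
        return login('jdoe', 's3cret')  # -> (username, email, full_name)
    except LDAPError:
        return None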
| agpl-3.0 | 1,047,184,773,024,419,600 | 38.479339 | 99 | 0.664224 | false |
josrolgil/exjobbCalvin | calvin/runtime/north/storage.py | 1 | 34594 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.runtime.north.plugins.storage import storage_factory
from calvin.runtime.north.plugins.coders.messages import message_coder_factory
from calvin.runtime.south.plugins.async import async
from calvin.utilities import calvinlogger
from calvin.utilities.calvin_callback import CalvinCB
from calvin.actor import actorport
from calvin.actor.actor import ShadowActor
from calvin.utilities import calvinconfig
from calvin.actorstore.store import GlobalStore
from calvin.utilities import dynops
import re
_log = calvinlogger.get_logger(__name__)
_conf = calvinconfig.get()
class Storage(object):
"""
Storage helper functions.
All functions in this class should be async and never block.
"""
def __init__(self, node):
self.localstore = {}
self.localstore_sets = {}
self.started = False
self.node = node
storage_type = _conf.get(None, 'storage_type')
self.proxy = _conf.get(None, 'storage_proxy') if storage_type == 'proxy' else None
_log.analyze(self.node.id, "+", {'proxy': self.proxy})
self.tunnel = {}
self.starting = storage_type != 'local'
self.storage = storage_factory.get(storage_type, node)
self.coder = message_coder_factory.get("json") # TODO: always json? append/remove requires json at the moment
self.flush_delayedcall = None
self.reset_flush_timeout()
### Storage life cycle management ###
def reset_flush_timeout(self):
""" Reset flush timeout
"""
self.flush_timeout = 0.2
def trigger_flush(self, delay=None):
""" Trigger a flush of internal data
"""
if self.localstore or self.localstore_sets:
if delay is None:
delay = self.flush_timeout
if self.flush_delayedcall is not None:
self.flush_delayedcall.cancel()
self.flush_delayedcall = async.DelayedCall(delay, self.flush_localdata)
def flush_localdata(self):
""" Write data in localstore to storage
"""
_log.debug("Flush local storage data")
if self.flush_timeout < 600:
self.flush_timeout = self.flush_timeout * 2
self.flush_delayedcall = None
for key in self.localstore:
_log.debug("Flush key %s: %s" % (key, self.localstore[key]))
self.storage.set(key=key, value=self.localstore[key],
cb=CalvinCB(func=self.set_cb, org_key=None, org_value=None, org_cb=None))
for key, value in self.localstore_sets.iteritems():
self._flush_append(key, value['+'])
self._flush_remove(key, value['-'])
def _flush_append(self, key, value):
if not value:
return
_log.debug("Flush append on key %s: %s" % (key, list(value)))
coded_value = self.coder.encode(list(value))
self.storage.append(key=key, value=coded_value,
cb=CalvinCB(func=self.append_cb, org_key=None, org_value=None, org_cb=None))
def _flush_remove(self, key, value):
if not value:
return
_log.debug("Flush remove on key %s: %s" % (key, list(value)))
coded_value = self.coder.encode(list(value))
self.storage.remove(key=key, value=coded_value,
cb=CalvinCB(func=self.remove_cb, org_key=None, org_value=None, org_cb=None))
def started_cb(self, *args, **kwargs):
""" Called when storage has started, flushes localstore
"""
_log.debug("Storage started!!")
if not args[0]:
return
self.started = True
self.trigger_flush(0)
if kwargs["org_cb"]:
async.DelayedCall(0, kwargs["org_cb"], args[0])
def start(self, iface='', cb=None):
""" Start storage
"""
_log.analyze(self.node.id, "+", None)
if self.starting:
name = self.node.attributes.get_node_name_as_str() or self.node.id
try:
self.storage.start(iface=iface, cb=CalvinCB(self.started_cb, org_cb=cb), name=name)
except:
_log.exception("Failed start of storage for name={}, switches to local".format(name))
if not self.proxy:
self._init_proxy()
def _init_proxy(self):
_log.analyze(self.node.id, "+ SERVER", None)
# We are not proxy client, so we can be proxy bridge/master
self._proxy_cmds = {'GET': self.get,
'SET': self.set,
'GET_CONCAT': self.get_concat,
'APPEND': self.append,
'REMOVE': self.remove,
'DELETE': self.delete,
'REPLY': self._proxy_reply}
try:
self.node.proto.register_tunnel_handler('storage', CalvinCB(self.tunnel_request_handles))
except:
# OK, then skip being a proxy server
pass
def stop(self, cb=None):
""" Stop storage
"""
_log.analyze(self.node.id, "+", {'started': self.started})
if self.started:
self.storage.stop(cb=cb)
elif cb:
cb()
self.started = False
### Storage operations ###
def set_cb(self, key, value, org_key, org_value, org_cb):
""" set callback, on error store in localstore and retry after flush_timeout
"""
if value:
if org_cb:
org_cb(key=key, value=True)
if key in self.localstore:
del self.localstore[key]
self.reset_flush_timeout()
else:
_log.error("Failed to store %s" % key)
if org_key and org_value:
if not org_value is None:
self.localstore[key] = org_value
if org_cb:
org_cb(key=key, value=False)
self.trigger_flush()
def set(self, prefix, key, value, cb):
""" Set key: prefix+key value: value
"""
_log.debug("Set key %s, value %s" % (prefix + key, value))
value = self.coder.encode(value) if value else value
if prefix + key in self.localstore_sets:
del self.localstore_sets[prefix + key]
# Always save locally
self.localstore[prefix + key] = value
if self.started:
self.storage.set(key=prefix + key, value=value, cb=CalvinCB(func=self.set_cb, org_key=key, org_value=value, org_cb=cb))
elif cb:
async.DelayedCall(0, cb, key=key, value=True)
def get_cb(self, key, value, org_cb, org_key):
""" get callback
"""
if value:
value = self.coder.decode(value)
org_cb(org_key, value)
def get(self, prefix, key, cb):
""" Get value for key: prefix+key, first look in localstore
"""
if not cb:
return
if prefix + key in self.localstore:
value = self.localstore[prefix + key]
if value:
value = self.coder.decode(value)
async.DelayedCall(0, cb, key=key, value=value)
else:
try:
self.storage.get(key=prefix + key, cb=CalvinCB(func=self.get_cb, org_cb=cb, org_key=key))
except:
_log.error("Failed to get: %s" % key)
async.DelayedCall(0, cb, key=key, value=False)
def get_iter_cb(self, key, value, it, org_key, include_key=False):
""" get callback
"""
_log.analyze(self.node.id, "+ BEGIN", {'value': value, 'key': org_key})
if value:
value = self.coder.decode(value)
it.append((key, value) if include_key else value)
_log.analyze(self.node.id, "+", {'value': value, 'key': org_key})
else:
_log.analyze(self.node.id, "+", {'value': 'FailedElement', 'key': org_key})
it.append((key, dynops.FailedElement) if include_key else dynops.FailedElement)
def get_iter(self, prefix, key, it, include_key=False):
""" Get value for key: prefix+key, first look in localstore
            Add the value to the supplied dynamic iterable (preferably a LimitedList or List)
"""
if it:
if prefix + key in self.localstore:
value = self.localstore[prefix + key]
if value:
value = self.coder.decode(value)
_log.analyze(self.node.id, "+", {'value': value, 'key': key})
it.append((key, value) if include_key else value)
else:
try:
self.storage.get(key=prefix + key,
cb=CalvinCB(func=self.get_iter_cb, it=it, org_key=key, include_key=include_key))
except:
_log.analyze(self.node.id, "+", {'value': 'FailedElement', 'key': key})
_log.error("Failed to get: %s" % key)
it.append((key, dynops.FailedElement) if include_key else dynops.FailedElement)
def get_concat_cb(self, key, value, org_cb, org_key, local_list):
""" get callback
"""
if value:
value = self.coder.decode(value)
org_cb(org_key, list(set(value + local_list)))
else:
org_cb(org_key, local_list if local_list else None)
def get_concat(self, prefix, key, cb):
""" Get value for key: prefix+key, first look in localstore
            Returned value is a list. The storage could be eventually consistent.
            For example a remove might only have reached part of the
            storage, and hence the returned list might still contain removed items,
            but it might also be missing recently added items.
"""
if not cb:
return
if prefix + key in self.localstore_sets:
_log.analyze(self.node.id, "+ GET LOCAL", None)
value = self.localstore_sets[prefix + key]
# Return the set that we intended to append since that's all we have until it is synced
local_list = list(value['+'])
else:
local_list = []
try:
self.storage.get_concat(key=prefix + key,
cb=CalvinCB(func=self.get_concat_cb, org_cb=cb, org_key=key, local_list=local_list))
except:
_log.error("Failed to get: %s" % key, exc_info=True)
async.DelayedCall(0, cb, key=key, value=local_list if local_list else None)
def get_concat_iter_cb(self, key, value, org_key, include_key, it):
""" get callback
"""
_log.analyze(self.node.id, "+ BEGIN", {'key': org_key, 'value': value, 'iter': str(it)})
if value:
value = self.coder.decode(value)
_log.analyze(self.node.id, "+ VALUE", {'value': value, 'key': org_key})
it.extend([(org_key, v) for v in value] if include_key else value)
it.final()
_log.analyze(self.node.id, "+ END", {'key': org_key, 'iter': str(it)})
def get_concat_iter(self, prefix, key, include_key=False):
""" Get value for key: prefix+key, first look in localstore
            Returned value is a dynamic iterable. The storage could be eventually
            consistent. For example a remove might only have reached part of the
            storage, and hence the returned iterable might still contain removed
            items, but it might also be missing recently added items.
"""
_log.analyze(self.node.id, "+ BEGIN", {'key': key})
if prefix + key in self.localstore_sets:
_log.analyze(self.node.id, "+ GET LOCAL", None)
value = self.localstore_sets[prefix + key]
# Return the set that we intended to append since that's all we have until it is synced
local_list = list(value['+'])
_log.analyze(self.node.id, "+", {'value': local_list, 'key': key})
else:
local_list = []
if include_key:
local_list = [(key, v) for v in local_list]
it = dynops.List(local_list)
try:
self.storage.get_concat(key=prefix + key,
cb=CalvinCB(func=self.get_concat_iter_cb, org_key=key,
include_key=include_key, it=it))
except:
if self.started:
_log.error("Failed to get: %s" % key, exc_info=True)
it.final()
_log.analyze(self.node.id, "+ END", {'key': key, 'iter': str(it)})
return it
def append_cb(self, key, value, org_key, org_value, org_cb):
""" append callback, on error retry after flush_timeout
"""
if value:
if org_cb:
org_cb(key=org_key, value=True)
if key in self.localstore_sets:
if self.localstore_sets[key]['-']:
self.localstore_sets[key]['+'] = set([])
else:
del self.localstore_sets[key]
self.reset_flush_timeout()
else:
_log.error("Failed to update %s" % key)
if org_cb:
org_cb(key=org_key, value=False)
self.trigger_flush()
def append(self, prefix, key, value, cb):
""" set operation append on key: prefix+key value: value is a list of items
"""
_log.debug("Append key %s, value %s" % (prefix + key, value))
# Keep local storage for sets updated until confirmed
if (prefix + key) in self.localstore_sets:
# Append value items
self.localstore_sets[prefix + key]['+'] |= set(value)
# Don't remove value items any more
self.localstore_sets[prefix + key]['-'] -= set(value)
else:
self.localstore_sets[prefix + key] = {'+': set(value), '-': set([])}
if self.started:
coded_value = self.coder.encode(list(self.localstore_sets[prefix + key]['+']))
self.storage.append(key=prefix + key, value=coded_value,
cb=CalvinCB(func=self.append_cb, org_key=key, org_value=value, org_cb=cb))
else:
if cb:
cb(key=key, value=True)
def remove_cb(self, key, value, org_key, org_value, org_cb):
""" remove callback, on error retry after flush_timeout
"""
if value == True:
if org_cb:
org_cb(key=org_key, value=True)
if key in self.localstore_sets:
if self.localstore_sets[key]['+']:
self.localstore_sets[key]['-'] = set([])
else:
del self.localstore_sets[key]
self.reset_flush_timeout()
else:
_log.error("Failed to update %s" % key)
if org_cb:
org_cb(key=org_key, value=False)
self.trigger_flush()
def remove(self, prefix, key, value, cb):
""" set operation remove on key: prefix+key value: value is a list of items
"""
_log.debug("Remove key %s, value %s" % (prefix + key, value))
# Keep local storage for sets updated until confirmed
if (prefix + key) in self.localstore_sets:
# Don't append value items any more
self.localstore_sets[prefix + key]['+'] -= set(value)
# Remove value items
self.localstore_sets[prefix + key]['-'] |= set(value)
else:
self.localstore_sets[prefix + key] = {'+': set([]), '-': set(value)}
if self.started:
coded_value = self.coder.encode(list(self.localstore_sets[prefix + key]['-']))
self.storage.remove(key=prefix + key, value=coded_value,
cb=CalvinCB(func=self.remove_cb, org_key=key, org_value=value, org_cb=cb))
else:
if cb:
cb(key=key, value=True)
def delete(self, prefix, key, cb):
""" Delete key: prefix+key (value set to None)
"""
_log.debug("Deleting key %s" % prefix + key)
if prefix + key in self.localstore:
del self.localstore[prefix + key]
if (prefix + key) in self.localstore_sets:
del self.localstore_sets[prefix + key]
if self.started:
self.set(prefix, key, None, cb)
else:
if cb:
cb(key, True)
### Calvin object handling ###
def add_node(self, node, cb=None):
"""
Add node to storage
"""
self.set(prefix="node-", key=node.id,
value={"uri": node.external_uri,
"control_uri": node.external_control_uri,
"attributes": {'public': node.attributes.get_public(),
'indexed_public': node.attributes.get_indexed_public(as_list=False)}}, cb=cb)
self._add_node_index(node)
# Store all actors on this node in storage
GlobalStore(node=node).export()
def _add_node_index(self, node, cb=None):
indexes = node.attributes.get_indexed_public()
try:
for index in indexes:
# TODO add callback, but currently no users supply a cb anyway
self.add_index(index, node.id)
except:
_log.debug("Add node index failed", exc_info=True)
pass
# Add the capabilities
try:
for c in node._calvinsys.list_capabilities():
self.add_index(['node', 'capabilities', c], node.id, root_prefix_level=3)
except:
_log.debug("Add node capabilities failed", exc_info=True)
pass
def get_node(self, node_id, cb=None):
"""
Get node data from storage
"""
self.get(prefix="node-", key=node_id, cb=cb)
def delete_node(self, node, cb=None):
"""
Delete node from storage
"""
self.delete(prefix="node-", key=node.id, cb=None if node.attributes.get_indexed_public() else cb)
if node.attributes.get_indexed_public():
self._delete_node_index(node, cb=cb)
def _delete_node_index(self, node, cb=None):
indexes = node.attributes.get_indexed_public()
_log.analyze(self.node.id, "+", {'indexes': indexes})
try:
counter = [len(indexes)] # counter value by reference used in callback
for index in indexes:
self.remove_index(index, node.id, cb=CalvinCB(self._delete_node_cb, counter=counter, org_cb=cb))
            # The remove_index operations get 1 second; after that we call the callback anyway, i.e. stop the node
async.DelayedCall(1.0, self._delete_node_timeout_cb, counter=counter, org_cb=cb)
except:
_log.debug("Remove node index failed", exc_info=True)
if cb:
cb()
def _delete_node_cb(self, counter, org_cb, *args, **kwargs):
_log.analyze(self.node.id, "+", {'counter': counter[0]})
counter[0] = counter[0] - 1
if counter[0] == 0:
org_cb(*args, **kwargs)
def _delete_node_timeout_cb(self, counter, org_cb):
_log.analyze(self.node.id, "+", {'counter': counter[0]})
if counter[0] > 0:
_log.debug("Delete node index not finished but call callback anyway")
org_cb()
def add_authz_server(self, node, cb=None):
"""Add node id to authorization server index."""
try:
self.add_index(['node', 'authz_server'], node.id, root_prefix_level=1)
except:
_log.debug("Add node id to authorization server index failed", exc_info=True)
pass
def delete_authz_server(self, node, cb=None):
"""Delete node id from authorization server index."""
_log.info("Delete node id")
try:
self.remove_index("node/authz_server", node.id, root_prefix_level=1)
except:
_log.debug("Delete node id from authorization server index failed", exc_info=True)
pass
def add_application(self, application, cb=None):
"""
Add application to storage
"""
_log.debug("Add application %s id %s" % (application.name, application.id))
self.set(prefix="application-", key=application.id,
value={"name": application.name,
"ns": application.ns,
# FIXME when all users of the actors field is updated, save the full dict only
"actors": application.actors.keys(),
"actors_name_map": application.actors,
"origin_node_id": application.origin_node_id},
cb=cb)
def get_application(self, application_id, cb=None):
"""
Get application from storage
"""
self.get(prefix="application-", key=application_id, cb=cb)
def delete_application(self, application_id, cb=None):
"""
Delete application from storage
"""
_log.debug("Delete application %s" % application_id)
self.delete(prefix="application-", key=application_id, cb=cb)
def add_actor(self, actor, node_id, cb=None):
"""
Add actor and its ports to storage
"""
_log.debug("Add actor %s id %s" % (actor, node_id))
data = {"name": actor.name, "type": actor._type, "node_id": node_id}
inports = []
for p in actor.inports.values():
port = {"id": p.id, "name": p.name}
inports.append(port)
self.add_port(p, node_id, actor.id, "in")
data["inports"] = inports
outports = []
for p in actor.outports.values():
port = {"id": p.id, "name": p.name}
outports.append(port)
self.add_port(p, node_id, actor.id, "out")
data["outports"] = outports
data["is_shadow"] = isinstance(actor, ShadowActor)
self.set(prefix="actor-", key=actor.id, value=data, cb=cb)
def get_actor(self, actor_id, cb=None):
"""
Get actor from storage
"""
self.get(prefix="actor-", key=actor_id, cb=cb)
def delete_actor(self, actor_id, cb=None):
"""
Delete actor from storage
"""
_log.debug("Delete actor id %s" % (actor_id))
self.delete(prefix="actor-", key=actor_id, cb=cb)
def add_port(self, port, node_id, actor_id=None, direction=None, cb=None):
"""
Add port to storage
"""
if direction is None:
if isinstance(port, actorport.InPort):
direction = "in"
else:
direction = "out"
if actor_id is None:
actor_id = port.owner.id
data = {"name": port.name, "connected": port.is_connected(
), "node_id": node_id, "actor_id": actor_id, "direction": direction}
if direction == "out":
if port.is_connected():
data["peers"] = port.get_peers()
else:
data["peers"] = []
elif direction == "in":
if port.is_connected():
data["peer"] = port.get_peer()
else:
data["peer"] = None
self.set(prefix="port-", key=port.id, value=data, cb=cb)
def get_port(self, port_id, cb=None):
"""
Get port from storage
"""
self.get(prefix="port-", key=port_id, cb=cb)
def delete_port(self, port_id, cb=None):
"""
Delete port from storage
"""
self.delete(prefix="port-", key=port_id, cb=cb)
def index_cb(self, key, value, org_cb, index_items):
"""
        Collect all the index-level operations into one callback
"""
_log.debug("index cb key:%s, value:%s, index_items:%s" % (key, value, index_items))
#org_key = key.partition("-")[2]
org_key = key
        # call the org_cb with False at the first False value (index_items is cleared so it is only done once)
if not value and index_items:
org_cb(key=org_key, value=False)
del index_items[:]
if org_key in index_items:
# remove this index level from list
index_items.remove(org_key)
# If all done send True
if not index_items:
org_cb(key=org_key, value=True)
def _index_strings(self, index, root_prefix_level):
# Make the list of index levels that should be used
        # The index string must be escaped with \/ and \\ for / and \ within levels, respectively
if isinstance(index, list):
items = index
else:
items = re.split(r'(?<![^\\]\\)/', index.lstrip("/"))
root = "/".join(items[:root_prefix_level])
del items[:root_prefix_level]
items.insert(0, root)
# index strings for all levels
indexes = ['/'+'/'.join(items[:l]) for l in range(1,len(items)+1)]
return indexes
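    # Worked example (illustrative only): for index
    # "node/affiliation/owner/com.ericsson/Harald" and root_prefix_level=2 the
    # levels produced by _index_strings() are
    #   /node/affiliation
    #   /node/affiliation/owner
    #   /node/affiliation/owner/com.ericsson
    #   /node/affiliation/owner/com.ericsson/Harald
    # i.e. everything at or below the root prefix is searchable level by level.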
def add_index(self, index, value, root_prefix_level=3, cb=None):
"""
Add value (typically a node id) to the storage as a set.
index: a string with slash as delimiter for finer level of index,
e.g. node/address/example_street/3/buildingA/level3/room3003,
node/affiliation/owner/com.ericsson/Harald,
node/affiliation/name/com.ericsson/laptop
OR a list of strings
value: the value that is to be added to the set stored at each level of the index
root_prefix_level: the top level of the index that can be searched,
with =1 then e.g. node/address, node/affiliation
cb: will be called when done.
"""
# TODO this implementation will store the value to each level of the index.
# When time permits a proper implementation should be done with for example
# a prefix hash table on top of the DHT or using other storage backend with
# prefix search built in.
_log.debug("add index %s: %s" % (index, value))
indexes = self._index_strings(index, root_prefix_level)
# make copy of indexes since altered in callbacks
for i in indexes[:]:
self.append(prefix="index-", key=i, value=[value],
cb=CalvinCB(self.index_cb, org_cb=cb, index_items=indexes) if cb else None)
def remove_index(self, index, value, root_prefix_level=2, cb=None):
"""
Remove value (typically a node id) from the storage as a set.
index: a string with slash as delimiter for finer level of index,
e.g. node/address/example_street/3/buildingA/level3/room3003,
node/affiliation/owner/com.ericsson/Harald,
node/affiliation/name/com.ericsson/laptop
value: the value that is to be removed from the set stored at each level of the index
root_prefix_level: the top level of the index that can be searched,
with =1 then e.g. node/address, node/affiliation
cb: will be called when done.
"""
        # TODO this implementation will delete the value from each level of the index.
# When time permits a proper implementation should be done with for example
# a prefix hash table on top of the DHT or using other storage backend with
# prefix search built in.
# TODO Currently we don't go deeper than the specified index for a remove,
# e.g. node/affiliation/owner/com.ericsson would remove the value from
        # all deeper indices. But no current use case exists either.
_log.debug("remove index %s: %s" % (index, value))
indexes = self._index_strings(index, root_prefix_level)
# make copy of indexes since altered in callbacks
for i in indexes[:]:
self.remove(prefix="index-", key=i, value=[value],
cb=CalvinCB(self.index_cb, org_cb=cb, index_items=indexes) if cb else None)
def get_index(self, index, cb=None):
"""
Get index from the storage.
index: a string with slash as delimiter for finer level of index,
e.g. node/address/example_street/3/buildingA/level3/room3003,
node/affiliation/owner/com.ericsson/Harald,
node/affiliation/name/com.ericsson/laptop
        cb: will be called when done. It should expect to be called several times with
             partial results, although currently it is only called once.
        Since storage might be eventually consistent, the caller must expect that the
        list can contain node ids that have been removed and that some node ids may
        not yet have reached the storage.
"""
# TODO this implementation will get the value from the level of the index.
# When time permits a proper implementation should be done with for example
# a prefix hash table on top of the DHT or using other storage backend with
# prefix search built in. A proper implementation might also have several callbacks
# since might get index from several levels of index trie, and instead of building a complete
# list before returning better to return iteratively for nodes with less memory
# or system with large number of nodes, might also need a timeout.
if isinstance(index, list):
index = "/".join(index)
if not index.startswith("/"):
index = "/" + index
_log.debug("get index %s" % (index))
self.get_concat(prefix="index-", key=index, cb=cb)
def get_index_iter(self, index, include_key=False):
"""
Get index from the storage.
index: a string with slash as delimiter for finer level of index,
e.g. node/address/example_street/3/buildingA/level3/room3003,
node/affiliation/owner/com.ericsson/Harald,
node/affiliation/name/com.ericsson/laptop
        Since storage might be eventually consistent, the caller must expect that the
        list can contain node ids that have been removed and that some node ids may
        not yet have reached the storage.
"""
# TODO this implementation will get the value from the level of the index.
# When time permits a proper implementation should be done with for example
# a prefix hash table on top of the DHT or using other storage backend with
# prefix search built in.
if isinstance(index, list):
index = "/".join(index)
if not index.startswith("/"):
index = "/" + index
_log.debug("get index iter %s" % (index))
return self.get_concat_iter(prefix="index-", key=index, include_key=include_key)
### Storage proxy server ###
def tunnel_request_handles(self, tunnel):
""" Incoming tunnel request for storage proxy server"""
# TODO check if we want a tunnel first
_log.analyze(self.node.id, "+ SERVER", {'tunnel_id': tunnel.id})
self.tunnel[tunnel.peer_node_id] = tunnel
tunnel.register_tunnel_down(CalvinCB(self.tunnel_down, tunnel))
tunnel.register_tunnel_up(CalvinCB(self.tunnel_up, tunnel))
tunnel.register_recv(CalvinCB(self.tunnel_recv_handler, tunnel))
# We accept it by returning True
return True
def tunnel_down(self, tunnel):
""" Callback that the tunnel is not accepted or is going down """
_log.analyze(self.node.id, "+ SERVER", {'tunnel_id': tunnel.id})
# We should always return True which sends an ACK on the destruction of the tunnel
return True
def tunnel_up(self, tunnel):
""" Callback that the tunnel is working """
_log.analyze(self.node.id, "+ SERVER", {'tunnel_id': tunnel.id})
        # We should always return True which sends an ACK that the tunnel is up
return True
def _proxy_reply(self, cb, *args, **kwargs):
# Should not get any replies to the server but log it just in case
_log.analyze(self.node.id, "+ SERVER", {args: args, 'kwargs': kwargs})
def tunnel_recv_handler(self, tunnel, payload):
""" Gets called when a storage client request"""
_log.debug("Storage proxy request %s" % payload)
_log.analyze(self.node.id, "+ SERVER", {'payload': payload})
if 'cmd' in payload and payload['cmd'] in self._proxy_cmds:
if 'value' in payload:
if payload['cmd'] == 'SET' and payload['value'] is None:
# We detected a delete operation, since a set op with unencoded None is a delete
payload['cmd'] = 'DELETE'
payload.pop('value')
else:
                    # Normal set op; the value will be encoded again in the set func when using external storage, hence decode it here
payload['value']=self.coder.decode(payload['value'])
            # Call this node's storage methods, which could be local or DHT.
            # prefix is empty since that is already in the key (these calls come from the storage plugin level).
            # If we are doing a get or get_concat then the result needs to be encoded, to correspond with what the
            # client's higher levels expect from the storage plugin level.
self._proxy_cmds[payload['cmd']](cb=CalvinCB(self._proxy_send_reply, tunnel=tunnel,
encode=True if payload['cmd'] in ('GET', 'GET_CONCAT') else False,
msgid=payload['msg_uuid']),
prefix="",
**{k: v for k, v in payload.iteritems() if k in ('key', 'value')})
else:
_log.error("Unknown storage proxy request %s" % payload['cmd'] if 'cmd' in payload else "")
def _proxy_send_reply(self, key, value, tunnel, encode, msgid):
_log.analyze(self.node.id, "+ SERVER", {'msgid': msgid, 'key': key, 'value': value})
tunnel.send({'cmd': 'REPLY', 'msg_uuid': msgid, 'key': key, 'value': self.coder.encode(value) if encode else value})
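    # Sketch of the proxy exchange handled above (field values are
    # illustrative only): a storage client on a proxy node sends e.g.
    #
    #   {'cmd': 'SET', 'key': 'actor-<id>', 'value': <coder-encoded data>, 'msg_uuid': '<uuid>'}
    #
    # over the tunnel; tunnel_recv_handler() decodes the value (or turns an
    # unencoded None into a DELETE), calls the corresponding local storage
    # method, and _proxy_send_reply() answers with
    #
    #   {'cmd': 'REPLY', 'msg_uuid': '<uuid>', 'key': 'actor-<id>', 'value': <result>}
    #
    # where the value is re-encoded for GET and GET_CONCAT replies.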
| apache-2.0 | -8,092,037,025,147,654,000 | 41.446626 | 131 | 0.568682 | false |
cordoval/myhdl-python | myhdl/test/conversion/general/test_adapter.py | 1 | 1607 | from myhdl import *
def adapter(o_err, i_err, o_spec, i_spec):
nomatch = Signal(bool(0))
other = Signal(bool(0))
o_err_bits = []
for s in o_spec:
if s == 'other':
o_err_bits.append(other)
elif s == 'nomatch':
o_err_bits.append(nomatch)
else:
bit = i_err(i_spec[s])
o_err_bits.append(bit)
o_err_vec = ConcatSignal(*o_err_bits)
other_bits = []
for s, i in i_spec.items():
if s in o_spec:
continue
bit = i_err(i)
other_bits.append(bit)
other_vec = ConcatSignal(*other_bits)
@always_comb
def assign():
nomatch.next = 0
other.next = (other_vec != 0)
o_err.next = o_err_vec
return assign
def bench_adapter(conv=False):
o_spec = ('c', 'a', 'other', 'nomatch')
i_spec = { 'a' : 1, 'b' : 2, 'c' : 0, 'd' : 3, 'e' : 4, 'f' : 5, }
o_err = Signal(intbv(0)[4:])
i_err = Signal(intbv(0)[6:])
if conv:
dut = conv(adapter, o_err, i_err, o_spec, i_spec)
else:
dut = adapter(o_err, i_err, o_spec, i_spec)
N = 2**len(i_err)
@instance
def stimulus():
for i in range(N):
i_err.next = i
yield delay(10)
assert o_err[0] == 0
assert o_err[1] == (i_err[2] | i_err[3] | i_err[4] | i_err[5])
assert o_err[2] == i_err[1]
assert o_err[3] == i_err[0]
print o_err
return dut, stimulus
def test_adapter():
assert conversion.verify(bench_adapter) == 0
bench_adapter(toVerilog)
bench_adapter(toVHDL)
| lgpl-2.1 | 4,795,991,567,857,670,000 | 22.985075 | 74 | 0.501556 | false |
hydroffice/hyo_ssp | hydroffice/ssp/atlases/woa09checker.py | 1 | 2109 | from __future__ import absolute_import, division, print_function, unicode_literals
import os
import logging
log = logging.getLogger(__name__)
from ..helper import Helper as BaseHelper
from hydroffice.base.ftpconnector import FtpConnector
class Woa09Checker(object):
def __init__(self, force_download=True):
self.present = False
self.atlases_folder = self.get_atlases_folder()
if not self.is_present() and force_download:
self.present = self._download_and_unzip()
else:
self.present = True
@classmethod
def get_atlases_folder(cls):
""" Return the folder used to store atlases. """
ssp_folder = BaseHelper.default_projects_folder()
return os.path.join(ssp_folder, 'Atlases')
@classmethod
def is_present(cls):
""" Check if the WOA09 atlas is present. """
atlases_folder = cls.get_atlases_folder()
if not os.path.exists(atlases_folder):
log.debug('not found atlases folder')
return False
check_woa09_file = os.path.join(atlases_folder, 'woa09', 'landsea.msk')
log.debug("checking WOA09 test file at path %s" % check_woa09_file)
if not os.path.exists(check_woa09_file):
log.debug('not found woa09 atlas')
return False
return True
def _download_and_unzip(self):
""" Attempt to download the WOA09 atlas. """
        log.debug('downloading WOA09 atlas')
try:
if not os.path.exists(self.atlases_folder):
os.makedirs(self.atlases_folder)
ftp = FtpConnector("ftp.ccom.unh.edu", show_progress=True, debug_mode=False)
data_zip_src = "fromccom/hydroffice/woa09.red.zip"
data_zip_dst = os.path.join(self.atlases_folder, "woa09.red.zip")
ftp.get_file(data_zip_src, data_zip_dst, unzip_it=True)
return self.is_present()
except Exception as e:
log.error('during WOA09 download and unzip: %s' % e)
return False
| lgpl-3.0 | 8,534,105,163,208,124,000 | 33.15 | 88 | 0.602655 | false |
UAVCAN/pyuavcan | pyuavcan/transport/commons/crc/_base.py | 1 | 1455 | # Copyright (c) 2019 UAVCAN Consortium
# This software is distributed under the terms of the MIT License.
# Author: Pavel Kirienko <[email protected]>
from __future__ import annotations
import abc
import typing
class CRCAlgorithm(abc.ABC):
"""
Implementations are default-constructible.
"""
@abc.abstractmethod
def add(self, data: typing.Union[bytes, bytearray, memoryview]) -> None:
"""
Updates the value with the specified block of data.
"""
raise NotImplementedError
@abc.abstractmethod
def check_residue(self) -> bool:
"""
Checks if the current state matches the algorithm-specific residue.
"""
raise NotImplementedError
@property
@abc.abstractmethod
def value(self) -> int:
"""
The current CRC value, with output XOR applied, if applicable.
"""
raise NotImplementedError
@property
@abc.abstractmethod
def value_as_bytes(self) -> bytes:
"""
The current CRC value serialized in the algorithm-specific byte order.
"""
raise NotImplementedError
@classmethod
def new(cls, *fragments: typing.Union[bytes, bytearray, memoryview]) -> CRCAlgorithm:
"""
A factory that creates the new instance with the value computed over the fragments.
"""
self = cls()
for frag in fragments:
self.add(frag)
return self
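# A rough usage sketch (CRC32C is a hypothetical concrete subclass here, not
# defined in this module): compute a CRC over two fragments, append it to the
# message, and verify the whole thing with the residue check.
#
#   crc = CRC32C.new(b'\x01\x02', b'\x03\x04')
#   packet = b'\x01\x02\x03\x04' + crc.value_as_bytes
#   assert CRC32C.new(packet).check_residue()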
| mit | 8,470,442,965,310,872,000 | 26.45283 | 91 | 0.631615 | false |
hellsgate1001/thatforum_django | thatforum/models.py | 1 | 3446 | from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.template.defaultfilters import slugify
from mptt.models import MPTTModel, TreeForeignKey
class ForumCategory(MPTTModel):
parent = TreeForeignKey(
'self', blank=True, null=True, related_name='children'
)
name = models.CharField(max_length=255)
slug = models.SlugField(max_length=255)
description = models.CharField(max_length=255, blank=True)
order = models.PositiveIntegerField(blank=True, null=True)
def __unicode__(self):
return self.name
@property
def last_post(self):
if self.parent is None:
return None
response = None
for thread in self.forumthread_set.all():
if response is None:
response = thread.last_post
else:
if thread.last_post.created > response.created:
response = thread.last_post
return response
@property
def post_count(self):
count = 0
for thread in self.forumthread_set.all():
count += thread.forumpost_set.count()
return count
class Meta:
verbose_name_plural = 'Forum categories'
class ForumThread(models.Model):
category = models.ForeignKey(ForumCategory)
title = models.CharField(max_length=255)
slug = models.SlugField(max_length=255)
author = models.ForeignKey(settings.AUTH_USER_MODEL)
created = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.title
def get_absolute_url(self):
return reverse('thread_home', kwargs={'slug': self.slug})
@property
def last_post(self):
return self.forumpost_set.order_by('-created').first()
@property
def num_replies(self):
return self.forumpost_set.filter(is_thread_starter=False).count()
@property
def thread_starter(self):
return self.forumpost_set.get(thread=self, is_thread_starter=True)
def save(self, *args, **kwargs):
if self.slug == '':
self.slug = slugify(self.title)
return super(ForumThread, self).save(*args, **kwargs)
class ForumPost(models.Model):
thread = models.ForeignKey(ForumThread)
post = models.TextField()
author = models.ForeignKey(settings.AUTH_USER_MODEL)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
reply_to = models.ForeignKey('self', blank=True, null=True)
is_thread_starter = models.BooleanField(default=False)
def __unicode__(self):
return '%(thread)s - %(pk)s' % {
'thread': self.thread.title,
'pk': self.pk
}
def get_breadcrumb(self):
breadcrumb = [
(
self.thread.title,
reverse(
'thread_home',
kwargs={'slug': self.thread.slug}
)
),
]
category = self.thread.category
while True:
breadcrumb_item = (
category.name,
reverse(
'category_home',
kwargs={'slug': category.slug}
),
)
breadcrumb.insert(0, breadcrumb_item)
if category.parent is None:
break
category = category.parent
return breadcrumb
| mit | 2,913,269,870,506,611,700 | 28.965217 | 74 | 0.595763 | false |
domob1812/namecore | test/functional/tool_wallet.py | 1 | 9555 | #!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoin-wallet."""
import hashlib
import os
import stat
import subprocess
import textwrap
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
BUFFER_SIZE = 16 * 1024
class ToolWalletTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.rpc_timeout = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
self.skip_if_no_wallet_tool()
def bitcoin_wallet_process(self, *args):
binary = self.config["environment"]["BUILDDIR"] + '/src/namecoin-wallet' + self.config["environment"]["EXEEXT"]
args = ['-datadir={}'.format(self.nodes[0].datadir), '-chain=%s' % self.chain] + list(args)
return subprocess.Popen([binary] + args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
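    # For illustration (paths and chain name depend on the test setup), the
    # helper above ends up spawning something like
    #
    #   <builddir>/src/namecoin-wallet -datadir=<node0 datadir> -chain=regtest -wallet=wallet.dat info
    #
    # and the assert_* helpers below check its exit code, stdout and stderr.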
def assert_raises_tool_error(self, error, *args):
p = self.bitcoin_wallet_process(*args)
stdout, stderr = p.communicate()
assert_equal(p.poll(), 1)
assert_equal(stdout, '')
assert_equal(stderr.strip(), error)
def assert_tool_output(self, output, *args):
p = self.bitcoin_wallet_process(*args)
stdout, stderr = p.communicate()
assert_equal(stderr, '')
assert_equal(stdout, output)
assert_equal(p.poll(), 0)
def wallet_shasum(self):
h = hashlib.sha1()
mv = memoryview(bytearray(BUFFER_SIZE))
with open(self.wallet_path, 'rb', buffering=0) as f:
for n in iter(lambda : f.readinto(mv), 0):
h.update(mv[:n])
return h.hexdigest()
def wallet_timestamp(self):
return os.path.getmtime(self.wallet_path)
def wallet_permissions(self):
return oct(os.lstat(self.wallet_path).st_mode)[-3:]
def log_wallet_timestamp_comparison(self, old, new):
result = 'unchanged' if new == old else 'increased!'
self.log.debug('Wallet file timestamp {}'.format(result))
def test_invalid_tool_commands_and_args(self):
self.log.info('Testing that various invalid commands raise with specific error messages')
self.assert_raises_tool_error('Invalid command: foo', 'foo')
# `bitcoin-wallet help` raises an error. Use `bitcoin-wallet -help`.
self.assert_raises_tool_error('Invalid command: help', 'help')
self.assert_raises_tool_error('Error: two methods provided (info and create). Only one method should be provided.', 'info', 'create')
self.assert_raises_tool_error('Error parsing command line arguments: Invalid parameter -foo', '-foo')
self.assert_raises_tool_error('Error loading wallet.dat. Is wallet being used by other process?', '-wallet=wallet.dat', 'info')
self.assert_raises_tool_error('Error: no wallet file at nonexistent.dat', '-wallet=nonexistent.dat', 'info')
def test_tool_wallet_info(self):
# Stop the node to close the wallet to call the info command.
self.stop_node(0)
self.log.info('Calling wallet tool info, testing output')
#
# TODO: Wallet tool info should work with wallet file permissions set to
# read-only without raising:
# "Error loading wallet.dat. Is wallet being used by another process?"
# The following lines should be uncommented and the tests still succeed:
#
# self.log.debug('Setting wallet file permissions to 400 (read-only)')
# os.chmod(self.wallet_path, stat.S_IRUSR)
# assert(self.wallet_permissions() in ['400', '666']) # Sanity check. 666 because Appveyor.
# shasum_before = self.wallet_shasum()
timestamp_before = self.wallet_timestamp()
self.log.debug('Wallet file timestamp before calling info: {}'.format(timestamp_before))
out = textwrap.dedent('''\
Wallet info
===========
Encrypted: no
HD (hd seed available): yes
Keypool Size: 2
Transactions: 0
Address Book: 3
''')
self.assert_tool_output(out, '-wallet=wallet.dat', 'info')
timestamp_after = self.wallet_timestamp()
self.log.debug('Wallet file timestamp after calling info: {}'.format(timestamp_after))
self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after)
self.log.debug('Setting wallet file permissions back to 600 (read/write)')
os.chmod(self.wallet_path, stat.S_IRUSR | stat.S_IWUSR)
assert(self.wallet_permissions() in ['600', '666']) # Sanity check. 666 because Appveyor.
#
# TODO: Wallet tool info should not write to the wallet file.
# The following lines should be uncommented and the tests still succeed:
#
# assert_equal(timestamp_before, timestamp_after)
# shasum_after = self.wallet_shasum()
# assert_equal(shasum_before, shasum_after)
# self.log.debug('Wallet file shasum unchanged\n')
def test_tool_wallet_info_after_transaction(self):
"""
Mutate the wallet with a transaction to verify that the info command
output changes accordingly.
"""
self.start_node(0)
self.log.info('Generating transaction to mutate wallet')
self.nodes[0].generate(1)
self.stop_node(0)
self.log.info('Calling wallet tool info after generating a transaction, testing output')
shasum_before = self.wallet_shasum()
timestamp_before = self.wallet_timestamp()
self.log.debug('Wallet file timestamp before calling info: {}'.format(timestamp_before))
out = textwrap.dedent('''\
Wallet info
===========
Encrypted: no
HD (hd seed available): yes
Keypool Size: 2
Transactions: 1
Address Book: 3
''')
self.assert_tool_output(out, '-wallet=wallet.dat', 'info')
shasum_after = self.wallet_shasum()
timestamp_after = self.wallet_timestamp()
self.log.debug('Wallet file timestamp after calling info: {}'.format(timestamp_after))
self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after)
#
# TODO: Wallet tool info should not write to the wallet file.
# This assertion should be uncommented and succeed:
# assert_equal(timestamp_before, timestamp_after)
assert_equal(shasum_before, shasum_after)
self.log.debug('Wallet file shasum unchanged\n')
def test_tool_wallet_create_on_existing_wallet(self):
self.log.info('Calling wallet tool create on an existing wallet, testing output')
shasum_before = self.wallet_shasum()
timestamp_before = self.wallet_timestamp()
self.log.debug('Wallet file timestamp before calling create: {}'.format(timestamp_before))
out = textwrap.dedent('''\
Topping up keypool...
Wallet info
===========
Encrypted: no
HD (hd seed available): yes
Keypool Size: 2000
Transactions: 0
Address Book: 0
''')
self.assert_tool_output(out, '-wallet=foo', 'create')
shasum_after = self.wallet_shasum()
timestamp_after = self.wallet_timestamp()
self.log.debug('Wallet file timestamp after calling create: {}'.format(timestamp_after))
self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after)
assert_equal(timestamp_before, timestamp_after)
assert_equal(shasum_before, shasum_after)
self.log.debug('Wallet file shasum unchanged\n')
def test_getwalletinfo_on_different_wallet(self):
self.log.info('Starting node with arg -wallet=foo')
self.start_node(0, ['-wallet=foo'])
self.log.info('Calling getwalletinfo on a different wallet ("foo"), testing output')
shasum_before = self.wallet_shasum()
timestamp_before = self.wallet_timestamp()
self.log.debug('Wallet file timestamp before calling getwalletinfo: {}'.format(timestamp_before))
out = self.nodes[0].getwalletinfo()
self.stop_node(0)
shasum_after = self.wallet_shasum()
timestamp_after = self.wallet_timestamp()
self.log.debug('Wallet file timestamp after calling getwalletinfo: {}'.format(timestamp_after))
assert_equal(0, out['txcount'])
assert_equal(1000, out['keypoolsize'])
assert_equal(1000, out['keypoolsize_hd_internal'])
assert_equal(True, 'hdseedid' in out)
self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after)
assert_equal(timestamp_before, timestamp_after)
assert_equal(shasum_after, shasum_before)
self.log.debug('Wallet file shasum unchanged\n')
def run_test(self):
self.wallet_path = os.path.join(self.nodes[0].datadir, self.chain, 'wallets', 'wallet.dat')
self.test_invalid_tool_commands_and_args()
# Warning: The following tests are order-dependent.
self.test_tool_wallet_info()
self.test_tool_wallet_info_after_transaction()
self.test_tool_wallet_create_on_existing_wallet()
self.test_getwalletinfo_on_different_wallet()
if __name__ == '__main__':
ToolWalletTest().main()
| mit | -5,685,872,337,448,532,000 | 44.28436 | 144 | 0.643642 | false |
jokey2k/pyClanSphere | pyClanSphere/views/core.py | 1 | 3780 | # -*- coding: utf-8 -*-
"""
pyClanSphere.views.core
~~~~~~~~~~~~~~~~~~~~~~~
This module exports the main index page where plugins can hook in to display their widgets
:copyright: (c) 2009 - 2010 by the pyClanSphere Team,
see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from werkzeug.exceptions import NotFound
from pyClanSphere import cache
from pyClanSphere.api import *
from pyClanSphere.i18n import _, ngettext
from pyClanSphere.application import Response
from pyClanSphere.models import User
from pyClanSphere.utils import dump_json
from pyClanSphere.utils.xml import generate_rsd, dump_xml, AtomFeed
@cache.response()
def index(request):
"""Just show the pyClanSphere license and some other legal stuff."""
context = {}
for callback in signals.frontpage_context_collect.receivers_for(signals.ANY):
context = callback(signals.ANY, context=context)
return render_response('index.html', **context)
@cache.response()
def imprint(request):
"""Just show the pyClanSphere license and some other legal stuff."""
return render_response('imprint.html')
@cache.response()
def profile(request, user_id):
"""Render profile page for given user"""
user = User.query.get(user_id)
if not user:
raise NotFound()
if not request.user.is_somebody:
return render_response('profile_not_public.html')
addondata = signals.public_profile_rendered.send(user=user)
addons = None
return render_response('profile.html', user=user, profileaddons=addons)
def json_service(req, identifier):
"""Handle a JSON service req."""
handler = req.app._services.get(identifier)
if handler is None:
raise NotFound()
#! if this event returns a handler it is called instead of the default
#! handler. Useful to intercept certain requests.
for callback in signals.before_json_service_called.receivers_for(signals.ANY):
rv = callback(identifier, handler)
if rv is not None:
handler = rv
result = handler(req)
#! called right after json callback returned some data with the identifier
#! of the req method and the result object. Note that events *have*
#! to return an object, even if it's just changed in place, otherwise the
#! return value will be `null` (None).
for callback in signals.after_json_service_called.receivers_for(signals.ANY):
result = callback(identifier, result)
return Response(dump_json(result), mimetype='text/javascript')
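# A rough usage sketch (the service name and handler below are hypothetical):
# a JSON service is just a callable registered under an identifier on the
# application, e.g.
#
#   def ping(req):
#       return {'pong': True}
#
#   app._services['ping'] = ping
#
# so that a request dispatched to json_service(req, 'ping') responds with the
# body {"pong": true} and mimetype text/javascript.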
def xml_service(req, identifier):
"""Handle a XML service req."""
handler = req.app._services.get(identifier)
if handler is None:
raise NotFound()
#! if this event returns a handler it is called instead of the default
#! handler. Useful to intercept certain requests.
for callback in signals.before_xml_service_called.receivers_for(signals.ANY):
rv = callback(identifier, handler)
if rv is not None:
handler = rv
result = handler(req)
#! called right after xml callback returned some data with the identifier
#! of the req method and the result object. Note that events *have*
#! to return an object, even if it's just changed in place, otherwise the
#! return value will be None.
for callback in signals.after_xml_service_called.receivers_for(signals.ANY):
result = callback(identifier, result)
return Response(dump_xml(result), mimetype='text/xml')
def service_rsd(req):
"""Serves and RSD definition (really simple discovery) so that service
frontends can query the apis that are available.
:URL endpoint: ``core/service_rsd``
"""
return Response(generate_rsd(req.app), mimetype='application/xml')
| bsd-3-clause | 8,366,069,577,547,890,000 | 36.058824 | 94 | 0.696296 | false |
mauimuc/gptt | src/fig_discretization.py | 1 | 1032 | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Stefan Mauerberger"
__copyright__ = "Copyright (C) 2017 Stefan Mauerberger"
__license__ = "GPLv3"
''' Save a plot of the discretization as PGF file '''
import numpy as np
from matplotlib import pyplot as plt
from plotting import rcParams, prepare_map
from gptt import read_station_file, ListPairs
from reference import dt_obs
plt.rcParams.update(rcParams)
# Read station coordinates
all_stations = read_station_file('../dat/stations.dat')
# Read pseudo data
pseudo_data = np.genfromtxt('../dat/pseudo_data.dat', dtype=dt_obs)
# Instantiate
pairs = ListPairs(pseudo_data, all_stations)
# Stations
stations = pairs.stations
# Discretization
points = pairs.points
# Prepare map
m = prepare_map()
# Plot station locations
m.scatter(stations['lon'], stations['lat'], lw=0, color='g', latlon=True)
# Plot discretization
m.scatter(points['lon'], points['lat'], lw=0, marker='.', s=4, latlon=True, color='g', rasterized=True)
plt.savefig('../fig_discretization.pgf')
| gpl-3.0 | -8,931,367,721,160,098,000 | 24.170732 | 103 | 0.715116 | false |
gentoo/gentoo-keys | gkeys/gkeys/actionbase.py | 1 | 2709 | #
#-*- coding:utf-8 -*-
"""
Gentoo-keys - actionbase.py
Base api interface module
@copyright: 2012-2015 by Brian Dolbec <[email protected]>
@license: GNU GPL2, see COPYING for details.
"""
from __future__ import print_function
import os
import sys
if sys.version_info[0] >= 3:
_unicode = str
else:
_unicode = unicode
from snakeoil.demandload import demandload
demandload(
"json:load",
"gkeys.lib:GkeysGPG",
"gkeys.keyhandler:KeyHandler",
)
class ActionBase(object):
    '''Base actions class holding common functions and init'''
def __init__(self, config, output=None, logger=None):
self.config = config
self.output = output
self.logger = logger
self.seeds = None
self._seedhandler = None
self._keyhandler = None
self._gpg = None
self.category = None
self.verify_recursion = False
@property
def gpg(self):
'''Holds the classwide GkeysGPG instance'''
if not self._gpg:
self._gpg = GkeysGPG(self.config,
self._set_category(self.category), self.logger)
else:
self._gpg.basedir = self._set_category(self.category)
return self._gpg
@property
def keyhandler(self):
'''Holds the classwide KeyHandler instance'''
if not self._keyhandler:
self._init_keyhandler()
return self._keyhandler
def _init_keyhandler(self):
self._keyhandler = KeyHandler(self.config, self.logger)
self._seedhandler = self._keyhandler.seedhandler
@property
def seedhandler(self):
'''Holds the classwide SeedHandler instance
        which is a convenience variable for the keyhandler's instance of it'''
if not self._seedhandler:
self._init_keyhandler()
return self._seedhandler
def _set_category(self, cat):
catdir = self.config.get_key('keyrings', cat)
if not catdir:
raise Exception("No keyring set.")
if not cat:
raise Exception("No category set.")
self.category = cat
self.logger.debug(_unicode("ACTIONS: _set_category; catdir = %s") % catdir)
self._set_trust(cat)
return catdir
def _set_trust(self, cat):
trust = self.config.get_key('trust-model', cat)
if trust in [None]:
trust = 'auto'
if '--trust-model' in self.config.defaults['gpg_defaults']:
index = self.config.defaults['gpg_defaults'].index('--trust-model')
self.config.defaults['gpg_defaults'][index+1] = trust
else:
self.config.defaults['gpg_defaults'].extend(['--trust-model', trust])
| gpl-2.0 | -6,627,050,046,202,881,000 | 25.558824 | 83 | 0.604282 | false |
dursobr/Pythics | pythics/html.py | 1 | 27284 | # -*- coding: utf-8 -*-
#
# Copyright 2008 - 2019 Brian R. D'Urso
#
# This file is part of Python Instrument Control System, also known as Pythics.
#
# Pythics is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pythics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pythics. If not, see <http://www.gnu.org/licenses/>.
#
#
# load libraries
#
import importlib
import re
import io
import sys, traceback
try:
from lxml import etree as ElementTree
lxml_loaded = True
except ImportError as e:
sys.stderr.write("Error: failed to import lxml module ({})".format(e))
from xml.etree import ElementTree
lxml_loaded = False
from pythics.settings import _TRY_PYSIDE
try:
if not _TRY_PYSIDE:
raise ImportError()
import PySide2.QtCore as _QtCore
import PySide2.QtGui as _QtGui
import PySide2.QtWidgets as _QtWidgets
import PySide2.QtPrintSupport as _QtPrintSupport
QtCore = _QtCore
QtGui = _QtGui
QtWidgets = _QtWidgets
QtPrintSupport = _QtPrintSupport
Signal = QtCore.Signal
Slot = QtCore.Slot
Property = QtCore.Property
USES_PYSIDE = True
except ImportError:
import PyQt5.QtCore as _QtCore
import PyQt5.QtGui as _QtGui
import PyQt5.QtWidgets as _QtWidgets
import PyQt5.QtPrintSupport as _QtPrintSupport
QtCore = _QtCore
QtGui = _QtGui
QtWidgets = _QtWidgets
QtPrintSupport = _QtPrintSupport
Signal = QtCore.pyqtSignal
Slot = QtCore.pyqtSlot
Property = QtCore.pyqtProperty
USES_PYSIDE = False
class XMLError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Hyperlink(QtWidgets.QLabel):
def __init__(self, parent, label, url):
super(Hyperlink, self).__init__('<a href=#'+url+'>'+label+'</a>')
self.setTextFormat(QtCore.Qt.RichText)
self.parent = parent
self.url = url
self.linkActivated.connect(self.go_to_url)
def go_to_url(self, url):
self.parent.scroll_to_anchor(self.url)
default_style_sheet = """
body {align: left; background-color: #eeeeee; margin: 10px; padding: 5px;
color: black; font-size: 12pt; font-family: default; font-style: normal;
font-weight: normal;}
"""
xmlschema_f = io.BytesIO(b'''\
<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<!-- definition of element groups -->
<xs:group name="bodygroup">
<xs:choice>
<xs:element ref="a"/>
<xs:element ref="br"/>
<xs:element ref="div"/>
<xs:element ref="hr"/>
<xs:element ref="h1"/>
<xs:element ref="h2"/>
<xs:element ref="h3"/>
<xs:element ref="h4"/>
<xs:element ref="h5"/>
<xs:element ref="h6"/>
<xs:element ref="object"/>
<xs:element ref="p"/>
<xs:element ref="table"/>
</xs:choice>
</xs:group>
<!-- definition of simple elements -->
<xs:element name="br"/>
<xs:element name="title" type="xs:string"/>
<!-- definition of attributes -->
<xs:attribute name="class" type="xs:string"/>
<xs:attribute name="classid" type="xs:string"/>
<xs:attribute name="colspan" type="xs:positiveInteger"/>
<xs:attribute name="height" type="xs:nonNegativeInteger"/>
<xs:attribute name="href" type="xs:string"/>
<xs:attribute name="id" type="xs:string"/>
<xs:attribute name="name" type="xs:string"/>
<xs:attribute name="rowspan" type="xs:positiveInteger"/>
<xs:attribute fixed="text/css" name="type" type="xs:string"/>
<xs:attribute name="value" type="xs:string"/>
<xs:attribute name="width">
<xs:simpleType>
<xs:restriction base="xs:string">
<xs:pattern value="[\-+]?(\d+|\d+(\.\d+)?%)"/>
</xs:restriction>
</xs:simpleType>
</xs:attribute>
<!-- definition of complex elements -->
<xs:element name="hr">
<xs:complexType>
<xs:attribute ref="width"/>
</xs:complexType>
</xs:element>
<xs:element name="h1">
<xs:complexType>
<xs:simpleContent>
<xs:extension base="xs:string">
<xs:attribute ref="class"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
</xs:element>
<xs:element name="h2">
<xs:complexType>
<xs:simpleContent>
<xs:extension base="xs:string">
<xs:attribute ref="class"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
</xs:element>
<xs:element name="h3">
<xs:complexType>
<xs:simpleContent>
<xs:extension base="xs:string">
<xs:attribute ref="class"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
</xs:element>
<xs:element name="h4">
<xs:complexType>
<xs:simpleContent>
<xs:extension base="xs:string">
<xs:attribute ref="class"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
</xs:element>
<xs:element name="h5">
<xs:complexType>
<xs:simpleContent>
<xs:extension base="xs:string">
<xs:attribute ref="class"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
</xs:element>
<xs:element name="h6">
<xs:complexType>
<xs:simpleContent>
<xs:extension base="xs:string">
<xs:attribute ref="class"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
</xs:element>
<xs:element name="p">
<xs:complexType>
<xs:simpleContent>
<xs:extension base="xs:string">
<xs:attribute ref="class"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
</xs:element>
<xs:element name="a">
<xs:complexType>
<xs:simpleContent>
<xs:extension base="xs:string">
<xs:attribute ref="href"/>
<xs:attribute ref="id"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
</xs:element>
<xs:element name="td">
<xs:complexType>
<xs:choice maxOccurs="unbounded" minOccurs="0">
<xs:group ref="bodygroup"/>
</xs:choice>
<xs:attribute ref="class"/>
<xs:attribute ref="width"/>
<xs:attribute ref="rowspan"/>
<xs:attribute ref="colspan"/>
</xs:complexType>
</xs:element>
<xs:element name="tr">
<xs:complexType>
<xs:choice maxOccurs="unbounded" minOccurs="0">
<xs:element ref="td"/>
<xs:element name="div">
<xs:complexType>
<xs:choice maxOccurs="unbounded" minOccurs="0">
<xs:element ref="td"/>
</xs:choice>
<xs:attribute ref="class" use="required"/>
</xs:complexType>
</xs:element>
</xs:choice>
<xs:attribute ref="class"/>
</xs:complexType>
</xs:element>
<xs:element name="table">
<xs:complexType>
<xs:choice maxOccurs="unbounded" minOccurs="0">
<xs:element ref="tr"/>
</xs:choice>
<xs:attribute ref="class"/>
<xs:attribute ref="width"/>
</xs:complexType>
</xs:element>
<xs:element name="param">
<xs:complexType>
<xs:attribute ref="name" use="required"/>
<xs:attribute ref="value" use="required"/>
</xs:complexType>
</xs:element>
<xs:element name="object">
<xs:complexType>
<xs:choice maxOccurs="unbounded" minOccurs="0">
<xs:element ref="param"/>
</xs:choice>
<xs:attribute ref="classid" use="required"/>
<xs:attribute ref="id"/>
<xs:attribute ref="width"/>
<xs:attribute ref="height"/>
<xs:attribute ref="class"/>
</xs:complexType>
</xs:element>
<xs:element name="style">
<xs:complexType>
<xs:simpleContent>
<xs:extension base="xs:string">
<xs:attribute ref="type"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
</xs:element>
<xs:element name="head">
<xs:complexType>
<xs:choice maxOccurs="unbounded" minOccurs="0">
<xs:element ref="title"/>
<xs:element minOccurs="0" ref="style"/>
</xs:choice>
</xs:complexType>
</xs:element>
<xs:element name="div">
<xs:complexType>
<xs:choice maxOccurs="unbounded" minOccurs="0">
<xs:group ref="bodygroup"/>
</xs:choice>
<xs:attribute ref="class" use="required"/>
</xs:complexType>
</xs:element>
<xs:element name="body">
<xs:complexType>
<xs:choice maxOccurs="unbounded" minOccurs="0">
<xs:group ref="bodygroup"/>
</xs:choice>
<xs:attribute ref="class"/>
</xs:complexType>
</xs:element>
<xs:element name="html">
<xs:complexType>
<xs:sequence>
<xs:element ref="head"/>
<xs:element ref="body"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>
''')
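# A minimal sketch of a layout document that validates against the schema
# above (the classid, id and parameter below are hypothetical, purely for
# illustration):
#
#   <html>
#     <head>
#       <title>Example panel</title>
#     </head>
#     <body>
#       <h1>Controls</h1>
#       <object classid='controls.Button' id='run_button' width='100'>
#         <param name='label' value='"Run"'/>
#       </object>
#     </body>
#   </html>
#
# Attribute and param values are passed through eval() where possible when the
# object is constructed (see layout() below), which is why literal strings are
# quoted twice.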
class CascadingStyleSheet(object):
def __init__(self, defaults):
# regular expression patterns for parsing
self.tag_pattern = re.compile('([\w.#]+)\s*\{([^}]+)}')
self.style_pattern = re.compile('([\w,-]+)\s*:\s*([\w,-,#]+)')
self.style_dict = dict()
self.parse_css(defaults)
def parse_css(self, txt):
tag_list = self.tag_pattern.findall(txt)
for t in tag_list:
tag = t[0]
style_txt = t[1]
if tag in self.style_dict:
d = self.style_dict[tag]
else:
d = dict()
style_list = self.style_pattern.findall(style_txt)
for t in style_list:
d[t[0]] = t[1]
self.style_dict[tag] = d
def get_tci_style(self, key, tag, cls=None, id=None):
if id != None:
s = tag + '#' + id
if (s in self.style_dict) and (key in self.style_dict[s]):
return self.style_dict[s][key]
s = '#' + id
if (s in self.style_dict) and (key in self.style_dict[s]):
return self.style_dict[s][key]
if cls != None:
s = tag + '.' + cls
if (s in self.style_dict) and (key in self.style_dict[s]):
return self.style_dict[s][key]
s = '.' + cls
if (s in self.style_dict) and (key in self.style_dict[s]):
return self.style_dict[s][key]
return self.style_dict[tag][key]
def get_style(self, key, element_list):
for element in reversed(element_list):
try:
tag = element.tag
cls = element.get('class')
element_id = element.get('id')
return self.get_tci_style(key, tag, cls, element_id)
except KeyError:
pass
raise XMLError("Could not find style for element: key=%s tag=%s cls=%s id=%s." % (key, tag, cls, element_id))
class HtmlWindow(QtWidgets.QScrollArea):
def __init__(self, parent, default_mod_name, logger=None):
super(HtmlWindow, self).__init__(parent)
self.parent = parent
self.default_mod_name = default_mod_name
self.logger = logger
self.controls = dict()
self.anonymous_controls = list()
self.setup()
def setup(self):
self.frame = QtWidgets.QFrame()
self.frame.setFrameStyle(QtWidgets.QFrame.NoFrame)
self.main_sizer = QtWidgets.QVBoxLayout()
self.frame.setLayout(self.main_sizer)
self.setWidget(self.frame)
self.setWidgetResizable(True)
# the html element tree
self.tree = None
self.css = CascadingStyleSheet(default_style_sheet)
self.anchor_dict = dict()
self.row_stack = list()
def reset(self):
# clear the layout
self.frame.hide()
self.frame.deleteLater()
self.controls = dict()
self.anonymous_controls = list()
self.setup()
def open_file(self, filename):
self.frame.hide()
# load and parse the file
if lxml_loaded:
parser = ElementTree.ETCompatXMLParser()
self.tree = ElementTree.parse(filename, parser=parser)
else:
self.tree = ElementTree.parse(filename)
# validate the file if lxml is available
if lxml_loaded:
xmlschema_tree = ElementTree.parse(xmlschema_f)
xmlschema = ElementTree.XMLSchema(xmlschema_tree)
xmlschema.assertValid(self.tree)
root = self.tree.getroot()
self.layout(root, list(), self.main_sizer)
self.frame.show()
return self.anonymous_controls, self.controls
def extract_size(self, attrs):
object_width = -1
object_proportion = 0
if 'width' in attrs:
width = attrs.pop('width')
if '%' in width:
object_proportion = float(width.strip('%'))
else:
object_width = int(width)
object_height = -1
if 'height' in attrs:
object_height = int(attrs.pop('height'))
return (object_width, object_height, object_proportion)
def get_padding(self, element_list):
pad = self.css.get_style('padding', element_list)
pads = pad.split()
if len(pads) == 1:
return int(pads[0].strip('px')), int(pads[0].strip('px'))
elif len(pads) == 2:
return int(pads[0].strip('px')), int(pads[1].strip('px'))
else:
raise XMLError("Unrecognized padding: %s." % pad)
def get_font(self, element_list):
font_size = int(self.css.get_style('font-size', element_list).strip('pt'))
font_family = self.css.get_style('font-family', element_list)
font_style = self.css.get_style('font-style', element_list)
font_weight = self.css.get_style('font-weight', element_list)
# italics
if font_style == 'italic':
italic = True
else:
italic = False
# weight
if font_weight == 'bold':
weight = 75
elif font_weight == 'light':
weight = 25
else:
weight = 50
font = QtGui.QFont(font_family, font_size, weight, italic)
return font
def layout(self, element, element_list, sizer):
tag = element.tag
if tag == 'html':
el = element_list[:]
el.append(element)
for subelement in element:
self.layout(subelement, el, sizer)
return
elif tag == 'head':
el = element_list[:]
el.append(element)
for subelement in element:
self.layout(subelement, el, sizer)
return
elif tag == 'title':
self.set_title(element.text)
return
elif tag == 'style':
self.css.parse_css(element.text)
return
elif tag == 'body':
el = element_list[:]
el.append(element)
bg_color = QtGui.QColor(self.css.get_style('background-color', el))
html_palette = QtGui.QPalette()
html_palette.setColor(QtGui.QPalette.Window, bg_color)
self.setPalette(html_palette)
self.setBackgroundRole(QtGui.QPalette.Window)
# set margins and borders
bdr = int(self.css.get_style('margin', el).strip('px'))
sizer.setContentsMargins(bdr, bdr, bdr, bdr)
v_pad, h_pad = self.get_padding(el)
sizer.setSpacing(v_pad)
# layout the body contents
self.row_begin(el, sizer)
self.row_layout(element, el, sizer)
self.row_end()
# stretch space on the bottom to take up and extra vertical space
sizer.addStretch(1.0)
return
elif tag == 'br':
el = element_list[:]
el.append(element)
# end this row and start the next one
self.row_end()
self.row_begin(el, sizer)
return
elif tag == 'div':
el = element_list[:]
el.append(element)
for subelement in element:
self.layout(subelement, el, sizer)
return
elif tag == 'table':
el = element_list[:]
el.append(element)
self.row_end()
self.row_begin(element_list, sizer)
width, height, proportion = self.extract_size(element.attrib)
table_sizer = QtWidgets.QGridLayout()
v_pad, h_pad = self.get_padding(el)
table_sizer.setVerticalSpacing(v_pad)
table_sizer.setHorizontalSpacing(h_pad)
row_sizer = self.row_get_sizer()
row_sizer.addLayout(table_sizer, proportion)
row = 0
col = 0
for subelement in element:
row, col = self.table_layout(subelement, table_sizer, el, row, col)
align = self.css.get_style('align', el)
self.row_set_align_and_proportion(align, proportion)
self.row_end()
self.row_begin(element_list, sizer)
return
elif tag == 'a':
el = element_list[:]
el.append(element)
if 'href' in element.attrib:
ob = Hyperlink(self, label=element.text,
url=element.attrib['href'].strip('#'))
ob.setFont(self.get_font(el))
row_sizer = self.row_get_sizer()
row_sizer.addWidget(ob, 0, QtCore.Qt.AlignBottom)
align = self.css.get_style('align', el)
self.row_set_align_and_proportion(align, 0)
return
else:
key = element.attrib['id']
self.anchor_dict[key] = self.row_get_sizer()
align = self.css.get_style('align', el)
self.row_set_align_and_proportion(align, 0)
return
elif tag == 'p':
el = element_list[:]
el.append(element)
ob = QtWidgets.QLabel(element.text)
ob.setFont(self.get_font(el))
row_sizer = self.row_get_sizer()
row_sizer.addWidget(ob, 0, QtCore.Qt.AlignBottom)
align = self.css.get_style('align', el)
self.row_set_align_and_proportion(align, 0)
return
elif tag == 'h1' or tag == 'h2' or tag == 'h3' or tag == 'h4' or tag == 'h5' or tag == 'h6':
el = element_list[:]
el.append(element)
self.row_end()
self.row_begin(element_list, sizer)
ob = QtWidgets.QLabel(element.text)
ob.setFont(self.get_font(el))
row_sizer = self.row_get_sizer()
row_sizer.addWidget(ob, 0, QtCore.Qt.AlignBottom)
align = self.css.get_style('align', el)
self.row_set_align_and_proportion(align, 50)
self.row_end()
self.row_begin(element_list, sizer)
return
elif tag == 'object':
el = element_list[:]
el.append(element)
row_sizer = self.row_get_sizer()
# lxml docs say to do this to make a copy of the attribute dictionary
attr_dict = dict(element.attrib)
width, height, proportion = self.extract_size(attr_dict)
full_object_name = attr_dict.pop('classid')
if 'id' in attr_dict:
element_id = attr_dict.pop('id')
else:
element_id = None
for p in element.getiterator(tag='param'):
attr_dict[p.get('name')] = p.get('value')
evaled_attrs = dict()
for k, v in attr_dict.items():
try:
evaled_attrs[k] = eval(v, globals())
except Exception:
evaled_attrs[k] = v
try:
name_list = full_object_name.split('.')
if len(name_list) > 1:
mod_name = '.'.join(name_list[0:-1])
else:
mod_name = self.default_mod_name
object_name = name_list[-1]
try:
# try to import from pythics first
mod = importlib.import_module('pythics.' + mod_name)
except ImportError:
# if not found, search globally for module
mod = importlib.import_module(mod_name)
ob = getattr(mod, object_name)(parent=self, **evaled_attrs)
if isinstance(ob, QtWidgets.QWidget):
widget = ob
elif hasattr(ob, '_widget'):
widget = ob._widget
else:
widget = None
if widget is not None:
if width > 0:
widget.setFixedWidth(width)
if height > 0:
widget.setFixedHeight(height)
row_sizer.addWidget(widget, proportion, QtCore.Qt.AlignBottom)
# store the widget
if element_id is not None:
self.controls[element_id] = ob
else:
self.anonymous_controls.append(ob)
except ImportError:
if element_id is not None:
s = '%s, id: %s' % (full_object_name, element_id)
else:
s = full_object_name
ss = "Error importing xml object '%s'. The library is not available." % s
#if self.logger is not None:
# self.logger.warning(ss)
load_error = True
except:
if element_id is not None:
s = '%s, id: %s' % (full_object_name, element_id)
else:
s = full_object_name
ss = "Error initializing xml object '%s'." % s
ss = ss + '\n' + traceback.format_exc(1)
#if self.logger is not None:
# self.logger.exception(ss)
load_error = True
else:
load_error = False
if load_error:
error_box = QtWidgets.QTextEdit()
error_box.setReadOnly(True)
palette = QtGui.QPalette()
palette.setColor(QtGui.QPalette.Base, QtGui.QColor('red'))
error_box.setPalette(palette)
error_box.setBackgroundRole(QtGui.QPalette.Base)
error_box.setPlainText(ss)
if width > 0:
error_box.setFixedWidth(width)
if height > 0:
error_box.setFixedHeight(height)
row_sizer.addWidget(error_box, proportion, QtCore.Qt.AlignBottom)
align = self.css.get_style('align', el)
self.row_set_align_and_proportion(align, proportion)
return
elif tag == 'hr':
# end this row, insert horizontal line, and start the next row
self.row_end()
self.row_begin(element_list, sizer)
ob = QtWidgets.QFrame()
ob.setFrameStyle(QtWidgets.QFrame.HLine|QtWidgets.QFrame.Sunken)
row_sizer = self.row_get_sizer()
row_sizer.addWidget(ob, 1, QtCore.Qt.AlignBottom)
self.row_set_align_and_proportion('left', 100)
self.row_end()
self.row_begin(element_list, sizer)
return
else:
raise XMLError("Unrecognized tag: %s." % tag)
def row_begin(self, element_list, sizer):
# start new row
row_sizer = QtWidgets.QHBoxLayout()
v_pad, h_pad = self.get_padding(element_list)
row_sizer.setSpacing(h_pad)
sizer.addLayout(row_sizer)
total_align = self.css.get_style('align', element_list) # default
if total_align == None:
total_align = 'left'
self.row_stack.append((row_sizer, total_align, 0))
return
def row_layout(self, element, element_list, sizer):
for subelement in element:
self.layout(subelement, element_list, sizer)
def row_get_sizer(self):
return self.row_stack[-1][0]
def row_set_align_and_proportion(self, sub_align, sub_proportion):
last_sizer, total_align, total_proportion = self.row_stack[-1]
if sub_align != None:
total_align = sub_align
total_proportion += sub_proportion
self.row_stack[-1] = (last_sizer, total_align, total_proportion)
def row_end(self):
row_sizer, total_align, total_prop = self.row_stack.pop()
needed_row_proportion = 100.0 - total_prop
if needed_row_proportion > 0:
if total_align == 'left':
row_sizer.insertStretch(-1, needed_row_proportion)
elif total_align == 'center':
row_sizer.insertStretch(0, needed_row_proportion/2.0)
row_sizer.insertStretch(-1, needed_row_proportion/2.0)
elif total_align == 'right':
row_sizer.insertStretch(0, needed_row_proportion)
else:
raise XMLError("Unrecognized alignment: %s." % total_align)
return
def table_layout(self, element, table_sizer, element_list, row, col):
tag = element.tag
if tag == 'tr':
col = 0
el = element_list[:]
el.append(element)
for subelement in element:
row, col = self.table_layout(subelement, table_sizer, el, row, col)
row += 1
return row, col
elif tag == 'td':
el = element_list[:]
el.append(element)
if 'colspan' in element.attrib:
colspan = int(element.attrib['colspan'])
else:
colspan = 1
if 'rowspan' in element.attrib:
rowspan = int(element.attrib['rowspan'])
else:
rowspan = 1
sizer = QtWidgets.QVBoxLayout()
v_pad, h_pad = self.get_padding(el)
sizer.setSpacing(v_pad)
table_sizer.addLayout(sizer, row, col, rowspan, colspan,
QtCore.Qt.AlignBottom)
self.row_begin(el, sizer)
self.row_layout(element, el, sizer)
self.row_end()
width, height, proportion = self.extract_size(element.attrib)
if proportion > 0:
table_sizer.setColumnStretch(col, proportion)
col += colspan
return row, col
else:
raise XMLError("Unrecognized tag: %s." % tag)
def scroll_to_anchor(self, name):
position = self.anchor_dict[name].geometry()
#bar->setValue(bar->maximum())
self.verticalScrollBar().setValue(self.verticalScrollBar().maximum())
self.ensureVisible(position.left(), position.top())
def set_title(self, title):
# to be overriden by subclasses
pass
| gpl-3.0 | -7,351,712,269,783,081,000 | 33.580482 | 117 | 0.549626 | false |
stoeps13/ibmcnxscripting | WebSphere/cfgJ2EERoleSocialMail.py | 1 | 2136 | # cfgJ2EERolesSocialMail
#
# Author: Christoph Stoettner
# Blog: http://www.stoeps.de
# E-Mail: [email protected]
#
# Description:
# Script is tested with IBM Connections 4.5 CR2, CR3
#
# You can enable and disable Socialmail Integration through J2EE Roles with this script
# History:
# 20140225 Christoph Stoettner Initial Version
# 20140406 Christoph Stoettner Bugfixing Version
def printMenu():
state = ''
    while state not in ( 'ENABLE', 'DISABLE', 'EXIT' ):
state = raw_input( 'Do you want to enable or disable Socialmail Integration? (E|D|X)(ENABLE|DISABLE|EXIT)' ).upper()
if state == 'E':
state = 'ENABLE'
break
elif state == 'D':
state = 'DISABLE'
break
elif state == 'X':
state = 'EXIT'
break
else:
continue
if state == 'ENABLE':
print 'Enable socialmail'
auth = ''
        while auth not in ( 'Y', 'YES', 'N', 'NO' ):
            auth = raw_input('Enable Mail-User Role for \"All-Authenticated in Realm\"? (Y|N)(YES|NO)').upper()
            if auth in ( 'Y', 'YES' ):
                role_auth = 'No Yes'
                break
            elif auth in ( 'N', 'NO' ):
role_auth = 'No No'
break
else:
continue
        role_users = raw_input('Enable Mail-User Role for single Users? (Type case-sensitive uid, empty for none, multiple uids separated by \"|\")')
        role_groups = raw_input('Enable Mail-User Role for a Group? (Type case-sensitive Groupname, empty for no groups, multiple Groups separated by \"|\")')
elif state == 'DISABLE':
print 'Disable socialmail'
role_auth='No No'
role_users=''
role_groups=''
if state != 'EXIT':
apps = ['Common','WidgetContainer']
for app in apps:
print "Setting Role for " + app
AdminApp.edit( app, '[-MapRolesToUsers [["mail-user" ' + role_auth + ' "' + role_users + '" "' + role_groups + '" ]]]' )
AdminConfig.save()
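# For illustration, with role_auth = 'No Yes', role_users = 'uid1|uid2' and
# role_groups = 'GroupA' (hypothetical values), the option string handed to
# AdminApp.edit above becomes
#
#   [-MapRolesToUsers [["mail-user" No Yes "uid1|uid2" "GroupA" ]]]
#
# i.e. role name, Everyone flag, All-Authenticated flag, user list, group list.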
printMenu()
| apache-2.0 | 4,649,441,762,398,965,000 | 32.451613 | 156 | 0.537921 | false |
rgeorgi/intent | intent/tests/treetests.py | 1 | 22616 | import os
import unittest
import re
import intent
from intent.alignment.Alignment import Alignment
from intent.igt.rgxigt import RGWordTier
from intent.scripts.basic.corpus_stats import pos_stats, CONLL_TYPE
from intent.trees import IdTree, project_ps, TreeMergeError, DepTree, Terminal, TreeError, project_ds, get_dep_edges, \
DEPSTR_CONLL, DEPSTR_PTB, DEPSTR_STANFORD, read_conll_file
from intent.utils.env import testfile_dir
__author__ = 'rgeorgi'
class ProjectTest(unittest.TestCase):
def setUp(self):
self.t = IdTree.fromstring('''
(S
(NP
(DT The)
(NN teacher)
)
(VP
(VBD gave)
(NP
(DT a)
(NN book)
)
(PP
(IN to)
(NP
(DT the)
(NN boy)
)
)
(NP
(NN yesterday)
)
)
)''')
self.proj = IdTree.fromstring(
'''(S
(VBD rhoddodd)
(NP
(DT yr)
(NN athro)
)
(NP
(NN lyfr)
)
(PP
(IN+DT i'r)
(NN bachgen)
)
(NP
(NN ddoe)
)
)''')
self.aln = intent.alignment.Alignment.Alignment([(1,2), (2,3), (3,1), (5,4), (6, 5), (7, 5), (8, 6), (9, 7)])
def test_projection(self):
proj = project_ps(self.t, RGWordTier.from_string("rhoddodd yr athro lyfr i'r bachgen ddoe"), self.aln)
# Reassign the ids after everything has moved around.
proj.assign_ids()
self.assertEqual(self.proj, proj)
def test_duplicates(self):
"""
Test the case where an English word aligns to multiple language words.
"""
src_t = IdTree.fromstring('(ROOT (SBARQ (WHNP (WP Who)) (SQ (VP (VBZ else?)))))')
tgt_w = RGWordTier.from_string('sa-lo sa-lo')
tgt_t = IdTree.fromstring('(ROOT (SBARQ (WHNP (WP sa-lo) (WP sa-lo))))')
aln = Alignment([(1,1),(1,2)])
result = project_ps(src_t, tgt_w, aln)
self.assertTrue(tgt_t.similar(result))
def ordering_test(self):
"""
        This particular tree structure results in a child of a tree being changed while iterating
        through the children, and required a fix: if such a change is detected, we start iterating
        over the children again, so we're not holding onto a stale pointer.
"""
src_t = IdTree.fromstring('''(ROOT (FRAG
(ADVP (RB Probably))
(SBAR (S
(NP (PRP you))
(VP (VBP find)
(ADJP (JJ something))
)
)
)
))''')
tgt_t = IdTree.fromstring('''(ROOT (FRAG
(VBP chitt-u-m)
(ADVP (RB hola))
(UNK ni)
(UNK hou)
(VBP chitt-u-m)
))''')
tgt_w = RGWordTier.from_string('''chitt-u-m hola ni hou chitt-u-m''')
aln = Alignment([(1,2),(3,1),(3,5)])
proj = project_ps(src_t, tgt_w, aln)
self.assertTrue(tgt_t.similar(proj))
def failed_insertion_test(self):
t = IdTree.fromstring('''(ROOT
(SBARQ
(WHNP (WDT What) (NP (NN kind) (PP (IN of) (NP (NNP work,)))))
(SQ (VP (VBZ then?)))))''')
tgt_w = RGWordTier.from_string('kam-a na them lis-no-kha hou')
aln = Alignment([(1, 3), (2, 5), (4, 1), (5, 5)])
project_ps(t, tgt_w, aln)
class PromoteTest(unittest.TestCase):
def setUp(self):
self.t = IdTree.fromstring('(S (NP (DT the) (NN boy)) (VP (VBD ran) (IN away)))')
def test_equality(self):
t2 = self.t.copy()
t3 = self.t.copy()
self.assertEqual(self.t, t2)
t2.find_index(1).delete()
self.assertNotEqual(self.t, t2)
# Change the id
t3n = t3.find_index(1)
t3id = t3n.id
t3n.id = 'asdf'
self.assertNotEqual(self.t, t3)
# Change it back.
t3n.id = t3id
self.assertEqual(self.t, t3)
def test_promote(self):
t2 = self.t.copy()
t3 = self.t.copy()
vp = self.t[1]
vp.promote()
self.assertNotEqual(self.t, t2)
self.assertEqual(t2, t3)
class SpanTest(unittest.TestCase):
def setUp(self):
self.t = IdTree.fromstring('''(ROOT
(SBARQ
(WHNP (WP Who))
(SQ (VBP do)
(NP (PRP you))
(VP (VB believe)
(VP (VBN called))))))'''
)
def test_span(self):
self.assertEqual(self.t.span(), (1,5))
class MergeTests(unittest.TestCase):
def setUp(self):
self.t = IdTree.fromstring('''(ROOT
(SBARQ
(WHNP (WP Who))
(SQ (VBP do)
(NP (PRP you))
(VP (VB believe)
(VP (VBN called))))))''')
def test_merge_interior_nodes(self):
t = self.t.copy()
t[0].merge(0, 1, unify_children=False)
self.assertNotEqual(t, self.t)
t2 = IdTree.fromstring('''(ROOT (SBARQ (WHNP+SQ
(WP Who)
(VBP do)
(NP (PRP you))
(VP (VB believe)
(VP (VBN called))
))
))''')
self.assertTrue(t2.similar(t))
self.assertEqual(t2[0,0].label(), 'WHNP+SQ')
t[(0,0)].merge(0,1, unify_children=False)
        self.assertEqual(t[(0,0,0)].span(), (1,2))
def test_merge_preterminal_nodes_no_unify(self):
t = IdTree.fromstring('''(NP (DT The) (NN Boy) (VB Ran))''')
self.assertEqual(t.span(), (1,3))
self.assertEqual(t[0].span(), (1,1))
self.assertEqual(t[1].span(), (2,2))
t.merge(0,1, unify_children=False)
self.assertEqual(t[0].label(), 'DT+NN')
self.assertEqual(t[0].span(), (1,2))
self.assertEqual(t.span(), (1,3))
def test_merge_preterminal_nodes_unify(self):
t = IdTree.fromstring('''(NP (DT The) (NN The) (VB Ran))''')
l = t.leaves()
l[0].index = 1
l[1].index = 1
l[2].index = 2
self.assertEqual(t.span(), (1,2))
self.assertEqual(t[0].span(), (1,1))
self.assertEqual(t[1].span(), (1,1))
t.merge(0,1, unify_children=True)
self.assertEqual(t[0].label(), 'DT+NN')
self.assertEqual(t[0].span(), (1,1))
self.assertEqual(t.span(), (1,2))
def test_merge_preterminal_and_nonterminal_wo_unify(self):
t = self.t.copy()
sq = t[0,1,2]
self.assertEqual(sq.span(), (4,5))
sq.merge(0,1, unify_children=False)
self.assertEqual(sq.span(), (4,5))
self.assertEqual(len(sq[0]), 2)
def test_merge_preterminal_and_nonterminal_w_unify(self):
t = self.t.copy()
sq = t[0,1,2]
self.assertEqual(sq.span(), (4,5))
sq.merge(0,1, unify_children=True)
self.assertEqual(sq.spanlength(), 0)
self.assertEqual(len(sq[0]), 1)
def internal_merge_test(self):
t = IdTree.fromstring('''(ROOT
(UCP
(S
(NP (PRP u-tus-u-kV-nɨŋ))
(VP
(RB u-tus-u-kV-nɨŋ)
(ADJP
(RB loĩs-ma)
(JJ loĩs-ma)
(S (VP (VP (VB loĩs-ma) (NP (PRP kat-a-ŋs-e)) (ADVP (RB loĩs-ma))))))))
(SBAR
(IN loĩs-ma)
(S
(NP (PRP u-tus-u-kV-nɨŋ))
(VP (VBP u-tus-u-kV-nɨŋ) (RB u-tus-u-kV-nɨŋ) (VP (VB loĩs-ma) (NP (NN loĩs-ma))))))))''')
UCP = t[0]
self.assertEqual(len(UCP), 2)
UCP.merge(0,1)
self.assertEqual(len(UCP), 1)
self.assertEqual(len(UCP[0]), 4)
def ctn_merge_2_test(self):
src_t = IdTree.fromstring('''(ROOT
(UCP
(S
(NP (PRP They))
(VP
(VBP are)
(RB also)
(ADJP
(RB too)
(JJ lazy)
(S
(VP
(TO to)
(VP (VB take) (NP (PRP it)) (ADVP (RB out,))))))))
(CC and)
(SBAR
(IN so)
(S
(NP (PRP they))
(VP (VBP do) (RB not) (VP (VB drink) (NP (NN it.))))))))''')
tgt_w = RGWordTier.from_string('loĩs-ma yaŋ hunci-suma kat-a-ŋs-e kina u-tus-u-kV-nɨŋ')
aln = Alignment([(16, 6), (3, 2), (7, 1), (15, 3), (9, 3), (11, 3), (12, 6), (14, 6), (13, 6), (4, 3), (5, 3)])
proj = project_ps(src_t, tgt_w, aln)
self.assertEqual(len(proj.leaves()), 6)
class DepTreeTests(unittest.TestCase):
def setUp(self):
dt_string = '''nsubj(ran-2, John-1)
root(ROOT-0, ran-2)
det(woods-5, the-4)
prep_into(ran-2, woods-5)'''
self.dt = DepTree.fromstring(dt_string)
def test_find(self):
j = self.dt.find_index(1)
self.assertEqual(j, DepTree('John', [], id=j.id, word_index=1, type='nsubj'))
def test_copy(self):
t2 = self.dt.copy()
self.assertEqual(self.dt, t2)
def test_equality(self):
t2 = self.dt.copy()
t2._word_index = -1
self.assertNotEqual(t2, self.dt)
t2._word_index = 0
self.assertEqual(t2, self.dt)
t2._label = 'notroot'
self.assertNotEqual(t2, self.dt)
def test_span(self):
self.assertRaises(TreeError, self.dt.span)
def parse_test(self):
dep_string = ('''advmod(meet-3, When-1)
nsubj(meet-3, we-2)
xsubj(know-7, we-2)
advmod(meet-3, let's-4)
dep(meet-3, get-5)
aux(know-7, to-6)
xcomp(get-5, know-7)
det(talk.-13, each-8)
num(let's-10, other,-9)
npadvmod(laugh,-11, let's-10)
amod(talk.-13, laugh,-11)
amod(talk.-13, let's-12)
dobj(know-7, talk.-13)''')
self.assertRaises(TreeError, DepTree.fromstring, dep_string)
class DepTreeCycleTest(unittest.TestCase):
def test_cycle(self):
dt_string = '''nsubj(did-2, And-1) root(ROOT-0, did-2) dobj(did-2, you-3) dep(did-2, make-4) dobj(make-4, rice-5) nsubj(day,-7, rice-5) rcmod(rice-5, day,-7) dep(did-2, eat-9) conj_and(make-4, eat-9) dobj(eat-9, it?-10)'''
dt = DepTree.fromstring(dt_string)
self.assertEqual(dt[0].label(), 'did')
class DepTreeParseTests(unittest.TestCase):
def broken_parse_test(self):
dt_string = '''nsubj(get-2, I-1) nsubj(get-2', I-1) conj_and(get-2, get-2') prt(get-2, up-3) prep_at(get-2, eight-5) dep(is-11, that-8) det(problem-10, the-9) nsubj(is-11, problem-10) xsubj(cook-13, problem-10) prepc_after(get-2', is-11) aux(cook-13, to-12) xcomp(is-11, cook-13) xcomp(is-11, eat-15) conj_and(cook-13, eat-15) dobj(cook-13, rice.-16)
'''
self.assertRaises(TreeError, DepTree.fromstring, dt_string)
class SwapTests(unittest.TestCase):
def setUp(self):
self.t = IdTree.fromstring('''(S (NP (DT The) (NN Boy)) (VP (VB Ran) ))''')
self.t2 = IdTree.fromstring('''(NP (DT The) (ADJ quick) (NN Fox))''')
def test_swap_nonterminals(self):
t = self.t.copy()
t.swap(0, 1)
# Now, set up the leaves with the correct indices...
t2 = IdTree.fromstring('''(S (VP (VB Ran)) (NP (DT The) (NN Boy)))''')
l = t2.leaves()
l[0].index = 3
l[1].index = 1
l[2].index = 2
self.assertTrue(t.similar(t2))
def test_swap_preterminals(self):
t2 = self.t2.copy()
t2.swap(0,2)
t3 = IdTree.fromstring('''(NP (NN Fox) (DT quick) (ADJ The))''')
l = t3.leaves()
l[0].index = 3
l[2].index = 1
self.assertTrue(t2.similar(t3))
class DeleteTests(unittest.TestCase):
def setUp(self):
self.t = IdTree.fromstring('''(ROOT (NP (DT The) (NP (NN Boy))))''')
def propagate_test(self):
tgt = IdTree.fromstring('''(ROOT (NP (DT The)))''')
self.assertFalse(self.t.similar(tgt))
# Delete the "NN" in boy.
self.t[0,1,0].delete()
self.assertTrue(self.t.similar(tgt))
def promote_test(self):
"""
Ensure that the "promote=" keyword works as expected.
"""
t = self.t.copy()
t[0].delete(promote=True)
tgt = IdTree.fromstring('''(ROOT (DT The) (NP (NN Boy)))''')
self.assertTrue(t.similar(tgt))
t2 = self.t.copy()
t2[0].delete(promote=False)
tgt = IdTree.fromstring('''(ROOT)''')
self.assertTrue(t2.similar(tgt))
t3 = self.t.copy()
t3[0][1].delete(promote=True)
tgt=IdTree.fromstring('''(ROOT (NP (DT The) (NN Boy)))''')
self.assertTrue(t3.similar(tgt))
t4 = self.t.copy()
t4[0][1].delete(promote=False)
tgt=IdTree.fromstring('''(ROOT (NP (DT The)))''')
self.assertTrue(t4.similar(tgt))
def replace_test(self):
tgt = IdTree.fromstring('''(ROOT (NP (DT The) (NN Dog)))''')
self.assertFalse(self.t.similar(tgt))
# Replace the "NP" in (NP (NN Boy)) with (NN Dog)
self.t[0,1].replace(IdTree('NN',[Terminal('Dog', index=2)]))
self.assertTrue(self.t.similar(tgt))
class ProjectDS(unittest.TestCase):
def setUp(self):
self.ds1str = """
root(ROOT-0, gave-3)
nsubj(gave-3, teacher-2)
det(teacher-2, the-1)
dobj(gave-3, book-5)
det(book-5, a-4)
prep_to(gave-3, to-6)
indobj(to-6, boy-8)
det(boy-8, the-7)
mod(gave-3, yesterday-9)"""
self.ds1bstr = """
(ROOT[0]
(gave[3]
(teacher[2] (the[1]))
(book[5] (a[4]))
(to[6] (boy[8] (the[7])))
(yesterday[9])
))
"""
self.ds2str = """ (ROOT[0]-root
(Rhoddodd[1]-nsubj
(athro[3] (yr[2]) )
(lyfr[4])
(i'r[5] (bachgen[6]) )
(ddoe[7])
)
)"""
self.ds3str = """
nmod(meet-4, Tomorrow-1)
nsubj(meet-4, Mary-2)
aux(meet-4, will-3)
root(ROOT-0, meet-4)
dobj(meet-4, Hans-5)"""
def test_stanford_ds_string(self):
"""
Unit tests for parsing the stanford dependency format, and ensuring it is written back out correctly.
"""
ds1 = DepTree.fromstring(self.ds1str)
        self.assertTrue(ds1.stanford_str(separator='\n').strip() == re.sub(r'\s\s+', '\n', self.ds1str.strip()))
def test_ptb_ds_string(self):
"""
Unit test for parsing the PTB-style format.
"""
ds2 = DepTree.fromstring(self.ds1bstr, stype=DEPSTR_PTB)
def test_ptb_stanford_equiv(self):
ds1 = DepTree.fromstring(self.ds1str, stype=DEPSTR_STANFORD)
ds2 = DepTree.fromstring(self.ds1bstr, stype=DEPSTR_PTB)
self.assertTrue(ds1.structurally_eq(ds2))
def test_projection_1(self):
"""
Testcase for the DS projection in Fei/Will's paper.
"""
ds1 = DepTree.fromstring(self.ds1str)
ds2 = DepTree.fromstring(self.ds2str, stype=DEPSTR_PTB)
# -----------------------------------------------------------------------------
# 1 2 3 4 5 6 7
# Rhoddod yr athro lyfr i'r bachgen ddoe
# gave-3sg the teacher book to-the boy yesterday
#
# The teacher gave a book to the boy yesterday
# 1 2 3 4 5 6 7 8 9
tgt_w = RGWordTier.from_string("Rhoddodd yr athro lyfr i'r bachgen ddoe")
aln = Alignment([(1,2),(2,3),(3,1),(5,4),(6,5),(7,5),(8,6),(9,7)])
# And now, project...
ds_proj = project_ds(ds1, tgt_w, aln)
self.assertTrue(ds2.structurally_eq(ds_proj))
def test_projection_2(self):
ds3 = DepTree.fromstring(self.ds3str)
# English sentence:
# 1 2 3 4 5
# "Tomorrow Mary will meet Hans"
#
# Den Hans wird Maria morgen treffen
# 1 2 3 4 5 6
aln = Alignment([(1,5),(2,4),(3,3),(4,6),(5,2)])
tgt_w = RGWordTier.from_string("Den Hans wird Maria morgen treffen")
ds_proj = project_ds(ds3, tgt_w, aln)
exp_proj = DepTree.fromstring("""
(ROOT[0]
(treffen[6]
(Hans[2] (Den[1]))
(wird[3])
(Maria[4])
(morgen[5])
))
""",
stype=DEPSTR_PTB)
self.assertTrue(ds_proj.structurally_eq(exp_proj))
def test_projection_3(self):
ds3 = DepTree.fromstring(self.ds3str)
tgt_w = RGWordTier.from_string("morgen wird Maria Den Hans treffen")
# English sentence:
# 1 2 3 4 5
# "Tomorrow Mary will meet Hans"
#
# morgen wird Maria Den Hans treffen
# 1 2 3 4 5 6
aln = Alignment([(1,1),(2,3),(3,2),(4,6),(5,5)])
ds_proj = project_ds(ds3, tgt_w, aln)
exp_proj = DepTree.fromstring("""(ROOT[0]
(treffen[6]
(morgen[1])
(wird[2])
(Maria[3] (Den[4]))
(Hans[5])))""", stype=DEPSTR_PTB)
self.assertTrue(ds_proj.structurally_eq(exp_proj))
class CONLLTests(unittest.TestCase):
def setUp(self):
self.s = '''
1 Cathy Cathy N N eigen|ev|neut 2 su _ _
2 zag zie V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 hen hen Pron Pron per|3|mv|datofacc 2 obj1 _ _
4 wild wild Adj Adj attr|stell|onverv 5 mod _ _
5 zwaaien zwaai N N soort|mv|neut 2 vc _ _
6 . . Punc Punc punt 5 punct _ _'''
self.conll_path = os.path.join(testfile_dir, 'conll/test.conll')
def test_conll_read(self):
ds = DepTree.fromstring(self.s, stype=DEPSTR_CONLL)
tgt = DepTree.fromstring("""
(ROOT[0]
(zag[2]
(Cathy[1])
(hen[3])
(zwaaien[5] (wild[4])
(.[6]))
))""", stype=DEPSTR_PTB)
self.assertTrue(ds.structurally_eq(tgt))
def test_conll_write(self):
ds = DepTree.fromstring(self.s, stype=DEPSTR_CONLL)
print(ds.to_conll())
def test_conll_file(self):
trees = read_conll_file(self.conll_path)
self.assertEqual(1, len(trees))
def test_conll_file_stats(self):
pos_stats([self.conll_path], filetypes=CONLL_TYPE)
| mit | 4,229,916,681,892,919,300 | 34.635647 | 358 | 0.413889 | false |
gypleon/codesCloud | assignments/1/B/1-B.py | 1 | 3657 | #!/usr/local/bin/python3
import math
import numpy as np
# get prime factors unduplicatedly, optimized
def getPrimes( num ):
pset = set()
if 0 == num % 2:
pset.add(2)
for i in range(3, num+1, 2):
if 0 == num % i:
isprime = True
for j in range(3, int(math.sqrt(i))+1, 2):
if 0 == i % j and i != j:
isprime = False
break
if isprime:
pset.add(i)
if len(pset) == 0:
pset.add(num)
return pset
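# Quick sanity check of getPrimes (illustrative only, not part of the assignment output):
#   getPrimes(12) -> {2, 3}    (12 = 2*2*3, duplicates collapsed by the set)
#   getPrimes(13) -> {13}      (13 is itself prime)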
# get prime factor lists, optimized: sorted by set length
def getPrimesLists( start, end ):
plist = list()
for i in range(start, end+1):
plist.append(getPrimes(i))
plist.sort(key=lambda ps:len(ps))
return plist
# find frequent itemsets, to be optimized: implemented in multi-round map-reduce
def findFrequentItemsets( buckets, candset, cursize, thrd ):
# print(len(buckets), len(candset), cursize, thrd)
filist = list()
newcandset = list()
# count frequent item sets in current loop
for itemset in buckets:
if len(itemset) == cursize:
maybe = False
if len(candset) == 0:
maybe = True
else:
for cand in candset:
if set(cand).issubset(set(itemset)):
maybe = True
if maybe:
count = 0
for bucket in buckets:
if set(itemset).issubset(set(bucket)):
count += 1
if count >= thrd:
existed = False
for check in filist:
if itemset == check:
existed = True
break
if not existed:
filist.append(itemset)
break
# construct candidate item sets for next loop
# print(filist)
for i in range(len(filist)-1):
for j in range(i+1, len(filist)):
cand = list(set(filist[i]).union(set(filist[j])))
if len(cand) == cursize+1:
existed = False
for check in newcandset:
if cand == check:
existed = True
break
if not existed:
newcandset.append(cand)
if len(newcandset) == 0:
return filist
# next loop
filist.extend(findFrequentItemsets( buckets, newcandset, cursize+1, thrd ))
# return current result
return filist
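# How this is typically driven (a sketch mirroring main() below; the numbers are just
# an example): buckets = getPrimesLists(2, 10000) gives one prime-factor set per
# integer, and findFrequentItemsets(buckets, [], 3, 50) starts from itemsets of size 3,
# keeps those contained in at least 50 buckets, and repeatedly unions surviving
# itemsets to build the next round's candidates (an Apriori-style level-wise search).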
# sort frequent itemsets list & output
def sortFISandOutput( filist, outputfile ):
outlist = list()
dtype = list()
order = list()
for i in filist:
outlist.append(tuple(sorted(list(i))))
print(outlist)
maxfield = len(outlist[len(outlist)-1])
for i in range(1, maxfield+1):
dtype.append((str(i), int))
order.append(str(i))
# print(dtype, order)
outlist = np.array(outlist, dtype = dtype)
outlist.sort(order = order)
with open(outputfile, 'w') as f:
for out in outlist:
# print(out)
for i in out:
f.write("%2d\t" % i)
f.write("\n")
return 0
if __name__ == "__main__":
start = 2
end = 10000
dimstart = 3
threshold = 50
outputfile = './B.txt'
buckets = getPrimesLists(start, end)
sortFISandOutput( findFrequentItemsets(buckets, [], dimstart, threshold), outputfile)
# print( findFrequentItemsets(buckets, set([]), dimstart, threshold))
| apache-2.0 | 6,717,562,176,186,580,000 | 31.651786 | 89 | 0.507793 | false |
woutersmet/Zeosummer | lib/zeobuilder/nodes/vector.py | 1 | 6052 | # Zeobuilder is an extensible GUI-toolkit for molecular model construction.
# Copyright (C) 2007 - 2009 Toon Verstraelen <[email protected]>, Center
# for Molecular Modeling (CMM), Ghent University, Ghent, Belgium; all rights
# reserved unless otherwise stated.
#
# This file is part of Zeobuilder.
#
# Zeobuilder is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# In addition to the regulations of the GNU General Public License,
# publications and communications based in parts on this program or on
# parts of this program are required to cite the following article:
#
# "ZEOBUILDER: a GUI toolkit for the construction of complex molecules on the
# nanoscale with building blocks", Toon Verstraelen, Veronique Van Speybroeck
# and Michel Waroquier, Journal of Chemical Information and Modeling, Vol. 48
# (7), 1530-1541, 2008
# DOI:10.1021/ci8000748
#
# Zeobuilder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
from zeobuilder import context
from zeobuilder.nodes.reference import SpatialReference
from zeobuilder.nodes.elementary import GLReferentBase
from zeobuilder.nodes.color_mixin import ColorMixin
from zeobuilder.gui.fields_dialogs import DialogFieldInfo
import zeobuilder.gui.fields as fields
from molmod.transformations import Complete
import numpy
import math
__all__ = ["Vector"]
class Vector(GLReferentBase):
#
# State
#
def initnonstate(self):
GLReferentBase.initnonstate(self)
self.orientation = Complete()
self.set_children([
SpatialReference(prefix="Begin"),
SpatialReference(prefix="End")
])
#
# Dialog fields (see action EditProperties)
#
dialog_fields = set([
DialogFieldInfo("Basic", (0, 2), fields.read.VectorLength(
label_text="Vector length"
)),
])
#
# Draw
#
def draw(self):
self.calc_vector_dimensions()
context.application.vis_backend.transform(self.orientation)
#
# Revalidation
#
def revalidate_total_list(self):
if self.gl_active:
vb = context.application.vis_backend
vb.begin_list(self.total_list)
if self.visible:
vb.push_name(self.draw_list)
vb.push_matrix()
self.draw_selection()
vb.call_list(self.draw_list)
vb.pop_matrix()
vb.pop_name()
vb.end_list()
self.total_list_valid = True
def revalidate_draw_list(self):
if self.gl_active:
GLReferentBase.revalidate_draw_list(self)
def revalidate_boundingbox_list(self):
if self.gl_active:
vb = context.application.vis_backend
#print "Compiling selection list (" + str(self.boundingbox_list) + "): " + str(self.name)
vb.begin_list(self.boundingbox_list)
vb.push_matrix()
vb.transform(self.orientation)
self.revalidate_bounding_box()
self.bounding_box.draw()
vb.pop_matrix()
vb.end_list()
self.boundingbox_list_valid = True
#
# Frame
#
def get_bounding_box_in_parent_frame(self):
return self.bounding_box.transformed(self.orientation)
#
# Vector
#
def shortest_vector_relative_to(self, other):
b = self.children[0].translation_relative_to(other)
e = self.children[1].translation_relative_to(other)
if (b is None) or (e is None):
return None
else:
return self.parent.shortest_vector(e - b)
def calc_vector_dimensions(self):
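        # Descriptive note (inferred from the code below, not from upstream docs):
        # this orients the vector's local frame so that its z-axis runs from the
        # begin target to the end target; c is the cosine of the angle with the
        # global z-axis, and the rotation axis (a, b, 0) lies in the xy-plane,
        # perpendicular to the xy-projection of the vector.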
relative_translation = self.shortest_vector_relative_to(self.parent)
if relative_translation is None:
self.length = 0
else:
self.length = math.sqrt(numpy.dot(relative_translation, relative_translation))
if self.length > 0:
self.orientation.t = self.children[0].translation_relative_to(self.parent)
#axis = numpy.cross(relative_translation, numpy.array([1.0, 0.0, 0.0]))
c = relative_translation[2] / self.length
if c >= 1.0:
self.orientation.set_rotation_properties(0, numpy.array([1.0, 0.0, 0.0]), False)
elif c <= -1.0:
self.orientation.set_rotation_properties(math.pi, numpy.array([1.0, 0.0, 0.0]), False)
else:
x, y = relative_translation[0], relative_translation[1]
if abs(x) < abs(y):
signy = {True: 1, False: -1}[y >= 0]
a = -signy
b = signy * x / y
else:
signx = {True: 1, False: -1}[x >= 0]
a = -signx * y / x
b = signx
self.orientation.set_rotation_properties(math.acos(c), numpy.array([a, b, 0.0]), False)
def define_target(self, reference, new_target):
GLReferentBase.define_target(self, reference, new_target)
self.invalidate_boundingbox_list()
self.invalidate_draw_list()
def target_moved(self, reference, target):
GLReferentBase.target_moved(self, reference, target)
self.invalidate_boundingbox_list()
self.invalidate_draw_list()
def get_neighbor(self, one_target):
if self.children[0].target == one_target:
return self.children[1].target
else:
return self.children[0].target
| gpl-3.0 | -3,980,150,933,196,877,000 | 32.810056 | 107 | 0.61649 | false |
googleads/googleads-dfa-reporting-samples | python/v3_3/dfareporting_utils.py | 1 | 4475 | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handles common tasks across all API samples."""
import argparse
import os
from googleapiclient import discovery
import httplib2
from oauth2client import client
from oauth2client import file as oauthFile
from oauth2client import tools
API_NAME = 'dfareporting'
API_VERSION = 'v3.3'
API_SCOPES = ['https://www.googleapis.com/auth/dfareporting',
'https://www.googleapis.com/auth/dfatrafficking',
'https://www.googleapis.com/auth/ddmconversions']
# Filename used for the credential store.
CREDENTIAL_STORE_FILE = API_NAME + '.dat'
def get_arguments(argv, desc, parents=None):
"""Validates and parses command line arguments.
Args:
argv: list of strings, the command-line parameters of the application.
desc: string, a description of the sample being executed.
parents: list of argparse.ArgumentParser, additional command-line parsers.
Returns:
The parsed command-line arguments.
"""
# Include the default oauth2client argparser
parent_parsers = [tools.argparser]
if parents:
parent_parsers.extend(parents)
parser = argparse.ArgumentParser(
description=desc,
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=parent_parsers)
return parser.parse_args(argv[1:])
def load_application_default_credentials():
"""Atempts to load application default credentials.
Returns:
A credential object initialized with application default credentials or None
if none were found.
"""
try:
credentials = client.GoogleCredentials.get_application_default()
return credentials.create_scoped(API_SCOPES)
except client.ApplicationDefaultCredentialsError:
# No application default credentials, continue to try other options.
pass
def load_user_credentials(client_secrets, storage, flags):
"""Attempts to load user credentials from the provided client secrets file.
Args:
client_secrets: path to the file containing client secrets.
storage: the data store to use for caching credential information.
flags: command-line flags.
Returns:
A credential object initialized with user account credentials.
"""
# Set up a Flow object to be used if we need to authenticate.
flow = client.flow_from_clientsecrets(
client_secrets,
scope=API_SCOPES,
message=tools.message_if_missing(client_secrets))
# Retrieve credentials from storage.
# If the credentials don't exist or are invalid run through the installed
# client flow. The storage object will ensure that if successful the good
# credentials will get written back to file.
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = tools.run_flow(flow, storage, flags)
return credentials
def setup(flags):
"""Handles authentication and loading of the API.
Args:
flags: command-line flags obtained by calling ''get_arguments()''.
Returns:
An initialized service object.
"""
# Load application default credentials if they're available.
credentials = load_application_default_credentials()
# Otherwise, load credentials from the provided client secrets file.
if credentials is None:
# Name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the Credentials tab on the Google Developers Console.
client_secrets = os.path.join(os.path.dirname(__file__),
'client_secrets.json')
storage = oauthFile.Storage(CREDENTIAL_STORE_FILE)
credentials = load_user_credentials(client_secrets, storage, flags)
# Authorize HTTP object with the prepared credentials.
http = credentials.authorize(http=httplib2.Http())
# Construct and return a service object via the discovery service.
return discovery.build(API_NAME, API_VERSION, http=http)
| apache-2.0 | -2,486,802,664,709,208,000 | 32.901515 | 80 | 0.739441 | false |
DrChai/Haystack-SolrEnginePlus | backends/solr_backend.py | 1 | 5798 | from haystack.backends import BaseEngine
from haystack.backends.solr_backend import SolrSearchBackend, SolrSearchQuery
from haystack.constants import VALID_FILTERS, FILTER_SEPARATOR, DEFAULT_ALIAS, ITERATOR_LOAD_PER_QUERY
__author__ = 'Carrycat'
class EventSolrSearchBackend(SolrSearchBackend):
def _process_results(self, raw_results, highlight=False, result_class=None, distance_point=None):
result = super(EventSolrSearchBackend, self)._process_results(raw_results,
highlight=highlight,
result_class=result_class,
distance_point=distance_point)
if hasattr(raw_results, 'nextCursorMark'):
nextCursorMark = raw_results.nextCursorMark
result['nextCursorMark'] = nextCursorMark
return result
def build_search_kwargs(self, query_string, sort_by=None, start_offset=0, end_offset=None,
fields='', highlight=False, facets=None,
date_facets=None, query_facets=None,
narrow_queries=None, spelling_query=None,
within=None, dwithin=None, distance_point=None,
models=None, limit_to_registered_models=None,
result_class=None, stats=None, cursorMark=None, cursor_rows=None):
kwargs = super(EventSolrSearchBackend, self).build_search_kwargs(
query_string, sort_by=sort_by, start_offset=start_offset, end_offset=end_offset,
fields=fields, highlight=highlight, facets=facets,
date_facets=date_facets, query_facets=None,
narrow_queries=narrow_queries, spelling_query=spelling_query,
within=within, dwithin=dwithin, distance_point=distance_point,
models=models, limit_to_registered_models=limit_to_registered_models,
result_class=result_class, stats=stats)
if cursorMark is not None:
kwargs['cursorMark'] = cursorMark
kwargs.pop('rows', None)
kwargs['rows'] = cursor_rows
kwargs.pop('start')
if kwargs.get('sort', None):
if kwargs['sort'] == 'geodist() asc':
kwargs['sort'] = 'geodist() asc,id asc'
elif kwargs['sort'] == 'geodist() desc':
kwargs['sort'] = 'geodist() desc,id desc'
if query_facets is not None:
kwargs['facet'] = 'on'
query_list = []
for func, field, value in query_facets:
func = "{!%s}" % func if func else ""
if field is None and value is None:
query_list.append("%s" % func)
elif field and value:
query_list.append("%s%s:%s" % (func, field, value))
else:
pass
kwargs['facet.query'] = query_list
return kwargs
class EventSolrSearchQuery(SolrSearchQuery):
def __init__(self, using=DEFAULT_ALIAS):
self._next_cursor = None
self._current_cursor = None
self.cursor_rows = None
super(EventSolrSearchQuery, self).__init__(using=DEFAULT_ALIAS)
def build_params(self, spelling_query=None, **kwargs):
search_kwargs = super(EventSolrSearchQuery, self).build_params(spelling_query=spelling_query, **kwargs)
if self._next_cursor: # if next_cursor() is called
# if self._next_cursor_cache: # if cursor cache is set using cursor cache instead of next_cursor
# search_kwargs['cursorMark'] = self._next_cursor_cache
# else:
search_kwargs['cursorMark'] = self._next_cursor
search_kwargs['cursor_rows'] = self.cursor_rows
return search_kwargs
def run(self, spelling_query=None, **kwargs):
"""Builds and executes the query. Returns a list of search results."""
final_query = self.build_query()
search_kwargs = self.build_params(spelling_query, **kwargs)
if kwargs:
search_kwargs.update(kwargs)
results = self.backend.search(final_query, **search_kwargs)
self._results = results.get('results', [])
self._hit_count = results.get('hits', 0)
self._facet_counts = self.post_process_facets(results)
self._stats = results.get('stats', {})
if self._next_cursor:
self._current_cursor = self._next_cursor
self._next_cursor = results.get('nextCursorMark', None) # update next cursor
#
# self._next_cursor_cache = results.get('nextCursorMark', None) # X
self._spelling_suggestion = results.get('spelling_suggestion', None)
def add_next_cursor(self, next_cursor, rows):
"""
set next cursorMark and rows
"""
        if not isinstance(next_cursor, str) or not isinstance(rows, int):
            raise AttributeError('next_cursor must be a string and rows must be an integer')
self.cursor_rows = rows
self._next_cursor = next_cursor
def add_query_facet(self, func, field, query):
"""Adds a query facet on a field."""
from haystack import connections
self.query_facets.append((func, connections[self._using].get_unified_index().get_facet_fieldname(field), query))
def _clone(self, klass=None, using=None):
clone = super(EventSolrSearchQuery, self)._clone(klass=klass, using=using)
clone._next_cursor = self._next_cursor
clone._current_cursor = self._current_cursor
clone.cursor_rows = self.cursor_rows
return clone
class SolrEngine(BaseEngine):
backend = EventSolrSearchBackend
query = EventSolrSearchQuery | mit | -2,384,183,493,642,633,700 | 46.532787 | 120 | 0.59417 | false |
fedelemantuano/thug | tools/distributed/thugd.py | 1 | 5567 | #!/usr/bin/env python
"""
Thug daemon
By [email protected]
For the iTES project (www.ites-project.org)
"""
import argparse
import pika
import sys
import json
import six
import subprocess
import os
import shutil
import six.moves.configparser as ConfigParser
class Thugd(object):
"""
    A class that waits for jobs, runs Thug on them and returns the results
"""
def __init__(self, configfile, clear = False):
"""
@configfile: The configuration file to use
@clear: Clear the job chain
"""
self.clear = clear
self.username = "guest"
self.password = "guest"
self._read_config(configfile)
self._chdir()
self._run_queue()
def _read_config(self, configfile):
"""
read_config
Read configuration from configuration file
@configfile: The configfile to use
"""
self.host = "localhost"
self.queue = "thugctrl"
self.rhost = "localhost"
self.rqueue = "thugres"
if configfile is None:
return
conf = ConfigParser.ConfigParser()
conf.read(configfile)
self.host = conf.get("jobs", "host")
self.queue = conf.get("jobs", "queue")
self.rhost = conf.get("results", "host")
self.rqueue = conf.get("results", "queue")
self.resdir = conf.get("results", "resdir")
self.username = conf.get("credentials", "username")
self.password = conf.get("credentials", "password")
def _chdir(self):
os.chdir(os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir,
os.pardir,
'src')))
def _run_queue(self):
credentials = pika.PlainCredentials(self.username, self.password)
parameters = pika.ConnectionParameters(host = self.host, credentials = credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.queue_declare(queue = self.queue, durable = True)
print("[*] Waiting for messages on %s %s (press CTRL+C to exit)" % (self.host, self.queue, ))
channel.basic_qos(prefetch_count = 1)
channel.basic_consume(lambda c, m, p, b: self.callback(c, m, p, b), queue = self.queue)
channel.start_consuming()
def runProcess(self, exe):
p = subprocess.Popen(exe, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
        while True:
            retcode = p.poll()
            line = p.stdout.readline()
            yield line
            if retcode is not None:
                break
def send_results(self, data):
credentials = pika.PlainCredentials(self.username, self.password)
parameters = pika.ConnectionParameters(host = self.rhost, credentials = credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.queue_declare(queue = self.rqueue, durable = True)
message = json.dumps(data)
channel.basic_publish(exchange = '',
routing_key = self.rqueue,
body = message,
properties = pika.BasicProperties(delivery_mode = 2,))
print("[x] Sent %r" % (message,))
connection.close()
def copy_to_result(self, frompath, job):
"""
Copy result folder to result path
"""
if not frompath:
return None
respath = os.path.join(self.resdir, str(job["id"]))
shutil.copytree(frompath, respath)
return os.path.relpath(respath, self.resdir)
def process(self, job):
"""
Execute thug to process a job
"""
print("job" + str(job))
print(os.getcwd())
command = ["python", "thug.py", "-t", str(job["threshold"])]
if job["extensive"]:
command.append("-E")
if job["timeout"]:
command.append("-T")
command.append(str(job["timeout"]))
if job["referer"]:
command.append("-r")
command.append(job["referer"])
if job["proxy"]:
command.append("-p")
command.append(job["proxy"])
command.append(job["url"])
print(command)
pathname = None
for line in self.runProcess(command):
if line.startswith("["):
six.print_(line, end = " ")
if line.find("] Saving log analysis at ") >= 0:
pathname = line.split(" ")[-1].strip()
rpath = self.copy_to_result(pathname, job)
res = {"id" : job["id"],
"rpath" : rpath}
self.send_results(res)
def callback(self, ch, method, properties, body):
print("[x] Received %r" % (body, ))
if not self.clear:
self.process(json.loads(body))
print("[x] Done")
ch.basic_ack(delivery_tag=method.delivery_tag)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'Receives jobs and starts Thug to process them')
parser.add_argument('--config', help = 'Configuration file to use', default = "config.ini")
parser.add_argument('--clear', help = 'Clear the job chain', default = False, action = "store_true")
args = parser.parse_args()
try:
t = Thugd(args.config, args.clear)
except KeyboardInterrupt:
sys.exit(0)
| gpl-2.0 | -2,700,596,300,748,250,600 | 30.100559 | 104 | 0.554877 | false |
jbrowne/UCSBsketch | linux/Utils/BoardChangeWatcher.py | 1 | 13053 | if __name__ == "__main__":
import sys
sys.path.append("./")
"""This module supports watching for ink-changes on a whiteboard."""
from Utils import Logger
from Utils.ForegroundFilter import ForegroundFilter
from Utils.ImageUtils import captureImage
from Utils.ImageUtils import changeExposure
from Utils.ImageUtils import initializeCapture
from Utils.ImageUtils import max_allChannel
from Utils.ImageUtils import saveimg
from Utils.ImageUtils import showResized
from Utils.ImageUtils import warpFrame
import cv
CAPSIZE00 = (2592, 1944)
CAPSIZE01 = (2048, 1536)
CAPSIZE02 = (1600, 1200)
CAPSIZE02 = (1280, 960)
CAPSIZE03 = (960, 720)
CAPSIZE04 = (800, 600)
PROJECTORSIZE = (1024, 768)
DEBUG = False
logger = Logger.getLogger("BC_Watcher", Logger.WARN)
class BoardChangeWatcher(object):
"""This class watches a whiteboard, and bundles up
    changes of the board's contents as discrete "diff" events"""
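    # Typical polling loop (sketch; main() below shows the full version):
    #   watcher = BoardChangeWatcher()
    #   watcher.setBoardImage(first_frame)
    #   watcher.updateBoardImage(frame)          # once per captured frame
    #   if watcher.isCaptureReady:
    #       darker, lighter = watcher.captureBoardDifferences()
    #       watcher.acceptCurrentImage()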
def __init__(self):
self._fgFilter = ForegroundFilter()
self._boardDiffHist = []
self._lastCaptureImage = None
self.isCaptureReady = False
# Used to track if we've seen changes since last accept
self._isBoardUpdated = False
self._boardMask = None
def reset(self):
self._fgFilter = ForegroundFilter()
self._boardDiffHist = []
self._lastCaptureImage = None
self.isCaptureReady = False
self._isBoardUpdated = False
def setBoardCorners(self, corners, targetCorners=[]):
"""Set the region of the image that covers the board. Used
to set up a mask for the filters."""
if self._lastCaptureImage is not None:
if len(corners) == 4:
(w, h) = cv.GetSize(self._lastCaptureImage)
if len(targetCorners) == 0:
self._boardMask = cv.CreateMat(h, w, cv.CV_8UC1)
cv.Set(self._boardMask, 0)
cv.FillConvexPoly(self._boardMask, corners, 255)
else:
print targetCorners, len(targetCorners)
self._boardMask = cv.CreateMat(h, w, cv.CV_8UC1)
cv.Set(self._boardMask, 255)
self._boardMask = warpFrame(self._boardMask, targetCorners,
corners)
saveimg(self._boardMask, name="BoardMask")
else:
logger.warn("Not setting corners. Unknown image size!")
def setBoardImage(self, image):
"""Force the current background board image to be image"""
print "Setting board image"
self._lastCaptureImage = cv.CloneMat(image)
self._fgFilter.setBackground(image)
self._boardDiffHist = []
self.isCaptureReady = False
self._isBoardUpdated = False
def acceptCurrentImage(self):
"""Confirm the current view of the whiteboard as correct (commit it)"""
image = self._fgFilter.getBackgroundImage()
self.setBoardImage(image)
self.isCaptureReady = False
self._isBoardUpdated = False
return image
def updateBoardImage(self, image):
global DEBUG
precentDiffThresh = 0.2
diffMaskThresh = 50
windowLen = 2
if self._lastCaptureImage is None:
self.setBoardImage(image)
return
self._fgFilter.updateBackground(image)
# Now that we've updated the background a little bit, analyze the
# difference from the previous backgrounds for consistency
# background images for consistency
# Track the new strokes that are added
captureDiffMask = cv.CloneMat(self._fgFilter.getBackgroundImage())
cv.AbsDiff(captureDiffMask, self._lastCaptureImage, captureDiffMask)
captureDiffMask = max_allChannel(captureDiffMask)
cv.Threshold(captureDiffMask, captureDiffMask,
diffMaskThresh, 255, cv.CV_THRESH_BINARY)
# Mask the information according to where the board is
if self._boardMask is not None:
cv.And(captureDiffMask, self._boardMask, captureDiffMask)
else:
logger.warn("Watching with no board area set!")
if len(self._boardDiffHist) > windowLen:
self._boardDiffHist.pop(0)
self._boardDiffHist.append(captureDiffMask)
prev = None
cumulativeDiff = None
thisDiff = None
for frame in self._boardDiffHist:
if prev is None:
prev = frame
cumulativeDiff = cv.CreateMat(prev.rows, prev.cols, prev.type)
cv.Set(cumulativeDiff, (0, 0, 0))
thisDiff = cv.CreateMat(prev.rows, prev.cols, prev.type)
else:
cv.AbsDiff(prev, frame, thisDiff)
cv.Max(thisDiff, cumulativeDiff, cumulativeDiff)
# Now that we have the max sequential difference between frames,
# smooth out the edge artifacts due to noise
cv.Smooth(cumulativeDiff, cumulativeDiff, smoothtype=cv.CV_MEDIAN)
# Mask the information according to where the board is
if self._boardMask is not None:
cv.And(cumulativeDiff, self._boardMask, cumulativeDiff)
else:
logger.warn("Watching with no board area set!")
# The difference percentage is in terms of the size of
# the changed component from the background
percentDiff = (cv.CountNonZero(cumulativeDiff) /
float(max(cv.CountNonZero(captureDiffMask), 1)))
if percentDiff < precentDiffThresh:
if self._isBoardUpdated:
self.isCaptureReady = True
else:
# Only set unready if the difference is large
self.isCaptureReady = False
self._isBoardUpdated = True
if DEBUG:
showResized("Capture Difference", captureDiffMask, 0.4)
def captureBoardDifferences(self):
"""Returns a tuple of binary images: (darkerDiff, lighterDiff)
where the non-zero mask in darkerDiff is the board contents that
is darker than the last capture, and lighterDiff is the contents
that is lighter.
Should check isCaptureReady field before using the results"""
global DEBUG
differenceThresh = 10
curBackground = self._fgFilter.getBackgroundImage()
darkerDiff = cv.CreateMat(self._lastCaptureImage.rows,
self._lastCaptureImage.cols, cv.CV_8UC1)
lighterDiff = cv.CloneMat(darkerDiff)
subtractedImage = cv.CloneMat(curBackground)
# Get all the points that are lighter in the previous capture,
# cleaned up slightly.
cv.Sub(self._lastCaptureImage, curBackground, subtractedImage)
cv.Threshold(max_allChannel(subtractedImage), darkerDiff,
differenceThresh, 255, cv.CV_THRESH_TOZERO)
cv.Smooth(darkerDiff, darkerDiff, smoothtype=cv.CV_MEDIAN)
# Get all the points that are lighter in the current capture,
# cleaned up slightly.
cv.Sub(curBackground, self._lastCaptureImage, subtractedImage)
cv.Threshold(max_allChannel(subtractedImage), lighterDiff,
differenceThresh, 255, cv.CV_THRESH_TOZERO)
cv.Smooth(lighterDiff, lighterDiff, smoothtype=cv.CV_MEDIAN)
# Light spots (projector augmented) in the previous image
lightSpotsImage = cv.CloneMat(self._lastCaptureImage)
lightSpotMask_Prev = cv.CreateMat(self._lastCaptureImage.rows,
self._lastCaptureImage.cols,
cv.CV_8UC1)
cv.Smooth(lightSpotsImage, lightSpotsImage, smoothtype=cv.CV_MEDIAN,
param1=5, param2=5)
cv.Erode(lightSpotsImage, lightSpotsImage, iterations=10)
cv.Sub(self._lastCaptureImage, lightSpotsImage, lightSpotsImage)
cv.CvtColor(lightSpotsImage, lightSpotMask_Prev, cv.CV_RGB2GRAY)
cv.Threshold(lightSpotMask_Prev, lightSpotMask_Prev, 50, 255,
cv.CV_THRESH_BINARY_INV)
# Light spots (projector augmented) in the current image
lightSpotsImage = cv.CloneMat(curBackground)
lightSpotMask_Current = cv.CreateMat(curBackground.rows,
curBackground.cols, cv.CV_8UC1)
cv.Smooth(lightSpotsImage, lightSpotsImage, smoothtype=cv.CV_MEDIAN,
param1=5, param2=5)
cv.Erode(lightSpotsImage, lightSpotsImage, iterations=10)
cv.Sub(curBackground, lightSpotsImage, lightSpotsImage)
cv.CvtColor(lightSpotsImage, lightSpotMask_Current, cv.CV_RGB2GRAY)
cv.Threshold(lightSpotMask_Current, lightSpotMask_Current, 50, 255,
cv.CV_THRESH_BINARY_INV)
# Filter out the spots that were projected before and are now darker
cv.And(lightSpotMask_Prev, darkerDiff, darkerDiff)
# Filter out the spots that are now lighter due to projection
cv.And(lightSpotMask_Current, lighterDiff, lighterDiff)
# Filter out artifacts along harsh edges from digitization process
edges = cv.CreateMat(curBackground.rows, curBackground.cols,
cv.CV_8UC1)
edges_temp = cv.CloneMat(edges)
grayImage = cv.CloneMat(edges)
cv.CvtColor(self._lastCaptureImage, grayImage, cv.CV_RGB2GRAY)
cv.Canny(grayImage, edges, 50, 100)
cv.CvtColor(curBackground, grayImage, cv.CV_RGB2GRAY)
cv.Canny(grayImage, edges_temp, 50, 100)
cv.Max(edges, edges_temp, edges)
# "Opening" the edge images
# Remove the edge information from the differences
cv.Sub(darkerDiff, edges, darkerDiff)
cv.Sub(lighterDiff, edges, lighterDiff)
# Dilate the differences to finish the "opening"
cv.Dilate(darkerDiff, darkerDiff)
cv.Dilate(lighterDiff, lighterDiff)
# Mask the information according to where the board is
if self._boardMask is not None:
cv.And(self._boardMask, darkerDiff, darkerDiff)
cv.And(self._boardMask, lighterDiff, lighterDiff)
else:
logger.warn("Watching with no board area set!")
if DEBUG:
showResized("Darker", darkerDiff, 0.25)
# showResized("Edges", edges, 0.5)
showResized("Lighter", lighterDiff, 0.25)
# showResized("Previous Projection", lightSpotMask_Prev, 0.4)
# showResized("Current Projection", lightSpotMask_Prev, 0.4)
return (darkerDiff, lighterDiff)
def main(args):
global DEBUG
DEBUG = True
if len(args) > 1:
camNum = int(args[1])
print "Using cam %s" % (camNum,)
else:
camNum = 0
capture, dims = initializeCapture(cam=camNum, dims=CAPSIZE00)
changeExposure(camNum, value=100)
dispScale = 0.5
warpCorners = []
targetCorners = [(0, 0), (dims[0], 0), (dims[0], dims[1]), (0, dims[1])]
def onMouseClick(event, x, y, flags, param):
if event == cv.CV_EVENT_LBUTTONUP:
if len(warpCorners) != 4:
warpCorners.append((int(x / dispScale), int(y / dispScale),))
if len(warpCorners) == 4:
print warpCorners
cv.NamedWindow("Output")
cv.SetMouseCallback("Output", onMouseClick)
bcWatcher = BoardChangeWatcher()
dispImage = captureImage(capture)
# dispImage = warpFrame(dispImage, warpCorners, targetCorners)
isPaused = False
while True:
image = captureImage(capture)
if not isPaused:
if len(warpCorners) == 4:
bcWatcher.setBoardCorners(warpCorners)
bcWatcher.updateBoardImage(image)
showResized("FGFilter",
bcWatcher._fgFilter.getBackgroundImage(), 0.4)
if bcWatcher.isCaptureReady:
(darker, lighter) = bcWatcher.captureBoardDifferences()
showResized("Darker", darker, 0.3)
showResized("Lighter", lighter, 0.3)
dispImage = bcWatcher.acceptCurrentImage()
for corner in warpCorners:
cv.Circle(dispImage, corner, 3, (255, 200, 0))
showResized("Output", dispImage, dispScale)
key = cv.WaitKey(50)
if key != -1:
key = chr(key % 256)
if key == 'q':
break
if key == 'p':
isPaused = not isPaused
if isPaused:
print "Paused"
else:
print "Unpaused"
if key == 'r':
bcWatcher.setBoardImage(image)
dispImage = image
if key == 'R':
warpCorners = []
if key in ('+', '='):
changeExposure(camNum, 100)
elif key in ('-', '_'):
changeExposure(camNum, -100)
cv.DestroyAllWindows()
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause | -6,130,084,411,366,002,000 | 40.702875 | 79 | 0.612273 | false |
sylvestre/bedrock | lib/l10n_utils/dotlang.py | 1 | 10514 | # coding=utf-8
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""This library parses dotlang files migrated over from the old PHP
system.
It caches them using the django caching library, but it could
potentially just use thread-local variables. Caching seems safer at
the expense of another caching layer."""
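# A .lang file is a plain-text list of ;source / translation pairs. Hypothetical
# example of the format this module parses:
#
#   ## active ##
#   # Comment attached to the next string
#   ;Download Firefox
#   Télécharger Firefox {ok}
#
# For such a file, parse() returns {u'Download Firefox': u'Télécharger Firefox'};
# the {ok} tag marks a translation as intentional even when it equals the source.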
import codecs
import inspect
import os
import re
from functools import partial
from django.conf import settings
from django.core.cache import caches
from django.utils.functional import lazy
from jinja2 import Markup
from product_details import product_details
from lib.l10n_utils import translation
from lib.l10n_utils.utils import ContainsEverything, strip_whitespace
ALL_THE_THINGS = ContainsEverything()
FORMAT_IDENTIFIER_RE = re.compile(r"""(%
(?:\((\w+)\))? # Mapping key
s)""", re.VERBOSE)
TAG_REGEX = re.compile(r"^## ([\w-]+) ##")
cache = caches['l10n']
def parse(path, skip_untranslated=True, extract_comments=False):
"""
Parse a dotlang file and return a dict of translations.
:param path: Absolute path to a lang file.
:param skip_untranslated: Exclude strings for which the ID and translation
match.
:param extract_comments: Extract one line comments from template if True
:return: dict
"""
trans = {}
if not os.path.exists(path):
return trans
with codecs.open(path, 'r', 'utf-8', errors='replace') as lines:
source = None
comment = None
for line in lines:
l10n_tag = None
if u'�' in line:
mail_error(path, line)
line = line.strip()
if not line:
continue
if line[0] == '#':
comment = line.lstrip('#').strip()
continue
if line[0] == ';':
source = line[1:]
elif source:
for tag in ('{ok}', '{l10n-extra}'):
if line.lower().endswith(tag):
l10n_tag = tag.strip('{}')
line = line[:-len(tag)]
line = line.strip()
if skip_untranslated and source == line and l10n_tag != 'ok':
continue
if extract_comments:
trans[source] = [comment, line]
comment = None
else:
trans[source] = line
return trans
def mail_error(path, message):
"""Email managers when an error is detected"""
from django.core import mail
subject = '%s is corrupted' % path
mail.mail_managers(subject, message)
def fix_case(locale):
"""Convert lowercase locales to uppercase: en-us -> en-US"""
parts = locale.split('-')
if len(parts) == 1:
return locale
else:
return '%s-%s' % (parts[0], parts[1].upper())
def translate(text, files):
"""Search a list of .lang files for a translation"""
lang = fix_case(translation.get_language())
# don't attempt to translate the default language.
if lang == settings.LANGUAGE_CODE:
return Markup(text)
tweaked_text = strip_whitespace(text)
for file_ in files:
key = "dotlang-%s-%s" % (lang, file_)
rel_path = os.path.join('locale', lang, '%s.lang' % file_)
trans = cache.get(key)
if trans is None:
path = os.path.join(settings.ROOT, rel_path)
trans = parse(path)
cache.set(key, trans, settings.DOTLANG_CACHE)
if tweaked_text in trans:
original = FORMAT_IDENTIFIER_RE.findall(text)
translated = FORMAT_IDENTIFIER_RE.findall(trans[tweaked_text])
if set(original) != set(translated):
explanation = ('The translation has a different set of '
'replaced text (aka %s)')
message = '%s\n\n%s\n%s' % (explanation, text,
trans[tweaked_text])
mail_error(rel_path, message)
return Markup(text)
return Markup(trans[tweaked_text])
return Markup(text)
def _get_extra_lang_files():
frame = inspect.currentframe()
new_lang_files = []
if frame is None:
if settings.DEBUG:
import warnings
warnings.warn('Your Python runtime does not support the frame '
'stack. Extra LANG_FILES specified in Python '
'source files will not work.', RuntimeWarning)
else:
try:
            # gets value of the LANG_FILES constant in the calling module if specified.
# have to go back 2x to compensate for this function.
new_lang_files = frame.f_back.f_back.f_globals.get('LANG_FILES', [])
finally:
del frame
if new_lang_files:
if isinstance(new_lang_files, str):
new_lang_files = [new_lang_files]
return [lf for lf in new_lang_files if lf not in settings.DOTLANG_FILES]
def gettext(text, *args, **kwargs):
"""
Translate a piece of text from the global files. If `LANG_FILES` is defined
in the module from which this function is called, those files (or file)
will be searched first for the translation, followed by the default files.
:param text: string to translate
:param args: items for interpolation into `text`
:param lang_files: extra lang file names to search for a translation.
NOTE: DO NOT USE THIS for string extraction. It will NOT respect
the values in this kwarg when extracting strings. This is only useful
if you know the string is in a different file but you don't want to
add that file for the whole module via the `LANG_FILES` constant.
:return: translated string
"""
lang_files = kwargs.pop('lang_files', [])
if isinstance(lang_files, list):
lang_files = lang_files[:]
else:
lang_files = [lang_files]
if not lang_files:
lang_files += _get_extra_lang_files()
lang_files += settings.DOTLANG_FILES
text = translate(text, lang_files)
if args:
text = text % args
return text
_lazy_proxy = lazy(gettext, str)
def gettext_lazy(*args, **kwargs):
lang_files = _get_extra_lang_files()
if lang_files:
return partial(_lazy_proxy, lang_files=lang_files)(*args, **kwargs)
return _lazy_proxy(*args, **kwargs)
# backward compat
_ = gettext
_lazy = gettext_lazy
def get_lang_path(path):
"""Generate the path to a lang file from a django path.
/apps/foo/templates/foo/bar.html -> foo/bar
/templates/foo.html -> foo
/foo/bar.html -> foo/bar"""
p = path.split('/')
try:
i = p.index('templates')
p = p[i + 1:]
except ValueError:
pass
path = '/'.join(p)
base, ext = os.path.splitext(path)
return base
def lang_file_is_active(path, lang=None):
"""
If the lang file for a locale exists and has the correct comment returns
True, and False otherwise.
:param path: the relative lang file name
:param lang: the language code
:return: bool
"""
return lang_file_has_tag(path, lang, 'active')
def lang_file_tag_set(path, lang=None):
"""Return a set of tags for a specific lang file and locale.
:param path: the relative lang file name
:param lang: the language code or the lang of the request if omitted
:return: set of strings
"""
if settings.DEV or lang == settings.LANGUAGE_CODE:
return ALL_THE_THINGS
lang = lang or fix_case(translation.get_language())
rel_path = os.path.join('locale', lang, '%s.lang' % path)
cache_key = 'tag:%s' % rel_path
tag_set = cache.get(cache_key)
if tag_set is None:
tag_set = set()
fpath = os.path.join(settings.ROOT, rel_path)
try:
with codecs.open(fpath, 'r', 'utf-8', errors='replace') as lines:
for line in lines:
# Filter out Byte order Mark
line = line.replace(u'\ufeff', '')
m = TAG_REGEX.match(line)
if m:
tag_set.add(m.group(1))
else:
# Stop at the first non-tag line.
break
except IOError:
pass
cache.set(cache_key, tag_set, settings.DOTLANG_CACHE)
return tag_set
def lang_file_has_tag(path, lang=None, tag='active'):
"""
Return True if the lang file exists and has a line like "^## tag ##"
at the top. Stops looking at the line that doesn't have a tag.
Always returns true for the default lang.
:param path: the relative lang file name
:param lang: the language code or the lang of the request if omitted
@param tag: The string that should appear between ##'s. Can contain
alphanumerics and "_".
@return: bool
"""
return tag in lang_file_tag_set(path, lang)
def get_translations_for_langfile(langfile):
"""
Return the list of available translations for the langfile.
:param langfile: the path to a lang file, retrieved with get_lang_path()
:return: list, like ['en-US', 'fr']
"""
cache_key = 'translations:%s' % langfile
translations = cache.get(cache_key, None)
if translations:
return translations
translations = []
for lang in settings.PROD_LANGUAGES:
if (lang in product_details.languages and
(lang == settings.LANGUAGE_CODE or
lang_file_is_active(langfile, lang))):
translations.append(lang)
cache.set(cache_key, translations, settings.DOTLANG_CACHE)
return translations
def get_translations_native_names(locales):
"""
Return a dict of locale codes and native language name strings.
Returned dict is suitable for use in view contexts and is filtered to only codes in PROD_LANGUAGES.
:param locales: list of locale codes
:return: dict, like {'en-US': 'English (US)', 'fr': 'Français'}
"""
translations = {}
for locale in locales:
if locale in settings.PROD_LANGUAGES:
language = product_details.languages.get(locale)
translations[locale] = language['native'] if language else locale
return translations
| mpl-2.0 | 6,054,639,285,895,285,000 | 31.341538 | 103 | 0.596233 | false |
jasontlam/snorkel | snorkel/candidates.py | 1 | 13432 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
from future.utils import iteritems
from collections import defaultdict
from copy import deepcopy
from itertools import product
import re
from sqlalchemy.sql import select
from .models import Candidate, TemporarySpan, Sentence
from .udf import UDF, UDFRunner
QUEUE_COLLECT_TIMEOUT = 5
class CandidateExtractor(UDFRunner):
"""
An operator to extract Candidate objects from a Context.
:param candidate_class: The type of relation to extract, defined using
:func:`snorkel.models.candidate_subclass <snorkel.models.candidate.candidate_subclass>`
:param cspaces: one or list of :class:`CandidateSpace` objects, one for each relation argument. Defines space of
Contexts to consider
:param matchers: one or list of :class:`snorkel.matchers.Matcher` objects, one for each relation argument. Only tuples of
Contexts for which each element is accepted by the corresponding Matcher will be returned as Candidates
:param self_relations: Boolean indicating whether to extract Candidates that relate the same context.
Only applies to binary relations. Default is False.
:param nested_relations: Boolean indicating whether to extract Candidates that relate one Context with another
that contains it. Only applies to binary relations. Default is False.
:param symmetric_relations: Boolean indicating whether to extract symmetric Candidates, i.e., rel(A,B) and rel(B,A),
where A and B are Contexts. Only applies to binary relations. Default is False.
"""
def __init__(self, candidate_class, cspaces, matchers, self_relations=False, nested_relations=False, symmetric_relations=False):
super(CandidateExtractor, self).__init__(CandidateExtractorUDF,
candidate_class=candidate_class,
cspaces=cspaces,
matchers=matchers,
self_relations=self_relations,
nested_relations=nested_relations,
symmetric_relations=symmetric_relations)
def apply(self, xs, split=0, **kwargs):
super(CandidateExtractor, self).apply(xs, split=split, **kwargs)
def clear(self, session, split, **kwargs):
session.query(Candidate).filter(Candidate.split == split).delete()
class CandidateExtractorUDF(UDF):
def __init__(self, candidate_class, cspaces, matchers, self_relations, nested_relations, symmetric_relations, **kwargs):
self.candidate_class = candidate_class
# Note: isinstance is the way to check types -- not type(x) in [...]!
self.candidate_spaces = cspaces if isinstance(cspaces, (list, tuple)) else [cspaces]
self.matchers = matchers if isinstance(matchers, (list, tuple)) else [matchers]
self.nested_relations = nested_relations
self.self_relations = self_relations
self.symmetric_relations = symmetric_relations
# Check that arity is same
if len(self.candidate_spaces) != len(self.matchers):
raise ValueError("Mismatched arity of candidate space and matcher.")
else:
self.arity = len(self.candidate_spaces)
# Make sure the candidate spaces are different so generators aren't expended!
self.candidate_spaces = list(map(deepcopy, self.candidate_spaces))
# Preallocates internal data structures
self.child_context_sets = [None] * self.arity
for i in range(self.arity):
self.child_context_sets[i] = set()
super(CandidateExtractorUDF, self).__init__(**kwargs)
def apply(self, context, clear, split, **kwargs):
# Generate TemporaryContexts that are children of the context using the candidate_space and filtered
# by the Matcher
for i in range(self.arity):
self.child_context_sets[i].clear()
for tc in self.matchers[i].apply(self.candidate_spaces[i].apply(context)):
tc.load_id_or_insert(self.session)
self.child_context_sets[i].add(tc)
# Generates and persists candidates
extracted = set()
candidate_args = {'split': split}
for args in product(*[enumerate(child_contexts) for child_contexts in self.child_context_sets]):
# TODO: Make this work for higher-order relations
if self.arity == 2:
ai, a = args[0]
bi, b = args[1]
# Check for self-joins, "nested" joins (joins from span to its subspan), and flipped duplicate
# "symmetric" relations. For symmetric relations, if mentions are of the same type, maintain
# their order in the sentence.
if not self.self_relations and a == b:
continue
elif not self.nested_relations and (a in b or b in a):
continue
elif not self.symmetric_relations and ((b, a) in extracted or
(self.matchers[0] == self.matchers[1] and a.char_start > b.char_start)):
continue
# Keep track of extracted
extracted.add((a,b))
# Assemble candidate arguments
for i, arg_name in enumerate(self.candidate_class.__argnames__):
candidate_args[arg_name + '_id'] = args[i][1].id
# Checking for existence
if not clear:
q = select([self.candidate_class.id])
for key, value in iteritems(candidate_args):
q = q.where(getattr(self.candidate_class, key) == value)
candidate_id = self.session.execute(q).first()
if candidate_id is not None:
continue
# Add Candidate to session
yield self.candidate_class(**candidate_args)
class CandidateSpace(object):
"""
Defines the **space** of candidate objects
Calling _apply(x)_ given an object _x_ returns a generator over candidates in _x_.
"""
def __init__(self):
pass
def apply(self, x):
raise NotImplementedError()
class Ngrams(CandidateSpace):
"""
Defines the space of candidates as all n-grams (n <= n_max) in a Sentence _x_,
indexing by **character offset**.
"""
def __init__(self, n_max=5, split_tokens=('-', '/')):
CandidateSpace.__init__(self)
self.n_max = n_max
self.split_rgx = r'('+r'|'.join(split_tokens)+r')' if split_tokens and len(split_tokens) > 0 else None
def apply(self, context):
# These are the character offset--**relative to the sentence start**--for each _token_
offsets = context.char_offsets
# Loop over all n-grams in **reverse** order (to facilitate longest-match semantics)
L = len(offsets)
seen = set()
for l in range(1, self.n_max+1)[::-1]:
for i in range(L-l+1):
w = context.words[i+l-1]
start = offsets[i]
end = offsets[i+l-1] + len(w) - 1
ts = TemporarySpan(char_start=start, char_end=end, sentence=context)
if ts not in seen:
seen.add(ts)
yield ts
# Check for split
# NOTE: For simplicity, we only split single tokens right now!
if l == 1 and self.split_rgx is not None and end - start > 0:
m = re.search(self.split_rgx, context.text[start-offsets[0]:end-offsets[0]+1])
if m is not None and l < self.n_max + 1:
ts1 = TemporarySpan(char_start=start, char_end=start + m.start(1) - 1, sentence=context)
if ts1 not in seen:
seen.add(ts1)
                            yield ts1
ts2 = TemporarySpan(char_start=start + m.end(1), char_end=end, sentence=context)
if ts2 not in seen:
seen.add(ts2)
yield ts2
class PretaggedCandidateExtractor(UDFRunner):
"""UDFRunner for PretaggedCandidateExtractorUDF"""
def __init__(self, candidate_class, entity_types, self_relations=False,
nested_relations=False, symmetric_relations=True, entity_sep='~@~'):
super(PretaggedCandidateExtractor, self).__init__(
PretaggedCandidateExtractorUDF, candidate_class=candidate_class,
entity_types=entity_types, self_relations=self_relations,
nested_relations=nested_relations, entity_sep=entity_sep,
symmetric_relations=symmetric_relations,
)
def apply(self, xs, split=0, **kwargs):
super(PretaggedCandidateExtractor, self).apply(xs, split=split, **kwargs)
def clear(self, session, split, **kwargs):
session.query(Candidate).filter(Candidate.split == split).delete()
class PretaggedCandidateExtractorUDF(UDF):
"""
An extractor for Sentences with entities pre-tagged, and stored in the entity_types and entity_cids
fields.
"""
def __init__(self, candidate_class, entity_types, self_relations=False, nested_relations=False, symmetric_relations=False, entity_sep='~@~', **kwargs):
self.candidate_class = candidate_class
self.entity_types = entity_types
self.arity = len(entity_types)
self.self_relations = self_relations
self.nested_relations = nested_relations
self.symmetric_relations = symmetric_relations
self.entity_sep = entity_sep
super(PretaggedCandidateExtractorUDF, self).__init__(**kwargs)
def apply(self, context, clear, split, check_for_existing=True, **kwargs):
"""Extract Candidates from a Context"""
# For now, just handle Sentences
if not isinstance(context, Sentence):
            raise NotImplementedError("%s is currently only implemented for Sentence contexts." % self.__class__.__name__)
# Do a first pass to collect all mentions by entity type / cid
entity_idxs = dict((et, defaultdict(list)) for et in set(self.entity_types))
L = len(context.words)
for i in range(L):
if context.entity_types[i] is not None:
ets = context.entity_types[i].split(self.entity_sep)
cids = context.entity_cids[i].split(self.entity_sep)
for et, cid in zip(ets, cids):
if et in entity_idxs:
entity_idxs[et][cid].append(i)
# Form entity Spans
entity_spans = defaultdict(list)
entity_cids = {}
for et, cid_idxs in iteritems(entity_idxs):
for cid, idxs in iteritems(entity_idxs[et]):
while len(idxs) > 0:
i = idxs.pop(0)
char_start = context.char_offsets[i]
char_end = char_start + len(context.words[i]) - 1
while len(idxs) > 0 and idxs[0] == i + 1:
i = idxs.pop(0)
char_end = context.char_offsets[i] + len(context.words[i]) - 1
# Insert / load temporary span, also store map to entity CID
tc = TemporarySpan(char_start=char_start, char_end=char_end, sentence=context)
tc.load_id_or_insert(self.session)
entity_cids[tc.id] = cid
entity_spans[et].append(tc)
# Generates and persists candidates
candidate_args = {'split' : split}
for args in product(*[enumerate(entity_spans[et]) for et in self.entity_types]):
# TODO: Make this work for higher-order relations
if self.arity == 2:
ai, a = args[0]
bi, b = args[1]
# Check for self-joins, "nested" joins (joins from span to its subspan), and flipped duplicate
# "symmetric" relations
if not self.self_relations and a == b:
continue
elif not self.nested_relations and (a in b or b in a):
continue
elif not self.symmetric_relations and ai > bi:
continue
# Assemble candidate arguments
for i, arg_name in enumerate(self.candidate_class.__argnames__):
candidate_args[arg_name + '_id'] = args[i][1].id
candidate_args[arg_name + '_cid'] = entity_cids[args[i][1].id]
# Checking for existence
if check_for_existing:
q = select([self.candidate_class.id])
for key, value in iteritems(candidate_args):
q = q.where(getattr(self.candidate_class, key) == value)
candidate_id = self.session.execute(q).first()
if candidate_id is not None:
continue
# Add Candidate to session
yield self.candidate_class(**candidate_args)
| apache-2.0 | 6,448,931,044,896,654,000 | 45.801394 | 155 | 0.582638 | false |
nive/nive_cms | nive_cms/tests/test_configuration.py | 1 | 3218 | # -*- coding: utf-8 -*-
import unittest
from nive.helper import FormatConfTestFailure
from nive_cms import app, box, column, file, image, link, media, menublock, news, note, page, root
from nive_cms import spacer, text, codeblock
class TestConf(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_conf1(self):
r=app.configuration.test()
if not r:
return
print FormatConfTestFailure(r)
self.assert_(False, "Configuration Error")
def test_conf3(self):
r=box.configuration.test()
if not r:
return
print FormatConfTestFailure(r)
self.assert_(False, "Configuration Error")
def test_conf4(self):
r=column.configuration.test()
if not r:
return
print FormatConfTestFailure(r)
self.assert_(False, "Configuration Error")
def test_conf5(self):
r=file.configuration.test()
if not r:
return
print FormatConfTestFailure(r)
self.assert_(False, "Configuration Error")
def test_conf7(self):
r=image.configuration.test()
if not r:
return
print FormatConfTestFailure(r)
self.assert_(False, "Configuration Error")
def test_conf8(self):
r=link.configuration.test()
if not r:
return
print FormatConfTestFailure(r)
self.assert_(False, "Configuration Error")
def test_conf9(self):
r=media.configuration.test()
if not r:
return
print FormatConfTestFailure(r)
self.assert_(False, "Configuration Error")
def test_conf10(self):
r=menublock.configuration.test()
if not r:
return
print FormatConfTestFailure(r)
self.assert_(False, "Configuration Error")
def test_conf11(self):
r=news.configuration.test()
if not r:
return
print FormatConfTestFailure(r)
self.assert_(False, "Configuration Error")
def test_conf12(self):
r=note.configuration.test()
if not r:
return
print FormatConfTestFailure(r)
self.assert_(False, "Configuration Error")
def test_conf13(self):
r=page.configuration.test()
if not r:
return
print FormatConfTestFailure(r)
self.assert_(False, "Configuration Error")
def test_conf14(self):
r=root.configuration.test()
if not r:
return
print FormatConfTestFailure(r)
self.assert_(False, "Configuration Error")
def test_conf15(self):
r=spacer.configuration.test()
if not r:
return
print FormatConfTestFailure(r)
self.assert_(False, "Configuration Error")
def test_conf16(self):
r=text.configuration.test()
if not r:
return
print FormatConfTestFailure(r)
self.assert_(False, "Configuration Error")
def test_conf17(self):
r=codeblock.configuration.test()
if not r:
return
print FormatConfTestFailure(r)
self.assert_(False, "Configuration Error")
| gpl-3.0 | -6,212,781,265,735,746,000 | 24.744 | 98 | 0.593226 | false |
turbulent/substance | substance/core.py | 1 | 7385 | import sys
import os
import logging
from collections import OrderedDict
from substance.monads import *
from substance.logs import *
from substance.shell import Shell
from substance.engine import Engine
from substance.link import Link
from substance.box import Box
from substance.db import DB
from substance.constants import Tables, EngineStates, DefaultEngineBox
from substance.utils import (
readYAML,
writeYAML,
readSupportFile,
getSupportFile,
streamDownload,
makeXHRRequest,
sha1sum
)
from substance.path import (getHomeDirectory)
from substance.config import (Config)
from substance.driver.virtualbox import VirtualBoxDriver
from substance.exceptions import (
FileSystemError,
FileDoesNotExist,
EngineNotFoundError,
EngineExistsError,
EngineNotRunning
)
import requests
logger = logging.getLogger(__name__)
class Core(object):
def __init__(self, configFile=None, basePath=None):
self.basePath = os.path.abspath(basePath) if basePath else getHomeDirectory('.substance')
self.enginesPath = os.path.join(self.basePath, "engines")
self.boxesPath = os.path.join(self.basePath, "boxes")
self.dbFile = os.path.join(self.basePath, "db.json")
configFile = configFile if configFile else "substance.yml"
configFile = os.path.join(self.basePath, configFile)
self.config = Config(configFile)
self.insecureKey = None
self.insecurePubKey = None
self.assumeYes = False
self.initialized = False
def getBasePath(self):
return self.basePath
def getEnginesPath(self):
return self.enginesPath
def getBoxesPath(self):
return self.boxesPath
def getDbFile(self):
return self.dbFile
def initialize(self):
if self.initialized:
return OK(None)
return self.assertPaths().then(self.assertConfig).then(self.initializeDB).then(defer(self.setInitialized, b=True))
def setInitialized(self, b):
self.initialized = b
def assertPaths(self):
return OK([self.basePath, self.enginesPath, self.boxesPath]).mapM(Shell.makeDirectory)
def assertConfig(self):
return self.config.loadConfigFile() \
.catchError(FileDoesNotExist, self.makeDefaultConfig)
def getDefaultConfig(self):
defaults = OrderedDict()
defaults['assumeYes'] = False
defaults['drivers'] = ['virtualbox']
defaults['tld'] = '.dev'
defaults['devroot'] = getHomeDirectory('substance')
defaults['current'] = OrderedDict()
defaults['engine'] = None
defaults['subenv'] = None
return defaults
def makeDefaultConfig(self, data=None):
logger.info("Generating default substance configuration in %s",
self.config.getConfigFile())
defaults = self.getDefaultConfig()
        for key, value in defaults.items():
            self.config.set(key, value)
self.config.set("basePath", self.basePath)
return self.config.saveConfig()
# -- Use
def setUse(self, engine, subenvName=None):
ops = [self.setCurrentEngine(engine)]
if subenvName:
ops.append(engine.envSwitch(subenvName))
return Try.sequence(ops)
def setCurrentEngine(self, engine):
current = self.config.get('current')
current.update({'engine': engine.name})
self.config.set('current', current)
return OK(self)
def readCurrentEngineName(self):
current = self.config.get('current', {})
name = current.get('engine', None)
if not name:
return Fail(EngineNotFoundError("No current engine is specified. Check the 'use' command for details."))
return OK(name)
def loadCurrentEngine(self, name=None):
current = self.config.get('current', {})
engineName = name
if not engineName:
engineName = current.get('engine', None)
if not engineName:
return Fail(EngineNotFoundError("No current engine is specified. Check the 'use' command for details."))
engine = self.loadEngine(engineName) \
.bind(Engine.loadConfigFile) \
.bind(Engine.loadState)
if engine.isFail():
return engine
engine = engine.getOK()
if engine.state is not EngineStates.RUNNING:
return Fail(EngineNotRunning("Engine '%s' is not running." % engine.name))
return OK(engine)
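    # Core methods return monadic results (OK / Fail from substance.monads)
    # instead of raising, so callers chain the steps and inspect the outcome
    # once. A minimal sketch of typical use (illustrative, not part of this
    # class):
    #
    #     core = Core()
    #     result = core.initialize().then(core.loadCurrentEngine)
    #     if result.isFail():
    #         ...  # report the Fail value
    #     else:
    #         engine = result.getOK()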
# -- Runtime
def setAssumeYes(self, ay):
        self.assumeYes = ay
return True
def getAssumeYes(self):
if self.config.get('assumeYes', False):
return True
elif self.assumeYes:
return True
return False
def getDefaultBoxString(self):
return DefaultEngineBox
# -- Engine library management
def getEngines(self):
ddebug("getEngines()")
dirs = [d for d in os.listdir(self.enginesPath) if os.path.isdir(
os.path.join(self.enginesPath, d))]
return OK(dirs)
def loadEngines(self, engines=[]):
return OK([self.loadEngine(x) for x in engines])
def loadEngine(self, name):
enginePath = os.path.join(self.enginesPath, name)
if not os.path.isdir(enginePath):
return Fail(EngineNotFoundError("Engine \"%s\" does not exist." % name))
else:
return OK(Engine(name, enginePath=enginePath, core=self))
def createEngine(self, name, config=None, profile=None):
enginePath = os.path.join(self.enginesPath, name)
newEngine = Engine(name, enginePath=enginePath, core=self)
return newEngine.create(config=config, profile=profile)
def removeEngine(self, name):
return self.loadEngine(name) \
>> Engine.remove
# -- Driver handling
def getDrivers(self):
return self.config.get('drivers', [])
def validateDriver(self, driver):
if driver in self.getDrivers():
return OK(driver)
return Fail(ValueError("Driver '%s' is not a valid driver."))
def getDriver(self, name):
cls = {
'virtualbox': VirtualBoxDriver
        }.get(name, VirtualBoxDriver)
driver = cls(core=self)
return driver
# -- Link handling
def getLink(self, type="ssh"):
link = Link(keyFile=self.getInsecureKeyFile(), keyFormat='RSA')
return link
# -- Database
def getDB(self):
return self.db
def initializeDB(self):
db = DB(self.dbFile)
db = db.initialize()
if db.isFail():
return db
self.db = db.getOK()
return OK(self.db)
# -- Box handling
def readBox(self, boxstring):
return Box.parseBoxString(boxstring) \
.map(lambda p: Box(core=self, **p))
def pullBox(self, box):
return box.fetch()
def removeBox(self, box):
return box.delete()
def getBoxes(self):
return self.getDB().getBoxRecords() \
.mapM(lambda r: OK(Box(self, r.get('name'), r.get('version'), r.get('namespace'), r.get('registry'), r.get('boxstring'), r.get('archiveSHA1'))))
def getInsecureKeyFile(self):
return getSupportFile('support/substance_insecure')
def getInsecurePubKeyFile(self):
return getSupportFile('support/substance_insecure.pub')
| apache-2.0 | 7,815,226,040,247,544,000 | 29.266393 | 156 | 0.636696 | false |
Azure/azure-sdk-for-python | sdk/deviceupdate/azure-iot-deviceupdate/azure/iot/deviceupdate/models/_models.py | 1 | 43651 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import msrest.serialization
class AccessCondition(msrest.serialization.Model):
"""Parameter group.
:param if_none_match: Defines the If-None-Match condition. The operation will be performed only
if the ETag on the server does not match this value.
:type if_none_match: str
"""
_attribute_map = {
'if_none_match': {'key': 'If-None-Match', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AccessCondition, self).__init__(**kwargs)
self.if_none_match = kwargs.get('if_none_match', None)
class Compatibility(msrest.serialization.Model):
"""Update compatibility information.
All required parameters must be populated in order to send to Azure.
:param device_manufacturer: Required. The manufacturer of device the update is compatible with.
:type device_manufacturer: str
:param device_model: Required. The model of device the update is compatible with.
:type device_model: str
"""
_validation = {
'device_manufacturer': {'required': True},
'device_model': {'required': True},
}
_attribute_map = {
'device_manufacturer': {'key': 'deviceManufacturer', 'type': 'str'},
'device_model': {'key': 'deviceModel', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Compatibility, self).__init__(**kwargs)
self.device_manufacturer = kwargs['device_manufacturer']
self.device_model = kwargs['device_model']
class Deployment(msrest.serialization.Model):
"""Deployment metadata.
All required parameters must be populated in order to send to Azure.
:param deployment_id: Required. Gets or sets the deployment identifier.
:type deployment_id: str
:param deployment_type: Required. Gets or sets the deployment type. Possible values include:
"Complete", "Download", "Install".
:type deployment_type: str or ~azure.iot.deviceupdate.models.DeploymentType
:param device_class_id: Gets or sets the device class identifier.
:type device_class_id: str
:param start_date_time: Required. Gets or sets the Deployment start datetime.
:type start_date_time: ~datetime.datetime
:param device_group_type: Required. Gets or sets the device group type. Possible values
include: "All", "Devices", "DeviceGroupDefinitions".
:type device_group_type: str or ~azure.iot.deviceupdate.models.DeviceGroupType
:param device_group_definition: Required. Gets or sets the device group definition.
:type device_group_definition: list[str]
:param update_id: Required. Update identity.
:type update_id: ~azure.iot.deviceupdate.models.UpdateId
:param is_canceled: Boolean flag indicating whether the deployment was canceled.
:type is_canceled: bool
:param is_retried: Boolean flag indicating whether the deployment has been retried.
:type is_retried: bool
:param is_completed: Boolean flag indicating whether the deployment was completed.
:type is_completed: bool
"""
_validation = {
'deployment_id': {'required': True},
'deployment_type': {'required': True},
'start_date_time': {'required': True},
'device_group_type': {'required': True},
'device_group_definition': {'required': True},
'update_id': {'required': True},
}
_attribute_map = {
'deployment_id': {'key': 'deploymentId', 'type': 'str'},
'deployment_type': {'key': 'deploymentType', 'type': 'str'},
'device_class_id': {'key': 'deviceClassId', 'type': 'str'},
'start_date_time': {'key': 'startDateTime', 'type': 'iso-8601'},
'device_group_type': {'key': 'deviceGroupType', 'type': 'str'},
'device_group_definition': {'key': 'deviceGroupDefinition', 'type': '[str]'},
'update_id': {'key': 'updateId', 'type': 'UpdateId'},
'is_canceled': {'key': 'isCanceled', 'type': 'bool'},
'is_retried': {'key': 'isRetried', 'type': 'bool'},
'is_completed': {'key': 'isCompleted', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(Deployment, self).__init__(**kwargs)
self.deployment_id = kwargs['deployment_id']
self.deployment_type = kwargs['deployment_type']
self.device_class_id = kwargs.get('device_class_id', None)
self.start_date_time = kwargs['start_date_time']
self.device_group_type = kwargs['device_group_type']
self.device_group_definition = kwargs['device_group_definition']
self.update_id = kwargs['update_id']
self.is_canceled = kwargs.get('is_canceled', None)
self.is_retried = kwargs.get('is_retried', None)
self.is_completed = kwargs.get('is_completed', None)
class DeploymentDeviceState(msrest.serialization.Model):
"""Deployment device status.
All required parameters must be populated in order to send to Azure.
:param device_id: Required. Device identity.
:type device_id: str
:param retry_count: Required. The number of times this deployment has been retried on this
device.
:type retry_count: int
:param moved_on_to_new_deployment: Required. Boolean flag indicating whether this device is in
a newer deployment and can no longer retry this deployment.
:type moved_on_to_new_deployment: bool
:param device_state: Required. Deployment device state. Possible values include: "Succeeded",
"InProgress", "Failed", "Canceled", "Incompatible".
:type device_state: str or ~azure.iot.deviceupdate.models.DeviceDeploymentState
"""
_validation = {
'device_id': {'required': True},
'retry_count': {'required': True},
'moved_on_to_new_deployment': {'required': True},
'device_state': {'required': True},
}
_attribute_map = {
'device_id': {'key': 'deviceId', 'type': 'str'},
'retry_count': {'key': 'retryCount', 'type': 'int'},
'moved_on_to_new_deployment': {'key': 'movedOnToNewDeployment', 'type': 'bool'},
'device_state': {'key': 'deviceState', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DeploymentDeviceState, self).__init__(**kwargs)
self.device_id = kwargs['device_id']
self.retry_count = kwargs['retry_count']
self.moved_on_to_new_deployment = kwargs['moved_on_to_new_deployment']
self.device_state = kwargs['device_state']
class DeploymentDeviceStatesFilter(msrest.serialization.Model):
"""Deployment device state filter.
:param device_id: Device Identifier.
:type device_id: str
:param device_state: The deployment device state. Possible values include: "NotStarted",
"Incompatible", "AlreadyInDeployment", "Canceled", "InProgress", "Failed", "Succeeded".
:type device_state: str or ~azure.iot.deviceupdate.models.DeviceState
"""
_attribute_map = {
'device_id': {'key': 'deviceId', 'type': 'str'},
'device_state': {'key': 'deviceState', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DeploymentDeviceStatesFilter, self).__init__(**kwargs)
self.device_id = kwargs.get('device_id', None)
self.device_state = kwargs.get('device_state', None)
class DeploymentFilter(msrest.serialization.Model):
"""Deployment filter.
:param provider: Update provider.
:type provider: str
:param name: Update name.
:type name: str
:param version: Update version.
:type version: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DeploymentFilter, self).__init__(**kwargs)
self.provider = kwargs.get('provider', None)
self.name = kwargs.get('name', None)
self.version = kwargs.get('version', None)
class DeploymentStatus(msrest.serialization.Model):
"""Deployment status metadata.
All required parameters must be populated in order to send to Azure.
:param deployment_state: Required. Gets or sets the state of the deployment. Possible values
include: "Active", "Superseded", "Canceled".
:type deployment_state: str or ~azure.iot.deviceupdate.models.DeploymentState
:param total_devices: Gets or sets the total number of devices in the deployment.
:type total_devices: int
:param devices_incompatible_count: Gets or sets the number of incompatible devices in the
deployment.
:type devices_incompatible_count: int
:param devices_in_progress_count: Gets or sets the number of devices that are currently in
deployment.
:type devices_in_progress_count: int
:param devices_completed_failed_count: Gets or sets the number of devices that have completed
deployment with a failure.
:type devices_completed_failed_count: int
:param devices_completed_succeeded_count: Gets or sets the number of devices which have
successfully completed deployment.
:type devices_completed_succeeded_count: int
:param devices_canceled_count: Gets or sets the number of devices which have had their
deployment canceled.
:type devices_canceled_count: int
"""
_validation = {
'deployment_state': {'required': True},
}
_attribute_map = {
'deployment_state': {'key': 'deploymentState', 'type': 'str'},
'total_devices': {'key': 'totalDevices', 'type': 'int'},
'devices_incompatible_count': {'key': 'devicesIncompatibleCount', 'type': 'int'},
'devices_in_progress_count': {'key': 'devicesInProgressCount', 'type': 'int'},
'devices_completed_failed_count': {'key': 'devicesCompletedFailedCount', 'type': 'int'},
'devices_completed_succeeded_count': {'key': 'devicesCompletedSucceededCount', 'type': 'int'},
'devices_canceled_count': {'key': 'devicesCanceledCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(DeploymentStatus, self).__init__(**kwargs)
self.deployment_state = kwargs['deployment_state']
self.total_devices = kwargs.get('total_devices', None)
self.devices_incompatible_count = kwargs.get('devices_incompatible_count', None)
self.devices_in_progress_count = kwargs.get('devices_in_progress_count', None)
self.devices_completed_failed_count = kwargs.get('devices_completed_failed_count', None)
self.devices_completed_succeeded_count = kwargs.get('devices_completed_succeeded_count', None)
self.devices_canceled_count = kwargs.get('devices_canceled_count', None)
class Device(msrest.serialization.Model):
"""Device metadata.
All required parameters must be populated in order to send to Azure.
:param device_id: Required. Device identity.
:type device_id: str
:param device_class_id: Required. Device class identity.
:type device_class_id: str
:param manufacturer: Required. Device manufacturer.
:type manufacturer: str
:param model: Required. Device model.
:type model: str
:param group_id: Device group identity.
:type group_id: str
:param last_attempted_update_id: Update identity.
:type last_attempted_update_id: ~azure.iot.deviceupdate.models.UpdateId
:param deployment_status: State of the device in its last deployment. Possible values include:
"Succeeded", "InProgress", "Failed", "Canceled", "Incompatible".
:type deployment_status: str or ~azure.iot.deviceupdate.models.DeviceDeploymentState
:param installed_update_id: Update identity.
:type installed_update_id: ~azure.iot.deviceupdate.models.UpdateId
:param on_latest_update: Required. Boolean flag indicating whether the latest update is
installed on the device.
:type on_latest_update: bool
:param last_deployment_id: The deployment identifier for the last deployment to the device.
:type last_deployment_id: str
"""
_validation = {
'device_id': {'required': True},
'device_class_id': {'required': True},
'manufacturer': {'required': True},
'model': {'required': True},
'on_latest_update': {'required': True},
}
_attribute_map = {
'device_id': {'key': 'deviceId', 'type': 'str'},
'device_class_id': {'key': 'deviceClassId', 'type': 'str'},
'manufacturer': {'key': 'manufacturer', 'type': 'str'},
'model': {'key': 'model', 'type': 'str'},
'group_id': {'key': 'groupId', 'type': 'str'},
'last_attempted_update_id': {'key': 'lastAttemptedUpdateId', 'type': 'UpdateId'},
'deployment_status': {'key': 'deploymentStatus', 'type': 'str'},
'installed_update_id': {'key': 'installedUpdateId', 'type': 'UpdateId'},
'on_latest_update': {'key': 'onLatestUpdate', 'type': 'bool'},
'last_deployment_id': {'key': 'lastDeploymentId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Device, self).__init__(**kwargs)
self.device_id = kwargs['device_id']
self.device_class_id = kwargs['device_class_id']
self.manufacturer = kwargs['manufacturer']
self.model = kwargs['model']
self.group_id = kwargs.get('group_id', None)
self.last_attempted_update_id = kwargs.get('last_attempted_update_id', None)
self.deployment_status = kwargs.get('deployment_status', None)
self.installed_update_id = kwargs.get('installed_update_id', None)
self.on_latest_update = kwargs['on_latest_update']
self.last_deployment_id = kwargs.get('last_deployment_id', None)
class DeviceClass(msrest.serialization.Model):
"""Device class metadata.
All required parameters must be populated in order to send to Azure.
:param device_class_id: Required. The device class identifier.
:type device_class_id: str
:param manufacturer: Required. Device manufacturer.
:type manufacturer: str
:param model: Required. Device model.
:type model: str
:param best_compatible_update_id: Required. Update identity.
:type best_compatible_update_id: ~azure.iot.deviceupdate.models.UpdateId
"""
_validation = {
'device_class_id': {'required': True},
'manufacturer': {'required': True},
'model': {'required': True},
'best_compatible_update_id': {'required': True},
}
_attribute_map = {
'device_class_id': {'key': 'deviceClassId', 'type': 'str'},
'manufacturer': {'key': 'manufacturer', 'type': 'str'},
'model': {'key': 'model', 'type': 'str'},
'best_compatible_update_id': {'key': 'bestCompatibleUpdateId', 'type': 'UpdateId'},
}
def __init__(
self,
**kwargs
):
super(DeviceClass, self).__init__(**kwargs)
self.device_class_id = kwargs['device_class_id']
self.manufacturer = kwargs['manufacturer']
self.model = kwargs['model']
self.best_compatible_update_id = kwargs['best_compatible_update_id']
class DeviceFilter(msrest.serialization.Model):
"""Operation status filter.
:param group_id: Device group identifier.
:type group_id: str
"""
_attribute_map = {
'group_id': {'key': 'groupId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DeviceFilter, self).__init__(**kwargs)
self.group_id = kwargs.get('group_id', None)
class DeviceTag(msrest.serialization.Model):
"""Device tag properties.
All required parameters must be populated in order to send to Azure.
:param tag_name: Required. Tag name.
:type tag_name: str
:param device_count: Required. Number of devices with this tag.
:type device_count: int
"""
_validation = {
'tag_name': {'required': True},
'device_count': {'required': True},
}
_attribute_map = {
'tag_name': {'key': 'tagName', 'type': 'str'},
'device_count': {'key': 'deviceCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(DeviceTag, self).__init__(**kwargs)
self.tag_name = kwargs['tag_name']
self.device_count = kwargs['device_count']
class Error(msrest.serialization.Model):
"""Error details.
All required parameters must be populated in order to send to Azure.
:param code: Required. Server defined error code.
:type code: str
:param message: Required. A human-readable representation of the error.
:type message: str
:param target: The target of the error.
:type target: str
:param details: An array of errors that led to the reported error.
:type details: list[~azure.iot.deviceupdate.models.Error]
:param innererror: An object containing more specific information than the current object about
the error.
:type innererror: ~azure.iot.deviceupdate.models.InnerError
:param occurred_date_time: Date and time in UTC when the error occurred.
:type occurred_date_time: ~datetime.datetime
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[Error]'},
'innererror': {'key': 'innererror', 'type': 'InnerError'},
'occurred_date_time': {'key': 'occurredDateTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(Error, self).__init__(**kwargs)
self.code = kwargs['code']
self.message = kwargs['message']
self.target = kwargs.get('target', None)
self.details = kwargs.get('details', None)
self.innererror = kwargs.get('innererror', None)
self.occurred_date_time = kwargs.get('occurred_date_time', None)
class File(msrest.serialization.Model):
"""Update file metadata.
All required parameters must be populated in order to send to Azure.
:param file_id: Required. File identity, generated by server at import time.
:type file_id: str
:param file_name: Required. File name.
:type file_name: str
:param size_in_bytes: Required. File size in number of bytes.
:type size_in_bytes: long
:param hashes: Required. Mapping of hashing algorithm to base64 encoded hash values.
:type hashes: dict[str, str]
:param mime_type: File MIME type.
:type mime_type: str
:param etag: File ETag.
:type etag: str
"""
_validation = {
'file_id': {'required': True},
'file_name': {'required': True},
'size_in_bytes': {'required': True},
'hashes': {'required': True},
}
_attribute_map = {
'file_id': {'key': 'fileId', 'type': 'str'},
'file_name': {'key': 'fileName', 'type': 'str'},
'size_in_bytes': {'key': 'sizeInBytes', 'type': 'long'},
'hashes': {'key': 'hashes', 'type': '{str}'},
'mime_type': {'key': 'mimeType', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(File, self).__init__(**kwargs)
self.file_id = kwargs['file_id']
self.file_name = kwargs['file_name']
self.size_in_bytes = kwargs['size_in_bytes']
self.hashes = kwargs['hashes']
self.mime_type = kwargs.get('mime_type', None)
self.etag = kwargs.get('etag', None)
class FileImportMetadata(msrest.serialization.Model):
"""Metadata describing an update file.
All required parameters must be populated in order to send to Azure.
:param filename: Required. Update file name as specified inside import manifest.
:type filename: str
:param url: Required. Azure Blob location from which the update file can be downloaded by
Device Update for IoT Hub. This is typically a read-only SAS-protected blob URL with an
expiration set to at least 4 hours.
:type url: str
"""
_validation = {
'filename': {'required': True},
'url': {'required': True},
}
_attribute_map = {
'filename': {'key': 'filename', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(FileImportMetadata, self).__init__(**kwargs)
self.filename = kwargs['filename']
self.url = kwargs['url']
class Group(msrest.serialization.Model):
"""Group details.
All required parameters must be populated in order to send to Azure.
:param group_id: Required. Group identity.
:type group_id: str
:param group_type: Required. Group type. Possible values include: "IoTHubTag".
:type group_type: str or ~azure.iot.deviceupdate.models.GroupType
:param tags: Required. A set of tags. IoT Hub tags.
:type tags: list[str]
:param created_date_time: Required. Date and time when the update was created.
:type created_date_time: str
:param device_count: The number of devices in the group.
:type device_count: int
"""
_validation = {
'group_id': {'required': True},
'group_type': {'required': True},
'tags': {'required': True},
'created_date_time': {'required': True},
}
_attribute_map = {
'group_id': {'key': 'groupId', 'type': 'str'},
'group_type': {'key': 'groupType', 'type': 'str'},
'tags': {'key': 'tags', 'type': '[str]'},
'created_date_time': {'key': 'createdDateTime', 'type': 'str'},
'device_count': {'key': 'deviceCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(Group, self).__init__(**kwargs)
self.group_id = kwargs['group_id']
self.group_type = kwargs['group_type']
self.tags = kwargs['tags']
self.created_date_time = kwargs['created_date_time']
self.device_count = kwargs.get('device_count', None)
class GroupBestUpdatesFilter(msrest.serialization.Model):
"""Group best updates filter.
:param provider: Update provider.
:type provider: str
:param name: Update name.
:type name: str
:param version: Update version.
:type version: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(GroupBestUpdatesFilter, self).__init__(**kwargs)
self.provider = kwargs.get('provider', None)
self.name = kwargs.get('name', None)
self.version = kwargs.get('version', None)
class ImportManifestMetadata(msrest.serialization.Model):
"""Metadata describing the import manifest, a document which describes the files and other metadata about an update version.
All required parameters must be populated in order to send to Azure.
:param url: Required. Azure Blob location from which the import manifest can be downloaded by
Device Update for IoT Hub. This is typically a read-only SAS-protected blob URL with an
expiration set to at least 4 hours.
:type url: str
:param size_in_bytes: Required. File size in number of bytes.
:type size_in_bytes: long
:param hashes: Required. A JSON object containing the hash(es) of the file. At least SHA256
hash is required. This object can be thought of as a set of key-value pairs where the key is
the hash algorithm, and the value is the hash of the file calculated using that algorithm.
:type hashes: dict[str, str]
"""
_validation = {
'url': {'required': True},
'size_in_bytes': {'required': True},
'hashes': {'required': True},
}
_attribute_map = {
'url': {'key': 'url', 'type': 'str'},
'size_in_bytes': {'key': 'sizeInBytes', 'type': 'long'},
'hashes': {'key': 'hashes', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(ImportManifestMetadata, self).__init__(**kwargs)
self.url = kwargs['url']
self.size_in_bytes = kwargs['size_in_bytes']
self.hashes = kwargs['hashes']
class ImportUpdateInput(msrest.serialization.Model):
"""Import update input metadata.
All required parameters must be populated in order to send to Azure.
:param import_manifest: Required. Import manifest metadata like source URL, file size/hashes,
etc.
:type import_manifest: ~azure.iot.deviceupdate.models.ImportManifestMetadata
:param files: Required. One or more update file properties like filename and source URL.
:type files: list[~azure.iot.deviceupdate.models.FileImportMetadata]
"""
_validation = {
'import_manifest': {'required': True},
'files': {'required': True, 'min_items': 1},
}
_attribute_map = {
'import_manifest': {'key': 'importManifest', 'type': 'ImportManifestMetadata'},
'files': {'key': 'files', 'type': '[FileImportMetadata]'},
}
def __init__(
self,
**kwargs
):
super(ImportUpdateInput, self).__init__(**kwargs)
self.import_manifest = kwargs['import_manifest']
self.files = kwargs['files']
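# Illustrative construction of an import request from the models above; the
# URL, size and hash values are placeholders, not real data:
#
#     update_input = ImportUpdateInput(
#         import_manifest=ImportManifestMetadata(
#             url="https://example.blob.core.windows.net/updates/manifest.json?sv=...",
#             size_in_bytes=453,
#             hashes={"sha256": "<base64-encoded hash>"},
#         ),
#         files=[FileImportMetadata(
#             filename="firmware.bin",
#             url="https://example.blob.core.windows.net/updates/firmware.bin?sv=...",
#         )],
#     )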
class InnerError(msrest.serialization.Model):
"""An object containing more specific information than the current object about the error.
All required parameters must be populated in order to send to Azure.
:param code: Required. A more specific error code than what was provided by the containing
error.
:type code: str
:param message: A human-readable representation of the error.
:type message: str
:param error_detail: The internal error or exception message.
:type error_detail: str
:param inner_error: An object containing more specific information than the current object
about the error.
:type inner_error: ~azure.iot.deviceupdate.models.InnerError
"""
_validation = {
'code': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'error_detail': {'key': 'errorDetail', 'type': 'str'},
'inner_error': {'key': 'innerError', 'type': 'InnerError'},
}
def __init__(
self,
**kwargs
):
super(InnerError, self).__init__(**kwargs)
self.code = kwargs['code']
self.message = kwargs.get('message', None)
self.error_detail = kwargs.get('error_detail', None)
self.inner_error = kwargs.get('inner_error', None)
class Operation(msrest.serialization.Model):
"""Operation metadata.
All required parameters must be populated in order to send to Azure.
:param operation_id: Required. Operation Id.
:type operation_id: str
:param status: Required. Operation status. Possible values include: "Undefined", "NotStarted",
"Running", "Succeeded", "Failed".
:type status: str or ~azure.iot.deviceupdate.models.OperationStatus
:param update_id: The identity of update being imported or deleted. For import, this property
will only be populated after import manifest is processed successfully.
:type update_id: ~azure.iot.deviceupdate.models.UpdateId
:param resource_location: Location of the imported update when operation is successful.
:type resource_location: str
:param error: Operation error encountered, if any.
:type error: ~azure.iot.deviceupdate.models.Error
:param trace_id: Operation correlation identity that can used by Microsoft Support for
troubleshooting.
:type trace_id: str
:param last_action_date_time: Required. Date and time in UTC when the operation status was last
updated.
:type last_action_date_time: ~datetime.datetime
:param created_date_time: Required. Date and time in UTC when the operation was created.
:type created_date_time: ~datetime.datetime
:param etag: Operation ETag.
:type etag: str
"""
_validation = {
'operation_id': {'required': True},
'status': {'required': True},
'last_action_date_time': {'required': True},
'created_date_time': {'required': True},
}
_attribute_map = {
'operation_id': {'key': 'operationId', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'update_id': {'key': 'updateId', 'type': 'UpdateId'},
'resource_location': {'key': 'resourceLocation', 'type': 'str'},
'error': {'key': 'error', 'type': 'Error'},
'trace_id': {'key': 'traceId', 'type': 'str'},
'last_action_date_time': {'key': 'lastActionDateTime', 'type': 'iso-8601'},
'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.operation_id = kwargs['operation_id']
self.status = kwargs['status']
self.update_id = kwargs.get('update_id', None)
self.resource_location = kwargs.get('resource_location', None)
self.error = kwargs.get('error', None)
self.trace_id = kwargs.get('trace_id', None)
self.last_action_date_time = kwargs['last_action_date_time']
self.created_date_time = kwargs['created_date_time']
self.etag = kwargs.get('etag', None)
class OperationFilter(msrest.serialization.Model):
"""Operation status filter.
:param status: Operation status filter. Possible values include: "Running", "NotStarted".
:type status: str or ~azure.iot.deviceupdate.models.OperationFilterStatus
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationFilter, self).__init__(**kwargs)
self.status = kwargs.get('status', None)
class PageableListOfDeploymentDeviceStates(msrest.serialization.Model):
"""The list of deployment device states.
:param value: The collection of pageable items.
:type value: list[~azure.iot.deviceupdate.models.DeploymentDeviceState]
:param next_link: The link to the next page of items.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[DeploymentDeviceState]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PageableListOfDeploymentDeviceStates, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class PageableListOfDeployments(msrest.serialization.Model):
"""The list of deployments.
:param value: The collection of pageable items.
:type value: list[~azure.iot.deviceupdate.models.Deployment]
:param next_link: The link to the next page of items.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Deployment]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PageableListOfDeployments, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class PageableListOfDeviceClasses(msrest.serialization.Model):
"""The list of device classes.
:param value: The collection of pageable items.
:type value: list[~azure.iot.deviceupdate.models.DeviceClass]
:param next_link: The link to the next page of items.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[DeviceClass]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PageableListOfDeviceClasses, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class PageableListOfDevices(msrest.serialization.Model):
"""The list of devices.
:param value: The collection of pageable items.
:type value: list[~azure.iot.deviceupdate.models.Device]
:param next_link: The link to the next page of items.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Device]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PageableListOfDevices, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class PageableListOfDeviceTags(msrest.serialization.Model):
"""The list of device tags.
:param value: The collection of pageable items.
:type value: list[~azure.iot.deviceupdate.models.DeviceTag]
:param next_link: The link to the next page of items.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[DeviceTag]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PageableListOfDeviceTags, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class PageableListOfGroups(msrest.serialization.Model):
"""The list of groups.
:param value: The collection of pageable items.
:type value: list[~azure.iot.deviceupdate.models.Group]
:param next_link: The link to the next page of items.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Group]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PageableListOfGroups, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class PageableListOfOperations(msrest.serialization.Model):
"""The list of operations with server paging support.
:param value: The collection of pageable items.
:type value: list[~azure.iot.deviceupdate.models.Operation]
:param next_link: The link to the next page of items.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PageableListOfOperations, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class PageableListOfStrings(msrest.serialization.Model):
"""The list of strings with server paging support.
:param value: The collection of pageable items.
:type value: list[str]
:param next_link: The link to the next page of items.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[str]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PageableListOfStrings, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class PageableListOfUpdatableDevices(msrest.serialization.Model):
"""The list of updatable devices.
:param value: The collection of pageable items.
:type value: list[~azure.iot.deviceupdate.models.UpdatableDevices]
:param next_link: The link to the next page of items.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[UpdatableDevices]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PageableListOfUpdatableDevices, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class PageableListOfUpdateIds(msrest.serialization.Model):
"""The list of update identities.
:param value: The collection of pageable items.
:type value: list[~azure.iot.deviceupdate.models.UpdateId]
:param next_link: The link to the next page of items.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[UpdateId]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PageableListOfUpdateIds, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class UpdatableDevices(msrest.serialization.Model):
"""Update identifier and the number of devices for which the update is applicable.
All required parameters must be populated in order to send to Azure.
:param update_id: Required. Update identity.
:type update_id: ~azure.iot.deviceupdate.models.UpdateId
:param device_count: Required. Total number of devices for which the update is applicable.
:type device_count: int
"""
_validation = {
'update_id': {'required': True},
'device_count': {'required': True},
}
_attribute_map = {
'update_id': {'key': 'updateId', 'type': 'UpdateId'},
'device_count': {'key': 'deviceCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(UpdatableDevices, self).__init__(**kwargs)
self.update_id = kwargs['update_id']
self.device_count = kwargs['device_count']
class Update(msrest.serialization.Model):
"""Update metadata.
All required parameters must be populated in order to send to Azure.
:param update_id: Required. Update identity.
:type update_id: ~azure.iot.deviceupdate.models.UpdateId
:param update_type: Required. Update type.
:type update_type: str
:param installed_criteria: Required. String interpreted by Device Update client to determine if
the update is installed on the device.
:type installed_criteria: str
:param compatibility: Required. List of update compatibility information.
:type compatibility: list[~azure.iot.deviceupdate.models.Compatibility]
:param manifest_version: Required. Schema version of manifest used to import the update.
:type manifest_version: str
:param imported_date_time: Required. Date and time in UTC when the update was imported.
:type imported_date_time: ~datetime.datetime
:param created_date_time: Required. Date and time in UTC when the update was created.
:type created_date_time: ~datetime.datetime
:param etag: Update ETag.
:type etag: str
"""
_validation = {
'update_id': {'required': True},
'update_type': {'required': True},
'installed_criteria': {'required': True},
'compatibility': {'required': True, 'min_items': 1},
'manifest_version': {'required': True},
'imported_date_time': {'required': True},
'created_date_time': {'required': True},
}
_attribute_map = {
'update_id': {'key': 'updateId', 'type': 'UpdateId'},
'update_type': {'key': 'updateType', 'type': 'str'},
'installed_criteria': {'key': 'installedCriteria', 'type': 'str'},
'compatibility': {'key': 'compatibility', 'type': '[Compatibility]'},
'manifest_version': {'key': 'manifestVersion', 'type': 'str'},
'imported_date_time': {'key': 'importedDateTime', 'type': 'iso-8601'},
'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Update, self).__init__(**kwargs)
self.update_id = kwargs['update_id']
self.update_type = kwargs['update_type']
self.installed_criteria = kwargs['installed_criteria']
self.compatibility = kwargs['compatibility']
self.manifest_version = kwargs['manifest_version']
self.imported_date_time = kwargs['imported_date_time']
self.created_date_time = kwargs['created_date_time']
self.etag = kwargs.get('etag', None)
class UpdateCompliance(msrest.serialization.Model):
"""Update compliance information.
All required parameters must be populated in order to send to Azure.
:param total_device_count: Required. Total number of devices.
:type total_device_count: int
:param on_latest_update_device_count: Required. Number of devices on the latest update.
:type on_latest_update_device_count: int
:param new_updates_available_device_count: Required. Number of devices with a newer update
available.
:type new_updates_available_device_count: int
:param updates_in_progress_device_count: Required. Number of devices with update in-progress.
:type updates_in_progress_device_count: int
"""
_validation = {
'total_device_count': {'required': True},
'on_latest_update_device_count': {'required': True},
'new_updates_available_device_count': {'required': True},
'updates_in_progress_device_count': {'required': True},
}
_attribute_map = {
'total_device_count': {'key': 'totalDeviceCount', 'type': 'int'},
'on_latest_update_device_count': {'key': 'onLatestUpdateDeviceCount', 'type': 'int'},
'new_updates_available_device_count': {'key': 'newUpdatesAvailableDeviceCount', 'type': 'int'},
'updates_in_progress_device_count': {'key': 'updatesInProgressDeviceCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(UpdateCompliance, self).__init__(**kwargs)
self.total_device_count = kwargs['total_device_count']
self.on_latest_update_device_count = kwargs['on_latest_update_device_count']
self.new_updates_available_device_count = kwargs['new_updates_available_device_count']
self.updates_in_progress_device_count = kwargs['updates_in_progress_device_count']
class UpdateId(msrest.serialization.Model):
"""Update identifier.
All required parameters must be populated in order to send to Azure.
:param provider: Required. Update provider.
:type provider: str
:param name: Required. Update name.
:type name: str
:param version: Required. Update version.
:type version: str
"""
_validation = {
'provider': {'required': True},
'name': {'required': True},
'version': {'required': True},
}
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UpdateId, self).__init__(**kwargs)
self.provider = kwargs['provider']
self.name = kwargs['name']
self.version = kwargs['version']
| mit | -3,180,025,066,798,150,700 | 34.838259 | 128 | 0.619619 | false |
kozmonaut/django-blog-unchained | django_blog/settings.py | 1 | 3990 | """ Django settings for django_blog project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6(bg@u&97mtybf2^p#z+cvdy3h6ok=0k_cdxb^03&eel08d_w2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
# Needed for flatpages
SITE_ID = 1
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'south',
'blog',
'django.contrib.sites',
'django.contrib.flatpages',
#'django_comments',
'ckeditor',
'disqus'
)
# Disqus
DISQUS_API_KEY = 'FOOBARFOOBARFOOBARFOOBARFOOBARF'
DISQUS_WEBSITE_SHORTNAME = 'foobar'
# Template directory
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
PROJECT_APPS = ['blog']
# Paths for storing media
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
CKEDITOR_UPLOAD_PATH = "uploads/"
# Ckeditor plugin
CKEDITOR_JQUERY_URL = '//ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js'
# Ckeditor plugins
CKEDITOR_CONFIGS = {
'default': {
'toolbar': 'CMS',
'toolbar_CMS': [
{
'name': 'basicstyles',
'groups': ['basicstyles', 'cleanup'],
'items': ['Bold', 'Italic', 'Underline', '-', 'RemoveFormat']
},
{
'name': 'paragraph',
'groups': ['list', 'indent', 'blocks'],
'items': ['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'Blockquote']
},
{
'name': 'links',
'items': ['Link', 'Unlink']
},
{
'name': 'insert',
'items': ['Image', 'HorizontalRule', 'Table', 'Iframe', ]
},
{
'name': 'colors',
'items': ['TextColor', 'BGColor']
},
{
'name': 'styles',
'items' : [ 'Styles','Format','Font','FontSize' ]
},
{
'name': 'document',
'items' : [ 'Source','-','Save','NewPage','DocProps','Preview','Print','-','Templates' ]
},
{
'name': 'youtube',
'items': ['youtube',]
},
],
},
}
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware', # For flatpages
)
ROOT_URLCONF = 'django_blog.urls'
WSGI_APPLICATION = 'django_blog.wsgi.application'
# Database settings
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Heroku config
# Parse database configuration from $DATABASE_URL
#import dj_database_url
#DATABASES['default'] = dj_database_url.config()
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'
# Static asset configuration
STATIC_ROOT = 'staticfiles'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'blog/static'),
)
# Memcached config
CACHE_BACKEND = 'memcached://127.0.0.1:11211/'
| gpl-2.0 | -730,967,659,484,838,400 | 23.9375 | 101 | 0.617794 | false |
initrunlevel0/sns | python/nltk/word_classes.py | 1 | 1638 | from __future__ import division
import nltk, re, pprint
from urllib import urlopen
# WORD CLASSES
# Learning how to classify words (nouns, adjectives, and so on)
# POS Tagging
# A mechanism for tagging words with a particular word class during NLP processing
# Each word is assigned a word class, i.e. a lexical category
# Tagger
text = nltk.word_tokenize("And now for something completely different")
tags = nltk.pos_tag(text)
print tags
print "#######"
# Key info:
# Tagging in NLTK uses a two-item tuple
# ('fly', 'NN')
# [('And', 'CC'), ('now', 'RB'), ('for', 'IN'), ('something', 'NN'), ('completely', 'RB'), ('different', 'JJ')]
# Explanation of the result
# * CC: Coordinating Conjunction
# * RB: Adverbs
# * IN: Preposition
# * NN: Noun
# * JJ: Adjective
# In English, the same written form (whether homophonous or not) can sometimes belong to different word classes
text = nltk.word_tokenize("They refuse to permit us to obtain the refuse permit")
tags = nltk.pos_tag(text);
print tags
print "#######"
# [('They', 'PRP'), ('refuse', 'VBP'), ('to', 'TO'), ('permit', 'VB'), ('us', 'PRP'), ('to', 'TO'), ('obtain', 'VB'), ('the', 'DT'), ('refuse', 'NN'), ('permit', 'NN')]
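# The tag abbreviations can be looked up with NLTK's built-in help utility
# (this assumes the 'tagsets' data package has already been downloaded):
nltk.help.upenn_tagset('RB')
print "#######"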
# What is this classification useful for? See the following example
text = nltk.Text(word.lower() for word in nltk.corpus.brown.words())
text.similar('woman')
# Result:
# man day time year car moment world family house boy child country job
# state girl place war way case question
# Starting from the word woman (a noun), we can find words whose usage is similar to woman (N),
# as shown in the result above.
| mit | -924,523,288,895,908,100 | 29.333333 | 168 | 0.681929 | false |
revoer/keystone-8.0.0 | swift/common/request_helpers.py | 1 | 22483 | # Copyright (c) 2010-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous utility functions for use in generating responses.
Why not swift.common.utils, you ask? Because this way we can import things
from swob in here without creating circular imports.
"""
import hashlib
import itertools
import sys
import time
import six
from six.moves.urllib.parse import unquote
from swift import gettext_ as _
from swift.common.storage_policy import POLICIES
from swift.common.constraints import FORMAT2CONTENT_TYPE
from swift.common.exceptions import ListingIterError, SegmentError
from swift.common.http import is_success
from swift.common.swob import HTTPBadRequest, HTTPNotAcceptable, \
HTTPServiceUnavailable, Range, is_chunked
from swift.common.utils import split_path, validate_device_partition, \
close_if_possible, maybe_multipart_byteranges_to_document_iters, \
multipart_byteranges_to_document_iters, parse_content_type, \
parse_content_range
from swift.common.wsgi import make_subrequest
def get_param(req, name, default=None):
"""
Get parameters from an HTTP request ensuring proper handling UTF-8
encoding.
:param req: request object
:param name: parameter name
:param default: result to return if the parameter is not found
:returns: HTTP request parameter value
(as UTF-8 encoded str, not unicode object)
:raises: HTTPBadRequest if param not valid UTF-8 byte sequence
"""
value = req.params.get(name, default)
if value and not isinstance(value, six.text_type):
try:
value.decode('utf8') # Ensure UTF8ness
except UnicodeDecodeError:
raise HTTPBadRequest(
request=req, content_type='text/plain',
body='"%s" parameter not valid UTF-8' % name)
return value
def get_listing_content_type(req):
"""
Determine the content type to use for an account or container listing
response.
:param req: request object
:returns: content type as a string (e.g. text/plain, application/json)
:raises: HTTPNotAcceptable if the requested content type is not acceptable
:raises: HTTPBadRequest if the 'format' query param is provided and
not valid UTF-8
"""
query_format = get_param(req, 'format')
if query_format:
req.accept = FORMAT2CONTENT_TYPE.get(
query_format.lower(), FORMAT2CONTENT_TYPE['plain'])
out_content_type = req.accept.best_match(
['text/plain', 'application/json', 'application/xml', 'text/xml'])
if not out_content_type:
raise HTTPNotAcceptable(request=req)
return out_content_type
# From the request, get a tuple of the path components plus the storage policy object
def get_name_and_placement(request, minsegs=1, maxsegs=None,
rest_with_last=False):
"""
Utility function to split and validate the request path and storage
policy. The storage policy index is extracted from the headers of
the request and converted to a StoragePolicy instance. The
remaining args are passed through to
:meth:`split_and_validate_path`.
:returns: a list, result of :meth:`split_and_validate_path` with
the BaseStoragePolicy instance appended on the end
:raises: HTTPServiceUnavailable if the path is invalid or no policy exists
with the extracted policy_index.
"""
    # From the request headers, determine whether the backend policy is replica
    # mode or EC (erasure coding) mode: read the storage policy index and look
    # up the corresponding StoragePolicy object by that index.
policy_index = request.headers.get('X-Backend-Storage-Policy-Index')
policy = POLICIES.get_by_index(policy_index)
if not policy:
raise HTTPServiceUnavailable(
body=_("No policy with index %s") % policy_index,
request=request, content_type='text/plain')
    # From the request, get the tuple of path components
results = split_and_validate_path(request, minsegs=minsegs,
maxsegs=maxsegs,
rest_with_last=rest_with_last)
results.append(policy)
return results
def split_and_validate_path(request, minsegs=1, maxsegs=None,
rest_with_last=False):
"""
Utility function to split and validate the request path.
:returns: result of :meth:`~swift.common.utils.split_path` if
everything's okay
:raises: HTTPBadRequest if something's not okay
"""
try:
segs = split_path(unquote(request.path),
minsegs, maxsegs, rest_with_last)
validate_device_partition(segs[0], segs[1])
return segs
except ValueError as err:
raise HTTPBadRequest(body=str(err), request=request,
content_type='text/plain')
def is_user_meta(server_type, key):
"""
Tests if a header key starts with and is longer than the user
metadata prefix for given server type.
:param server_type: type of backend server i.e. [account|container|object]
:param key: header key
:returns: True if the key satisfies the test, False otherwise
"""
if len(key) <= 8 + len(server_type):
return False
return key.lower().startswith(get_user_meta_prefix(server_type))
def is_sys_meta(server_type, key):
"""
Tests if a header key starts with and is longer than the system
metadata prefix for given server type.
:param server_type: type of backend server i.e. [account|container|object]
:param key: header key
:returns: True if the key satisfies the test, False otherwise
"""
if len(key) <= 11 + len(server_type):
return False
return key.lower().startswith(get_sys_meta_prefix(server_type))
def is_sys_or_user_meta(server_type, key):
"""
Tests if a header key starts with and is longer than the user or system
metadata prefix for given server type.
:param server_type: type of backend server i.e. [account|container|object]
:param key: header key
:returns: True if the key satisfies the test, False otherwise
"""
return is_user_meta(server_type, key) or is_sys_meta(server_type, key)
def strip_user_meta_prefix(server_type, key):
"""
Removes the user metadata prefix for a given server type from the start
of a header key.
:param server_type: type of backend server i.e. [account|container|object]
:param key: header key
:returns: stripped header key
"""
return key[len(get_user_meta_prefix(server_type)):]
def strip_sys_meta_prefix(server_type, key):
"""
Removes the system metadata prefix for a given server type from the start
of a header key.
:param server_type: type of backend server i.e. [account|container|object]
:param key: header key
:returns: stripped header key
"""
return key[len(get_sys_meta_prefix(server_type)):]
def get_user_meta_prefix(server_type):
"""
Returns the prefix for user metadata headers for given server type.
This prefix defines the namespace for headers that will be persisted
by backend servers.
:param server_type: type of backend server i.e. [account|container|object]
:returns: prefix string for server type's user metadata headers
"""
return 'x-%s-%s-' % (server_type.lower(), 'meta')
def get_sys_meta_prefix(server_type):
"""
Returns the prefix for system metadata headers for given server type.
This prefix defines the namespace for headers that will be persisted
by backend servers.
:param server_type: type of backend server i.e. [account|container|object]
:returns: prefix string for server type's system metadata headers
"""
return 'x-%s-%s-' % (server_type.lower(), 'sysmeta')
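# Illustrative examples (added for clarity; not part of the original module):
#
#     get_user_meta_prefix('object')                  # -> 'x-object-meta-'
#     get_sys_meta_prefix('container')                # -> 'x-container-sysmeta-'
#     is_user_meta('object', 'x-object-meta-color')   # -> True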
def remove_items(headers, condition):
"""
Removes items from a dict whose keys satisfy
the given condition.
:param headers: a dict of headers
:param condition: a function that will be passed the header key as a
single argument and should return True if the header
is to be removed.
:returns: a dict, possibly empty, of headers that have been removed
"""
removed = {}
keys = filter(condition, headers)
removed.update((key, headers.pop(key)) for key in keys)
return removed
def copy_header_subset(from_r, to_r, condition):
"""
Will copy desired subset of headers from from_r to to_r.
:param from_r: a swob Request or Response
:param to_r: a swob Request or Response
:param condition: a function that will be passed the header key as a
single argument and should return True if the header
is to be copied.
"""
for k, v in from_r.headers.items():
if condition(k):
to_r.headers[k] = v
class SegmentedIterable(object):
"""
Iterable that returns the object contents for a large object.
:param req: original request object
:param app: WSGI application from which segments will come
:param listing_iter: iterable yielding the object segments to fetch,
along with the byte subranges to fetch, in the
form of a tuple (object-path, first-byte, last-byte)
or (object-path, None, None) to fetch the whole thing.
:param max_get_time: maximum permitted duration of a GET request (seconds)
:param logger: logger object
:param swift_source: value of swift.source in subrequest environ
(just for logging)
:param ua_suffix: string to append to user-agent.
:param name: name of manifest (used in logging only)
:param response_body_length: optional response body length for
the response being sent to the client.
"""
def __init__(self, req, app, listing_iter, max_get_time,
logger, ua_suffix, swift_source,
name='<not specified>', response_body_length=None):
self.req = req
self.app = app
self.listing_iter = listing_iter
self.max_get_time = max_get_time
self.logger = logger
self.ua_suffix = " " + ua_suffix
self.swift_source = swift_source
self.name = name
self.response_body_length = response_body_length
self.peeked_chunk = None
self.app_iter = self._internal_iter()
self.validated_first_segment = False
self.current_resp = None
def _coalesce_requests(self):
start_time = time.time()
pending_req = None
pending_etag = None
pending_size = None
try:
for seg_path, seg_etag, seg_size, first_byte, last_byte \
in self.listing_iter:
first_byte = first_byte or 0
go_to_end = last_byte is None or (
seg_size is not None and last_byte == seg_size - 1)
if time.time() - start_time > self.max_get_time:
raise SegmentError(
'ERROR: While processing manifest %s, '
'max LO GET time of %ds exceeded' %
(self.name, self.max_get_time))
# The "multipart-manifest=get" query param ensures that the
# segment is a plain old object, not some flavor of large
# object; therefore, its etag is its MD5sum and hence we can
# check it.
path = seg_path + '?multipart-manifest=get'
seg_req = make_subrequest(
self.req.environ, path=path, method='GET',
headers={'x-auth-token': self.req.headers.get(
'x-auth-token')},
agent=('%(orig)s ' + self.ua_suffix),
swift_source=self.swift_source)
seg_req_rangeval = None
if first_byte != 0 or not go_to_end:
seg_req_rangeval = "%s-%s" % (
first_byte, '' if go_to_end else last_byte)
seg_req.headers['Range'] = "bytes=" + seg_req_rangeval
# We can only coalesce if paths match and we know the segment
# size (so we can check that the ranges will be allowed)
if pending_req and pending_req.path == seg_req.path and \
seg_size is not None:
# Make a new Range object so that we don't goof up the
# existing one in case of invalid ranges. Note that a
# range set with too many individual byteranges is
# invalid, so we can combine N valid byteranges and 1
# valid byterange and get an invalid range set.
if pending_req.range:
new_range_str = str(pending_req.range)
else:
new_range_str = "bytes=0-%d" % (seg_size - 1)
if seg_req.range:
new_range_str += "," + seg_req_rangeval
else:
new_range_str += ",0-%d" % (seg_size - 1)
if Range(new_range_str).ranges_for_length(seg_size):
# Good news! We can coalesce the requests
pending_req.headers['Range'] = new_range_str
continue
# else, Too many ranges, or too much backtracking, or ...
if pending_req:
yield pending_req, pending_etag, pending_size
pending_req = seg_req
pending_etag = seg_etag
pending_size = seg_size
except ListingIterError:
e_type, e_value, e_traceback = sys.exc_info()
if time.time() - start_time > self.max_get_time:
raise SegmentError(
'ERROR: While processing manifest %s, '
'max LO GET time of %ds exceeded' %
(self.name, self.max_get_time))
if pending_req:
yield pending_req, pending_etag, pending_size
six.reraise(e_type, e_value, e_traceback)
if time.time() - start_time > self.max_get_time:
raise SegmentError(
'ERROR: While processing manifest %s, '
'max LO GET time of %ds exceeded' %
(self.name, self.max_get_time))
if pending_req:
yield pending_req, pending_etag, pending_size
def _internal_iter(self):
bytes_left = self.response_body_length
try:
for seg_req, seg_etag, seg_size in self._coalesce_requests():
seg_resp = seg_req.get_response(self.app)
if not is_success(seg_resp.status_int):
close_if_possible(seg_resp.app_iter)
raise SegmentError(
'ERROR: While processing manifest %s, '
'got %d while retrieving %s' %
(self.name, seg_resp.status_int, seg_req.path))
elif ((seg_etag and (seg_resp.etag != seg_etag)) or
(seg_size and (seg_resp.content_length != seg_size) and
not seg_req.range)):
# The content-length check is for security reasons. Seems
# possible that an attacker could upload a >1mb object and
# then replace it with a much smaller object with same
# etag. Then create a big nested SLO that calls that
# object many times which would hammer our obj servers. If
# this is a range request, don't check content-length
# because it won't match.
close_if_possible(seg_resp.app_iter)
raise SegmentError(
'Object segment no longer valid: '
'%(path)s etag: %(r_etag)s != %(s_etag)s or '
'%(r_size)s != %(s_size)s.' %
{'path': seg_req.path, 'r_etag': seg_resp.etag,
'r_size': seg_resp.content_length,
's_etag': seg_etag,
's_size': seg_size})
else:
self.current_resp = seg_resp
seg_hash = None
if seg_resp.etag and not seg_req.headers.get('Range'):
# Only calculate the MD5 if it we can use it to validate
seg_hash = hashlib.md5()
document_iters = maybe_multipart_byteranges_to_document_iters(
seg_resp.app_iter,
seg_resp.headers['Content-Type'])
for chunk in itertools.chain.from_iterable(document_iters):
if seg_hash:
seg_hash.update(chunk)
if bytes_left is None:
yield chunk
elif bytes_left >= len(chunk):
yield chunk
bytes_left -= len(chunk)
else:
yield chunk[:bytes_left]
bytes_left -= len(chunk)
close_if_possible(seg_resp.app_iter)
raise SegmentError(
'Too many bytes for %(name)s; truncating in '
'%(seg)s with %(left)d bytes left' %
{'name': self.name, 'seg': seg_req.path,
'left': bytes_left})
close_if_possible(seg_resp.app_iter)
if seg_hash and seg_hash.hexdigest() != seg_resp.etag:
raise SegmentError(
"Bad MD5 checksum in %(name)s for %(seg)s: headers had"
" %(etag)s, but object MD5 was actually %(actual)s" %
{'seg': seg_req.path, 'etag': seg_resp.etag,
'name': self.name, 'actual': seg_hash.hexdigest()})
if bytes_left:
raise SegmentError(
'Not enough bytes for %s; closing connection' % self.name)
except (ListingIterError, SegmentError):
self.logger.exception(_('ERROR: An error occurred '
'while retrieving segments'))
raise
finally:
if self.current_resp:
close_if_possible(self.current_resp.app_iter)
def app_iter_range(self, *a, **kw):
"""
swob.Response will only respond with a 206 status in certain cases; one
of those is if the body iterator responds to .app_iter_range().
However, this object (or really, its listing iter) is smart enough to
handle the range stuff internally, so we just no-op this out for swob.
"""
return self
def validate_first_segment(self):
"""
Start fetching object data to ensure that the first segment (if any) is
valid. This is to catch cases like "first segment is missing" or
"first segment's etag doesn't match manifest".
Note: this does not validate that you have any segments. A
zero-segment large object is not erroneous; it is just empty.
"""
if self.validated_first_segment:
return
self.validated_first_segment = True
try:
self.peeked_chunk = next(self.app_iter)
except StopIteration:
pass
def __iter__(self):
if self.peeked_chunk is not None:
pc = self.peeked_chunk
self.peeked_chunk = None
return itertools.chain([pc], self.app_iter)
else:
return self.app_iter
def close(self):
"""
Called when the client disconnect. Ensure that the connection to the
backend server is closed.
"""
close_if_possible(self.app_iter)
# Take the HTTP response of a successful object GET request and turn it into an
# iterator of (first-byte, last-byte, length, headers, body-file) tuples
def http_response_to_document_iters(response, read_chunk_size=4096):
"""
Takes a successful object-GET HTTP response and turns it into an
iterator of (first-byte, last-byte, length, headers, body-file)
5-tuples.
The response must either be a 200 or a 206; if you feed in a 204 or
something similar, this probably won't work.
:param response: HTTP response, like from bufferedhttp.http_connect(),
not a swob.Response.
"""
chunked = is_chunked(dict(response.getheaders()))
if response.status == 200:
if chunked:
# Single "range" that's the whole object with an unknown length
return iter([(0, None, None, response.getheaders(),
response)])
# Single "range" that's the whole object
content_length = int(response.getheader('Content-Length'))
return iter([(0, content_length - 1, content_length,
response.getheaders(), response)])
content_type, params_list = parse_content_type(
response.getheader('Content-Type'))
if content_type != 'multipart/byteranges':
# Single range; no MIME framing, just the bytes. The start and end
# byte indices are in the Content-Range header.
start, end, length = parse_content_range(
response.getheader('Content-Range'))
return iter([(start, end, length, response.getheaders(), response)])
else:
# Multiple ranges; the response body is a multipart/byteranges MIME
# document, and we have to parse it using the MIME boundary
# extracted from the Content-Type header.
params = dict(params_list)
return multipart_byteranges_to_document_iters(
response, params['boundary'], read_chunk_size)
| apache-2.0 | 6,721,470,277,451,898,000 | 39.485455 | 87 | 0.591189 | false |
srcc-msu/octotron | octopy/react_wrapper.py | 1 | 1761 | from octopy.utils import *
from ru.parallel.octotron.core.logic import Response
from ru.parallel.octotron.generators.tmpl import ReactionTemplate
from ru.parallel.octotron.generators.tmpl import ReactionAction as Reaction
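# Descriptive note (added for clarity; not part of the original source): these
# helpers wrap Octotron's Java classes (Response / ReactionTemplate), apparently
# so that model scripts written in Python can declare reactions together with
# the log message and handler to run at each severity level.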
def ReactsFromDict(reactions_dict):
res = []
for name, reaction in reactions_dict.items():
if len(reaction) > 1:
raise RuntimeError("duplicated reaction: " + name + " : " + str(reaction))
res.append(ReactionTemplate(name, reaction[0]))
return res
def ConvertReacts(var):
return ReactsFromDict(MergeDicts(var))
def Info(tag, message):
return Response("INFO", []).Msg(tag, message).Msg("_id", "{_id}").Exec("on_info")
def Warning(tag, message):
return Response("WARNING", []).Msg(tag, message).Msg("_id", "{_id}").Exec("on_warning")
def Danger(tag, message):
return Response("DANGER", []).Msg(tag, message).Msg("_id", "{_id}").Exec("on_danger")
def Critical(tag, message):
return Response("CRITICAL", []).Msg(tag, message).Msg("_id", "{_id}").Exec("on_critical")
def RInfo(tag, message):
return Response("RECOVER_INFO", []).Msg(tag, message).Msg("_id", "{_id}").Exec("on_info")
def RWarning(tag, message):
return Response("RECOVER_WARNING", []).Msg(tag, message).Msg("_id", "{_id}").Exec("on_warning")
def RDanger(tag, message):
return Response("RECOVER_DANGER", []).Msg(tag, message).Msg("_id", "{_id}").Exec("on_danger")
def RCritical(tag, message):
return Response("RECOVER_CRITICAL", []).Msg(tag, message).Msg("_id", "{_id}").Exec("on_critical")
def GenRStatus(status):
return {
Info : RInfo
, Warning : RWarning
, Danger : RDanger
, Critical : RCritical
}[status]
def Prophecy(tag, message):
return Response("PROPHECY", []).Msg(tag, message).Msg("_id", "{_id}").Exec("on_prophecy")
| mit | -5,880,287,855,817,266,000 | 30.446429 | 98 | 0.675185 | false |
robertoyubero/ptavi-p2 | calcplus.py | 1 | 1079 | # ROBERTO YUBERO DE DIEGO
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import calcoohija
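# Descriptive note (added for clarity; not part of the original source): the
# script expects the path of an input file as its first argument; each line of
# that file has the form "operation,operand1,operand2,...". The operation is
# applied left to right over the integer operands and the script prints
# "operation = result" for every line.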
if __name__ == "__main__":
fichero = open(sys.argv[1], 'r')
    # read the whole file
lista_lineas = fichero.read()
    # build a list of lines, stripping the newline characters
lista_lineas = lista_lineas.splitlines()
miCalculadora = calcoohija.CalculadoraHija()
    # walk through the list line by line
for i in range(0, len(lista_lineas)):
        # split each line on its commas
lista_lineas[i] = lista_lineas[i].split(',')
        # keep the split line in its own variable
linea = lista_lineas[i]
        # pick the operation type for this line
operacion = linea[0]
        # the first operand goes straight into the result
resultado = int(linea[1])
        # iterate over the line from the second operand onwards
for k in range(2, len(linea)):
operando = int(linea[k])
resultado = miCalculadora.operar(operacion, resultado, operando)
print(operacion + " = " + str(resultado))
fichero.close()
| gpl-2.0 | 8,556,814,540,427,485,000 | 28.972222 | 76 | 0.625579 | false |
lcpt/xc | python_modules/actions/roadway_trafic/load_model_base.py | 1 | 8361 | # -*- coding: utf-8 -*-
from __future__ import division
__author__= "Luis C. Pérez Tato (LCPT) Ana Ortega (AO_O)"
__copyright__= "Copyright 2018, LCPT AO_O "
__license__= "GPL"
__version__= "3.0"
__email__= "[email protected] [email protected]"
import xc_base
import geom
import xc
from actions import loads
from model.geometry import geom_utils as gu
class WheelLoad(object):
''' Load of a wheel.
:ivar position: position of the wheel
:ivar load: load
    :ivar lx: length of the wheel in the transverse direction
    :ivar ly: length of the wheel in the longitudinal direction
'''
def __init__(self,pos,ld,lx=0,ly=0):
self.position= pos
self.load= ld
self.lx=lx
self.ly=ly
class LoadModel(object):
    ''' Roadway traffic load model
:ivar wheelLoads: position and loads of each wheel
:ivar vehicleBoundary: polygon without uniform load around the vehicle.
'''
def __init__(self,wLoads, vBoundary= None):
self.wheelLoads= wLoads # Wheel positions and loads
self.vehicleBoundary= vBoundary
def getPositions(self):
retval= list()
for p in self.wheelLoads:
retval.append(p.position)
return retval
def getLoads(self):
retval= list()
for l in self.wheelLoads:
retval.append(l.load)
return retval
def getTotalLoad(self):
retval= 0.0
for l in self.wheelLoads:
retval+= l.load
return retval
def getCentroid(self):
'''Return the centroid of the loads.'''
retvalPos= geom.Pos2d(0.0,0.0)
totalLoad= 0.0
for i in self.wheelLoads:
retvalPos.x+= i.load*i.position.x
retvalPos.y+= i.load*i.position.y
totalLoad+= i.load
if(totalLoad!=0.0):
retvalPos.x/=totalLoad
retvalPos.y/=totalLoad
return retvalPos
def getLoadRelativePositions(self):
'''Return the loads positions with respect to the loads centroid.'''
centroidVector= self.getCentroid().getPositionVector()
retval= list()
for i in self.wheelLoads:
pos= i.position-centroidVector
retval.append(pos)
return retval
def normalize(self):
'''Sets the positions with respect to the loads centroid.'''
dispModulus= self.getCentroid().getPositionVector().getModulo()
if(dispModulus>1e-4):
normalizedPositions= self.getLoadRelativePositions()
if(self.vehicleBoundary):
normalizedVehicleBoundary= self.getVehicleBoundaryRelativePositions()
self.vehicleBoundary= normalizedVehicleBoundary
for (old,new) in zip(self.wheelLoads,normalizedPositions):
old.position= new
def getVehicleBoundaryRelativePositions(self):
'''Return the vehicle boundary positions with respect to
the loads centroid.'''
centroidVector= self.getCentroid().getPositionVector()
retval= list()
for p in self.vehicleBoundary:
retval.append(p-centroidVector)
return retval
def getCenteredLoadBoundary(self):
'''Return the boundary of the wheel loads with respect to
the load centroid.'''
retval= geom.BND2d()
tmp= self.getLoadRelativePositions()
for p in tmp:
retval.update(p)
return retval
def getCenteredVehicleBoundary(self):
'''Return the boundary of the vehicle with respect to
the load centroid.'''
retval= geom.Polygon2d()
tmp= self.getVehicleBoundaryRelativePositions()
for p in tmp:
retval.appendVertex(p)
return retval
def getRotatedPi(self):
'''Return the load model rotated 180 degrees (pi radians).'''
newLoads= list()
for i in self.wheelLoads:
newLoads.append(WheelLoad(geom.Pos2d(-i.position.x,-i.position.y),i.load))
newVehicleBoundary= None
if(self.vehicleBoundary):
newVehicleBoundary= list()
for p in self.vehicleBoundary:
newVehicleBoundary.append(geom.Pos2d(-p.x,-p.y))
return LoadModel(newLoads,newVehicleBoundary)
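# Illustrative sketch (added for clarity; not part of the original module): a
# single two-wheel axle, 100 kN per wheel and wheels 2 m apart, so that the
# centroid of the loads sits at the origin (values are assumptions):
#
#     axle= LoadModel(wLoads= [WheelLoad(geom.Pos2d(-1.0,0.0),100e3),
#                              WheelLoad(geom.Pos2d(1.0,0.0),100e3)])
#     axle.getTotalLoad()    # -> 200e3
#     axle.getCentroid()     # -> geom.Pos2d(0.0,0.0)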
class VehicleDistrLoad(object):
''' Distribute the point loads defined in the object lModel over the shell
elements under the wheels affected by them.
:ivar name: name identifying the load
:ivar xcSet: set that contains the shell elements
:ivar lModel: instance of the class LoadModel with the definition of
vehicle of the load model.
:ivar xCent: global coord. X where to place the centroid of the vehicle
:ivar yCent: global coord. Y where to place the centroid of the vehicle
:ivar hDistr: height considered to distribute each point load with
slope slopeDistr
:ivar slopeDistr: slope (H/V) through hDistr to distribute the load of
a wheel
'''
def __init__(self,name,xcSet,loadModel, xCentr,yCentr,hDistr,slopeDistr):
self.name=name
self.xcSet=xcSet
self.loadModel= loadModel
self.xCentr=xCentr
self.yCentr=yCentr
self.hDistr=hDistr
self.slopeDistr=slopeDistr
self.ldsWheels=self.genLstLoadDef()
def genLstLoadDef(self):
'''generates a list with the definition of all the wheel loads
'''
deltaL=2*self.slopeDistr*self.hDistr
ldWheels=list()
cont=0
for w in self.loadModel.wheelLoads:
nm=self.name+str(cont)
lVect=xc.Vector([0,0,-w.load,0,0,0])
xCwheel=self.xCentr+w.position.x
yCwheel=self.yCentr+w.position.y
basePrism=gu.rect2DPolygon(xCent=xCwheel,yCent=yCwheel,Lx=w.lx+deltaL,Ly=w.ly+deltaL)
ldWheels.append(loads.PointLoadOverShellElems(nm,self.xcSet,lVect,basePrism,'Z','Global'))
cont+=1
return ldWheels
def appendLoadToCurrentLoadPattern(self):
''' Append load to the current load pattern.'''
# ldW=self.genLstLoadDef()
# for l in ldW:
for l in self.ldsWheels:
l.appendLoadToCurrentLoadPattern()
def __mul__(self,factor):
'''Apply the factor to the load and append it to the current load pattern'''
for l in self.ldsWheels:
l.__mul__(factor)
def __rmul__(self,factor):
'''Apply the factor to the load and append it to the current load pattern'''
for l in self.ldsWheels:
l.__mul__(factor)
class VehicleLoad(object):
'''Position of a load model in the structure.
:ivar loadModel: load model that corresponds to the vehicle.
:ivar ref2d3d: position and orientation of the vehicle in the structure.
'''
def __init__(self,lModel, rfSys):
'''Constructor:
:param lModel: load model that corresponds to the vehicle.
:param rfSys: position and orientation of the vehicle in the structure.
'''
self.loadModel= lModel
self.loadModel.normalize()
self.refSys= rfSys
def getCentroid(self):
'''Return the centroid of the loads.'''
return self.refSys.getOrg()
def getGlobalPositions(self,localPositions):
'''Return the corresponding global positions.'''
retval= list()
for p in localPositions:
p3D= self.refSys.getPosGlobal(p)
retval.append(p3D)
return retval
def getLoadPositions(self):
'''Return the positions of the vehicle loads.'''
positions= self.loadModel.getPositions()
return self.getGlobalPositions(positions)
def getVehicleBoundaryPositions(self):
'''Return the vehicle boundary positions.'''
return self.getGlobalPositions(self.loadModel.vehicleBoundary)
def getLoadBoundary(self):
'''Return the boundary of the vehicle loads.'''
retval= geom.BND3d()
tmp= self.getLoadPositions()
for p in tmp:
retval.update(p)
return retval
def getVehicleBoundary(self):
'''Return the boundary of the vehicle.'''
retval= geom.Polygon2d()
tmp= self.getVehicleBoundaryPositions()
for p in tmp:
retval.appendVertex(geom.Pos2d(p.x,p.y))
return retval
| gpl-3.0 | -642,362,778,233,924,100 | 33.403292 | 102 | 0.627273 | false |
bgmerrell/desmod | desmod/component.py | 1 | 11791 | """Component is the building block for desmod models.
Hierarchy
---------
A desmod model consists of a directed acyclic graph (DAG) of
:class:`Component` subclasses. Each Component is composed of zero or more child
Components. A single top-level Component class is passed to the
:func:`~desmod.simulation.simulate()` function to initiate simulation.
The :class:`Component` hierarchy does not define the behavior of a model, but
instead exists as a tool to build large models out of composable and
encapsulated pieces.
Connections
-----------
Components connect to other components via connection objects. Each component
is responsible for declaring the names of external connections as well as making
connections for its child components. The final network of inter-component
connections is neither directed (a connection object may enable two-way
communication), acyclic (groups of components may form cyclical connections),
nor constrained to match the component hierarchy.
Ultimately, a connection between two components means that each component
instance has a [pythonic] reference to the connection object.
In the spirit of Python, the types connection objects are flexible and dynamic.
A connection object may be of any type--it is up to the connected components to
cooperatively decide how to use the connection object for communication. That
said, some object types are more useful than others for connections. Some
useful connection object types include:
* :class:`desmod.queue.Queue`
* :class:`simpy.resources.resource.Resource`
Processes
---------
A component may have zero or more simulation processes
(:class:`simpy.events.Process`). It is these processes that give a model its
simulation-time behavior. The process methods declared by components are
started at simulation time. These "standing" processes may dynamically launch
additional processes using `self.env.process()`.
Use Cases
---------
Given the flexibility of components to have zero or more children, zero or more
processes, and zero or more connections, it can be helpful to give names to
the various roles components may play in a model.
* Structural Component -- a component with child components, but no processes
* Behavioral Component -- a component with processes, but no child components
* Hybrid Component -- a component with child components and processes
* State Component -- a component with neither children nor processes
It is typical for the top-level component in a model to be purely structural,
while behavioral components are leaves in the model DAG.
A component with neither children nor processes may still be useful. Such a
component could, for example, be used as a connection object.
"""
class ConnectError(Exception):
pass
class Component(object):
"""Building block for composing models.
This class is meant to be subclassed. Component subclasses must declare
their children, connections, and processes.
:param Component parent: Parent component or None for top-level Component.
:param SimEnvironment env: SimPy simulation environment.
:param str name: Optional name of Component instance.
:param int index:
Optional index of Component. This is used when multiple sibling
components of the same type are instantiated as an array/list.
"""
#: Short/friendly name used in the scope (class attribute).
base_name = ''
def __init__(self, parent, env=None, name=None, index=None):
assert parent or env
#: The simulation environment; a :class:`SimEnvironment` instance.
self.env = parent.env if env is None else env
#: The component name (str).
self.name = ((self.base_name if name is None else name) +
('' if index is None else str(index)))
#: Index of Component instance within group of sibling instances.
#: Will be None for un-grouped Components.
self.index = index
if parent is None or not parent.scope:
#: String indicating the full scope of Component instance in the
#: Component DAG.
self.scope = self.name
else:
self.scope = parent.scope + '.' + self.name
if parent:
parent._children.append(self)
self._children = []
self._processes = []
self._connections = []
self._not_connected = set()
#: Log an error message.
self.error = self.env.tracemgr.get_trace_function(
self.scope, log={'level': 'ERROR'})
#: Log a warning message.
self.warn = self.env.tracemgr.get_trace_function(
self.scope, log={'level': 'WARNING'})
#: Log an informative message.
self.info = self.env.tracemgr.get_trace_function(
self.scope, log={'level': 'INFO'})
#: Log a debug message.
self.debug = self.env.tracemgr.get_trace_function(
self.scope, log={'level': 'DEBUG'})
def add_process(self, process_func, *args, **kwargs):
"""Add a process method to be run at simulation-time.
Subclasses should call this in `__init__()` to declare the process
methods to be started at simulation-time.
:param function process_func:
Typically a bound method of the Component subclass.
:param args: arguments to pass to `process_func`.
:param kwargs: keyword arguments to pass to `process_func`.
"""
self._processes.append((process_func, args, kwargs))
def add_processes(self, *process_funcs):
"""Declare multiple processes at once.
This is a convenience wrapper for :meth:`add_process()` that may be
used to quickly declare a list of process methods that do not require
any arguments.
:param process_funcs: argument-less process functions (methods).
"""
for process_func in process_funcs:
self.add_process(process_func)
def add_connections(self, *connection_names):
"""Declare names of externally-provided connection objects.
The named connections must be connected (assigned) by an ancestor at
elaboration time.
"""
self._not_connected.update(connection_names)
def connect(self, dst, dst_connection, src=None, src_connection=None,
conn_obj=None):
"""Assign connection object from source to destination component.
At elaboration-time, Components must call `connect()` to make the
connections declared by descendant (child, grandchild, etc.)
components.
.. Note::
:meth:`connect()` is nominally called from
:meth:`connect_children()`.
:param Component dst:
Destination component being assigned the connection object.
:param str dst_connection:
Destination's name for the connection object.
:param Component src:
Source component providing the connection object. If omitted, the
source component is assumed to be `self`.
:param str src_connection:
Source's name for the connection object. If omitted,
`dst_connection` is used.
:param conn_obj:
The connection object to be assigned to the destination component.
This parameter may typically be omitted in which case the
connection object is resolved using `src` and `src_connection`.
"""
if src is None:
src = self
if src_connection is None:
src_connection = dst_connection
if conn_obj is None:
if hasattr(src, src_connection):
conn_obj = getattr(src, src_connection)
else:
raise ConnectError(
'src "{}" (class {}) does not have attr "{}"'.format(
src.scope, type(src).__name__, src_connection))
if dst_connection in dst._not_connected:
setattr(dst, dst_connection, conn_obj)
dst._not_connected.remove(dst_connection)
dst._connections.append(
(dst_connection, src, src_connection, conn_obj))
else:
raise ConnectError(
'dst "{}" (class {}) does not declare connection "{}"'.format(
dst.scope, type(dst).__name__, dst_connection))
def connect_children(self):
"""Make connections for descendant components.
This method must be overridden in Component subclasses that need to
make any connections on behalf of its descendant components.
Connections are made using :meth:`connect()`.
"""
if any(child._not_connected for child in self._children):
raise ConnectError(
'{0} has unconnected children; implement '
'{0}.connect_children()'.format(type(self).__name__))
def auto_probe(self, name, target=None, **hints):
if target is None:
target = getattr(self, name)
target_scope = '.'.join([self.scope, name])
self.env.tracemgr.auto_probe(target_scope, target, **hints)
def get_trace_function(self, name, **hints):
target_scope = '.'.join([self.scope, name])
return self.env.tracemgr.get_trace_function(target_scope, **hints)
@classmethod
def pre_init(cls, env):
"""Override-able class method called prior to model initialization.
Component subclasses may override this classmethod to gain access
to the simulation environment (`env`) prior to :meth:`__init__()` being
called.
"""
pass
def elaborate(self):
"""Recursively elaborate the model.
The elaboration phase prepares the model for simulation. Descendant
connections are made and components' processes are started at
elaboration-time.
"""
self.connect_children()
for child in self._children:
if child._not_connected:
raise ConnectError('{scope}.{conn_name} not connected'.format(
scope=child.scope, conn_name=child._not_connected.pop()))
child.elaborate()
for proc, args, kwargs in self._processes:
self.env.process(proc(*args, **kwargs))
self.elab_hook()
def elab_hook(self):
"""Hook called after elaboration and before simulation phase.
Component subclasses may override :meth:`elab_hook()` to inject
behavior after elaboration, but prior to simulation.
"""
pass
def post_simulate(self):
"""Recursively run post-simulation hooks."""
for child in self._children:
child.post_simulate()
self.post_sim_hook()
def post_sim_hook(self):
"""Hook called after simulation completes.
Component subclasses may override `post_sim_hook()` to inject behavior
after the simulation completes successfully. Note that
`post_sim_hook()` will not be called if the simulation terminates with
an unhandled exception.
"""
pass
def get_result(self, result):
"""Recursively compose simulation result dict.
Upon successful completion of the simulation phase, each component in
the model has the opportunity to add-to or modify the `result` dict via
its :meth:`get_result_hook` method.
The fully composed `result` dict is returned by :func:`simulate`.
:param dict result: Result dictionary to be modified.
"""
for child in self._children:
child.get_result(result)
self.get_result_hook(result)
def get_result_hook(self, result):
"""Hook called after result is composed by descendant components."""
pass
| mit | 7,515,155,429,324,628,000 | 36.670927 | 79 | 0.653889 | false |
cdgriffith/Box | box/box_list.py | 1 | 16433 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017-2020 - Chris Griffith - MIT License
import copy
import re
from os import PathLike
from typing import Iterable, Type, Union
import box
from box.converters import (
BOX_PARAMETERS,
_from_csv,
_from_json,
_from_msgpack,
_from_toml,
_from_yaml,
_to_csv,
_to_json,
_to_msgpack,
_to_toml,
_to_yaml,
msgpack_available,
toml_available,
yaml_available,
)
from box.exceptions import BoxError, BoxTypeError
_list_pos_re = re.compile(r"\[(\d+)\]")
class BoxList(list):
"""
    Drop-in replacement for list that converts added objects to Box or BoxList
objects as necessary.
"""
def __new__(cls, *args, **kwargs):
obj = super().__new__(cls, *args, **kwargs)
# This is required for pickling to work correctly
obj.box_options = {"box_class": box.Box}
obj.box_options.update(kwargs)
obj.box_org_ref = 0
return obj
def __init__(self, iterable: Iterable = None, box_class: Type[box.Box] = box.Box, **box_options):
self.box_options = box_options
self.box_options["box_class"] = box_class
self.box_org_ref = id(iterable) if iterable else 0
if iterable:
for x in iterable:
self.append(x)
if box_options.get("frozen_box"):
def frozen(*args, **kwargs):
raise BoxError("BoxList is frozen")
for method in ["append", "extend", "insert", "pop", "remove", "reverse", "sort"]:
self.__setattr__(method, frozen)
def __getitem__(self, item):
if self.box_options.get("box_dots") and isinstance(item, str) and item.startswith("["):
list_pos = _list_pos_re.search(item)
value = super(BoxList, self).__getitem__(int(list_pos.groups()[0]))
if len(list_pos.group()) == len(item):
return value
return value.__getitem__(item[len(list_pos.group()) :].lstrip("."))
return super(BoxList, self).__getitem__(item)
def __delitem__(self, key):
if self.box_options.get("frozen_box"):
raise BoxError("BoxList is frozen")
if self.box_options.get("box_dots") and isinstance(key, str) and key.startswith("["):
list_pos = _list_pos_re.search(key)
pos = int(list_pos.groups()[0])
if len(list_pos.group()) == len(key):
return super(BoxList, self).__delitem__(pos)
if hasattr(self[pos], "__delitem__"):
return self[pos].__delitem__(key[len(list_pos.group()) :].lstrip(".")) # type: ignore
super(BoxList, self).__delitem__(key)
def __setitem__(self, key, value):
if self.box_options.get("frozen_box"):
raise BoxError("BoxList is frozen")
if self.box_options.get("box_dots") and isinstance(key, str) and key.startswith("["):
list_pos = _list_pos_re.search(key)
pos = int(list_pos.groups()[0])
if len(list_pos.group()) == len(key):
return super(BoxList, self).__setitem__(pos, value)
return super(BoxList, self).__getitem__(pos).__setitem__(key[len(list_pos.group()) :].lstrip("."), value)
super(BoxList, self).__setitem__(key, value)
def _is_intact_type(self, obj):
if self.box_options.get("box_intact_types") and isinstance(obj, self.box_options["box_intact_types"]):
return True
return False
def _convert(self, p_object):
if isinstance(p_object, dict) and not self._is_intact_type(p_object):
p_object = self.box_options["box_class"](p_object, **self.box_options)
elif isinstance(p_object, box.Box):
p_object._box_config.update(self.box_options)
if isinstance(p_object, list) and not self._is_intact_type(p_object):
p_object = self if id(p_object) == self.box_org_ref else self.__class__(p_object, **self.box_options)
elif isinstance(p_object, BoxList):
p_object.box_options.update(self.box_options)
return p_object
def append(self, p_object):
super(BoxList, self).append(self._convert(p_object))
def extend(self, iterable):
for item in iterable:
self.append(item)
def insert(self, index, p_object):
super(BoxList, self).insert(index, self._convert(p_object))
def _dotted_helper(self):
keys = []
for idx, item in enumerate(self):
added = False
if isinstance(item, box.Box):
for key in item.keys(dotted=True):
keys.append(f"[{idx}].{key}")
added = True
elif isinstance(item, BoxList):
for key in item._dotted_helper():
keys.append(f"[{idx}]{key}")
added = True
if not added:
keys.append(f"[{idx}]")
return keys
def __repr__(self):
return f"<BoxList: {self.to_list()}>"
def __str__(self):
return str(self.to_list())
def __copy__(self):
return self.__class__((x for x in self), **self.box_options)
def __deepcopy__(self, memo=None):
out = self.__class__()
memo = memo or {}
memo[id(self)] = out
for k in self:
out.append(copy.deepcopy(k, memo=memo))
return out
def __hash__(self):
if self.box_options.get("frozen_box"):
hashing = 98765
hashing ^= hash(tuple(self))
return hashing
raise BoxTypeError("unhashable type: 'BoxList'")
def to_list(self):
new_list = []
for x in self:
if x is self:
new_list.append(new_list)
elif isinstance(x, box.Box):
new_list.append(x.to_dict())
elif isinstance(x, BoxList):
new_list.append(x.to_list())
else:
new_list.append(x)
return new_list
def to_json(
self,
filename: Union[str, PathLike] = None,
encoding: str = "utf-8",
errors: str = "strict",
multiline: bool = False,
**json_kwargs,
):
"""
Transform the BoxList object into a JSON string.
:param filename: If provided will save to file
:param encoding: File encoding
:param errors: How to handle encoding errors
        :param multiline: Put each item in the list onto its own line
:param json_kwargs: additional arguments to pass to json.dump(s)
:return: string of JSON or return of `json.dump`
"""
if filename and multiline:
lines = [_to_json(item, filename=None, encoding=encoding, errors=errors, **json_kwargs) for item in self]
with open(filename, "w", encoding=encoding, errors=errors) as f:
f.write("\n".join(lines))
else:
return _to_json(self.to_list(), filename=filename, encoding=encoding, errors=errors, **json_kwargs)
@classmethod
def from_json(
cls,
json_string: str = None,
filename: Union[str, PathLike] = None,
encoding: str = "utf-8",
errors: str = "strict",
multiline: bool = False,
**kwargs,
):
"""
Transform a json object string into a BoxList object. If the incoming
json is a dict, you must use Box.from_json.
:param json_string: string to pass to `json.loads`
:param filename: filename to open and pass to `json.load`
:param encoding: File encoding
:param errors: How to handle encoding errors
:param multiline: One object per line
:param kwargs: parameters to pass to `Box()` or `json.loads`
:return: BoxList object from json data
"""
box_args = {}
for arg in list(kwargs.keys()):
if arg in BOX_PARAMETERS:
box_args[arg] = kwargs.pop(arg)
data = _from_json(
json_string, filename=filename, encoding=encoding, errors=errors, multiline=multiline, **kwargs
)
if not isinstance(data, list):
raise BoxError(f"json data not returned as a list, but rather a {type(data).__name__}")
return cls(data, **box_args)
if yaml_available:
def to_yaml(
self,
filename: Union[str, PathLike] = None,
default_flow_style: bool = False,
encoding: str = "utf-8",
errors: str = "strict",
**yaml_kwargs,
):
"""
Transform the BoxList object into a YAML string.
:param filename: If provided will save to file
:param default_flow_style: False will recursively dump dicts
:param encoding: File encoding
:param errors: How to handle encoding errors
:param yaml_kwargs: additional arguments to pass to yaml.dump
:return: string of YAML or return of `yaml.dump`
"""
return _to_yaml(
self.to_list(),
filename=filename,
default_flow_style=default_flow_style,
encoding=encoding,
errors=errors,
**yaml_kwargs,
)
@classmethod
def from_yaml(
cls,
yaml_string: str = None,
filename: Union[str, PathLike] = None,
encoding: str = "utf-8",
errors: str = "strict",
**kwargs,
):
"""
Transform a yaml object string into a BoxList object.
:param yaml_string: string to pass to `yaml.load`
:param filename: filename to open and pass to `yaml.load`
:param encoding: File encoding
:param errors: How to handle encoding errors
:param kwargs: parameters to pass to `BoxList()` or `yaml.load`
:return: BoxList object from yaml data
"""
box_args = {}
for arg in list(kwargs.keys()):
if arg in BOX_PARAMETERS:
box_args[arg] = kwargs.pop(arg)
data = _from_yaml(yaml_string=yaml_string, filename=filename, encoding=encoding, errors=errors, **kwargs)
if not data:
return cls(**box_args)
if not isinstance(data, list):
raise BoxError(f"yaml data not returned as a list but rather a {type(data).__name__}")
return cls(data, **box_args)
else:
def to_yaml(
self,
filename: Union[str, PathLike] = None,
default_flow_style: bool = False,
encoding: str = "utf-8",
errors: str = "strict",
**yaml_kwargs,
):
raise BoxError('yaml is unavailable on this system, please install the "ruamel.yaml" or "PyYAML" package')
@classmethod
def from_yaml(
cls,
yaml_string: str = None,
filename: Union[str, PathLike] = None,
encoding: str = "utf-8",
errors: str = "strict",
**kwargs,
):
raise BoxError('yaml is unavailable on this system, please install the "ruamel.yaml" or "PyYAML" package')
if toml_available:
def to_toml(
self,
filename: Union[str, PathLike] = None,
key_name: str = "toml",
encoding: str = "utf-8",
errors: str = "strict",
):
"""
Transform the BoxList object into a toml string.
            :param filename: File to write the toml object to
:param key_name: Specify the name of the key to store the string under
(cannot directly convert to toml)
:param encoding: File encoding
:param errors: How to handle encoding errors
:return: string of TOML (if no filename provided)
"""
return _to_toml({key_name: self.to_list()}, filename=filename, encoding=encoding, errors=errors)
@classmethod
def from_toml(
cls,
toml_string: str = None,
filename: Union[str, PathLike] = None,
key_name: str = "toml",
encoding: str = "utf-8",
errors: str = "strict",
**kwargs,
):
"""
Transforms a toml string or file into a BoxList object
:param toml_string: string to pass to `toml.load`
:param filename: filename to open and pass to `toml.load`
:param key_name: Specify the name of the key to pull the list from
(cannot directly convert from toml)
:param encoding: File encoding
:param errors: How to handle encoding errors
:param kwargs: parameters to pass to `Box()`
:return:
"""
box_args = {}
for arg in list(kwargs.keys()):
if arg in BOX_PARAMETERS:
box_args[arg] = kwargs.pop(arg)
data = _from_toml(toml_string=toml_string, filename=filename, encoding=encoding, errors=errors)
if key_name not in data:
raise BoxError(f"{key_name} was not found.")
return cls(data[key_name], **box_args)
else:
def to_toml(
self,
filename: Union[str, PathLike] = None,
key_name: str = "toml",
encoding: str = "utf-8",
errors: str = "strict",
):
raise BoxError('toml is unavailable on this system, please install the "toml" package')
@classmethod
def from_toml(
cls,
toml_string: str = None,
filename: Union[str, PathLike] = None,
key_name: str = "toml",
encoding: str = "utf-8",
errors: str = "strict",
**kwargs,
):
raise BoxError('toml is unavailable on this system, please install the "toml" package')
if msgpack_available:
def to_msgpack(self, filename: Union[str, PathLike] = None, **kwargs):
"""
            Transform the BoxList object into msgpack bytes.
            :param filename: File to write the msgpack data to
            :return: msgpack bytes (if no filename provided)
"""
return _to_msgpack(self.to_list(), filename=filename, **kwargs)
@classmethod
def from_msgpack(cls, msgpack_bytes: bytes = None, filename: Union[str, PathLike] = None, **kwargs):
"""
            Transforms msgpack bytes or a msgpack file into a BoxList object
            :param msgpack_bytes: bytes to pass to `msgpack.unpackb`
            :param filename: filename to open and pass to `msgpack.unpack`
:param kwargs: parameters to pass to `Box()`
:return:
"""
box_args = {}
for arg in list(kwargs.keys()):
if arg in BOX_PARAMETERS:
box_args[arg] = kwargs.pop(arg)
data = _from_msgpack(msgpack_bytes=msgpack_bytes, filename=filename, **kwargs)
if not isinstance(data, list):
raise BoxError(f"msgpack data not returned as a list but rather a {type(data).__name__}")
return cls(data, **box_args)
else:
def to_msgpack(self, filename: Union[str, PathLike] = None, **kwargs):
raise BoxError('msgpack is unavailable on this system, please install the "msgpack" package')
@classmethod
def from_msgpack(
cls,
msgpack_bytes: bytes = None,
filename: Union[str, PathLike] = None,
encoding: str = "utf-8",
errors: str = "strict",
**kwargs,
):
raise BoxError('msgpack is unavailable on this system, please install the "msgpack" package')
def to_csv(self, filename: Union[str, PathLike] = None, encoding: str = "utf-8", errors: str = "strict"):
return _to_csv(self, filename=filename, encoding=encoding, errors=errors)
@classmethod
def from_csv(
cls,
csv_string: str = None,
filename: Union[str, PathLike] = None,
encoding: str = "utf-8",
errors: str = "strict",
):
return cls(_from_csv(csv_string=csv_string, filename=filename, encoding=encoding, errors=errors))
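# A small usage sketch (added for illustration; not part of the library):
#
#     bl = BoxList([{'a': 1}, {'b': [2, 3]}])
#     bl[0].a          # -> 1; dict items are converted to Box
#     bl[1].b          # -> <BoxList: [2, 3]>; nested lists become BoxList
#     bl.to_json()     # -> '[{"a": 1}, {"b": [2, 3]}]'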
| mit | 5,929,839,658,323,711,000 | 35.599109 | 118 | 0.546887 | false |
theikkila/lopputili | lib/finv2pdf.py | 1 | 10024 | # -*- encoding: utf-8 -*-
import reportlab
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import A4
from reportlab.lib.units import mm
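# Descriptive note (added for clarity; not part of the original source):
# Invoice2PDF draws a Finnish-style invoice, receipt or payment reminder on an
# A4 page with ReportLab and, below the cut line, the domestic bank transfer
# (giro) form with IBAN/BIC, reference number and due date. The helper n()
# below formats an amount with two decimal places.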
def n(integer):
return str('{:.2f}'.format(integer))
class Invoice2PDF:
def __init__(self, type, file):
self.file = file
self.type = type
self.p = canvas.Canvas(self.file, pagesize=A4, bottomup=0)
self.width, self.height = A4
self.left = 0
self.p.scale(0.982, 0.982)
self.p.translate(2*mm, 2*mm)
self.index = 0
self.sum = 0
self.sumvat = 0
def drawLines(self):
#horizontal lines
self.p.setLineWidth(.3*mm)
self.drawFullLine(65)
self.drawFullLine(170)
def drawProductHeader(self, invoice):
if invoice.info1 == "":
self.start = 70
else:
self.start = 85
self.p.drawString(20*mm, (self.start-15)*mm, invoice.info1 or "")
self.p.setFont("Helvetica-Bold", 10)
self.p.drawString(10*mm, self.start*mm, "#")
self.p.drawString(20*mm, self.start*mm, "Nimi")
self.p.drawString(80*mm, self.start*mm, "Määrä")
self.p.drawString(100*mm, self.start*mm, "á-hinta")
self.p.drawString(120*mm, self.start*mm, "Ale-%")
self.p.drawString(135*mm, self.start*mm, "Alv-%")
self.p.drawString(150*mm, self.start*mm, "Veroton")
self.p.drawString(180*mm, self.start*mm, "Verollinen")
def drawProduct(self, name, count, each, vat, discount):
self.index +=1
offset = 7
self.p.setFont("Helvetica", 10)
self.p.drawString(10*mm, (self.start+self.index*offset)*mm, str(self.index))
self.p.drawString(20*mm, (self.start+self.index*offset)*mm, name)
self.p.drawString(80*mm, (self.start+self.index*offset)*mm, str(count))
self.p.drawString(100*mm, (self.start+self.index*offset)*mm, str(each))
self.p.drawString(120*mm, (self.start+self.index*offset)*mm, n(discount))
self.p.drawString(135*mm, (self.start+self.index*offset)*mm, n(vat))
self.sum += count*each-(count*each*(float(discount)/100))
self.p.drawString(150*mm, (self.start+self.index*offset)*mm, n(count*each-(count*each*(float(discount)/100))))
self.sumvat += count*each*(1+(float(vat)/100))-count*each*(1+(float(vat)/100))*(float(discount)/100)
self.p.drawString(180*mm, (self.start+self.index*offset)*mm, n(count*each*(1+(float(vat)/100))-count*each*(1+(float(vat)/100))*(float(discount)/100)))
def drawProductFooter(self, invoice):
self.index += 1
offset = 7
self.p.setLineWidth(.2*mm)
self.drawFullLine((self.start+self.index*offset)-4)
self.p.setFont("Helvetica-Bold", 10)
self.p.drawString(20*mm, (self.start+self.index*offset)*mm, "Veroton")
self.p.drawString(50*mm, (self.start+self.index*offset)*mm, "Vero")
self.p.drawString(80*mm, (self.start+self.index*offset)*mm, "Verollinen")
self.p.drawString(130*mm, (self.start+self.index*offset)*mm, "Yhteensä")
self.p.drawString(150*mm, (self.start+self.index*offset)*mm, n(self.sum))
self.p.drawString(180*mm, (self.start+self.index*offset)*mm, n(self.sumvat))
self.index += 1
offset = 7
self.p.setFont("Helvetica", 10)
self.p.drawString(20*mm, (self.start+self.index*offset-3)*mm, n(self.sum))
self.p.drawString(50*mm, (self.start+self.index*offset-3)*mm, n(self.sumvat-self.sum))
self.p.drawString(80*mm, (self.start+self.index*offset-3)*mm, n(self.sumvat))
self.index += 1
if invoice.info2 != "":
self.p.drawString(20*mm, (self.start+self.index*offset-5)*mm, invoice.info2 or "")
def drawInvoiceInfo(self, invoice):
if self.type == "invoice":
types = "Lasku"
if self.type == "receipt":
types = "Kuitti"
if self.type == "reminder":
types = "Maksuhuomautus"
# draws all the fields
#self.p.setDash(False)
self.p.setStrokeColorRGB(0,0,0)
self.p.rect(110*mm, 10*mm, 85*mm, 5*mm)
self.p.setFont("Helvetica-Bold", 12)
self.p.drawString(112*mm, 13.5*mm, types)
'''
self.date = date
self.id = invoice_id
self.ref = ref
self.client_id = client_id
self.our_ref = our_ref
self.your_ref = your_ref
self.payment_type = payment_type
self.due_date = due_date
self.reclamation_time = reclamation_time
self.penalty_interest = penalty_interest
self.sum = sum
'''
fields = ("Päivämäärä", "Laskun numero", "Asiakasnumero", "Eräpäivä", "Huomautusaika", "Viivästyskorko-%", "Viitteenne", "Viitteemme")
values = (invoice.date, invoice.id, invoice.client_id, invoice.due_date, invoice.reclamation_time, invoice.penalty_interest, invoice.your_ref, invoice.our_ref)
self.p.setFont("Helvetica", 11)
i=0
for field in fields:
self.p.drawString(110*mm, (20+i)*mm, field)
i += 5
i=0
for value in values:
self.p.drawString(150*mm, (20+i)*mm, value)
i += 5
def populateWindow(self, company, payer):
self.drawWindowValue(company.name+"\n"+company.address+"\n"+company.zipcode+" "+company.city, (20, 13), False)
self.drawWindowValue(payer.name+"\n"+payer.address+"\n"+payer.zipcode+" "+payer.city, (20, 40))
def drawFooter(self, company):
#height 180
self.p.setFont("Helvetica-Bold", 10)
self.p.drawString(10*mm, 180*mm, company.name)
self.p.setFont("Helvetica", 10)
self.p.drawString(10*mm, 185*mm, company.address)
self.p.drawString(10*mm, 189*mm, company.zipcode+" "+company.city)
#second col
if company.website != "":
self.p.drawString(80*mm, 180*mm, company.website)
if company.phone != "":
self.p.drawString(80*mm, 185*mm, "Puh. "+company.phone)
if company.email != "":
self.p.drawString(80*mm, 189*mm, company.email)
#third col
self.p.drawString(145*mm, 185*mm, "Kotipaikka "+company.homeplace)
self.p.drawString(145*mm, 189*mm, "Y-tunnus "+company.companyid)
def populateBankform(self, company, invoice, payer):
self.drawBankformValue(company.iban, (120, 199))
self.drawBankformValue(company.bic, (170, 199))
self.drawBankformValue(company.name, (21, 216))
self.drawBankformValue("Laskun numero\n"+invoice.id, (120, 216))
self.drawBankformValue(invoice.ref, (124,257))
if self.type == "reminder":
self.drawBankformValue("HETI", (124,265))
else:
self.drawBankformValue(invoice.due_date, (124,265))
self.drawBankformValue(n(invoice.sum), (170,265))
self.drawBankformValue(payer.name+"\n"+payer.address+"\n"+payer.zipcode+" "+payer.city, (22, 230))
def drawCutline(self):
#cutline
self.p.setDash([2, 2], 0)
self.drawFullLine(195)
def drawBankform(self):
#horizontal fulllines
self.drawFullLine(211)
self.drawFullLine(260)
self.drawFullLine(268)
#horizontal partlines
self.drawPartLine(226, 110)
self.drawPartLine(252, 100, 110)
#vertical lines
		# payee details
self.drawVLine(195, 31, 20)
#middle
self.drawVLine(195, 73, 110)
		# reference number / due date boxes
self.drawVLine(252, 16, 123)
#bic
self.drawVLine(195, 16, 160)
# euro
self.drawVLine(260, 8, 160)
		# 'from account' row
self.p.setLineWidth(.13*mm)
self.drawVLine(260, 8, 20)
		# signature line
self.p.line(21*mm, 255*mm, 110*mm, 255*mm)
#texts
self.drawBankformLabel("Saajan\ntilinumero\nMottagarens\nkontonummer", (19,199))
self.drawBankformLabel("IBAN", (118,199))
self.drawBankformLabel("BIC", (168,199))
self.drawBankformLabel("Saaja\nMottagare", (19,216))
self.drawBankformLabel("Maksaja\nBetalare", (19,230))
self.drawBankformLabel("Allekirjoitus\nUnderskrift", (19,254))
self.drawBankformLabel(u"Tililtä nro\nFrån konto nr", (19,263))
self.drawBankformLabel("Viitenro\nRef.nr", (121,255))
self.drawBankformLabel(u"Eräpäivä\nFörf.dag", (121,263))
self.drawBankformLabel(u"Euro", (168,263))
def drawFullLine(self, height):
self.p.line(0+self.left, height*mm, self.width, height*mm)
def drawBankformLabel(self, text, pos):
self.p.setFont("Helvetica", 7)
x, y = pos
lines = text.split("\n")
i=0
for line in lines:
self.p.drawRightString(x*mm, (y+i)*mm, line)
i += 2.5
def drawWindowValue(self, text, pos, big=True):
if big:
self.p.setFont("Helvetica", 15)
s=6
else:
self.p.setFont("Helvetica", 11)
s=4
x, y = pos
lines = text.split("\n")
i=0
for line in lines:
self.p.drawString(x*mm, (y+i)*mm, line)
i += s
def drawBankformValue(self, text, pos):
self.p.setFont("Helvetica", 10)
x, y = pos
lines = text.split("\n")
i=0
for line in lines:
self.p.drawString(x*mm, (y+i)*mm, line)
i += 4
def drawPartLine(self, height, width, padding=0):
self.p.line((padding+self.left)*mm, height*mm, (padding+width+self.left)*mm, height*mm)
def drawVLine(self, height, lenght, padding):
self.p.line((padding+self.left)*mm, height*mm, (padding+self.left)*mm, (height+lenght)*mm)
def createInvoice(self, invdata, company, client):
self.invdata = invdata
self.drawInvoiceInfo(invdata)
self.drawProductHeader(invdata)
self.populateWindow(company, client)
if self.type != "receipt":
self.drawBankform()
self.populateBankform(company, invdata, client)
self.drawFooter(company)
def ready(self):
self.drawProductFooter(self.invdata)
self.drawLines()
self.drawCutline()
def save(self):
self.p.showPage()
self.p.save()
class Company():
def __init__(self, name, address, zipcode, city, phone, email, homeplace, companyid, iban, bic, website=""):
self.name = name
self.address = address
self.zipcode = zipcode
self.city = city
self.phone = phone
self.email = email
self.homeplace = homeplace
self.companyid = companyid
self.iban = iban
self.bic = bic
self.website = website
class InvoiceData():
def __init__(self, date, invoice_id, ref, client_id, our_ref, your_ref, payment_type, due_date, reclamation_time, penalty_interest, sum, info1="", info2=""):
self.date = date
self.id = invoice_id
self.ref = ref
self.client_id = client_id
self.our_ref = our_ref
self.your_ref = your_ref
self.payment_type = payment_type
self.due_date = due_date
self.reclamation_time = reclamation_time
self.penalty_interest = penalty_interest
self.sum = sum
self.info1 = info1
self.info2 = info2
class Client():
def __init__(self, name, address, zipcode, city):
self.name = name
self.address = address
self.zipcode = zipcode
self.city = city | mit | -6,308,574,670,345,561,000 | 32.573826 | 161 | 0.686026 | false |
chiamingyen/PythonCAD_py3 | Interface/Entity/ellipse.py | 1 | 1620 | #
# Copyright (c) ,2010 Matteo Boscolo
#
# This file is part of PythonCAD.
#
# PythonCAD is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# PythonCAD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PythonCAD; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
# qt ellipse class
#
from Interface.Entity.base import *
class Ellipse(BaseEntity):
def __init__(self, entity):
super(Ellipse, self).__init__(entity)
geoEnt=self.geoItem
self.xc,self.yc=geoEnt.center.getCoords()
self.yc=self.yc*-1.0
self.h=geoEnt.verticalRadius
self.w=geoEnt.horizontalRadius
self.setPos(QtCore.QPointF(self.xc, self.yc))
self.rotate(0.0)
return
def drawShape(self, painterPath):
"""
called from the shape method
"""
w2=self.w/2.0
h2=self.h/2.0
painterPath.addEllipse(-w2,-h2,self.w,self.h )
def drawGeometry(self, painter, option, widget):
"""
called from the paint method
"""
# Create Ellipse
painter.drawEllipse(self.boundingRect())
| gpl-2.0 | 3,794,190,349,041,091,000 | 31.4 | 75 | 0.662963 | false |
wojtask/CormenPy | test/test_chapter17/test_textbook17_4.py | 1 | 1291 | import random
from unittest import TestCase
from hamcrest import *
from chapter17.textbook17_4 import table_insert, table_delete
from datastructures.dynamic_table import DynamicTable
class TestTextbook17_4(TestCase):
def test_table_insert(self):
nelements = 20
elements = [random.randint(0, 999) for _ in range(nelements)]
T = DynamicTable()
for i in range(nelements):
table_insert(T, elements[i])
assert_that(T.num, is_(equal_to(i + 1)))
assert_that(T.num, is_(greater_than(T.size // 2)))
assert_that(T.num, is_(less_than_or_equal_to(T.size)))
actual_elements = T.table[:nelements]
assert_that(actual_elements, is_(equal_to(elements)))
def test_table_delete(self):
T = DynamicTable()
nelements = T.num = 20
T.table = [random.randint(0, 999) for _ in range(nelements)] + [None] * (32 - nelements)
T.size = 32
for i in range(nelements):
index = random.randint(0, T.num - 1)
table_delete(T, T.table[index])
assert_that(T.num, is_(equal_to(nelements - 1 - i)))
assert_that(T.num, is_(greater_than_or_equal_to(T.size // 4)))
assert_that(T.num, is_(less_than_or_equal_to(T.size)))
| gpl-3.0 | -7,282,318,737,319,688,000 | 32.102564 | 96 | 0.601084 | false |
lciti/cvxEDA | src/cvxEDA.py | 1 | 5876 | """
______________________________________________________________________________
File: cvxEDA.py
Last revised: 07 Nov 2015 r69
______________________________________________________________________________
Copyright (C) 2014-2015 Luca Citi, Alberto Greco
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You may contact the author by e-mail ([email protected]).
______________________________________________________________________________
This method was first proposed in:
A Greco, G Valenza, A Lanata, EP Scilingo, and L Citi
"cvxEDA: a Convex Optimization Approach to Electrodermal Activity Processing"
IEEE Transactions on Biomedical Engineering, 2015
DOI: 10.1109/TBME.2015.2474131
If you use this program in support of published research, please include a
citation of the reference above. If you use this code in a software package,
please explicitly inform the end users of this copyright notice and ask them
to cite the reference above in their published research.
______________________________________________________________________________
"""
import numpy as np
import cvxopt as cv
import cvxopt.solvers
def cvxEDA(y, delta, tau0=2., tau1=0.7, delta_knot=10., alpha=8e-4, gamma=1e-2,
solver=None, options={'reltol':1e-9}):
"""CVXEDA Convex optimization approach to electrodermal activity processing
This function implements the cvxEDA algorithm described in "cvxEDA: a
Convex Optimization Approach to Electrodermal Activity Processing"
(http://dx.doi.org/10.1109/TBME.2015.2474131, also available from the
authors' homepages).
Arguments:
y: observed EDA signal (we recommend normalizing it: y = zscore(y))
delta: sampling interval (in seconds) of y
tau0: slow time constant of the Bateman function
tau1: fast time constant of the Bateman function
delta_knot: time between knots of the tonic spline function
alpha: penalization for the sparse SMNA driver
gamma: penalization for the tonic spline coefficients
solver: sparse QP solver to be used, see cvxopt.solvers.qp
options: solver options, see:
http://cvxopt.org/userguide/coneprog.html#algorithm-parameters
Returns (see paper for details):
r: phasic component
p: sparse SMNA driver of phasic component
t: tonic component
l: coefficients of tonic spline
d: offset and slope of the linear drift term
e: model residuals
obj: value of objective function being minimized (eq 15 of paper)
"""
n = len(y)
y = cv.matrix(y)
# bateman ARMA model
a1 = 1./min(tau1, tau0) # a1 > a0
a0 = 1./max(tau1, tau0)
ar = np.array([(a1*delta + 2.) * (a0*delta + 2.), 2.*a1*a0*delta**2 - 8.,
(a1*delta - 2.) * (a0*delta - 2.)]) / ((a1 - a0) * delta**2)
ma = np.array([1., 2., 1.])
# matrices for ARMA model
i = np.arange(2, n)
A = cv.spmatrix(np.tile(ar, (n-2,1)), np.c_[i,i,i], np.c_[i,i-1,i-2], (n,n))
M = cv.spmatrix(np.tile(ma, (n-2,1)), np.c_[i,i,i], np.c_[i,i-1,i-2], (n,n))
# spline
delta_knot_s = int(round(delta_knot / delta))
spl = np.r_[np.arange(1.,delta_knot_s), np.arange(delta_knot_s, 0., -1.)] # order 1
spl = np.convolve(spl, spl, 'full')
spl /= max(spl)
# matrix of spline regressors
i = np.c_[np.arange(-(len(spl)//2), (len(spl)+1)//2)] + np.r_[np.arange(0, n, delta_knot_s)]
nB = i.shape[1]
j = np.tile(np.arange(nB), (len(spl),1))
p = np.tile(spl, (nB,1)).T
valid = (i >= 0) & (i < n)
B = cv.spmatrix(p[valid], i[valid], j[valid])
# trend
C = cv.matrix(np.c_[np.ones(n), np.arange(1., n+1.)/n])
nC = C.size[1]
# Solve the problem:
# .5*(M*q + B*l + C*d - y)^2 + alpha*sum(A,1)*p + .5*gamma*l'*l
# s.t. A*q >= 0
old_options = cv.solvers.options.copy()
cv.solvers.options.clear()
cv.solvers.options.update(options)
if solver == 'conelp':
# Use conelp
z = lambda m,n: cv.spmatrix([],[],[],(m,n))
G = cv.sparse([[-A,z(2,n),M,z(nB+2,n)],[z(n+2,nC),C,z(nB+2,nC)],
[z(n,1),-1,1,z(n+nB+2,1)],[z(2*n+2,1),-1,1,z(nB,1)],
[z(n+2,nB),B,z(2,nB),cv.spmatrix(1.0, range(nB), range(nB))]])
h = cv.matrix([z(n,1),.5,.5,y,.5,.5,z(nB,1)])
c = cv.matrix([(cv.matrix(alpha, (1,n)) * A).T,z(nC,1),1,gamma,z(nB,1)])
res = cv.solvers.conelp(c, G, h, dims={'l':n,'q':[n+2,nB+2],'s':[]})
obj = res['primal objective']
else:
# Use qp
Mt, Ct, Bt = M.T, C.T, B.T
H = cv.sparse([[Mt*M, Ct*M, Bt*M], [Mt*C, Ct*C, Bt*C],
[Mt*B, Ct*B, Bt*B+gamma*cv.spmatrix(1.0, range(nB), range(nB))]])
f = cv.matrix([(cv.matrix(alpha, (1,n)) * A).T - Mt*y, -(Ct*y), -(Bt*y)])
res = cv.solvers.qp(H, f, cv.spmatrix(-A.V, A.I, A.J, (n,len(f))),
cv.matrix(0., (n,1)), solver=solver)
obj = res['primal objective'] + .5 * (y.T * y)
cv.solvers.options.clear()
cv.solvers.options.update(old_options)
l = res['x'][-nB:]
d = res['x'][n:n+nC]
t = B*l + C*d
q = res['x'][:n]
p = A * q
r = M * q
e = y - r - t
return (np.array(a).ravel() for a in (r, p, t, l, d, e, obj))
| gpl-3.0 | -5,067,598,910,324,977,000 | 40.273381 | 96 | 0.550374 | false |
janinamass/gardening | Scythe/src/parseConfig.py | 1 | 45050 | #todo:
#get rid of *info* field
#close selecor box after downloads
#show "waiting" window
#show "done" windo when finished
#write Error file
#clean up/ delete temp files if "cleanup", even when crashing
import tkinter as tk
import configparser
from tkinter import filedialog
from tkinter import messagebox
from tkinter import ttk
from tkinter import OptionMenu
from tkinter import Scale
from tkinter import Listbox
import multiprocessing
from scytheGUI_classes import ScytheConvertDialogLoc
from scytheGUI_classes import ScytheConvertDialogGrp
import scythe_nv as scythe
import rmSubsetsFromGrp4 as mergeSubsets
#import scythe
import ensembl
root=tk.Tk()
root.title("Scythe GUI alpha")
root.iconbitmap('@scy.xbm')
import os
import ensembl_ortho_mysql
import ensembl2grp
import sys
global LOGSTR
LOGSTR = ""
global SCYTHE_PROCESS
SCYTHE_PROCESS = None
global CURRENTCONFIG
CURRENTCONFIG = configparser.ConfigParser()
BACKUPCONFIG = configparser.ConfigParser()
print(CURRENTCONFIG,BACKUPCONFIG)
############ labels ##################
CF_MODE = "Mode"
CF_MODE_use_ensembl = "use_ensembl_api"
CF_MODE_use_local_files = "use_local_files"
CF_PATHS = "Paths"
CF_PATHS_fasta_directory = "fasta_directory"
CF_PATHS_loc_directory = "loc_directory"
CF_PATHS_grp_file = "grp_file"
CF_PATHS_output_directory = "output_directory"
CF_CLEANUP = "Cleanup"
CF_CLEANUP_clean_up_directories = "clean_up_directories"
CF_RUN="Run_options"
CF_RUN_max_threads ="max_threads"
CF_RUN_split_input="split_input"
CF_PENALTIES = "Penalties"
CF_PENALTIES_gap_open_cost = "gap_open_cost"
CF_PENALTIES_gap_extend_cost="gap_extend_cost"
CF_PENALTIES_substitution_matrix="substitution_matrix"
CF_ALGORITHM = "Algorithm"
CF_ALGORITHM_use_global_max ="use_global_max"
CF_ALGORITHM_use_default="use_default"
CF_ALGORITHM_use_global_sum="use_global_sum"
CF_PARALOGS="Paralogs"
CF_PARALOGS_include_paralogs = "include_paralogs"
###### TODO 02.12.13 #######
CF_FASTAHEADER="Fasta_header"
CF_FASTAHEADER_delimiter = "fasta_header_delimiter"
CF_FASTAHEADER_part = "fasta_header_part"
############################
##################options###########################
OPTIONS = {}
#dropdown menus
yn =["yes","no"]
for o in [CF_PARALOGS_include_paralogs, CF_ALGORITHM_use_global_max,CF_ALGORITHM_use_default,
CF_ALGORITHM_use_global_sum,CF_RUN_split_input, CF_CLEANUP_clean_up_directories,
CF_MODE_use_local_files,CF_MODE_use_ensembl]:
OPTIONS[o]=yn
MAXCPU = multiprocessing.cpu_count()
###################################################
SECTIONS = [CF_MODE,CF_PATHS, CF_CLEANUP, CF_RUN, CF_PENALTIES,CF_ALGORITHM,CF_PARALOGS, CF_FASTAHEADER]
MAXCONFIG = configparser.ConfigParser()
for i in SECTIONS:
MAXCONFIG.add_section(i)
for i in [CF_MODE_use_ensembl,CF_MODE_use_local_files]:
MAXCONFIG.set(CF_MODE,i,"unset")
for i in [CF_PATHS_fasta_directory,CF_PATHS_loc_directory,CF_PATHS_grp_file,CF_PATHS_output_directory ]:
MAXCONFIG.set(CF_PATHS,i,"unset")
for i in [CF_CLEANUP_clean_up_directories]:
MAXCONFIG.set(CF_CLEANUP,i,"yes")
for i in [CF_RUN_max_threads,CF_RUN_split_input]:
MAXCONFIG.set(CF_RUN,i,"1")
for i in [CF_PENALTIES_gap_open_cost,CF_PENALTIES_gap_extend_cost,CF_PENALTIES_substitution_matrix]:
if i == CF_PENALTIES_gap_open_cost:
MAXCONFIG.set(CF_PENALTIES,i,"10")
if i == CF_PENALTIES_gap_extend_cost:
MAXCONFIG.set(CF_PENALTIES,i,"0.5")
if i == CF_PENALTIES_substitution_matrix:
MAXCONFIG.set(CF_PENALTIES,i,"EBLOSUM62")
for i in [CF_ALGORITHM_use_global_max,CF_ALGORITHM_use_default,CF_ALGORITHM_use_global_sum ]:
MAXCONFIG.set(CF_ALGORITHM,i,"unset")
for i in [CF_PARALOGS_include_paralogs ]:
MAXCONFIG.set(CF_PARALOGS,i,"unset")
for i in [CF_FASTAHEADER_delimiter, CF_FASTAHEADER_part]:
if i == CF_FASTAHEADER_delimiter:
MAXCONFIG.set(CF_FASTAHEADER,i,'" "')
if i == CF_FASTAHEADER_part:
MAXCONFIG.set(CF_FASTAHEADER,i,"0")
for i in MAXCONFIG.items():
print(i)
######################################
#q'n'd
def initConfCurrent():
global CURRENTCONFIG
global MAXCONFIG
for i in MAXCONFIG.sections():
try:
CURRENTCONFIG.add_section(i)
except configparser.DuplicateSectionError as e:
pass
for j in MAXCONFIG.options(i):
print(i,j)
CURRENTCONFIG.set(i,j,MAXCONFIG.get(i,j))
def backupConf():
global CURRENTCONFIG
global BACKUPCONFIG
for i in CURRENTCONFIG.sections():
try:
BACKUPCONFIG.add_section(i)
except configparser.DuplicateSectionError as e:
pass
for j in CURRENTCONFIG.options(i):
BACKUPCONFIG.set(i,j,CURRENTCONFIG.get(i,j) )
def backupConfTo(newconf):
global CURRENTCONFIG
for i in CURRENTCONFIG.sections():
try:
newconf.add_section(i)
except configparser.DuplicateSectionError as e:
pass
for j in CURRENTCONFIG.options(i):
newconf.set(i,j,CURRENTCONFIG.get(i,j) )
def setCurrentConf(newconf):
global CURRENTCONFIG
for i in newconf.sections():
try:
CURRENTCONFIG.add_section(i)
except configparser.DuplicateSectionError as e:
pass
for j in newconf.options(i):
CURRENTCONFIG.set(i,j,newconf.get(i,j) )
def restoreConf():
global BACKUPCONFIG
global CURRENTCONFIG
for i in BACKUPCONFIG.sections():
try:
CURRENTCONFIG.add_section(i)
except configparser.DuplicateSectionError as e:
pass
for j in BACKUPCONFIG.options(i):
CURRENTCONFIG.set(i,j,BACKUPCONFIG.get(i,j))
######################################
def logged(f):
global LOGSTR
def wrapped(*args, **kargs):
global LOGSTR
print ("%s called..." % f.__name__)
try:
LOGSTR=LOGSTR+f.__name__+str(args)+str(kargs)
return f(*args, **kargs)
finally:
print ("..Done.")
print(LOGSTR)
return wrapped
class ConfigHandler():
def __init__(self):
self._currentconfig = configparser.ConfigParser()
@property
def currentconfig(self):
return self._currentconfig
@currentconfig.setter
def currentconfig(self,config):
self._currentconfig=config
def reset(self):
initConfCurrent()
print("full reset")
class ScytheConfigEditor():
def __init__(self):
global CURRENTCONFIG
global MAXCONFIG
global CF_MODE
backupConf()
print("BACKED UP")
tmpconfig= configparser.ConfigParser()
backupConfTo(tmpconfig)
top = tk.Toplevel()
top.title("Set configuration")
nb = ttk.Notebook(top)
b_config_ok = tk.Button(top, text="OK", command=top.destroy)
b_config_ok.bind('<ButtonRelease-1>',self.onSetConfigOK)
b_config_apply = tk.Button(top, text="Apply", command=self.onSetConfigApply)
b_config_cancel = tk.Button(top, text="Cancel", command=top.destroy)
b_config_cancel.bind('<ButtonRelease-1>',self.onSetConfigCancel())
fr_paths = tk.Frame(nb,width=200, height=100)
fr_penalties = tk.Frame(nb,width=200, height=100)
fr_mode = ttk.Frame(nb,width=200, height=100)
#fr_output = ttk.Frame(nb,width=200, height=100)
fr_cleanup = ttk.Frame(nb,width=200, height=100)
fr_run = ttk.Frame(nb,width=200, height=100)
fr_algorithm = ttk.Frame(nb,width=200, height=100)
fr_paralogs = ttk.Frame(nb,width=200, height=100)
fr_fastaheader = ttk.Frame(nb,width=200, height=100)
#######labels########################
self.txt_sec=[]
self.txt_subsec={}
for section in MAXCONFIG.sections():
print( "["+section +"]\n")
self.txt_sec.append(section)
for opt in MAXCONFIG.options(section):
try:
self.txt_subsec[section].append(opt)
except KeyError as e:
self.txt_subsec[section]=[opt]
lab_sec=[]
lab_subsec={}
dd_subsec={}
self.var_subsec={}
for t in self.txt_sec:
lab_sec.append(tk.Label(fr_paths,text = t))
for t in self.txt_subsec:
print(t,self.txt_subsec[t])
for u in self.txt_subsec[t]:
if t == CF_MODE:
fr = fr_mode
elif t == CF_PATHS:
fr = fr_paths
elif t == CF_CLEANUP:
fr = fr_cleanup
elif t == CF_RUN:
fr = fr_run
elif t == CF_PENALTIES:
fr = fr_penalties
elif t == CF_ALGORITHM:
fr = fr_algorithm
elif t == CF_PARALOGS:
fr = fr_paralogs
############TODO################
elif t == CF_FASTAHEADER:
fr = fr_fastaheader
print("fastaheader_fr")
################################
else:
print("No such section:",t)
try:
lab_subsec[t].append(tk.Label(fr,text = u))
self.var_subsec[t].append(tk.StringVar(fr))
if u in OPTIONS:
dd_subsec[t].append(OptionMenu(fr,self.var_subsec[t][-1],*OPTIONS[u]))
else:
dd_subsec[t].append("")
except KeyError as e:
try:
lab_subsec[t]=[tk.Label(fr,text = u)]
self.var_subsec[t]=[tk.StringVar(fr)]
if u in OPTIONS:
dd_subsec[t] = [OptionMenu(fr,self.var_subsec[t][-1],*OPTIONS[u])]
else:
dd_subsec[t] = [""]
except KeyError as e:
print(e)
dd_subsec[t].append("")
for t in lab_subsec:
r=0
c=0
for i in lab_subsec[t]:
print(i.cget("text"))
i.grid(row=r,column=c, sticky=tk.E)
r+=1
print(r,i.cget("text"))
for t in dd_subsec:
c=1
r=0
for i in dd_subsec[t]:
print(i)
                if i != "":
i.grid(row=r,column=c,sticky=tk.N)
r+=1
print(r)
######################################
self.st_submat = tk.StringVar()
#self.st_outpref = tk.StringVar()
#self.st_spliteach = tk.StringVar()
self.st_fasta_header_delimiter = tk.StringVar()
self.st_fasta_header_part = tk.StringVar()
self.sc_config_numthreads = Scale(fr_run, from_=1, to=multiprocessing.cpu_count(), orient=tk.HORIZONTAL)
self.sc_config_numthreads.grid(row=0, column=1, sticky=tk.E)
en_config_gapopen=tk.Entry(fr_penalties, textvariable=self.var_subsec[CF_PENALTIES][0])
en_config_gapextend=tk.Entry(fr_penalties,textvariable=self.var_subsec[CF_PENALTIES][1] )
#self.en_config_spliteach=tk.Entry(fr_run,textvariable=self.st_spliteach,width=6 )
self.en_config_fasta_header_delimiter= tk.Entry(fr_fastaheader,textvariable=self.st_fasta_header_delimiter,width=6 )
self.en_config_fasta_header_part= tk.Entry(fr_fastaheader,textvariable=self.st_fasta_header_part ,width=6 )
self.om_config_submat=tk.OptionMenu(fr_penalties, self.st_submat, *["EBLOSUM62","EDNAFULL"])
self.om_config_submat.grid(row=2,column=1 )
#self.en_config_outpref=tk.Entry(fr_output, width=6, textvariable=self.st_outpref)
en_config_gapopen.grid(row=0, column=1)
en_config_gapextend.grid(row=1, column=1)
#en_config_submat.grid(row=2, column=1)
#self.en_config_outpref.grid(row=1, column=1)
#self.en_config_spliteach.grid(row=2,column=1)
self.en_config_fasta_header_delimiter.grid(row=0, column=1)
self.en_config_fasta_header_part.grid(row=1,column=1)
#nb.add(fr_mode, text=CF_MODE)
#nb.add(fr_paths, text=CF_PATHS)
nb.add(fr_penalties, text=CF_PENALTIES)
#nb.add(fr_output, text=CF_OUTPUT)
nb.add(fr_cleanup, text=CF_CLEANUP)
nb.add(fr_run, text=CF_RUN)
nb.add(fr_algorithm, text=CF_ALGORITHM)
#nb.add(fr_paralogs, text=CF_PARALOGS)
###################TODO#################
nb.add(fr_fastaheader, text=CF_FASTAHEADER)
nb.grid()
b_config_cancel.grid(row=1, column=0, sticky=tk.E,padx=115)
b_config_apply.grid(row=1, column=0, sticky=tk.E,padx=50)
b_config_ok.grid(row=1, column=0, sticky=tk.E)
self.setFieldsFromConfig()
def onSetConfigApply(self):
print("configapply")
self.setConfigFromFields()
#Infobox().todo()
def onSetConfigOK(self,event):
print("configapply")
self.setConfigFromFields()
def onSetConfigCancel(self):
restoreConf()
print("RESTORED-->CURRENTCONF set")
#self.restoreOldConfig()
print("Config CANCEL")
def setConfigFromFields(self):
tempconf = configparser.ConfigParser()
backupConfTo(tempconf)
#get all values from fields
#penalties
tempconf.set(CF_PENALTIES,CF_PENALTIES_gap_open_cost,self.var_subsec[CF_PENALTIES][0].get() )
tempconf.set(CF_PENALTIES, CF_PENALTIES_gap_extend_cost,self.var_subsec[CF_PENALTIES][1].get())
tempconf.set(CF_PENALTIES, CF_PENALTIES_substitution_matrix,self.st_submat.get())
tempconf.set(CF_ALGORITHM, CF_ALGORITHM_use_global_max,self.var_subsec[CF_ALGORITHM][0].get())
tempconf.set(CF_ALGORITHM, CF_ALGORITHM_use_default,self.var_subsec[CF_ALGORITHM ][1].get())
tempconf.set(CF_ALGORITHM, CF_ALGORITHM_use_global_sum,self.var_subsec[CF_ALGORITHM][2].get())
tempconf.set(CF_RUN, CF_RUN_max_threads,str(self.sc_config_numthreads.get()))
tempconf.set(CF_RUN, CF_RUN_split_input, self.var_subsec[CF_RUN][1].get())
#CLEANUP
tempconf.set(CF_CLEANUP, CF_CLEANUP_clean_up_directories, self.var_subsec[CF_CLEANUP][0].get())
#Fasta header
tempconf.set(CF_FASTAHEADER, CF_FASTAHEADER_delimiter, self.var_subsec[CF_FASTAHEADER][0].get())
print("blabla",self.var_subsec[CF_FASTAHEADER][0].get())
tempconf.set(CF_FASTAHEADER, CF_FASTAHEADER_part, self.var_subsec[CF_FASTAHEADER][1].get())
print("III",self.var_subsec[CF_FASTAHEADER][0].get())
tempconf.set(CF_FASTAHEADER, CF_FASTAHEADER_part,self.st_fasta_header_part.get())
tempconf.set(CF_FASTAHEADER, CF_FASTAHEADER_delimiter,self.st_fasta_header_delimiter.get())
#output
#outputprefix:
#tempconf.set(CF_OUTPUT,CF_OUTPUT_output_prefix,self.en_config_outpref.get())
########## TODO 02.12.13 ############
#print (self.en_config_outpref.get())
#self.var_subsec[CF_PENALTIES][0].set(CURRENTCONFIG.get(CF_PENALTIES,self.txt_subsec[CF_PENALTIES][0]))
#print(CURRENTCONFIG.get(CF_PENALTIES,self.txt_subsec[CF_PENALTIES][1]))
setCurrentConf(tempconf)
print(CURRENTCONFIG)
for t in tempconf.options(CF_PENALTIES):
print(t)
print(tempconf.get(CF_PENALTIES,t))
for t in tempconf.options(CF_ALGORITHM):
print(t)
print(tempconf.get(CF_ALGORITHM,t))
def setFieldsFromConfig(self):
#penalties
print(self.txt_subsec[CF_PENALTIES][0])
print(CURRENTCONFIG.get(CF_PENALTIES,self.txt_subsec[CF_PENALTIES][0]))
self.var_subsec[CF_PENALTIES][0].set(CURRENTCONFIG.get(CF_PENALTIES,self.txt_subsec[CF_PENALTIES][0]))
print(CURRENTCONFIG.get(CF_PENALTIES,self.txt_subsec[CF_PENALTIES][1]))
self.var_subsec[CF_PENALTIES][1].set(CURRENTCONFIG.get(CF_PENALTIES,self.txt_subsec[CF_PENALTIES][1]))
self.st_submat.set(CURRENTCONFIG.get(CF_PENALTIES, CF_PENALTIES_substitution_matrix))
#output
#cleanup
self.var_subsec[CF_CLEANUP][0].set(CURRENTCONFIG.get(CF_CLEANUP,self.txt_subsec[CF_CLEANUP][0]))
#run
#slider
self.var_subsec[CF_RUN][1].set(CURRENTCONFIG.get(CF_RUN,self.txt_subsec[CF_RUN][1]))
#algo
self.var_subsec[CF_ALGORITHM][0].set(CURRENTCONFIG.get(CF_ALGORITHM,self.txt_subsec[CF_ALGORITHM][0]))
self.var_subsec[CF_ALGORITHM][1].set(CURRENTCONFIG.get(CF_ALGORITHM,self.txt_subsec[CF_ALGORITHM][1]))
self.var_subsec[CF_ALGORITHM][2].set(CURRENTCONFIG.get(CF_ALGORITHM,self.txt_subsec[CF_ALGORITHM][2]))
#paralogs
self.var_subsec[CF_PARALOGS][0].set(CURRENTCONFIG.get(CF_PARALOGS,self.txt_subsec[CF_PARALOGS][0]))
#########TODO 02.12.13 ???
self.var_subsec[CF_FASTAHEADER][0].set(CURRENTCONFIG.get(CF_FASTAHEADER,self.txt_subsec[CF_FASTAHEADER][0]))
#self.var_subsec[CF_FASTAHEADER][1].set(CURRENTCONFIG.get(CF_FASTAHEADER,self.st_fasta_header_part))
print(self.txt_subsec[CF_FASTAHEADER][0])
print(CURRENTCONFIG.get(CF_FASTAHEADER,self.txt_subsec[CF_FASTAHEADER][0]))
self.var_subsec[CF_FASTAHEADER][0].set(CURRENTCONFIG.get(CF_FASTAHEADER,self.txt_subsec[CF_FASTAHEADER][0]))
print(CURRENTCONFIG.get(CF_FASTAHEADER,self.txt_subsec[CF_FASTAHEADER][1]))
self.var_subsec[CF_FASTAHEADER][1].set(CURRENTCONFIG.get(CF_FASTAHEADER,self.txt_subsec[CF_FASTAHEADER][1]))
self.st_fasta_header_part.set(CURRENTCONFIG.get(CF_FASTAHEADER, CF_FASTAHEADER_part))
self.st_fasta_header_delimiter.set(CURRENTCONFIG.get(CF_FASTAHEADER, CF_FASTAHEADER_delimiter))
class Infobox():
@logged
def todo(self):
message="Soon (tm)."
messagebox.showinfo(title="Todo...", message = message )
@logged
def about(self):
message="Scythe GUI 0.4 \nOctober 2013\nJ. Mass\n"
messagebox.showinfo(title="About Scythe", message = message )
def wanttosave(self):
pass
def bepatient(self):
message="Scythe is running\n This may take some time.\n"
messagebox.showinfo(title="Running", message = message )
def saveConfig(self):
formats = [('Scythe configuration','*.scy')]
tmp= tk.filedialog.asksaveasfilename(parent=self.parent,filetypes=formats ,title="Save configuration as...")
def showConfig(self):
global CURRENTCONFIG
tmp = ConfigHandler()
print(CURRENTCONFIG)
tmp.currentconfig=CURRENTCONFIG
#read configuration
print("tmp", tmp)
print("tmp.cc", tmp.currentconfig)
message = ""
for section in tmp.currentconfig.sections():
message += "["+section +"]\n"
for option in tmp.currentconfig.options(section):
message += " "+ option+ "="+ tmp.currentconfig.get(section, option)+"\n"
#messagebox.showinfo(title="Config", message = "" )
top = tk.Toplevel(root)
top.title("Configuration")
txt = tk.Text(top)
scrollv = tk.Scrollbar(top, command=txt.yview)
txt.insert(tk.INSERT,message)
txt.configure(yscrollcommand=scrollv.set, state=tk.DISABLED, background="black", foreground="green" )
txt.grid(row=0, column=0)
scrollv.grid(row=0, column=1)
class ScytheMenu(tk.Frame):
def __init__(self, parent, arg = None):
tk.Frame.__init__(self, parent)
self.parent = parent
self.initGUI()
self.confighandler = ConfigHandler()
initConfCurrent()
self.scythewizard= ScytheWizard(self.parent)
self.configEditor = None
if arg:
print(arg)
self.loadConfigArg(arg)
def initGUI(self):
menubar = tk.Menu(self.parent)
self.parent.config(menu=menubar)
fileMenu = tk.Menu(menubar)
fileMenu.add_command(label="New run...", command=self.onNewRun)
#fileMenu.add_command(label="Convert files...", command=self.onConvertFiles)
fileMenu.add_command(label="Load configuration...", command=self.onLoadConfig)
fileMenu.add_command(label="Save configuration...", command=self.onSaveConfig)
convertMenu = tk.Menu(fileMenu)
convertMenu.add_command(label="convert orthology information to .grp", command=self.onConvertToGrp)
convertMenu.add_command(label="convert loci/transcript information to .loc", command=self.onConvertToLoc)
fileMenu.add_cascade(label='Convert files...', menu=convertMenu, underline=0)
fileMenu.add_command(label="Exit", command=self.onExit)
optionsMenu = tk.Menu(menubar)
optionsMenu.add_command(label="Show configuration...", command=self.onShowOptions)
optionsMenu.add_command(label="Set configuration...", command=self.onSetOptions)
infoMenu = tk.Menu(menubar)
infoMenu.add_command(label="Show log...", command=self.onShowLog)
helpMenu = tk.Menu(menubar)
helpMenu.add_command(label="About...", command=self.onAbout)
menubar.add_cascade(label="File", menu=fileMenu)
menubar.add_cascade(label="Options", menu=optionsMenu)
menubar.add_cascade(label="Info", menu=infoMenu)
menubar.add_cascade(label="Help", menu=helpMenu)
#self.onNewRun()
def onConvertToGrp(self):
ScytheConvertDialogGrp()
def onConvertToLoc(self):
ScytheConvertDialogLoc()
def onExit(self):
self.quit()
def onNewRun(self):
self.scythewizard=ScytheWizard(self.parent)
self.confighandler.reset()
def loadConfigArg(self,arg):
cfg = self.confighandler.currentconfig.read(arg)
global CURRENTCONFIG
CURRENTCONFIG=self.confighandler.currentconfig
self.scythewizard.st_fastaDir.set(self.confighandler.currentconfig.get(CF_PATHS,'fasta_directory') )
self.scythewizard.st_locDir.set(self.confighandler.currentconfig.get(CF_PATHS,'loc_directory') )
self.scythewizard.st_grpFile.set(self.confighandler.currentconfig.get(CF_PATHS,'grp_file') )
self.scythewizard.st_outDir.set(self.confighandler.currentconfig.get(CF_PATHS,'output_directory') )
self.onSetOptions()
def onLoadConfig(self):
formats = [('Scythe configuration','*.scy')]
tmp = tk.filedialog.askopenfilename(parent=self.parent,filetypes=[('Scythe configuration','*.scy')],title="Load configuration...")
cfg = self.confighandler.currentconfig.read(tmp)
global CURRENTCONFIG
CURRENTCONFIG=self.confighandler.currentconfig
if self.confighandler.currentconfig.get(CF_MODE,'use_local_files')=='yes':
self.scythewizard.st_fastaDir.set(self.confighandler.currentconfig.get(CF_PATHS,'fasta_directory') )
self.scythewizard.st_locDir.set(self.confighandler.currentconfig.get(CF_PATHS,'loc_directory') )
self.scythewizard.st_grpFile.set(self.confighandler.currentconfig.get(CF_PATHS,'grp_file') )
self.scythewizard.st_outDir.set(self.confighandler.currentconfig.get(CF_PATHS,'output_directory') )
self.scythewizard.cb_use_local.select()
self.scythewizard.cb_use_local.configure(state=tk.NORMAL)
self.scythewizard.cb_use_ensembl.configure(state=tk.DISABLED)
self.scythewizard.ent_fastaDir.configure(state=tk.NORMAL)
self.scythewizard.ent_locDir.configure(state=tk.NORMAL)
self.scythewizard.ent_grpFile.configure(state=tk.NORMAL)
self.scythewizard.ent_outDir.configure(state=tk.NORMAL)
#for section in cfg.sections():
# print(section)
# for option in cfg.options(section):
# print (" ", option, "=", cfg.get(section, option))
#self.scythewizard.st_fastaDir.set(cfg["Local_directories"]['fasta_directory'])
#print(self.confighandler.cuurentconfig())
return tmp
def onSaveConfig(self):
formats = [('Scythe configuration','*.scy')]
tmp= tk.filedialog.asksaveasfilename(parent=self.parent,filetypes=formats ,title="Save configuration as...")
print(tmp)
global CURRENTCONFIG
print(CURRENTCONFIG)
try:
out = open(tmp,"w")
CURRENTCONFIG.write(out)
out.close()
        except IOError as e:
print (e)
#self.ent_grpFile.config(state=tk.NORMAL)
#self.st_grpFile.set(tmp)
return tmp
def onShowLog(self):
pass
def onEnsembl(self):
pass
def onLocal(self):
pass
def onAbout(self):
Infobox().about()
def onConvertFiles(self):
pass
def onShowOptions(self):
Infobox().showConfig()
#Infobox().todo()
#def restoreOldConfig(self):
# CURRENTCONFIG = configparser.ConfigParser(BACKUPCONFIG)
#def saveNewConfig(self):
# CURRENTCONFIG = configparser.ConfigParser(self.tempconfig)
# BACKUPCONFIG = configparser.ConfigParser(CURRENTCONFIG)
# def applyPenalties(self):
# pass
# def applyOutput(self):
# pass
# def applyCleanup(self):
# pass
# def applyRun(self):
# pass
# def applyAlgorithm(self):
# pass
# def applyParalogs(self):
# pass
def onSetOptions(self):
self.configEditor = ScytheConfigEditor()
class EnsemblSelector(tk.Listbox):
lb = None
top= None
speclist = None
rellist = None
itemlist = None
outdir = None
def __init__(self, outdir):
speclist = []
rellist = []
itemlist = []
data = ensembl.specInfo()
self.data = data
self.outdir = outdir
top = tk.Toplevel(root)
#adjust width and height
top.title("Select Species from Ensembl")
self.top=top
lb = Listbox(self.top,selectmode='multiple',exportselection=0 ,width=40, height=30,)
self.lb= lb
#self.top=top
print(data)
for d in data["species"]:
print(d["name"])
if not d["name"].startswith("Ancestral"):
speclist.append(d["name"])
itemlist.append(d["name"]+'_core_'+str(d["release"]))
rellist.append(d["release"])
self.itemlist=itemlist
self.speclist=speclist
self.rellist = rellist
self.b_ensOK = tk.Button(self.top, text="OK", command=self.onEnsOK)
self.b_ensQuit = tk.Button(self.top, text="Cancel", command=self.onEnsQuit)
self.b_ensOK.grid(row=1, column=0,sticky="E", padx=60)
self.b_ensQuit.grid(row=1, column=0,sticky="E", padx=0)
self.prepRun(itemlist)
def fileExists(self,filename):
try:
tmp = open(filename,'r')
tmp.close()
except IOError as e:
return(False)
return(True)
def onEnsOK(self):
print("onOK")
specs,rel = self.readListBox()
#ensembl.useEnsemblDB(specs,rel, self.outdir)
print("wait...", self.outdir, specs, rel)
self.b_ensOK.configure(state=tk.DISABLED)
self.b_ensQuit.configure(state=tk.DISABLED)
fapath = self.outdir+os.sep+"fa"
tmp = [s for s in specs if not self.fileExists(fapath+os.sep+s+".fa")]
unrel = [s for s in specs if self.fileExists(fapath+os.sep+s+".fa")]
specs = tmp
for u in unrel:
print("already there: "+fapath+os.sep+u+".fa")
ensembl.getSequencesFromFTP(self.outdir, rel, specs)
locpath = self.outdir+os.sep+"loc"
print("fasta done",self.outdir)
for i in specs:
print(i)
if not self.fileExists(locpath+os.sep+i+".loc"):
try:
ensembl.prepareLocFromFasta(fapath+os.sep+i+".fa",locpath+os.sep,i )
except IOError as e:
print(e)
print("Warning: No such fasta: ",fapath+os.sep+i+".fa")
else:
                print("already there: "+locpath+os.sep+i+".loc")
###test:TODO deal with different releases: Throw warning, has to be done manually
grpstring =""
for i in specs:
grpstring+=i[0:2]
grpfile = self.outdir+os.sep+grpstring+"_tmp.grp"
if self.fileExists(grpfile):
            print("already there:", grpfile)
else:
listoftsv=ensembl_ortho_mysql.fetchOrthoFromMySQL(specieslist = specs, release=rel[0])
#grpstring =""
#for i in specs:
# grpstring+=i[0:2]
ensembl2grp.readTsvFiles(listoftsv=listoftsv, outfile=grpfile)
#####update
outNoSubsets=self.outdir+os.sep+grpstring+".full.grp"
out = self.outdir+os.sep+grpstring+".shared_by_all.grp"
numspec= mergeSubsets.filterGroups(grpfile,None, None, False)
mergeSubsets.mergeSubsets(grpfile,outNoSubsets, True)
mergeSubsets.filterGroups(outNoSubsets, out, numspec, False)
self.top.destroy()
CURRENTCONFIG.set(CF_PATHS,CF_PATHS_fasta_directory, fapath+os.sep)
CURRENTCONFIG.set(CF_PATHS,CF_PATHS_loc_directory, locpath+os.sep)
CURRENTCONFIG.set(CF_PATHS,CF_PATHS_grp_file, out)
ScytheWizard(root).prepRun(reloadFields=False)
#dat = ensembl.specInfo()
#print(dat)
def onEnsQuit(self):
print("onQuit")
self.top.destroy()
#def cancelRun(self, process):
# process.terminate()
def prepRun(self, itemlist):
print("EnsemblSelectorPrepRun")
#pass
#top = tk.Toplevel(root)
#top.title("Ensembl Species Selector")
#tmp = Listbox(top,selectmode='multiple',exportselection=0)
for item in itemlist:
self.lb.insert(tk.END, item)
self.lb.grid(row=0, column=0)
#b_ensOK = tk.Button(self.top, text="OK", command=self.onEnsOK)
#b_ensQuit = tk.Button(self.top, text="Cancel", command=self.onEnsQuit)
#b_ensOK.grid(row=1, column=0,sticky="E", padx=60)
#b_ensQuit.grid(row=1, column=0,sticky="E", padx=0)
def readListBox(self):
items = self.lb.curselection()
print(items)
print(self.speclist)
selecteditemsspec = [self.speclist[int(item)] for item in items]
selecteditemsrel = [self.rellist[int(item)] for item in items]
print(selecteditemsspec, selecteditemsrel)
return(selecteditemsspec, selecteditemsrel)
#def callScythe(groups,delim,asID,faFileList,namesList, cleanUp, stopAfter=stopAfter, inDir=inDir, outDir=outDir,
# gapOpen=gapOpen, gapExtend=gapExtend,
# locDir=locDir,faDir=faDir):
# scythe.runScythe(groups=groups, delim=delim,
# asID=asID, faFileList=faFileList,
# namesList=namesList, cleanUp=cleanUp,
# stopAfter=stopAfter, inDir=inDir, outDir=outDir,
# gapOpen=gapOpen, gapExtend=gapExtend,
# locDir=locDir,faDir=faDir)
class ScytheWizard(tk.Tk):
def __init__(self, parent):
self.parent = parent
self.initWizard()
def quit(self):
root.destroy()
def setConfigFromFields(self):
tempconf = configparser.ConfigParser()
backupConfTo(tempconf)
tempconf.set(CF_PATHS, CF_PATHS_output_directory,self.ent_outDir.get())
tempconf.set(CF_PATHS, CF_PATHS_fasta_directory,self.ent_fastaDir.get())
tempconf.set(CF_PATHS, CF_PATHS_loc_directory,self.ent_locDir.get())
tempconf.set(CF_PATHS, CF_PATHS_grp_file,self.ent_grpFile.get())
setCurrentConf(tempconf)
print(CURRENTCONFIG)
def prepRun(self, reloadFields=True): ####TODO!
global SCYTHE_PROCESS
scythe.VERBOSE=False
#config = CURRENTCONFIG
print("prep run called")
#############09.12.13 ######
#update config one more time
if reloadFields:
self.setConfigFromFields()
#CURRENTCONFIG.set(CF_PATHS, CF_PATHS_output_directory,self.ent_outDir.get())
print("Read entry fields one more time", CURRENTCONFIG.get(CF_PATHS, CF_PATHS_output_directory))
###############################################
#check whether ensembl or local is checked
useEnsembl= CURRENTCONFIG.get(CF_MODE, CF_MODE_use_ensembl)
useLocal = CURRENTCONFIG.get(CF_MODE, CF_MODE_use_local_files)
#outdir to
outdir = CURRENTCONFIG.get(CF_PATHS,CF_PATHS_output_directory)
#catch unset outdir
cleanUp="yes"
scythe.GLOBMAX = False
scythe.GLOBSUM = False
print(useEnsembl)
if useEnsembl == "yes":
ens = EnsemblSelector(outdir)
else:
if useLocal == "yes":
fastaDir = CURRENTCONFIG.get(CF_PATHS,CF_PATHS_fasta_directory)
locDir = CURRENTCONFIG.get(CF_PATHS,CF_PATHS_loc_directory)
grpFile = CURRENTCONFIG.get(CF_PATHS,CF_PATHS_grp_file)
print("Will use local files")
print(fastaDir,locDir,grpFile,outdir)
################
if CURRENTCONFIG.get(CF_ALGORITHM,CF_ALGORITHM_use_global_max)!="yes":
scythe.GLOBMAX = False
else:
scythe.GLOBMAX = True
if CURRENTCONFIG.get(CF_ALGORITHM,CF_ALGORITHM_use_global_sum)!="yes":
scythe.GLOBSUM = False
else:
scythe.GLOBSUM = True
if CURRENTCONFIG.get(CF_CLEANUP,CF_CLEANUP_clean_up_directories) !="yes":
cleanUp = False
else:
cleanUp = True
groups= CURRENTCONFIG.get(CF_PATHS,CF_PATHS_grp_file)
namesList = None
faDir = CURRENTCONFIG.get(CF_PATHS,CF_PATHS_fasta_directory)+os.sep
inDir = faDir+os.sep
outDir = CURRENTCONFIG.get(CF_PATHS,CF_PATHS_output_directory)+os.sep
locDir = CURRENTCONFIG.get(CF_PATHS,CF_PATHS_loc_directory)+os.sep
fastaList = os.listdir(faDir)
#gffList = None
delim = CURRENTCONFIG.get(CF_FASTAHEADER,CF_FASTAHEADER_delimiter).strip('"')
try:
asID = int(CURRENTCONFIG.get(CF_FASTAHEADER,CF_FASTAHEADER_part))
except ValueError as e:
print(e)
asID=None
stopAfter = False
gapOpen= CURRENTCONFIG.get(CF_PENALTIES,CF_PENALTIES_gap_open_cost)
gapExtend =CURRENTCONFIG.get(CF_PENALTIES,CF_PENALTIES_gap_extend_cost)
faFileList = os.listdir(faDir)
namesList = os.listdir(faDir)
namesList = [n[0:3] for n in namesList]
print(groups)
print(namesList)
print(gapOpen,gapExtend )
print(faDir, faFileList)
print("Loc", locDir)
##run scythe
#order matters for argument list
p = multiprocessing.Process(target=scythe.runScythe,args=[groups,delim,asID,namesList,cleanUp,stopAfter,faFileList,inDir,outDir,gapOpen, gapExtend,locDir,faDir])
SCYTHE_PROCESS = p
p.start()
print (p, p.is_alive())
#multiprocessing.Process.
#threads.append(p)
#scythe.runScythe(groups=groups, delim=delim,
# asID=asID, faFileList=faFileList,
# namesList=namesList, cleanUp=cleanUp,
# stopAfter=stopAfter, inDir=inDir, outDir=outDir,
# gapOpen=gapOpen, gapExtend=gapExtend,
# locDir=locDir,faDir=faDir)
################
#scythe.runScythe(groups=groups, delim=delim,
# asID=asID, faFileList=faFileList,
# namesList=namesList, cleanUp=cleanUp,
# stopAfter=stopAfter, inDir=inDir, outDir=outDir,
# gapOpen=gapOpen, gapExtend=gapExtend,
# locDir=locDir,faDir=faDir)
def cancelRun(self, process):
if process:
process.terminate()
print("('Cancel') -> Terminated by User.")
process = None
else:
print("No running process.")
def initWizard(self):
global SCYTHE_PROCESS
#Labels
self.lab_fastaDir = tk.Label(text="Fasta Directory")
self.lab_locDir = tk.Label(text=".loc Directory")
self.lab_grpFile = tk.Label(text=".grp File")
self.lab_outDir = tk.Label(text="Output Directory")
#Ints
self.int_ensembl = tk.IntVar()
self.int_local = tk.IntVar()
#Strings
self.st_fastaDir = tk.StringVar()
self.st_fastaDir.set("")
self.st_locDir= tk.StringVar()
self.st_locDir.set("")
self.st_grpFile= tk.StringVar()
self.st_grpFile.set("")
self.st_outDir= tk.StringVar()
self.st_outDir.set("")
#Entries
self.ent_fastaDir = tk.Entry(root, width = 30,
textvariable = self.st_fastaDir, state = tk.DISABLED)
self.ent_locDir = tk.Entry(root, width = 30,
textvariable = self.st_locDir, state = tk.DISABLED)
self.ent_grpFile = tk.Entry(root, width = 30,
textvariable = self.st_grpFile, state = tk.DISABLED)
self.ent_outDir = tk.Entry(root, width = 30,
textvariable = self.st_outDir, state = tk.DISABLED)
#Buttons
self.b_loadConfig = tk.Button()
self.b_saveConfig = tk.Button()
self.b_fastaDir = tk.Button(text="open...",command=self.askFastaDir, state=tk.DISABLED)#self.askopenfilename)
self.b_locDir = tk.Button(text="open...",command=self.askLocDir, state=tk.DISABLED)#self.askopenfilename)
self.b_grpFile = tk.Button(text="open...",command=self.askGrpFile, state=tk.DISABLED)#self.askopenfilename)
self.b_outDir = tk.Button(text="open...",command=self.askOutDir, state=tk.DISABLED)#self.askopenfilename)
######21.10
self.b_next = tk.Button(root, text="Next...", command = self.prepRun)
self.b_quit = tk.Button(root, text="Quit", command = self.quit)
#####06.12
self.b_cancel = tk.Button(root, text = "Cancel", command = lambda: self.cancelRun(SCYTHE_PROCESS))
######
#Checkuttons
self.cb_use_ensembl = tk.Checkbutton(root, text='use ENSEMBL',variable=self.int_ensembl,
command=self.useEnsembl)
self.cb_use_local = tk.Checkbutton(root, text='use local files',variable=self.int_local,
command=self.useLocal, state=tk.NORMAL)
#add to grid
self.cb_use_ensembl.grid(row=0, column=0 )
self.cb_use_local.grid(row=0, column=1)
self.lab_fastaDir.grid(row=1, column=0)
self.ent_fastaDir.grid(row=1, column=1)
self.b_fastaDir.grid(row=1, column=2)
self.lab_locDir.grid(row=2, column=0)
self.ent_locDir.grid(row=2, column=1)
self.b_locDir.grid(row=2, column=2)
self.lab_grpFile.grid(row=3, column=0)
self.ent_grpFile.grid(row=3, column=1)
self.b_grpFile.grid(row=3, column=2)
self.lab_outDir.grid(row=4, column=0, sticky="E")
self.ent_outDir.grid(row=4, column=1, sticky="E")
self.b_outDir.grid(row=4, column=2, sticky="E")
#self.b_convertOrtho.grid(row=3,column=3)
self.b_next.grid(row=5, column=1, sticky="E")
self.b_quit.grid(row=5, column=2, sticky="W")
self.b_cancel.grid(row=5, column=3, sticky="E")
def useLocal(self):
global CURRENTCONFIG
print("CURRENTCONFIG",CURRENTCONFIG)
if self.int_local.get() ==1:
CURRENTCONFIG.set(CF_MODE, "use_local_files", "yes")
CURRENTCONFIG.set(CF_MODE, "use_ensembl_api", "no")
#self.st_fastaDir
self.ent_fastaDir.config(state=tk.NORMAL)
self.ent_locDir.config(state=tk.NORMAL)
self.ent_grpFile.config(state=tk.NORMAL)
self.ent_outDir.config(state=tk.NORMAL)
self.b_fastaDir.config(state=tk.NORMAL)
self.b_locDir.config(state=tk.NORMAL)
self.b_grpFile.config(state=tk.NORMAL)
self.b_outDir.config(state=tk.NORMAL)
self.cb_use_ensembl.config(state=tk.DISABLED)
else:
CURRENTCONFIG.set("Mode", "use_local_files", "no")
CURRENTCONFIG.set("Mode", "use_ensembl_api", "no")
#self.st_fastaDir="some"
self.ent_fastaDir.config(state=tk.DISABLED)
self.ent_locDir.config(state=tk.DISABLED)
self.ent_grpFile.config(state=tk.DISABLED)
self.ent_outDir.config(state=tk.DISABLED)
self.b_fastaDir.config(state=tk.DISABLED)
self.b_locDir.config(state=tk.DISABLED)
self.b_grpFile.config(state=tk.DISABLED)
self.b_outDir.config(state=tk.DISABLED)
self.cb_use_ensembl.config(state=tk.NORMAL)
def askFastaDir(self):
tmp= filedialog.askdirectory()
print(tmp)
self.ent_fastaDir.config(state=tk.NORMAL)
self.st_fastaDir.set(tmp)
if self.ent_locDir.get()=="":
if os.path.isdir(os.path.split(tmp)[0]+os.sep+"loc"):
self.st_locDir.set(os.path.split(tmp)[0]+os.sep+"loc")
CURRENTCONFIG.set(CF_PATHS, "loc_directory", os.path.split(tmp)[0]+os.sep+"loc")
CURRENTCONFIG.set(CF_PATHS, "fasta_directory",tmp)
return tmp
def askLocDir(self):
tmp= filedialog.askdirectory()
print(tmp)
self.ent_locDir.config(state=tk.NORMAL)
self.st_locDir.set(tmp)
if self.ent_locDir.get()=="":
if os.path.isdir(os.path.split(tmp)[0]+os.sep+"fa"):
self.st_locDir.set(os.path.split(tmp)[0]+os.sep+"fa")
CURRENTCONFIG.set(CF_PATHS, "fasta_directory",os.path.split(tmp)[0]+os.sep+"fa")
CURRENTCONFIG.set(CF_PATHS, "loc_directory",tmp)
        return tmp
def askGrpFile(self):
tmp= filedialog.askopenfilename()
self.ent_grpFile.config(state=tk.NORMAL)
self.st_grpFile.set(tmp)
CURRENTCONFIG.set(CF_PATHS, "grp_file",tmp)
return tmp
def askOutDir(self):
tmp= filedialog.askdirectory(mustexist=False)
print(tmp)
self.ent_outDir.config(state=tk.NORMAL)
self.st_outDir.set(tmp)
CURRENTCONFIG.set(CF_PATHS, CF_PATHS_output_directory,tmp)
#if self.ent_locDir.get()=="":
# if os.path.isdir(os.path.split(tmp)[0]+os.sep+"fa"):
# self.st_Dir.set(os.path.split(tmp)[0]+os.sep+"fa")
#return filedialog.tmp()
def useEnsembl(self):
CURRENTCONFIG.set("Mode", "use_local_files", "no")
CURRENTCONFIG.set("Mode", "use_ensembl_api", "yes")
print(self.int_ensembl)
print(self.ent_fastaDir)
if self.int_ensembl.get() ==1:
print("True", self.st_fastaDir)
self.ent_fastaDir.config(state=tk.DISABLED)
self.ent_locDir.config(state=tk.DISABLED)
self.ent_outDir.config(state=tk.NORMAL)
self.b_fastaDir.config(state=tk.DISABLED)
self.b_locDir.config(state=tk.DISABLED)
self.b_outDir.config(state=tk.NORMAL)
self.cb_use_local.config(state=tk.DISABLED)
else:
self.cb_use_local.config(state=tk.NORMAL)
self.b_outDir.config(state=tk.DISABLED)
self.ent_outDir.config(state=tk.DISABLED)
CURRENTCONFIG.set("Mode", "use_local_files", "no")
CURRENTCONFIG.set("Mode", "use_ensembl_api", "no")
try:
arg = sys.argv[1]
except IndexError as e:
app=ScytheMenu(root)
else:
app=ScytheMenu(root,arg)
root.mainloop()
| gpl-3.0 | -1,004,195,054,635,034,200 | 40.330275 | 169 | 0.591942 | false |
arthurmensch/cogspaces | exps/train.py | 1 | 8861 | """Perform training of a multi-study model using the fetchers provided by cogspaces.
Hyperparameters can be edited in the file."""
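# Illustrative invocations (assumed to be run from the directory containing this
# script; the flags used are the ones defined in the argparse block at the bottom):
#   python train.py -e multi_study -s 0 -j 4
#   python train.py -e logistic --plot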
import argparse
import json
import os
from os.path import join
import numpy as np
from joblib import Memory, dump
from sklearn.metrics import accuracy_score
from cogspaces.classification.ensemble import EnsembleClassifier
from cogspaces.classification.logistic import MultiLogisticClassifier
from cogspaces.classification.multi_study import MultiStudyClassifier
from cogspaces.datasets import STUDY_LIST, load_reduced_loadings
from cogspaces.datasets.contrast import load_masked_contrasts
from cogspaces.datasets.derivative import load_from_directory, split_studies
from cogspaces.datasets.utils import get_output_dir
from cogspaces.model_selection import train_test_split
from cogspaces.preprocessing import MultiStandardScaler, MultiTargetEncoder
from cogspaces.utils import compute_metrics, ScoreCallback, MultiCallback
def run(estimator='multi_study', seed=0, plot=False, n_jobs=1, use_gpu=False, split_by_task=False,
verbose=0):
# Parameters
system = dict(
verbose=verbose,
n_jobs=n_jobs,
plot=plot,
seed=seed,
output_dir=None
)
data = dict(
studies='all',
dataset='loadings', # Useful to override source directory
test_size=0.5,
train_size=0.5,
reduced=True,
data_dir=None,
)
model = dict(
split_by_task=split_by_task,
estimator=estimator,
normalize=True,
seed=100,
target_study=None,
)
config = {'system': system, 'data': data, 'model': model}
if model['estimator'] in ['multi_study', 'ensemble']:
multi_study = dict(
latent_size=128,
weight_power=0.6,
batch_size=128,
init='resting-state',
latent_dropout=0.75,
input_dropout=0.25,
device='cuda:0' if use_gpu else 'cpu',
seed=100,
lr={'pretrain': 1e-3, 'train': 1e-3, 'finetune': 1e-3},
max_iter={'pretrain': 200, 'train': 300, 'finetune': 200},
)
config['multi_study'] = multi_study
if model['estimator'] == 'ensemble':
ensemble = dict(
seed=100,
n_runs=120,
alpha=1e-5, )
config['ensemble'] = ensemble
else:
logistic = dict(l2_penalty=np.logspace(-7, 0, 4).tolist(),
max_iter=1000, )
config['logistic'] = logistic
output_dir = join(get_output_dir(config['system']['output_dir']),
'normalized',
'split_by_task' if split_by_task else 'split_by_study',
config['model']['estimator'],
str(config['system']['seed']))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
info = {}
with open(join(output_dir, 'config.json'), 'w+') as f:
json.dump(config, f)
print("Loading data")
if data['studies'] == 'all':
studies = STUDY_LIST
elif isinstance(data['studies'], str):
studies = [data['studies']]
elif isinstance(data['studies'], list):
studies = data['studies']
else:
raise ValueError("Studies should be a list or 'all'")
if data['dataset'] is not None:
input_data, targets = load_from_directory(dataset=data['dataset'], data_dir=data['data_dir'])
elif data['reduced']:
input_data, targets = load_reduced_loadings(data_dir=data['data_dir'])
else:
input_data, targets = load_masked_contrasts(data_dir=data['data_dir'])
input_data = {study: input_data[study] for study in studies}
targets = {study: targets[study] for study in studies}
if model['split_by_task']:
_, split_targets = split_studies(input_data, targets)
target_encoder = MultiTargetEncoder().fit(split_targets)
train_data, test_data, train_targets, test_targets = \
train_test_split(input_data, targets, random_state=system['seed'],
test_size=data['test_size'],
train_size=data['train_size'])
if model['split_by_task']:
train_data, train_targets = split_studies(train_data, train_targets)
test_data, test_targets = split_studies(test_data, test_targets)
train_targets = target_encoder.transform(train_targets)
test_targets = target_encoder.transform(test_targets)
print("Setting up model")
if model['normalize']:
standard_scaler = MultiStandardScaler().fit(train_data)
train_data = standard_scaler.transform(train_data)
test_data = standard_scaler.transform(test_data)
else:
standard_scaler = None
if model['estimator'] in ['multi_study', 'ensemble']:
estimator = MultiStudyClassifier(verbose=system['verbose'],
n_jobs=system['n_jobs'],
**multi_study)
if model['estimator'] == 'ensemble':
memory = Memory(cachedir=None)
estimator = EnsembleClassifier(estimator,
n_jobs=system['n_jobs'],
memory=memory,
**ensemble
)
callback = None
else:
# Set some callback to obtain useful verbosity
test_callback = ScoreCallback(Xs=test_data, ys=test_targets,
score_function=accuracy_score)
train_callback = ScoreCallback(Xs=train_data, ys=train_targets,
score_function=accuracy_score)
callback = MultiCallback({'train': train_callback,
'test': test_callback})
info['n_iter'] = train_callback.n_iter_
info['train_scores'] = train_callback.scores_
info['test_scores'] = test_callback.scores_
elif model['estimator'] == 'logistic':
estimator = MultiLogisticClassifier(verbose=system['verbose'],
n_jobs=n_jobs,
**logistic)
callback = None
print("Training model")
estimator.fit(train_data, train_targets, callback=callback)
print("Evaluating model")
test_preds = estimator.predict(test_data)
metrics = compute_metrics(test_preds, test_targets, target_encoder)
print(metrics['accuracy'])
print("Saving model")
# Save model for further analysis
dump(target_encoder, join(output_dir, 'target_encoder.pkl'))
if model['normalize']:
dump(standard_scaler, join(output_dir, 'standard_scaler.pkl'))
dump(estimator, join(output_dir, 'estimator.pkl'))
with open(join(output_dir, 'metrics.json'), 'w+') as f:
json.dump(metrics, f)
with open(join(output_dir, 'info.json'), 'w+') as f:
json.dump(info, f)
if config['system']['plot']:
from utils.plotting import make_plots, prepare_plots
print('Preparing plots')
prepare_plots(output_dir)
print("Plotting model")
plot_components = config['model']['estimator'] in ['multi_study',
'ensemble']
make_plots(output_dir, plot_classifs=True,
plot_components=plot_components,
plot_surface=False, plot_wordclouds=True,
n_jobs=config['system']['n_jobs'])
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-e', '--estimator', type=str,
choices=['logistic', 'multi_study', 'ensemble'],
default='multi_study',
help='estimator type')
parser.add_argument('-s', '--seed', type=int,
default=0,
help='Integer to use to seed the model and half-split cross-validation')
parser.add_argument('-v', '--verbose', type=int,
default=0,
help='Verbosity')
parser.add_argument('-p', '--plot', action="store_true",
help='Plot the results (classification maps, cognitive components)')
parser.add_argument('-t', '--task', action="store_true",
help='Split by tasks')
    parser.add_argument('-g', '--gpu', action="store_true",
                        help='Use GPU (CUDA) for model training')
parser.add_argument('-j', '--n_jobs', type=int,
default=1, help='Number of CPUs to use')
args = parser.parse_args()
run(args.estimator, args.seed, args.plot, args.n_jobs, args.gpu, args.task, args.verbose)
| bsd-2-clause | -1,676,447,398,017,185,000 | 40.023148 | 101 | 0.573412 | false |
uclouvain/osis | program_management/ddd/domain/service/get_program_tree_version_for_tree.py | 1 | 1896 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2020 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from typing import Set, List
import program_management.ddd
from program_management.ddd.business_types import *
from program_management.ddd.service.read.search_all_versions_from_root_nodes import search_all_versions_from_root_nodes
def get_program_tree_version_for_tree(tree_nodes: Set['Node']) -> List['ProgramTreeVersion']:
commands = [
program_management.ddd.command.SearchAllVersionsFromRootNodesCommand(code=node.code,
year=node.year) for node in tree_nodes
]
return search_all_versions_from_root_nodes(commands)
| agpl-3.0 | 1,912,027,011,069,440,500 | 48.868421 | 119 | 0.655409 | false |
nidkil/gunbot_config_updater | gunbot_config_updater.py | 1 | 11281 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script takes a Gunbot configuration file from the specified URL, updates the keys, creates a Gunthy GUI config
file and then stops and starts Gunthy GUI.
@author Stephen Oostenbrink (nidkil) <stephen at oostenbrink dot com>
"""
import os
import json
import requests
import logging
import subprocess
import shutil
import argparse
from check_webpage_updated import WebPageMonitor
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
# Use Dante's Pastebin as default url
DEFAULT_URL = 'https://pastebin.com/raw/WSZdAP1m'
class GunbotConfigHandler(object):
__TEST_MODE = 'test-'
def __init__(self, config_dir=None, test_mode=None):
self.test_mode_overrule = test_mode
if config_dir is None:
self.config_dir = os.path.dirname(os.path.abspath(__file__))
else:
self.config_dir = config_dir
self.config_path = os.path.join(self.config_dir, 'gunbot_config_updater.json')
if not os.path.isfile(self.config_path):
logging.error("Configuration file missing {}".format(self.config_path))
exit(-3)
logging.info("Loading config file from {}".format(self.config_path))
with open(self.config_path) as config_file:
self.configuration = json.load(config_file)
config_file.close()
logging.debug(self)
@property
def test_mode(self):
if self.test_mode_overrule is None:
return self.configuration['testMode']
else:
return self.test_mode_overrule
@property
def gunbot_location(self):
if len(self.configuration['gunbot']['location']) == 0:
return self.config_dir
else:
return self.configuration['gunbot']['location']
@property
def gunbot_config(self):
if self.test_mode:
return self.__TEST_MODE + self.configuration['gunbot']['config']
else:
return self.configuration['gunbot']['config']
@property
def gunbot_start(self):
return self.configuration['gunbot']['start']
@property
def gunbot_stop(self):
return self.configuration['gunbot']['stop']
@property
def gunthy_gui_config(self):
if self.test_mode:
return self.__TEST_MODE + self.configuration['gui']['config']
else:
return self.configuration['gui']['config']
@property
def gunthy_gui_gunbot_enabled(self):
return self.configuration['gui']['enabled']
@property
def gunthy_gui_gunbot_version(self):
return self.configuration['gui']['gunbotVersion']
@property
def gunthy_gui_start(self):
return self.configuration['gui']['start']
@property
def gunthy_gui_stop(self):
return self.configuration['gui']['stop']
@property
def backup(self):
return self.configuration['backup']
def __repr__(self):
return "<%s instance at %s>" % (self.__class__.__name__, id(self))
def __str__(self):
return "%s (\n\ttest_mode_overrule=%s\n\ttest_mode=%s\n\tgunbot_location=%s\n\tgunbot_config=%s" \
"\n\tgunthy_gui_config=%s\n\tgunthy_gui_gunbot_version=%s\n\tgunthy_gui_start=%s\n\tgunthy_gui_stop=%s" \
"\n\tbackup=%s\n)" % (
self.__class__.__name__,
self.test_mode_overrule,
self.test_mode,
self.gunbot_location,
self.gunbot_config,
self.gunthy_gui_config,
self.gunthy_gui_gunbot_version,
self.gunthy_gui_start,
self.gunthy_gui_stop,
self.backup
)
class GunbotConfigUpdater(object):
__SECRETS_FILE = 'secrets.json'
__BACKUP_EXT = '.backup'
config = None
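    # secrets.json layout assumed by __update_keys below (illustrative sketch only):
    #   {"exchanges": [{"exchange": "poloniex", "api_key": "...", "api_secret": "..."}]}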
def __init__(self, config_dir=None, test_mode=None):
self.config_handler = GunbotConfigHandler(config_dir=config_dir, test_mode=test_mode)
self.secrets_file_path = os.path.join(self.config_handler.gunbot_location, self.__SECRETS_FILE)
if not os.path.isfile(self.secrets_file_path):
logging.error("Secrets file missing: {}".format(self.secrets_file_path))
exit(-4)
logging.info("Loading API keys from {}".format(self.secrets_file_path))
with open(self.secrets_file_path) as secrets_file:
self.secrets = json.load(secrets_file)
secrets_file.close()
def __update_keys(self):
for secret in self.secrets['exchanges']:
exchange = secret['exchange']
logging.info("Updating API keys for {}".format(exchange))
self.config['exchanges'][exchange]['key'] = secret['api_key']
self.config['exchanges'][exchange]['secret'] = secret['api_secret']
def __update_gunthy_gui(self):
exchanges = ['poloniex', 'kraken', 'bittrex', 'cryptopia']
gui_config = []
for exchange in exchanges:
if exchange in self.config['pairs']:
logging.info("Updating Gunthy GUI config for {}".format(exchange))
pairs = self.config['pairs'][exchange]
pair_config = {}
for pair in pairs:
loaded_pair_config = self.config['pairs'][exchange][pair]
pair_config['gunbotVersion'] = self.config_handler.gunthy_gui_gunbot_version
pair_config['exchange'] = exchange
pair_config['pair'] = pair
pair_config['config'] = {}
pair_config['config']['strategy'] = loaded_pair_config['strategy']
pair_config['config']['override'] = {}
                    if 'override' in loaded_pair_config:
for key in loaded_pair_config['override']:
pair_config['config']['override'][key] = loaded_pair_config['override'][key]
gui_config.append(pair_config.copy())
self.__write_json_to_file(self.config_handler.gunthy_gui_config, gui_config, True)
def __update_gunbot_config(self):
self.__write_json_to_file(self.config_handler.gunbot_config, self.config, True)
def __write_json_to_file(self, dest_file, json_data, backup=False):
dest_path = os.path.join(self.config_handler.gunbot_location, dest_file)
backup_path = dest_path + self.__BACKUP_EXT
if backup and os.path.isfile(backup_path):
logging.info("Deleting old backup file {}".format(backup_path))
os.remove(backup_path)
if backup and os.path.isfile(dest_path):
logging.info("Backing up config file from '{}' to '{}'".format(dest_path, backup_path))
shutil.copy2(dest_path, backup_path)
with open(dest_path, 'w') as f:
json.dump(json_data, f, sort_keys=False, indent=4, ensure_ascii=False)
            f.close()
def __rollback_config(self, dest_file):
dest_path = os.path.join(self.config_handler.gunbot_location, dest_file)
backup_path = dest_path + self.__BACKUP_EXT
if not os.path.isfile(backup_path):
logging.info("Backup file '{}' does not exist, skipping".format(backup_path))
elif os.path.isfile(dest_path):
logging.info("Deleting configuration file '{}'".format(dest_path))
os.remove(dest_path)
logging.info("Restoring previous configuration file from '{}' to '{}'".format(dest_path, backup_path))
shutil.copy2(backup_path, dest_path)
logging.info("Deleting backup configuration file '{}'".format(backup_path))
os.remove(backup_path)
def __exec_cmd(self, cmd):
if self.config_handler.test_mode:
logging.info("Command: ".format(cmd))
else:
try:
logging.debug("Command: {}".format(cmd))
output = subprocess.check_output(cmd.split(' '))
logging.debug("Command output: {}".format(output))
except subprocess.CalledProcessError as e:
logging.error("Error executing command [exit code= {}, message=\n{}]".format(e.returncode, e.output))
def __restart_gunthy_gui(self):
self.__exec_cmd(self.config_handler.gunbot_stop)
self.__exec_cmd(self.config_handler.gunbot_start)
self.__exec_cmd(self.config_handler.gunthy_gui_stop)
self.__exec_cmd(self.config_handler.gunthy_gui_start)
def execute(self, config_url):
logging.info("Loading Gunbot configuration from {}".format(config_url))
response = requests.get(config_url)
self.config = json.loads(response.text)
self.__update_keys()
self.__update_gunbot_config()
if self.config_handler.gunthy_gui_gunbot_enabled:
self.__update_gunthy_gui()
self.__restart_gunthy_gui()
def rollback(self):
logging.info("Rollback configuration files to previous version")
self.__rollback_config(self.config_handler.gunbot_config)
if self.config_handler.gunthy_gui_gunbot_enabled:
self.__rollback_config(self.config_handler.gunthy_gui_config)
# TODO send Telegram message when updated or on error
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='download a Gunbot configuration from the specified url')
parser.add_argument('-r', '--rollback', help='rollback to the previous configuration', action='store_true')
parser.add_argument('url', nargs='?', help='url to retrieve Gunbot configuration from')
parser.add_argument('-c', '--changed', help='only update if web page changed', action='store_true')
parser.add_argument('-o', '--onlycheck', help='only check if web page changed', action='store_true')
parser.add_argument('-t', '--testmode', help='create test configuration files only', action='store_true')
verbose_group = parser.add_mutually_exclusive_group()
verbose_group.add_argument('-v', '--verbose', help='increase output verbosity', action='store_true')
verbose_group.add_argument('-q', '--quiet', help="no output", action='store_true')
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
args = parser.parse_args()
name = os.path.basename(os.path.abspath(__file__))
if args.url is None and not args.rollback:
args.url = DEFAULT_URL
if args.url is not None and args.rollback:
        print('{}: url and rollback cannot be specified at the same time'.format(name))
        print("try '{} --help' for more information.".format(name))
exit(-2)
if args.onlycheck:
monitor = WebPageMonitor(args.url, force_use_hash=True)
print "Web page changed: {}".format(monitor.check())
exit(5)
if args.verbose:
logging.getLogger().setLevel(logging.DEBUG)
if args.quiet:
logging.getLogger().setLevel(logging.ERROR)
if args.changed:
monitor = WebPageMonitor(args.url, force_use_hash=True)
if not monitor.check():
exit(0)
if args.testmode:
updater = GunbotConfigUpdater(test_mode=args.testmode)
else:
updater = GunbotConfigUpdater()
if args.rollback:
updater.rollback()
else:
updater.execute(args.url)
| mit | 3,852,402,770,475,489,300 | 40.322344 | 117 | 0.616701 | false |
michalbachowski/pycontentmakeup | src/contentmakeup/strategy/template_renderer.py | 1 | 1147 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from contentmakeup.strategy import StrategyInterface
class TemplateRenderer(StrategyInterface):
"""Class describes strategy for extracting metadata"""
subject = ('template_renderer',)
def __init__(self, format_discovery, template_manager, input_reader):
self.template_manager = template_manager
self.discover_format = format_discovery
self.read_input = input_reader
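        # cache of compiled templates, keyed by template file path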
self._compiled = {}
def __call__(self, content, config, template_file):
return self._get_compiled(template_file)({'content': \
content, 'config': config})
def _get_compiled(self, template_file):
if template_file not in self._compiled:
self._compiled[template_file] = self._compile(template_file)
return self._compiled[template_file]
def _compile(self, template_file):
template_type = self.discover_format(template_file)
return self.template_manager(template_type).compile(template_type, \
self.read_input(template_file))
| mit | -1,085,219,933,147,370,900 | 39.964286 | 79 | 0.622493 | false |
RedHatInsights/insights-core | insights/parsers/libvirtd_log.py | 1 | 2771 | """
Libvirtd Logs
=============
This module contains the following parsers:
LibVirtdLog - file ``/var/log/libvirt/libvirtd.log``
----------------------------------------------------
LibVirtdQemuLog - file ``/var/log/libvirt/qemu/*.log``
------------------------------------------------------
"""
from insights.specs import Specs
from insights import LogFileOutput, parser
@parser(Specs.libvirtd_log)
class LibVirtdLog(LogFileOutput):
"""
Parse the ``/var/log/libvirt/libvirtd.log`` log file.
.. note::
Please refer to its super-class :class:`insights.core.LogFileOutput`
Sample input::
2013-10-23 17:32:19.909+0000: 14069: debug : do_open:1174 : trying driver 0 (Test) ...
2013-10-23 17:32:19.909+0000: 14069: debug : do_open:1180 : driver 0 Test returned DECLINED
2013-10-23 17:32:19.909+0000: 14069: debug : do_open:1174 : trying driver 1 (ESX) ...
2013-10-23 17:32:19.909+0000: 14069: debug : do_open:1180 : driver 1 ESX returned DECLINED
2013-10-23 17:32:19.909+0000: 14069: debug : do_open:1174 : trying driver 2 (remote) ...
2013-10-23 17:32:19.957+0000: 14069: error : virNetTLSContextCheckCertDN:418 : Certificate [session] owner does not match the hostname AA.BB.CC.DD <============= IP Address
2013-10-23 17:32:19.957+0000: 14069: warning : virNetTLSContextCheckCertificate:1102 : Certificate check failed Certificate [session] owner does not match the hostname AA.BB.CC.DD
2013-10-23 17:32:19.957+0000: 14069: error : virNetTLSContextCheckCertificate:1105 : authentication failed: Failed to verify peer's certificate
Examples:
>>> "Certificate check failed Certificate" in libvirtd_log
True
>>> len(libvirtd_log.lines) # All lines, before filtering
8
>>> len(libvirtd_log.get('NetTLSContext')) # After filtering
3
"""
pass
@parser(Specs.libvirtd_qemu_log)
class LibVirtdQemuLog(LogFileOutput):
"""
Parse the ``/var/log/libvirt/qemu/*.log`` log file.
.. note::
Please refer to its super-class :class:`insights.core.LogFileOutput`
Sample input from file /var/log/libvirt/qemu/bb912729-fa51-443b-bac6-bf4c795f081d.log::
2019-06-04 05:33:22.280743Z qemu-kvm: -vnc 10.xxx.xxx.xxx:0: Failed to start VNC server: Failed to bind socket: Cannot assign requested address
2019-06-04 05:33:2.285+0000: shutting down
Examples:
>>> from datetime import datetime
>>> "shutting down" in libvirtd_qemu_log
True
>>> len(list(libvirtd_qemu_log.get_after(datetime(2019, 4, 26, 6, 55, 20))))
2
>>> libvirtd_qemu_log.file_name.strip('.log') # Instance UUID
'bb912729-fa51-443b-bac6-bf4c795f081d'
"""
pass
| apache-2.0 | -4,709,715,493,350,284,000 | 38.585714 | 187 | 0.636593 | false |
SathyaBhat/spotify-dl | setup.py | 1 | 1543 | #!/usr/bin/env python
from setuptools import setup, find_packages
from spotify_dl.constants import VERSION
with open('README.md') as f:
long_description = f.read()
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setup(
name='spotify_dl',
version=VERSION,
python_requires='>=3.6',
install_requires=requirements,
author='Sathya Bhat',
author_email='[email protected]',
packages=find_packages(),
include_package_data=True,
url='https://github.com/SathyaBhat/spotify-dl/',
license='MIT',
description='Downloads songs from a Spotify Playlist/Track/Album that you provide',
long_description=long_description,
long_description_content_type='text/markdown',
entry_points={
'console_scripts': [
'spotify_dl=spotify_dl.spotify_dl:spotify_dl',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Internet',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
)
| mit | 1,793,278,256,263,504,400 | 31.829787 | 87 | 0.626701 | false |
akittas/geocoder | geocoder/api.py | 1 | 16248 | #!/usr/bin/python
# coding: utf8
from __future__ import absolute_import
from geocoder.osm import Osm
from geocoder.w3w import W3W
from geocoder.bing import Bing
from geocoder.here import Here
from geocoder.tamu import Tamu
from geocoder.tgos import Tgos
from geocoder.yahoo import Yahoo
from geocoder.baidu import Baidu
from geocoder.tomtom import Tomtom
from geocoder.arcgis import Arcgis
from geocoder.ottawa import Ottawa
from geocoder.yandex import Yandex
from geocoder.mapbox import Mapbox
from geocoder.mapzen import Mapzen
from geocoder.ipinfo import Ipinfo
from geocoder.komoot import Komoot
from geocoder.maxmind import Maxmind
from geocoder.location import Location
from geocoder.opencage import OpenCage
from geocoder.geonames import Geonames
from geocoder.mapquest import Mapquest
from geocoder.distance import Distance
from geocoder.geolytica import Geolytica
from geocoder.freegeoip import FreeGeoIP
from geocoder.canadapost import Canadapost
from geocoder.geocodefarm import GeocodeFarm
from geocoder.uscensus import USCensus
from geocoder.w3w_reverse import W3WReverse
from geocoder.osm_reverse import OsmReverse
from geocoder.here_reverse import HereReverse
from geocoder.bing_reverse import BingReverse
from geocoder.arcgis_reverse import ArcgisReverse
from geocoder.mapzen_reverse import MapzenReverse
from geocoder.komoot_reverse import KomootReverse
from geocoder.mapbox_reverse import MapboxReverse
from geocoder.yandex_reverse import YandexReverse
from geocoder.mapquest_reverse import MapquestReverse
from geocoder.opencage_reverse import OpenCageReverse
from geocoder.geocodefarm_reverse import GeocodeFarmReverse
from geocoder.uscensus_reverse import USCensusReverse
# Google Services
from geocoder.google import Google
from geocoder.google_timezone import Timezone
from geocoder.google_reverse import GoogleReverse
from geocoder.google_elevation import Elevation
from geocoder.google_places import Places
options = {
'osm': {
'geocode': Osm,
'reverse': OsmReverse,
},
'tgos': {
'geocode': Tgos
},
'here': {
'geocode': Here,
'reverse': HereReverse,
},
'baidu': {'geocode': Baidu},
'yahoo': {'geocode': Yahoo},
'tomtom': {'geocode': Tomtom},
'arcgis': {
'geocode': Arcgis,
'reverse': ArcgisReverse
},
'ottawa': {'geocode': Ottawa},
'mapbox': {
'geocode': Mapbox,
'reverse': MapboxReverse,
},
'maxmind': {'geocode': Maxmind},
'ipinfo': {'geocode': Ipinfo},
'geonames': {'geocode': Geonames},
'freegeoip': {'geocode': FreeGeoIP},
'w3w': {
'geocode': W3W,
'reverse': W3WReverse,
},
'yandex': {
'geocode': Yandex,
'reverse': YandexReverse
},
'mapquest': {
'geocode': Mapquest,
'reverse': MapquestReverse,
},
'geolytica': {'geocode': Geolytica},
'canadapost': {'geocode': Canadapost},
'opencage': {
'geocode': OpenCage,
'reverse': OpenCageReverse,
},
'bing': {
'geocode': Bing,
'reverse': BingReverse,
},
'google': {
'geocode': Google,
'reverse': GoogleReverse,
'timezone': Timezone,
'elevation': Elevation,
'places': Places,
},
'mapzen': {
'geocode': Mapzen,
'reverse': MapzenReverse,
},
'komoot': {
'geocode': Komoot,
'reverse': KomootReverse,
},
'tamu': {
'geocode': Tamu
},
'geocodefarm': {
'geocode': GeocodeFarm,
'reverse': GeocodeFarmReverse,
},
'uscensus': {
'geocode': USCensus,
'reverse': USCensusReverse,
},
}
def get(location, **kwargs):
"""Get Geocode
:param ``location``: Your search location you want geocoded.
:param ``provider``: The geocoding engine you want to use.
    :param ``method``: Define the method (geocode, reverse, etc.).
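    Example (illustrative, untested):
        > get('Ottawa, Ontario', provider='osm')
        > get([45.42, -75.69], provider='osm', method='reverse')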
"""
provider = kwargs.get('provider', 'bing').lower().strip()
method = kwargs.get('method', 'geocode').lower().strip()
if isinstance(location, (list, dict)) and method == 'geocode':
raise ValueError("Location should be a string")
if provider not in options:
raise ValueError("Invalid provider")
else:
if method not in options[provider]:
raise ValueError("Invalid method")
return options[provider][method](location, **kwargs)
def distance(*args, **kwargs):
"""Distance tool measures the distance between two or multiple points.
:param location: (min 2x locations) Your search location you want geocoded.
:param units: (default=kilometers) Unit of measurement.
> kilometers
> miles
> feet
> meters
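    Example (illustrative, untested):
        > distance('Ottawa, ON', 'Toronto, ON', units='miles')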
"""
return Distance(*args, **kwargs)
def location(location, **kwargs):
"""Parser for different location formats
"""
return Location(location, **kwargs)
def google(location, **kwargs):
"""Google Provider
:param location: Your search location you want geocoded.
:param method: (default=geocode) Use the following:
> geocode
> places
> reverse
> batch
> timezone
> elevation
"""
return get(location, provider='google', **kwargs)
def mapbox(location, **kwargs):
"""Mapbox Provider
:param location: Your search location you want geocoded.
:param proximity: Search nearby [lat, lng]
:param method: (default=geocode) Use the following:
> geocode
> reverse
> batch
"""
return get(location, provider='mapbox', **kwargs)
def yandex(location, **kwargs):
"""Yandex Provider
:param location: Your search location you want geocoded.
    :param lang: Choose the following language:
> ru-RU — Russian (by default)
> uk-UA — Ukrainian
> be-BY — Belarusian
> en-US — American English
> en-BR — British English
> tr-TR — Turkish (only for maps of Turkey)
:param kind: Type of toponym (only for reverse geocoding):
> house - house or building
> street - street
> metro - subway station
> district - city district
> locality - locality (city, town, village, etc.)
"""
return get(location, provider='yandex', **kwargs)
def w3w(location, **kwargs):
"""what3words Provider
:param location: Your search location you want geocoded.
:param key: W3W API key.
    :param method: Choose a method (geocode, reverse)
"""
return get(location, provider='w3w', **kwargs)
def baidu(location, **kwargs):
"""Baidu Provider
:param location: Your search location you want geocoded.
:param key: Baidu API key.
:param referer: Baidu API referer website.
"""
return get(location, provider='baidu', **kwargs)
def komoot(location, **kwargs):
"""Ottawa Provider
:param location: Your search location you want geocoded.
"""
return get(location, provider='komoot', **kwargs)
def ottawa(location, **kwargs):
"""Ottawa Provider
:param location: Your search location you want geocoded.
"""
return get(location, provider='ottawa', **kwargs)
def elevation(location, **kwargs):
"""Elevation - Google Provider
:param location: Your search location you want to retrieve elevation data.
"""
return get(location, method='elevation', provider='google', **kwargs)
def places(location, **kwargs):
"""Places - Google Provider
:param location: Your search location you want geocoded.
"""
return get(location, method='places', provider='google', **kwargs)
def timezone(location, **kwargs):
"""Timezone - Google Provider
:param location: Your search location you want to retrieve timezone data.
:param timestamp: Define your own specified time to calculate timezone.
"""
return get(location, method='timezone', provider='google', **kwargs)
def reverse(location, provider="google", **kwargs):
"""Reverse Geocoding
:param location: Your search location you want to reverse geocode.
:param key: (optional) use your own API Key from Bing.
:param provider: (default=google) Use the following:
> google
> bing
"""
return get(location, method='reverse', provider=provider, **kwargs)
def bing(location, **kwargs):
"""Bing Provider
:param location: Your search location you want geocoded.
:param key: (optional) use your own API Key from Bing.
:param method: (default=geocode) Use the following:
> geocode
> reverse
"""
return get(location, provider='bing', **kwargs)
def yahoo(location, **kwargs):
"""Yahoo Provider
:param ``location``: Your search location you want geocoded.
"""
return get(location, provider='yahoo', **kwargs)
def geolytica(location, **kwargs):
"""Geolytica (Geocoder.ca) Provider
:param location: Your search location you want geocoded.
"""
return get(location, provider='geolytica', **kwargs)
def opencage(location, **kwargs):
"""Opencage Provider
:param ``location``: Your search location you want geocoded.
:param ``key``: (optional) use your own API Key from OpenCage.
"""
return get(location, provider='opencage', **kwargs)
def arcgis(location, **kwargs):
"""ArcGIS Provider
:param ``location``: Your search location you want geocoded.
"""
return get(location, provider='arcgis', **kwargs)
def here(location, **kwargs):
"""HERE Provider
:param location: Your search location you want geocoded.
:param app_code: (optional) use your own Application Code from HERE.
:param app_id: (optional) use your own Application ID from HERE.
:param method: (default=geocode) Use the following:
> geocode
> reverse
"""
return get(location, provider='here', **kwargs)
def nokia(location, **kwargs):
"""HERE Provider
:param location: Your search location you want geocoded.
:param app_code: (optional) use your own Application Code from HERE.
:param app_id: (optional) use your own Application ID from HERE.
:param method: (default=geocode) Use the following:
> geocode
> reverse
"""
return get(location, provider='here', **kwargs)
def tomtom(location, **kwargs):
"""TomTom Provider
:param location: Your search location you want geocoded.
:param key: (optional) use your own API Key from TomTom.
"""
return get(location, provider='tomtom', **kwargs)
def mapquest(location, **kwargs):
"""MapQuest Provider
:param location: Your search location you want geocoded.
:param key: (optional) use your own API Key from MapQuest.
:param method: (default=geocode) Use the following:
> geocode
> reverse
"""
return get(location, provider='mapquest', **kwargs)
def osm(location, **kwargs):
"""OSM Provider
:param location: Your search location you want geocoded.
:param url: Custom OSM Server URL location
(ex: http://nominatim.openstreetmap.org/search)
"""
return get(location, provider='osm', **kwargs)
def maxmind(location='me', **kwargs):
"""MaxMind Provider
:param location: Your search IP Address you want geocoded.
:param location: (optional) if left blank will return your
current IP address's location.
"""
return get(location, provider='maxmind', **kwargs)
def ipinfo(location='', **kwargs):
"""IP Info.io Provider
:param location: Your search IP Address you want geocoded.
:param location: (optional) if left blank will return your
current IP address's location.
"""
return get(location, provider='ipinfo', **kwargs)
def freegeoip(location, **kwargs):
"""FreeGeoIP Provider
:param location: Your search IP Address you want geocoded.
:param location: (optional) if left blank will return your
current IP address's location.
"""
return get(location, provider='freegeoip', **kwargs)
def ip(location, **kwargs):
"""IP Address lookup
:param location: Your search IP Address you want geocoded.
:param location: (optional) if left blank will return your
current IP address's location.
"""
return get(location, provider='ipinfo', **kwargs)
def canadapost(location, **kwargs):
"""CanadaPost Provider
:param ``location``: Your search location you want geocoded.
:param ``key``: (optional) API Key from CanadaPost Address Complete.
:param ``language``: (default=en) Output language preference.
:param ``country``: (default=ca) Geofenced query by country.
"""
return get(location, provider='canadapost', **kwargs)
def postal(location, **kwargs):
"""CanadaPost Provider
:param ``location``: Your search location you want geocoded.
:param ``key``: (optional) use your own API Key from
CanadaPost Address Complete.
"""
return get(location, provider='canadapost', **kwargs)
def geonames(location, **kwargs):
"""GeoNames Provider
:param ``location``: Your search location you want geocoded.
:param ``username``: (required) needs to be passed with each request.
"""
return get(location, provider='geonames', **kwargs)
def mapzen(location, **kwargs):
"""Mapzen Provider
:param ``location``: Your search location you want geocoded.
"""
return get(location, provider='mapzen', **kwargs)
def tamu(location, **kwargs):
"""TAMU Provider
Params
------
:param location: The street address of the location you want geocoded.
:param city: The city of the location to geocode.
:param state: The state of the location to geocode.
:param zipcode: The zipcode of the location to geocode.
:param key: The API key (use API key "demo" for testing).
API Reference
-------------
https://geoservices.tamu.edu/Services/Geocode/WebService
"""
return get(location, provider='tamu', **kwargs)
def geocodefarm(location, **kwargs):
"""GeocodeFarm Provider
Params
------
:param location: The string to search for. Usually a street address.
:param key: (optional) API Key. Only Required for Paid Users.
:param lang: (optional) 2 digit language code to return results in. Currently only "en"(English) or "de"(German) supported.
:param country: (optional) The country to return results in. Used for biasing purposes and may not fully filter results to this specific country.
API Reference
-------------
https://geocode.farm/geocoding/free-api-documentation/
"""
return get(location, provider='geocodefarm', **kwargs)
def tgos(location, **kwargs):
"""TGOS Provider
:param location: Your search location you want geocoded.
:param language: (default=taiwan) Use the following:
> taiwan
> english
> chinese
:param method: (default=geocode) Use the following:
> geocode
API Reference
-------------
http://api.tgos.nat.gov.tw/TGOS_MAP_API/Web/Default.aspx
"""
return get(location, provider='tgos', **kwargs)
def uscensus(location, **kwargs):
"""US Census Provider
Params
------
:param location: Your search location you want geocoded.
:param benchmark: (default=4) Use the following:
> Public_AR_Current or 4
> Public_AR_ACSYYYY or 8
> Public_AR_Census2010 or 9
:param vintage: (default=4) Use the following:
> Current_Current or 4
> Census2010_Current or 410
> ACS2013_Current or 413
> ACS2014_Current or 414
> ACS2015_Current or 415
> Current_ACS2015 or 8
> Census2010_ACS2015 or 810
> ACS2013_ACS2015 or 813
> ACS2014_ACS2015 or 814
> ACS2015_ACS2015 or 815
> Census2010_Census2010 or 910
> Census2000_Census2010 or 900
:param method: (default=geocode) Use the following:
> geocode
> reverse
API Reference
-------------
https://geocoding.geo.census.gov/geocoder/Geocoding_Services_API.pdf
"""
return get(location, provider='uscensus', **kwargs)
| mit | -8,501,324,559,396,109,000 | 27.992857 | 149 | 0.652685 | false |
mfalesni/cfme_tests | cfme/tests/cli/test_appliance_console.py | 1 | 19042 | import pytest
from collections import namedtuple
from wait_for import wait_for
from cfme.utils import os
from cfme.utils.log_validator import LogValidator
from cfme.utils.log import logger
from cfme.utils.conf import hidden
import tempfile
import lxml.etree
import yaml
TimedCommand = namedtuple('TimedCommand', ['command', 'timeout'])
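# TimedCommand pairs one console input with its own wait timeout, e.g.
# TimedCommand(pwd, 360) below waits up to 360 seconds for that step to finish;
# plain strings use the default timeout.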
LoginOption = namedtuple('LoginOption', ['name', 'option', 'index'])
TZ = namedtuple('TimeZone', ['name', 'option'])
tzs = [
TZ('Africa/Abidjan', ('1', '1')),
TZ('America/Argentina/Buenos_Aires', ('2', '6', '1')),
TZ('Antarctica/Casey', ('3', 'q', '1')),
TZ('Arctic/Longyearbyen', ('4', 'q', '1')),
TZ('Asia/Aden', ('5', '1')),
TZ('Atlantic/Azores', ('6', 'q', '1')),
TZ('Australia/Adelaide', ('7', 'q', '1')),
TZ('Europe/Amsterdam', ('8', '1')),
TZ('Indian/Antananarivo', ('9', 'q', '1')),
TZ('Pacific/Apia', ('10', '1')),
TZ('UTC', ('11',))
]
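# Each TZ.option tuple is the sequence of appliance_console menu keystrokes used to
# select that zone in test_appliance_console_set_timezone below.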
@pytest.mark.smoke
def test_appliance_console(appliance):
"""'ap | tee /tmp/opt.txt)' saves stdout to file, 'ap' launch appliance_console."""
command_set = ('ap | tee -a /tmp/opt.txt', 'ap')
appliance.appliance_console.run_commands(command_set)
assert appliance.ssh_client.run_command("cat /tmp/opt.txt | grep '{} Virtual Appliance'"
.format(appliance.product_name))
assert appliance.ssh_client.run_command("cat /tmp/opt.txt | grep '{} Database:'"
.format(appliance.product_name))
assert appliance.ssh_client.run_command("cat /tmp/opt.txt | grep '{} Version:'"
.format(appliance.product_name))
def test_appliance_console_set_hostname(appliance):
"""'ap' launch appliance_console, '' clear info screen, '1' loads network settings, '5' gives
access to set hostname, 'hostname' sets new hostname."""
hostname = 'test.example.com'
command_set = ('ap', '', '1', '5', hostname,)
appliance.appliance_console.run_commands(command_set)
def is_hostname_set(appliance):
assert appliance.ssh_client.run_command("hostname -f | grep {hostname}"
.format(hostname=hostname))
wait_for(is_hostname_set, func_args=[appliance])
return_code, output = appliance.ssh_client.run_command("hostname -f")
assert output.strip() == hostname
assert return_code == 0
@pytest.mark.parametrize('timezone', tzs, ids=[tz.name for tz in tzs])
def test_appliance_console_set_timezone(timezone, temp_appliance_preconfig_modscope):
"""'ap' launch appliance_console, '' clear info screen, '2' set timezone, 'opt' select
    region, 'timezone' selects zone, 'y' confirm selection, '' finish."""
command_set = ('ap', '', '2') + timezone[1] + ('y', '')
temp_appliance_preconfig_modscope.appliance_console.run_commands(command_set)
temp_appliance_preconfig_modscope.appliance_console.timezone_check(timezone)
def test_appliance_console_datetime(temp_appliance_preconfig_funcscope):
"""Grab fresh appliance and set time and date through appliance_console and check result"""
app = temp_appliance_preconfig_funcscope
command_set = ('ap', '', '3', 'y', '2020-10-20', '09:58:00', 'y', '')
app.appliance_console.run_commands(command_set)
def date_changed():
return app.ssh_client.run_command("date +%F-%T | grep 2020-10-20-10:00").success
wait_for(date_changed)
@pytest.mark.uncollectif(lambda appliance: appliance.version < '5.9')
def test_appliance_console_db_maintenance_hourly(appliance_with_preset_time):
"""Test database hourly re-indexing through appliance console"""
app = appliance_with_preset_time
command_set = ('ap', '', '7', 'y', 'n', '')
app.appliance_console.run_commands(command_set)
def maintenance_run():
return app.ssh_client.run_command(
"grep REINDEX /var/www/miq/vmdb/log/hourly_continuous_pg_maint_stdout.log").success
wait_for(maintenance_run, timeout=300)
@pytest.mark.parametrize('period', [
['hourly'],
['daily', '10'],
['weekly', '10', '2'],
['monthly', '10', '20']
], ids=['hour', 'day', 'week', 'month'])
def test_appliance_console_db_maintenance_periodic(period, appliance_with_preset_time):
"""Tests full vacuums on database through appliance console"""
app = appliance_with_preset_time
command_set = ['ap', '', '7', 'n', 'y']
command_set.extend(period)
app.appliance_console.run_commands(command_set)
def maintenance_run():
return app.ssh_client.run_command(
"grep 'periodic vacuum full completed' "
"/var/www/miq/vmdb/log/hourly_continuous_pg_maint_stdout.log"
).success
wait_for(maintenance_run, timeout=300)
def test_appliance_console_internal_db(app_creds, unconfigured_appliance):
"""'ap' launch appliance_console, '' clear info screen, '5' setup db, '1' Creates v2_key,
'1' selects internal db, 'y' continue, '1' use partition, 'n' don't create dedicated db, '0'
db region number, 'pwd' db password, 'pwd' confirm db password + wait 360 secs and '' finish."""
pwd = app_creds['password']
command_set = ('ap', '', '5', '1', '1', 'y', '1', 'n', '0', pwd, TimedCommand(pwd, 360), '')
unconfigured_appliance.appliance_console.run_commands(command_set)
unconfigured_appliance.wait_for_evm_service()
unconfigured_appliance.wait_for_web_ui()
def test_appliance_console_internal_db_reset(temp_appliance_preconfig_funcscope):
"""'ap' launch appliance_console, '' clear info screen, '5' setup db, '4' reset db, 'y'
confirm db reset, '1' db region number + wait 360 secs, '' continue"""
temp_appliance_preconfig_funcscope.ssh_client.run_command('systemctl stop evmserverd')
command_set = ('ap', '', '5', '4', 'y', TimedCommand('1', 360), '')
temp_appliance_preconfig_funcscope.appliance_console.run_commands(command_set)
temp_appliance_preconfig_funcscope.ssh_client.run_command('systemctl start evmserverd')
temp_appliance_preconfig_funcscope.wait_for_evm_service()
temp_appliance_preconfig_funcscope.wait_for_web_ui()
def test_appliance_console_dedicated_db(unconfigured_appliance, app_creds):
"""'ap' launch appliance_console, '' clear info screen, '5' setup db, '1' Creates v2_key,
'1' selects internal db, 'y' continue, '1' use partition, 'y' create dedicated db, 'pwd'
db password, 'pwd' confirm db password + wait 360 secs and '' finish."""
pwd = app_creds['password']
command_set = ('ap', '', '5', '1', '1', 'y', '1', 'y', pwd, TimedCommand(pwd, 360), '')
unconfigured_appliance.appliance_console.run_commands(command_set)
wait_for(lambda: unconfigured_appliance.db.is_dedicated_active)
def test_appliance_console_ha_crud(unconfigured_appliances, app_creds):
"""Testing HA configuration with 3 appliances.
Appliance one configuring dedicated database, 'ap' launch appliance_console,
'' clear info screen, '5' setup db, '1' Creates v2_key, '1' selects internal db,
'1' use partition, 'y' create dedicated db, 'pwd' db password, 'pwd' confirm db password + wait
360 secs and '' finish.
Appliance two creating region in dedicated database, 'ap' launch appliance_console, '' clear
info screen, '5' setup db, '2' fetch v2_key, 'app0_ip' appliance ip address, '' default user,
'pwd' appliance password, '' default v2_key location, '2' create region in external db, '0' db
region number, 'y' confirm create region in external db 'app0_ip', '' ip and default port for
dedicated db, '' use default db name, '' default username, 'pwd' db password, 'pwd' confirm db
password + wait 360 seconds and '' finish.
Appliance one configuring primary node for replication, 'ap' launch appliance_console, '' clear
info screen, '6' configure db replication, '1' configure node as primary, '1' cluster node
number set to 1, '' default dbname, '' default user, 'pwd' password, 'pwd' confirm password,
'app0_ip' primary appliance ip, confirm settings and wait 360 seconds to configure, '' finish.
Appliance three configuring standby node for replication, 'ap' launch appliance_console, ''
clear info screen, '6' configure db replication, '1' configure node as primary, '1' cluster node
number set to 1, '' default dbname, '' default user, 'pwd' password, 'pwd' confirm password,
'app0_ip' primary appliance ip, confirm settings and wait 360 seconds to configure, '' finish.
Appliance two configuring automatic failover of database nodes, 'ap' launch appliance_console,
'' clear info screen '9' configure application database failover monitor, '1' start failover
monitor. wait 30 seconds for service to start '' finish.
Appliance one, stop APPLIANCE_PG_SERVICE and check that the standby node takes over correctly
and evm starts up again pointing at the new primary database.
"""
apps = unconfigured_appliances
app0_ip = apps[0].hostname
app1_ip = apps[1].hostname
pwd = app_creds['password']
# Configure first appliance as dedicated database
command_set = ('ap', '', '5', '1', '1', '1', 'y', pwd, TimedCommand(pwd, 360), '')
apps[0].appliance_console.run_commands(command_set)
wait_for(lambda: apps[0].db.is_dedicated_active)
# Configure EVM webui appliance with create region in dedicated database
command_set = ('ap', '', '5', '2', app0_ip, '', pwd, '', '2', '0', 'y', app0_ip, '', '', '',
pwd, TimedCommand(pwd, 360), '')
apps[2].appliance_console.run_commands(command_set)
apps[2].wait_for_evm_service()
apps[2].wait_for_web_ui()
# Configure primary replication node
command_set = ('ap', '', '6', '1', '1', '', '', pwd, pwd, app0_ip, 'y',
TimedCommand('y', 60), '')
apps[0].appliance_console.run_commands(command_set)
# Configure secondary replication node
command_set = ('ap', '', '6', '2', '1', '2', '', '', pwd, pwd, app0_ip, app1_ip, 'y',
TimedCommand('y', 60), '')
apps[1].appliance_console.run_commands(command_set)
# Configure automatic failover on EVM appliance
command_set = ('ap', '', '9', TimedCommand('1', 30), '')
apps[2].appliance_console.run_commands(command_set)
def is_ha_monitor_started(appliance):
return bool(appliance.ssh_client.run_command(
"grep {} /var/www/miq/vmdb/config/failover_databases.yml".format(app1_ip)).success)
wait_for(is_ha_monitor_started, func_args=[apps[2]], timeout=300, handle_exception=True)
# Cause failover to occur
rc, out = apps[0].ssh_client.run_command('systemctl stop $APPLIANCE_PG_SERVICE', timeout=15)
assert rc == 0, "Failed to stop APPLIANCE_PG_SERVICE: {}".format(out)
def is_failover_started(appliance):
return bool(appliance.ssh_client.run_command(
"grep 'Starting to execute failover' /var/www/miq/vmdb/log/ha_admin.log").success)
wait_for(is_failover_started, func_args=[apps[2]], timeout=450, handle_exception=True)
apps[2].wait_for_evm_service()
apps[2].wait_for_web_ui()
def test_appliance_console_external_db(temp_appliance_unconfig_funcscope, app_creds, appliance):
"""'ap' launch appliance_console, '' clear info screen, '5/8' setup db, '2' fetch v2_key,
'ip' address to fetch from, '' default username, 'pwd' db password, '' default v2_key location,
'3' join external region, 'port' ip and port of joining region, '' use default db name, ''
default username, 'pwd' db password, 'pwd' confirm db password + wait 360 secs and '' finish."""
ip = appliance.hostname
pwd = app_creds['password']
command_set = ('ap', '', '5', '2', ip, '', pwd, '', '3', ip, '', '', '',
pwd, TimedCommand(pwd, 360), '')
temp_appliance_unconfig_funcscope.appliance_console.run_commands(command_set)
temp_appliance_unconfig_funcscope.wait_for_evm_service()
temp_appliance_unconfig_funcscope.wait_for_web_ui()
def test_appliance_console_external_db_create(
app_creds, dedicated_db_appliance, unconfigured_appliance_secondary):
"""'ap' launch appliance_console, '' clear info screen, '5' setup db, '1' create v2_key,
'2' create region in external db, '0' db region number, 'y' confirm create region in external db
'ip', '' ip and port for dedicated db, '' use default db name, '' default username, 'pwd' db
password, 'pwd' confirm db password + wait 360 secs and '' finish."""
ip = dedicated_db_appliance.hostname
pwd = app_creds['password']
command_set = ('ap', '', '5', '1', '2', '0', 'y', ip, '', '', '', pwd,
TimedCommand(pwd, 300), '')
unconfigured_appliance_secondary.appliance_console.run_commands(command_set)
unconfigured_appliance_secondary.wait_for_evm_service()
unconfigured_appliance_secondary.wait_for_web_ui()
def test_appliance_console_extend_storage(unconfigured_appliance):
"""'ap' launches appliance_console, '' clears info screen, '10' extend storage, '1' select
disk, 'y' confirm configuration and '' complete."""
command_set = ('ap', '', '10', '1', 'y', '')
unconfigured_appliance.appliance_console.run_commands(command_set)
def is_storage_extended():
assert unconfigured_appliance.ssh_client.run_command("df -h | grep /var/www/miq_tmp")
wait_for(is_storage_extended)
@pytest.mark.uncollect('No IPA servers currently available')
def test_appliance_console_ipa(ipa_creds, configured_appliance):
"""'ap' launches appliance_console, '' clears info screen, '11' setup IPA, 'y' confirm setup
+ wait 40 secs and '' finish."""
command_set = ('ap', '', '11', ipa_creds['hostname'], ipa_creds['domain'], '',
ipa_creds['username'], ipa_creds['password'], TimedCommand('y', 40), '')
configured_appliance.appliance_console.run_commands(command_set)
def is_sssd_running(configured_appliance):
assert configured_appliance.ssh_client.run_command("systemctl status sssd | grep running")
wait_for(is_sssd_running, func_args=[configured_appliance])
return_code, output = configured_appliance.ssh_client.run_command(
"cat /etc/ipa/default.conf | grep 'enable_ra = True'")
assert return_code == 0
@pytest.mark.uncollect('No IPA servers currently available')
@pytest.mark.parametrize('auth_type', [
LoginOption('sso', 'sso_enabled', '1'),
LoginOption('saml', 'saml_enabled', '2'),
LoginOption('local_login', 'local_login_disabled', '3')
], ids=['sso', 'saml', 'local_login'])
def test_appliance_console_external_auth(auth_type, app_creds, ipa_crud):
"""'ap' launches appliance_console, '' clears info screen, '12' change ext auth options,
'auth_type' auth type to change, '4' apply changes."""
evm_tail = LogValidator('/var/www/miq/vmdb/log/evm.log',
matched_patterns=['.*{} to true.*'.format(auth_type.option)],
hostname=ipa_crud.hostname,
username=app_creds['sshlogin'],
password=app_creds['password'])
evm_tail.fix_before_start()
command_set = ('ap', '', '12', auth_type.index, '4')
ipa_crud.appliance_console.run_commands(command_set)
evm_tail.validate_logs()
evm_tail = LogValidator('/var/www/miq/vmdb/log/evm.log',
matched_patterns=['.*{} to false.*'.format(auth_type.option)],
hostname=ipa_crud.hostname,
username=app_creds['sshlogin'],
password=app_creds['password'])
evm_tail.fix_before_start()
command_set = ('ap', '', '12', auth_type.index, '4')
ipa_crud.appliance_console.run_commands(command_set)
evm_tail.validate_logs()
@pytest.mark.uncollect('No IPA servers currently available')
def test_appliance_console_external_auth_all(app_creds, ipa_crud):
"""'ap' launches appliance_console, '' clears info screen, '12' change ext auth options,
'auth_type' auth type to change, '4' apply changes."""
evm_tail = LogValidator('/var/www/miq/vmdb/log/evm.log',
matched_patterns=['.*sso_enabled to true.*', '.*saml_enabled to true.*',
'.*local_login_disabled to true.*'],
hostname=ipa_crud.hostname,
username=app_creds['sshlogin'],
password=app_creds['password'])
evm_tail.fix_before_start()
command_set = ('ap', '', '12', '1', '2', '3', '4')
ipa_crud.appliance_console.run_commands(command_set)
evm_tail.validate_logs()
evm_tail = LogValidator('/var/www/miq/vmdb/log/evm.log',
matched_patterns=['.*sso_enabled to false.*',
'.*saml_enabled to false.*', '.*local_login_disabled to false.*'],
hostname=ipa_crud.hostname,
username=app_creds['sshlogin'],
password=app_creds['password'])
evm_tail.fix_before_start()
command_set = ('ap', '', '12', '1', '2', '3', '4')
ipa_crud.appliance_console.run_commands(command_set)
evm_tail.validate_logs()
def test_appliance_console_scap(temp_appliance_preconfig, soft_assert):
"""'ap' launches appliance_console, '' clears info screen, '14' Hardens appliance using SCAP
configuration, '' complete."""
command_set = ('ap', '', '14', '')
temp_appliance_preconfig.appliance_console.run_commands(command_set)
with tempfile.NamedTemporaryFile('w') as f:
f.write(hidden['scap.rb'])
f.flush()
os.fsync(f.fileno())
temp_appliance_preconfig.ssh_client.put_file(
f.name, '/tmp/scap.rb')
if temp_appliance_preconfig.version >= "5.8":
rules = '/var/www/miq/vmdb/productization/appliance_console/config/scap_rules.yml'
else:
rules = '/var/www/miq/vmdb/gems/pending/appliance_console/config/scap_rules.yml'
temp_appliance_preconfig.ssh_client.run_command('cd /tmp/ && ruby scap.rb '
'--rulesfile={rules}'.format(rules=rules))
temp_appliance_preconfig.ssh_client.get_file(
'/tmp/scap-results.xccdf.xml', '/tmp/scap-results.xccdf.xml')
temp_appliance_preconfig.ssh_client.get_file(
'{rules}'.format(rules=rules), '/tmp/scap_rules.yml') # Get the scap rules
with open('/tmp/scap_rules.yml') as f:
yml = yaml.load(f.read())
rules = yml['rules']
tree = lxml.etree.parse('/tmp/scap-results.xccdf.xml')
root = tree.getroot()
for rule in rules:
elements = root.findall(
'.//{{http://checklists.nist.gov/xccdf/1.1}}rule-result[@idref="{}"]'.format(rule))
if elements:
result = elements[0].findall('./{http://checklists.nist.gov/xccdf/1.1}result')
if result:
soft_assert(result[0].text == 'pass')
logger.info("{}: {}".format(rule, result[0].text))
else:
logger.info("{}: no result".format(rule))
else:
logger.info("{}: rule not found".format(rule))
| gpl-2.0 | 3,892,743,873,802,618,400 | 47.576531 | 100 | 0.642317 | false |