Dataset columns:
- repo_name: string, lengths 5 to 92
- path: string, lengths 4 to 232
- copies: string, 19 classes
- size: string, lengths 4 to 7
- content: string, lengths 721 to 1.04M
- license: string, 15 classes
- hash: int64, -9,223,277,421,539,062,000 to 9,223,102,107B
- line_mean: float64, 6.51 to 99.9
- line_max: int64, 15 to 997
- alpha_frac: float64, 0.25 to 0.97
- autogenerated: bool, 1 class
repo_name: hwmay/pordb3 | path: pordb_bilddatei_umbenennen.py | copies: 1 | size: 4294 | content:
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'pordb_bilddatei_umbenennen.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(1035, 307)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("pypordb/8027068_splash.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Dialog.setWindowIcon(icon)
self.verticalLayout_3 = QtWidgets.QVBoxLayout(Dialog)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.label = QtWidgets.QLabel(Dialog)
font = QtGui.QFont()
font.setPointSize(16)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.verticalLayout_3.addWidget(self.label)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.groupBox = QtWidgets.QGroupBox(Dialog)
self.groupBox.setObjectName("groupBox")
self.gridLayout = QtWidgets.QGridLayout(self.groupBox)
self.gridLayout.setObjectName("gridLayout")
self.listWidgetDateinamen = QtWidgets.QListWidget(self.groupBox)
self.listWidgetDateinamen.setObjectName("listWidgetDateinamen")
self.gridLayout.addWidget(self.listWidgetDateinamen, 0, 0, 1, 1)
self.horizontalLayout.addWidget(self.groupBox)
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.groupBox_2 = QtWidgets.QGroupBox(Dialog)
self.groupBox_2.setObjectName("groupBox_2")
self.gridLayout_2 = QtWidgets.QGridLayout(self.groupBox_2)
self.gridLayout_2.setObjectName("gridLayout_2")
self.lineEditDateiname = QtWidgets.QLineEdit(self.groupBox_2)
self.lineEditDateiname.setObjectName("lineEditDateiname")
self.gridLayout_2.addWidget(self.lineEditDateiname, 0, 0, 1, 1)
self.verticalLayout.addWidget(self.groupBox_2)
self.labelDateiname = QtWidgets.QLabel(Dialog)
self.labelDateiname.setText("")
self.labelDateiname.setObjectName("labelDateiname")
self.verticalLayout.addWidget(self.labelDateiname)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.horizontalLayout.addLayout(self.verticalLayout)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.pushButtonUmbenennen = QtWidgets.QPushButton(Dialog)
self.pushButtonUmbenennen.setDefault(True)
self.pushButtonUmbenennen.setFlat(False)
self.pushButtonUmbenennen.setObjectName("pushButtonUmbenennen")
self.horizontalLayout_2.addWidget(self.pushButtonUmbenennen)
self.pushButtonCancel = QtWidgets.QPushButton(Dialog)
self.pushButtonCancel.setObjectName("pushButtonCancel")
self.horizontalLayout_2.addWidget(self.pushButtonCancel)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.verticalLayout_3.addLayout(self.verticalLayout_2)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Edit filename"))
self.label.setText(_translate("Dialog", "Filename already exists or has more than 256 characters"))
self.groupBox.setTitle(_translate("Dialog", "Similar files in directory"))
self.groupBox_2.setTitle(_translate("Dialog", "New filename"))
self.pushButtonUmbenennen.setText(_translate("Dialog", "Rename file"))
self.pushButtonUmbenennen.setShortcut(_translate("Dialog", "Return"))
self.pushButtonCancel.setText(_translate("Dialog", "Cancel"))
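# Minimal usage sketch, assuming PyQt5 is installed: a generated Ui_* class is
# normally attached to a plain QDialog via setupUi() rather than edited directly
# (see the WARNING above). The __main__ guard keeps this from running on import.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    dialog = QtWidgets.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(dialog)  # builds the widgets and applies the translated strings
    dialog.show()
    sys.exit(app.exec_())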
license: gpl-3.0 | hash: 852,912,724,824,715,000 | line_mean: 50.73494 | line_max: 114 | alpha_frac: 0.71728 | autogenerated: false
repo_name: brchiu/tensorflow | path: tensorflow/python/ops/parsing_ops.py | copies: 1 | size: 82440 | content:
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Parsing Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_parsing_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.python.ops.gen_parsing_ops import *
# pylint: enable=wildcard-import,undefined-variable
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
ops.NotDifferentiable("DecodeRaw")
ops.NotDifferentiable("ParseTensor")
ops.NotDifferentiable("SerializeTensor")
ops.NotDifferentiable("StringToNumber")
@tf_export("io.VarLenFeature", v1=["VarLenFeature", "io.VarLenFeature"])
class VarLenFeature(collections.namedtuple("VarLenFeature", ["dtype"])):
"""Configuration for parsing a variable-length input feature.
Fields:
dtype: Data type of input.
"""
pass
@tf_export("io.SparseFeature", v1=["io.SparseFeature", "SparseFeature"])
class SparseFeature(
collections.namedtuple(
"SparseFeature",
["index_key", "value_key", "dtype", "size", "already_sorted"])):
"""Configuration for parsing a sparse input feature from an `Example`.
Note, preferably use `VarLenFeature` (possibly in combination with a
`SequenceExample`) in order to parse out `SparseTensor`s instead of
`SparseFeature` due to its simplicity.
Closely mimicking the `SparseTensor` that will be obtained by parsing an
`Example` with a `SparseFeature` config, a `SparseFeature` contains a
* `value_key`: The name of key for a `Feature` in the `Example` whose parsed
`Tensor` will be the resulting `SparseTensor.values`.
* `index_key`: A list of names - one for each dimension in the resulting
`SparseTensor` whose `indices[i][dim]` indicating the position of
the `i`-th value in the `dim` dimension will be equal to the `i`-th value in
the Feature with key named `index_key[dim]` in the `Example`.
* `size`: A list of ints for the resulting `SparseTensor.dense_shape`.
For example, we can represent the following 2D `SparseTensor`
```python
SparseTensor(indices=[[3, 1], [20, 0]],
values=[0.5, -1.0]
dense_shape=[100, 3])
```
with an `Example` input proto
```python
features {
feature { key: "val" value { float_list { value: [ 0.5, -1.0 ] } } }
feature { key: "ix0" value { int64_list { value: [ 3, 20 ] } } }
feature { key: "ix1" value { int64_list { value: [ 1, 0 ] } } }
}
```
and `SparseFeature` config with 2 `index_key`s
```python
SparseFeature(index_key=["ix0", "ix1"],
value_key="val",
dtype=tf.float32,
size=[100, 3])
```
Fields:
index_key: A single string name or a list of string names of index features.
For each key the underlying feature's type must be `int64` and its length
must always match that of the `value_key` feature.
To represent `SparseTensor`s with a `dense_shape` of `rank` higher than 1
a list of length `rank` should be used.
value_key: Name of value feature. The underlying feature's type must
be `dtype` and its length must always match that of all the `index_key`s'
features.
dtype: Data type of the `value_key` feature.
size: A Python int or list thereof specifying the dense shape. Should be a
list if and only if `index_key` is a list. In that case the list must be
equal to the length of `index_key`. Each for each entry `i` all values in
the `index_key`[i] feature must be in `[0, size[i])`.
already_sorted: A Python boolean to specify whether the values in
`value_key` are already sorted by their index position. If so skip
sorting. False by default (optional).
"""
def __new__(cls, index_key, value_key, dtype, size, already_sorted=False):
return super(SparseFeature, cls).__new__(
cls, index_key, value_key, dtype, size, already_sorted)
@tf_export("io.FixedLenFeature", v1=["io.FixedLenFeature", "FixedLenFeature"])
class FixedLenFeature(collections.namedtuple(
"FixedLenFeature", ["shape", "dtype", "default_value"])):
"""Configuration for parsing a fixed-length input feature.
To treat sparse input as dense, provide a `default_value`; otherwise,
the parse functions will fail on any examples missing this feature.
Fields:
shape: Shape of input data.
dtype: Data type of input.
default_value: Value to be used if an example is missing this feature. It
must be compatible with `dtype` and of the specified `shape`.
"""
def __new__(cls, shape, dtype, default_value=None):
return super(FixedLenFeature, cls).__new__(
cls, shape, dtype, default_value)
@tf_export("io.FixedLenSequenceFeature",
v1=["io.FixedLenSequenceFeature", "FixedLenSequenceFeature"])
class FixedLenSequenceFeature(collections.namedtuple(
"FixedLenSequenceFeature",
["shape", "dtype", "allow_missing", "default_value"])):
"""Configuration for parsing a variable-length input feature into a `Tensor`.
The resulting `Tensor` of parsing a single `SequenceExample` or `Example` has
a static `shape` of `[None] + shape` and the specified `dtype`.
The resulting `Tensor` of parsing a `batch_size` many `Example`s has
a static `shape` of `[batch_size, None] + shape` and the specified `dtype`.
The entries in the `batch` from different `Examples` will be padded with
`default_value` to the maximum length present in the `batch`.
To treat a sparse input as dense, provide `allow_missing=True`; otherwise,
the parse functions will fail on any examples missing this feature.
Fields:
shape: Shape of input data for dimension 2 and higher. First dimension is
of variable length `None`.
dtype: Data type of input.
allow_missing: Whether to allow this feature to be missing from a feature
list item. Is available only for parsing `SequenceExample` not for
parsing `Examples`.
default_value: Scalar value to be used to pad multiple `Example`s to their
maximum length. Irrelevant for parsing a single `Example` or
`SequenceExample`. Defaults to "" for dtype string and 0 otherwise
(optional).
"""
def __new__(cls, shape, dtype, allow_missing=False, default_value=None):
return super(FixedLenSequenceFeature, cls).__new__(
cls, shape, dtype, allow_missing, default_value)
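# Illustrative sketch (hypothetical keys, mirroring the docstring examples above):
# a `features` dict passed to `parse_example` typically mixes these configs.
#
#   features = {
#       "age": FixedLenFeature([], dtypes.int64, default_value=-1),  # dense scalar
#       "kw": VarLenFeature(dtypes.string),                          # -> SparseTensor
#       "sparse": SparseFeature(index_key="ix", value_key="val",
#                               dtype=dtypes.float32, size=100),
#   }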
def _features_to_raw_params(features, types):
"""Split feature tuples into raw params used by `gen_parsing_ops`.
Args:
features: A `dict` mapping feature keys to objects of a type in `types`.
types: Type of features to allow, among `FixedLenFeature`, `VarLenFeature`,
`SparseFeature`, and `FixedLenSequenceFeature`.
Returns:
Tuple of `sparse_keys`, `sparse_types`, `dense_keys`, `dense_types`,
`dense_defaults`, `dense_shapes`.
Raises:
ValueError: if `features` contains an item not in `types`, or an invalid
feature.
"""
sparse_keys = []
sparse_types = []
dense_keys = []
dense_types = []
# When the graph is built twice, multiple dense_defaults in a normal dict
# could come out in different orders. This will fail the _e2e_test which
# expects exactly the same graph.
# OrderedDict which preserves the order can solve the problem.
dense_defaults = collections.OrderedDict()
dense_shapes = []
if features:
# NOTE: We iterate over sorted keys to keep things deterministic.
for key in sorted(features.keys()):
feature = features[key]
if isinstance(feature, VarLenFeature):
if VarLenFeature not in types:
raise ValueError("Unsupported VarLenFeature %s." % (feature,))
if not feature.dtype:
raise ValueError("Missing type for feature %s." % key)
sparse_keys.append(key)
sparse_types.append(feature.dtype)
elif isinstance(feature, SparseFeature):
if SparseFeature not in types:
raise ValueError("Unsupported SparseFeature %s." % (feature,))
if not feature.index_key:
raise ValueError(
"Missing index_key for SparseFeature %s." % (feature,))
if not feature.value_key:
raise ValueError(
"Missing value_key for SparseFeature %s." % (feature,))
if not feature.dtype:
raise ValueError("Missing type for feature %s." % key)
index_keys = feature.index_key
if isinstance(index_keys, str):
index_keys = [index_keys]
elif len(index_keys) > 1:
tf_logging.warning("SparseFeature is a complicated feature config "
"and should only be used after careful "
"consideration of VarLenFeature.")
for index_key in sorted(index_keys):
if index_key in sparse_keys:
dtype = sparse_types[sparse_keys.index(index_key)]
if dtype != dtypes.int64:
raise ValueError("Conflicting type %s vs int64 for feature %s." %
(dtype, index_key))
else:
sparse_keys.append(index_key)
sparse_types.append(dtypes.int64)
if feature.value_key in sparse_keys:
dtype = sparse_types[sparse_keys.index(feature.value_key)]
if dtype != feature.dtype:
raise ValueError("Conflicting type %s vs %s for feature %s." % (
dtype, feature.dtype, feature.value_key))
else:
sparse_keys.append(feature.value_key)
sparse_types.append(feature.dtype)
elif isinstance(feature, FixedLenFeature):
if FixedLenFeature not in types:
raise ValueError("Unsupported FixedLenFeature %s." % (feature,))
if not feature.dtype:
raise ValueError("Missing type for feature %s." % key)
if feature.shape is None:
raise ValueError("Missing shape for feature %s." % key)
feature_tensor_shape = tensor_shape.as_shape(feature.shape)
if (feature.shape and feature_tensor_shape.ndims and
feature_tensor_shape.dims[0].value is None):
raise ValueError("First dimension of shape for feature %s unknown. "
"Consider using FixedLenSequenceFeature." % key)
if (feature.shape is not None and
not feature_tensor_shape.is_fully_defined()):
raise ValueError("All dimensions of shape for feature %s need to be "
"known but received %s." % (key, str(feature.shape)))
dense_keys.append(key)
dense_shapes.append(feature.shape)
dense_types.append(feature.dtype)
if feature.default_value is not None:
dense_defaults[key] = feature.default_value
elif isinstance(feature, FixedLenSequenceFeature):
if FixedLenSequenceFeature not in types:
raise ValueError("Unsupported FixedLenSequenceFeature %s." % (
feature,))
if not feature.dtype:
raise ValueError("Missing type for feature %s." % key)
if feature.shape is None:
raise ValueError("Missing shape for feature %s." % key)
dense_keys.append(key)
dense_shapes.append(feature.shape)
dense_types.append(feature.dtype)
if feature.allow_missing:
dense_defaults[key] = None
if feature.default_value is not None:
dense_defaults[key] = feature.default_value
else:
raise ValueError("Invalid feature %s:%s." % (key, feature))
return (
sparse_keys, sparse_types, dense_keys, dense_types, dense_defaults,
dense_shapes)
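# For a simple input, the helper above produces raw parameter lists such as
# (hypothetical keys; a sketch, not a doctest):
#
#   _features_to_raw_params(
#       {"age": FixedLenFeature([], dtypes.int64, default_value=-1),
#        "kw": VarLenFeature(dtypes.string)},
#       [VarLenFeature, FixedLenFeature])
#   # -> (["kw"], [tf.string], ["age"], [tf.int64],
#   #     OrderedDict([("age", -1)]), [[]])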
def _construct_sparse_tensors_for_sparse_features(features, tensor_dict):
"""Merges SparseTensors of indices and values of SparseFeatures.
Constructs new dict based on `tensor_dict`. For `SparseFeatures` in the values
of `features` expects their `index_key`s and `index_value`s to be present in
`tensor_dict` mapping to `SparseTensor`s. Constructs a single `SparseTensor`
from them, and adds it to the result with the key from `features`.
Copies other keys and values from `tensor_dict` with keys present in
`features`.
Args:
features: A `dict` mapping feature keys to `SparseFeature` values.
Values of other types will be ignored.
tensor_dict: A `dict` mapping feature keys to `Tensor` and `SparseTensor`
values. Expected to contain keys of the `SparseFeature`s' `index_key`s and
`value_key`s and mapping them to `SparseTensor`s.
Returns:
A `dict` mapping feature keys to `Tensor` and `SparseTensor` values. Similar
to `tensor_dict` except each `SparseFeature`s in `features` results in a
single `SparseTensor`.
"""
tensor_dict = dict(tensor_dict) # Do not modify argument passed in.
# Construct SparseTensors for SparseFeatures.
for key in sorted(features.keys()):
feature = features[key]
if isinstance(feature, SparseFeature):
if isinstance(feature.index_key, str):
sp_ids = tensor_dict[feature.index_key]
else:
sp_ids = [tensor_dict[index_key] for index_key in feature.index_key]
sp_values = tensor_dict[feature.value_key]
tensor_dict[key] = sparse_ops.sparse_merge(
sp_ids,
sp_values,
vocab_size=feature.size,
already_sorted=feature.already_sorted)
# Remove tensors from dictionary that were only used to construct
# SparseTensors for SparseFeature.
for key in set(tensor_dict) - set(features):
del tensor_dict[key]
return tensor_dict
def _prepend_none_dimension(features):
if features:
modified_features = dict(features) # Create a copy to modify
for key, feature in features.items():
if isinstance(feature, FixedLenSequenceFeature):
if not feature.allow_missing:
raise ValueError("Unsupported: FixedLenSequenceFeature requires "
"allow_missing to be True.")
modified_features[key] = FixedLenSequenceFeature(
[None] + list(feature.shape),
feature.dtype,
feature.allow_missing,
feature.default_value)
return modified_features
else:
return features
@tf_export("io.parse_example", v1=["io.parse_example", "parse_example"])
def parse_example(serialized, features, name=None, example_names=None):
# pylint: disable=line-too-long
"""Parses `Example` protos into a `dict` of tensors.
Parses a number of serialized [`Example`](https://www.tensorflow.org/code/tensorflow/core/example/example.proto)
protos given in `serialized`. We refer to `serialized` as a batch with
`batch_size` many entries of individual `Example` protos.
`example_names` may contain descriptive names for the corresponding serialized
protos. These may be useful for debugging purposes, but they have no effect on
the output. If not `None`, `example_names` must be the same length as
`serialized`.
This op parses serialized examples into a dictionary mapping keys to `Tensor`
and `SparseTensor` objects. `features` is a dict from keys to `VarLenFeature`,
`SparseFeature`, and `FixedLenFeature` objects. Each `VarLenFeature`
and `SparseFeature` is mapped to a `SparseTensor`, and each
`FixedLenFeature` is mapped to a `Tensor`.
Each `VarLenFeature` maps to a `SparseTensor` of the specified type
representing a ragged matrix. Its indices are `[batch, index]` where `batch`
identifies the example in `serialized`, and `index` is the value's index in
the list of values associated with that feature and example.
Each `SparseFeature` maps to a `SparseTensor` of the specified type
representing a Tensor of `dense_shape` `[batch_size] + SparseFeature.size`.
Its `values` come from the feature in the examples with key `value_key`.
A `values[i]` comes from a position `k` in the feature of an example at batch
entry `batch`. This positional information is recorded in `indices[i]` as
`[batch, index_0, index_1, ...]` where `index_j` is the `k-th` value of
the feature in the example at with key `SparseFeature.index_key[j]`.
In other words, we split the indices (except the first index indicating the
batch entry) of a `SparseTensor` by dimension into different features of the
`Example`. Due to its complexity a `VarLenFeature` should be preferred over a
`SparseFeature` whenever possible.
Each `FixedLenFeature` `df` maps to a `Tensor` of the specified type (or
`tf.float32` if not specified) and shape `(serialized.size(),) + df.shape`.
`FixedLenFeature` entries with a `default_value` are optional. With no default
value, we will fail if that `Feature` is missing from any example in
`serialized`.
Each `FixedLenSequenceFeature` `df` maps to a `Tensor` of the specified type
(or `tf.float32` if not specified) and shape
`(serialized.size(), None) + df.shape`.
All examples in `serialized` will be padded with `default_value` along the
second dimension.
Examples:
For example, if one expects a `tf.float32` `VarLenFeature` `ft` and three
serialized `Example`s are provided:
```
serialized = [
features
{ feature { key: "ft" value { float_list { value: [1.0, 2.0] } } } },
features
{ feature []},
features
{ feature { key: "ft" value { float_list { value: [3.0] } } }
]
```
then the output will look like:
```python
{"ft": SparseTensor(indices=[[0, 0], [0, 1], [2, 0]],
values=[1.0, 2.0, 3.0],
dense_shape=(3, 2)) }
```
If instead a `FixedLenSequenceFeature` with `default_value = -1.0` and
`shape=[]` is used then the output will look like:
```python
{"ft": [[1.0, 2.0], [3.0, -1.0]]}
```
Given two `Example` input protos in `serialized`:
```
[
features {
feature { key: "kw" value { bytes_list { value: [ "knit", "big" ] } } }
feature { key: "gps" value { float_list { value: [] } } }
},
features {
feature { key: "kw" value { bytes_list { value: [ "emmy" ] } } }
feature { key: "dank" value { int64_list { value: [ 42 ] } } }
feature { key: "gps" value { } }
}
]
```
And arguments
```
example_names: ["input0", "input1"],
features: {
"kw": VarLenFeature(tf.string),
"dank": VarLenFeature(tf.int64),
"gps": VarLenFeature(tf.float32),
}
```
Then the output is a dictionary:
```python
{
"kw": SparseTensor(
indices=[[0, 0], [0, 1], [1, 0]],
values=["knit", "big", "emmy"]
dense_shape=[2, 2]),
"dank": SparseTensor(
indices=[[1, 0]],
values=[42],
dense_shape=[2, 1]),
"gps": SparseTensor(
indices=[],
values=[],
dense_shape=[2, 0]),
}
```
For dense results in two serialized `Example`s:
```
[
features {
feature { key: "age" value { int64_list { value: [ 0 ] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
},
features {
feature { key: "age" value { int64_list { value: [] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
}
]
```
We can use arguments:
```
example_names: ["input0", "input1"],
features: {
"age": FixedLenFeature([], dtype=tf.int64, default_value=-1),
"gender": FixedLenFeature([], dtype=tf.string),
}
```
And the expected output is:
```python
{
"age": [[0], [-1]],
"gender": [["f"], ["f"]],
}
```
An alternative to `VarLenFeature` to obtain a `SparseTensor` is
`SparseFeature`. For example, given two `Example` input protos in
`serialized`:
```
[
features {
feature { key: "val" value { float_list { value: [ 0.5, -1.0 ] } } }
feature { key: "ix" value { int64_list { value: [ 3, 20 ] } } }
},
features {
feature { key: "val" value { float_list { value: [ 0.0 ] } } }
feature { key: "ix" value { int64_list { value: [ 42 ] } } }
}
]
```
And arguments
```
example_names: ["input0", "input1"],
features: {
"sparse": SparseFeature(
index_key="ix", value_key="val", dtype=tf.float32, size=100),
}
```
Then the output is a dictionary:
```python
{
"sparse": SparseTensor(
indices=[[0, 3], [0, 20], [1, 42]],
values=[0.5, -1.0, 0.0]
dense_shape=[2, 100]),
}
```
Args:
serialized: A vector (1-D Tensor) of strings, a batch of binary
serialized `Example` protos.
features: A `dict` mapping feature keys to `FixedLenFeature`,
`VarLenFeature`, and `SparseFeature` values.
name: A name for this operation (optional).
example_names: A vector (1-D Tensor) of strings (optional), the names of
the serialized protos in the batch.
Returns:
A `dict` mapping feature keys to `Tensor` and `SparseTensor` values.
Raises:
ValueError: if any feature is invalid.
"""
if not features:
raise ValueError("Missing: features was %s." % features)
features = _prepend_none_dimension(features)
(sparse_keys, sparse_types, dense_keys, dense_types, dense_defaults,
dense_shapes) = _features_to_raw_params(
features,
[VarLenFeature, SparseFeature, FixedLenFeature, FixedLenSequenceFeature])
outputs = _parse_example_raw(
serialized, example_names, sparse_keys, sparse_types, dense_keys,
dense_types, dense_defaults, dense_shapes, name)
return _construct_sparse_tensors_for_sparse_features(features, outputs)
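# A minimal usage sketch (hypothetical tensors; behavior as described in the
# docstring above):
#
#   parsed = parse_example(
#       serialized_batch,  # 1-D string Tensor of serialized Example protos
#       features={"age": FixedLenFeature([], dtypes.int64, default_value=-1),
#                 "kw": VarLenFeature(dtypes.string)})
#   # parsed["age"] is a dense Tensor of shape [batch_size];
#   # parsed["kw"] is a SparseTensor with indices of the form [batch, index].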
def _parse_example_raw(serialized,
names=None,
sparse_keys=None,
sparse_types=None,
dense_keys=None,
dense_types=None,
dense_defaults=None,
dense_shapes=None,
name=None):
"""Parses `Example` protos.
Args:
serialized: A vector (1-D Tensor) of strings, a batch of binary
serialized `Example` protos.
names: A vector (1-D Tensor) of strings (optional), the names of
the serialized protos.
sparse_keys: A list of string keys in the examples' features.
The results for these keys will be returned as `SparseTensor` objects.
sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
dense_keys: A list of string keys in the examples' features.
The results for these keys will be returned as `Tensor`s
dense_types: A list of DTypes of the same length as `dense_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
dense_defaults: A dict mapping string keys to `Tensor`s.
The keys of the dict must match the dense_keys of the feature.
dense_shapes: A list of tuples with the same length as `dense_keys`.
The shape of the data for each dense feature referenced by `dense_keys`.
Required for any input tensors identified by `dense_keys`. Must be
either fully defined, or may contain an unknown first dimension.
An unknown first dimension means the feature is treated as having
a variable number of blocks, and the output shape along this dimension
is considered unknown at graph build time. Padding is applied for
minibatch elements smaller than the maximum number of blocks for the
given feature along this dimension.
name: A name for this operation (optional).
Returns:
A `dict` mapping keys to `Tensor`s and `SparseTensor`s.
"""
with ops.name_scope(name, "ParseExample", [serialized, names]):
(names, dense_defaults_vec, sparse_keys, sparse_types,
dense_keys, dense_shapes, _) = _process_raw_parameters(
names, dense_defaults, sparse_keys, sparse_types, dense_keys,
dense_types, dense_shapes)
outputs = gen_parsing_ops.parse_example(
serialized=serialized,
names=names,
dense_defaults=dense_defaults_vec,
sparse_keys=sparse_keys,
sparse_types=sparse_types,
dense_keys=dense_keys,
dense_shapes=dense_shapes,
name=name)
(sparse_indices, sparse_values, sparse_shapes, dense_values) = outputs
sparse_tensors = [
sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape)
in zip(sparse_indices, sparse_values, sparse_shapes)]
return dict(zip(sparse_keys + dense_keys, sparse_tensors + dense_values))
def _process_raw_parameters(names, dense_defaults, sparse_keys, sparse_types,
dense_keys, dense_types, dense_shapes):
"""Process raw parameters to params used by `gen_parsing_ops`.
Args:
names: A vector (1-D Tensor) of strings (optional), the names of
the serialized protos.
dense_defaults: A dict mapping string keys to `Tensor`s.
The keys of the dict must match the dense_keys of the feature.
sparse_keys: A list of string keys in the examples' features.
The results for these keys will be returned as `SparseTensor` objects.
sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
dense_keys: A list of string keys in the examples' features.
The results for these keys will be returned as `Tensor`s
dense_types: A list of DTypes of the same length as `dense_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
dense_shapes: A list of tuples with the same length as `dense_keys`.
The shape of the data for each dense feature referenced by `dense_keys`.
Required for any input tensors identified by `dense_keys`. Must be
either fully defined, or may contain an unknown first dimension.
An unknown first dimension means the feature is treated as having
a variable number of blocks, and the output shape along this dimension
is considered unknown at graph build time. Padding is applied for
minibatch elements smaller than the maximum number of blocks for the
given feature along this dimension.
Returns:
Tuple of `names`, `dense_defaults_vec`, `sparse_keys`, `sparse_types`,
`dense_keys`, `dense_shapes`.
Raises:
ValueError: If sparse and dense key sets intersect, or input lengths do not
match up.
"""
names = [] if names is None else names
dense_defaults = collections.OrderedDict(
) if dense_defaults is None else dense_defaults
sparse_keys = [] if sparse_keys is None else sparse_keys
sparse_types = [] if sparse_types is None else sparse_types
dense_keys = [] if dense_keys is None else dense_keys
dense_types = [] if dense_types is None else dense_types
dense_shapes = ([[]] * len(dense_keys)
if dense_shapes is None else dense_shapes)
num_dense = len(dense_keys)
num_sparse = len(sparse_keys)
if len(dense_shapes) != num_dense:
raise ValueError("len(dense_shapes) != len(dense_keys): %d vs. %d" %
(len(dense_shapes), num_dense))
if len(dense_types) != num_dense:
raise ValueError("len(dense_types) != len(num_dense): %d vs. %d" %
(len(dense_types), num_dense))
if len(sparse_types) != num_sparse:
raise ValueError("len(sparse_types) != len(sparse_keys): %d vs. %d" %
(len(sparse_types), num_sparse))
if num_dense + num_sparse == 0:
raise ValueError("Must provide at least one sparse key or dense key")
if not set(dense_keys).isdisjoint(set(sparse_keys)):
raise ValueError(
"Dense and sparse keys must not intersect; intersection: %s" %
set(dense_keys).intersection(set(sparse_keys)))
# Convert dense_shapes to TensorShape object.
dense_shapes = [tensor_shape.as_shape(shape) for shape in dense_shapes]
dense_defaults_vec = []
for i, key in enumerate(dense_keys):
default_value = dense_defaults.get(key)
dense_shape = dense_shapes[i]
if (dense_shape.ndims is not None and dense_shape.ndims > 0 and
dense_shape.dims[0].value is None):
# Variable stride dense shape, the default value should be a
# scalar padding value
if default_value is None:
default_value = ops.convert_to_tensor(
"" if dense_types[i] == dtypes.string else 0, dtype=dense_types[i])
else:
# Reshape to a scalar to ensure user gets an error if they
# provide a tensor that's not intended to be a padding value
# (0 or 2+ elements).
key_name = "padding_" + re.sub("[^A-Za-z0-9_.\\-/]", "_", key)
default_value = ops.convert_to_tensor(
default_value, dtype=dense_types[i], name=key_name)
default_value = array_ops.reshape(default_value, [])
else:
if default_value is None:
default_value = constant_op.constant([], dtype=dense_types[i])
elif not isinstance(default_value, ops.Tensor):
key_name = "key_" + re.sub("[^A-Za-z0-9_.\\-/]", "_", key)
default_value = ops.convert_to_tensor(
default_value, dtype=dense_types[i], name=key_name)
default_value = array_ops.reshape(default_value, dense_shape)
dense_defaults_vec.append(default_value)
# Finally, convert dense_shapes to TensorShapeProto
dense_shapes_as_proto = [shape.as_proto() for shape in dense_shapes]
return (names, dense_defaults_vec, sparse_keys, sparse_types, dense_keys,
dense_shapes_as_proto, dense_shapes)
@tf_export("io.parse_single_example",
v1=["io.parse_single_example", "parse_single_example"])
def parse_single_example(serialized, features, name=None, example_names=None):
"""Parses a single `Example` proto.
Similar to `parse_example`, except:
For dense tensors, the returned `Tensor` is identical to the output of
`parse_example`, except there is no batch dimension, the output shape is the
same as the shape given in `dense_shape`.
For `SparseTensor`s, the first (batch) column of the indices matrix is removed
(the indices matrix is a column vector), the values vector is unchanged, and
the first (`batch_size`) entry of the shape vector is removed (it is now a
single element vector).
One might see performance advantages by batching `Example` protos with
`parse_example` instead of using this function directly.
Args:
serialized: A scalar string Tensor, a single serialized Example.
See `_parse_single_example_raw` documentation for more details.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values.
name: A name for this operation (optional).
example_names: (Optional) A scalar string Tensor, the associated name.
See `_parse_single_example_raw` documentation for more details.
Returns:
A `dict` mapping feature keys to `Tensor` and `SparseTensor` values.
Raises:
ValueError: if any feature is invalid.
"""
if not features:
raise ValueError("Missing features.")
if example_names is None:
return parse_single_example_v2(serialized, features, name)
features = _prepend_none_dimension(features)
(sparse_keys, sparse_types, dense_keys, dense_types, dense_defaults,
dense_shapes) = _features_to_raw_params(
features,
[VarLenFeature, FixedLenFeature, FixedLenSequenceFeature, SparseFeature])
outputs = _parse_single_example_raw(
serialized, example_names, sparse_keys, sparse_types, dense_keys,
dense_types, dense_defaults, dense_shapes, name)
return _construct_sparse_tensors_for_sparse_features(features, outputs)
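# Usage sketch (hypothetical scalar input): same feature configs as parse_example,
# but `serialized` is a single proto and outputs drop the batch dimension.
#
#   parsed = parse_single_example(
#       one_serialized_example,  # scalar string Tensor
#       features={"age": FixedLenFeature([], dtypes.int64, default_value=-1)})
#   # parsed["age"] is a scalar Tensor (shape []), not shape [batch_size].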
def _parse_single_example_raw(serialized,
names=None,
sparse_keys=None,
sparse_types=None,
dense_keys=None,
dense_types=None,
dense_defaults=None,
dense_shapes=None,
name=None):
"""Parses a single `Example` proto.
Args:
serialized: A scalar string Tensor, a single serialized Example.
See `_parse_example_raw` documentation for more details.
names: (Optional) A scalar string Tensor, the associated name.
See `_parse_example_raw` documentation for more details.
sparse_keys: See `_parse_example_raw` documentation for more details.
sparse_types: See `_parse_example_raw` documentation for more details.
dense_keys: See `_parse_example_raw` documentation for more details.
dense_types: See `_parse_example_raw` documentation for more details.
dense_defaults: See `_parse_example_raw` documentation for more details.
dense_shapes: See `_parse_example_raw` documentation for more details.
name: A name for this operation (optional).
Returns:
A `dict` mapping feature keys to `Tensor` and `SparseTensor` values.
Raises:
ValueError: if any feature is invalid.
"""
with ops.name_scope(name, "ParseSingleExample", [serialized, names]):
serialized = ops.convert_to_tensor(serialized)
serialized_shape = serialized.get_shape()
if serialized_shape.ndims is not None:
if serialized_shape.ndims != 0:
raise ValueError("Input serialized must be a scalar")
else:
serialized = control_flow_ops.with_dependencies(
[control_flow_ops.Assert(
math_ops.equal(array_ops.rank(serialized), 0),
["Input serialized must be a scalar"],
name="SerializedIsScalar")],
serialized,
name="SerializedDependencies")
serialized = array_ops.expand_dims(serialized, 0)
if names is not None:
names = ops.convert_to_tensor(names)
names_shape = names.get_shape()
if names_shape.ndims is not None:
if names_shape.ndims != 0:
raise ValueError("Input names must be a scalar")
else:
names = control_flow_ops.with_dependencies(
[control_flow_ops.Assert(
math_ops.equal(array_ops.rank(names), 0),
["Input names must be a scalar"],
name="NamesIsScalar")],
names,
name="NamesDependencies")
names = array_ops.expand_dims(names, 0)
outputs = _parse_example_raw(
serialized,
names=names,
sparse_keys=sparse_keys,
sparse_types=sparse_types,
dense_keys=dense_keys,
dense_types=dense_types,
dense_defaults=dense_defaults,
dense_shapes=dense_shapes,
name=name)
if dense_keys is not None:
for d in dense_keys:
d_name = re.sub("[^A-Za-z0-9_.\\-/]", "_", d)
outputs[d] = array_ops.squeeze(
outputs[d], [0], name="Squeeze_%s" % d_name)
if sparse_keys is not None:
for s in sparse_keys:
s_name = re.sub("[^A-Za-z0-9_.\\-/]", "_", s)
outputs[s] = sparse_tensor.SparseTensor(
array_ops.slice(outputs[s].indices,
[0, 1], [-1, -1], name="Slice_Indices_%s" % s_name),
outputs[s].values,
array_ops.slice(outputs[s].dense_shape,
[1], [-1], name="Squeeze_Shape_%s" % s_name))
return outputs
@tf_export("io.parse_sequence_example")
def parse_sequence_example(serialized,
context_features=None,
sequence_features=None,
example_names=None,
name=None):
# pylint: disable=line-too-long
"""Parses a batch of `SequenceExample` protos.
Parses a vector of serialized
[`SequenceExample`](https://www.tensorflow.org/code/tensorflow/core/example/example.proto)
protos given in `serialized`.
This op parses serialized sequence examples into a tuple of dictionaries
mapping keys to `Tensor` and `SparseTensor` objects respectively.
The first dictionary contains mappings for keys appearing in
`context_features`, and the second dictionary contains mappings for keys
appearing in `sequence_features`.
At least one of `context_features` and `sequence_features` must be provided
and non-empty.
The `context_features` keys are associated with a `SequenceExample` as a
whole, independent of time / frame. In contrast, the `sequence_features` keys
provide a way to access variable-length data within the `FeatureList` section
of the `SequenceExample` proto. While the shapes of `context_features` values
are fixed with respect to frame, the frame dimension (the first dimension)
of `sequence_features` values may vary between `SequenceExample` protos,
and even between `feature_list` keys within the same `SequenceExample`.
`context_features` contains `VarLenFeature` and `FixedLenFeature` objects.
Each `VarLenFeature` is mapped to a `SparseTensor`, and each `FixedLenFeature`
is mapped to a `Tensor`, of the specified type, shape, and default value.
`sequence_features` contains `VarLenFeature` and `FixedLenSequenceFeature`
objects. Each `VarLenFeature` is mapped to a `SparseTensor`, and each
`FixedLenSequenceFeature` is mapped to a `Tensor`, each of the specified type.
The shape will be `(B,T,) + df.dense_shape` for `FixedLenSequenceFeature`
`df`, where `B` is the batch size, and `T` is the length of the associated
`FeatureList` in the `SequenceExample`. For instance,
`FixedLenSequenceFeature([])` yields a scalar 2-D `Tensor` of static shape
`[None, None]` and dynamic shape `[B, T]`, while
`FixedLenSequenceFeature([k])` (for `int k >= 1`) yields a 3-D matrix `Tensor`
of static shape `[None, None, k]` and dynamic shape `[B, T, k]`.
Like the input, the resulting output tensors have a batch dimension. This
means that the original per-example shapes of `VarLenFeature`s and
`FixedLenSequenceFeature`s can be lost. To handle that situation, this op also
provides dicts of shape tensors as part of the output. There is one dict for
the context features, and one for the feature_list features. Context features
of type `FixedLenFeature`s will not be present, since their shapes are already
  known by the caller. In situations where the input `FixedLenFeature`s are of
different lengths across examples, the shorter examples will be padded with
default datatype values: 0 for numeric types, and the empty string for string
types.
Each `SparseTensor` corresponding to `sequence_features` represents a ragged
vector. Its indices are `[time, index]`, where `time` is the `FeatureList`
entry and `index` is the value's index in the list of values associated with
that time.
`FixedLenFeature` entries with a `default_value` and `FixedLenSequenceFeature`
entries with `allow_missing=True` are optional; otherwise, we will fail if
that `Feature` or `FeatureList` is missing from any example in `serialized`.
`example_name` may contain a descriptive name for the corresponding serialized
proto. This may be useful for debugging purposes, but it has no effect on the
output. If not `None`, `example_name` must be a scalar.
Args:
serialized: A vector (1-D Tensor) of type string containing binary
serialized `SequenceExample` protos.
context_features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values. These features are associated with a
`SequenceExample` as a whole.
sequence_features: A `dict` mapping feature keys to
`FixedLenSequenceFeature` or `VarLenFeature` values. These features are
associated with data within the `FeatureList` section of the
`SequenceExample` proto.
example_names: A vector (1-D Tensor) of strings (optional), the name of the
serialized protos.
name: A name for this operation (optional).
Returns:
A tuple of three `dict`s, each mapping keys to `Tensor`s and
`SparseTensor`s. The first dict contains the context key/values,
the second dict contains the feature_list key/values, and the final dict
contains the lengths of any dense feature_list features.
Raises:
ValueError: if any feature is invalid.
"""
if not (context_features or sequence_features):
raise ValueError("Missing features.")
(context_sparse_keys, context_sparse_types, context_dense_keys,
context_dense_types,
context_dense_defaults, context_dense_shapes) = _features_to_raw_params(
context_features, [VarLenFeature, FixedLenFeature])
(feature_list_sparse_keys, feature_list_sparse_types, feature_list_dense_keys,
feature_list_dense_types, feature_list_dense_defaults,
feature_list_dense_shapes) = _features_to_raw_params(
sequence_features, [VarLenFeature, FixedLenSequenceFeature])
return _parse_sequence_example_raw(
serialized, example_names, context_sparse_keys, context_sparse_types,
context_dense_keys, context_dense_types, context_dense_defaults,
context_dense_shapes, feature_list_sparse_keys, feature_list_sparse_types,
feature_list_dense_keys, feature_list_dense_types,
feature_list_dense_shapes, feature_list_dense_defaults, name)
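# Usage sketch (hypothetical keys): context and sequence features are declared in
# separate dicts, and three dicts come back (context, feature_list, lengths).
#
#   context, sequences, lengths = parse_sequence_example(
#       serialized_batch,  # 1-D string Tensor of SequenceExample protos
#       context_features={"label": FixedLenFeature([], dtypes.int64)},
#       sequence_features={"tokens": FixedLenSequenceFeature(
#           [], dtypes.string, allow_missing=True)})
#   # sequences["tokens"] has shape [batch, max_time]; lengths["tokens"] holds the
#   # per-example number of FeatureList entries.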
def _parse_sequence_example_raw(serialized,
debug_name=None,
context_sparse_keys=None,
context_sparse_types=None,
context_dense_keys=None,
context_dense_types=None,
context_dense_defaults=None,
context_dense_shapes=None,
feature_list_sparse_keys=None,
feature_list_sparse_types=None,
feature_list_dense_keys=None,
feature_list_dense_types=None,
feature_list_dense_shapes=None,
feature_list_dense_defaults=None,
name=None):
"""Parses a vector of `SequenceExample` protos.
Args:
serialized: A vector (1-D Tensor) of type string, containing binary
serialized `SequenceExample` protos.
debug_name: A vector (1-D Tensor) of strings (optional), the names of the
serialized protos.
context_sparse_keys: A list of string keys in the `SequenceExample`'s
features. The results for these keys will be returned as `SparseTensor`
objects.
context_sparse_types: A list of `DTypes`, the same length as `sparse_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`), and `tf.string`
(`BytesList`) are supported.
context_dense_keys: A list of string keys in the examples' features. The
results for these keys will be returned as `Tensor`s
context_dense_types: A list of DTypes, same length as `context_dense_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`), and `tf.string`
(`BytesList`) are supported.
context_dense_defaults: A dict mapping string keys to `Tensor`s. The keys of
the dict must match the context_dense_keys of the feature.
context_dense_shapes: A list of tuples, same length as `context_dense_keys`.
The shape of the data for each context_dense feature referenced by
`context_dense_keys`. Required for any input tensors identified by
`context_dense_keys` whose shapes are anything other than `[]` or `[1]`.
feature_list_sparse_keys: A list of string keys in the `SequenceExample`'s
feature_lists. The results for these keys will be returned as
`SparseTensor` objects.
feature_list_sparse_types: A list of `DTypes`, same length as `sparse_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`), and `tf.string`
(`BytesList`) are supported.
feature_list_dense_keys: A list of string keys in the `SequenceExample`'s
features_lists. The results for these keys will be returned as `Tensor`s.
feature_list_dense_types: A list of `DTypes`, same length as
`feature_list_dense_keys`. Only `tf.float32` (`FloatList`), `tf.int64`
(`Int64List`), and `tf.string` (`BytesList`) are supported.
feature_list_dense_shapes: A list of tuples, same length as
`feature_list_dense_keys`. The shape of the data for each `FeatureList`
feature referenced by `feature_list_dense_keys`.
feature_list_dense_defaults: A dict mapping key strings to values. The only
currently allowed value is `None`. Any key appearing in this dict with
value `None` is allowed to be missing from the `SequenceExample`. If
missing, the key is treated as zero-length.
name: A name for this operation (optional).
Returns:
A tuple of three `dict`s, each mapping keys to `Tensor`s and
`SparseTensor`s. The first dict contains the context key/values,
the second dict contains the feature_list key/values, and the final dict
contains the lengths of any dense feature_list features.
Raises:
ValueError: If context_sparse and context_dense key sets intersect,
if feature_list_sparse and feature_list_dense key sets intersect,
if input lengths do not match up, or if a value in
feature_list_dense_defaults is not None.
TypeError: if feature_list_dense_defaults is not either None or a dict.
"""
with ops.name_scope(name, "ParseSequenceExample", [serialized]):
context_dense_defaults = ({} if context_dense_defaults is None else
context_dense_defaults)
context_sparse_keys = ([] if context_sparse_keys is None else
context_sparse_keys)
context_sparse_types = ([] if context_sparse_types is None else
context_sparse_types)
context_dense_keys = ([]
if context_dense_keys is None else context_dense_keys)
context_dense_types = ([] if context_dense_types is None else
context_dense_types)
context_dense_shapes = ([[]] * len(context_dense_keys)
if context_dense_shapes is None else
context_dense_shapes)
feature_list_sparse_keys = ([] if feature_list_sparse_keys is None else
feature_list_sparse_keys)
feature_list_sparse_types = ([] if feature_list_sparse_types is None else
feature_list_sparse_types)
feature_list_dense_keys = ([] if feature_list_dense_keys is None else
feature_list_dense_keys)
feature_list_dense_types = ([] if feature_list_dense_types is None else
feature_list_dense_types)
feature_list_dense_shapes = ([[]] * len(feature_list_dense_keys)
if feature_list_dense_shapes is None else
feature_list_dense_shapes)
feature_list_dense_defaults = (
dict()
if feature_list_dense_defaults is None else feature_list_dense_defaults)
debug_name = [] if debug_name is None else debug_name
# Internal
feature_list_dense_missing_assumed_empty = []
num_context_dense = len(context_dense_keys)
num_feature_list_dense = len(feature_list_dense_keys)
num_context_sparse = len(context_sparse_keys)
num_feature_list_sparse = len(feature_list_sparse_keys)
if len(context_dense_shapes) != num_context_dense:
raise ValueError(
"len(context_dense_shapes) != len(context_dense_keys): %d vs. %d" %
(len(context_dense_shapes), num_context_dense))
if len(context_dense_types) != num_context_dense:
raise ValueError(
"len(context_dense_types) != len(num_context_dense): %d vs. %d" %
(len(context_dense_types), num_context_dense))
if len(feature_list_dense_shapes) != num_feature_list_dense:
raise ValueError(
"len(feature_list_dense_shapes) != len(feature_list_dense_keys): "
"%d vs. %d" % (len(feature_list_dense_shapes),
num_feature_list_dense))
if len(feature_list_dense_types) != num_feature_list_dense:
raise ValueError(
"len(feature_list_dense_types) != len(num_feature_list_dense):"
"%d vs. %d" % (len(feature_list_dense_types), num_feature_list_dense))
if len(context_sparse_types) != num_context_sparse:
raise ValueError(
"len(context_sparse_types) != len(context_sparse_keys): %d vs. %d" %
(len(context_sparse_types), num_context_sparse))
if len(feature_list_sparse_types) != num_feature_list_sparse:
raise ValueError(
"len(feature_list_sparse_types) != len(feature_list_sparse_keys): "
"%d vs. %d" % (len(feature_list_sparse_types),
num_feature_list_sparse))
if (num_context_dense + num_context_sparse + num_feature_list_dense +
num_feature_list_sparse) == 0:
raise ValueError(
"Must provide at least one context_sparse key, context_dense key, "
", feature_list_sparse key, or feature_list_dense key")
if not set(context_dense_keys).isdisjoint(set(context_sparse_keys)):
raise ValueError(
"context_dense and context_sparse keys must not intersect; "
"intersection: %s" % set(context_dense_keys).intersection(
set(context_sparse_keys)))
if not set(feature_list_dense_keys).isdisjoint(
set(feature_list_sparse_keys)):
raise ValueError(
"feature_list_dense and feature_list_sparse keys must not intersect; "
"intersection: %s" % set(feature_list_dense_keys).intersection(
set(feature_list_sparse_keys)))
if not isinstance(feature_list_dense_defaults, dict):
raise TypeError("feature_list_dense_defaults must be a dict")
for k, v in feature_list_dense_defaults.items():
if v is not None:
raise ValueError(
"Value feature_list_dense_defaults[%s] must be None" % k)
feature_list_dense_missing_assumed_empty.append(k)
context_dense_defaults_vec = []
for i, key in enumerate(context_dense_keys):
default_value = context_dense_defaults.get(key)
if default_value is None:
default_value = constant_op.constant([], dtype=context_dense_types[i])
elif not isinstance(default_value, ops.Tensor):
key_name = "key_" + re.sub("[^A-Za-z0-9_.\\-/]", "_", key)
default_value = ops.convert_to_tensor(
default_value, dtype=context_dense_types[i], name=key_name)
context_dense_defaults_vec.append(default_value)
context_dense_shapes = [
tensor_shape.as_shape(shape).as_proto()
for shape in context_dense_shapes
]
feature_list_dense_shapes = [
tensor_shape.as_shape(shape).as_proto()
for shape in feature_list_dense_shapes
]
# pylint: disable=protected-access
outputs = gen_parsing_ops.parse_sequence_example(
serialized=serialized,
debug_name=debug_name,
Ncontext_sparse=num_context_sparse,
Ncontext_dense=num_context_dense,
Nfeature_list_sparse=num_feature_list_sparse,
Nfeature_list_dense=num_feature_list_dense,
context_dense_defaults=context_dense_defaults_vec,
context_sparse_keys=context_sparse_keys,
context_sparse_types=context_sparse_types,
context_dense_keys=context_dense_keys,
context_dense_shapes=context_dense_shapes,
feature_list_sparse_keys=feature_list_sparse_keys,
feature_list_sparse_types=feature_list_sparse_types,
feature_list_dense_keys=feature_list_dense_keys,
feature_list_dense_types=feature_list_dense_types,
feature_list_dense_shapes=feature_list_dense_shapes,
feature_list_dense_missing_assumed_empty=(
feature_list_dense_missing_assumed_empty),
name=name)
# pylint: enable=protected-access
(context_sparse_indices, context_sparse_values, context_sparse_shapes,
context_dense_values, feature_list_sparse_indices,
feature_list_sparse_values, feature_list_sparse_shapes,
feature_list_dense_values, feature_list_dense_lengths) = outputs
context_sparse_tensors = [
sparse_tensor.SparseTensor(ix, val, shape)
for (ix, val,
shape) in zip(context_sparse_indices, context_sparse_values,
context_sparse_shapes)
]
feature_list_sparse_tensors = [
sparse_tensor.SparseTensor(ix, val, shape)
for (ix, val, shape
) in zip(feature_list_sparse_indices, feature_list_sparse_values,
feature_list_sparse_shapes)
]
context_output = dict(
zip(context_sparse_keys + context_dense_keys,
context_sparse_tensors + context_dense_values))
feature_list_output = dict(
zip(feature_list_sparse_keys + feature_list_dense_keys,
feature_list_sparse_tensors + feature_list_dense_values))
feature_list_lengths = dict(
zip(feature_list_dense_keys, feature_list_dense_lengths))
return (context_output, feature_list_output, feature_list_lengths)
# TODO(sundberg): rewrite this method to call the batch version, which is more
# efficient especially for large inputs.
@tf_export("io.parse_single_sequence_example",
v1=["io.parse_single_sequence_example",
"parse_single_sequence_example"])
def parse_single_sequence_example(
serialized, context_features=None, sequence_features=None,
example_name=None, name=None):
# pylint: disable=line-too-long
"""Parses a single `SequenceExample` proto.
Parses a single serialized [`SequenceExample`](https://www.tensorflow.org/code/tensorflow/core/example/example.proto)
proto given in `serialized`.
This op parses a serialized sequence example into a tuple of dictionaries
mapping keys to `Tensor` and `SparseTensor` objects respectively.
The first dictionary contains mappings for keys appearing in
`context_features`, and the second dictionary contains mappings for keys
appearing in `sequence_features`.
At least one of `context_features` and `sequence_features` must be provided
and non-empty.
The `context_features` keys are associated with a `SequenceExample` as a
whole, independent of time / frame. In contrast, the `sequence_features` keys
provide a way to access variable-length data within the `FeatureList` section
of the `SequenceExample` proto. While the shapes of `context_features` values
are fixed with respect to frame, the frame dimension (the first dimension)
of `sequence_features` values may vary between `SequenceExample` protos,
and even between `feature_list` keys within the same `SequenceExample`.
`context_features` contains `VarLenFeature` and `FixedLenFeature` objects.
Each `VarLenFeature` is mapped to a `SparseTensor`, and each `FixedLenFeature`
is mapped to a `Tensor`, of the specified type, shape, and default value.
`sequence_features` contains `VarLenFeature` and `FixedLenSequenceFeature`
objects. Each `VarLenFeature` is mapped to a `SparseTensor`, and each
`FixedLenSequenceFeature` is mapped to a `Tensor`, each of the specified type.
The shape will be `(T,) + df.dense_shape` for `FixedLenSequenceFeature` `df`, where
`T` is the length of the associated `FeatureList` in the `SequenceExample`.
For instance, `FixedLenSequenceFeature([])` yields a scalar 1-D `Tensor` of
static shape `[None]` and dynamic shape `[T]`, while
`FixedLenSequenceFeature([k])` (for `int k >= 1`) yields a 2-D matrix `Tensor`
of static shape `[None, k]` and dynamic shape `[T, k]`.
Each `SparseTensor` corresponding to `sequence_features` represents a ragged
vector. Its indices are `[time, index]`, where `time` is the `FeatureList`
entry and `index` is the value's index in the list of values associated with
that time.
`FixedLenFeature` entries with a `default_value` and `FixedLenSequenceFeature`
entries with `allow_missing=True` are optional; otherwise, we will fail if
that `Feature` or `FeatureList` is missing from any example in `serialized`.
`example_name` may contain a descriptive name for the corresponding serialized
proto. This may be useful for debugging purposes, but it has no effect on the
output. If not `None`, `example_name` must be a scalar.
Args:
serialized: A scalar (0-D Tensor) of type string, a single binary
serialized `SequenceExample` proto.
context_features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values. These features are associated with a
`SequenceExample` as a whole.
sequence_features: A `dict` mapping feature keys to
`FixedLenSequenceFeature` or `VarLenFeature` values. These features are
associated with data within the `FeatureList` section of the
`SequenceExample` proto.
example_name: A scalar (0-D Tensor) of strings (optional), the name of
the serialized proto.
name: A name for this operation (optional).
Returns:
A tuple of two `dict`s, each mapping keys to `Tensor`s and `SparseTensor`s.
The first dict contains the context key/values.
The second dict contains the feature_list key/values.
Raises:
ValueError: if any feature is invalid.
"""
# pylint: enable=line-too-long
if not (context_features or sequence_features):
raise ValueError("Missing features.")
(context_sparse_keys, context_sparse_types, context_dense_keys,
context_dense_types, context_dense_defaults,
context_dense_shapes) = _features_to_raw_params(
context_features, [VarLenFeature, FixedLenFeature])
(feature_list_sparse_keys, feature_list_sparse_types,
feature_list_dense_keys, feature_list_dense_types,
feature_list_dense_defaults,
feature_list_dense_shapes) = _features_to_raw_params(
sequence_features, [VarLenFeature, FixedLenSequenceFeature])
return _parse_single_sequence_example_raw(
serialized, context_sparse_keys, context_sparse_types,
context_dense_keys, context_dense_types, context_dense_defaults,
context_dense_shapes, feature_list_sparse_keys,
feature_list_sparse_types, feature_list_dense_keys,
feature_list_dense_types, feature_list_dense_shapes,
feature_list_dense_defaults, example_name, name)
def _parse_single_sequence_example_raw(serialized,
context_sparse_keys=None,
context_sparse_types=None,
context_dense_keys=None,
context_dense_types=None,
context_dense_defaults=None,
context_dense_shapes=None,
feature_list_sparse_keys=None,
feature_list_sparse_types=None,
feature_list_dense_keys=None,
feature_list_dense_types=None,
feature_list_dense_shapes=None,
feature_list_dense_defaults=None,
debug_name=None,
name=None):
"""Parses a single `SequenceExample` proto.
Args:
serialized: A scalar (0-D Tensor) of type string, a single binary
serialized `SequenceExample` proto.
context_sparse_keys: A list of string keys in the `SequenceExample`'s
features. The results for these keys will be returned as
`SparseTensor` objects.
    context_sparse_types: A list of `DTypes`, the same length as
      `context_sparse_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
context_dense_keys: A list of string keys in the examples' features.
      The results for these keys will be returned as `Tensor`s.
context_dense_types: A list of DTypes, same length as `context_dense_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
context_dense_defaults: A dict mapping string keys to `Tensor`s.
The keys of the dict must match the context_dense_keys of the feature.
context_dense_shapes: A list of tuples, same length as `context_dense_keys`.
The shape of the data for each context_dense feature referenced by
`context_dense_keys`. Required for any input tensors identified by
`context_dense_keys` whose shapes are anything other than `[]` or `[1]`.
feature_list_sparse_keys: A list of string keys in the `SequenceExample`'s
feature_lists. The results for these keys will be returned as
`SparseTensor` objects.
    feature_list_sparse_types: A list of `DTypes`, same length as
      `feature_list_sparse_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
feature_list_dense_keys: A list of string keys in the `SequenceExample`'s
      feature_lists. The results for these keys will be returned as `Tensor`s.
feature_list_dense_types: A list of `DTypes`, same length as
`feature_list_dense_keys`. Only `tf.float32` (`FloatList`),
`tf.int64` (`Int64List`), and `tf.string` (`BytesList`) are supported.
feature_list_dense_shapes: A list of tuples, same length as
`feature_list_dense_keys`. The shape of the data for each
`FeatureList` feature referenced by `feature_list_dense_keys`.
feature_list_dense_defaults: A dict mapping key strings to values.
The only currently allowed value is `None`. Any key appearing
in this dict with value `None` is allowed to be missing from the
`SequenceExample`. If missing, the key is treated as zero-length.
debug_name: A scalar (0-D Tensor) of strings (optional), the name of
the serialized proto.
name: A name for this operation (optional).
Returns:
A tuple of two `dict`s, each mapping keys to `Tensor`s and `SparseTensor`s.
The first dict contains the context key/values.
The second dict contains the feature_list key/values.
Raises:
ValueError: If context_sparse and context_dense key sets intersect,
if input lengths do not match up, or if a value in
feature_list_dense_defaults is not None.
TypeError: if feature_list_dense_defaults is not either None or a dict.
"""
with ops.name_scope(name, "ParseSingleSequenceExample", [serialized]):
context_dense_defaults = (
{} if context_dense_defaults is None else context_dense_defaults)
context_sparse_keys = (
[] if context_sparse_keys is None else context_sparse_keys)
context_sparse_types = (
[] if context_sparse_types is None else context_sparse_types)
context_dense_keys = (
[] if context_dense_keys is None else context_dense_keys)
context_dense_types = (
[] if context_dense_types is None else context_dense_types)
context_dense_shapes = (
[[]] * len(context_dense_keys)
if context_dense_shapes is None else context_dense_shapes)
feature_list_sparse_keys = (
[] if feature_list_sparse_keys is None else feature_list_sparse_keys)
feature_list_sparse_types = (
[] if feature_list_sparse_types is None else feature_list_sparse_types)
feature_list_dense_keys = (
[] if feature_list_dense_keys is None else feature_list_dense_keys)
feature_list_dense_types = (
[] if feature_list_dense_types is None else feature_list_dense_types)
feature_list_dense_shapes = (
[[]] * len(feature_list_dense_keys)
if feature_list_dense_shapes is None else feature_list_dense_shapes)
feature_list_dense_defaults = (
dict() if feature_list_dense_defaults is None
else feature_list_dense_defaults)
debug_name = "" if debug_name is None else debug_name
# Internal
feature_list_dense_missing_assumed_empty = []
num_context_dense = len(context_dense_keys)
num_feature_list_dense = len(feature_list_dense_keys)
num_context_sparse = len(context_sparse_keys)
num_feature_list_sparse = len(feature_list_sparse_keys)
if len(context_dense_shapes) != num_context_dense:
raise ValueError(
"len(context_dense_shapes) != len(context_dense_keys): %d vs. %d"
% (len(context_dense_shapes), num_context_dense))
if len(context_dense_types) != num_context_dense:
raise ValueError(
"len(context_dense_types) != len(num_context_dense): %d vs. %d"
% (len(context_dense_types), num_context_dense))
if len(feature_list_dense_shapes) != num_feature_list_dense:
raise ValueError(
"len(feature_list_dense_shapes) != len(feature_list_dense_keys): "
"%d vs. %d" % (len(feature_list_dense_shapes),
num_feature_list_dense))
if len(feature_list_dense_types) != num_feature_list_dense:
raise ValueError(
"len(feature_list_dense_types) != len(num_feature_list_dense):"
"%d vs. %d" % (len(feature_list_dense_types), num_feature_list_dense))
if len(context_sparse_types) != num_context_sparse:
raise ValueError(
"len(context_sparse_types) != len(context_sparse_keys): %d vs. %d"
% (len(context_sparse_types), num_context_sparse))
if len(feature_list_sparse_types) != num_feature_list_sparse:
raise ValueError(
"len(feature_list_sparse_types) != len(feature_list_sparse_keys): "
"%d vs. %d"
% (len(feature_list_sparse_types), num_feature_list_sparse))
if (num_context_dense + num_context_sparse
+ num_feature_list_dense + num_feature_list_sparse) == 0:
raise ValueError(
"Must provide at least one context_sparse key, context_dense key, "
", feature_list_sparse key, or feature_list_dense key")
if not set(context_dense_keys).isdisjoint(set(context_sparse_keys)):
raise ValueError(
"context_dense and context_sparse keys must not intersect; "
"intersection: %s" %
set(context_dense_keys).intersection(set(context_sparse_keys)))
if not set(feature_list_dense_keys).isdisjoint(
set(feature_list_sparse_keys)):
raise ValueError(
"feature_list_dense and feature_list_sparse keys must not intersect; "
"intersection: %s" %
set(feature_list_dense_keys).intersection(
set(feature_list_sparse_keys)))
if not isinstance(feature_list_dense_defaults, dict):
raise TypeError("feature_list_dense_defaults must be a dict")
for k, v in feature_list_dense_defaults.items():
if v is not None:
raise ValueError("Value feature_list_dense_defaults[%s] must be None"
% k)
feature_list_dense_missing_assumed_empty.append(k)
context_dense_defaults_vec = []
for i, key in enumerate(context_dense_keys):
default_value = context_dense_defaults.get(key)
if default_value is None:
default_value = constant_op.constant([], dtype=context_dense_types[i])
elif not isinstance(default_value, ops.Tensor):
key_name = "key_" + re.sub("[^A-Za-z0-9_.\\-/]", "_", key)
default_value = ops.convert_to_tensor(
default_value, dtype=context_dense_types[i], name=key_name)
default_value = array_ops.reshape(
default_value, context_dense_shapes[i])
context_dense_defaults_vec.append(default_value)
context_dense_shapes = [tensor_shape.as_shape(shape).as_proto()
for shape in context_dense_shapes]
feature_list_dense_shapes = [tensor_shape.as_shape(shape).as_proto()
for shape in feature_list_dense_shapes]
outputs = gen_parsing_ops.parse_single_sequence_example(
serialized=serialized,
debug_name=debug_name,
context_dense_defaults=context_dense_defaults_vec,
context_sparse_keys=context_sparse_keys,
context_sparse_types=context_sparse_types,
context_dense_keys=context_dense_keys,
context_dense_shapes=context_dense_shapes,
feature_list_sparse_keys=feature_list_sparse_keys,
feature_list_sparse_types=feature_list_sparse_types,
feature_list_dense_keys=feature_list_dense_keys,
feature_list_dense_types=feature_list_dense_types,
feature_list_dense_shapes=feature_list_dense_shapes,
feature_list_dense_missing_assumed_empty=(
feature_list_dense_missing_assumed_empty),
name=name)
(context_sparse_indices, context_sparse_values,
context_sparse_shapes, context_dense_values,
feature_list_sparse_indices, feature_list_sparse_values,
feature_list_sparse_shapes, feature_list_dense_values) = outputs
context_sparse_tensors = [
sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape)
in zip(context_sparse_indices,
context_sparse_values,
context_sparse_shapes)]
feature_list_sparse_tensors = [
sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape)
in zip(feature_list_sparse_indices,
feature_list_sparse_values,
feature_list_sparse_shapes)]
context_output = dict(
zip(context_sparse_keys + context_dense_keys,
context_sparse_tensors + context_dense_values))
feature_list_output = dict(
zip(feature_list_sparse_keys + feature_list_dense_keys,
feature_list_sparse_tensors + feature_list_dense_values))
return (context_output, feature_list_output)
# Swap `name` and `na_value` for backward compatibility.
@tf_export("io.decode_csv", v1=["io.decode_csv", "decode_csv"])
@deprecation.deprecated_endpoints("decode_csv")
def decode_csv(records,
record_defaults,
field_delim=",",
use_quote_delim=True,
name=None,
na_value="",
select_cols=None):
"""Convert CSV records to tensors. Each column maps to one tensor.
RFC 4180 format is expected for the CSV records.
(https://tools.ietf.org/html/rfc4180)
  Note that we allow leading and trailing spaces with int or float fields.
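  For example (the record strings and defaults below are illustrative only):
  ```python
  import tensorflow as tf
  records = tf.constant(["1,2.5,foo", "4,,bar"])
  col_a, col_b, col_c = tf.decode_csv(
      records, record_defaults=[[0], [0.0], [""]])
  # col_a -> [1, 4]; col_b -> [2.5, 0.0] (the empty field takes its default);
  # col_c -> ["foo", "bar"].
  ```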
Args:
records: A `Tensor` of type `string`.
Each string is a record/row in the csv and all records should have
the same format.
record_defaults: A list of `Tensor` objects with specific types.
Acceptable types are `float32`, `float64`, `int32`, `int64`, `string`.
One tensor per column of the input record, with either a
scalar default value for that column or an empty vector if the column is
required.
field_delim: An optional `string`. Defaults to `","`.
char delimiter to separate fields in a record.
use_quote_delim: An optional `bool`. Defaults to `True`.
If false, treats double quotation marks as regular
characters inside of the string fields (ignoring RFC 4180, Section 2,
Bullet 5).
name: A name for the operation (optional).
na_value: Additional string to recognize as NA/NaN.
select_cols: Optional sorted list of column indices to select. If specified,
only this subset of columns will be parsed and returned.
Returns:
A list of `Tensor` objects. Has the same type as `record_defaults`.
Each tensor will have the same shape as records.
Raises:
ValueError: If any of the arguments is malformed.
"""
if select_cols is not None and any(select_cols[i] >= select_cols[i + 1]
for i in range(len(select_cols) - 1)):
raise ValueError("select_cols is not strictly increasing.")
if select_cols is not None and select_cols[0] < 0:
raise ValueError("select_cols contains negative values.")
if select_cols is not None and len(select_cols) != len(record_defaults):
raise ValueError("Length of select_cols and record_defaults do not match.")
return gen_parsing_ops.decode_csv(
records=records,
record_defaults=record_defaults,
field_delim=field_delim,
use_quote_delim=use_quote_delim,
na_value=na_value,
name=name,
select_cols=select_cols,
)
# TODO(b/70890287): Combine the implementation of this op and
# `parse_single_example()` after 1/10/2018.
def parse_single_example_v2(serialized, features, name=None):
# pylint: disable=line-too-long
"""Parses an `Example` proto into a `dict` of tensors.
Parses a serialized
[`Example`](https://www.tensorflow.org/code/tensorflow/core/example/example.proto)
proto given in `serialized`.
This op parses serialized examples into a dictionary mapping keys to `Tensor`
and `SparseTensor` objects. `features` is a dict from keys to `VarLenFeature`,
`SparseFeature`, and `FixedLenFeature` objects. Each `VarLenFeature`
and `SparseFeature` is mapped to a `SparseTensor`, and each
`FixedLenFeature` is mapped to a `Tensor`.
Each `VarLenFeature` maps to a `SparseTensor` of the specified type
representing a ragged matrix. Its indices are `[index]` where
`index` is the value's index in the list of values associated with
that feature and example.
Each `SparseFeature` maps to a `SparseTensor` of the specified type
representing a Tensor of `dense_shape` `SparseFeature.size`.
Its `values` come from the feature in the examples with key `value_key`.
A `values[i]` comes from a position `k` in the feature of an example at batch
entry `batch`. This positional information is recorded in `indices[i]` as
`[batch, index_0, index_1, ...]` where `index_j` is the `k-th` value of
the feature in the example at with key `SparseFeature.index_key[j]`.
In other words, we split the indices (except the first index indicating the
batch entry) of a `SparseTensor` by dimension into different features of the
`Example`. Due to its complexity a `VarLenFeature` should be preferred over a
`SparseFeature` whenever possible.
Each `FixedLenFeature` `df` maps to a `Tensor` of the specified type (or
`tf.float32` if not specified) and shape `df.shape`.
`FixedLenFeature` entries with a `default_value` are optional. With no default
value, we will fail if that `Feature` is missing from any example in
`serialized`.
Each `FixedLenSequenceFeature` `df` maps to a `Tensor` of the specified type
(or `tf.float32` if not specified) and shape `(None,) + df.shape`.
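  For example (a minimal sketch; the feature keys, dtypes and the
  `serialized_example` input below are hypothetical):
  ```python
  features = {
      "label": tf.FixedLenFeature([], dtype=tf.int64, default_value=0),
      "tags": tf.VarLenFeature(dtype=tf.string),
  }
  parsed = parse_single_example_v2(serialized_example, features)
  # parsed["label"] is a scalar int64 Tensor.
  # parsed["tags"] is a SparseTensor of dtype string.
  ```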
Args:
serialized: A scalar (0-D Tensor) string, a serialized `Example` proto.
features: A `dict` mapping feature keys to `FixedLenFeature`,
`VarLenFeature`, and `SparseFeature` values.
name: A name for this operation (optional).
Returns:
A `dict` mapping feature keys to `Tensor` and `SparseTensor` values.
Raises:
ValueError: if any feature is invalid.
"""
if not features:
raise ValueError("Missing: features was %s." % features)
features = _prepend_none_dimension(features)
(sparse_keys, sparse_types, dense_keys, dense_types,
dense_defaults, dense_shapes) = _features_to_raw_params(
features,
[VarLenFeature, SparseFeature, FixedLenFeature, FixedLenSequenceFeature])
outputs = _parse_single_example_v2_raw(serialized, sparse_keys, sparse_types,
dense_keys, dense_types,
dense_defaults, dense_shapes, name)
return _construct_sparse_tensors_for_sparse_features(features, outputs)
def _parse_single_example_v2_raw(serialized, sparse_keys, sparse_types,
dense_keys, dense_types, dense_defaults,
dense_shapes, name):
"""Parses `Example` protos.
Args:
serialized: A scalar (0-D Tensor) string, containing a binary
serialized `Example` proto.
sparse_keys: A list of string keys in the examples' features.
The results for these keys will be returned as `SparseTensor` objects.
sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
dense_keys: A list of string keys in the examples' features.
      The results for these keys will be returned as `Tensor`s.
dense_types: A list of DTypes of the same length as `dense_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
dense_defaults: A dict mapping string keys to `Tensor`s.
The keys of the dict must match the dense_keys of the feature.
dense_shapes: A list of tuples with the same length as `dense_keys`.
The shape of the data for each dense feature referenced by `dense_keys`.
Required for any input tensors identified by `dense_keys`. Must be
either fully defined, or may contain an unknown first dimension.
An unknown first dimension means the feature is treated as having
a variable number of blocks, and the output shape along this dimension
is considered unknown at graph build time. Padding is applied for
minibatch elements smaller than the maximum number of blocks for the
given feature along this dimension.
name: A name for this operation (optional).
Returns:
A `dict` mapping keys to `Tensor`s and `SparseTensor`s.
Raises:
ValueError: If sparse and dense key sets intersect, or input lengths do not
match up.
"""
with ops.name_scope(name, "ParseSingleExample", [serialized]):
serialized = ops.convert_to_tensor(serialized, name="serialized")
dense_defaults = collections.OrderedDict(
) if dense_defaults is None else dense_defaults
sparse_keys = [] if sparse_keys is None else sparse_keys
sparse_types = [] if sparse_types is None else sparse_types
dense_keys = [] if dense_keys is None else dense_keys
dense_types = [] if dense_types is None else dense_types
dense_shapes = ([[]] * len(dense_keys)
if dense_shapes is None else dense_shapes)
num_dense = len(dense_keys)
num_sparse = len(sparse_keys)
if len(dense_shapes) != num_dense:
raise ValueError("len(dense_shapes) != len(dense_keys): %d vs. %d" %
(len(dense_shapes), num_dense))
if len(dense_types) != num_dense:
raise ValueError("len(dense_types) != len(num_dense): %d vs. %d" %
(len(dense_types), num_dense))
if len(sparse_types) != num_sparse:
raise ValueError("len(sparse_types) != len(sparse_keys): %d vs. %d" %
(len(sparse_types), num_sparse))
if num_dense + num_sparse == 0:
raise ValueError("Must provide at least one sparse key or dense key")
if not set(dense_keys).isdisjoint(set(sparse_keys)):
raise ValueError(
"Dense and sparse keys must not intersect; intersection: %s" %
set(dense_keys).intersection(set(sparse_keys)))
# Convert dense_shapes to TensorShape object.
dense_shapes = [tensor_shape.as_shape(shape) for shape in dense_shapes]
dense_defaults_vec = []
for i, key in enumerate(dense_keys):
default_value = dense_defaults.get(key)
dense_shape = dense_shapes[i]
if (dense_shape.ndims is not None and dense_shape.ndims > 0 and
dense_shape.dims[0].value is None):
# Variable stride dense shape, the default value should be a
# scalar padding value
if default_value is None:
default_value = ops.convert_to_tensor(
"" if dense_types[i] == dtypes.string else 0,
dtype=dense_types[i])
else:
# Reshape to a scalar to ensure user gets an error if they
# provide a tensor that's not intended to be a padding value
# (0 or 2+ elements).
key_name = "padding_" + re.sub("[^A-Za-z0-9_.\\-/]", "_", key)
default_value = ops.convert_to_tensor(
default_value, dtype=dense_types[i], name=key_name)
default_value = array_ops.reshape(default_value, [])
else:
if default_value is None:
default_value = constant_op.constant([], dtype=dense_types[i])
elif not isinstance(default_value, ops.Tensor):
key_name = "key_" + re.sub("[^A-Za-z0-9_.\\-/]", "_", key)
default_value = ops.convert_to_tensor(
default_value, dtype=dense_types[i], name=key_name)
default_value = array_ops.reshape(default_value, dense_shape)
dense_defaults_vec.append(default_value)
# Finally, convert dense_shapes to TensorShapeProto
dense_shapes = [shape.as_proto() for shape in dense_shapes]
outputs = gen_parsing_ops.parse_single_example(
serialized=serialized,
dense_defaults=dense_defaults_vec,
num_sparse=len(sparse_keys),
sparse_keys=sparse_keys,
sparse_types=sparse_types,
dense_keys=dense_keys,
dense_shapes=dense_shapes,
name=name)
(sparse_indices, sparse_values, sparse_shapes, dense_values) = outputs
sparse_tensors = [
sparse_tensor.SparseTensor(ix, val, shape)
for (ix, val,
shape) in zip(sparse_indices, sparse_values, sparse_shapes)
]
return dict(zip(sparse_keys + dense_keys, sparse_tensors + dense_values))
| apache-2.0 | -5,291,462,859,335,343,000 | 44.197368 | 119 | 0.660929 | false |
tdtrask/ansible | lib/ansible/modules/network/aci/aci_interface_policy_lldp.py | 1 | 6987 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_interface_policy_lldp
short_description: Manage LLDP interface policies on Cisco ACI fabrics (lldp:IfPol)
description:
- Manage LLDP interface policies on Cisco ACI fabrics.
- More information from the internal APIC class I(lldp:IfPol) at
U(https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Dag Wieers (@dagwieers)
version_added: '2.4'
options:
lldp_policy:
description:
- The LLDP interface policy name.
required: yes
aliases: [ name ]
description:
description:
    - The description of the LLDP interface policy.
aliases: [ descr ]
receive_state:
description:
    - Enable or disable the reception (receive state) of LLDP packets on the interface.
required: yes
choices: [ disabled, enabled ]
default: enabled
transmit_state:
description:
    - Enable or disable the transmission (transmit state) of LLDP packets on the interface.
required: false
choices: [ disabled, enabled ]
default: enabled
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
# FIXME: Add more, better examples
EXAMPLES = r'''
- aci_interface_policy_lldp:
host: '{{ hostname }}'
username: '{{ username }}'
password: '{{ password }}'
lldp_policy: '{{ lldp_policy }}'
description: '{{ description }}'
receive_state: '{{ receive_state }}'
transmit_state: '{{ transmit_state }}'
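# Additional illustrative tasks (connection values are placeholders, as above):
- aci_interface_policy_lldp:
    host: '{{ hostname }}'
    username: '{{ username }}'
    password: '{{ password }}'
    lldp_policy: '{{ lldp_policy }}'
    state: absent
- aci_interface_policy_lldp:
    host: '{{ hostname }}'
    username: '{{ username }}'
    password: '{{ password }}'
    state: query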
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
        lldp_policy=dict(type='str', required=False, aliases=['name']),
description=dict(type='str', aliases=['descr']),
receive_state=dict(type='raw'), # Turn into a boolean in v2.9
transmit_state=dict(type='raw'), # Turn into a boolean in v2.9
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'), # Deprecated starting from v2.6
protocol=dict(type='str', removed_in_version='2.6'), # Deprecated in v2.6
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['lldp_policy']],
['state', 'present', ['lldp_policy']],
],
)
    lldp_policy = module.params['lldp_policy']
    description = module.params['description']
    state = module.params['state']
    aci = ACIModule(module)
    receive_state = aci.boolean(module.params['receive_state'], 'enabled', 'disabled')
    transmit_state = aci.boolean(module.params['transmit_state'], 'enabled', 'disabled')
aci.construct_url(
root_class=dict(
aci_class='lldpIfPol',
aci_rn='infra/lldpIfP-{0}'.format(lldp_policy),
filter_target='eq(lldpIfPol.name, "{0}")'.format(lldp_policy),
module_object=lldp_policy,
),
)
aci.get_existing()
if state == 'present':
# Filter out module parameters with null values
aci.payload(
aci_class='lldpIfPol',
class_config=dict(
name=lldp_policy,
descr=description,
adminRxSt=receive_state,
adminTxSt=transmit_state,
),
)
# Generate config diff which will be used as POST request body
aci.get_diff(aci_class='lldpIfPol')
# Submit changes if module not in check_mode and the proposed is different than existing
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
| gpl-3.0 | 4,721,450,852,116,332,000 | 28.1125 | 146 | 0.596536 | false |
ActiveState/code | recipes/Python/579027_Publish_SQLite_datPDF_using_named/recipe-579027.py | 1 | 2776 | # SQLiteToPDFWithNamedTuples.py
# Author: Vasudev Ram - http://www.dancingbison.com
# SQLiteToPDFWithNamedTuples.py is a program to demonstrate how to read
# SQLite database data and convert it to PDF. It uses the Python
# data structure called namedtuple from the collections module of
# the Python standard library.
from __future__ import print_function
import sys
from collections import namedtuple
import sqlite3
from PDFWriter import PDFWriter
# Helper function to output a string to both screen and PDF.
def print_and_write(pw, strng):
print(strng)
pw.writeLine(strng)
try:
# Create the stocks database.
conn = sqlite3.connect('stocks.db')
# Get a cursor to it.
curs = conn.cursor()
# Create the stocks table.
curs.execute('''DROP TABLE IF EXISTS stocks''')
curs.execute('''CREATE TABLE stocks
(date text, trans text, symbol text, qty real, price real)''')
# Insert a few rows of data into the stocks table.
curs.execute("INSERT INTO stocks VALUES ('2006-01-05', 'BUY', 'RHAT', 100, 25.1)")
curs.execute("INSERT INTO stocks VALUES ('2007-02-06', 'SELL', 'ORCL', 200, 35.2)")
curs.execute("INSERT INTO stocks VALUES ('2008-03-07', 'HOLD', 'IBM', 300, 45.3)")
conn.commit()
# Create a namedtuple to represent stock rows.
StockRecord = namedtuple('StockRecord', 'date, trans, symbol, qty, price')
# Run the query to get the stocks data.
curs.execute("SELECT date, trans, symbol, qty, price FROM stocks")
# Create a PDFWriter and set some of its fields.
pw = PDFWriter("stocks.pdf")
pw.setFont("Courier", 12)
pw.setHeader("SQLite data to PDF with named tuples")
pw.setFooter("Generated by xtopdf - https://bitbucket.org/vasudevram/xtopdf")
# Write header info.
hdr_flds = [ str(hdr_fld).rjust(10) + " " for hdr_fld in StockRecord._fields ]
hdr_fld_str = ''.join(hdr_flds)
print_and_write(pw, '=' * len(hdr_fld_str))
print_and_write(pw, hdr_fld_str)
print_and_write(pw, '-' * len(hdr_fld_str))
# Now loop over the fetched data and write it to PDF.
# Map the StockRecord namedtuple's _make class method
# (that creates a new instance) to all the rows fetched.
for stock in map(StockRecord._make, curs.fetchall()):
row = [ str(col).rjust(10) + " " for col in (stock.date, \
stock.trans, stock.symbol, stock.qty, stock.price) ]
# Above line can instead be written more simply as:
# row = [ str(col).rjust(10) + " " for col in stock ]
row_str = ''.join(row)
print_and_write(pw, row_str)
print_and_write(pw, '=' * len(hdr_fld_str))
except Exception as e:
print("ERROR: Caught exception: " + e.message)
sys.exit(1)
finally:
pw.close()
conn.close()
| mit | -1,415,191,299,276,203,300 | 36.013333 | 87 | 0.662104 | false |
HuangFJ/pyeth | app.py | 1 | 2580 | # -*- coding: utf8 -*-
from gevent import monkey; monkey.patch_all()
from pyeth.discovery import EndPoint, Server, Node
import binascii
from urlparse import urlparse
test_boots = [
# "enode://561ab5a08c6f2e486059f2add5d08932e4f0ebbc6c2a2ba5e0f930a5441e65ec59f5b6684b3e75bed380109135d089e56380dad83357f5fda2122fdbdbe7d168@125.178.244.165:30303",
"enode://f815b53feab9f68fb6035181029241729cf3ed4cd253f0b3a6a25d5b7a1912be1c4f12504d48fe54cb67b42bd00f6c6b88ab595fc8facd329b5e4ae76978f4f9@159.69.56.113:10934"
# US-Azure geth
# "enode://30b7ab30a01c124a6cceca36863ece12c4f5fa68e3ba9b0b51407ccc002eeed3b3102d20a88f1c1d3c3154e2449317b8ef95090e77b312d5cc39354f86d5d606@52.176.7.10:30303",
# US-Azure parity
# "enode://865a63255b3bb68023b6bffd5095118fcc13e79dcf014fe4e47e065c350c7cc72af2e53eff895f11ba1bbb6a2b33271c1116ee870f266618eadfc2e78aa7349c@52.176.100.77:30303",
# Parity
# "enode://6332792c4a00e3e4ee0926ed89e0d27ef985424d97b6a45bf0f23e51f0dcb5e66b875777506458aea7af6f9e4ffb69f43f3778ee73c81ed9d34c51c4b16b0b0f@52.232.243.152:30303",
# @gpip
# "enode://94c15d1b9e2fe7ce56e458b9a3b672ef11894ddedd0c6f247e0f1d3487f52b66208fb4aeb8179fce6e3a749ea93ed147c37976d67af557508d199d9594c35f09@192.81.208.223:30303",
]
main_boots = [
# Ethereum Foundation Go Bootnodes
# IE
"enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303",
# US-WEST
"enode://3f1d12044546b76342d59d4a05532c14b85aa669704bfe1f864fe079415aa2c02d743e03218e57a33fb94523adb54032871a6c51b2cc5514cb7c7e35b3ed0a99@13.93.211.84:30303",
# BR
"enode://78de8a0916848093c73790ead81d1928bec737d565119932b98c6b100d944b7a95e94f847f689fc723399d2e31129d182f7ef3863f2b4c820abbf3ab2722344d@191.235.84.50:30303",
# AU
"enode://158f8aab45f6d19c6cbf4a089c2670541a8da11978a2f90dbf6a502a4a3bab80d288afdbeb7ec0ef6d92de563767f3b1ea9e8e334ca711e9f8e2df5a0385e8e6@13.75.154.138:30303",
# SG
"enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303",
# Ethereum Foundation C++ Bootnodes
# DE
"enode://979b7fa28feeb35a4741660a16076f1943202cb72b6af70d327f053e248bab9ba81760f39d0701ef1d8f89cc1fbd2cacba0710a12cd5314d5e0c9021aa3637f9@5.1.83.226:30303",
]
nodes = []
for n in main_boots:
info = urlparse(n)
nodes.append(Node(EndPoint(info.hostname, info.port, info.port), binascii.a2b_hex(info.username)))
server = Server(nodes)
server.run()
| bsd-3-clause | 6,300,188,362,417,611,000 | 55.086957 | 167 | 0.837984 | false |
rtucker-mozilla/mozilla_inventory | mozdns/mozbind/tests/build_tests.py | 1 | 10431 | # These tests are similar to the ones in the scripts directory. They not ran on
# real data so the testing db needs to be filled with info.
import os
from django.test.client import Client
from django.test import TestCase
from mozdns.soa.models import SOA
from mozdns.domain.models import Domain
from mozdns.address_record.models import AddressRecord
from mozdns.view.models import View
from mozdns.tests.utils import random_label, random_byte
from mozdns.mozbind.builder import DNSBuilder, BuildError
from mozdns.tests.utils import create_fake_zone
from core.task.models import Task
from scripts.dnsbuilds.tests.build_tests import BuildScriptTests
class MockBuildScriptTests(BuildScriptTests, TestCase):
def setUp(self):
Domain.objects.get_or_create(name="arpa")
Domain.objects.get_or_create(name="in-addr.arpa")
self.r1, _ = Domain.objects.get_or_create(name="10.in-addr.arpa")
Domain.objects.get_or_create(name="com")
Domain.objects.get_or_create(name="mozilla.com")
        self.client = Client()
super(MockBuildScriptTests, self).setUp()
self.stop_update_file = '/tmp/fake/stop.update'
def get_post_data(self, random_str):
"""Return a valid set of data"""
return {
'root_domain': '{0}.{0}.mozilla.com'.format(
random_label() + random_str),
'soa_primary': 'ns1.mozilla.com',
'soa_contact': 'noc.mozilla.com',
'nameserver_1': 'ns1.mozilla.com',
'nameserver_2': 'ns2.mozilla.com',
'nameserver_3': 'ns3.mozilla.com',
'ttl_1': random_byte(),
'ttl_2': random_byte(),
'ttl_3': random_byte(),
}
def test_build_zone(self):
create_fake_zone('asdf1')
b = DNSBuilder(STAGE_DIR=self.stage_dir, PROD_DIR=self.prod_dir,
LOCK_FILE=self.lock_file, LOG_SYSLOG=False,
FIRST_RUN=True, PUSH_TO_PROD=False,
STOP_UPDATE_FILE=self.stop_update_file)
b.build_dns()
create_fake_zone('asdf2')
b.build_dns()
create_fake_zone('asdf3')
create_fake_zone('asdf4')
b.build_dns()
create_fake_zone('asdf5')
b.build_dns()
def test_change_a_record(self):
root_domain = create_fake_zone('asdfz1')
b = DNSBuilder(STAGE_DIR=self.stage_dir, PROD_DIR=self.prod_dir,
LOCK_FILE=self.lock_file, LOG_SYSLOG=False,
FIRST_RUN=True, PUSH_TO_PROD=False,
STOP_UPDATE_FILE=self.stop_update_file)
b.build_dns() # This won't check anything in since PUSH_TO_PROD==False
self.assertEqual((26, 0), b.svn_lines_changed(b.PROD_DIR))
b.PUSH_TO_PROD = True
b.build_dns() # This checked stuff in
# no lines should have changed
b.build_dns()
self.assertEqual((0, 0), b.svn_lines_changed(b.PROD_DIR))
# Now add a record.
a, c = AddressRecord.objects.get_or_create(
label='', domain=root_domain, ip_str="10.0.0.1", ip_type='4'
)
a.views.add(View.objects.get_or_create(name='private')[0])
if not c:
a.ttl = 8
a.save()
self.assertTrue(SOA.objects.get(pk=root_domain.soa.pk).dirty)
tmp_serial = SOA.objects.get(pk=root_domain.soa.pk).serial
b.PUSH_TO_PROD = False # Task isn't deleted
        b.build_dns() # Serial gets incremented
self.assertEqual(
SOA.objects.get(pk=root_domain.soa.pk).serial, tmp_serial + 1
)
self.assertFalse(SOA.objects.get(pk=root_domain.soa.pk).dirty)
# added new record (1) and new serials (2 for both views), old serials
# removed.
self.assertEqual((3, 2), b.svn_lines_changed(b.PROD_DIR))
tmp_serial = SOA.objects.get(pk=root_domain.soa.pk).serial
self.assertFalse(SOA.objects.get(pk=root_domain.soa.pk).dirty)
b.PUSH_TO_PROD = True
b.build_dns()
self.assertFalse(SOA.objects.get(pk=root_domain.soa.pk).dirty)
# Serial is again incremented because PUSH_TO_PROD was False during the
        # last build. When PUSH_TO_PROD is false, no scheduled tasks are
        # deleted, so we should still see this SOA being rebuilt.
self.assertEqual(
SOA.objects.get(pk=root_domain.soa.pk).serial, tmp_serial + 1
)
self.assertEqual((0, 0), b.svn_lines_changed(b.PROD_DIR))
# no lines should have changed if we would have built again
self.assertFalse(SOA.objects.get(pk=root_domain.soa.pk).dirty)
tmp_serial = SOA.objects.get(pk=root_domain.soa.pk).serial
b.PUSH_TO_PROD = False
b.build_dns()
self.assertEqual(SOA.objects.get(pk=root_domain.soa.pk).serial,
tmp_serial)
self.assertFalse(SOA.objects.get(pk=root_domain.soa.pk).dirty)
self.assertEqual((0, 0), b.svn_lines_changed(b.PROD_DIR))
def test_one_file_svn_lines_changed(self):
b = DNSBuilder(STAGE_DIR=self.stage_dir, PROD_DIR=self.prod_dir,
LOCK_FILE=self.lock_file, LOG_SYSLOG=False,
FIRST_RUN=True, PUSH_TO_PROD=False,
STOP_UPDATE_FILE=self.stop_update_file)
test_file = os.path.join(self.prod_dir, 'test')
with open(test_file, 'w+') as fd:
fd.write('line 1\n')
lc = b.svn_lines_changed(b.PROD_DIR)
self.assertEqual((1, 0), lc)
b.svn_checkin(lc)
with open(test_file, 'w+') as fd:
fd.write('line 1\nline 2\n')
lc = b.svn_lines_changed(b.PROD_DIR)
self.assertEqual((1, 0), lc)
b.svn_checkin(lc)
with open(test_file, 'w+') as fd:
fd.write('line 1\n')
lc = b.svn_lines_changed(b.PROD_DIR)
self.assertEqual((0, 1), lc)
b.svn_checkin(lc)
def test_too_many_config_lines_changed(self):
create_fake_zone('asdf86')
root_domain1 = create_fake_zone('asdf87')
root_domain2 = create_fake_zone('asdf88')
root_domain3 = create_fake_zone('asdf89')
b = DNSBuilder(STAGE_DIR=self.stage_dir, PROD_DIR=self.prod_dir,
LOCK_FILE=self.lock_file, LOG_SYSLOG=False,
FIRST_RUN=True, PUSH_TO_PROD=True,
STOP_UPDATE_FILE=self.stop_update_file)
b.build_dns()
for ns in root_domain1.nameserver_set.all():
ns.delete()
b.build_dns() # One zone removed should be okay
for ns in root_domain2.nameserver_set.all():
ns.delete()
for ns in root_domain3.nameserver_set.all():
ns.delete()
self.assertRaises(BuildError, b.build_dns)
def test_two_file_svn_lines_changed(self):
b = DNSBuilder(STAGE_DIR=self.stage_dir, PROD_DIR=self.prod_dir,
LOCK_FILE=self.lock_file, LOG_SYSLOG=False,
FIRST_RUN=True, PUSH_TO_PROD=False,
STOP_UPDATE_FILE=self.stop_update_file)
test1_file = os.path.join(self.prod_dir, 'test1')
test2_file = os.path.join(self.prod_dir, 'test2')
with open(test1_file, 'w+') as fd:
fd.write('line 1.1\n')
lc = b.svn_lines_changed(b.PROD_DIR)
self.assertEqual((1, 0), lc)
b.svn_checkin(lc)
with open(test1_file, 'w+') as fd:
fd.write('line 1.1\nline 1.2\n')
with open(test2_file, 'w+') as fd:
fd.write('line 2.1\nline 2.2\n')
lc = b.svn_lines_changed(b.PROD_DIR)
self.assertEqual((3, 0), lc)
b.svn_checkin(lc)
with open(test1_file, 'w+') as fd:
fd.write('line 1\n')
lc = b.svn_lines_changed(b.PROD_DIR)
self.assertEqual((1, 2), lc)
b.svn_checkin(lc)
with open(test1_file, 'w+') as fd:
fd.write('line 1.1\nline 1.2\n')
with open(test2_file, 'w+') as fd:
fd.write('line 2.3\nline 2.4\n')
lc = b.svn_lines_changed(b.PROD_DIR)
self.assertEqual((4, 3), lc)
b.svn_checkin(lc)
def test_svn_conflict(self):
"""
This uses tasks as a block box measurement to see if conflicts are
being handled
"""
root_domain = create_fake_zone('conflict')
b1 = DNSBuilder(STAGE_DIR=self.stage_dir, PROD_DIR=self.prod_dir,
LOCK_FILE=self.lock_file, LOG_SYSLOG=False,
FIRST_RUN=True, PUSH_TO_PROD=True,
STOP_UPDATE_FILE=self.stop_update_file)
b1.build_dns() # This checked stuff in
        # Check the repo out somewhere else
command_str = "svn co file://{0} {1}".format(
self.svn_repo, self.prod_dir2
)
b1.shell_out(command_str)
# Calculate the path to the zone file so we can tamper with it.
fm = b1.get_file_meta(
View.objects.get(name='public'), root_domain,
root_domain.soa
)
# Make local changes
fname = fm['prod_fname'].replace(self.prod_dir, self.prod_dir2)
with open(fname, 'a') as fd:
fd.write(";foobar")
# Check those changes in.
b1.PROD_DIR = self.prod_dir2 # Cheat and swap the dirs
b1.vcs_checkin()
b1.PROD_DIR = self.prod_dir # Fix our little cheat
b1.FORCE = True # Force a build
# Add something to the end of the file to cause a collision
a = AddressRecord.objects.create(
label="zeenada", domain=root_domain, ip_type='4',
ip_str='255.0.0.0'
)
a.views.add(View.objects.get(name='public'))
# Alright, we should have conflicts here. See if we detect it by
# counting how many tasks need to be serviced. If the number remains
# the same that means we aborted the build due to a conflict
pre_task_count = Task.objects.all().count()
b1.build_dns()
post_task_count = Task.objects.all().count()
self.assertEqual(pre_task_count, post_task_count)
# Conflicts should be resolved. Let's see if we build successfully
pre_task_count = Task.objects.all().count()
b1.build_dns()
post_task_count = Task.objects.all().count()
self.assertTrue(pre_task_count != 0)
self.assertEqual(0, post_task_count)
| bsd-3-clause | -7,636,081,866,384,204,000 | 37.633333 | 79 | 0.584795 | false |
centaurialpha/edis | src/ui/dialogs/preferences/preferences.py | 1 | 4181 | # -*- coding: utf-8 -*-
# EDIS - a simple cross-platform IDE for C
#
# This file is part of Edis
# Copyright 2014-2015 - Gabriel Acosta <acostadariogabriel at gmail>
# License: GPLv3 (see http://www.gnu.org/licenses/gpl.html)
from PyQt4.QtGui import (
QDialog,
QVBoxLayout,
QHBoxLayout,
QIcon,
QToolBar,
QStackedWidget,
QPushButton,
QGraphicsOpacityEffect,
QShortcut,
QKeySequence
)
from PyQt4.QtCore import (
Qt,
SIGNAL,
QSize,
QPropertyAnimation
)
from src.ui.main import Edis
class Preferences(QDialog):
def __init__(self, parent=None):
QDialog.__init__(self, parent)
self.setWindowTitle(self.tr("Configuraciones - Edis"))
self.__sections = []
# Opacity effect
self.effect = QGraphicsOpacityEffect()
self.setGraphicsEffect(self.effect)
self.animation = QPropertyAnimation(self.effect, "opacity")
Edis.load_component("preferences", self)
# Install sections
#lint:disable
from src.ui.dialogs.preferences import (
environment_configuration,
editor_configuration,
compiler_configuration
)
#lint:enable
self.load_ui()
key_escape = QShortcut(QKeySequence(Qt.Key_Escape), self)
self.connect(key_escape, SIGNAL("activated()"), self.close)
self.connect(self.btn_cancel, SIGNAL("clicked()"), self.close)
self.connect(self.btn_guardar, SIGNAL("clicked()"), self._save)
def install_section(self, obj):
self.__sections.append(obj)
def load_ui(self):
container = QVBoxLayout(self)
box = QHBoxLayout()
box.setContentsMargins(0, 0, 0, 0)
box.setSpacing(0)
toolbar = QToolBar()
toolbar.setToolButtonStyle(3)
toolbar.setOrientation(Qt.Vertical)
toolbar.setIconSize(QSize(30, 30))
toolbar.setObjectName("preferencias")
environment_section = toolbar.addAction(
QIcon(":image/general-pref"), "Entorno")
editor_section = toolbar.addAction(
QIcon(":image/editor-pref"), "Editor")
compiler_section = toolbar.addAction(
QIcon(":image/compiler-pref"), "Compilador")
self.connect(environment_section, SIGNAL("triggered()"),
lambda: self.change_widget(0))
self.connect(editor_section, SIGNAL("triggered()"),
lambda: self.change_widget(1))
self.connect(compiler_section, SIGNAL("triggered()"),
lambda: self.change_widget(2))
# Set size
for action in toolbar.actions():
widget = toolbar.widgetForAction(action)
widget.setFixedSize(80, 50)
box.addWidget(toolbar)
self.stack = QStackedWidget()
box.addWidget(self.stack)
# Load sections and subsections
for section in self.__sections:
for name, obj in list(section.get_tabs().items()):
section.install_tab(obj, name)
self.stack.addWidget(section)
box_buttons = QHBoxLayout()
box_buttons.setMargin(10)
box_buttons.setSpacing(10)
box_buttons.addStretch(1)
self.btn_cancel = QPushButton(self.tr("Cancelar"))
self.btn_guardar = QPushButton(self.tr("Guardar"))
box_buttons.addWidget(self.btn_cancel)
box_buttons.addWidget(self.btn_guardar)
container.addLayout(box)
container.addLayout(box_buttons)
def change_widget(self, index):
if not self.isVisible():
self.show()
self.stack.setCurrentIndex(index)
def _save(self):
for index in range(self.stack.count()):
self.stack.widget(index).save()
self.close()
def close(self):
super(Preferences, self).close()
self.emit(SIGNAL("configurationsClose(PyQt_PyObject)"), self)
def showEvent(self, event):
super(Preferences, self).showEvent(event)
self.animation.setDuration(400)
self.animation.setStartValue(0)
self.animation.setEndValue(1)
self.animation.start()
preferences = Preferences() | gpl-3.0 | 6,063,191,720,508,532,000 | 30.208955 | 71 | 0.615881 | false |
cscorley/mud2014-modeling-changeset-topics | src/corpora.py | 1 | 8562 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# [The "New BSD" license]
# Copyright (c) 2014 The Board of Trustees of The University of Alabama
# All rights reserved.
#
# See LICENSE for details.
"""
Code for generating the corpora.
"""
from StringIO import StringIO
import re
import gensim
import dulwich
import dulwich.repo
import dulwich.patch
from preprocessing import tokenize, split, remove_stops, read_stops, to_unicode
import logging
logger = logging.getLogger('mct.corpora')
STOPS = read_stops([
'data/english_stops.txt',
'data/java_reserved.txt',
])
class GitCorpus(gensim.interfaces.CorpusABC):
"""
Helper class to simplify the pipeline of getting bag-of-words vectors (=
a gensim corpus) from plain text.
This is an abstract base class: override the `get_texts()` method to match
your particular input.
    Given a dulwich repository object and a ref in the constructor, the corpus
    object will be automatically initialized with a dictionary in `self.id2word`
    and will support the `iter` corpus method. You must only provide a correct
    `get_texts` implementation.
"""
def __init__(self, repo=None, ref='HEAD', remove_stops=True,
split=True, lower=True, min_len=3, max_len=40,
lazy_dict=False):
logger.info('Creating %s corpus out of source files for commit %s' % (
self.__class__.__name__, ref))
self.repo = repo
self.remove_stops = remove_stops
self.split = split
self.lower = lower
self.min_len = min_len
self.max_len = max_len
self.lazy_dict = lazy_dict
self.id2word = gensim.corpora.Dictionary()
self.metadata = False
# ensure ref is a str otherwise dulwich cries
if type(ref) is unicode:
self.ref = ref.encode('utf-8')
else:
self.ref = ref
self.ref_tree = None
if repo is not None:
# find which file tree is for the commit we care about
self.ref_tree = self.repo[self.ref].tree
if not lazy_dict:
# build the dict (not lazy)
self.id2word.add_documents(self.get_texts())
super(GitCorpus, self).__init__()
def preprocess(self, document, info=[]):
document = to_unicode(document, info)
words = tokenize(document)
if self.split:
words = split(words)
if self.lower:
words = (word.lower() for word in words)
if self.remove_stops:
words = remove_stops(words, STOPS)
def include(word):
return len(word) >= self.min_len and len(word) <= self.max_len
words = (word for word in words if include(word))
return words
def __iter__(self):
"""
The function that defines a corpus.
Iterating over the corpus must yield sparse vectors, one for each
document.
"""
for text in self.get_texts():
if self.metadata:
meta = text[1]
text = text[0]
yield (self.id2word.doc2bow(text, allow_update=self.lazy_dict),
meta)
else:
yield self.id2word.doc2bow(text, allow_update=self.lazy_dict)
def get_texts(self):
"""
Iterate over the collection, yielding one document at a time. A document
is a sequence of words (strings) that can be fed into
`Dictionary.doc2bow`.
Override this function to match your input (parse input files, do any
text preprocessing, lowercasing, tokenizing etc.). There will be no
further preprocessing of the words coming out of this function.
"""
raise NotImplementedError
def __len__(self):
return self.length # will throw if corpus not initialized
class MultiTextCorpus(GitCorpus):
def get_texts(self):
length = 0
for entry in self.repo.object_store.iter_tree_contents(self.ref_tree):
fname = entry.path
document = self.repo.object_store.get_raw(entry.sha)[1]
if dulwich.patch.is_binary(document):
continue
words = self.preprocess(document, [fname, self.ref])
length += 1
if self.metadata:
yield words, (fname, u'en')
else:
yield words
self.length = length # only reset after iteration is done.
class ChangesetCorpus(GitCorpus):
def _get_diff(self, changeset):
""" Return a text representing a `git diff` for the files in the
changeset.
"""
patch_file = StringIO()
dulwich.patch.write_object_diff(patch_file,
self.repo.object_store,
changeset.old, changeset.new)
return patch_file.getvalue()
def _walk_changes(self, reverse=False):
""" Returns one file change at a time, not the entire diff.
"""
for walk_entry in self.repo.get_walker(reverse=reverse):
commit = walk_entry.commit
# initial revision, has no parent
if len(commit.parents) == 0:
for changes in dulwich.diff_tree.tree_changes(
self.repo.object_store, None, commit.tree
):
diff = self._get_diff(changes)
yield commit.id, None, diff
for parent in commit.parents:
# do I need to know the parent id?
for changes in dulwich.diff_tree.tree_changes(
self.repo.object_store, self.repo[parent].tree, commit.tree
):
diff = self._get_diff(changes)
yield commit.id, parent, diff
def get_texts(self):
length = 0
unified = re.compile(r'^[+ -].*')
current = None
low = list() # collecting the list of words
for commit, parent, diff in self._walk_changes():
# write out once all diff lines for commit have been collected
# this is over all parents and all files of the commit
if current is None:
# set current for the first commit, clear low
current = commit
low = list()
elif current != commit:
# new commit seen, yield the collected low
if self.metadata:
yield low, (current, u'en')
else:
yield low
length += 1
current = commit
low = list()
# to process out whitespace only changes, the rest of this
# loop will need to be structured differently. possibly need
# to actually parse the diff to gain structure knowledge
# (ie, line numbers of the changes).
diff_lines = filter(lambda x: unified.match(x),
diff.splitlines())
if len(diff_lines) < 2:
continue # useful for not worrying with binary files
# sanity?
assert diff_lines[0].startswith('--- '), diff_lines[0]
assert diff_lines[1].startswith('+++ '), diff_lines[1]
# parent_fn = diff_lines[0][4:]
# commit_fn = diff_lines[1][4:]
lines = diff_lines[2:] # chop off file names hashtag rebel
lines = [line[1:] for line in lines] # remove unified markers
document = ' '.join(lines)
# call the tokenizer
words = self.preprocess(document,
[commit, str(parent), diff_lines[0]])
low.extend(words)
length += 1
if self.metadata:
# have reached the end, yield whatever was collected last
yield low, (current, u'en')
else:
yield low
self.length = length # only reset after iteration is done.
class CommitLogCorpus(GitCorpus):
def get_texts(self):
length = 0
for walk_entry in self.repo.get_walker():
commit = walk_entry.commit
words = self.preprocess(commit.message, [commit.id])
length += 1
if self.metadata:
# have reached the end, yield whatever was collected last
yield words, (commit.id, u'en')
else:
yield words
self.length = length # only reset after iteration is done.
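# Illustrative usage sketch (not part of the original module). The repository
# path below is a placeholder for any local git repository readable by dulwich.
if __name__ == '__main__':
    repo = dulwich.repo.Repo('/path/to/some/git/repo')
    corpus = ChangesetCorpus(repo=repo, ref='HEAD')
    print('vocabulary size: %d' % len(corpus.id2word))
    for bow in corpus:
        # each `bow` is a gensim bag-of-words vector for one changeset's diff
        pass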
| bsd-3-clause | -8,410,882,084,569,625,000 | 31.309434 | 80 | 0.560266 | false |
psigen/miridan | pddlpy/predicate.py | 1 | 2501 | """
Module that defines all predicates in this world.
"""
import inspect
from pddlpy.scope import Scope
class BasePredicate(object):
"""
Represents a predicate that already has grounded atoms.
"""
def __init__(self, *args, **kwargs):
        argnames, _, _, _ = inspect.getargspec(self.__call__)
        # Drop the implicit 'self' reported by getargspec so that positional
        # atoms line up with the declared argument names.
        argnames = argnames[1:]
        # Save positional arguments (by converting to keyword args)
        self.args = {}
        self.args.update(dict(zip(argnames, args)))
# Save keyword arguments (by validating against existing args)
if kwargs is not None:
for (k, v) in kwargs.iteritems():
if k not in argnames:
raise KeyError("Invalid argument '{}' for {}."
.format(k, self.__class__.name))
self.args.update(kwargs)
self.__call__ = self.__class__.__nonzero__
def __getattr__(self, item):
"""
Attempt to resolve unknown attributes from Domain scope.
"""
try:
return Scope.root[item]
except KeyError:
raise AttributeError(item)
def __nonzero__(self):
"""
Evaluate this grounded predicate.
"""
return self.__class__.__call__(self, **self.args)
def __and__(self, other):
"""
Create a new joint predicate.
"""
return AndPredicate(self, other)
def __invert__(self):
"""
Create a new joint predicate.
"""
return NotPredicate(self)
def __call__(self, *args, **kwargs):
"""
Empty implementation of predicate is trivially true.
"""
return True
def why(self):
"""
Diagnostic function to determine why a predicate failed.
"""
return str(self)
class AndPredicate(BasePredicate):
def __init__(self, left, right):
self.left = left
self.right = right
self.__call__ = self.__class__.__nonzero__
def __nonzero__(self):
return bool(self.left) & bool(self.right)
def why(self):
if not self.left:
return self.left.why()
else:
return self.right.why()
class NotPredicate(BasePredicate):
def __init__(self, inner):
self.inner = inner
self.__call__ = self.__class__.__nonzero__
def __nonzero__(self):
return not bool(self.inner)
def why(self):
return "NOT(" + self.inner.why() + ")"
class Predicate(BasePredicate):
pass
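# Illustrative sketch (not part of the original module): a concrete predicate
# composed with `&` and `~`. The class and argument names are hypothetical.
if __name__ == '__main__':
    class IsHeavy(Predicate):
        def __call__(self, thing, threshold=10):
            # Pretend everything weighs 15 units.
            return 15 > threshold
    heavy = IsHeavy('anvil')
    light = ~IsHeavy('feather', threshold=100)
    print(bool(heavy & light))  # True: both sub-predicates hold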
| bsd-2-clause | -1,814,435,613,284,187,400 | 24.520408 | 70 | 0.541783 | false |
cstipkovic/spidermonkey-research | testing/mozbase/mozlog/mozlog/scripts/unstable.py | 1 | 3593 | import argparse
from collections import defaultdict
import json
from mozlog import reader
class StatusHandler(reader.LogHandler):
def __init__(self):
self.run_info = None
self.statuses = defaultdict(lambda:defaultdict(lambda:defaultdict(lambda: defaultdict(int))))
def test_id(self, test):
if type(test) in (str, unicode):
return test
else:
return tuple(test)
def suite_start(self, item):
self.run_info = tuple(sorted(item.get("run_info", {}).items()))
def test_status(self, item):
self.statuses[self.run_info][self.test_id(item["test"])][item["subtest"]][item["status"]] += 1
def test_end(self, item):
self.statuses[self.run_info][self.test_id(item["test"])][None][item["status"]] += 1
def suite_end(self, item):
self.run_info = None
def get_statuses(filenames):
handler = StatusHandler()
for filename in filenames:
with open(filename) as f:
reader.handle_log(reader.read(f), handler)
return handler.statuses
def _filter(results_cmp):
def inner(statuses):
rv = defaultdict(lambda:defaultdict(dict))
for run_info, tests in statuses.iteritems():
for test, subtests in tests.iteritems():
for name, results in subtests.iteritems():
if results_cmp(results):
rv[run_info][test][name] = results
return rv
return inner
filter_unstable = _filter(lambda x: len(x) > 1)
filter_stable = _filter(lambda x: len(x) == 1)
def group_results(data):
rv = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
for run_info, tests in data.iteritems():
for test, subtests in tests.iteritems():
for name, results in subtests.iteritems():
for status, number in results.iteritems():
rv[test][name][status] += number
return rv
def print_results(data):
for run_info, tests in data.iteritems():
run_str = " ".join("%s:%s" % (k,v) for k,v in run_info) if run_info else "No Run Info"
print run_str
print "=" * len(run_str)
print_run(tests)
def print_run(tests):
for test, subtests in sorted(tests.items()):
print "\n" + str(test)
print "-" * len(test)
for name, results in subtests.iteritems():
print "[%s]: %s" % (name if name is not None else "",
" ".join("%s (%i)" % (k,v) for k,v in results.iteritems()))
def get_parser(add_help=True):
parser = argparse.ArgumentParser("unstable",
description="List tests that don't give consistent results from one or more runs.", add_help=add_help)
parser.add_argument("--json", action="store_true", default=False,
help="Output in JSON format")
parser.add_argument("--group", action="store_true", default=False,
help="Group results from different run types")
parser.add_argument("log_file", nargs="+",
help="Log files to read")
return parser
def main(**kwargs):
unstable = filter_unstable(get_statuses(kwargs["log_file"]))
if kwargs["group"]:
unstable = group_results(unstable)
if kwargs["json"]:
print json.dumps(unstable)
else:
if not kwargs["group"]:
print_results(unstable)
else:
print_run(unstable)
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
kwargs = vars(args)
main(**kwargs)
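# Illustrative programmatic use (the log file names are placeholders):
#   statuses = get_statuses(["wpt_run1.log", "wpt_run2.log"])
#   print_run(group_results(filter_unstable(statuses)))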
| mpl-2.0 | 7,686,368,141,913,607,000 | 32.268519 | 139 | 0.588088 | false |
PinguinoIDE/pinguino-ide | pinguino/qtgui/ide/child_windows/submit_bug.py | 1 | 9803 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
import os
import sys
import platform
import logging
from datetime import datetime
from PySide2 import QtGui, QtCore, QtWidgets
import requests
import json
# Python3 compatibility
if os.getenv("PINGUINO_PYTHON") is "3":
#Python3
#from urllib.request import urlopen
#from urllib.parse import urlencode
from configparser import RawConfigParser
else:
#Python2
#from urllib import urlopen, urlencode
from ConfigParser import RawConfigParser
from ...frames.submit_bug import Ui_SubmitBug
from ..methods.dialogs import Dialogs
#if os.getenv("PINGUINO_MODE") == "NORMAL":
#SUBMIT_SERVER = "http://submit.pinguino.xyz/submit/"
#else:
#SUBMIT_SERVER = "http://localhost:8000/submit/"
########################################################################
class SubmitBug(QtWidgets.QDialog):
def __init__(self, parent, details=""):
super(SubmitBug, self).__init__()
self.setWindowFlags(QtCore.Qt.WindowCloseButtonHint |
QtCore.Qt.WindowSystemMenuHint |
QtCore.Qt.WindowStaysOnTopHint |
QtCore.Qt.Tool)
self.submit = Ui_SubmitBug()
self.submit.setupUi(self)
self.main = parent
username, password = self.get_auth()
self.submit.lineEdit_username.setText(username)
self.submit.lineEdit_password.setText(password)
self.submit.plainTextEdit_details.insertPlainText(details)
self.setWindowTitle(os.getenv("PINGUINO_NAME")+" - "+self.windowTitle())
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/logo/art/windowIcon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.setWindowIcon(icon)
self.connect(self.submit.pushButton_submit, QtCore.SIGNAL("clicked()"), self.submit_now)
self.connect(self.submit.pushButton_cancel, QtCore.SIGNAL("clicked()"), self.close)
self.connect(self.submit.checkBox_show_this_dialog, QtCore.SIGNAL("clicked()"), self.update_submit_dialog)
self.setStyleSheet("""
font-family: inherit;
font-weight: normal;
""")
self.center_on_screen()
#----------------------------------------------------------------------
def center_on_screen(self):
screen = QtWidgets.QDesktopWidget().screenGeometry()
size = self.geometry()
self.move((screen.width()-size.width())/2, (screen.height()-size.height())/2)
#----------------------------------------------------------------------
def update_submit_dialog(self):
self.main.configIDE.set("Features", "submmit_bugs", self.submit.checkBox_show_this_dialog.isChecked())
#----------------------------------------------------------------------
def submit_now(self):
""""""
summary = self.submit.lineEdit_summary.text().replace("\n", "<br>")
details = self.submit.plainTextEdit_details.toPlainText()
username = self.submit.lineEdit_username.text()
password = self.submit.lineEdit_password.text()
repo = self.submit.comboBox_repo.currentText()
if not details:
Dialogs.error_message(self, "No details!")
return
if not username:
Dialogs.error_message(self, "No userbane!")
return
if not password:
Dialogs.error_message(self, "No password!")
return
environ = self.get_systeminfo()
logging.info("Submitting bug report.")
maked = self.make_github_issue(summary, details, repo, environ, username, password)
#response = urlopen(SUBMIT_SERVER, urlencode({"summary": summary, "details": details, "environ": environ,}).encode("utf-8"))
#share_link = eval(response.read())["share"]
self.hide()
if not maked:
if details and environ:
self.save_for_later(summary, details, repo, environ, username, password)
logging.error("ConnectionError")
self.close()
#----------------------------------------------------------------------
def get_systeminfo(self):
data = {}
try: data["os.name"] = str(os.name)
except: pass
try: data["os.environ"] = str(os.environ)
except: pass
try: data["os.uname"] = str(os.uname())
except: pass
try: data["sys.argv"] = str(sys.argv)
except: pass
try: data["sys.flags"] = str(sys.flags)
except: pass
try: data["sys.platform"] = str(sys.platform)
except: pass
try: data["sys.version"] = str(sys.version)
except: pass
try: data["platform.architecture"] = str(platform.architecture())
except: pass
try: data["platform.dist"] = str(platform.dist())
except: pass
try: data["platform.linux_distribution"] = str(platform.linux_distribution())
except: pass
try: data["platform.mac_ver"] = str(platform.mac_ver())
except: pass
try: data["platform.system"] = str(platform.system())
except: pass
try: data["platform.win32_ver"] = str(platform.win32_ver())
except: pass
try: data["platform.libc_ver"] = str(platform.libc_ver())
except: pass
try: data["platform.machine"] = str(platform.machine())
except: pass
try: data["platform.platform"] = str(platform.platform())
except: pass
try: data["platform.release"] = str(platform.release())
except: pass
try: data["pyusb"] = str(usb.__version__)
except: pass
return "\n".join([": ".join(item) for item in data.items()])
#----------------------------------------------------------------------
def save_for_later(self, summary, details, repo, environ, username, password):
""""""
parser = RawConfigParser()
parser.add_section("SUBMIT")
parser.set("SUBMIT", "summary", summary)
parser.set("SUBMIT", "details", details)
parser.set("SUBMIT", "repo", repo)
parser.set("SUBMIT", "environ", environ)
parser.set("SUBMIT", "username", username)
parser.set("SUBMIT", "password", password)
filename = os.path.join(os.getenv("PINGUINO_USER_PATH"), "submit-{}".format(datetime.now()))
parser.write(open(filename, "w"))
#----------------------------------------------------------------------
def save_auth(self, username, password):
""""""
parser = RawConfigParser()
parser.add_section("AUTH")
parser.set("AUTH", "username", username)
parser.set("AUTH", "password", password)
filename = os.path.join(os.getenv("PINGUINO_USER_PATH"), "submit-auth")
parser.write(open(filename, "w"))
#----------------------------------------------------------------------
def get_auth(self):
""""""
if not os.path.exists(os.path.join(os.getenv("PINGUINO_USER_PATH"), "submit-auth")):
return '', ''
parser = RawConfigParser()
filename = os.path.join(os.getenv("PINGUINO_USER_PATH"), "submit-auth")
parser.readfp(open(filename, "r"))
username = parser.get("AUTH", "username")
password = parser.get("AUTH", "password")
return username, password
#----------------------------------------------------------------------
def make_github_issue(self, summary, details, repo, environ, username, password):
'''Create an issue on github.com using the given parameters.'''
# Our url to create issues via POST
url = 'https://api.github.com/repos/{}/{}/issues'.format('PinguinoIDE', repo)
# Create an authenticated session to create the issue
#session = requests.session(auth=(username, password))
session = requests.Session()
session.auth = (username, password)
# Create our issue
issue = {'title': summary,
'body': "{}\n\n{}".format(details, environ),
'labels': ['submitted-from-ide',
'bug',
'v{}'.format(os.environ["PINGUINO_VERSION"][:2]),
],
}
# Add the issue to our repository
r = session.post(url, json.dumps(issue))
if r.status_code == 201:
logging.info('Successfully created Issue "{}"'.format(summary))
self.save_auth(username, password)
return r.status_code == 201
#----------------------------------------------------------------------
def send_old_submits():
submits = filter(lambda s:s.startswith("submit-"), os.listdir(os.getenv("PINGUINO_USER_PATH")))
for submit in submits:
parser = RawConfigParser()
filename = os.path.join(os.getenv("PINGUINO_USER_PATH"), submit)
parser.readfp(open(filename, "r"))
summary = parser.get("SUBMIT", "summary")
details = parser.get("SUBMIT", "details")
repo = parser.get("SUBMIT", "repo")
environ = parser.get("SUBMIT", "environ")
username = parser.get("SUBMIT", "username")
password = parser.get("SUBMIT", "password")
try:
            url = 'https://api.github.com/repos/{}/{}/issues'.format('PinguinoIDE', repo)
session = requests.Session()
session.auth = (username, password)
issue = {'title': summary,
'body': "{}\n\n{}".format(details, environ),
'labels': ['submitted-from-ide',
'bug',
'v{}'.format(os.environ["PINGUINO_VERSION"][:2]),
],
}
r = session.post(url, json.dumps(issue))
os.remove(filename)
except:
pass
| gpl-2.0 | -8,652,381,418,289,209,000 | 35.442379 | 132 | 0.540345 | false |
ESOedX/edx-platform | lms/djangoapps/certificates/admin.py | 1 | 3118 | """
django admin pages for certificates models
"""
from __future__ import absolute_import
from operator import itemgetter
from config_models.admin import ConfigurationModelAdmin
from django import forms
from django.conf import settings
from django.contrib import admin
from lms.djangoapps.certificates.models import (
CertificateGenerationConfiguration,
CertificateGenerationCourseSetting,
CertificateHtmlViewConfiguration,
CertificateTemplate,
CertificateTemplateAsset,
GeneratedCertificate
)
from util.organizations_helpers import get_organizations
class CertificateTemplateForm(forms.ModelForm):
"""
Django admin form for CertificateTemplate model
"""
def __init__(self, *args, **kwargs):
super(CertificateTemplateForm, self).__init__(*args, **kwargs)
organizations = get_organizations()
org_choices = [(org["id"], org["name"]) for org in organizations]
org_choices.insert(0, ('', 'None'))
self.fields['organization_id'] = forms.TypedChoiceField(
choices=org_choices, required=False, coerce=int, empty_value=None
)
languages = list(settings.CERTIFICATE_TEMPLATE_LANGUAGES.items())
lang_choices = sorted(languages, key=itemgetter(1))
lang_choices.insert(0, (None, 'All Languages'))
self.fields['language'] = forms.ChoiceField(
choices=lang_choices, required=False
)
class Meta(object):
model = CertificateTemplate
fields = '__all__'
class CertificateTemplateAdmin(admin.ModelAdmin):
"""
Django admin customizations for CertificateTemplate model
"""
list_display = ('name', 'description', 'organization_id', 'course_key', 'mode', 'language', 'is_active')
form = CertificateTemplateForm
class CertificateTemplateAssetAdmin(admin.ModelAdmin):
"""
Django admin customizations for CertificateTemplateAsset model
"""
list_display = ('description', 'asset_slug',)
prepopulated_fields = {"asset_slug": ("description",)}
class GeneratedCertificateAdmin(admin.ModelAdmin):
"""
Django admin customizations for GeneratedCertificate model
"""
raw_id_fields = ('user',)
show_full_result_count = False
search_fields = ('course_id', 'user__username')
list_display = ('id', 'course_id', 'mode', 'user')
class CertificateGenerationCourseSettingAdmin(admin.ModelAdmin):
"""
Django admin customizations for CertificateGenerationCourseSetting model
"""
list_display = ('course_key', 'self_generation_enabled', 'language_specific_templates_enabled')
search_fields = ('course_key',)
show_full_result_count = False
admin.site.register(CertificateGenerationConfiguration)
admin.site.register(CertificateGenerationCourseSetting, CertificateGenerationCourseSettingAdmin)
admin.site.register(CertificateHtmlViewConfiguration, ConfigurationModelAdmin)
admin.site.register(CertificateTemplate, CertificateTemplateAdmin)
admin.site.register(CertificateTemplateAsset, CertificateTemplateAssetAdmin)
admin.site.register(GeneratedCertificate, GeneratedCertificateAdmin)
| agpl-3.0 | 1,246,055,720,484,850,000 | 34.431818 | 108 | 0.728993 | false |
gaolingyun/eps-reconfig | time_next_best_action_1_simple.py | 1 | 2121 | from greedy import *
import time
filename = 'circuit_sensor_1_Simplified.net'
G = nx.DiGraph()
G = read_netlist(filename)
uncon_comp_tups = []
contactor_tups = []
declaration = init(G, uncon_comp_tups, contactor_tups)
sensors = ['S1', 'S2']
con_conts = ['C1', 'C3', 'C4', 'C6']
result = generate_database(G, sensors, con_conts, 'database1_simple.csv')
actual_state = {'G1': 1, 'G2': 1, 'T1': 0, 'T2': 0}
length_list = []
for j in range(0, pow(2, len(actual_state))):
# set the actual state
state_value = format(j, '0' + str(len(actual_state)) + 'b')
for i in range(0, len(actual_state)):
actual_state[list(actual_state)[i]] = int(state_value[i])
print actual_state
print '+++'
# test the greedy strategy
# set the actual state and initial action
# get the initial sensor readings
read_file_name = 'database1_simple.csv'
action = {'C1': 1, 'C3': 1, 'C4': 1, 'C6': 1}
action_list = [action.copy()]
states = actual_state.copy()
states.update(action)
# set the initial compatible_states to be all states
compatible_states = []
with open(read_file_name, 'rb') as f:
reader = csv.reader(f)
for row in reader:
for i in range(2, len(row)):
candidate = ast.literal_eval(row[i])
if compatible_states.count(candidate) == 0:
compatible_states.append(candidate)
while (len(compatible_states) > 1):
sensor_readings = sensor_measurement(G, uncon_comp_tups, contactor_tups, states)
compatible_states_candidate = read_from_database(read_file_name, sensor_readings, action)
temp_states = []
for i in compatible_states:
if compatible_states_candidate.count(i) > 0:
temp_states.append(i.copy())
compatible_states = list(temp_states)
# if all the actions are performed, then break
if len(action_list) == pow(2, len(action)): break
# find the best action to perform
#start = time.time()
action = find_best_action(sensor_readings, compatible_states, G, read_file_name, action_list)
#end = time.time()
#print end - start
states.update(action)
action_list.append(action.copy())
for i in compatible_states:
print i
print '------------------------------------' | bsd-3-clause | 1,155,626,011,497,945,000 | 30.671642 | 95 | 0.668081 | false |
rwl/PyCIM | CIM15/IEC61970/Generation/Production/ShutdownCurve.py | 1 | 3108 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Core.Curve import Curve
class ShutdownCurve(Curve):
"""Relationship between the rate in gross active power/minute (Y-axis) at which a unit should be shutdown and its present gross MW output (X-axis)Relationship between the rate in gross active power/minute (Y-axis) at which a unit should be shutdown and its present gross MW output (X-axis)
"""
def __init__(self, shutdownDate='', shutdownCost=0.0, ThermalGeneratingUnit=None, *args, **kw_args):
"""Initialises a new 'ShutdownCurve' instance.
@param shutdownDate: The date and time of the most recent generating unit shutdown
@param shutdownCost: Fixed shutdown cost
@param ThermalGeneratingUnit: A thermal generating unit may have a shutdown curve
"""
#: The date and time of the most recent generating unit shutdown
self.shutdownDate = shutdownDate
#: Fixed shutdown cost
self.shutdownCost = shutdownCost
self._ThermalGeneratingUnit = None
self.ThermalGeneratingUnit = ThermalGeneratingUnit
super(ShutdownCurve, self).__init__(*args, **kw_args)
_attrs = ["shutdownDate", "shutdownCost"]
_attr_types = {"shutdownDate": str, "shutdownCost": float}
_defaults = {"shutdownDate": '', "shutdownCost": 0.0}
_enums = {}
_refs = ["ThermalGeneratingUnit"]
_many_refs = []
def getThermalGeneratingUnit(self):
"""A thermal generating unit may have a shutdown curve
"""
return self._ThermalGeneratingUnit
def setThermalGeneratingUnit(self, value):
if self._ThermalGeneratingUnit is not None:
self._ThermalGeneratingUnit._ShutdownCurve = None
self._ThermalGeneratingUnit = value
if self._ThermalGeneratingUnit is not None:
self._ThermalGeneratingUnit.ShutdownCurve = None
self._ThermalGeneratingUnit._ShutdownCurve = self
ThermalGeneratingUnit = property(getThermalGeneratingUnit, setThermalGeneratingUnit)
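# Doctest-style sketch of wiring a curve to a generating unit (the
# ThermalGeneratingUnit import path is assumed from the same CIM15 package):
#
#     >>> from CIM15.IEC61970.Generation.Production.ThermalGeneratingUnit import ThermalGeneratingUnit
#     >>> unit = ThermalGeneratingUnit()
#     >>> curve = ShutdownCurve(shutdownDate='2011-01-01T00:00:00',
#     ...                       shutdownCost=1500.0,
#     ...                       ThermalGeneratingUnit=unit)
#     >>> curve.ThermalGeneratingUnit is unit
#     True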
| mit | 2,589,421,699,765,079,000 | 45.38806 | 293 | 0.723295 | false |
jml/flocker | flocker/provision/_rackspace.py | 1 | 2979 | # Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Rackspace provisioner.
"""
from ._libcloud import monkeypatch, LibcloudProvisioner
from ._install import (
provision,
task_open_control_firewall,
)
from ._ssh import run_remotely
from ._effect import sequence
def get_default_username(distribution):
"""
Return the username available by default on a system.
:param str distribution: Name of the operating system distribution
:return str: The username made available by Rackspace for this
distribution.
"""
return 'root'
def provision_rackspace(node, package_source, distribution, variants):
"""
Provision flocker on this node.
:param LibcloudNode node: Node to provision.
:param PackageSource package_source: See func:`task_install_flocker`
:param bytes distribution: See func:`task_install_flocker`
:param set variants: The set of variant configurations to use when
provisioning
"""
commands = []
commands.append(run_remotely(
username=get_default_username(distribution),
address=node.address,
commands=sequence([
provision(
package_source=package_source,
distribution=node.distribution,
variants=variants,
),
# https://clusterhq.atlassian.net/browse/FLOC-1550
# This should be part of ._install.configure_cluster
task_open_control_firewall(node.distribution),
]),
))
return sequence(commands)
IMAGE_NAMES = {
'fedora-20': u'Fedora 20 (Heisenbug) (PVHVM)',
'centos-7': u'CentOS 7 (PVHVM)',
'ubuntu-14.04': u'Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)',
'ubuntu-15.04': u'Ubuntu 15.04 (Vivid Vervet) (PVHVM)',
}
def rackspace_provisioner(username, key, region, keyname):
"""
Create a LibCloudProvisioner for provisioning nodes on rackspace.
:param bytes username: The user to connect to rackspace with.
:param bytes key: The API key associated with the user.
:param bytes region: The rackspace region in which to launch the instance.
:param bytes keyname: The name of an existing ssh public key configured in
rackspace. The provision step assumes the corresponding private key is
available from an agent.
"""
# Import these here, so that this can be imported without
# installng libcloud.
from libcloud.compute.providers import get_driver, Provider
monkeypatch()
driver = get_driver(Provider.RACKSPACE)(
key=username,
secret=key,
region=region)
provisioner = LibcloudProvisioner(
driver=driver,
keyname=keyname,
image_names=IMAGE_NAMES,
create_node_arguments=lambda **kwargs: {
"ex_config_drive": "true",
},
provision=provision_rackspace,
default_size="performance1-8",
get_default_user=get_default_username,
)
return provisioner
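# Hedged usage sketch (credentials, region and key name are placeholders; the
# provisioner's node-creation API is defined in ._libcloud and may differ):
#
#     >>> provisioner = rackspace_provisioner(
#     ...     username=b"my-user", key=b"my-api-key",
#     ...     region=b"dfw", keyname=b"flocker-key")
#     >>> # provisioner.create_node(name="test-node", distribution="fedora-20")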
| apache-2.0 | 4,473,445,723,125,299,000 | 29.71134 | 78 | 0.663981 | false |
tunnell/wax | wax/EventBuilder/Tasks.py | 1 | 1508 | """Celery tasks"""
import numpy as np
from celery import Celery
import pymongo
from wax import Configuration
import ebcore
# Specify mongodb host and database to connect to
BROKER_URL = 'mongodb://xedaqtest2:27017/jobs'
celery = Celery('EOD_TASKS',
broker=BROKER_URL,
backend=BROKER_URL)
@celery.task
def process_time_range_task(t0, t1,
collection_name, hostname,
threshold=Configuration.THRESHOLD,
compressed=True):
reduction_factor = 100
return ebcore.process_time_range_task(t0,
t1,
Configuration.MAX_DRIFT,
Configuration.PADDING,
threshold,
reduction_factor,
hostname,
"input.dataset", "output.dataset",
compressed)
# "%s.%s" % (MongoDBInput.get_db_name(), collection_name),
# "%s.%s" % (MongoDBOutput.get_db_name(), collection_name))
@celery.task
def clear_processed(t0, collection_name, hostname='127.0.0.1'):
"""Delete data up to t0
"""
c = pymongo.MongoClient(hostname)
c['input'][collection_name].remove({"time_max": {"$lt": t0}})
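# Hedged dispatch sketch (host name and collection are placeholders; celery
# workers must be started against the same MongoDB broker given in BROKER_URL):
#
#     >>> result = process_time_range_task.delay(0, 10 ** 8,
#     ...                                        collection_name='dataset_0',
#     ...                                        hostname='127.0.0.1')
#     >>> clear_processed.delay(10 ** 8, 'dataset_0')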
| bsd-3-clause | 2,997,668,039,803,772,000 | 34.069767 | 100 | 0.466844 | false |
userzimmermann/robotframework-python3 | src/robot/running/signalhandler.py | 1 | 3717 | # Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from threading import currentThread
try:
import signal
except ImportError:
signal = None # IronPython 2.6 doesn't have signal module by default
if sys.platform.startswith('java'):
from java.lang import IllegalArgumentException
else:
## IllegalArgumentException = None
# `None` doesn't work in Python 3 if used in `except` statement
# (in _register_signal_handler)
class IllegalArgumentException(Exception):
pass
from robot.errors import ExecutionFailed
from robot.output import LOGGER
class _StopSignalMonitor(object):
def __init__(self):
self._signal_count = 0
self._running_keyword = False
self._orig_sigint = None
self._orig_sigterm = None
def __call__(self, signum, frame):
self._signal_count += 1
LOGGER.info('Received signal: %s.' % signum)
if self._signal_count > 1:
sys.__stderr__.write('Execution forcefully stopped.\n')
raise SystemExit()
sys.__stderr__.write('Second signal will force exit.\n')
if self._running_keyword and not sys.platform.startswith('java'):
self._stop_execution_gracefully()
def _stop_execution_gracefully(self):
raise ExecutionFailed('Execution terminated by signal', exit=True)
def start(self):
# TODO: Remove start() in favor of __enter__ in RF 2.9. Refactoring
# the whole signal handler at that point would be a good idea.
self.__enter__()
def __enter__(self):
if signal:
self._orig_sigint = signal.getsignal(signal.SIGINT)
self._orig_sigterm = signal.getsignal(signal.SIGTERM)
for signum in signal.SIGINT, signal.SIGTERM:
self._register_signal_handler(signum)
return self
def __exit__(self, *exc_info):
if signal:
signal.signal(signal.SIGINT, self._orig_sigint)
signal.signal(signal.SIGTERM, self._orig_sigterm)
def _register_signal_handler(self, signum):
try:
signal.signal(signum, self)
except (ValueError, IllegalArgumentException) as err:
# ValueError occurs e.g. if Robot doesn't run on main thread.
# IllegalArgumentException is http://bugs.jython.org/issue1729
if currentThread().getName() == 'MainThread':
self._warn_about_registeration_error(signum, err)
def _warn_about_registeration_error(self, signum, err):
name, ctrlc = {signal.SIGINT: ('INT', 'or with Ctrl-C '),
signal.SIGTERM: ('TERM', '')}[signum]
LOGGER.warn('Registering signal %s failed. Stopping execution '
'gracefully with this signal %sis not possible. '
'Original error was: %s' % (name, ctrlc, err))
def start_running_keyword(self, in_teardown):
self._running_keyword = True
if self._signal_count and not in_teardown:
self._stop_execution_gracefully()
def stop_running_keyword(self):
self._running_keyword = False
STOP_SIGNAL_MONITOR = _StopSignalMonitor()
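# Usage sketch: the module-level singleton is meant to wrap execution as a
# context manager (the runner call below is a hypothetical placeholder):
#
#     >>> with STOP_SIGNAL_MONITOR:
#     ...     run_tests()   # any long-running call; SIGINT/SIGTERM stop it gracefully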
| apache-2.0 | 1,963,857,764,741,282,800 | 36.928571 | 75 | 0.649179 | false |
holinnn/lupin | lupin/schema.py | 1 | 4807 | from copy import copy
from . import bind
from .validators_combination import ValidatorsNullCombination
from .errors import ValidationError, InvalidDocument, MissingKey
# compatibility : used to generate schema names if not provided (2018-07-04)
_globals = {
"schemas_count": 0
}
def _generate_name():
"""Generate schema name if not provided"""
_globals["schemas_count"] += 1
return "schema%i" % _globals["schemas_count"]
class Schema(object):
def __init__(self, fields, name=None, validators=None):
"""
Args:
fields (dict): dictionary of fields
name (str): schema name
validators (ValidatorsCombination|Validator): list of validators or a combination a validators
"""
self._fields = fields
if not name:
name = _generate_name()
self.name = name
if validators is None:
validators = ValidatorsNullCombination()
self._validators = validators
def add_field(self, name, field):
"""Add new field to schema.
Args:
name (str): field name
field (Field): a field
"""
self._fields[name] = field
def copy(self, new_name=None):
"""Returns a new schema based on current schema
Args:
            new_name (str): name of new schema
Returns:
Schema
"""
return type(self)(copy(self._fields), new_name)
def load(self, cls, data, mapper, allow_partial=False, factory=bind):
"""Loads an instance of cls from dictionary
Args:
cls (class): class to instantiate
data (dict): dictionary of data
mapper (Mapper): mapper used to load data
allow_partial (bool): allow partial schema, won't raise error if missing keys
factory (callable): factory method used to instantiate objects
Returns:
object
"""
attrs = self.load_attrs(data, mapper, allow_partial)
return factory(cls, attrs)
def load_attrs(self, data, mapper, allow_partial=False):
"""Loads attributes dictionary from `data`
Args:
data (dict): dictionary of data
mapper (Mapper): mapper used to load data
allow_partial (bool): allow partial schema, won't raise error if missing keys
Returns:
dict
"""
attrs = {}
for key, field in self._fields.items():
if field.is_read_only:
continue
if key in data:
raw_value = data[key]
value = field.pre_load(raw_value)
value = field.load(value, mapper)
value = field.post_load(value)
elif allow_partial:
continue
else:
value = field.default
attr_name = field.binding or key
attrs[attr_name] = value
return attrs
def dump(self, obj, mapper):
"""Dumps object into a dictionnary
Args:
obj (object): object to dump
mapper (Mapper): mapper used to dump data
Returns:
dict
"""
ret_dict = {}
for key, field in self._fields.items():
if field.is_write_only:
continue
value = field.extract_attr(obj, mapper, key)
if field.is_ignore_if_null and value is None:
continue
ret_dict[key] = value
return ret_dict
def validate(self, data, mapper, allow_partial=False, path=None):
"""Validate data with all field validators.
If path is provided it will be used as the base path for errors.
Args:
data (dict): data to validate
mapper (Mapper): mapper used to validate data
allow_partial (bool): allow partial schema, won't raise error if missing keys
path (list): base path for errors
"""
path = path or []
errors = []
# Validate the fields
for key, field in self._fields.items():
field_path = path + [key]
if key in data:
raw_value = data.get(key)
try:
field.validate(raw_value, field_path, mapper)
except ValidationError as error:
errors.append(error)
elif not allow_partial and not field.is_optional:
errors.append(MissingKey(key, field_path))
# Validate data with global validators
try:
self._validators(data, path)
except ValidationError as error:
errors.append(error)
if errors:
raise InvalidDocument(errors)
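# Doctest-style sketch (``fields.String`` and a configured ``Mapper`` instance come
# from the surrounding lupin package; exact constructor arguments are assumptions):
#
#     >>> schema = Schema({"firstName": fields.String(binding="first_name")},
#     ...                 name="artist")
#     >>> schema.load_attrs({"firstName": "Chet"}, mapper)
#     {'first_name': 'Chet'}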
| mit | 584,183,981,520,573,300 | 29.814103 | 106 | 0.558144 | false |
cdiener/micom | tests/test_util.py | 1 | 1709 | """Test utilities."""
from os.path import basename
import numpy as np
import micom
import micom.util as util
from fixtures import community
URL = "http://bigg.ucsd.edu/static/models/e_coli_core.xml.gz"
tax = micom.data.test_taxonomy()
def test_download(tmpdir):
print(tmpdir.dirpath())
util.download_model(URL, str(tmpdir))
assert tmpdir.join("e_coli_core.xml.gz").check()
model = util.load_model(URL)
assert len(model.reactions) == 95
assert len(model.metabolites) == 72
def test_load_model():
row = tax.loc[0]
model = util.load_model(row.file)
assert len(model.reactions) == 95
assert len(model.metabolites) == 72
def test_serialization(tmpdir):
row = tax.loc[0]
util.serialize_models([row.file], str(tmpdir))
assert tmpdir.join("e_coli_core.pickle").check()
def test_fluxes_from_primals(community):
community.solver.optimize()
fluxes = util.fluxes_from_primals(community, tax.loc[0])
assert len(fluxes) < len(community.reactions)
assert len(fluxes) == 95
def test_join_models():
single = util.load_model(tax.file[0])
single_coefs = {
v.name: coef for v, coef in
single.objective.get_linear_coefficients(single.variables).items()
}
mod = util.join_models(tax.file, id="test_model")
coefs = {
v.name: coef for v, coef in
mod.objective.get_linear_coefficients(single.variables).items()
}
assert len(mod.reactions) == len(single.reactions)
assert len(mod.metabolites) == len(single.metabolites)
assert all(np.allclose(single_coefs[v.name], coefs[v.name])
for v in mod.variables)
assert np.allclose(single.slim_optimize(), mod.slim_optimize()) | apache-2.0 | 3,677,359,368,492,674,600 | 28.482759 | 74 | 0.674664 | false |
bmatejek/ibex | utilities/dataIO.py | 1 | 6563 | import os
import h5py
import imageio
import tifffile
import struct
import numpy as np
from PIL import Image
from ibex.data_structures import meta_data, skeleton_points
from ibex.utilities.constants import *
def GetWorldBBox(prefix):
# return the bounding box for this segment
return meta_data.MetaData(prefix).WorldBBox()
def GridSize(prefix):
# return the size of this dataset
return meta_data.MetaData(prefix).GridSize()
def CroppingBox(prefix):
# return which locations are valid for training and validation
return meta_data.MetaData(prefix).CroppingBox()
def ReadMetaData(prefix):
# return the meta data for this prefix
return meta_data.MetaData(prefix)
def Resolution(prefix):
# return the resolution for this prefix
return meta_data.MetaData(prefix).Resolution()
def GetGoldFilename(prefix):
filename, _ = meta_data.MetaData(prefix).GoldFilename()
return filename
def ReadH5File(filename, dataset=None):
# read the h5py file
with h5py.File(filename, 'r') as hf:
# read the first dataset if none given
if dataset == None: data = np.array(hf[hf.keys()[0]])
else: data = np.array(hf[dataset])
# allow affinities and images to not be int64, everything else gets converted
if data.dtype == np.float32 or data.dtype == np.uint8 or data.dtype == np.int64: return data
else: return data.astype(np.int64)
def IsIsotropic(prefix):
resolution = Resolution(prefix)
return (resolution[IB_Z] == resolution[IB_Y]) and (resolution[IB_Z] == resolution[IB_X])
def WriteH5File(data, filename, dataset, compression=True):
with h5py.File(filename, 'w') as hf:
# should cover all cases of affinities/images
if compression: hf.create_dataset(dataset, data=data, compression='gzip')
else: hf.create_dataset(dataset, data=data)
def ReadAffinityData(prefix):
filename, dataset = meta_data.MetaData(prefix).AffinityFilename()
affinities = ReadH5File(filename, dataset).astype(np.float32)
# create the dataset so it is (z, y, x, c)
if affinities.shape[0] == 3: affinities = np.moveaxis(affinities, 0, 3)
return affinities
def ReadSegmentationData(prefix):
filename, dataset = meta_data.MetaData(prefix).SegmentationFilename()
return ReadH5File(filename, dataset).astype(np.int64)
def ReadGoldData(prefix):
filename, dataset = meta_data.MetaData(prefix).GoldFilename()
return ReadH5File(filename, dataset).astype(np.int64)
def ReadImageData(prefix):
filename, dataset = meta_data.MetaData(prefix).ImageFilename()
return ReadH5File(filename, dataset)
def ReadSkeletons(prefix, skeleton_algorithm='thinning', downsample_resolution=(80, 80, 80), params='00'):
# read in all of the skeleton points
skeleton_filename = 'skeletons/{}/{}-{:03d}x{:03d}x{:03d}-upsample-{}-skeleton.pts'.format(prefix, skeleton_algorithm, downsample_resolution[IB_X], downsample_resolution[IB_Y], downsample_resolution[IB_Z], params)
endpoint_filename = 'skeletons/{}/{}-{:03d}x{:03d}x{:03d}-endpoint-vectors.vec'.format(prefix, skeleton_algorithm, downsample_resolution[IB_X], downsample_resolution[IB_Y], downsample_resolution[IB_Z], params)
# read the joints file and the vector file
with open(skeleton_filename, 'rb') as sfd, open(endpoint_filename, 'rb') as efd:
skel_zres, skel_yres, skel_xres, skel_max_label, = struct.unpack('qqqq', sfd.read(32))
end_zres, end_yres, end_xres, end_max_label, = struct.unpack('qqqq', efd.read(32))
assert (skel_zres == end_zres and skel_yres == end_yres and skel_xres == end_xres and skel_max_label == end_max_label)
# create an array of skeletons
skeletons = []
resolution = Resolution(prefix)
grid_size = GridSize(prefix)
for label in range(skel_max_label):
joints = []
endpoints = []
vectors = {}
nelements, = struct.unpack('q', sfd.read(8))
for _ in range(nelements):
index, = struct.unpack('q', sfd.read(8))
if (index < 0): endpoints.append(-1 * index)
else: joints.append(index)
nendpoints, = struct.unpack('q', efd.read(8))
assert (len(endpoints) == nendpoints)
for _ in range(nendpoints):
endpoint, vz, vy, vx, = struct.unpack('qddd', efd.read(32))
vectors[endpoint] = (vz, vy, vx)
skeletons.append(skeleton_points.Skeleton(label, joints, endpoints, vectors, resolution, grid_size))
return skeletons
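# Hedged example (the 'SNEMI3D' prefix is a hypothetical dataset name; this only
# works once the corresponding skeleton/endpoint files exist under skeletons/):
#
#     >>> skeletons = ReadSkeletons('SNEMI3D', skeleton_algorithm='thinning',
#     ...                           downsample_resolution=(80, 80, 80), params='00')
#     >>> len(skeletons)   # one Skeleton object per label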
def ReadImage(filename):
return np.array(Image.open(filename))
def WriteImage(image, filename):
imageio.imwrite(filename, image)
def H52Tiff(stack, output_prefix):
zres, _, _ = stack.shape
for iz in range(zres):
image = stack[iz,:,:]
tifffile.imsave('{}-{:05d}.tif'.format(output_prefix, iz), image)
def H52PNG(stack, output_prefix):
zres, _, _ = stack.shape
for iz in range(zres):
image = stack[iz,:,:]
im = Image.fromarray(image)
im.save('{}-{:05d}.png'.format(output_prefix, iz))
def PNG2H5(directory, filename, dataset, dtype=np.int32):
# get all of the png files
png_files = sorted(os.listdir(directory))
# what is the size of the output file
zres = len(png_files)
for iz, png_filename in enumerate(png_files):
im = np.array(Image.open('{}/{}'.format(directory, png_filename)))
# create the output if this is the first slice
if not iz:
if len(im.shape) == 2: yres, xres = im.shape
else: yres, xres, _ = im.shape
h5output = np.zeros((zres, yres, xres), dtype=dtype)
# add this element
if len(im.shape) == 3 and dtype == np.int32: h5output[iz,:,:] = 65536 * im[:,:,0] + 256 * im[:,:,1] + im[:,:,2]
elif len(im.shape) == 3 and dtype == np.uint8: h5output[iz,:,:] = ((im[:,:,0].astype(np.uint16) + im[:,:,1].astype(np.uint16) + im[:,:,2].astype(np.uint16)) / 3).astype(np.uint8)
else: h5output[iz,:,:] = im[:,:]
WriteH5File(h5output, filename, dataset)
def SpawnMetaFile(prefix, rhoana_filename, rhoana_dataset):
meta = meta_data.MetaData(prefix)
# get the new prefix for the data from the rhoana file
new_prefix = rhoana_filename.split('/')[1][:-3]
# update the values for this meta data
meta.prefix = new_prefix
meta.rhoana_filename = '{} {}'.format(rhoana_filename, rhoana_dataset)
meta.WriteMetaFile() | mit | -6,225,843,316,542,372,000 | 29.672897 | 217 | 0.651227 | false |
crooks/mixminion | lib/mixminion/testSupport.py | 1 | 25587 | # Copyright 2002-2011 Nick Mathewson. See LICENSE for licensing information.
"""mixminion.testSupport
Shared support code for unit tests, benchmark tests, and integration tests.
"""
import base64
import cStringIO
import os
import stat
import sys
import mixminion.Crypto
import mixminion.Common
from mixminion.Common import waitForChildren, ceilDiv, createPrivateDir, LOG
from mixminion.Config import _parseBoolean, _parseIntervalList, ConfigError
from mixminion.server.Modules import DELIVER_FAIL_NORETRY, DELIVER_FAIL_RETRY,\
DELIVER_OK, DeliveryModule, ImmediateDeliveryQueue, \
SimpleModuleDeliveryQueue, _escapeMessageForEmail
#----------------------------------------------------------------------
# DirectoryStoreModule
class DirectoryStoreModule(DeliveryModule):
"""Delivery module for testing: puts messages in files in a given
directory. Can be configured to use a delivery queue or not.
When this module delivers a message:
If the routing info is 'FAIL!', the message is treated as undeliverable.
If the routing info is 'fail', the message is treated as temporarily
undeliverable (and so will eventually time out).
Otherwise, creates a file in the specified directory, containing
the routing info, a newline, and the message contents.
"""
def __init__(self):
DeliveryModule.__init__(self)
## Fields:
# loc -- The directory to store files in. All filenames are numbers;
# we always put new messages in the smallest number greater than
# all existing numbers.
# next -- the number of the next file.
def getConfigSyntax(self):
return { 'Testing/DirectoryDump':
{ 'Location' : ('REQUIRE', None, None),
'UseQueue': ('REQUIRE', "boolean", None),
'Retry' : ('ALLOW', "intervalList",
"every 1 min for 10 min") } }
def validateConfig(self, config, lines, contents):
# loc = sections['Testing/DirectoryDump'].get('Location')
pass
def getRetrySchedule(self):
return self.retry
def configure(self, config, manager):
self.loc = config['Testing/DirectoryDump'].get('Location')
if not self.loc:
return
self.useQueue = config['Testing/DirectoryDump']['UseQueue']
if not os.path.exists(self.loc):
createPrivateDir(self.loc)
self.next = 1 + max([-1]+[int(f) for f in os.listdir(self.loc)])
self.retry = config['Testing/DirectoryDump']['Retry']
manager.enableModule(self)
def getServerInfoBlock(self):
return ""
def getName(self):
return "Testing_DirectoryDump"
def getExitTypes(self):
return [ 0xFFFE ]
def createDeliveryQueue(self, queueDir):
if self.useQueue:
return SimpleModuleDeliveryQueue(self, queueDir,
retrySchedule=self.retry)
else:
return ImmediateDeliveryQueue(self)
def processMessage(self, packet):
assert packet.getExitType() == 0xFFFE
exitInfo = packet.getAddress()
if exitInfo == 'fail':
return DELIVER_FAIL_RETRY
elif exitInfo == 'FAIL!':
return DELIVER_FAIL_NORETRY
        LOG.debug("Delivering test message")
m = _escapeMessageForEmail(packet)
if m is None:
# Ordinarily, we'd drop corrupt messages, but this module is
# meant for debugging.
m = """\
==========CORRUPT OR UNDECODABLE MESSAGE
Decoding handle: %s%s==========MESSAGE ENDS""" % (
base64.encodestring(packet.getTag()),
base64.encodestring(packet.getContents()))
f = open(os.path.join(self.loc, str(self.next)), 'w')
self.next += 1
f.write(m)
f.close()
return DELIVER_OK
#----------------------------------------------------------------------
# mix_mktemp: A secure, paranoid mktemp replacement. (May be overkill
# for testing, but better safe than sorry.)
# Name of our temporary directory: all temporary files go under this
# directory. If None, it hasn't been created yet. If it exists,
# it must be owned by us, mode 700.
_CHECK_MODE = 1
_CHECK_UID = 1
if sys.platform in ('cygwin', 'win32') or os.environ.get("MM_NO_FILE_PARANOIA"):
_CHECK_MODE = _CHECK_UID = 0
_MM_TESTING_TEMPDIR = None
# How many temporary files have we created so far?
_MM_TESTING_TEMPDIR_COUNTER = 0
# Do we nuke the contents of _MM_TESTING_TEMPDIR on exit?
_MM_TESTING_TEMPDIR_REMOVE_ON_EXIT = 1
def mix_mktemp(extra=""):
'''mktemp wrapper. puts all files under a securely mktemped
directory.'''
global _MM_TESTING_TEMPDIR
global _MM_TESTING_TEMPDIR_COUNTER
if _MM_TESTING_TEMPDIR is None:
# We haven't configured our temporary directory yet.
import tempfile
# If tempfile.mkdtemp exists, use it. This avoids warnings, and
# is harder for people to exploit.
if hasattr(tempfile, 'mkdtemp'):
try:
temp = tempfile.mkdtemp()
except OSError, e:
print "mkdtemp failure: %s" % e
sys.exit(1)
else:
# Otherwise, pick a dirname, make sure it doesn't exist, and try to
# create it.
temp = tempfile.mktemp()
if os.path.exists(temp):
print "I think somebody's trying to exploit mktemp."
sys.exit(1)
try:
os.mkdir(temp, 0700)
except OSError, e:
print "Something's up with mktemp: %s" % e
sys.exit(1)
# The directory must exist....
if not os.path.exists(temp):
print "Couldn't create temp dir %r" %temp
sys.exit(1)
st = os.stat(temp)
# And be writeable only by us...
if _CHECK_MODE and st[stat.ST_MODE] & 077:
print "Couldn't make temp dir %r with secure permissions" %temp
sys.exit(1)
# And be owned by us...
if _CHECK_UID and st[stat.ST_UID] != os.getuid():
print "The wrong user owns temp dir %r"%temp
sys.exit(1)
_MM_TESTING_TEMPDIR = temp
if _MM_TESTING_TEMPDIR_REMOVE_ON_EXIT:
import atexit
atexit.register(deltree, temp)
# So now we have a temporary directory; return the name of a new
# file there.
_MM_TESTING_TEMPDIR_COUNTER += 1
return os.path.join(_MM_TESTING_TEMPDIR,
"tmp%05d%s" % (_MM_TESTING_TEMPDIR_COUNTER,extra))
_WAIT_FOR_KIDS = 1
def deltree(*dirs):
"""Delete each one of a list of directories, along with all of its
contents."""
global _WAIT_FOR_KIDS
#print "deltree(%r)"%dirs
if _WAIT_FOR_KIDS:
print "Waiting for shred processes to finish."
waitForChildren()
_WAIT_FOR_KIDS = 0
for d in dirs:
#print "Considering",d
if os.path.isdir(d):
#print "deleting from %s: %s" % (d, os.listdir(d))
for fn in os.listdir(d):
loc = os.path.join(d,fn)
if os.path.isdir(loc):
#print "deleting (I)",loc
deltree(loc)
else:
#print "unlinking (I)",loc
os.unlink(loc)
#ld = os.listdir(d)
#if ld: print "remaining in %s: %s" % (d, ld)
if os.listdir(d):
print "os.listdir(%r)==(%r)"%(d,os.listdir(d))
os.rmdir(d)
elif os.path.exists(d):
#print "Unlinking", d
os.unlink(d)
else:
pass #print "DNE", d
#----------------------------------------------------------------------
# suspendLog
def suspendLog(severity=None):
"""Temporarily suppress logging output"""
log = LOG
if hasattr(log, '_storedHandlers'):
resumeLog()
buf = cStringIO.StringIO()
h = mixminion.Common._ConsoleLogHandler(buf)
log._storedHandlers = log.handlers
log._storedSeverity = log.severity
log._testBuf = buf
log.handlers = []
if severity is not None:
log.setMinSeverity(severity)
log.addHandler(h)
def resumeLog():
"""Resume logging output. Return all new log messages since the last
suspend."""
log = LOG
if not hasattr(log, '_storedHandlers'):
return None
buf = log._testBuf
del log._testBuf
log.handlers = log._storedHandlers
del log._storedHandlers
log.setMinSeverity(log._storedSeverity)
del log._storedSeverity
return buf.getvalue()
#----------------------------------------------------------------------
# Facilities to temporarily replace attributes and functions for testing
# List of object, attribute, old-value for all replaced attributes.
_REPLACED_OBJECT_STACK = []
def replaceAttribute(object, attribute, value):
"""Temporarily replace <object.attribute> with value. When
undoReplacedAttributes() is called, the old value is restored."""
if hasattr(object, attribute):
tup = (object, attribute, getattr(object, attribute))
else:
tup = (object, attribute)
_REPLACED_OBJECT_STACK.append(tup)
setattr(object, attribute, value)
# List of (fnname, args, kwargs) for all the replaced functions that
# have been called.
_CALL_LOG = []
class _ReplacementFunc:
"""Helper object: callable stub that logs its invocations to _CALL_LOG
and delegates to an internal function."""
def __init__(self, name, fn=None):
self.name = name
self.fn = fn
def __call__(self, *args, **kwargs):
_CALL_LOG.append((self.name, args, kwargs))
if self.fn:
return self.fn(*args, **kwargs)
else:
return None
def replaceFunction(object, attribute, fn=None):
"""Temporarily replace the function or method <object.attribute>.
If <fn> is provided, replace it with fn; otherwise, the new
function will just return None. All invocations of the new
function will logged, and retrievable by getReplacedFunctionCallLog()"""
replaceAttribute(object, attribute, _ReplacementFunc(attribute, fn))
def getReplacedFunctionCallLog():
"""Return a list of (functionname, args, kwargs)"""
return _CALL_LOG
def clearReplacedFunctionCallLog():
"""Clear all entries from the replaced function call log"""
del _CALL_LOG[:]
def undoReplacedAttributes():
"""Undo all replaceAttribute and replaceFunction calls in effect."""
# Remember to traverse _REPLACED_OBJECT_STACK in reverse, so that
# "replace(a,b,c1); replace(a,b,c2)" is properly undone.
r = _REPLACED_OBJECT_STACK[:]
r.reverse()
del _REPLACED_OBJECT_STACK[:]
for item in r:
if len(item) == 2:
o,a = item
delattr(o,a)
else:
o,a,v = item
setattr(o,a,v)
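# Doctest-style sketch of the replace/restore cycle (``some_module.fetch`` is a
# hypothetical attribute used only for illustration):
#
#     >>> replaceFunction(some_module, 'fetch', lambda url: 'stub')
#     >>> some_module.fetch('http://example.com/')
#     'stub'
#     >>> getReplacedFunctionCallLog()
#     [('fetch', ('http://example.com/',), {})]
#     >>> undoReplacedAttributes(); clearReplacedFunctionCallLog()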
#----------------------------------------------------------------------
# Test vectors.
class CyclicRNG(mixminion.Crypto.RNG):
def __init__(self):
mixminion.Crypto.RNG.__init__(self,4096)
self.idx = 0
self.pattern = "".join(map(chr,range(256)))
def _prng(self,n):
reps = ceilDiv(n+self.idx,256)
r = (self.pattern*reps)[self.idx:self.idx+n]
self.idx = (self.idx+n) % 256
assert len(r) == n
return r
def unHexStr(s):
assert s[0] == '['
assert s[-1] == ']'
r = []
for i in xrange(1,len(s)-1,3):
r.append(chr(int(s[i:i+2],16)))
assert s[i+2] in ' ]'
return "".join(r)
def unHexNum(s):
assert s[0] == '['
assert s[-1] == ']'
r = [ ]
for i in xrange(1,len(s)-1,3):
r.append(s[i:i+2])
assert s[i+2] in ' ]'
return long("".join(r), 16)
def hexStr(s):
r = []
for c in s:
r.append("%02X"%ord(c))
return "[%s]"%(" ".join(r))
def hexNum(n):
hn = "%X"%n
if len(hn)%2 == 1:
hn = "0"+hn
r = []
for i in xrange(0,len(hn),2):
r.append(hn[i:i+2])
return "[%s]"%(" ".join(r))
def tvRSA():
print "======================================== RSA"
pk1 = TEST_KEYS_2048[0]
print "Example 2048-bit Key K"
n,e = pk1.get_public_key()
n2,e2,d,p,q = pk1.get_private_key()
print " exponent =",hexNum(e)
print " modulus =",hexNum(n)
print " Private key (P)=",hexNum(p)
print " Private key (Q)=",hexNum(q)
print " Private key (D)=",hexNum(d)
print " PK_Encode(K) =",hexStr(pk1.encode_key(1))
print " Fingerprint =",mixminion.Crypto.pk_fingerprint(pk1)
print
ms = CyclicRNG().getBytes(20)
print "OAEP Padding/PKCS encoding example: (Using MGF SEED %s)"%hexStr(ms)
s = "Hello world"
print " original string M:",hexStr(s)
assert pk1.get_modulus_bytes() == 256
enc = mixminion.Crypto._add_oaep_padding(s,
mixminion.Crypto.OAEP_PARAMETER,256,CyclicRNG())
print " Padded string (2048 bits):",hexStr(enc)
pkenc = pk1.crypt(enc,1,1)
print
print " PK_Encrypt(K,M):",hexStr(pkenc)
assert mixminion.Crypto.pk_decrypt(pkenc,pk1) == s
def tvAES():
import mixminion._minionlib as _ml
print "======================================== AES"
print "Single block encryption"
k = unHexStr("[00 11 22 33 44 55 66 77 88 99 AA BB CC DD EE FF]")
b = "MixminionTypeIII"
print " Key:",hexStr(k)
print " Plaintext block:",hexStr(b)
eb = _ml.aes128_block_crypt(_ml.aes_key(k),b,1)
db = _ml.aes128_block_crypt(_ml.aes_key(k),b,0)
print " Encrypted block:",hexStr(eb)
print " Decrypted block:",hexStr(db)
print
print "Counter mode encryption:"
k = unHexStr("[02 13 24 35 46 57 68 79 8A 9B AC BD CE DF E0 F1]")
print " Key:",hexStr(k)
print " Keystream[0x00000...0x0003F]:",hexStr(mixminion.Crypto.prng(k,64))
print " Keystream[0x002C0...0x002FF]:",hexStr(mixminion.Crypto.prng(k,64,0x2c0))
print " Keystream[0xF0000...0xF003F]:",hexStr(mixminion.Crypto.prng(k,64,0xF0000))
txt = "Hello world!"
print " Example text M:",hexStr(txt)
print " Encrypt(K,M):",hexStr(mixminion.Crypto.ctr_crypt(txt,k))
def tvLIONESS():
print "======================================== LIONESS"
print "SPRP_Encrypt:"
ks = mixminion.Crypto.Keyset("basic key")
k1,k2,k3,k4=ks.getLionessKeys("A")
print " Base key K:",hexStr(k1)
print " K2:",hexStr(k2)
print " K3:",hexStr(k3)
print " K4:",hexStr(k4)
txt = "I never believe in code until it's running, and I never believe in the next release until it's out."
print
print " Example text M:",hexStr(txt)
print " SPRP_Encrypt(K,M):",hexStr(mixminion.Crypto.lioness_encrypt(
txt,(k1,k2,k3,k4)))
print " SPRP_Decrypt(K,M):",hexStr(mixminion.Crypto.lioness_decrypt(
txt,(k1,k2,k3,k4)))
def testVectors(name,args):
assert hexStr("ABCDEFGHI") == "[41 42 43 44 45 46 47 48 49]"
assert hexNum(10000) == '[27 10]'
assert hexNum(100000) == '[01 86 A0]'
assert hexNum(1000000000L) == '[3B 9A CA 00]'
assert unHexStr(hexStr("ABCDEFGHI")) == "ABCDEFGHI"
assert unHexNum(hexNum(10000)) in (10000, 10000L)
assert unHexNum(hexNum(100000)) in (100000,100000L)
assert unHexNum(hexNum(1000000000L)) == 1000000000L
tvRSA()
tvAES()
tvLIONESS()
#----------------------------------------------------------------------
# Long keypairs: stored here to avoid regenerating them every time we need
# to run tests. (We can't use 1024-bit keys, since they're not long enough
# to use as identity keys.)
TEST_KEYS_2048 = [
"""\
MIIEowIBAAKCAQEA0aBBHqAyfoAweyq5NGozHezVut12lGHeKrfmnax9AVPMfueqskqcKsjMe3Rz
NhDukD3ebYKPLKMnVDM+noVyHSawnzIc+1+wq1LFP5TJiPkPdodKq/SNlz363kkluLwhoWdn/16k
jlprnvdDk6ZxuXXTsAGtg235pEtFs4BLOLOxikW2pdt2Tir71p9SY0zGdM8m5UWZw4z3KqYFfPLI
oBsN+3hpcsjjO4BpkzpP3zVxy8VN2+hCxjbfow2sO6faD2u6r8BXPB7WlAbmwD8ZoX6f8Fbay02a
jG0mxglE9f0YQr66DONEQPoxQt8C1gn3KAIQ2Hdw1cxpQf3lkceBywIDAQABAoIBAETRUm+Gce07
ki7tIK4Ha06YsLXO/J3L306w3uHGfadQ5mKHFW/AtLILB65D1YrbViY+WWYkJXKnAUNQK2+JKaRO
Tk+E+STBDlPAMYclBmCUOzJTSf1XpKARNemBpAOYp4XAV9DrNiSRpKEkVagETXNwLhWrB1aNZRY9
q9048fjj1NoXsvLVY6HTaViHn8RCxuoSHT/1LXjStvR9tsLHk6llCtzcRO1fqBH7gRog8hhL1g5U
rfUJnXNSC3C2P9bQty0XACq0ma98AwGfozrK3Ca40GtlqYbsNsbKHgEgSVe124XDeVweK8b56J/O
EUsWF5hwdZnBTfmJP8IWmiXS16ECgYEA8YxFt0GrqlstLXEytApkkTZkGDf3D1Trzys2V2+uUExt
YcoFrZxIGLk8+7BPHGixJjLBvMqMLNaBMMXH/9HfSyHN3QHXWukPqNhmwmnHiT00i0QsNsdwsGJE
xXH0HsxgZCKDkLbYkzmzetfXPoaP43Q5feVSzhmBrZ3epwlTJDECgYEA3isKtLiISyGuao4bMT/s
3sQcgqcLArpNiPUo5ESp5qbXflAiH2wTC1ZNh7wUtn0Am8TdG1JnKFUdwHELpiRP9yCQj2bFS/85
jk6RCEmXdAGpYzB6lrqtYhFNe5LzphLGtALsuVOq6I7LQbUXY3019fkawfiFvnYZVovC3DKCsrsC
gYBSg8y9EZ4HECaaw3TCtFoukRoYe+XWQvhbSTPDIs+1dqZXJaBS8nRenckLYetklQ8PMX+lcrv4
BT8U3ju4VIWnMOEWgq6Cy+MhlutjtqcHZvUwLhW8kN0aJDfCC2+Npdu32WKAaTYK9Ucuy9Un8ufs
l6OcMl7bMTNvj+KjxTe1wQKBgB1cSNTrUi/Dqr4wO429qfsipbXqh3z7zAVeiOHp5R4zTGVIB8pp
SPcFl8dpZr9bM7piQOo8cJ+W6BCnn+d8Awlgx1n8NfS+LQgOgAI9X4OYOJ+AJ6NF1mYQbVH4cLSw
5Iujm08+rGaBgIEVgprGUFxKaGvcASjTiLO0UrMxBa7DAoGBALIwOkPLvZNkyVclSIdgrcWURlyC
oAK9MRgJPxS7s6KoJ3VXVKtIG3HCUXZXnmkPXWJshDBHmwsv8Zx50f+pqf7MD5fi3L1+rLjN/Rp/
3lGmzcVrG4LO4FEgs22LXKYfpvYRvcsXzbwHX33LnyLeXKrKYQ82tdxKOrh9wnEDqDmh""",
"""\
MIIEpQIBAAKCAQEAv/fvw/2HK48bwjgR2nUQ1qea9eIsYv4m98+DQoqPO7Zlr+Qs6/uiiOKtH0/b
3/B9As261HKkI4VDG0L523rB1QAfeENKdLczj8DoQPjHMsNDDepbBYmYH91vmig47fbLmbDnUiSD
+CFtM+/wUG4holomQBdPfUhoc44Fcw3cyvskkJr5aN9rqBRGuwuR81RaXt5lKtiwv9JUYqEBb2/f
sSDEWWHSf9HemzR25M/T+A51yQwKyFXC4RQzCu2jX7sZ53c6KRCniLPq9wUwtTrToul34Sssnw8h
PiV0Fwrk12uJdqqLDbltUlp6SEx8vBjSZC6JnVsunYmw88sIYGsrbQIDAQABAoIBAQCpnDaLxAUZ
x2ePQlsD2Ur3XT7c4Oi2zjc/3Gjs8d97srxFnCTUm5APwbeUYsqyIZlSUNMxwdikSanw/EwmT1/T
AjjL2Sh/1x4HdTm/rg7SGxOzx8yEJ/3wqYVhfwhNuDBLqrG3Mewn3+DMcsKxTZ0KBPymw/HHj6I5
9tF5xlW+QH7udAPxAX3qZC/VveqlomGTu4rBBtGt1mIIt+iP4kjlOjIutb6EK3fXZ8r9VZllNJ3D
/xZVx7Jt40hcV6CEuWOg1lwXQNmgl8+bSUvTaCpiVQ4ackeosWhTWxtKndw4UXSzXZAbjHAmAwMY
bHwxN4AqZZfbb2EI1WzOBjeZje1BAoGBAOiQZgngJr++hqn0gJOUImv+OWpFMfffzhWyMM8yoPXK
tIKaFTEuHAkCVre6lA1g34cFeYDcK9BC4oyQbdO4nxTZeTnrU2JQK2t4+N7WBU9W5/wOlxEdYzE0
2rNrDxBtOtCQnOI1h9Mrc87+xzPP55OloKbRMW1JzeAxWdg1LJrvAoGBANNQRNdRzgoDAm0N7WNe
pGx51v+UuGUHvE4dMGKWdK8njwbsv6l7HlTplGGOZUThZWM7Ihc8LU6NZ2IlwWYgzivYL/SUejUD
9/rYaWEYWPdXQW2/ekdi3FFZtKcuUB5zLy3gqtLSjM1a8zhbxdkYq4tqa+v9JwMTr/oyVf//XM9j
AoGAEjftpmxm3LKCPiInSGhcYfVibg7JoU9pB44UAMdIkLi2d1y2uEmSbKpAPNhi7MFgAWXOZOfa
jtAOi1BtKh7WZ325322t9I+vNxYc+OfvNo3qUnaaIv8YXCx1zYRfg7vq1ZfekmH7J/HJere+xzJM
Q+a/tRHCO3uCo0N6dFOGEQUCgYEAsQhJdD6zqA2XZbfKTnrGs55rsdltli6h4qtvktjLzsYMfFex
xpI/+hFqX0TFsKxInZa329Ftf6bVmxNYcHBBadgHbRdLPskhYsUVm+Oi/Szbws8s6Ut4mqrVv038
j1Yei4fydQcyMQTmSSwRl+ykIvu4iI+gtGI1Bx5OkFbm8VMCgYEAlEvig/fGBA/MgE6DUf6MXbFn
92JW25az5REkpZtEXz3B6yhyt/S5D1Da6xvfqvNijyqZpUqtp7lPSOlqFRJ3NihNc8lRqyFMPiBn
41QQWPZyFa1rTwJxijyG9PkI0sl1/WQK5QrTjGZGjX7r4Fjzr6EYM8gH3RA3WAPzJylTOdo=""",
"""\
MIIEpQIBAAKCAQEA68uqw2Ao12QPktY9pf9VSHMfJ8jKBGG4eG+HPmaBifc6+kAZWA7jeOwMTnbS
+KZ2nMFXKthp6zJiDzQqgKlQ7eA0zzBPtAboy4YhPRwrrQr/o1oPrppS2eEwvCGewySAZsIUwX4d
0P68lpLbA9h1vuV3t19M2WNifsYYcTUGPGdbpZHgBDQdmQeUBkXtCTANPxOYsrLwEhaCBrK4BLkW
sRNi0dRnFRdJ18rAYCiDAKq168IyP4TCUKKGWHbquv5rrNdg/RoUiCyPTgDodLaXTOLrRPuCOl5p
dwhNSwJyzEpeqy/x4YnNRbGNv7M1sNhnrarbUduZqOz9RpTQ0niKFQIDAQABAoIBAQC2h1aNH2b+
NWsI0+etFFbEWrmHZptbgPn34P3khB1K26NADVaRIBVeifuM0dbGvLWc6t27QQPdGYdnFY7BQlBv
k9vNdyx7w815nz8juybkMVtq7FCvbK8uEnBTcgMgNKVg5mSC1Enoewkp1kzMUUf0mlVuEcu/jHu2
f0p0eAN3xV5f4up+soujOrWuradmZ3uirYXzYrApagUHMqtjr+AhXJx7MuQCv9UPRU7ouidV/q36
Q/C4OpRqizjiKzulLhUoHmAUGMEQOd+ICoy71HOiK4MqnCmt2vI34cV9Cd5A8Hlfm6/COseor0Sq
26t4f8M8un7efc/RsF1xULiz/RoRAoGBAPvyQRyts6xpvDnanBLQa7b1Qf8oatYIcCcC7JlU+DZX
wD5qroyE5O7xStnSjqX5D6Lc7RbINkAuNGCofJzzynl5tP9j0WREueT1nq/YUW7Xn+Pd0fD6Fgb4
Js2vdRybH+vG4mv4gMxnS/gY+9jR7HL3GJRRQMMM5zWKY4LvrVADAoGBAO+W46I0/X5WCWellMod
Pa0M9OY3a8pJyP//JzblYykgw5nWWPHZEEOxV4VGFP0Pz4i6kpq/psWbCNLsh9k7EsqWLpeE7wsW
uXQj5LruIupL9/notboifL4zIOQcvHNs25iya+yURISYcVhmlqHHofX7ePfQR5sg1e1ZvethyR4H
AoGBAOH1ZhIrc14pQmf8uUdiZ4iiM/t8qzykOrmyNLJb83UBhGg2U6+xLIVkIMZ0wfz2/+AIFhb9
nzI2fkFGOuSk/S2vSvZV9qDfxn0jEJwS/Q3VExBRjA18ra64dky4lOb/9UQHjmBZcmJgLlEnTxAp
Tc/Z7tBugw+sDd0F7bOr85szAoGAOOBzLaCyxPkbxnUye0Cx0ZEP2k8x0ZXul4c1Af02qx7SEIUo
HFHRYKCLDGJ0vRaxx92yy/XPW33QfHIWVeWGMn2wldvC+7jrUbzroczCkShzt+ocqhFh160/k6eW
vTgMcZV5tXIFSgz+a2P/Qmyn8ENAlmPle9gxsOTrByPxoKUCgYEA1raYnqI9nKWkZYMrEOHx7Sy3
xCaKFSoc4nBxjJvZsSJ2aH6fJfMksPTisbYdSaXkGrb1fN2E7HxM1LsnbCyvXZsbMUV0zkk0Tzum
qDVW03gO4AvOD9Ix5gdebdq8le0xfMUzDvAIG1ypM+oMdZ122bI/rsOpLkZ4EtmixFxJbpk=""",
"""\
MIIEowIBAAKCAQEAs1+yp3NsF9qTybuupbt3cyBD+rEWUB+c3veK+TLTTu/hKrULCg6AaCXObv49
45xca0FxXc1/hbr7JinarjngmXj8Slr7UlTkbYKar9aGo3oMkMzbamQC4hBlp0fvH95f+A4M0iyM
RLGgcvZdk5/n0aXGOrlJ0maNFg5qgJcm38i5eRiItPJzTvnktYFcAbKV9IV3C8B8H2soubaJv0JF
nyPPA/pZDsK5/RNg+YRIflXKWe4dNH4/gt/3FwykQ7qdaoSpfoFS4WYCBPxJVcwzTfkwnAw7V+Lb
qxpBn0qJTz0sB6IIQWmOL5IhKd2isZVN9H2M+72vU+UDeCPrDYDbjQIDAQABAoIBAGBoVwVZLAfG
GxiaH0xEbfcaqG7dLzjxRMcyFSfLAXezxjnGBKDrGmjfqQxO6cSkDag4DE52XMvrq4DfjgGGagkS
1cbBD8M4jW2ufKV1j/fdaVOKR4PvLP2EAp7eMs/WHY6dPpbYCqwBLFOdxr3JfDdZ+ikl3V+QbtQj
+2oR03sC6HkpRiFJzrwatyKy3pq5CQkrO8fmzx+MtSOl4crwuX9cLw1K/6Zr0hSMP4LNc85WcH8h
7Fop2d405pQhy+dnBY19PQ0ODrv+wYXvWHClKy1U533sdqi8WcyCU2tu0MiWa5+kf/EB1J8LHi5X
Fyaut7pTU9766zBwmlVAvyeOfKECgYEA5lvwwcOop3oyPu9YevbX7gcEh5wqffnDIqFJbEbqDXd3
eSiGZfEuCcBhTQLgmX3RDMw9bbouisBEF+6DxrBDSdPQvpL/1j6/UscaecnNGPdIi9NkB+swtlOz
G4SRGx6nv+AY6y3cG11QO8q3jEXj8hzapVX7vFodt9FNor/kRTMCgYEAx1bvne8Fa7zBsYmefcDI
msWHaUdy8KuHqaWSz1IvXw0tXUnofSJ/Z51l8Lx2DVbX5Gj8IPEM/NsnYM7RiwFkRyvu+L25rswR
C2vO3kHELVU8YeZWxL4D0TQUpUcSEQzj4kFZjmnhey/8B32TtC00mOJP8vfr2pb3pk+Z9Pu03D8C
gYAreCgLeG+IAxAePi41LgV7PknFiVufYBVJoKPpUcxy9BtQeqw56nQklPAHh0Z40Hw1bQkefqav
ui5fUbv+L17TPKxEehrbBAY4iafeWY1ha7B96ksTD3emwE6pH6/+LR+8nn41SvchFs/AKLXQO5QT
KQy9bGdPmLXI7S84Sfu6bwKBgHxQjDjbQm8xFT6KC6xzGOfkvhD6/QR4hK9Y0di3cVF+30ape/Lm
G7xbnaJnddvVx+frTSmiCq56YfFuqaFd6dK05GB9uZn4K70Kq8VSEG0RFgob4wrpUWobZ7C3RN4b
QtbsWFSHVZZEk5F8UCvycTXTFXb6BD2bHrC6PdJZUy5zAoGBAIMmwTxxn5jO3l2CZxFC1+HzCh7w
Z3tq2p8JYqoLQQqVivAHfn5Lieh9ktxvWjQyZiUcEoFcWeikhwTwRmrwRPsylh15M4tfGbrYBEvN
+RXJuNVLt+ugJcbla7ghZnb1gkgxBWEVl3cW00eP0joi9kVcOyTEOLYH6fuDNso79KBz""",
"""\
MIIEpgIBAAKCAQEArnEcMtv09DktcSvk7t+RQMJqwAShxLPUfdMLsixahN1UU1VNIBY5sLBbKinS
5ixxzGTbDI9SKcM/ow7zN7KG8NEcpx3hTR45A4rJHvajeqnAbhucEcgnCu39QnGue03HW9BEJ5TM
6awpdrkUtpLoJviP8/8ClrNfQN8My10LcgsfFoQqxMo9YU5sj+kSm6/U3CS5Nuk3vxD5tabmBCBg
9rQ1komuE1Yet42NPmHdxjwC9npW01+uDoBrxmYaz1zJNNUiVk+2cwlsa1grvPU1UCBf4x3hNQC+
ZD3jGndnfcIUcrb0grsL85icFoXf/WEKjcKhGOUaVsypimCDyVkDDwIDAQABAoIBAQCtvpUihvgs
hAKh1OFZlq26/amLhVGGdMKxbBIbLZge+7+wnKaLzfc55/11OmEHxr61oMKYeOuSExmAFDTlhdhn
ZTAPt3Ae+no47/Ov9mIPm6HBSZiiEWPpu+7jTg1GXMqyxPYNImUSXNqTmHZr/lhh8HKYyKbQaOn3
1/GLYCo1M/6rgaftuJIl+uXKd3Sxy0fco7mGnqVn5+5MWibkIdZfqeVVImFcJSW9T+T5AnhihS/R
DXy0a+oX8fw06eTclM4GcOJVCjrXBH3kGiFLH//g07nhQVTHRuIPhB1cO+t1ByjX2S8zPpSuCctq
gtIe3+H6q5oIDcsy0dpoKPghTajhAoGBAOG4pxJ6X8RDXOHoqT9sZ5YCJUWpLXn4n47QVNdKW6hI
2aoveEjHxKKXJuA3EEeZA+Uu5TkgbsBv7mbgtAoZFbcoQEoNCgK5lAj/tjJXLv36RgOXuJYZivD9
rUzhbjiWvj1p2k9nQlgB7h321lLBgwhNsazKNpcX6/12WkWnAB+xAoGBAMXXgMi978AgymX5fWzT
rN/KKLd1nvxuxbKeKQ6ucHE/hssA5Zgk4ong9l09EPDfsoI5wDXgrvHVAXBXVtPq0fkO20FMaVct
27raLahp9C8yuvKPnaKD97B8r/dWsyV+eaREGAGUUiGx8QyapKyDD5byHOXIN5jBMXs9N91vfL6/
AoGBAKh3+yqNb4C6jk6GKhwOOtn5S/xMIocQi3Y6A7iT5QkbJmog9/PKNfbsPbXHIz1s9T1O3QLg
NAkpAZSDTZzj0BNd1W3vgXM7M0PsJv43l/kznKH90WUmN09a5sek0XEnAWIw6SGufhPVjPWMT7aA
e93srxm56zimQBpzBTlLRYphAoGBAJogeUPqNI0I/qTS6NOPVG5Dn9TM3T7rTTkJ3hKB8zdGtkwQ
Ns2AbrvbdhLNMBV3MCojs4pFsATWXHiYkhwmI85TtJv6W1Z/c17t+gPqB0F91AaDu9qP1La5bJzT
/lyHW1yNb+ZLFnEJnzCiiQecUtjVZY3dnPJ0D4hi+NKZuCUhAoGBAIvPIQPkqh09nNJpS5xoiPsM
4wAgmqBb1bTay+WGPqtjcOz3VcVmFX7EwTLgClTkuOXygzJno5FIx9JKb75Cd5KKvgCFXxmYt79N
vaYGagCA9BzT/93ejzuRTPpkbFBUL3AwmMyD1/DIzkADksfzhhKtB5QACkT00s0yzm4rVMwG""",
"""\
MIIEowIBAAKCAQEA0XrUDXtebDNoCNiqAUY3wizHGPmKeuMUduYeIpA+26OIT9Ougne7RYmJ6uQz
2NWuMkZhOxpuQXLMShsdjzx/YgAt/Ap7lZZMiorK5vRIsVuqI279nW8zovyGz043/20pRQy6csIA
z94mBWVpS7pjOBQ4fV0s4LxLZOxYvaSB5JsZAFjJk/40+EBGN51aLmiDfA5KUZLqpiL7eaKl14Tc
UH3Vwg4pn2DZtBvrJ5QSxtP2fOVf60U8MqR6g9xOPgxyhflcmqyPdFRpsaVTR6Rs211qkk3U+UP6
+xiWkiB/eEmw6JUnfDdLunjGKy2uYVXqyzMre8+4McmzYi7QyXLNGwIDAQABAoIBAQDAWoxrkNRE
gPPP47xADU1YFSwBh+scKnZ5M5eKX3AI2WJrAtLk5LLnCIPHWCMvwg8CBVR1JDEIEjT6+2kqRQAn
akjPfoS6+FdyhD4K01gI3EYf4WQq85iz2jSkGYwcFQ3nZOe0Rubd+XxqShPlQNKpBRBWNX/nIaAN
nWVjRrMryYJe7oycr3UF594RpBIo1DLFuIZOqttL+vy6MB+GzImEnJDYQg8vIpcRrDOt689sYFC8
7RPfK+ScWfxcU4gIfQZeIN4IqNANivXj5QIs/1uVxCXBBX9s1PPhg0HpPItTIi/r8iElyABHhfmv
JCQ7YqMfcfBOyuJuzRpG3OAaZ4IJAoGBAOlf4DtLl6TTSoBc6aLbzcQjQXyvzk35M9NtSbtP5uFh
UODa+tTubfLVK3MEPCf7PcLsuGeP649qpWNrkAebXDzibMWtz5O5y9uw11eHbxUeukFaVldVHHRt
b9lTKXTdZRfzm+chSyLQ3XikIwpRXqLK9rfir5zv7z8au1xo1BI1AoGBAOXJ6MPNE9zeFtssTjEK
x2HBaIQB7a4XjOY79nRvetLyPyLeGrJjcT554o6qpcWfi+KSGn75C9HQuAQZfmhRgmubwCn//V/S
u/ZPrZJyp/fGcIybmAM9fMJtE80u+gROaJbwomXHG+XNx8KgToHYbB5o0eLiH3EgBOmuM2yE5kwP
AoGAD39OZKGgcFGXoO6KlUYDZALzVlRWXtctmdyoCMhFjLHprQTdo0YyBu4g9IJTfFQyxb7yf+4O
tndehDugVOD8Pw7KKlZgcm7kGrKjmixkNALWW4CkOyhru0+JHeVn21rYW77Rm4eadbVo/5nmucit
gCH6QDvNbZ6BRK+BwaE0dAECgYBSDn8TZKliJuDMlY66jpnSe8mB0lp436oOEX2Z6LFYoO8Q2XV5
HG+1GrtfrOqTnrzKRNg3XWHuI/WCaUQtpmXHXZAKr4JgdJVwiNV3xX/byD4qx+lJxuxFVcRLcioP
3ZwVwoqLg8Wfk5NxGePPFGTPmyjQN2V49TEr7WwppW/D2wKBgFpAUk1vBM6kL37QMPLYXJgpk2ff
IDjChskWlUaR3yWTZaNoXLTMG/6SJwdAAr/riqnwAWox7Wv2UR5cBYsJe5vJK++6KthF2umNnzGg
Ymi6HkoIxiR2jmr56kDPeInqk6AB4vhqSn9PtLGtQyXp+0dkfeiE5qz36QX/EQTPULcd""",
]
TEST_KEYS_2048 = [
mixminion.Crypto.pk_decode_private_key(base64.decodestring(s))
for s in TEST_KEYS_2048 ]
del s
| mit | 4,014,876,960,043,403,300 | 41.931208 | 111 | 0.723571 | false |
hack4sec/ws-cli | classes/threads/SeleniumThread.py | 1 | 5065 | # -*- coding: utf-8 -*-
"""
This is part of WebScout software
Docs EN: http://hack4sec.pro/wiki/index.php/WebScout_en
Docs RU: http://hack4sec.pro/wiki/index.php/WebScout
License: MIT
Copyright (c) Anton Kuzmin <http://anton-kuzmin.ru> (ru) <http://anton-kuzmin.pro> (en)
Common thread class for selenium threads
"""
import os
import random
import threading
import shutil
import time
from selenium import webdriver
from selenium.webdriver.common.proxy import ProxyType, Proxy
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from classes.Registry import Registry
from classes.SeleniumBrowser import SeleniumBrowser
class SeleniumThread(threading.Thread):
""" Common thread class for selenium threads """
requests_count = 0
proxy_using = False
browser = None
def up_requests_count(self):
""" Up requests counter """
self.requests_count += 1
if self.proxy_using and self.requests_count >= int(Registry().get('config')['main']['requests_per_proxy']):
#print "Recreating browser"
self.browser_close()
self.browser_create()
self.requests_count = 0
def browser_create(self, retry_counter = 0):
""" Create a browser """
if retry_counter >= int(Registry().get('config')['selenium']['browser_recreate_errors_limit']):
raise Exception("WebDriver can`t create browser. Check errors log selenium settings.")
self_num = random.randint(0, 99999)
myProxy = Registry().get('proxies').get_proxy()
if myProxy:
proxy = Proxy({
'proxyType': ProxyType.MANUAL,
'httpProxy': myProxy,
'ftpProxy': myProxy,
'sslProxy': myProxy,
'noProxy': ''
})
self.proxy_using = True
else:
#print "No proxy"
proxy = None
self.proxy_using = False
profile_path = '/tmp/wr-selenium-{0}/'.format(self_num)
if os.path.exists(profile_path):
shutil.rmtree(profile_path)
if not os.path.exists(profile_path):
os.mkdir(profile_path)
profile = webdriver.FirefoxProfile(profile_path)
if Registry().get('config')['selenium']['css_load'] != '1':
profile.set_preference('permissions.default.stylesheet', 2)
if Registry().get('config')['selenium']['images_load'] != '1':
profile.set_preference('permissions.default.image', 2)
if Registry().get('config')['selenium']['flash_load'] != '1':
profile.set_preference('dom.ipc.plugins.enabled.libflashplayer.so', 'false')
profile.set_preference("browser.startup.homepage", "about:blank")
profile.set_preference("startup.homepage_welcome_url", "about:blank")
profile.set_preference("startup.homepage_welcome_url.additional", "about:blank")
if myProxy and len(myProxy):
agent_IP, agent_Port = myProxy.split(":")
profile.set_preference("network.proxy.type", 1)
profile.set_preference("network.proxy.share_proxy_settings", True)
profile.set_preference("network.http.use-cache", False)
profile.set_preference("network.proxy.http", agent_IP)
profile.set_preference("network.proxy.http_port", int(agent_Port))
profile.set_preference('network.proxy.ssl_port', int(agent_Port))
profile.set_preference('network.proxy.ssl', agent_IP)
profile.set_preference('network.proxy.socks', agent_IP)
profile.set_preference('network.proxy.socks_port', int(agent_Port))
fo = open('/tmp/firefox-run-{0}.log'.format(self_num), "w")
binary = FirefoxBinary(firefox_path=Registry().get('config')['selenium']['firefox_path'], log_file=fo)
try:
self.browser = SeleniumBrowser(
profile,
firefox_binary=binary,
ddos_phrase=self.ddos_phrase,
proxy=proxy,
ddos_human=self.ddos_human,
)
except WebDriverException as e:
self.logger.ex(e)
self.logger.log("Re-trying. Browser creation error: " + str(e))
shutil.rmtree(profile_path)
time.sleep(5)
retry_counter += 1
return self.browser_create(retry_counter)
self.browser.set_page_load_timeout(Registry().get('config')['selenium']['timeout_page_load'])
self.browser.implicitly_wait(Registry().get('config')['selenium']['timeout_page_load'])
def browser_close(self):
""" Quit browser """
try:
self.browser.close()
self.browser.quit()
self.browser.binary.process.kill()
if os.path.exists(self.browser.profile_path.replace('webdriver-py-profilecopy', '')):
shutil.rmtree(self.browser.profile_path.replace('webdriver-py-profilecopy', ''))
except BaseException:
pass
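# Illustrative sketch: a minimal worker built on SeleniumThread. The attribute
# names ddos_phrase, ddos_human and logger are assumptions taken from how
# browser_create() uses them above; a real subclass also needs a configured
# Registry (config and proxies) before browser_create() is called.
class ExampleSeleniumWorker(SeleniumThread):
    def __init__(self, logger):
        threading.Thread.__init__(self)
        self.ddos_phrase = ""    # passed through to SeleniumBrowser
        self.ddos_human = False  # human-emulation flag passed to SeleniumBrowser
        self.logger = logger     # must provide .log() and .ex(), as used above
    def run(self):
        self.browser_create()
        try:
            self.browser.get("http://example.com/")  # placeholder target URL
            self.up_requests_count()
        finally:
            self.browser_close()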
| mit | -5,560,726,856,216,333,000 | 38.570313 | 115 | 0.6154 | false |
agesmundo/IDParser | scripts/SharedTaskCommon.py | 1 | 16955 | #!/usr/bin/python
import sys
import string
rootDeprel = u'ROOT' # the dependency relation for the root
emptyFeatsString = u'_' # if no morphological features exist (only PoS)
featsJoiner = u'|' # to join morphological features into one string
emptyProjColumnString = u'_' # if no PHEAD or PDEPREL available
class NonTerminal:
def __init__(self,constLabel,features,deprel):
self.constLabel = constLabel
self.features = features
self.head = {} # a dictionary of references to the lexical heads
self.deprel = deprel
self.children = []
def getLexHead(self,head_type):
if not self.head.has_key(head_type): # does not have this head type
# this can happen if a proper head child could not be found
# according to the normal head rules and the default rules
# have been applied, resulting e.g. in an NP being the
# head of a finite clause
head_type = 'head' # take default head type
return self.head[head_type]
class Terminal:
def __init__(self, id, form, lemma, cpostag, postag, feats, deprel,
phead = emptyProjColumnString, pdeprel = emptyProjColumnString):
self.id = id
self.form = form
self.lemma = lemma
self.cpostag = cpostag
self.postag = postag
self.feats = feats
self.deprel = deprel
self.phead = phead
self.pdeprel = pdeprel
# initially, a terminal links to itself;
# needed for recursive percolation of lexical heads
self.head = self
def getLexHead(self,head_type):
# the head_type is irrelevant:
# terminals only have one head
return self.head
class CorpusError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return `self.value`
def processOptionsBlanks(options):
"""turn string of column widths (e.g. '3|10|10|5')
into list (e.g. [3,10,10,5])"""
if options.blanks:
list = options.blanks.split('|')
if len(list) != len(options.output):
print >>sys.stderr, ("Value to blanks option does not \
have same number of elements as output columns chosen:\n\
%s != %s" % (list,options.output))
sys.exit(-1)
for i in range(len(list)):
try:
int = string.atoi(list[i])
except ValueError:
print >>sys.stderr, ("Non-integer value in blanks option: %s" %
list[i])
sys.exit(-1)
else:
list[i] = int
options.blanks = list
# obsolete: just use '... = dict.fromkeys(list)' instead
# thanks to EM for pointing this out
#def turnListIntoHash(list):
# hash = {}
# for i in range(len(list)):
# hash[list[i]] = 1
# return hash
def handleProblem(infile, problem_type, msg, options):
"depending on options: raise exception or just warn or stay silent"
if options.discard_problems.has_key(problem_type):
raise CorpusError, msg
else:
if not options.silence_warnings.has_key(problem_type):
print >>sys.stderr, ("%s: Warning: %s" %
(infile, msg)).encode(options.encoding)
def addOptions(parser):
# what types of problems can occur during conversion;
# list can be used to selectively silence warnings
# or discard sentences (or files, with 'XML') that
# have those problems
problems_types = [ 'XML', # error in XML parsing
'cycle', # cycles in dependency structure
'label', # wrong POS/constituent/function label
'index', # index of head points to non-existent token
'punct', # problem in reattaching children of punctuation
'whitespace', # missing or superfluous whitespace
'discontinuity', # problem relating to annotation of discontinuity
'ambiguity', # ambiguous structure annotated in treebank
'tree', # problem with structure of tree (not discontinuity)
'head_table', # cannot find head child
'other' # anything else
]
parser.add_option('-b', '--blanks',
dest='blanks',
action='store',
metavar='FORMAT',
default='',
help="""
use variable number of blanks as
output column separator (default is tab);
expects argument FORMAT of form: i|j|k|...
where i,j,k etc. are integer>0, indicating the minimum
width of that column (there must be as many integers as
columns requested in the output)
"""
)
parser.add_option('-c', '--condition',
dest='condition',
action='store',
metavar='CONDITION',
default='',
help="""use only those files/extracts/sentences that
fulfill CONDITION (e.g. <743 or >=743); useful for
splitting into training and test set"""
)
parser.add_option('-d', '--discard_problems',
dest='discard_problems',
choices=problems_types,
action='append',
default = [],
help="""discard sentence (or file, for XML problems) that
exhibits certain problems (default is fix, not discard);
possible choices:"""+' '.join(problems_types)
)
parser.add_option('-e', '--encoding',
dest='encoding',
action='store',
default='utf-8',
help="output character encoding (default is utf-8)")
parser.add_option('-f', '--file',
dest='file',
action='store_true',
default=False,
help="""write output to file, replacing original
suffix by .conll (default is to standard output)"""
)
parser.add_option('-o', '--output',
dest='output',
choices=['id','form','lemma','cpostag','postag',
'feats','head','deprel','phead','pdeprel'],
action='append',
default = [],
help="""print named column in output, in order
specified on command line(default is none);
possible choices:
'id','form','lemma','cpostag','postag',
'feats','head','deprel','phead','pdeprel'"""
)
parser.add_option('-s', '--silence_warnings',
dest='silence_warnings',
choices=problems_types,
action='append',
default = [],
help="""don't warn about certain types of conversion
problems (default is to warn about every problem);
possible choices:"""+' '.join(problems_types)
)
parser.add_option('-p', '--punctuation',
dest='punctuation',
action='store_true',
default=False,
help='links words linking to punctuation to punctuation\'s head instead'
)
def checkCycles(infile, options, token_list, rootFunction):
for i in range(1,len(token_list)):
head_path = { i: 1 }
j = i
while j != 0:
j = token_list[ j ]['head']
if head_path.has_key(j): # cycle found!
# raise exception or just warn or stay silent
msg = (u"Cycle detected at token %d (%s)" %
(j, token_list[ j ]['form']))
handleProblem(infile, 'cycle', msg, options)
# break cycle by linking token to root
token_list[ j ]['head'] = 0
token_list[ j ]['deprel'] = rootFunction
break
else:
head_path[j] = 1
def checkCycles_tmp2(infile, options, token_list, rootFunction):
for i in range(1,len(token_list)):
head_path = { i: 1 }
j = i
while j != 0:
j = token_list[ j ].head
if head_path.has_key(j): # cycle found!
# raise exception or just warn or stay silent
msg = (u"Cycle detected at token %d (%s)" %
(j, token_list[ j ].form))
handleProblem(infile, 'cycle', msg, options)
# break cycle by linking token to root
token_list[ j ].head = 0
token_list[ j ].deprel = rootFunction
break
else:
head_path[j] = 1
def checkCyclesPhead(infile, options, token_list, rootFunction):
for i in range(1,len(token_list)):
head_path = { i: 1 }
j = i
while j != 0 and token_list[ j ].phead != emptyProjColumnString:
# if PHEAD column contains dummy value, just stop checking
j = token_list[ j ].phead
if head_path.has_key(j): # cycle found!
# raise exception or just warn or stay silent
msg = (u"PHEAD cycle detected at token %d (%s)" %
(j, token_list[ j ].form))
handleProblem(infile, 'cycle', msg, options)
# break cycle by linking token to root
token_list[ j ].phead = 0
token_list[ j ].pdeprel = rootFunction
break
else:
head_path[j] = 1
def attachPunctHigh(infile, options, token_list, punctuationPos,
punctuationFunction, rootFunction):
"""
Reattach punctuation as high as possible,
change deprel to value punctuationFunction.
"""
for i in range(1,len(token_list)):
token1 = token_list[ i ]
if token1['postag'] == punctuationPos:
punc = token1
# find highest attachment point
highest = 0
head_path = {}
if i>1:
j=i-1
while token_list[ j ]['head'] != 0:
if head_path.has_key(j):
# raise exception or just warn or stay silent
msg = (u"Cycle detected at token %d (%s)" %
(j, token_list[ j ]['form']))
handleProblem(infile, 'cycle', msg, options)
# break cycle by linking token to root
token_list[ j ]['head'] = 0
token_list[ j ]['deprel'] = rootFunction
break
head_path[j] = 1
j = token_list[ j ]['head']
highest = j
if i<len(token_list)-1:
j=i+1
while token_list[ j ]['head'] != 0:
if head_path.has_key(j):
if head_path[j] == 2:
# raise exception or just warn or stay silent
msg = (u"Cycle detected at token %d (%s)" %
(j, token_list[ j ]['form']))
handleProblem(infile, 'cycle', msg, options)
# break cycle by linking token to root
token_list[ j ]['head'] = 0
token_list[ j ]['deprel'] = rootFunction
break
elif head_path[j] == 1:
# was also on other path
break
head_path[j] = 2
j=token_list[ j ]['head']
highest = j
# make punctuation link to highest
punc['head'] = highest
if highest == 0:
punc['deprel'] = rootFunction
else:
punc['deprel'] = punctuationFunction
return token_list
def printSentences(sent_list, options, outstream):
"""
print all sentences in sent_list;
tokens are dictionaries
"""
# ??? should format string be unicode string regardless of options.encoding?
format = []
for j in range(len(options.output)): # for each column
if options.blanks:
width = options.blanks[j] # pad with blanks
if j < len(options.output)-1: # non-last column
format_string = u'%'+`width`+u's ' # e.g. u"%-15s "
else: # last column
format_string = u'%'+`width`+u's' # e.g. u"%-15s"
else: # separate by tab
if j < len(options.output)-1: # non-last column
format_string = u'%s\t'
else: # last column
format_string = u'%s'
format.append(format_string)
for sent in sent_list: # for each sentence
word_count = 0
for i in range(1,len(sent)): # for each token
token = sent[i]
word_count += 1
for j in range(len(options.output)): # for each column
column_name = options.output[j]
if column_name == 'id':
output_string = format[j] % word_count
else:
value = token[column_name] # get value for column
if column_name == 'feats':
if value == []: # if no features:
value = emptyFeatsString # use default value
else:
value = featsJoiner.join(value) # else: join
output_string = format[j] % value # format string
outstream.write(output_string.encode(options.encoding)) # print
outstream.write("\n") # newline at end of token
outstream.write("\n") # extra newline at end of token
def printSentences_tmp2(sent_list, options, outstream):
"""
print all sentences in sent_list;
tokens are class instances
"""
# ??? should format string be unicode string regardless of options.encoding?
format = []
for j in range(len(options.output)): # for each column
if options.blanks:
width = options.blanks[j] # pad with blanks
if j < len(options.output)-1: # non-last column
format_string = u'%'+`width`+u's ' # e.g. u"%-15s "
else: # last column
format_string = u'%'+`width`+u's' # e.g. u"%-15s"
else: # separate by tab
if j < len(options.output)-1: # non-last column
format_string = u'%s\t'
else: # last column
format_string = u'%s'
format.append(format_string)
for sent in sent_list: # for each sentence
word_count = 0
for i in range(1,len(sent)): # for each token
token = sent[i]
word_count += 1
for j in range(len(options.output)): # for each column
column_name = options.output[j]
if column_name == 'id':
output_string = format[j] % word_count # format string
# ??? check that word count is same as ID?
else:
value = getattr(token,column_name) # get value for column
if column_name == 'feats':
if value == []: # if no features:
value = emptyFeatsString # use default value
else:
value = featsJoiner.join(value) # else: join
output_string = format[j] % value # format string
outstream.write(output_string.encode(options.encoding)) # print
outstream.write("\n") # newline at end of token
outstream.write("\n") # extra newline at end of token
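# Illustrative sketch: build one Terminal the way the conversion code above
# does and render its FEATS column; all token values here are invented.
if __name__ == "__main__":
    demo = Terminal(1, u"cats", u"cat", u"N", u"NNS", [u"num=pl"], u"SBJ")
    feats = featsJoiner.join(demo.feats) if demo.feats else emptyFeatsString
    line = u"%d\t%s\t%s\t%s" % (demo.id, demo.form, demo.postag, feats)
    print line.encode("utf-8")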
| gpl-3.0 | -345,788,771,460,872,700 | 41.176617 | 94 | 0.47744 | false |
ArnaudKOPP/BioREST | BioREST/Fasta.py | 1 | 8602 | # coding=utf-8
__author__ = "Arnaud KOPP"
__copyright__ = "© 2015-2016 KOPP Arnaud All Rights Reserved"
__credits__ = ["KOPP Arnaud"]
__license__ = "GNU GPL V3.0"
__maintainer__ = "Arnaud KOPP"
__email__ = "[email protected]"
__status__ = "Production"
from collections import OrderedDict
import logging
import pandas as pd
log = logging.getLogger(__name__)
class MultiFASTA(object):
"""
Class for FASTA files
"""
def __init__(self):
# fetch the sequence using this attribute
self._fasta_fetcher = FASTA()
# an ordered dictionary to store the fasta contents
self._fasta = OrderedDict()
def __len__(self):
return len(self._fasta)
def _get_fasta(self):
return self._fasta
fasta = property(_get_fasta, doc="Returns all FASTA instances ")
def _get_ids(self):
return [f for f in self._fasta.keys()]
ids = property(_get_ids, doc="returns list of keys/accession identifiers")
def load_fasta(self, ids):
"""
Loads a single FASTA file into the dictionary
:param ids:
"""
if isinstance(ids, str):
ids = [ids]
for id_ in ids:
self._fasta_fetcher.load(id_)
# create a new instance of FASTA and save fasta data
f = FASTA()
f._fasta = self._fasta_fetcher._fasta[:]
# append in the ordered dictionary
self._fasta[id_] = f
log.info("%s loaded" % id_)
def save_fasta(self, filename):
"""
Save all FASTA into a file
:param filename:
"""
fh = open(filename, "w")
for f in self._fasta.values():
fh.write(f.fasta)
fh.close()
def read_fasta(self, filename):
"""
Load several FASTA from a filename
:param filename:
"""
fh = open(filename, "r")
data = fh.read()
fh.close()
        # we split according to the ">" character
for thisfasta in data.split(">")[1:]:
f = FASTA()
f._fasta = f._interpret(thisfasta)
if f.accession is not None and f.accession not in self.ids:
self._fasta[f.accession] = f
else:
log.warning("Accession %s is already in the ids list or could not be interpreted. skipped" %
str(f.accession))
def _get_df(self):
df = pd.concat([self.fasta[id_].df for id_ in self.fasta.keys()])
df.reset_index(inplace=True)
return df
df = property(_get_df)
def hist_size(self, **kargs):
"""
:param kargs:
"""
try:
import pylab
self.df.Size.hist(**kargs)
pylab.title("Histogram length of the sequences")
pylab.xlabel("Length")
except:
pass
class FASTA(object):
"""
Fasta class
"""
known_dbtypes = ["sp", "gi"]
def __init__(self):
self._fasta = None
def _get_fasta(self):
return self._fasta
fasta = property(_get_fasta, doc="returns FASTA content")
# for all types
def _get_sequence(self):
if self.fasta:
return "".join(self.fasta.split("\n")[1:])
else:
raise ValueError("You need to load a fasta sequence first using get_fasta or read_fasta")
sequence = property(_get_sequence, doc="returns the sequence only")
# for all types
def _get_header(self):
if self.fasta:
return self.fasta.split("\n")[0]
else:
raise ValueError("You need to load a fasta sequence first using get_fasta or read_fasta")
header = property(_get_header, doc="returns header only")
def _get_dbtype(self):
dbtype = self.header.split("|")[0].replace(">", "")
return dbtype
dbtype = property(_get_dbtype)
# for all types
def _get_identifier(self):
return self.header.split(" ")[0]
identifier = property(_get_identifier)
def _get_entry(self):
return self.header.split("|")[2].split(" ")[0]
entry = property(_get_entry, doc="returns entry only")
# swiss prot only
def _get_accession(self):
if self.dbtype == "sp":
# header = self.header
return self.identifier.split("|")[1]
elif self.dbtype == "gi":
return self.identifier.split("|")[1]
accession = property(_get_accession)
# swiss prot only
def _get_name_sp(self):
if self.dbtype == "sp":
header = self.header
return header.split(" ")[0].split("|")[2]
name = property(_get_name_sp)
def _get_df(self):
df = pd.DataFrame({
"Identifiers": [self.identifier],
"Accession": [self.accession],
"Entry": [self.entry],
"Database": [self.dbtype],
"Organism": [self.organism],
"PE": [self.PE],
"SV": [self.SV],
"Sequence": [self.sequence],
"Header": [self.header],
"Size": [len(self.sequence)]})
return df
df = property(_get_df)
def _get_info_from_header(self, prefix):
if prefix not in self.header:
return None
# finds the prefix
index = self.header.index(prefix + "=")
# remove it
name = self.header[index:][3:]
        # figure out if there is another = sign to split the string
# otherwise, the prefix we looked for is the last one anyway
if "=" in name:
name = name.split("=")[0]
# here each = sign in FASTA is preceded by 2 characters that we must remove
name = name[0:-2]
name = name.strip()
else:
name = name.strip()
return name
def _get_gene_name(self):
return self._get_info_from_header("GN")
gene_name = property(_get_gene_name,
doc="returns gene name from GN keyword found in the header if any")
def _get_organism(self):
return self._get_info_from_header("OS")
organism = property(_get_organism,
doc="returns organism from OS keyword found in the header if any")
def _get_PE(self):
pe = self._get_info_from_header("PE")
if pe is not None:
return int(pe)
PE = property(_get_PE,
doc="returns PE keyword found in the header if any")
def _get_SV(self):
sv = self._get_info_from_header("SV")
if sv is not None:
return int(sv)
SV = property(_get_SV,
doc="returns SV keyword found in the header if any")
def __str__(self):
str_ = self.fasta
return str_
def load(self, id_):
self.load_fasta(id_)
def load_fasta(self, id_):
"""
:param id_:
:raise Exception:
"""
from BioREST.Uniprot import Uniprot
u = Uniprot()
try:
res = u.retrieve(id_, frmt="fasta")
            # some entries in uniprot are valid but obsolete and return an empty string
if res == "":
raise Exception
self._fasta = res[:]
except:
pass
def save_fasta(self, filename):
"""
Save FASTA file into a filename
:param str filename: where to save it
"""
if self._fasta is None:
raise ValueError("No fasta was read or downloaded. Nothing to save.")
fh = open(filename, "w")
fh.write(self._fasta)
fh.close()
def read_fasta(self, filename):
"""
:param filename:
:raise ValueError:
"""
fh = open(filename, "r")
data = fh.read()
fh.close()
# Is there more than one sequence ?
data = data.split(">")[1:]
if len(data) > 1 or len(data) == 0:
raise ValueError(
"""Only one sequence expected to be found. Found %s. Please use MultiFASTA class instead""" % len(data))
self._data = data
if data.count(">sp|") > 1:
raise ValueError("""It looks like your FASTA file contains more than
one FASTA. You must use MultiFASTA class instead""")
self._fasta = data[:]
self._fasta = self._fasta[0]
if self.dbtype not in self.known_dbtypes:
            log.warning("Only sp and gi headers are recognised so far, but sequence and header are loaded")
@staticmethod
def _interpret(data):
# cleanup the data in case of empty spaces or \n characters
return data
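# Illustrative sketch: exercise the header parsing on a made-up Swiss-Prot
# style record by filling the internal _fasta buffer directly (load_fasta
# would normally fetch the same kind of record from Uniprot).
if __name__ == "__main__":
    f = FASTA()
    f._fasta = (">sp|P12345|DEMO_HUMAN Demo protein OS=Homo sapiens GN=DEMO PE=1 SV=2\n"
                "MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ\n")
    print(f.accession, f.gene_name, f.organism, f.PE, f.SV, len(f.sequence))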
| gpl-3.0 | -5,907,526,804,875,600,000 | 27.2 | 120 | 0.540402 | false |
DataViva/dataviva-site | dataviva/utils/jinja_helpers.py | 1 | 4548 | # -*- coding: utf-8 -*-
from re import sub
from jinja2 import Markup
from dataviva.translations.dictionary import dictionary
from dataviva.utils.num_format import num_format
from dataviva.utils.title_case import title_case
from decimal import *
from flask import g
import locale
from flask.ext.babel import gettext
''' A helper class for dealing with injecting times into the page using moment.js'''
class jinja_momentjs:
def __init__(self, timestamp):
self.timestamp = timestamp
def __call__(self, *args):
return self.format(*args)
def render(self, format):
return Markup("<script>\ndocument.write(moment(\"%s\").%s);\n</script>" % (self.timestamp.strftime("%Y-%m-%dT%H:%M:%S Z"), format))
def format(self, fmt):
return self.render("format(\"%s\")" % fmt)
def calendar(self):
return self.render("calendar()")
def fromNow(self):
return self.render("fromNow()")
class jinja_formatter:
def __init__(self, text):
self.text = text
def __call__(self, *args):
return self.format(*args)
@staticmethod
def is_number(s):
if s is None:
return False
try:
float(s)
return True
except ValueError:
return False
def render(self, type):
if self.is_number(self.text):
num = float(self.text) if "." in str(self.text) else int(self.text)
return Markup(num_format(num, type))
else:
dict = dictionary()
if self.text in dict:
return Markup(dict[self.text])
else:
return Markup(title_case(self.text))
''' A helper function for stripping out html tags for showing snippets of user submitted content'''
def jinja_strip_html(s):
return sub('<[^<]+?>', '', s)
def jinja_split(s, char):
return s.split(char)
def max_digits(number, digits, counter=None):
negative = False
separator = ',' if g.locale == 'pt' and counter == None else '.'
if number and number < 0:
negative = True
number = abs(number)
old_number = number
if type(number) == float:
number = Decimal(number)
if type(number) == Decimal:
if number > 1000:
number = int(number)
if number >= 1000:
str_n = [1]
for i in range(len(str(number)), 0, -3):
if i > 3:
str_n+="000"
else:
break
num = int(''.join(map(str, str_n)))
number = (float(number)/num)*10
else:
if number < 10 and number >= 1:
number = number * 10
if number < 1:
digits = digits if number > 0.00 else digits+1
if len(str(number)) > digits+1 and int(str(number)[digits+1]) >= 5:
number = round(number, 2)
number_str = str(number)
number_str = number_str.replace('.', separator);
result = number_str[0:digits+1]
else:
number_str = str(float(number))
number_str = number_str.replace('.', separator);
if old_number < 10 or old_number >= 1000:
number_list = list(number_str)
comma, digit = number_list.index(separator), number_list.index(separator)-1
number_list[digit], number_list[comma] = number_list[comma], number_list[digit]
number_str = ''.join(number_list)
if len(number_str) > 3 and number_str[digits] == separator:
result = number_str[0:digits]
else:
result = number_str[0:digits+1]
if negative:
return "-" + result
else:
return result
def ordinal(number, gender='m'):
if g.locale == 'en':
if number == 1:
return "st"
elif number == 2:
return "nd"
elif number == 3:
return "rd"
elif number in range(4, 10) or number == 0:
return "th"
else:
return ordinal(int(str(number)[-1]))
else:
if gender == 'm':
return "º"
else:
return "ª"
def jinja_magnitude(number):
if not number:
return ''
integer = str(abs(int(number)))
if g.locale == 'en':
orders_of_magnitude = ['', gettext('Thousand'), gettext('Million'), gettext('Billion'), gettext('Trillion')]
elif g.locale == 'pt':
orders_of_magnitude = ['', gettext('Thousands'), gettext('Millions'), gettext('Billions'), gettext('Trillions')]
return orders_of_magnitude[len(integer[::3]) - 1]
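# Illustrative sketch: jinja_strip_html and jinja_split have no dependency on
# the Flask request context, so they can be exercised directly.
if __name__ == "__main__":
    print jinja_strip_html(u"<p>Hello <b>world</b></p>")  # -> Hello world
    print jinja_split(u"2010,2011,2012", u",")            # -> [u'2010', u'2011', u'2012']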
| mit | 5,653,166,006,842,362,000 | 29.28 | 139 | 0.559665 | false |
Microvellum/Fluid-Designer | win64-vc/2.78/Python/bin/2.78/scripts/addons/add_mesh_extra_objects/mesh_discombobulator.py | 2 | 27573 | # GPL # Original Authors: Evan J. Rosky (syrux), Chichiri, Jace Priester #
import bpy
import random
import math
from bpy.types import (
Operator,
Menu,
)
from mathutils import (
Vector,
Quaternion,
)
# ################### Globals #################### #
doprots = True
# Data in which we will build the new discombobulated mesh
nPolygons = []
nVerts = []
Verts = []
Polygons = []
dVerts = []
dPolygons = []
i_prots = [] # index of the top polygons on which we'll generate the doodads
i_dood_type = [] # type of doodad (given by index of the doodad obj)
# ############### Utility Functions ############### #
def randnum(a, b):
return random.random() * (b - a) + a
def randVertex(a, b, c, d, Verts):
"""Return a vector of a random vertex on a quad-polygon"""
i = random.randint(1, 2)
A, B, C, D = 0, 0, 0, 0
if(a == 1):
A, B, C, D = a, b, c, d
else:
A, B, C, D = a, d, c, b
i = randnum(0.1, 0.9)
vecAB = Verts[B] - Verts[A]
E = Verts[A] + vecAB * i
vecDC = Verts[C] - Verts[D]
F = Verts[D] + vecDC * i
i = randnum(0.1, 0.9)
vecEF = F - E
O = E + vecEF * i
return O
# ################## Protusions #################### #
def fill_older_datas(verts, polygon):
""" Specifically coded to be called by the function addProtusionToPolygon,
    it sets up a tuple which contains the vertices from the base and the top of the protusions.
"""
temp_vertices = []
temp_vertices.append(verts[polygon[0]].copy())
temp_vertices.append(verts[polygon[1]].copy())
temp_vertices.append(verts[polygon[2]].copy())
temp_vertices.append(verts[polygon[3]].copy())
temp_vertices.append(verts[polygon[0]].copy())
temp_vertices.append(verts[polygon[1]].copy())
temp_vertices.append(verts[polygon[2]].copy())
temp_vertices.append(verts[polygon[3]].copy())
return temp_vertices
def extrude_top(temp_vertices, normal, height):
    """ This function extrudes the polygon composed of the first four members of the tuple
    temp_vertices along the normal, scaled by the height of the extrusion.
"""
j = 0
while j < 3:
temp_vertices[0][j] += normal[j] * height
temp_vertices[1][j] += normal[j] * height
temp_vertices[2][j] += normal[j] * height
temp_vertices[3][j] += normal[j] * height
j += 1
def scale_top(temp_vertices, center, normal, height, scale_ratio):
    """ This function scales the polygon composed of the first four members of the tuple temp_vertices. """
vec1 = [0, 0, 0]
vec2 = [0, 0, 0]
vec3 = [0, 0, 0]
vec4 = [0, 0, 0]
j = 0
while j < 3:
center[j] += normal[j] * height
vec1[j] = temp_vertices[0][j] - center[j]
vec2[j] = temp_vertices[1][j] - center[j]
vec3[j] = temp_vertices[2][j] - center[j]
vec4[j] = temp_vertices[3][j] - center[j]
temp_vertices[0][j] = center[j] + vec1[j] * (1 - scale_ratio)
temp_vertices[1][j] = center[j] + vec2[j] * (1 - scale_ratio)
temp_vertices[2][j] = center[j] + vec3[j] * (1 - scale_ratio)
temp_vertices[3][j] = center[j] + vec4[j] * (1 - scale_ratio)
j += 1
def add_prot_polygons(temp_vertices):
""" Specifically coded to be called by addProtusionToPolygon, this function
    puts the data from the generated protusion at the end of the tuples Verts and Polygons,
    which will later be used to generate the final mesh.
"""
global Verts
global Polygons
global i_prots
findex = len(Verts)
Verts += temp_vertices
polygontop = [findex + 0, findex + 1, findex + 2, findex + 3]
polygon1 = [findex + 0, findex + 1, findex + 5, findex + 4]
polygon2 = [findex + 1, findex + 2, findex + 6, findex + 5]
polygon3 = [findex + 2, findex + 3, findex + 7, findex + 6]
polygon4 = [findex + 3, findex + 0, findex + 4, findex + 7]
Polygons.append(polygontop)
i_prots.append(len(Polygons) - 1)
Polygons.append(polygon1)
Polygons.append(polygon2)
Polygons.append(polygon3)
Polygons.append(polygon4)
def addProtusionToPolygon(obpolygon, verts, minHeight, maxHeight, minTaper, maxTaper):
"""Create a protusion from the polygon "obpolygon" of the original object and use
    several values sent by the user. It calls the following functions, in this order:
- fill_older_data;
- extrude_top;
- scale_top;
- add_prot_polygons;
"""
# some useful variables
polygon = obpolygon.vertices
tVerts = list(fill_older_datas(verts, polygon)) # list of temp vertices
height = randnum(minHeight, maxHeight) # height of generated protusion
scale_ratio = randnum(minTaper, maxTaper)
# extrude the top polygon
extrude_top(tVerts, obpolygon.normal, height)
# Now, we scale, the top polygon along its normal
scale_top(tVerts, GetPolyCentroid(obpolygon, verts), obpolygon.normal, height, scale_ratio)
# Finally, we add the protusions to the list of polygons
add_prot_polygons(tVerts)
# ################# Divide a polygon ############### #
def divide_one(list_polygons, list_vertices, verts, polygon, findex):
""" called by divide_polygon, to generate a polygon from one polygon, maybe I could simplify this process """
temp_vertices = []
temp_vertices.append(verts[polygon[0]].copy())
temp_vertices.append(verts[polygon[1]].copy())
temp_vertices.append(verts[polygon[2]].copy())
temp_vertices.append(verts[polygon[3]].copy())
list_vertices += temp_vertices
list_polygons.append([findex + 0, findex + 1, findex + 2, findex + 3])
def divide_two(list_polygons, list_vertices, verts, polygon, findex):
""" called by divide_polygon, to generate two polygons from one polygon and
add them to the list of polygons and vertices which form the discombobulated mesh
"""
temp_vertices = []
temp_vertices.append(verts[polygon[0]].copy())
temp_vertices.append(verts[polygon[1]].copy())
temp_vertices.append(verts[polygon[2]].copy())
temp_vertices.append(verts[polygon[3]].copy())
temp_vertices.append((verts[polygon[0]] + verts[polygon[1]]) / 2)
temp_vertices.append((verts[polygon[2]] + verts[polygon[3]]) / 2)
list_vertices += temp_vertices
list_polygons.append([findex + 0, findex + 4, findex + 5, findex + 3])
list_polygons.append([findex + 1, findex + 2, findex + 5, findex + 4])
def divide_three(list_polygons, list_vertices, verts, polygon, findex, center):
""" called by divide_polygon, to generate three polygons from one polygon and
add them to the list of polygons and vertices which form the discombobulated mesh
"""
temp_vertices = []
temp_vertices.append(verts[polygon[0]].copy())
temp_vertices.append(verts[polygon[1]].copy())
temp_vertices.append(verts[polygon[2]].copy())
temp_vertices.append(verts[polygon[3]].copy())
temp_vertices.append((verts[polygon[0]] + verts[polygon[1]]) / 2)
temp_vertices.append((verts[polygon[2]] + verts[polygon[3]]) / 2)
temp_vertices.append((verts[polygon[1]] + verts[polygon[2]]) / 2)
temp_vertices.append(center.copy())
list_vertices += temp_vertices
list_polygons.append([findex + 0, findex + 4, findex + 5, findex + 3])
list_polygons.append([findex + 1, findex + 6, findex + 7, findex + 4])
list_polygons.append([findex + 6, findex + 2, findex + 5, findex + 7])
def divide_four(list_polygons, list_vertices, verts, polygon, findex, center):
""" called by divide_polygon, to generate four polygons from one polygon and
add them to the list of polygons and vertices which form the discombobulated mesh
"""
temp_vertices = []
temp_vertices.append(verts[polygon[0]].copy())
temp_vertices.append(verts[polygon[1]].copy())
temp_vertices.append(verts[polygon[2]].copy())
temp_vertices.append(verts[polygon[3]].copy())
temp_vertices.append((verts[polygon[0]] + verts[polygon[1]]) / 2)
temp_vertices.append((verts[polygon[2]] + verts[polygon[3]]) / 2)
temp_vertices.append((verts[polygon[1]] + verts[polygon[2]]) / 2)
temp_vertices.append(center.copy())
temp_vertices.append((verts[polygon[0]] + verts[polygon[3]]) / 2)
temp_vertices.append(center.copy())
list_vertices += temp_vertices
list_polygons.append([findex + 0, findex + 4, findex + 7, findex + 8])
list_polygons.append([findex + 1, findex + 6, findex + 7, findex + 4])
list_polygons.append([findex + 6, findex + 2, findex + 5, findex + 7])
list_polygons.append([findex + 8, findex + 7, findex + 5, findex + 3])
def dividepolygon(obpolygon, verts, number):
"""Divide the poly into the wanted number of polygons"""
global nPolygons
global nVerts
poly = obpolygon.vertices
if(number == 1):
divide_one(nPolygons, nVerts, verts, poly, len(nVerts))
elif(number == 2):
divide_two(nPolygons, nVerts, verts, poly, len(nVerts))
elif(number == 3):
divide_three(nPolygons, nVerts, verts, poly, len(nVerts), GetPolyCentroid(obpolygon, verts))
elif(number == 4):
divide_four(nPolygons, nVerts, verts, poly, len(nVerts), GetPolyCentroid(obpolygon, verts))
# ################## Discombobulate ################ #
def GetPolyCentroid(obpolygon, allvertcoords):
centroid = Vector((0, 0, 0))
for vindex in obpolygon.vertices:
centroid += Vector(allvertcoords[vindex])
centroid /= len(obpolygon.vertices)
return centroid
def division(obpolygons, verts, sf1, sf2, sf3, sf4):
"""Function to divide each of the selected polygons"""
divide = []
if (sf1):
divide.append(1)
if (sf2):
divide.append(2)
if (sf3):
divide.append(3)
if (sf4):
divide.append(4)
for poly in obpolygons:
if(poly.select is True and len(poly.vertices) == 4):
a = random.randint(0, len(divide) - 1)
dividepolygon(poly, verts, divide[a])
def protusion(obverts, obpolygons, minHeight, maxHeight, minTaper, maxTaper):
"""function to generate the protusions"""
verts = []
for vertex in obverts:
verts.append(vertex.co)
for polygon in obpolygons:
if(polygon.select is True):
if(len(polygon.vertices) == 4):
addProtusionToPolygon(polygon, verts, minHeight, maxHeight, minTaper, maxTaper)
def test_v2_near_v1(v1, v2):
if (v1.x - 0.1 <= v2.x <= v1.x + 0.1 and
v1.y - 0.1 <= v2.y <= v1.y + 0.1 and
v1.z - 0.1 <= v2.z <= v1.z + 0.1):
return True
return False
def angle_between_nor(nor_orig, nor_result):
angle = math.acos(nor_orig.dot(nor_result))
axis = nor_orig.cross(nor_result).normalized()
q = Quaternion()
q.x = axis.x * math.sin(angle / 2)
q.y = axis.y * math.sin(angle / 2)
q.z = axis.z * math.sin(angle / 2)
q.w = math.cos(angle / 2)
return q
def doodads(object1, mesh1, dmin, dmax):
"""function to generate the doodads"""
global dVerts
global dPolygons
i = 0
    # go through this loop to add doodads to all of the selected polygons
while(i < len(object1.data.polygons)):
if object1.data.polygons[i].select is False:
continue
doods_nbr = random.randint(dmin, dmax)
j = 0
while(j <= doods_nbr):
origin_dood = randVertex(object1.data.polygons[i].vertices[0], object1.data.polygons[i].vertices[1],
object1.data.polygons[i].vertices[2], object1.data.polygons[i].vertices[3], Verts)
type_dood = random.randint(0, len(bpy.context.scene.discomb.DISC_doodads) - 1)
polygons_add = []
verts_add = []
# First we have to apply scaling and rotation to the mesh
bpy.ops.object.select_pattern(pattern=bpy.context.scene.discomb.DISC_doodads[type_dood], extend=False)
bpy.context.scene.objects.active = bpy.data.objects[bpy.context.scene.discomb.DISC_doodads[type_dood]]
bpy.ops.object.transform_apply(rotation=True, scale=True)
for polygon in bpy.data.objects[bpy.context.scene.discomb.DISC_doodads[type_dood]].data.polygons:
polygons_add.append(polygon.vertices)
for vertex in bpy.data.objects[bpy.context.scene.discomb.DISC_doodads[type_dood]].data.vertices:
verts_add.append(vertex.co.copy())
normal_original_polygon = object1.data.polygons[i].normal
nor_def = Vector((0.0, 0.0, 1.0))
qr = nor_def.rotation_difference(normal_original_polygon.normalized())
if(test_v2_near_v1(nor_def, -normal_original_polygon)):
qr = Quaternion((0.0, 0.0, 0.0, 0.0))
# qr = angle_between_nor(nor_def, normal_original_polygon)
for vertex in verts_add:
vertex.rotate(qr)
vertex += origin_dood
findex = len(dVerts)
for polygon in polygons_add:
dPolygons.append([polygon[0] + findex, polygon[1] + findex, polygon[2] + findex, polygon[3] + findex])
i_dood_type.append(bpy.data.objects[bpy.context.scene.discomb.DISC_doodads[type_dood]].name)
for vertex in verts_add:
dVerts.append(vertex)
j += 1
i += 5
def protusions_repeat(object1, mesh1, r_prot):
for j in i_prots:
if j < len(object1.data.polygons):
object1.data.polygons[j].select = True
else:
print("Warning: hit end of polygons in object1")
# add material to discombobulated mesh
def setMatProt(discObj, origObj, sideProtMat, topProtMat):
# First we put the materials in their slots
bpy.ops.object.select_pattern(pattern=discObj.name, extend=False)
bpy.context.scene.objects.active = bpy.data.objects[discObj.name]
try:
origObj.material_slots[topProtMat]
origObj.material_slots[sideProtMat]
except:
return
bpy.ops.object.material_slot_add()
bpy.ops.object.material_slot_add()
discObj.material_slots[0].material = origObj.material_slots[topProtMat].material
discObj.material_slots[1].material = origObj.material_slots[sideProtMat].material
# Then we assign materials to protusions
for polygon in discObj.data.polygons:
if polygon.index in i_prots:
polygon.material_index = 0
else:
polygon.material_index = 1
def setMatDood(doodObj):
# First we add the materials slots
bpy.ops.object.select_pattern(pattern=doodObj.name, extend=False)
bpy.context.scene.objects.active = doodObj
for name in bpy.context.scene.discomb.DISC_doodads:
try:
bpy.ops.object.material_slot_add()
doodObj.material_slots[-1].material = bpy.data.objects[name].material_slots[0].material
for polygon in doodObj.data.polygons:
if i_dood_type[polygon.index] == name:
polygon.material_index = len(doodObj.material_slots) - 1
except:
print()
def clean_doodads():
current_doodads = list(bpy.context.scene.discomb.DISC_doodads)
for name in current_doodads:
if name not in bpy.data.objects:
bpy.context.scene.discomb.DISC_doodads.remove(name)
def discombobulate(minHeight, maxHeight, minTaper, maxTaper, sf1, sf2, sf3, sf4,
dmin, dmax, r_prot, sideProtMat, topProtMat, isLast):
global doprots
global nVerts
global nPolygons
global Verts
global Polygons
global dVerts
global dPolygons
global i_prots
bpy.ops.object.mode_set(mode="OBJECT")
# start by cleaning up doodads that don't exist anymore
clean_doodads()
# Create the discombobulated mesh
mesh = bpy.data.meshes.new("tmp")
object = bpy.data.objects.new("tmp", mesh)
bpy.context.scene.objects.link(object)
# init final verts and polygons tuple
nPolygons = []
nVerts = []
Polygons = []
Verts = []
dPolygons = []
dVerts = []
origObj = bpy.context.active_object
# There we collect the rotation, translation and scaling datas from the original mesh
to_translate = bpy.context.active_object.location
to_scale = bpy.context.active_object.scale
to_rotate = bpy.context.active_object.rotation_euler
# First, we collect all the informations we will need from the previous mesh
obverts = bpy.context.active_object.data.vertices
obpolygons = bpy.context.active_object.data.polygons
verts = []
for vertex in obverts:
verts.append(vertex.co)
division(obpolygons, verts, sf1, sf2, sf3, sf4)
# Fill in the discombobulated mesh with the new polygons
mesh.from_pydata(nVerts, [], nPolygons)
mesh.update(calc_edges=True)
# Reload the datas
bpy.ops.object.select_all(action="DESELECT")
bpy.ops.object.select_pattern(pattern=object.name, extend=False)
bpy.context.scene.objects.active = bpy.data.objects[object.name]
obverts = bpy.context.active_object.data.vertices
obpolygons = bpy.context.active_object.data.polygons
protusion(obverts, obpolygons, minHeight, maxHeight, minTaper, maxTaper)
# Fill in the discombobulated mesh with the new polygons
mesh1 = bpy.data.meshes.new("discombobulated_object")
object1 = bpy.data.objects.new("discombobulated_mesh", mesh1)
bpy.context.scene.objects.link(object1)
mesh1.from_pydata(Verts, [], Polygons)
mesh1.update(calc_edges=True)
# Set the material's of discombobulated object
setMatProt(object1, origObj, sideProtMat, topProtMat)
bpy.ops.object.select_pattern(pattern=object1.name, extend=False)
bpy.context.scene.objects.active = bpy.data.objects[object1.name]
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.normals_make_consistent(inside=False)
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode='OBJECT')
# if(bpy.context.scene.repeatprot):
protusions_repeat(object1, mesh1, r_prot)
if(len(bpy.context.scene.discomb.DISC_doodads) != 0 and bpy.context.scene.discomb.dodoodads and isLast):
doodads(object1, mesh1, dmin, dmax)
mesh2 = bpy.data.meshes.new("dood_mesh")
object2 = bpy.data.objects.new("dood_obj", mesh2)
bpy.context.scene.objects.link(object2)
mesh2.from_pydata(dVerts, [], dPolygons)
mesh2.update(calc_edges=True)
setMatDood(object2)
object2.location = to_translate
object2.rotation_euler = to_rotate
object2.scale = to_scale
bpy.ops.object.select_pattern(pattern=object.name, extend=False)
bpy.context.scene.objects.active = bpy.data.objects[object.name]
bpy.ops.object.delete()
bpy.ops.object.select_pattern(pattern=object1.name, extend=False)
bpy.context.scene.objects.active = bpy.data.objects[object1.name]
bpy.context.scene.update()
# translate, scale and rotate discombobulated results
object1.location = to_translate
object1.rotation_euler = to_rotate
object1.scale = to_scale
# set all polys to selected. this allows recursive discombobulating.
for poly in mesh1.polygons:
poly.select = True
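# Illustrative usage note: inside Blender, with a quad-only mesh selected in
# Edit Mode, the pipeline above is normally driven through the operators
# defined below, e.g.
#   bpy.ops.discombobulate.ops('INVOKE_DEFAULT')      # settings dialog
#   bpy.ops.object.discombobulate('INVOKE_DEFAULT')   # run with scene.discomb settings
# both of which end up calling discombobulate() once per repeat iteration.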
# ### Operators for selecting and deselecting an object as a doodad ### #
class chooseDoodad(Operator):
bl_idname = "object.discombobulate_set_doodad"
bl_label = "Discombobulate set doodad object"
bl_description = ("Save the Active Object as Doodad \n"
"Object has to be quads only")
bl_options = {'REGISTER'}
@classmethod
def poll(cls, context):
obj = bpy.context.active_object
if (obj is not None and obj.type == "MESH"):
mesh = obj.data
for polygon in mesh.polygons:
is_ok = len(polygon.vertices)
if is_ok != 4:
return False
return True
return False
def execute(self, context):
obj_name = bpy.context.active_object.name
msg = "Object with this name already saved"
if obj_name not in bpy.context.scene.discomb.DISC_doodads:
bpy.context.scene.discomb.DISC_doodads.append(obj_name)
msg = "Saved Doodad object: {}".format(obj_name)
self.report({'INFO'}, message=msg)
def invoke(self, context, event):
self.execute(context)
return {'FINISHED'}
class unchooseDoodad(Operator):
bl_idname = "object.discombobulate_unset_doodad"
bl_label = "Discombobulate unset doodad object"
bl_description = "Remove the saved Doodad Object(s)"
bl_options = {'REGISTER'}
remove_all = bpy.props.BoolProperty(
name="Remove all Doodads",
default=False,
)
def execute(self, context):
msg = ("No doodads to remove")
doodadery = bpy.context.scene.discomb.DISC_doodads
if len(doodadery) > 0:
if not self.remove_all:
name = bpy.context.active_object.name
if name in doodadery:
bpy.context.scene.discomb.DISC_doodads.remove(name)
msg = ("Removed Doodad object: {}".format(name))
else:
bpy.context.scene.discomb.DISC_doodads[:] = []
msg = "Removed all Doodads"
else:
msg = "No Doodads to Remove"
self.report({'INFO'}, message=msg)
def invoke(self, context, event):
self.execute(context)
return {'FINISHED'}
# ################## Interpolygon ################## #
class discombobulator(Operator):
bl_idname = "object.discombobulate"
bl_label = "Discombobulate"
bl_description = "Apply"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
scn = context.scene.discomb
i = 0
while i < bpy.context.scene.discomb.repeatprot:
isLast = False
if i == scn.repeatprot - 1:
isLast = True
discombobulate(scn.minHeight, scn.maxHeight, scn.minTaper, scn.maxTaper, scn.subpolygon1,
scn.subpolygon2, scn.subpolygon3, scn.subpolygon4, scn.mindoodads, scn.maxdoodads,
scn.repeatprot, scn.sideProtMat, scn.topProtMat, isLast)
i += 1
return {'FINISHED'}
class discombobulator_dodads_list(Menu):
bl_idname = "object.discombobulator_dodad_list"
bl_label = "List of saved Doodads"
bl_description = "List of the saved Doodad Object Names"
bl_options = {'REGISTER'}
def draw(self, context):
layout = self.layout
doodle = len(bpy.context.scene.discomb.DISC_doodads)
layout.label("Saved doodads : {}".format(doodle))
layout.separator()
if doodle > 0:
for name in bpy.context.scene.discomb.DISC_doodads:
layout.label(text=name)
class discombob_help(Menu):
bl_idname = "help.discombobulator"
bl_label = "Usage Information"
bl_description = "Help"
bl_options = {'REGISTER'}
def draw(self, context):
layout = self.layout
layout.label(text="Usage Information:", icon="INFO")
layout.separator()
layout.label(text="Quads only, not Triangles or Ngons", icon="ERROR")
        layout.label("Works only with Mesh objects that have faces")
layout.separator()
layout.label("Select a face or faces")
layout.label("Press Discombobulate to create greebles")
layout.label("In object mode, still needs a selection in Edit Mode")
layout.separator()
layout.label("Doodads - additional objects layered on the mesh surface")
layout.label("(Similar to dupliverts - but as one separate object)")
layout.separator()
layout.label(text="Limitations:", icon="MOD_EXPLODE")
layout.label("Be careful with the repeat protusions setting")
        layout.label("(Runs recursively)")
layout.label("If possible, avoid using on a high polycount base mesh")
layout.label("(It can run out of memory and take a long time to compute)")
class VIEW3D_OT_tools_discombobulate(Operator):
bl_idname = "discombobulate.ops"
bl_label = "Discombobulator"
bl_description = ("Easily add sci-fi details to a surface \n"
"Needs an existing active Mesh with Faces")
bl_options = {'REGISTER'}
executing = False
@classmethod
def poll(cls, context):
return (context.active_object is not None and
context.active_object.type == "MESH")
def draw(self, context):
layout = self.layout
row = layout.row()
row.menu('help.discombobulator', icon='INFO')
box = layout.box()
box.label("Protusions settings")
row = box.row()
row.prop(context.scene.discomb, 'doprots')
row = box.row()
row.prop(context.scene.discomb, 'minHeight')
row = box.row()
row.prop(context.scene.discomb, 'maxHeight')
row = box.row()
row.prop(context.scene.discomb, 'minTaper')
row = box.row()
row.prop(context.scene.discomb, 'maxTaper')
row = box.row()
col1 = row.column(align=True)
col1.prop(context.scene.discomb, "subpolygon1")
col2 = row.column(align=True)
col2.prop(context.scene.discomb, "subpolygon2")
col3 = row.column(align=True)
col3.prop(context.scene.discomb, "subpolygon3")
col4 = row.column(align=True)
col4.prop(context.scene.discomb, "subpolygon4")
row = box.row()
row.prop(context.scene.discomb, "repeatprot")
box = layout.box()
box.label("Doodads settings")
row = box.row()
is_doodad = context.scene.discomb.dodoodads
row.prop(context.scene.discomb, 'dodoodads')
row = box.row()
row.enabled = is_doodad
row.prop(context.scene.discomb, "mindoodads")
row = box.row()
row.enabled = is_doodad
row.prop(context.scene.discomb, "maxdoodads")
row = box.row()
row.enabled = is_doodad
row.operator("object.discombobulate_set_doodad", text="Pick doodad")
row = box.row()
splits = row.split(0.5)
splits.enabled = is_doodad
splits.operator("object.discombobulate_unset_doodad",
text="Remove active doodad").remove_all = False
splits.operator("object.discombobulate_unset_doodad",
text="Remove all doodads").remove_all = True
col = box.column(align=True)
doodle = len(bpy.context.scene.discomb.DISC_doodads)
col.enabled = (True if doodle > 0 else False)
col.menu("object.discombobulator_dodad_list",
text="List of saved Doodads ({})".format(doodle))
box = layout.box()
box.label("Materials settings")
row = box.row()
row.prop(context.scene.discomb, 'topProtMat')
row = box.row()
row.prop(context.scene.discomb, "sideProtMat")
def invoke(self, context, event):
return context.window_manager.invoke_props_dialog(self, width=300)
def check(self, context):
return not self.executing
def execute(self, context):
self.executing = True
bpy.ops.object.discombobulate('INVOKE_DEFAULT')
return {'FINISHED'}
| gpl-3.0 | -2,927,640,118,018,320,000 | 35.137615 | 119 | 0.626265 | false |
c17r/TagTrain | src/tagtrain/migrations/002_add_table_member.py | 1 | 1628 | """Peewee migrations -- 002_add_table_member.py.
Some examples (model - class or model name)::
> Model = migrator.orm['model_name'] # Return model in current state by name
> migrator.sql(sql) # Run custom SQL
> migrator.python(func, *args, **kwargs) # Run python code
> migrator.create_model(Model) # Create a model (could be used as decorator)
> migrator.remove_model(model, cascade=True) # Remove a model
> migrator.add_fields(model, **fields) # Add fields to a model
> migrator.change_fields(model, **fields) # Change fields
> migrator.remove_fields(model, *field_names, cascade=True)
> migrator.rename_field(model, old_field_name, new_field_name)
> migrator.rename_table(model, new_table_name)
> migrator.add_index(model, *col_names, unique=False)
> migrator.drop_index(model, *col_names)
> migrator.add_not_null(model, *field_names)
> migrator.drop_not_null(model, *field_names)
> migrator.add_default(model, field_name, default)
"""
import datetime as dt
import peewee
def migrate(migrator, database, fake=False, **kwargs):
"""Write your migrations here."""
Group = migrator.orm['group']
    def _now():
        # assumed default: return the current timestamp for the `added` column
        return dt.datetime.now()
@migrator.create_table
class Member(peewee.Model):
group = peewee.ForeignKeyField(Group, backref='members', index=True)
reddit_name = peewee.CharField(max_length=30)
added = peewee.DateTimeField(default=_now)
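# Illustrative usage note: this migration is normally discovered and applied by
# peewee_migrate rather than imported directly, e.g.
#   from peewee_migrate import Router
#   Router(database, migrate_dir='src/tagtrain/migrations').run()
# where `database` is the peewee database instance used by the application.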
def rollback(migrator, database, fake=False, **kwargs):
"""Write your rollback migrations here."""
| mit | -8,744,574,383,846,106,000 | 36 | 97 | 0.646806 | false |
MostlyOpen/odoo_addons | myo_insured_card/models/annotation.py | 1 | 1447 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import fields, models
class InsuredCard(models.Model):
_inherit = 'myo.insured.card'
annotation_ids = fields.Many2many(
'myo.annotation',
'myo_insured_card_annotation_rel',
'insured_id',
'annotation_id',
'Annotations'
)
class Annotation(models.Model):
_inherit = 'myo.annotation'
insured_ids = fields.Many2many(
'myo.insured.card',
'myo_insured_card_annotation_rel',
'annotation_id',
'insured_id',
'Insured Cards'
)
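# Illustrative usage note: both Many2many fields above share the same relation
# table (myo_insured_card_annotation_rel), so linking from either side is
# enough, e.g.
#   insured.write({'annotation_ids': [(4, annotation.id)]})
# where `insured` and `annotation` are assumed records of the two models.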
| agpl-3.0 | 7,274,541,034,307,426,000 | 31.155556 | 79 | 0.610228 | false |
bitmazk/django-account-keeping | account_keeping/freckle_api.py | 1 | 1583 | """API calls against letsfreckle.com"""
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
try:
from freckle_client.client import FreckleClientV2
client = FreckleClientV2(settings.ACCOUNT_KEEPING_FRECKLE_ACCESS_TOKEN)
except ImportError: # pragma: nocover
client = None
from requests.exceptions import ConnectionError, HTTPError
from . import models
def get_unpaid_invoices_with_transactions(branch=None):
"""
Returns all invoices that are unpaid on freckle but have transactions.
This means, that the invoice is either partially paid and can be left as
unpaid in freckle, or the invoice has been fully paid and should be set to
paid in freckle as well.
"""
if not client: # pragma: nocover
return None
result = {}
try:
unpaid_invoices = client.fetch_json(
'invoices', query_params={'state': 'unpaid'})
except (ConnectionError, HTTPError): # pragma: nocover
result.update({'error': _('Wasn\'t able to connect to Freckle.')})
else:
invoices = []
for invoice in unpaid_invoices:
invoice_with_transactions = models.Invoice.objects.filter(
invoice_number=invoice['reference'],
transactions__isnull=False)
if branch:
invoice_with_transactions = invoice_with_transactions.filter(
branch=branch)
if invoice_with_transactions:
invoices.append(invoice)
result.update({'invoices': invoices})
return result
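# Illustrative usage note: with ACCOUNT_KEEPING_FRECKLE_ACCESS_TOKEN configured
# and freckle_client installed, a caller would typically do something like
#   result = get_unpaid_invoices_with_transactions(branch=some_branch)
#   if 'error' in result: ...   # Freckle could not be reached
#   else: ...                   # each entry in result['invoices'] has a
#                               # 'reference' matching an Invoice.invoice_number
# where `some_branch` is a placeholder for a branch record of this app.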
| mit | -6,213,594,721,971,263,000 | 34.977273 | 78 | 0.660771 | false |
koparasy/faultinjection-gem5 | src/arch/x86/isa/insts/general_purpose/data_transfer/xchg.py | 1 | 3156 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
# All the memory versions need to use LOCK, regardless of whether it was set
def macroop XCHG_R_R
{
# Use the xor trick instead of moves to reduce register pressure.
# This probably doesn't make much of a difference, but it's easy.
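    # (The xor swap works because a ^= b; b ^= a; a ^= b leaves each register
    # holding the other's original value, so no scratch register is needed.)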
xor reg, reg, regm
xor regm, regm, reg
xor reg, reg, regm
};
def macroop XCHG_R_M
{
ldstl t1, seg, sib, disp
stul reg, seg, sib, disp
mov reg, reg, t1
};
def macroop XCHG_R_P
{
rdip t7
ldstl t1, seg, riprel, disp
stul reg, seg, riprel, disp
mov reg, reg, t1
};
def macroop XCHG_M_R
{
ldstl t1, seg, sib, disp
stul reg, seg, sib, disp
mov reg, reg, t1
};
def macroop XCHG_P_R
{
rdip t7
ldstl t1, seg, riprel, disp
stul reg, seg, riprel, disp
mov reg, reg, t1
};
def macroop XCHG_LOCKED_M_R
{
ldstl t1, seg, sib, disp
stul reg, seg, sib, disp
mov reg, reg, t1
};
def macroop XCHG_LOCKED_P_R
{
rdip t7
ldstl t1, seg, riprel, disp
stul reg, seg, riprel, disp
mov reg, reg, t1
};
'''
| bsd-3-clause | -7,811,480,589,938,713,000 | 32.221053 | 72 | 0.737643 | false |
talon-one/talon_one.py | talon_one/models/new_coupons.py | 1 | 15073 | # coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from talon_one.configuration import Configuration
class NewCoupons(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'usage_limit': 'int',
'discount_limit': 'float',
'start_date': 'datetime',
'expiry_date': 'datetime',
'number_of_coupons': 'int',
'unique_prefix': 'str',
'attributes': 'object',
'recipient_integration_id': 'str',
'valid_characters': 'list[str]',
'coupon_pattern': 'str'
}
attribute_map = {
'usage_limit': 'usageLimit',
'discount_limit': 'discountLimit',
'start_date': 'startDate',
'expiry_date': 'expiryDate',
'number_of_coupons': 'numberOfCoupons',
'unique_prefix': 'uniquePrefix',
'attributes': 'attributes',
'recipient_integration_id': 'recipientIntegrationId',
'valid_characters': 'validCharacters',
'coupon_pattern': 'couponPattern'
}
def __init__(self, usage_limit=None, discount_limit=None, start_date=None, expiry_date=None, number_of_coupons=None, unique_prefix=None, attributes=None, recipient_integration_id=None, valid_characters=None, coupon_pattern=None, local_vars_configuration=None): # noqa: E501
"""NewCoupons - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._usage_limit = None
self._discount_limit = None
self._start_date = None
self._expiry_date = None
self._number_of_coupons = None
self._unique_prefix = None
self._attributes = None
self._recipient_integration_id = None
self._valid_characters = None
self._coupon_pattern = None
self.discriminator = None
self.usage_limit = usage_limit
if discount_limit is not None:
self.discount_limit = discount_limit
if start_date is not None:
self.start_date = start_date
if expiry_date is not None:
self.expiry_date = expiry_date
self.number_of_coupons = number_of_coupons
if unique_prefix is not None:
self.unique_prefix = unique_prefix
if attributes is not None:
self.attributes = attributes
if recipient_integration_id is not None:
self.recipient_integration_id = recipient_integration_id
if valid_characters is not None:
self.valid_characters = valid_characters
if coupon_pattern is not None:
self.coupon_pattern = coupon_pattern
@property
def usage_limit(self):
"""Gets the usage_limit of this NewCoupons. # noqa: E501
The number of times a coupon code can be redeemed. This can be set to 0 for no limit, but any campaign usage limits will still apply. # noqa: E501
:return: The usage_limit of this NewCoupons. # noqa: E501
:rtype: int
"""
return self._usage_limit
@usage_limit.setter
def usage_limit(self, usage_limit):
"""Sets the usage_limit of this NewCoupons.
The number of times a coupon code can be redeemed. This can be set to 0 for no limit, but any campaign usage limits will still apply. # noqa: E501
:param usage_limit: The usage_limit of this NewCoupons. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and usage_limit is None: # noqa: E501
raise ValueError("Invalid value for `usage_limit`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
usage_limit is not None and usage_limit > 999999): # noqa: E501
raise ValueError("Invalid value for `usage_limit`, must be a value less than or equal to `999999`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
usage_limit is not None and usage_limit < 0): # noqa: E501
raise ValueError("Invalid value for `usage_limit`, must be a value greater than or equal to `0`") # noqa: E501
self._usage_limit = usage_limit
@property
def discount_limit(self):
"""Gets the discount_limit of this NewCoupons. # noqa: E501
The amount of discounts that can be given with this coupon code. # noqa: E501
:return: The discount_limit of this NewCoupons. # noqa: E501
:rtype: float
"""
return self._discount_limit
@discount_limit.setter
def discount_limit(self, discount_limit):
"""Sets the discount_limit of this NewCoupons.
The amount of discounts that can be given with this coupon code. # noqa: E501
:param discount_limit: The discount_limit of this NewCoupons. # noqa: E501
:type: float
"""
if (self.local_vars_configuration.client_side_validation and
discount_limit is not None and discount_limit > 999999): # noqa: E501
raise ValueError("Invalid value for `discount_limit`, must be a value less than or equal to `999999`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
discount_limit is not None and discount_limit < 0): # noqa: E501
raise ValueError("Invalid value for `discount_limit`, must be a value greater than or equal to `0`") # noqa: E501
self._discount_limit = discount_limit
@property
def start_date(self):
"""Gets the start_date of this NewCoupons. # noqa: E501
Timestamp at which point the coupon becomes valid. # noqa: E501
:return: The start_date of this NewCoupons. # noqa: E501
:rtype: datetime
"""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""Sets the start_date of this NewCoupons.
Timestamp at which point the coupon becomes valid. # noqa: E501
:param start_date: The start_date of this NewCoupons. # noqa: E501
:type: datetime
"""
self._start_date = start_date
@property
def expiry_date(self):
"""Gets the expiry_date of this NewCoupons. # noqa: E501
Expiry date of the coupon. Coupon never expires if this is omitted, zero, or negative. # noqa: E501
:return: The expiry_date of this NewCoupons. # noqa: E501
:rtype: datetime
"""
return self._expiry_date
@expiry_date.setter
def expiry_date(self, expiry_date):
"""Sets the expiry_date of this NewCoupons.
Expiry date of the coupon. Coupon never expires if this is omitted, zero, or negative. # noqa: E501
:param expiry_date: The expiry_date of this NewCoupons. # noqa: E501
:type: datetime
"""
self._expiry_date = expiry_date
@property
def number_of_coupons(self):
"""Gets the number_of_coupons of this NewCoupons. # noqa: E501
The number of new coupon codes to generate for the campaign. Must be at least 1. # noqa: E501
:return: The number_of_coupons of this NewCoupons. # noqa: E501
:rtype: int
"""
return self._number_of_coupons
@number_of_coupons.setter
def number_of_coupons(self, number_of_coupons):
"""Sets the number_of_coupons of this NewCoupons.
The number of new coupon codes to generate for the campaign. Must be at least 1. # noqa: E501
:param number_of_coupons: The number_of_coupons of this NewCoupons. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and number_of_coupons is None: # noqa: E501
raise ValueError("Invalid value for `number_of_coupons`, must not be `None`") # noqa: E501
self._number_of_coupons = number_of_coupons
@property
def unique_prefix(self):
"""Gets the unique_prefix of this NewCoupons. # noqa: E501
A unique prefix to prepend to all generated coupons. # noqa: E501
:return: The unique_prefix of this NewCoupons. # noqa: E501
:rtype: str
"""
return self._unique_prefix
@unique_prefix.setter
def unique_prefix(self, unique_prefix):
"""Sets the unique_prefix of this NewCoupons.
A unique prefix to prepend to all generated coupons. # noqa: E501
:param unique_prefix: The unique_prefix of this NewCoupons. # noqa: E501
:type: str
"""
self._unique_prefix = unique_prefix
@property
def attributes(self):
"""Gets the attributes of this NewCoupons. # noqa: E501
Arbitrary properties associated with this item # noqa: E501
:return: The attributes of this NewCoupons. # noqa: E501
:rtype: object
"""
return self._attributes
@attributes.setter
def attributes(self, attributes):
"""Sets the attributes of this NewCoupons.
Arbitrary properties associated with this item # noqa: E501
:param attributes: The attributes of this NewCoupons. # noqa: E501
:type: object
"""
self._attributes = attributes
@property
def recipient_integration_id(self):
"""Gets the recipient_integration_id of this NewCoupons. # noqa: E501
The integration ID for this coupon's beneficiary's profile # noqa: E501
:return: The recipient_integration_id of this NewCoupons. # noqa: E501
:rtype: str
"""
return self._recipient_integration_id
@recipient_integration_id.setter
def recipient_integration_id(self, recipient_integration_id):
"""Sets the recipient_integration_id of this NewCoupons.
The integration ID for this coupon's beneficiary's profile # noqa: E501
:param recipient_integration_id: The recipient_integration_id of this NewCoupons. # noqa: E501
:type: str
"""
self._recipient_integration_id = recipient_integration_id
@property
def valid_characters(self):
"""Gets the valid_characters of this NewCoupons. # noqa: E501
Set of characters to be used when generating random part of code. Defaults to [A-Z, 0-9] (in terms of RegExp). # noqa: E501
:return: The valid_characters of this NewCoupons. # noqa: E501
:rtype: list[str]
"""
return self._valid_characters
@valid_characters.setter
def valid_characters(self, valid_characters):
"""Sets the valid_characters of this NewCoupons.
Set of characters to be used when generating random part of code. Defaults to [A-Z, 0-9] (in terms of RegExp). # noqa: E501
:param valid_characters: The valid_characters of this NewCoupons. # noqa: E501
:type: list[str]
"""
self._valid_characters = valid_characters
@property
def coupon_pattern(self):
"""Gets the coupon_pattern of this NewCoupons. # noqa: E501
The pattern that will be used to generate coupon codes. The character `#` acts as a placeholder and will be replaced by a random character from the `validCharacters` set. # noqa: E501
:return: The coupon_pattern of this NewCoupons. # noqa: E501
:rtype: str
"""
return self._coupon_pattern
@coupon_pattern.setter
def coupon_pattern(self, coupon_pattern):
"""Sets the coupon_pattern of this NewCoupons.
The pattern that will be used to generate coupon codes. The character `#` acts as a placeholder and will be replaced by a random character from the `validCharacters` set. # noqa: E501
:param coupon_pattern: The coupon_pattern of this NewCoupons. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
coupon_pattern is not None and len(coupon_pattern) > 100):
raise ValueError("Invalid value for `coupon_pattern`, length must be less than or equal to `100`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
coupon_pattern is not None and len(coupon_pattern) < 3):
raise ValueError("Invalid value for `coupon_pattern`, length must be greater than or equal to `3`") # noqa: E501
self._coupon_pattern = coupon_pattern
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NewCoupons):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, NewCoupons):
return True
return self.to_dict() != other.to_dict()
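# Illustrative usage sketch (editorial addition, not generated code); the
# field values are invented and only show how the model is typically built
# and serialized before being handed to the API client.
def _example_new_coupons():
    coupons = NewCoupons(
        usage_limit=1,                  # each generated code redeemable once
        number_of_coupons=100,          # ask for 100 codes
        unique_prefix="SPRING",         # hypothetical prefix
        coupon_pattern="SPRING-####")   # '#' drawn from validCharacters
    return coupons.to_dict()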
| mit | -7,932,817,323,509,143,000 | 37.256345 | 647 | 0.625423 | false |
agx/calypso | setup.py | 1 | 3533 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Calypso Server - Calendar Server
# Copyright © 2009-2011 Guillaume Ayoub
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Calypso. If not, see <http://www.gnu.org/licenses/>.
"""
Calypso CalDAV server
======================
The Calypso Project is a CalDAV calendar server. It aims to be a light
solution, easy to use, easy to install, easy to configure. As a consequence,
it requires few software dependencies and is pre-configured to work
out of the box.
The Calypso Project runs on most of the UNIX-like platforms (Linux, BSD,
MacOS X) and Windows. It is known to work with Evolution 2.30+, Lightning 0.9+
and Sunbird 0.9+. It is free and open-source software, released under GPL
version 3.
For further information, please visit the `Calypso Website
<http://keithp.com/blogs/calypso/>`_.
"""
import os
from distutils.command.build_scripts import build_scripts
from setuptools import setup
try:
from calypso import VERSION
except ImportError as e:
print 'Error importing Calypso, probably dependencies are not installed'
print e
VERSION = '0.0.1'
print 'Assuming version %s' % VERSION
# build_scripts is known to have a lot of public methods
# pylint: disable=R0904
class BuildScripts(build_scripts):
"""Build the package."""
def run(self):
"""Run building."""
# These lines remove the .py extension from the calypso executable
self.mkpath(self.build_dir)
for script in self.scripts:
root, _ = os.path.splitext(script)
self.copy_file(script, os.path.join(self.build_dir, root))
# pylint: enable=R0904
# When the version is updated, ``calypso.VERSION`` must be modified.
# A new section in the ``NEWS`` file must be added too.
setup(
name="calypso",
version=VERSION,
description="CalDAV and CardDAV Server",
long_description=__doc__,
author="Keith Packard",
author_email="[email protected]",
url="http://keithp.com/blogs/calypso/",
download_url="https://anonscm.debian.org/cgit/calypso/calypso.git/",
license="GNU GPL v3",
platforms="Any",
packages=["calypso", "calypso.acl"],
provides=["calypso"],
install_requires=["daemon","vobject"],
tests_require=['nose>=0.11.1'],
scripts=["calypso.py"],
cmdclass={"build_scripts": BuildScripts},
keywords=["calendar", "CalDAV"],
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Environment :: Web Environment",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: GNU General Public License (GPL)",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Topic :: Office/Business :: Groupware"])
| gpl-3.0 | -6,134,075,689,661,929,000 | 35.402062 | 79 | 0.682243 | false |
kivy/plyer | examples/gps/main.py | 1 | 3054 | from kivy.lang import Builder
from plyer import gps
from kivy.app import App
from kivy.properties import StringProperty
from kivy.clock import mainthread
from kivy.utils import platform
kv = '''
BoxLayout:
orientation: 'vertical'
Label:
text: app.gps_location
Label:
text: app.gps_status
BoxLayout:
size_hint_y: None
height: '48dp'
padding: '4dp'
ToggleButton:
text: 'Start' if self.state == 'normal' else 'Stop'
on_state:
app.start(1000, 0) if self.state == 'down' else \
app.stop()
'''
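# Note (added for clarity): the kv string above binds the two Labels to the
# gps_location / gps_status properties defined below, and wires the
# ToggleButton to app.start(1000, 0) / app.stop(). Those two arguments are
# forwarded to plyer's gps.start(minTime, minDistance); on Android they map
# to the platform's minimum update interval (milliseconds) and minimum
# distance between updates (meters).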
class GpsTest(App):
gps_location = StringProperty()
gps_status = StringProperty('Click Start to get GPS location updates')
def request_android_permissions(self):
"""
Since API 23, Android requires permission to be requested at runtime.
This function requests permission and handles the response via a
callback.
        The request will produce a popup if permissions have not already
        been granted, otherwise it will do nothing.
"""
from android.permissions import request_permissions, Permission
def callback(permissions, results):
"""
Defines the callback to be fired when runtime permission
has been granted or denied. This is not strictly required,
but added for the sake of completeness.
"""
if all([res for res in results]):
print("callback. All permissions granted.")
else:
print("callback. Some permissions refused.")
request_permissions([Permission.ACCESS_COARSE_LOCATION,
Permission.ACCESS_FINE_LOCATION], callback)
# # To request permissions without a callback, do:
# request_permissions([Permission.ACCESS_COARSE_LOCATION,
# Permission.ACCESS_FINE_LOCATION])
def build(self):
try:
gps.configure(on_location=self.on_location,
on_status=self.on_status)
except NotImplementedError:
import traceback
traceback.print_exc()
self.gps_status = 'GPS is not implemented for your platform'
if platform == "android":
print("gps.py: Android detected. Requesting permissions")
self.request_android_permissions()
return Builder.load_string(kv)
def start(self, minTime, minDistance):
gps.start(minTime, minDistance)
def stop(self):
gps.stop()
@mainthread
def on_location(self, **kwargs):
self.gps_location = '\n'.join([
'{}={}'.format(k, v) for k, v in kwargs.items()])
@mainthread
def on_status(self, stype, status):
self.gps_status = 'type={}\n{}'.format(stype, status)
def on_pause(self):
gps.stop()
return True
def on_resume(self):
gps.start(1000, 0)
pass
if __name__ == '__main__':
GpsTest().run()
| mit | 2,254,386,045,913,796,900 | 28.365385 | 77 | 0.592993 | false |
agermanidis/Pattern | en/parser/__init__.py | 1 | 26069 | #### PATTERN | EN | RULE-BASED SHALLOW PARSER ########################################################
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <[email protected]>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
######################################################################################################
# Fast tagger-chunker using regular expressions.
import re
#### TOKENIZER #######################################################################################
token = re.compile(r"(\S+)\s")
# Handle common contractions,
# we don't want the parser to break on something simple like "I'm eating".
replacements = {
"'m" : " 'm",
"'re" : " 're",
"'ve" : " 've",
"'ll" : " 'll",
"'s" : " 's",
"n't" : " n't"
}
# Handle common abbreviations.
abbreviations = dict.fromkeys([
"a.m.", "cf.", "e.g.", "ex.", "etc.", "fig.", "i.e.", "Mr.", "p.m."
], True)
a1 = re.compile("^[A-Za-z]\.$") # single letter, "T. De Smedt"
a2 = re.compile("^([A-Za-z]\.)+$") # alternating letters, "U.S."
a3 = re.compile("^[A-Z]["+"|".join("bcdfghjklmnpqrstvwxz")+"]+.$") # capital followed by consonants, "Mr."
# Handle common word punctuation:
punctuation = (
("(","[","\""),
(":",";",",","!","?","]",")","\"", "'")
)
def tokenize(string):
""" Returns a list of sentences. Each sentence is a space-separated string of tokens (words).
Aside from a few common cases ("etc.") no attempt is made to disambiguate abbreviations
from sentence periods.
"""
for a,b in replacements.items():
string = re.sub(a, b, string)
# Collapse whitespace.
string = re.sub(r"\s+", " ", string)
tokens = []
for t in token.findall(string+" "):
if len(t) > 0:
tail = []
# Split leading punctuation.
if t.startswith(punctuation[0]):
tokens.append(t[0]); t=t[1:]
if t.startswith("'") and not t in replacements:
tokens.append(t[0]); t=t[1:]
for i in range(2):
# Split trailing punctuation.
if t.endswith(punctuation[1]):
tail.append(t[-1]); t=t[:-1]
# Split ellipsis before checking for period.
if t.endswith("..."):
tail.append("..."); t=t[:-3]
# Split period (if not an abbreviation).
if t.endswith(".") and not t in abrreviations and \
a1.match(t) is None and \
a2.match(t) is None and \
a3.match(t) is None:
tail.append(t[-1]); t=t[:-1]
tokens.append(t)
tokens.extend(reversed(tail))
sentences = [[]]
for t in tokens:
sentences[-1].append(t)
# A period token always breaks the sentence.
if t == ".": sentences.append([])
return [" ".join(s) for s in sentences if len(s) > 0]
# MBSP's tokenizer.py is pretty fast and a lot more robust so we could try to load it.
# You could also do parser.tokenize = my_module.tokenize
#try: from MBSP.tokenizer import split as tokenize
#except:
# pass
#### TAGGER ##########################################################################################
#--- BRILL TAGGER ------------------------------------------------------------------------------------
# Based on Jason Wiener's implementation of a rule-based part-of-speech Brill tagger.
# Original Copyright (C) Mark Watson. All rights reserved.
# Python port by Jason Wiener (http://www.jasonwiener.com)
# THIS CODE AND INFORMATION ARE PROVIDED "AS IS" WITHOUT WARRANTY OF ANY
# KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
# PARTICULAR PURPOSE.
import os
try:
MODULE = os.path.dirname(__file__)
except:
MODULE = ""
class Lexicon:
def __init__(self):
self._words = None
def load(self):
# Brill's lexicon is a list of common tokens and their part-of-speech tag.
# It takes a while to load but this happens only once when parse() is called.
# Create a dictionary from the entries:
self._words = open(os.path.join(MODULE, "Brill_lexicon.txt")).read().splitlines()
self._words = dict([x.split(" ") for x in self._words])
self["crunchy"] = "JJ" # The lexicon can be updated easily.
def get(self, word, default=None):
return word in self and self._words[word] or default
def __contains__(self, word):
try:
return word in self._words
except:
self.load()
return word in self._words
def __getitem__(self, word):
if self._words is None:
self.load()
return self._words[word]
def __setitem__(self, word, pos):
if self._words is None:
self.load()
self._words[word] = pos
lexicon = Lexicon()
def find_tags(tokens, default="NN", light=False):
""" Returns a list of [token, tag]-items for the given list of tokens.
For example:
['That', 'is', 'interesting', '.'] =>
[['That', 'DT'], ['is', 'VBZ'], ['interesting', 'JJ'], ['.', '.']]
With light=True uses Brill's lexical and contextual rules to improve token tags.
With light=False uses a faster set of arbitrary rules (Jason Wiener's rules).
"""
tagged = []
for token in tokens:
# By default, all tokens are tagged NN unless we find an entry in the lexicon.
# Words that start with a capital letter are tagged with NNP by default.
# Words that are not in the lexicon are then improved with lexical rules.
tagged.append([token, lexicon.get(token, lexicon.get(token.lower(), None))])
f = light and apply_default_rules or apply_lexical_rules
for i, (token, tag) in enumerate(tagged):
if tag == None:
if len(token) > 0 and token[0] == token[0].upper() and token[0].isalpha():
tagged[i] = [token, "NNP"]
else:
tagged[i] = [token, default]
tagged[i] = f(tagged[i],
previous = i>0 and tagged[i-1] or (None, None),
next = i<len(tagged)-1 and tagged[i+1] or (None, None))
if not light:
apply_contextual_rules(tagged)
return tagged
def apply_default_rules(token, previous=(None,None), next=(None,None)):
""" Returns the token with its tag updated according to a few simple rules.
Jason Wiener's rules are less accurate than Brill's lexical rules, but they are faster (5-10x).
"""
# By comparison, WordNet has 12401 adjectives not in the Brill lexicon.
# Brill's lexical rules corrected 11961 of them, in 1.71 seconds.
# Jason Wiener's rules corrected 9948, in 0.19 seconds.
    #              not in lexicon   fixed by Brill   fixed by Wiener
    # verbs                 26197            15983             13525
    # adjectives            12401            11986              9948
#
# Rule 1: convert a common noun ending with "ing" to a present participle verb (i.e., a gerund).
# Rule 2: convert any type to adverb if it ends in "ly".
# Rule 3: if a word has been categorized as a common noun and it ends with "s",
# then set its type to plural common noun (NNS)
# Rule 4: convert a noun to a number (CD) if "." appears in the word.
    # Rule 5: convert a common noun (NN or NNS) to an adjective if it ends with "al", "ient", "ish", "less"
# or if there is a hyphen ("-") in the word.
# Rule 6: convert a noun to a past participle if word ends with "ed".
# Rule 7: DT, {VBD | VBP} --> DT, NN
# Rule 8: convert a noun to a verb if the preceeding word is "would".
word, pos = token
if pos.startswith("NN") and word.endswith("ing"):
pos = "VBG"
elif word.endswith("ly"):
pos = "RB"
elif pos == "NN" and word.endswith("s") and not word.endswith("ss"):
pos = "NNS"
elif pos.startswith("NN") and word.isdigit():
pos = "CD"
elif pos.startswith("NN") and word[:1].isdigit() and word.replace(".","").isdigit():
pos = "CD"
elif pos.startswith("NN") and word.endswith(("al","ient","ish","less")) or "-" in word:
pos = "JJ"
elif pos.startswith("NN") and word.endswith("ed"):
pos = "VBN"
    elif previous[1] == "DT" and pos in ("VBD", "VBP", "VB"):
        pos = "NN"
    elif pos.startswith("NN") and previous[0] == "would":
        pos = "VB"
return [word, pos]
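# A few illustrative outcomes (added for clarity, assuming the token arrives
# here tagged NN, e.g. an out-of-lexicon word that got the default tag):
#   ["sleeping", "NN"] -> ["sleeping", "VBG"]   # rule 1: -ing noun => gerund
#   ["quickly", "NN"]  -> ["quickly", "RB"]     # rule 2: -ly => adverb
#   ["horses", "NN"]   -> ["horses", "NNS"]     # rule 3: plural common noun
#   ["1984", "NN"]     -> ["1984", "CD"]        # rule 4: all digits => number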
#--- BRILL RULES -------------------------------------------------------------------------------------
lexical_commands = ["char", "hassuf", "deletesuf", "addsuf", "haspref", "deletepref", "addpref", "goodleft", "goodright"]
lexical_commands.extend(["f"+x for x in lexical_commands])
# Brill's lexical rules.
# An entry looks like: ('fhassuf', ['NN', 's', 'fhassuf', '1', 'NNS', 'x']).
# The first item is the lookup command.
# If it is prefixed with an "f", it means that the token needs to have the first given tag (NN).
# In this case, if the NN-word ends with an "s", it is tagged as NNS.
lexical_rules = open(os.path.join(MODULE, "Brill_lexical_rules.txt")).read()
lexical_rules = lexical_rules.strip().split("\n")
for i, rule in enumerate(lexical_rules):
rule = rule.split()
for cmd in lexical_commands:
if cmd in rule:
lexical_rules[i] = (cmd, rule)
break
def apply_lexical_rules(token, previous=(None,None), next=(None,None)):
""" Applies the lexical rules to the given token.
A token is a [word,tag]-item whose tag might change if it matches a rule.
Rules are lexically based on word characters, prefixes and suffixes.
"""
word, pos = token[0], token[1]
if word[:1].isdigit() and word.replace(".","").isdigit():
return [word, "CD"]
for cmd, rule in lexical_rules:
pos = rule[-2]
x = rule[0]
if cmd.startswith("f"):
# Word must be tagged as the f-rule states.
cmd = cmd[1:]
if token[1] != rule[0]: continue
x = rule[1]
if (cmd == "char" and x in word) \
or (cmd == "hassuf" and word.endswith(x)) \
or (cmd == "deletesuf" and word.endswith(x) and word[:-len(x)] in lexicon) \
or (cmd == "haspref" and word.startswith(x)) \
or (cmd == "deletepref" and word.startswith(x) and word[len(x):] in lexicon) \
or (cmd == "addsuf" and word+x in lexicon) \
or (cmd == "addpref" and x+word in lexicon) \
or (cmd == "goodleft" and x == previous[0]) \
or (cmd == "goodright" and x == next[0]):
return [word, pos]
else:
return token
# Brill's contextual rules.
# An entry looks like: ('PREVTAG', ['VBD', 'VB', 'PREVTAG', 'TO']).
# The first item is the lookup command.
# The example rule reads like:
# "If the previous word is tagged TO, change this word's tag from VBD to VB (if it is VBD)".
contextual_rules = open(os.path.join(MODULE, "Brill_contextual_rules.txt")).read()
contextual_rules = contextual_rules.strip().split("\n")
for i, rule in enumerate(contextual_rules):
rule = rule.split()
cmd = rule[2]
contextual_rules[i] = (cmd, rule)
def apply_contextual_rules(tokens):
""" Applies the contextual rules to the given list of tokens.
Each token is a [word,tag]-item whose tag might change if it matches a rule.
Rules are contextually based on the token's position in the sentence.
"""
b = [(None,"STAART")] * 3 # Add empty tokens so we can scan ahead and behind.
T = b + tokens + b
for i, token in enumerate(T):
for cmd, rule in contextual_rules:
# If the word is tagged differently than required by the rule, skip it.
if token[1] != rule[0]:
continue
# Never allow rules to tag "be" anything but infinitive.
if token[0] == "be" and token[1] == "VB":
continue
# A rule involves scanning the previous/next word or tag,
# and all combinations thereof.
x = rule[3]
if (cmd == "PREVTAG" and x == T[i-1][1]) \
or (cmd == "NEXTTAG" and x == T[i+1][1]) \
or (cmd == "PREV1OR2TAG" and x in (T[i-1][1], T[i-2][1])) \
or (cmd == "NEXT1OR2TAG" and x in (T[i+1][1], T[i+2][1])) \
or (cmd == "PREV1OR2OR3TAG" and x in (T[i-1][1], T[i-2][1], T[i-3][1])) \
or (cmd == "NEXT1OR2OR3TAG" and x in (T[i+1][1], T[i+2][1], T[i+3][1])) \
or (cmd == "SURROUNDTAG" and x == T[i-1][1] and rule[4] == T[i+1][1]) \
or (cmd == "PREVBIGRAM" and x == T[i-2][1] and rule[4] == T[i-1][1]) \
or (cmd == "NEXTBIGRAM" and x == T[i+1][1] and rule[4] == T[i+2][1]) \
or (cmd == "PREV2TAG" and x == T[i-2][1]) \
or (cmd == "NEXT2TAG" and x == T[i+2][1]) \
or (cmd == "CURWD" and x == T[i][0]) \
or (cmd == "PREVWD" and x == T[i-1][0]) \
or (cmd == "NEXTWD" and x == T[i+1][0]) \
or (cmd == "PREV1OR2WD" and x in (T[i-1][0], T[i-2][0])) \
or (cmd == "NEXT1OR2WD" and x in (T[i+1][0], T[i+2][0])) \
or (cmd == "WDPREVTAG" and x == T[i][0] and rule[4] == T[i-1][1]) \
or (cmd == "WDNEXTTAG" and x == T[i][0] and rule[4] == T[i+1][1]):
tokens[i-len(b)] = [tokens[i-len(b)][0], rule[1]]
#### CHUNKER #########################################################################################
SEPARATOR = "/"
VB = "VB|VBD|VBG|VBN|VBP|VBZ"
JJ = "JJ|JJR|JJS"
RB = "[^W]RB|RBR|RBS"
NN = "NN|NNS|NNP|NNPS|PRP|PRP\$"
rules = [
("NP", re.compile(r"(("+NN+")/)*((DT|CD|CC)/)*(("+RB+"|"+JJ+")/)*(("+NN+")/)+")),
("VP", re.compile(r"(((MD|"+RB+")/)*(("+VB+")/)+)+")),
("VP", re.compile(r"((MD)/)")),
("PP", re.compile(r"((IN|TO)/)")),
("ADJP", re.compile(r"((CC|"+RB+"|"+JJ+")/)*(("+JJ+")/)+")),
("ADVP", re.compile(r"(("+RB+"|WRB)/)+")),
]
rules.insert(1, rules.pop(3)) # Handle ADJP before VP (RB prefers next ADJP over previous VP).
def find_chunks(tagged, iob=True):
""" The input is a list of (token, tag)-tuples.
The output is a list of (token, tag, chunk)-tuples.
For example:
The/DT nice/JJ fish/NN is/VBZ dead/JJ ./. =>
The/DT/B-NP nice/JJ/I-NP fish/NN/I-NP is/VBZ/B-VP dead/JJ/B-ADJP ././O
"""
chunked = [x for x in tagged]
tags = "".join("%s%s"%(tag,SEPARATOR) for token, tag in tagged)
for tag, rule in rules:
for m in rule.finditer(tags):
# Find the start of the pattern inside the tag-string.
# The number of preceding separators = the number of preceding tokens.
i = m.start()
j = tags[:i].count(SEPARATOR)
n = m.group(0).count(SEPARATOR)
for k in range(j, j+n):
# Don't overwrite tokens already chunked.
if len(chunked[k]) < 3:
if k == j and chunked[k][1] == "CC":
# A CC-tag can never be start of a chunk.
j += 1
elif k == j:
# Mark the start of a chunk with the "B"-tag.
chunked[k].append("B-"+tag)
else:
chunked[k].append("I-"+tag)
# Chinks are tokens outside of a chunk, we add the O-tag.
for chink in filter(lambda x: len(x)<3, chunked):
chink.append("O")
return chunked
#### RELATION FINDER #################################################################################
# Naive approach.
BE = dict.fromkeys(("be", "am", "are", "is", "being", "was", "were", "been"), True)
GO = dict.fromkeys(("go", "goes", "going", "went"), True)
def find_relations(chunked):
""" The input is a list of (token, tag, chunk)-tuples.
The output is a list of (token, tag, chunk, relation)-tuples.
A noun phrase preceding a verb phrase is perceived as sentence subject.
A noun phrase following a verb phrase is perceived as sentence object.
"""
tag = lambda token: token[2].split("-")[-1]
# Group consecutive tokens with the same chunk-tag.
# Tokens in a chunk that are not part of a relation just get the O-tag.
chunks = []
for token in chunked:
if len(chunks) == 0 \
or token[2].startswith("B-") \
or tag(token) != tag(chunks[-1][-1]):
chunks.append([])
chunks[-1].append(token+["O"])
# If a VP is preceded by a NP, the NP is tagged as NP-SBJ-(id).
# If a VP is followed by a NP, the NP is tagged as NP-OBJ-(id).
id = 0
for i, chunk in enumerate(chunks):
if tag(chunk[-1]) == "VP" and i > 0 and tag(chunks[i-1][-1]) == "NP":
if chunk[-1][-1] == "O":
id += 1
for token in chunk:
token[-1] = "VP-"+str(id)
for token in chunks[i-1]:
token[-1] += "*NP-SBJ-"+str(id)
token[-1] = token[-1].lstrip("O-*")
if tag(chunk[-1]) == "VP" and i < len(chunks)-1 and tag(chunks[i+1][-1]) == "NP":
if chunk[-1][-1] == "O":
id += 1
for token in chunk:
token[-1] = "VP-"+str(id)
for token in chunks[i+1]:
token[-1] = "*NP-OBJ-"+str(id)
token[-1] = token[-1].lstrip("O-*")
# This is more a proof-of-concept than useful in practice:
# PP-LOC = be + in|at + the|my
# PP-DIR = go + to|towards + the|my
for i, chunk in enumerate(chunks):
if 0 < i < len(chunks)-1 and len(chunk)==1 and chunk[-1][-1] == "O":
t0, t1, t2 = chunks[i-1][-1], chunks[i][0], chunks[i+1][0] # previous / current / next
if tag(t1) == "PP" and t2[1] in ("DT", "PRP$"):
if t0[0] in BE and t1[0] in ("in", "at") : t1[-1] = "PP-LOC"
if t0[0] in GO and t1[0] in ("to", "towards") : t1[-1] = "PP-DIR"
related = []; [related.extend(chunk) for chunk in chunks]
return related
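# Illustrative outcome (added for clarity): for chunked input along the lines
# of "the/DT/B-NP cat/NN/I-NP eats/VBZ/B-VP fish/NN/B-NP", the NP before the
# verb phrase is tagged NP-SBJ-1, the verb phrase VP-1 and the NP after it
# NP-OBJ-1; tokens that take part in no relation keep the O-tag.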
#### PNP FINDER ######################################################################################
def find_prepositions(chunked):
""" The input is a list of (token, tag, chunk)-tuples.
The output is a list of (token, tag, chunk, preposition)-tuples.
PP-chunks followed by NP-chunks make up a PNP-chunk.
"""
n = len(chunked) > 0 and len(chunked[0]) or 0
for i, chunk in enumerate(chunked):
if chunk[2].endswith("PP") and i<len(chunked)-1 and chunked[i+1][2].endswith("NP"):
chunk.append("B-PNP")
for ch in chunked[i+1:]:
if not ch[2].endswith("NP"):
break
ch.append("I-PNP")
# Tokens that are not part of a preposition just get the O-tag.
for chunk in filter(lambda x: len(x) < n+1, chunked):
chunk.append("O")
return chunked
#### LEMMATIZER ######################################################################################
# Word lemmas using singularization and verb conjugation from the inflect module.
try: from pattern.en.inflect import singularize, conjugate
except:
try:
import os, sys; sys.path.append(os.path.join(MODULE, ".."))
from inflect import singularize, conjugate
except:
singularize = lambda w: w
conjugate = lambda w,t: w
def lemma(word, pos="NN"):
""" Returns the lemma of the given word, e.g. horses/NNS => horse, am/VBP => be.
Words must be lowercase.
"""
if pos == "NNS":
return singularize(word)
if pos.startswith("VB"):
return conjugate(word, "infinitive") or word
return word
def find_lemmata(tagged):
""" Appends the lemma to the given (token, tag)-tuple.
"""
for token in tagged:
token.append(lemma(token[0].lower(), pos=len(token)>1 and token[1] or None))
return tagged
#### PARSER ##########################################################################################
_tokenize = tokenize
def parse(s, tokenize=True, tags=True, chunks=True, relations=False, lemmata=False, encoding="utf-8", default="NN", light=False):
""" Takes a string (sentences) and returns a tagged Unicode string.
Sentences in the output are separated by newlines.
"""
if isinstance(s, str):
s = s.decode(encoding)
if tokenize:
s = _tokenize(s)
s = [s.split(" ") for s in s]
for i in range(len(s)):
        if tags or chunks or relations or lemmata:
s[i] = find_tags(s[i], default, light)
if chunks or relations:
s[i] = find_chunks(s[i])
if chunks or relations:
s[i] = find_prepositions(s[i])
if relations:
s[i] = find_relations(s[i])
if lemmata:
s[i] = find_lemmata(s[i])
# Include the format of a token in the parsed output string.
# This allows a Sentence (see tree.py) to figure out the order of the tags.
format = ["word"]
if tags : format.append("part-of-speech")
if chunks : format.extend(("chunk", "preposition"))
if relations : format.append("relation")
if lemmata : format.append("lemma")
# Collapse the output.
# Sentences are separated by newlines, tokens by spaces, tags by slashes.
# Slashes in words are encoded with &slash;
for i in range(len(s)):
for j in range(len(s[i])):
s[i][j][0] = s[i][j][0].replace("/", "&slash;")
s[i][j] = "/".join(s[i][j])
s[i] = " ".join(s[i])
s = "\n".join(s)
s = TaggedString(s, tags=format)
return s
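# Example call (added for clarity): with all options enabled, e.g.
#   parse("The cat sat on the mat.", relations=True, lemmata=True)
# each token in the returned TaggedString is rendered as
# word/part-of-speech/chunk/preposition/relation/lemma, following the
# "format" list built above, and sentences are separated by newlines.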
#--- TAGGED STRING -----------------------------------------------------------------------------------
# The parse() command returns a unicode string with an extra "tags" attribute.
# The Sentence tree object uses this attribute to determine the token format.
# The TaggedString class emulates the TokenString class in the MBSP module,
# which has additional functionality besides a "tags" attribute.
TOKENS = "tokens"
class TaggedString(unicode):
def __new__(self, string, tags=["word"]):
if isinstance(string, unicode) and hasattr(string, "tags"):
tags = string.tags
s = unicode.__new__(self, string)
s.tags = list(tags)
return s
def split(self, sep=TOKENS):
""" Returns a list of sentences, where each sentence is a list of tokens,
where each token is a list of word + tags.
"""
if sep != TOKENS:
return unicode.split(self, sep)
return [[token.split("/") for token in s.split(" ")] for s in unicode.split(self, "\n")]
def tag(s, tokenize=True, encoding="utf-8", default="NN", light=False):
""" Returns a list of (token,tag)-tuples from the given string.
"""
tags = []
for sentence in parse(s, tokenize, True, False).split():
for token in sentence:
tags.append((token[0], token[1]))
return tags
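# Example (added for clarity; the exact tags depend on Brill's lexicon and
# the rule sets above):
#   tag("The cat sat on the mat.")
#   => [("The", "DT"), ("cat", "NN"), ("sat", "VBD"), ...]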
#### COMMAND LINE ####################################################################################
# From the folder that contains the "pattern" folder:
# python -m pattern.en.parser xml -s "Hello, my name is Dr. Sbaitso. Nice to meet you." -OTCLI
def main():
import optparse
import codecs
p = optparse.OptionParser()
p.add_option("-f", "--file", dest="file", action="store", help="text file to parse", metavar="FILE")
p.add_option("-s", "--string", dest="string", action="store", help="text string to parse", metavar="STRING")
p.add_option("-O", "--tokenize", dest="tokenize", action="store_true", help="tokenize the input")
p.add_option("-T", "--tags", dest="tags", action="store_true", help="parse part-of-speech tags")
p.add_option("-C", "--chunks", dest="chunks", action="store_true", help="parse chunk tags")
p.add_option("-R", "--relations", dest="relations", action="store_true", help="find verb/predicate relations")
p.add_option("-L", "--lemmata", dest="lemmata", action="store_true", help="find word lemmata")
p.add_option("-I", "--light", dest="light", action="store_true", help="disable contextual rules")
p.add_option("-e", "--encoding", dest="encoding", action="store_true", default="utf-8", help="character encoding")
p.add_option("-v", "--version", dest="version", action="store_true", help="version info")
o, arguments = p.parse_args()
# Version info.
if o.version:
from pattern import __version__
print __version__
# Either a text file (-f) or a text string (-s) must be supplied.
s = o.file and codecs.open(o.file, "r", o.encoding).read() or o.string
# The given text can be parsed in two modes:
# - implicit: parse everything (tokenize, tag/chunk, find relations, lemmatize).
# - explicit: define what to parse manually.
if s:
explicit = False
for option in [o.tokenize, o.tags, o.chunks, o.relations, o.lemmata]:
if option is not None: explicit=True; break
if not explicit:
a = {"encoding": o.encoding,
"light": o.light or False}
else:
a = {"tokenize": o.tokenize or False,
"tags": o.tags or False,
"chunks": o.chunks or False,
"relations": o.relations or False,
"lemmata": o.lemmata or False,
"light": o.light or False,
"encoding": o.encoding }
s = parse(s, **a)
# The output can be either slash-formatted string or XML.
if "xml" in arguments:
from pattern.en.parser.tree import Text
s = Text(s, s.tags).xml
print s
if __name__ == "__main__":
main() | bsd-3-clause | -2,130,800,496,313,446,000 | 43.336735 | 129 | 0.533354 | false |
gph82/PyEMMA | pyemma/__init__.py | 1 | 1495 |
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
=======================================
PyEMMA - Emma's Markov Model Algorithms
=======================================
"""
from __future__ import absolute_import
# set version from versioneer.
from ._version import get_versions
__version__ = get_versions()['version']
version = __version__
del get_versions
from .util import config
from . import coordinates
from . import msm
from . import util
from . import plots
from . import thermo
def setup_package():
    # This exists for nose testing only, to silence progress bars etc.
import warnings
warnings.warn('You should never see this, only in unit testing!'
' This switches off progress bars')
config.show_progress_bars = False
| lgpl-3.0 | -7,400,841,693,377,844,000 | 31.5 | 97 | 0.698328 | false |
atomic-labs/zulip | zproject/settings.py | 1 | 33670 | from __future__ import absolute_import
# Django settings for zulip project.
########################################################################
# Here's how settings for the Zulip project work:
#
# * settings.py contains non-site-specific and settings configuration
# for the Zulip Django app.
# * settings.py imports local_settings.py, and any site-specific configuration
# belongs there. The template for local_settings.py is local_settings_template.py
########################################################################
import os
import platform
import time
import sys
import six.moves.configparser
from zerver.lib.db import TimeTrackingConnection
########################################################################
# INITIAL SETTINGS
########################################################################
config_file = six.moves.configparser.RawConfigParser()
config_file.read("/etc/zulip/zulip.conf")
# Whether this instance of Zulip is running in a production environment.
PRODUCTION = config_file.has_option('machine', 'deploy_type')
DEVELOPMENT = not PRODUCTION
secrets_file = six.moves.configparser.RawConfigParser()
if PRODUCTION:
secrets_file.read("/etc/zulip/zulip-secrets.conf")
else:
secrets_file.read("zproject/dev-secrets.conf")
def get_secret(key):
if secrets_file.has_option('secrets', key):
return secrets_file.get('secrets', key)
return None
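# For reference (editorial note): both files read above are plain INI files
# with a single [secrets] section, e.g. (placeholder values):
#   [secrets]
#   secret_key = <long random string>
#   rabbitmq_password = <long random string>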
# Make this unique, and don't share it with anybody.
SECRET_KEY = get_secret("secret_key")
# A shared secret, used to authenticate different parts of the app to each other.
SHARED_SECRET = get_secret("shared_secret")
# We use this salt to hash a user's email into a filename for their user-uploaded
# avatar. If this salt is discovered, attackers will only be able to determine
# that the owner of an email account has uploaded an avatar to Zulip, which isn't
# the end of the world. Don't use the salt where there is more security exposure.
AVATAR_SALT = get_secret("avatar_salt")
# SERVER_GENERATION is used to track whether the server has been
# restarted for triggering browser clients to reload.
SERVER_GENERATION = int(time.time())
if not 'DEBUG' in globals():
# Uncomment end of next line to test JS/CSS minification.
DEBUG = DEVELOPMENT # and platform.node() != 'your-machine'
TEMPLATE_DEBUG = DEBUG
if DEBUG:
INTERNAL_IPS = ('127.0.0.1',)
# Detect whether we're running as a queue worker; this impacts the logging configuration.
if len(sys.argv) > 2 and sys.argv[0].endswith('manage.py') and sys.argv[1] == 'process_queue':
IS_WORKER = True
else:
IS_WORKER = False
# This is overridden in test_settings.py for the test suites
TEST_SUITE = False
# The new user tutorial is enabled by default, but disabled for client tests.
TUTORIAL_ENABLED = True
# Import variables like secrets from the local_settings file
# Import local_settings after determining the deployment/machine type
if PRODUCTION:
from .local_settings import *
else:
# For the Dev VM environment, we use the same settings as the
# sample local_settings.py file, with a few exceptions.
from .local_settings_template import *
EXTERNAL_HOST = 'localhost:9991'
ALLOWED_HOSTS = ['localhost']
AUTHENTICATION_BACKENDS = ('zproject.backends.DevAuthBackend',)
# Add some of the below if you're testing other backends
# AUTHENTICATION_BACKENDS = ('zproject.backends.EmailAuthBackend',
# 'zproject.backends.GoogleMobileOauth2Backend',)
EXTERNAL_URI_SCHEME = "http://"
EMAIL_GATEWAY_PATTERN = "%s@" + EXTERNAL_HOST
ADMIN_DOMAIN = "zulip.com"
NOTIFICATION_BOT = "[email protected]"
ERROR_BOT = "[email protected]"
NEW_USER_BOT = "[email protected]"
EMAIL_GATEWAY_BOT = "[email protected]"
########################################################################
# DEFAULT VALUES FOR SETTINGS
########################################################################
# For any settings that are not defined in local_settings.py,
# we want to initialize them to sane default
DEFAULT_SETTINGS = {'TWITTER_CONSUMER_KEY': '',
'TWITTER_CONSUMER_SECRET': '',
'TWITTER_ACCESS_TOKEN_KEY': '',
'TWITTER_ACCESS_TOKEN_SECRET': '',
'EMAIL_GATEWAY_PATTERN': '',
'EMAIL_GATEWAY_EXAMPLE': '',
'EMAIL_GATEWAY_BOT': None,
'EMAIL_GATEWAY_LOGIN': None,
'EMAIL_GATEWAY_PASSWORD': None,
'EMAIL_GATEWAY_IMAP_SERVER': None,
'EMAIL_GATEWAY_IMAP_PORT': None,
'EMAIL_GATEWAY_IMAP_FOLDER': None,
'MANDRILL_API_KEY': '',
'S3_KEY': '',
'S3_SECRET_KEY': '',
'S3_BUCKET': '',
'S3_AVATAR_BUCKET': '',
'LOCAL_UPLOADS_DIR': None,
'DROPBOX_APP_KEY': '',
'ERROR_REPORTING': True,
'JWT_AUTH_KEYS': {},
'NAME_CHANGES_DISABLED': False,
'DEPLOYMENT_ROLE_NAME': "",
# The following bots only exist in non-VOYAGER installs
'ERROR_BOT': None,
'NEW_USER_BOT': None,
'NAGIOS_STAGING_SEND_BOT': None,
'NAGIOS_STAGING_RECEIVE_BOT': None,
'APNS_CERT_FILE': None,
'ANDROID_GCM_API_KEY': None,
'INITIAL_PASSWORD_SALT': None,
'FEEDBACK_BOT': '[email protected]',
'FEEDBACK_BOT_NAME': 'Zulip Feedback Bot',
'API_SUPER_USERS': set(),
'ADMINS': '',
'INLINE_IMAGE_PREVIEW': True,
'CAMO_URI': '',
'ENABLE_FEEDBACK': PRODUCTION,
'FEEDBACK_EMAIL': None,
'ENABLE_GRAVATAR': True,
'DEFAULT_AVATAR_URI': '/static/images/default-avatar.png',
'AUTH_LDAP_SERVER_URI': "",
'EXTERNAL_URI_SCHEME': "https://",
'ZULIP_COM': False,
'ZULIP_COM_STAGING': False,
'STATSD_HOST': '',
'REMOTE_POSTGRES_HOST': '',
'GOOGLE_CLIENT_ID': '',
'DBX_APNS_CERT_FILE': None,
}
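# Note: vars() at module scope is this module's global namespace, so the loop
# below defines any of the settings above that local_settings.py left unset.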
for setting_name, setting_val in DEFAULT_SETTINGS.iteritems():
if not setting_name in vars():
vars()[setting_name] = setting_val
# These are the settings that we will check that the user has filled in for
# production deployments before starting the app. It consists of a series
# of pairs of (setting name, default value that it must be changed from)
REQUIRED_SETTINGS = [("EXTERNAL_HOST", "zulip.example.com"),
("ZULIP_ADMINISTRATOR", "[email protected]"),
("ADMIN_DOMAIN", "example.com"),
# SECRET_KEY doesn't really need to be here, in
# that we set it automatically, but just in
# case, it seems worth having in this list
("SECRET_KEY", ""),
("AUTHENTICATION_BACKENDS", ()),
("NOREPLY_EMAIL_ADDRESS", "[email protected]"),
("DEFAULT_FROM_EMAIL", "Zulip <[email protected]>"),
("ALLOWED_HOSTS", "*"),
]
if ADMINS == "":
ADMINS = (("Zulip Administrator", ZULIP_ADMINISTRATOR),)
MANAGERS = ADMINS
# Voyager is a production zulip server that is not zulip.com or
# staging.zulip.com VOYAGER is the standalone all-on-one-server
# production deployment model for based on the original Zulip
# ENTERPRISE implementation. We expect most users of the open source
# project will be using VOYAGER=True in production.
VOYAGER = PRODUCTION and not ZULIP_COM
########################################################################
# STANDARD DJANGO SETTINGS
########################################################################
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# The ID, as an integer, of the current site in the django_site database table.
# This is used so that application data can hook into specific site(s) and a
# single database can manage content for multiple sites.
#
# We set this site's domain to 'zulip.com' in populate_db.
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
DEPLOY_ROOT = os.path.join(os.path.realpath(os.path.dirname(__file__)), '..')
TEMPLATE_DIRS = ( os.path.join(DEPLOY_ROOT, 'templates'), )
LOCALE_PATHS = ( os.path.join(DEPLOY_ROOT, 'locale'), )
# Make redirects work properly behind a reverse proxy
USE_X_FORWARDED_HOST = True
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
if PRODUCTION:
# Template caching is a significant performance win in production.
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader',
TEMPLATE_LOADERS),
)
MIDDLEWARE_CLASSES = (
# Our logging middleware should be the first middleware item.
'zerver.middleware.TagRequests',
'zerver.middleware.LogRequests',
'zerver.middleware.JsonErrorHandler',
'zerver.middleware.RateLimitMiddleware',
'zerver.middleware.FlushDisplayRecipientCache',
'django.middleware.common.CommonMiddleware',
'zerver.middleware.SessionHostDomainMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ANONYMOUS_USER_ID = None
AUTH_USER_MODEL = "zerver.UserProfile"
TEST_RUNNER = 'zerver.lib.test_runner.Runner'
ROOT_URLCONF = 'zproject.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'zproject.wsgi.application'
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'confirmation',
'guardian',
'pipeline',
'zerver',
]
if not VOYAGER:
INSTALLED_APPS += [
'analytics',
'zilencer',
]
# Base URL of the Tornado server
# We set it to None when running backend tests or populate_db.
# We override the port number when running frontend tests.
TORNADO_SERVER = 'http://localhost:9993'
RUNNING_INSIDE_TORNADO = False
########################################################################
# DATABASE CONFIGURATION
########################################################################
DATABASES = {"default": {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'zulip',
'USER': 'zulip',
'PASSWORD': '', # Authentication done via certificates
'HOST': '', # Host = '' => connect through a local socket
'SCHEMA': 'zulip',
'CONN_MAX_AGE': 600,
'OPTIONS': {
'connection_factory': TimeTrackingConnection
},
},
}
if DEVELOPMENT:
LOCAL_DATABASE_PASSWORD = get_secret("local_database_password")
DATABASES["default"].update({
'PASSWORD': LOCAL_DATABASE_PASSWORD,
'HOST': 'localhost'
})
elif REMOTE_POSTGRES_HOST != '':
DATABASES['default'].update({
'HOST': REMOTE_POSTGRES_HOST,
})
DATABASES['default']['OPTIONS']['sslmode'] = 'verify-full'
########################################################################
# RABBITMQ CONFIGURATION
########################################################################
USING_RABBITMQ = True
RABBITMQ_USERNAME = 'zulip'
RABBITMQ_PASSWORD = get_secret("rabbitmq_password")
########################################################################
# CACHING CONFIGURATION
########################################################################
SESSION_ENGINE = "django.contrib.sessions.backends.cached_db"
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',
'LOCATION': '127.0.0.1:11211',
'TIMEOUT': 3600
},
'database': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'third_party_api_results',
# Basically never timeout. Setting to 0 isn't guaranteed
# to work, see https://code.djangoproject.com/ticket/9595
'TIMEOUT': 2000000000,
'OPTIONS': {
'MAX_ENTRIES': 100000000,
'CULL_FREQUENCY': 10,
}
},
}
########################################################################
# REDIS-BASED RATE LIMITING CONFIGURATION
########################################################################
RATE_LIMITING = True
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
RATE_LIMITING_RULES = [
(60, 100), # 100 requests max every minute
]
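# Each rule is a (time window in seconds, max requests) pair; for example,
# adding (3600, 1000) here would additionally cap clients at 1000 requests
# per hour.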
########################################################################
# SECURITY SETTINGS
########################################################################
# Tell the browser to never send our cookies without encryption, e.g.
# when executing the initial http -> https redirect.
#
# Turn it off for local testing because we don't have SSL.
if PRODUCTION:
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
try:
# For get_updates hostname sharding.
domain = config_file.get('django', 'cookie_domain')
SESSION_COOKIE_DOMAIN = '.' + domain
CSRF_COOKIE_DOMAIN = '.' + domain
except six.moves.configparser.Error:
# Failing here is OK
pass
# Prevent Javascript from reading the CSRF token from cookies. Our code gets
# the token from the DOM, which means malicious code could too. But hiding the
# cookie will slow down some attackers.
CSRF_COOKIE_PATH = '/;HttpOnly'
CSRF_FAILURE_VIEW = 'zerver.middleware.csrf_failure'
if DEVELOPMENT:
# Use fast password hashing for creating testing users when not
# PRODUCTION. Saves a bunch of time.
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher'
)
# Also we auto-generate passwords for the default users which you
# can query using ./manage.py print_initial_password
INITIAL_PASSWORD_SALT = get_secret("initial_password_salt")
########################################################################
# API/BOT SETTINGS
########################################################################
if "EXTERNAL_API_PATH" not in vars():
EXTERNAL_API_PATH = EXTERNAL_HOST + "/api"
EXTERNAL_API_URI = EXTERNAL_URI_SCHEME + EXTERNAL_API_PATH
S3_KEY = get_secret("s3_key")
S3_SECRET_KEY = get_secret("s3_secret_key")
# GCM tokens are IP-whitelisted; if we deploy to additional
# servers you will need to explicitly add their IPs here:
# https://cloud.google.com/console/project/apps~zulip-android/apiui/credential
ANDROID_GCM_API_KEY = get_secret("android_gcm_api_key")
GOOGLE_OAUTH2_CLIENT_SECRET = get_secret('google_oauth2_client_secret')
DROPBOX_APP_KEY = get_secret("dropbox_app_key")
MAILCHIMP_API_KEY = get_secret("mailchimp_api_key")
# This comes from our mandrill accounts page
MANDRILL_API_KEY = get_secret("mandrill_api_key")
# Twitter API credentials
# Secrecy not required because its only used for R/O requests.
# Please don't make us go over our rate limit.
TWITTER_CONSUMER_KEY = get_secret("twitter_consumer_key")
TWITTER_CONSUMER_SECRET = get_secret("twitter_consumer_secret")
TWITTER_ACCESS_TOKEN_KEY = get_secret("twitter_access_token_key")
TWITTER_ACCESS_TOKEN_SECRET = get_secret("twitter_access_token_secret")
# These are the bots that Zulip sends automated messages as.
INTERNAL_BOTS = [ {'var_name': 'NOTIFICATION_BOT',
'email_template': 'notification-bot@%s',
'name': 'Notification Bot'},
{'var_name': 'EMAIL_GATEWAY_BOT',
'email_template': 'emailgateway@%s',
'name': 'Email Gateway'},
{'var_name': 'NAGIOS_SEND_BOT',
'email_template': 'nagios-send-bot@%s',
'name': 'Nagios Send Bot'},
{'var_name': 'NAGIOS_RECEIVE_BOT',
'email_template': 'nagios-receive-bot@%s',
'name': 'Nagios Receive Bot'},
{'var_name': 'WELCOME_BOT',
'email_template': 'welcome-bot@%s',
'name': 'Welcome Bot'} ]
INTERNAL_BOT_DOMAIN = "zulip.com"
# Set the realm-specific bot names
for bot in INTERNAL_BOTS:
if not bot['var_name'] in vars():
bot_email = bot['email_template'] % (INTERNAL_BOT_DOMAIN,)
vars()[bot['var_name'] ] = bot_email
if EMAIL_GATEWAY_BOT not in API_SUPER_USERS:
API_SUPER_USERS.add(EMAIL_GATEWAY_BOT)
if EMAIL_GATEWAY_PATTERN != "":
EMAIL_GATEWAY_EXAMPLE = EMAIL_GATEWAY_PATTERN % ("support+abcdefg",)
DEPLOYMENT_ROLE_KEY = get_secret("deployment_role_key")
if PRODUCTION:
    FEEDBACK_TARGET = "https://zulip.com/api"
else:
    FEEDBACK_TARGET = "http://localhost:9991/api"
########################################################################
# STATSD CONFIGURATION
########################################################################
# Statsd is not super well supported; if you want to use it you'll need
# to set STATSD_HOST and STATSD_PREFIX.
if STATSD_HOST != '':
INSTALLED_APPS += ['django_statsd']
STATSD_PORT = 8125
STATSD_CLIENT = 'django_statsd.clients.normal'
########################################################################
# CAMO HTTPS CACHE CONFIGURATION
########################################################################
if CAMO_URI != '':
# This needs to be synced with the Camo installation
CAMO_KEY = get_secret("camo_key")
########################################################################
# STATIC CONTENT AND MINIFICATION SETTINGS
########################################################################
STATIC_URL = '/static/'
# ZulipStorage is a modified version of PipelineCachedStorage,
# and, like that class, it inserts a file hash into filenames
# to prevent the browser from using stale files from cache.
#
# Unlike PipelineStorage, it requires the files to exist in
# STATIC_ROOT even for dev servers. So we only use
# ZulipStorage when not DEBUG.
# This is the default behavior from Pipeline, but we set it
# here so that urls.py can read it.
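# Illustrative example of the hashing described above (file names made up):
# a bundle like "js/app.js" ends up served as something like
# "js/app.5f2c9ad81b7e.js", so browsers can cache aggressively without ever
# reusing a stale copy after a deploy.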
PIPELINE = not DEBUG
if DEBUG:
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
if PIPELINE:
STATIC_ROOT = 'prod-static/serve'
else:
STATIC_ROOT = 'static/'
else:
STATICFILES_STORAGE = 'zerver.storage.ZulipStorage'
STATICFILES_FINDERS = (
'zerver.finders.ZulipFinder',
)
if PRODUCTION:
STATIC_ROOT = '/home/zulip/prod-static'
else:
STATIC_ROOT = 'prod-static/serve'
# We want all temporary uploaded files to be stored on disk.
FILE_UPLOAD_MAX_MEMORY_SIZE = 0
STATICFILES_DIRS = ['static/']
STATIC_HEADER_FILE = 'zerver/static_header.txt'
# To use minified files in dev, set PIPELINE = True. For the full
# cache-busting behavior, you must also set DEBUG = False.
#
# You will need to run update-prod-static after changing
# static files.
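# For example (illustrative only, not part of the shipped defaults), a quick
# local check of the production asset pipeline would be:
#   DEBUG = False      # turns on ZulipStorage and the cache-busting hashes
#   PIPELINE = True    # already implied by DEBUG = False above
#   # ...then run update-prod-static and restart the server.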
PIPELINE_CSS = {
'activity': {
'source_filenames': ('styles/activity.css',),
'output_filename': 'min/activity.css'
},
'portico': {
'source_filenames': (
'third/zocial/zocial.css',
'styles/portico.css',
'styles/pygments.css',
'styles/thirdparty-fonts.css',
'styles/fonts.css',
),
'output_filename': 'min/portico.css'
},
# Two versions of the app CSS exist because of QTBUG-3467
'app-fontcompat': {
'source_filenames': (
'third/bootstrap-notify/css/bootstrap-notify.css',
'third/spectrum/spectrum.css',
'styles/zulip.css',
'styles/pygments.css',
'styles/thirdparty-fonts.css',
            # We don't want fonts.css on QtWebKit, so it's omitted here
),
'output_filename': 'min/app-fontcompat.css'
},
'app': {
'source_filenames': (
'third/bootstrap-notify/css/bootstrap-notify.css',
'third/spectrum/spectrum.css',
'third/jquery-perfect-scrollbar/css/perfect-scrollbar.css',
'styles/zulip.css',
'styles/pygments.css',
'styles/thirdparty-fonts.css',
'styles/fonts.css',
),
'output_filename': 'min/app.css'
},
'common': {
'source_filenames': (
'third/bootstrap/css/bootstrap.css',
'third/bootstrap/css/bootstrap-btn.css',
'third/bootstrap/css/bootstrap-responsive.css',
),
'output_filename': 'min/common.css'
},
}
JS_SPECS = {
'common': {
'source_filenames': (
'third/jquery/jquery-1.7.2.js',
'third/underscore/underscore.js',
'js/blueslip.js',
'third/bootstrap/js/bootstrap.js',
'js/common.js',
),
'output_filename': 'min/common.js'
},
'signup': {
'source_filenames': (
'js/signup.js',
'third/jquery-validate/jquery.validate.js',
),
'output_filename': 'min/signup.js'
},
'initial_invite': {
'source_filenames': (
'third/jquery-validate/jquery.validate.js',
'js/initial_invite.js',
),
'output_filename': 'min/initial_invite.js'
},
'api': {
'source_filenames': ('js/api.js',),
'output_filename': 'min/api.js'
},
'app_debug': {
'source_filenames': ('js/debug.js',),
'output_filename': 'min/app_debug.js'
},
'app': {
'source_filenames': [
'third/bootstrap-notify/js/bootstrap-notify.js',
'third/html5-formdata/formdata.js',
'third/jquery-validate/jquery.validate.js',
'third/jquery-form/jquery.form.js',
'third/jquery-filedrop/jquery.filedrop.js',
'third/jquery-caret/jquery.caret.1.02.js',
'third/xdate/xdate.dev.js',
'third/spin/spin.js',
'third/jquery-mousewheel/jquery.mousewheel.js',
'third/jquery-throttle-debounce/jquery.ba-throttle-debounce.js',
'third/jquery-idle/jquery.idle.js',
'third/jquery-autosize/jquery.autosize.js',
'third/jquery-perfect-scrollbar/js/perfect-scrollbar.js',
'third/lazyload/lazyload.js',
'third/spectrum/spectrum.js',
'third/winchan/winchan.js',
'third/sockjs/sockjs-0.3.4.js',
'third/handlebars/handlebars.runtime.js',
'third/marked/lib/marked.js',
'templates/compiled.js',
'js/feature_flags.js',
'js/loading.js',
'js/util.js',
'js/dict.js',
'js/localstorage.js',
'js/channel.js',
'js/setup.js',
'js/muting.js',
'js/muting_ui.js',
'js/viewport.js',
'js/rows.js',
'js/unread.js',
'js/stream_list.js',
'js/filter.js',
'js/narrow.js',
'js/reload.js',
'js/compose_fade.js',
'js/fenced_code.js',
'js/echo.js',
'js/socket.js',
'js/compose.js',
'js/stream_color.js',
'js/admin.js',
'js/stream_data.js',
'js/subs.js',
'js/message_edit.js',
'js/condense.js',
'js/resize.js',
'js/floating_recipient_bar.js',
'js/ui.js',
'js/click_handlers.js',
'js/scroll_bar.js',
'js/gear_menu.js',
'js/copy_and_paste.js',
'js/popovers.js',
'js/typeahead_helper.js',
'js/search_suggestion.js',
'js/search.js',
'js/composebox_typeahead.js',
'js/navigate.js',
'js/hotkey.js',
'js/favicon.js',
'js/notifications.js',
'js/hashchange.js',
'js/invite.js',
'js/message_list_view.js',
'js/message_list.js',
'js/message_flags.js',
'js/alert_words.js',
'js/alert_words_ui.js',
'js/people.js',
'js/message_store.js',
'js/server_events.js',
'js/zulip.js',
'js/activity.js',
'js/colorspace.js',
'js/timerender.js',
'js/tutorial.js',
'js/templates.js',
'js/avatar.js',
'js/settings.js',
'js/tab_bar.js',
'js/emoji.js',
'js/referral.js',
'js/custom_markdown.js',
'js/bot_data.js',
            # JS bundled by webpack is also included here if the PIPELINE setting is true
],
'output_filename': 'min/app.js'
},
'activity': {
'source_filenames': (
'third/sorttable/sorttable.js',
),
'output_filename': 'min/activity.js'
},
# We also want to minify sockjs separately for the sockjs iframe transport
'sockjs': {
'source_filenames': ('third/sockjs/sockjs-0.3.4.js',),
'output_filename': 'min/sockjs-0.3.4.min.js'
},
}
if PIPELINE:
JS_SPECS['app']['source_filenames'].append('js/bundle.js')
app_srcs = JS_SPECS['app']['source_filenames']
PIPELINE_JS = {} # Now handled in tools/minify-js
PIPELINE_JS_COMPRESSOR = None
PIPELINE_CSS_COMPRESSOR = 'pipeline.compressors.yui.YUICompressor'
PIPELINE_YUI_BINARY = './yuicompressor'
########################################################################
# LOGGING SETTINGS
########################################################################
ZULIP_PATHS = [
("SERVER_LOG_PATH", "/var/log/zulip/server.log"),
("ERROR_FILE_LOG_PATH", "/var/log/zulip/errors.log"),
("MANAGEMENT_LOG_PATH", "/var/log/zulip/manage.log"),
("WORKER_LOG_PATH", "/var/log/zulip/workers.log"),
("PERSISTENT_QUEUE_FILENAME", "/home/zulip/tornado/event_queues.pickle"),
("JSON_PERSISTENT_QUEUE_FILENAME", "/home/zulip/tornado/event_queues.json"),
("EMAIL_MIRROR_LOG_PATH", "/var/log/zulip/email-mirror.log"),
("EMAIL_DELIVERER_LOG_PATH", "/var/log/zulip/email-deliverer.log"),
("LDAP_SYNC_LOG_PATH", "/var/log/zulip/sync_ldap_user_data.log"),
("QUEUE_ERROR_DIR", "/var/log/zulip/queue_error"),
("STATS_DIR", "/home/zulip/stats"),
("DIGEST_LOG_PATH", "/var/log/zulip/digest.log"),
]
# The Event log basically logs most significant database changes,
# which can be useful for debugging.
if VOYAGER:
EVENT_LOG_DIR = None
else:
ZULIP_PATHS.append(("EVENT_LOG_DIR", "/home/zulip/logs/event_log"))
for (var, path) in ZULIP_PATHS:
if DEVELOPMENT:
# if DEVELOPMENT, store these files in the Zulip checkout
path = os.path.basename(path)
vars()[var] = path
ZULIP_WORKER_TEST_FILE = '/tmp/zulip-worker-test-file'
if IS_WORKER:
FILE_LOG_PATH = WORKER_LOG_PATH
else:
FILE_LOG_PATH = SERVER_LOG_PATH
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'default': {
'format': '%(asctime)s %(levelname)-8s %(message)s'
}
},
'filters': {
'ZulipLimiter': {
'()': 'zerver.lib.logging_util.ZulipLimiter',
},
'EmailLimiter': {
'()': 'zerver.lib.logging_util.EmailLimiter',
},
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'nop': {
'()': 'zerver.lib.logging_util.ReturnTrue',
},
'require_really_deployed': {
'()': 'zerver.lib.logging_util.RequireReallyDeployed',
},
},
'handlers': {
'zulip_admins': {
'level': 'ERROR',
'class': 'zerver.handlers.AdminZulipHandler',
# For testing the handler delete the next line
'filters': ['ZulipLimiter', 'require_debug_false', 'require_really_deployed'],
'formatter': 'default'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'default'
},
'file': {
'level': 'DEBUG',
'class': 'logging.handlers.TimedRotatingFileHandler',
'formatter': 'default',
'filename': FILE_LOG_PATH,
'when': 'D',
'interval': 7,
'backupCount': 100000000,
},
'errors_file': {
'level': 'WARNING',
'class': 'logging.handlers.TimedRotatingFileHandler',
'formatter': 'default',
'filename': ERROR_FILE_LOG_PATH,
'when': 'D',
'interval': 7,
'backupCount': 100000000,
},
},
'loggers': {
'': {
'handlers': ['console', 'file', 'errors_file'],
'level': 'INFO',
'propagate': False,
},
'django': {
'handlers': (['zulip_admins'] if ERROR_REPORTING else [])
+ ['console', 'file', 'errors_file'],
'level': 'INFO',
'propagate': False,
},
'zulip.requests': {
'handlers': ['console', 'file', 'errors_file'],
'level': 'INFO',
'propagate': False,
},
'zulip.management': {
'handlers': ['file', 'errors_file'],
'level': 'INFO',
'propagate': False,
},
## Uncomment the following to get all database queries logged to the console
# 'django.db': {
# 'handlers': ['console'],
# 'level': 'DEBUG',
# 'propagate': False,
# },
}
}
TEMPLATE_CONTEXT_PROCESSORS = (
'zerver.context_processors.add_settings',
'zerver.context_processors.add_metrics',
)
ACCOUNT_ACTIVATION_DAYS = 7
LOGIN_REDIRECT_URL = '/'
# Client-side polling timeout for get_events, in milliseconds.
# We configure this here so that the client test suite can override it.
# We already kill the connection server-side with heartbeat events,
# but it's good to have a safety. This value should be greater than
# (HEARTBEAT_MIN_FREQ_SECS + 10)
POLL_TIMEOUT = 90 * 1000
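# Illustrative sanity check of the constraint documented above, assuming a
# HEARTBEAT_MIN_FREQ_SECS value (in seconds) defined elsewhere:
#   POLL_TIMEOUT / 1000 > HEARTBEAT_MIN_FREQ_SECS + 10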
# iOS App IDs
ZULIP_IOS_APP_ID = 'com.zulip.Zulip'
DBX_IOS_APP_ID = 'com.dropbox.Zulip'
########################################################################
# SSO AND LDAP SETTINGS
########################################################################
USING_APACHE_SSO = ('zproject.backends.ZulipRemoteUserBackend' in AUTHENTICATION_BACKENDS)
if (len(AUTHENTICATION_BACKENDS) == 1 and
AUTHENTICATION_BACKENDS[0] == "zproject.backends.ZulipRemoteUserBackend"):
HOME_NOT_LOGGED_IN = "/accounts/login/sso"
ONLY_SSO = True
else:
HOME_NOT_LOGGED_IN = '/login'
ONLY_SSO = False
AUTHENTICATION_BACKENDS += ('guardian.backends.ObjectPermissionBackend',)
AUTHENTICATION_BACKENDS += ('zproject.backends.ZulipDummyBackend',)
POPULATE_PROFILE_VIA_LDAP = bool(AUTH_LDAP_SERVER_URI)
if POPULATE_PROFILE_VIA_LDAP and \
    'zproject.backends.ZulipLDAPAuthBackend' not in AUTHENTICATION_BACKENDS:
AUTHENTICATION_BACKENDS += ('zproject.backends.ZulipLDAPUserPopulator',)
else:
POPULATE_PROFILE_VIA_LDAP = 'zproject.backends.ZulipLDAPAuthBackend' in AUTHENTICATION_BACKENDS or POPULATE_PROFILE_VIA_LDAP
########################################################################
# EMAIL SETTINGS
########################################################################
# If an email host is not specified, fail silently and gracefully
if not EMAIL_HOST and PRODUCTION:
EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
elif DEVELOPMENT:
# In the dev environment, emails are printed to the run-dev.py console.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
else:
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST_PASSWORD = get_secret('email_password')
########################################################################
# MISC SETTINGS
########################################################################
if PRODUCTION:
# Filter out user data
DEFAULT_EXCEPTION_REPORTER_FILTER = 'zerver.filters.ZulipExceptionReporterFilter'
# This is a debugging option only
PROFILE_ALL_REQUESTS = False
CROSS_REALM_BOT_EMAILS = set(('[email protected]', '[email protected]'))
| apache-2.0 | 245,933,477,810,805,340 | 35.087889 | 128 | 0.56338 | false |
cgarciae/tfinterface | tfinterface/supervised/supervised_model.py | 1 | 7005 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# __coconut_hash__ = 0xe7f49d48
# Compiled with Coconut version 1.2.3-post_dev1 [Colonel]
# Coconut Header: --------------------------------------------------------
from __future__ import print_function, absolute_import, unicode_literals, division
import sys as _coconut_sys, os.path as _coconut_os_path
_coconut_file_path = _coconut_os_path.dirname(_coconut_os_path.abspath(__file__))
_coconut_sys.path.insert(0, _coconut_file_path)
from __coconut__ import _coconut, _coconut_MatchError, _coconut_tail_call, _coconut_tco, _coconut_igetitem, _coconut_compose, _coconut_pipe, _coconut_starpipe, _coconut_backpipe, _coconut_backstarpipe, _coconut_bool_and, _coconut_bool_or, _coconut_minus, _coconut_map, _coconut_partial
from __coconut__ import *
_coconut_sys.path.remove(_coconut_file_path)
# Compiled Coconut: ------------------------------------------------------
import tensorflow as tf
from tfinterface.base import Model
from tfinterface.metrics import r2_score
from tfinterface.metrics import sigmoid_score
from tfinterface.metrics import softmax_score
from tfinterface.utils import huber_loss
import cytoolz as cz
import itertools as it
from tfinterface.decorators import return_self
from tfinterface.decorators import with_graph_as_default
from tfinterface.decorators import copy_self
from .supervised_inputs import SupervisedInputs
from abc import abstractmethod
class SupervisedModel(Model):
"""
    # Interface
* `inputs : SupervisedInputs` -
* `predictions : Tensor` -
* `loss : Tensor` -
* `update : Tensor` -
"""
def __init__(self, name, loss="mse", optimizer=tf.train.AdamOptimizer, learning_rate=0.001, **kwargs):
super(SupervisedModel, self).__init__(name, **kwargs)
self._loss_arg = loss
self._optimizer = optimizer
self._learning_rate_arg = learning_rate
@return_self
def build_tensors(self, *args, **kwargs):
super(SupervisedModel, self).build_tensors(*args, **kwargs)
self.inputs = self.get_inputs(*args, **kwargs)
self.labels = self.get_labels(*args, **kwargs)
self.learning_rate = self.get_learning_rate(*args, **kwargs)
self.predictions = self.get_predictions(*args, **kwargs)
self.loss = self.get_loss(*args, **kwargs)
self.score_tensor = self.get_score_tensor(*args, **kwargs)
self.update = self.get_update(*args, **kwargs)
self.summaries = self.get_all_summaries(*args, **kwargs)
def get_inputs(self, inputs, *args, **kwargs):
return inputs
def get_labels(self, *args, **kwargs):
return self.inputs.labels
def get_learning_rate(self, *args, **kwargs):
if hasattr(self.inputs, "learning_rate"):
return self.inputs.learning_rate
else:
return self._learning_rate_arg
@abstractmethod
def get_predictions(self, *args, **kwargs):
pass
@abstractmethod
def get_loss(self, *args, **kwargs):
pass
@abstractmethod
def get_score_tensor(self, *args, **kwargs):
pass
def get_update(self, *args, **kwargs):
return self._optimizer(self.learning_rate).minimize(self.loss, global_step=self.inputs.global_step)
def get_all_summaries(self, *args, **kwargs):
standard = self.get_standard_summaries()
summaries = self.get_summaries(*args, **kwargs)
return tf.summary.merge(standard + summaries)
def get_standard_summaries(self):
return [tf.summary.scalar("loss_summary", self.loss), tf.summary.scalar("score_summary", self.score_tensor)]
def get_summaries(self, *args, **kwargs):
return []
def predict(self, **kwargs):
predict_feed = self.inputs.predict_feed(**kwargs)
return self.sess.run(self.predictions, feed_dict=predict_feed)
def score(self, **kwargs):
predict_feed = self.inputs.predict_feed(**kwargs)
score = self.sess.run(self.score_tensor, feed_dict=predict_feed)
return score
@with_graph_as_default
@return_self
def fit(self, epochs=2000, data_generator=None, log_summaries=False, log_interval=20, print_test_info=False, writer_kwargs={}):
if log_summaries and not hasattr(self, "writer"):
self.writer = tf.summary.FileWriter(self.logs_path, graph=self.graph, **writer_kwargs)
if not hasattr(self, "summaries"):
self.summaries = tf.no_op()
if data_generator is None:
#generator of empty dicts
data_generator = it.repeat({})
data_generator = (_coconut.functools.partial(cz.take, epochs))(data_generator)
for i, batch_feed_data in enumerate(data_generator):
fit_feed = self.inputs.fit_feed(**batch_feed_data)
_, summaries = self.sess.run([self.update, self.summaries], feed_dict=fit_feed)
if log_summaries and i % log_interval == 0 and summaries is not None:
self.writer.add_summary(summaries, global_step=self.sess.run(self.inputs.global_step))
if print_test_info and i % log_interval == 0:
loss, score = self.sess.run([self.loss, self.score_tensor], feed_dict=fit_feed)
print("loss {}, score {}, at {}".format(loss, score, i))
class SoftmaxClassifier(SupervisedModel):
"""docstring for SoftmaxClassifier."""
@abstractmethod
def get_logits(self):
pass
def get_predictions(self, *args, **kwargs):
self.logits = self.get_logits(*args, **kwargs)
return tf.nn.softmax(self.logits)
def get_loss(self, *args, **kwargs):
return ((tf.reduce_mean)(tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.labels)))
def get_score_tensor(self, *args, **kwargs):
return softmax_score(self.predictions, self.labels)
class SigmoidClassifier(SupervisedModel):
"""docstring for SoftmaxClassifier."""
@abstractmethod
def get_logits(self):
pass
def get_predictions(self, *args, **kwargs):
self.logits = self.get_logits(*args, **kwargs)
return tf.nn.sigmoid(self.logits)
def get_loss(self, *args, **kwargs):
return ((tf.reduce_mean)(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=self.labels)))
def get_score_tensor(self, *args, **kwargs):
return sigmoid_score(self.predictions, self.labels)
class LinearClassifier(SupervisedModel):
"""docstring for SoftmaxClassifier."""
def __init__(self, *args, **kwargs):
loss = kwargs.pop("loss", huber_loss)
if loss == "mse":
loss = tf.nn.l2_loss
elif loss == "huber":
loss = huber_loss
self._loss_fn = loss
super(LinearClassifier, self).__init__(*args, **kwargs)
def get_loss(self, *args, **kwargs):
return ((tf.reduce_mean)(self._loss_fn(self.predictions - self.labels)))
def get_score_tensor(self, *args, **kwargs):
return r2_score(self.predictions, self.labels)
| mit | -4,977,267,530,083,282,000 | 32.199052 | 285 | 0.646395 | false |
pymedusa/Medusa | medusa/providers/torrent/html/anidex.py | 1 | 5788 | # coding=utf-8
"""Provider code for AniDex."""
from __future__ import unicode_literals
import logging
import random
import string
from medusa import tv
from medusa.bs4_parser import BS4Parser
from medusa.helper.common import convert_size, try_int
from medusa.logger.adapters.style import BraceAdapter
from medusa.providers.torrent.torrent_provider import TorrentProvider
from requests.compat import urljoin
from requests.utils import add_dict_to_cookiejar
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
class AniDexProvider(TorrentProvider):
"""AniDex Torrent provider."""
def __init__(self):
"""Initialize the class."""
super(AniDexProvider, self).__init__('AniDex')
# Credentials
self.public = True
# URLs
self.url = 'https://anidex.info'
self.urls = {
'search': self.url,
}
# Miscellaneous Options
self.supports_absolute_numbering = True
# Cache
self.cache = tv.Cache(self, min_time=20)
self.cookies = {
'__ddg1': self.random_sixteen(),
'__ddg2': self.random_sixteen(),
'smpush_desktop_request': 'true'
}
@staticmethod
def random_sixteen():
"""
        Create a random 16-character string for cookies.

        This is used to bypass DDoS-Guard (ddos-guard.net) protection.
"""
return ''.join(random.choice(
string.ascii_uppercase + string.ascii_lowercase + string.digits
) for _ in range(16))
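    # Example of the kind of value the helper above produces (made up):
    # 'aZ3kPq9LmN0xYtRb'. DDoS-Guard appears to only require that the __ddg
    # cookies look like plausible 16-character tokens.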
def search(self, search_strings, age=0, ep_obj=None, **kwargs):
"""
Search a provider and parse the results.
:param search_strings: A dict with mode (key) and the search value (value)
:param age: Not used
:param ep_obj: An episode object
:returns: A list of search results (structure)
"""
results = []
category = '1,2,3'
if ep_obj and not ep_obj.series.is_anime:
category = '4,5'
search_params = {
'id': category
}
for mode in search_strings:
log.debug('Search mode: {0}', mode)
for search_string in search_strings[mode]:
if mode != 'RSS':
log.debug('Search string: {search}',
{'search': search_string})
search_params.update({'q': search_string})
add_dict_to_cookiejar(self.session.cookies, self.cookies)
response = self.session.get(self.urls['search'], params=search_params)
if not response or not response.text:
log.debug('No data returned from provider')
continue
results += self.parse(response.text, mode)
return results
def parse(self, data, mode):
"""
Parse search results for items.
:param data: The raw response from a search
:param mode: The current mode used to search, e.g. RSS
:return: A list of items found
"""
items = []
with BS4Parser(data, 'html5lib') as html:
table_header = html.find('thead')
# Continue only if at least one release is found
if not table_header:
log.debug('Data returned from provider does not contain any torrents')
return items
table_ths = table_header.find_all('th')
# [u'Category', u'', u'Filename', u'Comments', u'Torrent', u'Magnet',
# u'File size', u'Age', u'Seeders', u'Leechers', u'Completed']
labels = [label.span.get('title') if label.span else '' for label in table_ths]
torrent_rows = html.find('tbody').find_all('tr')
for row in torrent_rows:
cells = row.find_all('td')
try:
title = cells[labels.index('Filename')].span.get('title')
download_url = cells[labels.index('Torrent')].a.get('href')
if not all([title, download_url]):
continue
download_url = urljoin(self.url, download_url)
seeders = try_int(cells[labels.index('Seeders')].get_text(strip=True))
leechers = try_int(cells[labels.index('Leechers')].get_text(strip=True))
# Filter unseeded torrent
if seeders < self.minseed:
if mode != 'RSS':
log.debug("Discarding torrent because it doesn't meet the"
' minimum seeders: {0}. Seeders: {1}',
title, seeders)
continue
torrent_size = cells[labels.index('File size')].get_text()
size = convert_size(torrent_size) or -1
pubdate_raw = cells[labels.index('Age')].get('title')
pubdate = self.parse_pubdate(pubdate_raw)
item = {
'title': title,
'link': download_url,
'size': size,
'seeders': seeders,
'leechers': leechers,
'pubdate': pubdate,
}
if mode != 'RSS':
log.debug('Found result: {0} with {1} seeders and {2} leechers',
title, seeders, leechers)
items.append(item)
except (AttributeError, TypeError, KeyError, ValueError, IndexError):
log.exception('Failed parsing provider.')
return items
provider = AniDexProvider()
| gpl-3.0 | -6,090,695,897,899,882,000 | 32.074286 | 92 | 0.527125 | false |
luceatnobis/chan_archiver | chan/test/test_post_collector.py | 1 | 3157 | #!/usr/bin/env python
import pkgutil
import unittest
from chan.post_producer import PostProducer
from chan.post_collector import PostCollector
json_old_dump = pkgutil.get_data("chan.test.files", "json_old_dump")
json_new_dump = pkgutil.get_data("chan.test.files", "json_new_dump")
json_minimal = pkgutil.get_data("chan.test.files", "json_minimal")
json_missing_lower = pkgutil.get_data("chan.test.files", "json_missing_lower")
json_missing_upper = pkgutil.get_data("chan.test.files", "json_missing_upper")
class TestPostCollector(unittest.TestCase):
def setUp(self):
self.posts_nr_old_dump = 190
self.posts_nr_new_dump = 231
self.posts_nr_missing_dump_upper = 230 # duh
self.minimal_order = [41811961, 41811988, 41812459]
self.producer_old = PostProducer(json_old_dump)
self.producer_new = PostProducer(json_new_dump)
self.producer_minimal = PostProducer(json_minimal)
self.producer_missing_upper = PostProducer(json_missing_upper)
self.producer_missing_lower = PostProducer(json_missing_lower)
self.old_gen = self.producer_old.all_posts_wrapped()
self.new_gen = self.producer_new.all_posts_wrapped()
self.minimal_gen = self.producer_minimal.all_posts_wrapped()
self.missing_u_gen = self.producer_missing_upper.all_posts_wrapped()
self.missing_l_gen = self.producer_missing_lower.all_posts_wrapped()
self.collector = PostCollector()
def test_simple_collection(self):
self.collector.add_to_collection(*self.old_gen)
nr_items = self._len_of_dict(self.collector.posts)
self.assertEqual(self.posts_nr_old_dump, nr_items)
def test_update_collection(self):
self.collector.add_to_collection(*self.old_gen)
self.collector.add_to_collection(*self.new_gen)
nr_items = self._len_of_dict(self.collector.posts)
self.assertEqual(self.posts_nr_new_dump, nr_items)
def test_collection_deleted_missing_upper(self):
self.collector.add_to_collection(*self.new_gen)
self.collector.add_to_collection(*self.missing_u_gen)
nr_items = self._len_of_dict(self.collector.posts)
self.assertEqual(self.posts_nr_new_dump, nr_items)
def test_collection_deleted_missing_lower(self):
self.collector.add_to_collection(*self.old_gen)
self.collector.add_to_collection(*self.missing_l_gen)
nr_items = self._len_of_dict(self.collector.posts)
self.assertEqual(self.posts_nr_new_dump, nr_items)
def test_return_ordered(self):
self.collector.add_to_collection(*self.minimal_gen)
ordered = self.collector.return_ordered()
        l = list(x.post_id for x in ordered) # it's 3 elements, deal with it
self.assertEqual(l, self.minimal_order)
def test_return_ordered_fail(self):
self.collector.add_to_collection(*self.minimal_gen)
ordered = self.collector.return_ordered()
l = list(x.post_id for x in ordered) # its 3 elements, deal with it
self.assertNotEqual(l, self.minimal_order[::-1])
def _len_of_dict(self, d):
return len(d.keys())
| gpl-3.0 | 1,611,658,684,578,089,700 | 37.036145 | 78 | 0.681343 | false |
grap/odoo-addons-crb | crb_print_product/models/__init__.py | 1 | 1144 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Croc Bauges - Print Product module for Odoo
# Copyright (C) 2015-Today GRAP (http://www.grap.coop)
# @author Sylvain LE GAL (https://twitter.com/legalsylvain)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import product_product
from . import print_product_wizard
from . import print_product_type
| agpl-3.0 | -1,931,316,703,520,689,400 | 44.76 | 78 | 0.622378 | false |
julianpeeters/avro | lang/py/src/avro/datafile.py | 1 | 11950 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Read/Write Avro File Object Containers.
"""
import zlib
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from avro import schema
from avro import io
try:
import snappy
except ImportError:
  pass # fail later if snappy is used
#
# Constants
#
VERSION = 1
MAGIC = 'Obj' + chr(VERSION)
MAGIC_SIZE = len(MAGIC)
SYNC_SIZE = 16
SYNC_INTERVAL = 1000 * SYNC_SIZE # TODO(hammer): make configurable
META_SCHEMA = schema.parse("""\
{"type": "record", "name": "org.apache.avro.file.Header",
"fields" : [
{"name": "magic", "type": {"type": "fixed", "name": "magic", "size": %d}},
{"name": "meta", "type": {"type": "map", "values": "bytes"}},
{"name": "sync", "type": {"type": "fixed", "name": "sync", "size": %d}}]}
""" % (MAGIC_SIZE, SYNC_SIZE))
VALID_CODECS = ['null', 'deflate', 'snappy']
VALID_ENCODINGS = ['binary'] # not used yet
CODEC_KEY = "avro.codec"
SCHEMA_KEY = "avro.schema"
#
# Exceptions
#
class DataFileException(schema.AvroException):
"""
Raised when there's a problem reading or writing file object containers.
"""
def __init__(self, fail_msg):
schema.AvroException.__init__(self, fail_msg)
#
# Write Path
#
class DataFileWriter(object):
@staticmethod
def generate_sync_marker():
return generate_sixteen_random_bytes()
# TODO(hammer): make 'encoder' a metadata property
def __init__(self, writer, datum_writer, writers_schema=None, codec='null'):
"""
If the schema is not present, presume we're appending.
@param writer: File-like object to write into.
"""
self._writer = writer
self._encoder = io.BinaryEncoder(writer)
self._datum_writer = datum_writer
self._buffer_writer = StringIO()
self._buffer_encoder = io.BinaryEncoder(self._buffer_writer)
self._block_count = 0
self._meta = {}
if writers_schema is not None:
if codec not in VALID_CODECS:
raise DataFileException("Unknown codec: %r" % codec)
self._sync_marker = DataFileWriter.generate_sync_marker()
self.set_meta('avro.codec', codec)
self.set_meta('avro.schema', str(writers_schema))
self.datum_writer.writers_schema = writers_schema
self._write_header()
else:
# open writer for reading to collect metadata
dfr = DataFileReader(writer, io.DatumReader())
# TODO(hammer): collect arbitrary metadata
# collect metadata
self._sync_marker = dfr.sync_marker
self.set_meta('avro.codec', dfr.get_meta('avro.codec'))
# get schema used to write existing file
schema_from_file = dfr.get_meta('avro.schema')
self.set_meta('avro.schema', schema_from_file)
self.datum_writer.writers_schema = schema.parse(schema_from_file)
# seek to the end of the file and prepare for writing
writer.seek(0, 2)
# read-only properties
writer = property(lambda self: self._writer)
encoder = property(lambda self: self._encoder)
datum_writer = property(lambda self: self._datum_writer)
buffer_writer = property(lambda self: self._buffer_writer)
buffer_encoder = property(lambda self: self._buffer_encoder)
sync_marker = property(lambda self: self._sync_marker)
meta = property(lambda self: self._meta)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
# Perform a close if there's no exception
if type is None:
self.close()
# read/write properties
def set_block_count(self, new_val):
self._block_count = new_val
block_count = property(lambda self: self._block_count, set_block_count)
# utility functions to read/write metadata entries
def get_meta(self, key):
return self._meta.get(key)
def set_meta(self, key, val):
self._meta[key] = val
def _write_header(self):
header = {'magic': MAGIC,
'meta': self.meta,
'sync': self.sync_marker}
self.datum_writer.write_data(META_SCHEMA, header, self.encoder)
# TODO(hammer): make a schema for blocks and use datum_writer
def _write_block(self):
if self.block_count > 0:
# write number of items in block
self.encoder.write_long(self.block_count)
# write block contents
uncompressed_data = self.buffer_writer.getvalue()
if self.get_meta(CODEC_KEY) == 'null':
compressed_data = uncompressed_data
compressed_data_length = len(compressed_data)
elif self.get_meta(CODEC_KEY) == 'deflate':
# The first two characters and last character are zlib
# wrappers around deflate data.
compressed_data = zlib.compress(uncompressed_data)[2:-1]
compressed_data_length = len(compressed_data)
elif self.get_meta(CODEC_KEY) == 'snappy':
compressed_data = snappy.compress(uncompressed_data)
compressed_data_length = len(compressed_data) + 4 # crc32
else:
fail_msg = '"%s" codec is not supported.' % self.get_meta(CODEC_KEY)
raise DataFileException(fail_msg)
# Write length of block
self.encoder.write_long(compressed_data_length)
# Write block
self.writer.write(compressed_data)
# Write CRC32 checksum for Snappy
if self.get_meta(CODEC_KEY) == 'snappy':
self.encoder.write_crc32(uncompressed_data)
# write sync marker
self.writer.write(self.sync_marker)
# reset buffer
self.buffer_writer.truncate(0)
self.block_count = 0
def append(self, datum):
"""Append a datum to the file."""
self.datum_writer.write(datum, self.buffer_encoder)
self.block_count += 1
# if the data to write is larger than the sync interval, write the block
if self.buffer_writer.tell() >= SYNC_INTERVAL:
self._write_block()
def sync(self):
"""
Return the current position as a value that may be passed to
DataFileReader.seek(long). Forces the end of the current block,
emitting a synchronization marker.
"""
self._write_block()
return self.writer.tell()
def flush(self):
"""Flush the current state of the file, including metadata."""
self._write_block()
self.writer.flush()
def close(self):
"""Close the file."""
self.flush()
self.writer.close()
class DataFileReader(object):
"""Read files written by DataFileWriter."""
# TODO(hammer): allow user to specify expected schema?
# TODO(hammer): allow user to specify the encoder
def __init__(self, reader, datum_reader):
self._reader = reader
self._raw_decoder = io.BinaryDecoder(reader)
self._datum_decoder = None # Maybe reset at every block.
self._datum_reader = datum_reader
# read the header: magic, meta, sync
self._read_header()
# ensure codec is valid
self.codec = self.get_meta('avro.codec')
if self.codec is None:
self.codec = "null"
if self.codec not in VALID_CODECS:
raise DataFileException('Unknown codec: %s.' % self.codec)
# get file length
self._file_length = self.determine_file_length()
# get ready to read
self._block_count = 0
self.datum_reader.writers_schema = schema.parse(self.get_meta(SCHEMA_KEY))
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
# Perform a close if there's no exception
if type is None:
self.close()
def __iter__(self):
return self
# read-only properties
reader = property(lambda self: self._reader)
raw_decoder = property(lambda self: self._raw_decoder)
datum_decoder = property(lambda self: self._datum_decoder)
datum_reader = property(lambda self: self._datum_reader)
sync_marker = property(lambda self: self._sync_marker)
meta = property(lambda self: self._meta)
file_length = property(lambda self: self._file_length)
# read/write properties
def set_block_count(self, new_val):
self._block_count = new_val
block_count = property(lambda self: self._block_count, set_block_count)
# utility functions to read/write metadata entries
def get_meta(self, key):
return self._meta.get(key)
def set_meta(self, key, val):
self._meta[key] = val
def determine_file_length(self):
"""
Get file length and leave file cursor where we found it.
"""
remember_pos = self.reader.tell()
self.reader.seek(0, 2)
file_length = self.reader.tell()
self.reader.seek(remember_pos)
return file_length
def is_EOF(self):
return self.reader.tell() == self.file_length
def _read_header(self):
# seek to the beginning of the file to get magic block
self.reader.seek(0, 0)
# read header into a dict
header = self.datum_reader.read_data(
META_SCHEMA, META_SCHEMA, self.raw_decoder)
# check magic number
if header.get('magic') != MAGIC:
fail_msg = "Not an Avro data file: %s doesn't match %s."\
% (header.get('magic'), MAGIC)
raise schema.AvroException(fail_msg)
# set metadata
self._meta = header['meta']
# set sync marker
self._sync_marker = header['sync']
def _read_block_header(self):
self.block_count = self.raw_decoder.read_long()
if self.codec == "null":
# Skip a long; we don't need to use the length.
self.raw_decoder.skip_long()
self._datum_decoder = self._raw_decoder
elif self.codec == 'deflate':
# Compressed data is stored as (length, data), which
# corresponds to how the "bytes" type is encoded.
data = self.raw_decoder.read_bytes()
# -15 is the log of the window size; negative indicates
# "raw" (no zlib headers) decompression. See zlib.h.
uncompressed = zlib.decompress(data, -15)
self._datum_decoder = io.BinaryDecoder(StringIO(uncompressed))
elif self.codec == 'snappy':
# Compressed data includes a 4-byte CRC32 checksum
length = self.raw_decoder.read_long()
data = self.raw_decoder.read(length - 4)
uncompressed = snappy.decompress(data)
self._datum_decoder = io.BinaryDecoder(StringIO(uncompressed))
      self.raw_decoder.check_crc32(uncompressed)
else:
raise DataFileException("Unknown codec: %r" % self.codec)
def _skip_sync(self):
"""
Read the length of the sync marker; if it matches the sync marker,
return True. Otherwise, seek back to where we started and return False.
"""
proposed_sync_marker = self.reader.read(SYNC_SIZE)
if proposed_sync_marker != self.sync_marker:
self.reader.seek(-SYNC_SIZE, 1)
return False
else:
return True
# TODO(hammer): handle block of length zero
# TODO(hammer): clean this up with recursion
def next(self):
"""Return the next datum in the file."""
if self.block_count == 0:
if self.is_EOF():
raise StopIteration
elif self._skip_sync():
if self.is_EOF(): raise StopIteration
self._read_block_header()
else:
self._read_block_header()
datum = self.datum_reader.read(self.datum_decoder)
self.block_count -= 1
return datum
def close(self):
"""Close this reader."""
self.reader.close()
def generate_sixteen_random_bytes():
try:
import os
return os.urandom(16)
  except (ImportError, NotImplementedError):
    # Fall back to a (non-cryptographic) pseudo-random byte string so the
    # sync marker is always a 16-byte str, matching os.urandom's output.
    import random
    return ''.join(chr(random.randrange(256)) for i in range(16))
| apache-2.0 | 1,734,923,212,509,811,200 | 31.472826 | 78 | 0.663096 | false |
madgik/exareme | Exareme-Docker/src/mip-algorithms/THREE_C/threec.py | 1 | 44722 | #!/usr/bin/env python
from __future__ import print_function
from __future__ import division
import rpy2.robjects as robjects
from rpy2.robjects import pandas2ri
import pandas as pd
import numpy as np
from mipframework import Algorithm, AlgorithmResult, TabularDataResource
class ThreeC(Algorithm):
def __init__(self, cli_args):
super(ThreeC, self).__init__(__file__, cli_args, intercept=False)
def local_pure(self):
c2_feature_selection_method = self.parameters.c2_feature_selection_method
c2_num_clusters_method = self.parameters.c2_num_clusters_method
c2_clustering_method = self.parameters.c2_clustering_method
# =======================================================================
# NOTE: number_of_clusters parameter default value doesn't work in R code
# =======================================================================
c2_num_clusters = int(self.parameters.c2_num_clusters)
c3_feature_selection_method = self.parameters.c3_feature_selection_method
c3_classification_method = self.parameters.c3_classification_method
cm_names = self.parameters.x
pb_names = self.parameters.y
markers_and_biomarkers = self.data.full[cm_names + pb_names]
diag_name = self.parameters.dx
diagnosis = self.data.db.select_vars_from_data(
[diag_name], self.parameters.dataset, self.parameters.filter
)
full_data = pd.concat([diagnosis, markers_and_biomarkers], axis=1)
var_names = [diag_name] + cm_names + pb_names
var_categories = ["DX"] + ["CM"] * len(cm_names) + ["PB"] * len(pb_names)
full_metadata = pd.DataFrame(
{"varName": var_names, "varCategory": var_categories}
)
r_data = pandas2ri.py2ri(full_data)
r_md = pandas2ri.py2ri(full_metadata)
robjects.globalenv["data"] = r_data
robjects.globalenv["metadata"] = r_md
define_r_funcs()
robjects.r(
"""
x <- get_xy_from_DATA_C2(data, metadata)$x
y <- get_xy_from_DATA_C2(data, metadata)$y
"""
)
robjects.r(
"""
C2_results <- C2(x,
y,
feature_selection_method="{fsm}",
num_clusters_method="{ncm}",
clustering_method="{cm}",
plot.num.clus=TRUE,
plot.clustering=TRUE,
k={nc}
)
""".format(
fsm=c2_feature_selection_method,
ncm=c2_num_clusters_method,
cm=c2_clustering_method,
nc=c2_num_clusters,
)
)
robjects.r(
"""
PBx <- get_PBx_from_DATA_C3(data, metadata)
new_y <- C2_results[[3]]
"""
)
robjects.r(
"""
C3_results <- C3(PBx = PBx,
newy = new_y,
feature_selection_method = "{fsm}",
classification_method="{cm}"
)
result <- table(new_y, C3_results[[2]])
""".format(
fsm=c3_feature_selection_method, cm=c3_classification_method
)
)
res = np.array(list(robjects.globalenv["result"]))
res = res.reshape(c2_num_clusters, c2_num_clusters).tolist()
table_out = TabularDataResource(
fields=[str(i + 1) for i in range(len(res))],
data=tuple(res),
title="3C result",
)
self.result = AlgorithmResult(
raw_data=dict(), tables=[table_out], highcharts=[],
)
def define_r_funcs():
rdef_get_xy_from_DATA_C2()
rdef_feature_selection()
rdef_Feature_Selection_dummy_regressions()
rdef_Feature_Selection_RF()
rdef_Feature_Selection_BIC()
rdef_MSFDR()
rdef_Feature_Selection_AIC_MSFDR()
rdef_Feature_Selection_AIC()
rdef_FDR_selection()
rdef_number_of_clusters()
rdef_k_euclidean()
rdef_k_manhattan()
rdef_khclust_euc()
rdef_khclust_man()
rdef_clustering()
rdef_cluster_euclidean()
rdef_cluster_manhattan()
rdef_hclust_euc()
rdef_hclust_man()
rdef_C2()
rdef_get_PBx_from_DATA_C3()
rdef_C3()
rdef_classification_fun()
rdef_RF_classify()
rdef_RF_one_by_one()
rdef_cart_function()
def rdef_get_xy_from_DATA_C2():
robjects.r(
"""
#' Title get_xy_from_DATA_C2
#'
#' @param DATA Full data matrix, includes all observations for all the variables
#' @param META_DATA Needs to have at least 2 columns, one with all variable names and another which indicates
#' the type of each variable (CM, DX, PB)
#'
#' @return a list with x (the CM variables) and y (the DX variable)
#'
#' @export
#'
#' @examples
#' # x <- get_xy_from_DATA_C2(DATA, META_DATA)[[1]]
#' # y <- get_xy_from_DATA_C2(DATA, META_DATA)[[2]]
get_xy_from_DATA_C2 <- function(DATA, META_DATA) {
# DATA META_DATA
x <- DATA[, META_DATA$varName[META_DATA$varCategory == "CM"]]
y <- DATA[, META_DATA$varName[META_DATA$varCategory == "DX"]]
list(x = x, y = y)
}
"""
)
# =================
# Feature Selection
# =================
def rdef_feature_selection():
robjects.r(
"""
#' Title Features Selection
#'
#' @param x Data matrix
#' @param y Dependent variable
#' @param method The method to be used for the feature selection: Random forest, AIC, AIC with MSFDR or BIC
#' @param ... further arguments to be passed to or from other methods
#'
#' @return a list of important variables
#'
#' @export
#'
#' @examples
#' # feature_selection(x, y, method='RF')
#' # feature_selection(x[, 1:30], y, method='BIC')
#' # feature_selection(x, y, method='FDR_screening')
feature_selection <- function(x, y, method = "RF", ...) {
if (method == "RF") {
output <- Feature_Selection_dummy_regressions(x, y, Feature_Selection_RF,
...) # ('...' : p)
}
if (method == "AIC_MSFDR") {
output <- Feature_Selection_dummy_regressions(x, y, Feature_Selection_AIC_MSFDR,
...) # ('...' : q, print.the.steps)
}
if (method == "BIC") {
output <- Feature_Selection_dummy_regressions(x, y, Feature_Selection_BIC,
...) # ('...' : nbest, nvmax, nmin, plot)
}
if (method == "AIC") {
output <- Feature_Selection_dummy_regressions(x, y, Feature_Selection_AIC)
}
if (method == "FDR_screening") {
output <- Feature_Selection_dummy_regressions(x, y, FDR_selection,
...) # ('...' : q, eta)
}
if (method == "LASSO") {
output <- Feature_Selection_dummy_regressions(x, y, LASSO_selection)
}
return(output)
}
"""
)
def rdef_Feature_Selection_dummy_regressions():
robjects.r(
"""
#' Finds a subset of variables based on all dummy regressions
#' Title Feature Selection Dummy Regression
#'
#' @param x Data matrix
#' @param y Dependent variable
#' @param FUN Indicating which method to use for feature selection
#' @param ... further arguments to be passed to or from other methods
#'
#' @return a vector with the names of the important variables
#' @export
#'
#' @examples
#' Feature_Selection_dummy_regressions(x, y, Feature_Selection_RF)
#'
Feature_Selection_dummy_regressions <- function(x, y, FUN, ...) {
u_y <- unique(y)
selected_variables <- list()
for (i in seq_along(u_y)) {
dummy_y <- as.numeric(y == u_y[i])
# FUN(x, y, ...)
selected_variables[[i]] <- FUN(x, dummy_y, ...)
}
# Union of all selected variables
unique(unlist(selected_variables))
}
"""
)
# =================================
# Feature Selection - sub-functions
# =================================
# ==============
# Random Forests
# ==============
def rdef_Feature_Selection_RF():
robjects.r(
"""
#' Title Feature Selection Using Random Forest
#'
#' @param x Data matrix
#' @param y Categorical dependent variable (factor)
#' @param p Percentage of the number of variables to be chosen from x. Default value is 0.1.
#' @return list of the top p percentage of the variables, chosen by their importance (mean decrease in accuracy).
#'
#' @export
#'
#' @examples
#' # Feature_Selection_RF(x, y, p = 0.1)
#'
Feature_Selection_RF <- function(x, y, p = 0.1) {
library(randomForest)
if (!is.factor(y)) {
warning("y is not a factor - but was coerced into one.")
y <- as.factor(y)
}
rf_DX_by_CM <- randomForest(y ~ ., data = x, importance = TRUE, proximity = TRUE)
var_import <- importance(rf_DX_by_CM)[, "MeanDecreaseAccuracy"]
    m <- round(dim(x)[2] * p) # We'll keep just p (default 10%) of the variables; the percentage can be changed
    subset_vars <- sort(var_import, decreasing = TRUE)[1:m] # Sort the variables by their importance (mean decrease in accuracy)
important_var_RF <- names(subset_vars)
return(unlist(important_var_RF))
}
"""
)
# ===
# BIC
# ===
def rdef_Feature_Selection_BIC():
robjects.r(
"""
#' Title Feature Selection Using BIC
#'
#' @param x Data matrix
#' @param y response vector (must be numeric?)
#' @param nbest number of subsets of each size to record
#' @param nvmax maximum size of subsets to examine
#' @param nmin minimum number of variables to be included in the suggested final model
#' @param plot.BIC if TRUE (default) the function plots a table of models showing which variables are in each model.
#' The models are ordered by the specified model selection statistic.
#' @return
#' vector with the names of variables of the model with minimum BIC between the models including more then 'nmin' variables' of regsubsets object
#' @export
#'
#' @examples
#' # Feature_Selection_BIC(x[, 1:30], y, nbest=1, nvmax=5, plot.BIC=TRUE, nmin=4)
Feature_Selection_BIC <- function(x, y, nbest = 1, nvmax = 12, nmin = 4,
plot.BIC = FALSE) {
library(leaps)
library(car)
fulldata <- data.frame(x, y) # Creating one joint data.frame of the data
RET <- regsubsets(y ~ ., data = fulldata, nbest = nbest, nvmax = nvmax,
really.big = TRUE)
# if (plot.BIC) { plot(RET, scale = 'bic') }
    summary_RET <- summary(RET) # Saving the summary of the regsubsets output
help_mat <- matrix(as.numeric(summary_RET$which), nrow = (nvmax * nbest),
ncol = (dim(x)[2] + 1)) # Which variables were chosen for each model
num_var_each_model <- apply(help_mat, 1, sum) # Counting the number of variables chosen for each model
    chosen_models <- summary_RET$bic[which(num_var_each_model >= nmin)] # Saving the BIC values of the models which include more than 'nmin' variables
    ind_model_min_BIC <- which(chosen_models == min(chosen_models)) # Which model with more than 'nmin' variables has the minimum BIC
return(unlist(colnames(x)[which(help_mat[ind_model_min_BIC, ] == 1) -
1]))
}
"""
)
# ============
# AIC with FDR
# ============
def rdef_MSFDR():
robjects.r(
"""
#' Title Forward Selection Using AIC Criteria and MSFDR Procedure
#'
#' @param minimal.lm lm function output of model which includes an intercept
#' @param maximal.lm lm function output of model which not includes an intercept
#' @param q Significance level. Defaults to 0.05
#' @param print.the.steps if TRUE, the Lambda, model size and final model at each iteration will be printed.
#' Defaults to FALSE
#' @param print.running.time if TRUE, the running time will be printed. Defaults to the value of
#' print.the.steps.
#' @return
#' Final model, running time, summary of AIC_MSFDR object
#' @export
#'
#' @examples
#' # Feature_Selection_AIC_MSFDR(x, y, q = 0.5, print.the.steps = FALSE)
#'
MSFDR <- function(minimal.lm, maximal.lm, q, print.the.steps, print.running.time = print.the.steps) {
# computes forward model selection using the multiple stage FDR
# controlling procedure (MSFDR)
if (!(class(minimal.lm) == "lm" & class(maximal.lm) == "lm")) {
print("one of the models you entered aren't linear models (lm), please try fitting lm only")
break
}
if (print.running.time)
time <- proc.time()
library(MASS)
algorithm.direction <- "forward" # always forward
the.scope <- list(lower = minimal.lm, upper = maximal.lm)
trace.stepAIC <- ifelse(print.the.steps, 1, 0)
iteration.number <- 1
m <- extractAIC(maximal.lm)[1] - 1 # check if the full model should include the intercept or not !!!!!!
i <- max(extractAIC(minimal.lm)[1] - 1, 1) # so if the model is with intercept only, the i size won't be 0.
# q = .05 # default
Lambda <- qnorm((1 - 0.5 * q * i/(m + 1 - i * (1 - q))))^2
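    # Illustrative (made-up) numbers for the penalty above: with q = 0.05,
    # i = 1 and m = 100, Lambda comes out at roughly 12, i.e. a much harsher
    # penalty than the fixed k = 2 used by plain AIC.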
if (print.the.steps) {
print(paste("Starting Lambda is: ", Lambda))
}
# first step of the algorithm
new.lm <- stepAIC(minimal.lm, direction = algorithm.direction, scope = the.scope,
k = Lambda, trace = trace.stepAIC)
new.lm.model.size <- extractAIC(new.lm)[1] - 1
while (new.lm.model.size > i) {
iteration.number <- iteration.number + 1
if (print.the.steps) {
print("=========================================")
print("=========================================")
print(paste("iteration number: ", iteration.number))
print(paste("current model size is:", new.lm.model.size, ">",
i, " (which is bigger then the old model size)"))
}
i <- new.lm.model.size
Lambda <- qnorm((1 - 0.5 * q * i/(m + 1 - i * (1 - q))))^2
if (print.the.steps) {
print(paste("new Lambda is: ", Lambda))
}
new.lm <- stepAIC(new.lm, direction = algorithm.direction, scope = the.scope,
k = Lambda, trace = trace.stepAIC)
new.lm.model.size <- extractAIC(new.lm)[1] - 1
}
if (print.the.steps) {
print("=========================================")
print("=========================================")
print("=========================================")
print("The final model is: ")
print(new.lm$call)
}
if (print.running.time) {
print("")
print("Algorithm running time was:")
print(proc.time() - time)
}
return(new.lm)
}
"""
)
def rdef_Feature_Selection_AIC_MSFDR():
robjects.r(
"""
# TODO: MSFDR does NOT (!!!) work with non-numeric values. Using it for
# factors will produce very wrong results. It should be considered
# whether to extend it to also work with factors (e.g.: through
# multinomial regression)
Feature_Selection_AIC_MSFDR <- function(x, y, q = 0.05, print.the.steps = FALSE) {
y <- as.numeric(y)
fulldata <- data.frame(x, y = y)
# Creating one joint data.frame of the data defining the smallest and
# largest lm we wish to progress through
smallest_linear_model <- lm(y ~ +1, data = fulldata)
largest_linear_model <- lm(y ~ ., data = fulldata)
# Implementing the MSFDR functions (with q = 0.05)
AIC_MSDFR <- MSFDR(minimal.lm = smallest_linear_model, maximal.lm = largest_linear_model,
q, print.the.steps)
sum <- summary(AIC_MSDFR) # Saving the summary of the AIC.MSFDR procedure
important_var_FDR <- which(!is.na(AIC_MSDFR$coeff))
important_var_FDR <- names(important_var_FDR)
return(unlist(important_var_FDR[2:length(important_var_FDR)]))
}
"""
)
# ===================
# AIC without FDR ###
# ===================
def rdef_Feature_Selection_AIC():
robjects.r(
"""
#' Title Feature Selection Using AIC
#'
#' @param x data matrix
#' @param y categorical variable (factor)
#'
#' @return
#' Returns a list with two items. The first is a list of important variables. The second
#' is NA if print.summary.AIC==FALSE or the summary of AIC if TRUE.
#' @export
#'
#' @examples
#' # Feature_Selection_AIC(x, y)
Feature_Selection_AIC <- function(x, y) {
library(MASS)
y <- as.numeric(y)
fulldata <- data.frame(x, y) # Creating one joint data.frame of the data
smallest_linear_model <- lm(y ~ +1, data = fulldata)
largest_linear_model <- lm(y ~ . + 1, data = fulldata)
AIC_procedure <- stepAIC(object = smallest_linear_model, scope = list(lower = smallest_linear_model,
upper = largest_linear_model), direction = "forward", trace = FALSE)
important_var_AIC <- names(AIC_procedure$coeff)
    return(unlist(important_var_AIC[2:length(important_var_AIC)])) # Dropping the '(Intercept)' entry
}
"""
)
# ==================================
# FDR Selection (F and Chi-sq tests)
# ==================================
def rdef_FDR_selection():
robjects.r(
"""
#' Title Feature Selection Using FDR selection
#'
#' @param x data matrix
#' @param y categorical variable (factor)
#' @param q adjusted p value threshold level. The chosen variables will have adjusted p value smaller than q
#' @param eta eta squared threshold, the chosen variables will have eta value greater then eta.
#'
#' @return
#' Returns a list of the selected variables
#' @export
#'
#' @examples
#' # FDR_selection(x, y, q = 0.001, eta = 0.1)
FDR_selection <- function(x, y, q = 0.05, eta = 0.1) {
if (!is.factor(y)) {
warning("y is not a factor - but was coerced into one.")
y <- as.factor(y)
}
eta_squared <- rep(NA, dim(x)[2])
original_p_val <- rep(NA, dim(x)[2])
for (i in 1:dim(x)[2]) {
# variable is discrete
if (sum(floor(x[, i]) == x[, i]) == dim(x)[2])
{
original_p_val[i] <- chisq.test(x = x[, i], y)$p.value
eta_squared[i] <- summary.lm(lm(as.factor(x[, i]) ~ as.factor(y)))$r.squared
} # variable is not discrete
else {
anova_model <- anova(lm(x[, i] ~ y + 0))
original_p_val[i] <- anova_model[[5]][1]
eta_squared[i] <- summary.lm(lm(x[, i] ~ as.factor(y)))$r.squared
}
}
names(original_p_val) <- colnames(x)
adjust_p_val <- p.adjust(original_p_val, method = "BH")
is_smaller <- ifelse(adjust_p_val < q & eta_squared > eta, 1, 0)
screening <- data.frame("var" = names(original_p_val), original_p_val, adjust_p_val,
eta_squared, is_smaller, row.names = c(1:length(original_p_val)))
keep_vars <- screening$var[which(is_smaller == 1)]
screening <- screening[order(original_p_val), ]
return(as.character(keep_vars))
}
#' Title LASSO
#'
#' @param x Data matrix
#' @param y Dependent variable
#'
#' @return
#' plot and table which advises how many clusters should be
#'
#' @export
#'
#' @examples
#' # LASSO_selection(x, y)
# LASSO_selection<-function(x, y) { cvfit <- cv.glmnet(as.matrix(x), y)
# important_var_LASSO <- as.matrix(coef(cvfit, s = 'lambda.1se'))
# important_var_LASSO <- important_var_LASSO[important_var_LASSO[, 1]
# != 0, ] important_var_LASSO <-
# important_var_LASSO[names(important_var_LASSO) != '(Intercept)']
# reduced_x <- x[, names(important_var_LASSO)] return(reduced_x) }
"""
)
# ======================================================
# Deciding on number of clusters and clustering the data
# ======================================================
def rdef_number_of_clusters():
robjects.r(
"""
#' Title Deciding on Number of Clusters
#'
#' @param x Data matrix
#' @param method character string indicating how the "optimal" number of clusters is determined: Euclidean (default), Manhattan,
#' hierarchical Euclidean or hierarchical Manhattan
#' @param K.max the maximum number of clusters to consider, must be at least two. Default value is 10.
#' @param B integer, number of Monte Carlo ("bootstrap") samples. Default value is 100.
#' @param verbose integer or logical, determining if "progress" output should be printed. The default prints
#' one bit per bootstrap sample. Default value is FALSE.
#' @param scale if TRUE (default) the data matrix will be scaled.
#' @param diss if TRUE (default as FALSE) x will be considered as a dissimilarity matrix.
#' @param cluster.only if true (default as FALSE) only the clustering will be computed and returned, see details.
#' @param plot.num.clus if TRUE (default) the gap statistic plot will be printed
#'
#' @return
#' plot and table which advises how many clusters should be
#'
#' @export
#'
#' @examples
#' # number_of_clusters(subx, B=50, method='Euclidean')
#'
number_of_clusters <- function(x, method = "Euclidean", K.max = 10, B = 100,
verbose = FALSE, plot.num.clus = TRUE, scale = TRUE, diss = FALSE,
cluster.only = TRUE) {
# scale
if (scale) {
x <- scale(x)
}
# TODO: what we SHOULD do is pass Euclidean/Man to the functions, as
# well as hclust vs pam...
if (method == "Euclidean") {
k_clusters <- k_euclidean(x, K.max, B, verbose, plot.num.clus)
}
if (method == "Manhattan") {
k_clusters <- k_manhattan(x, K.max, diss, B, cluster.only, verbose,
plot.num.clus)
}
if (method == "hclust_Euclidean") {
k_clusters <- khclust_euc(x, K.max, B, verbose, plot.num.clus)
}
if (method == "hclust_Manhattan") {
k_clusters <- khclust_man(x, K.max, B, verbose, plot.num.clus)
}
return(list(k_clusters))
}
"""
)
def rdef_k_euclidean():
robjects.r(
"""
#' Title Gap statisic with k-medoids euclidean
#'
#' @param x Data matrix
#' @param K.max the maximum number of clusters to consider, must be at least two. Default value is 10.
#' @param B integer, number of Monte Carlo ("bootstrap") samples. Default value is 100.
#' @param verbose integer or logical, determining if "progress" output should be printed. The default prints
#' one bit per bootstrap sample. Default value is FALSE.
#' @param plot.num.clus if TRUE (default) the gap statistic plot will be printed
#'
#' @return the clusGap function' values
#' @export
#'
#' @examples
#' # k_euclidean(subx, K.max=8, B=50, verbose=FALSE, plot.num.clus=TRUE)
#'
k_euclidean <- function(x, K.max, B, verbose, plot.num.clus) {
library(cluster)
library(clusterCrit)
clusGap_best <- cluster::clusGap(x, FUN = pam, K.max = K.max, B, verbose)
if (plot.num.clus) {
plot(clusGap_best, main = "Gap Statistic for k-medoids Euclidean")
}
# # Silhouette Criteria for k-medoids sil <- c(rep(NA, 10)) sil[1] <- 0
# max_sil <- 0 clust_num_sil <- 0 for (i in 2:10) { clust <- pam(x, i,
# diss = FALSE) sil[i] <- intCriteria(x, clust$cluster, 'Silhouette')
# if (as.numeric(sil[i]) > max_sil) { max_sil_means <- sil[i]
# clust_num_sil <- i } } if (plot.num.clus) { plot(as.numeric(sil),
# type = 'l', main = 'Silhouette criteria k-medoids Euclidean') }
# return(list(clusGap_best, clust))
return(list(clusGap_best))
}
"""
)
def rdef_k_manhattan():
robjects.r(
"""
#' Title Gap statisic with k-medoids manhattan
#'
#' @param x data matrix
#' @param K.max positive integer specifying the number of clusters, less than the number of observations.
#' Default value is 10.
#' @param diss if TRUE (default as FALSE) x will be considered as a dissimilarity matrix
#' @param B integer, number of Monte Carlo ("bootstrap") samples. Default value is 100.
#' @param cluster.only if true (default) only the clustering will be computed and returned, see details.
#' @param verbose integer or logical, determining if "progress" output should be printed. The default prints
#' one bit per bootstrap sample. Default as FALSE.
#' @param plot.num.clus if TRUE (default) the gap statistic plot will be printed
#' @param ... another objects of pam function
#'
#' @return clusGap function' output
#' @export
#'
#' @examples
#' # k_manhattan (subx, K.max = 8, diss=FALSE, B = 50, cluster.only = TRUE, verbose = FALSE)
#'
k_manhattan <- function(x, K.max, diss, B, cluster.only, verbose, plot.num.clus) {
library(cluster)
library(clusterCrit)
library(magrittr)
library(fpc)
pam_1 <- function(x, k, ...) {
clusters <- x %>% pam(k = k, diss = diss, metric = "manhattan",
cluster.only = cluster.only)
list(clusters = clusters)
}
set.seed(40)
clusGap_best <- clusGap(x, FUN = pam_1, K.max = K.max, B = B, verbose = verbose)
if (plot.num.clus) {
plot(clusGap_best, main = "Gap Statistic for k-medoids Manhattan")
}
# #Silhouette criteria with k-medoids manhattan
# sil_med_m<-c(rep(NA,10)) sil_med_m[1]<-0 max_sil_med_m<-0
# clust_num_sil_med_m<-0 for (i in 2:10) {
# clust_med_m<-pam(Scaled_Reduced_CM_trans,i,diss=FALSE,metric='manhattan')
# sil_med_m[i]<-intCriteria(Scaled_Reduced_CM_trans,clust_med_m$cluster,'Silhouette')
# if (as.numeric(sil_med_m[i]) > max_sil_med_m) {
# max_sil_med_m<-sil_med_m[i] clust_num_sil_med_m<-i } }
# plot(as.numeric(sil_med_m),type='l',main='Silhouette criteria,
# k-medoids manhattan')
return(list(clusGap_best))
}
"""
)
def rdef_khclust_euc():
robjects.r(
"""
#' Title Gap statistics for hclust Euclidean
#'
#' @param x data matrix
#' @param K.max positive integer specifying the number of clusters, less than the number of observations.
#' @param B integer, number of Monte Carlo ("bootstrap") samples
#' @param verbose integer or logical, determining if "progress" output should be printed. The default prints
#' one bit per bootstrap sample
#' @param plot.num.clus if TRUE (default) the gap statistic plot will be printed
#'
#' @return the clusGap function output
#' @export
#'
#' @examples
#' # khclust_euc(subx,K.max=10, B=60, verbose = FALSE, plot.num.clus=TRUE )
#'
khclust_euc <- function(x, K.max, B, verbose, plot.num.clus) {
hclust_k_euc <- function(x, k, ...) {
library(magrittr)
library(cluster)
clusters <- x %>% dist %>% hclust %>% cutree(k = k)
list(clusters = clusters)
}
clusGap_best <- clusGap(x, FUN = hclust_k_euc, K.max = K.max, B = B,
verbose = verbose)
if (plot.num.clus) {
plot(clusGap_best, main = "Gap statistic, hclust Euclidean")
}
return(clusGap_best)
}
"""
)
def rdef_khclust_man():
robjects.r(
"""
#' Title Gap statistics for hclust Manhattan
#'
#' @param x data matrix
#' @param K.max positive integer specifying the number of clusters, less than the number of observations.
#' Default value is 10
#' @param B integer, number of Monte Carlo ("bootstrap") samples. Default value is 100.
#' @param verbose integer or logical, determining if "progress" output should be printed. The default prints
#' one bit per bootstrap sample. Default value is FALSE.
#' @param plot.num.clus if TRUE (default) the gap statistic plot will be printed
#'
#' @return the clusGap function output
#' @export
#'
#' @examples
#' # khclust_man(subx, K.max=8, B=60, verbose=FALSE, plot.num.clus=TRUE)
#'
khclust_man <- function(x, K.max, B, verbose, plot.num.clus) {
hclust_k_man <- function(x, k, ...) {
library(magrittr)
clusters <- x %>% dist(method = "manhattan") %>% hclust %>% cutree(k = k)
list(clusters = clusters)
}
clusGap_best <- clusGap(x, FUN = hclust_k_man, K.max = K.max, B = B,
verbose = verbose)
if (plot.num.clus) {
plot(clusGap_best, main = "Gap statistic, hclust Manhattan")
}
return(list(clusGap_best))
}
"""
)
# =====================
# Clustering the data #
# =====================
def rdef_clustering():
robjects.r(
"""
#' Title Clustering
#'
#' @param x data matrix
#' @param k.gap positive integer specifying the number of clusters, less than the number of observation. Default value is 10.
#' @param method Indicating which method to use for clustering. Default is 'Euclidean'.
#' @param plot.clustering if TRUE (default) a 2-dimensional "clusplot" plot will be printed
#'
#' @return vector withnew assigned clusters
#' @export
#'
#' @examples
#' clustering(subx, k.gap = 5, method='Euclidean', plot.clustering=TRUE)
#'
clustering <- function(x, k.gap = 2, method = "Euclidean", plot.clustering = FALSE) {
if (method == "Euclidean") {
clusters <- cluster_euclidean(x, k.gap, plot.clustering)
}
if (method == "Manhattan") {
clusters <- cluster_manhattan(x, k.gap, plot.clustering)
}
if (method == "Heuclidean") {
clusters <- cluster_euclidean(x, k.gap, plot.clustering)
}
if (method == "Hmanhattan") {
clusters <- cluster_manhattan(x, k.gap, plot.clustering)
}
return(clusters)
}
### Euclidean ###
#' Title Clustering Using Euclidean distances
#'
#' @param x data matrix
#' @param k.gap positive integer specifying the number of clusters, less than the number of observation. Default value is 10.
#' @param plot.clustering if TRUE (default) a 2-dimensional "clusplot" plot will be printed
#'
#' @return
#' vector with the new assigned clusters
#'
#' @export
#'
#' @examples
#' # cluster_euclidean(subx, k.gap = 5, plot.clustering = TRUE)
#'
"""
)
def rdef_cluster_euclidean():
robjects.r(
"""
# Title Cluster Euclidean
cluster_euclidean <- function(x, k.gap, plot.clustering) {
library(cluster)
pam_4 <- pam(x, k.gap, diss = FALSE)
if (plot.clustering) {
clusplot(x, pam_4$cluster, color = TRUE, main = c("k-medoids,",
paste = k.gap, "clusters"))
}
clusters <- pam_4$cluster
return(unlist(clusters))
}
"""
)
# =========
# Manhattan
# =========
def rdef_cluster_manhattan():
robjects.r(
"""
#' Title Clustering Using Manhattan Distances
#'
#' @param x data matrix
#' @param k.gap positive integer specifying the number of clusters, less than the number of observation. Default value is 10.
#' @param plot.clustering if TRUE (default) a 2-dimensional "clusplot" plot will be printed
#'
#' @return
#' vector with the new assigned clusters
#' @export
#'
#' @examples
#' # cluster_manhattan(subx, k.gap=4, plot.clustering=TRUE)
#'
cluster_manhattan <- function(x, k.gap, plot.clustering) {
pam_3_man <- pam(x, k.gap, diss = FALSE, metric = "manhattan")
if (plot.clustering) {
clusplot(x, pam_3_man$cluster, color = TRUE, main = c("k-medoids,manhattan",
paste(k.gap), "clusters"))
}
clusters <- pam_3_man$cluster
return(unlist(clusters))
}
"""
)
def rdef_hclust_euc():
robjects.r(
"""
### Hierarchical clustering euclidean ###
#' Title Deciding on number of clusters by using Hierarchical clustering euclidean
#'
#' @param x data matrix
#' @param y Dependent variable
#' @param k.gap positive integer specifying the number of clusters, less than the number of observation. Default value is 10.
#' @param plot.clustering if TRUE (default) a 2-dimensional "clusplot" plot will be printed
#'
#'
#' @return
#' summary table of the distribution to clusters
#' @export
#'
#' @examples
#' hclust_euc(subx, k.gap = 5, plot.clustering=TRUE)
#'
hclust_euc <- function(x, k.gap, plot.clustering) {
d <- dist(x, method = "euclidean")
fit_best <- hclust(d, method = "ward.D")
if (plot.clustering) {
plot(fit_best, main = c("hclust , euclidean,", paste(k.gap), " clusters"))
}
groups_best_4 <- cutree(fit_best, k = k.gap)
rect.hclust(fit_best, k = k.gap, border = "blue")
clusters <- groups_best_4
return(unlist(clusters))
}
"""
)
# =================================
# Hierarchical clustering manhattan
# =================================
def rdef_hclust_man():
robjects.r(
"""
#' Title Deciding on number of clusters by Hierarchical clustering manhattan
#'
#' @param x data matrix
#' @param plot.clustering if TRUE (default) a 2-dimensional 'clusplot' plot will be printed
#'
#' @return
#' a list of two variables the hclust function description and a summary table
#' of the distribution to clusters
#' @export
#'
#' @examples
#' hclust_man(subx, k.gap = 5, plot.clustering=TRUE)
#'
hclust_man <- function(x, k.gap, plot.clustering) {
d_man <- dist(x, method = "manhattan")
fit_best_man <- hclust(d_man, method = "ward.D")
if (plot.clustering) {
plot(fit_best_man, main = c("hclust, manhattan,", paste(k.gap),
"7 clusters"))
}
groups_best_4_man <- cutree(fit_best_man, k = k.gap)
rect.hclust(fit_best_man, k = k.gap, border = "red")
clusters <- groups_best_4_man
return(unlist(clusters))
}
"""
)
# =============
# 3 C functions
# =============
def rdef_C2():
robjects.r(
"""
#' Title C2
#'
#' @param x data matrix
#' @param y Dependent variable
#' @param feature_selection_method method for the feature selection of the clinical measurements stage. Default RF.
#' @param num_clusters_method method for the choosing number of clusters by using the clinical measurements. Default Euclidean.
#' @param k number of clusters to use. If missing, we use a detection method. Defaukt as NULL
#' @param clustering_method method for clustering using the reduced clinical measures. Default is Hmanhattan,
#'
#' @return a list of three variables:
#' 1) vector with the names of the omportant variables chosen.
#' 2) number of classes that will be used for clustering
#' 3) vector of the new assigned clusterst
#'
#' @export
#'
#' @examples
#' resultC2 <- C2(x, y, feature_selection_method='RF', num_clusters_method='Manhattan', clustering_method='Manhattan', plot.num.clus=TRUE, plot.clustering=TRUE)
#' C2(x, y, feature_selection_method='BIC', num_clusters_method='Manhattan', clustering_method='Hmanhattan', plot.num.clus=TRUE, plot.clustering=FALSE, nbest=1, nvmax=8, B=50)
C2 <- function(x, y, feature_selection_method, num_clusters_method, k = NULL,
clustering_method, ...) {
# Feature selection
imp_var <- feature_selection(x, y, method = feature_selection_method)
# print(imp_var) CM_final_vars <- imp_var[[1]][2] # Extracting a list
# of inportant CM variables
subx <- x[, unlist(imp_var)]
# Deciding on number of clusters
if (missing(k)) {
num_clust <- number_of_clusters(x = subx, method = num_clusters_method)
print(num_clust)
# library(car)
user_choise <- function() {
k <- readline(prompt = paste("Enter the chosen number of clusters",
":\n"))
k <- as.numeric(k)
return(k)
}
num_clust <- user_choise()
} else {
num_clust <- k
}
# Final clustering
final_cluster <- clustering(subx, k.gap = num_clust)
# print(final_cluster)
return(list(imp_var, num_clust, final_cluster))
}
"""
)
def rdef_get_PBx_from_DATA_C3():
robjects.r(
"""
#' Title get_PBx_from_DATA_C3
#'
#' @param DATA Full data matrix, includes all observations for all the variables
#' @param META_DATA Need to have at least 2 columns, one with all variables name, another one which indicate
#' the type of each variable (CM, DX, PB)
#'
#' @return a list of important variables
#'
#' @export
#'
#' @examples
#' # PBx <- get_PBx_from_DATA_C3(DATA, META_DATA)
#'
get_PBx_from_DATA_C3 <- function(DATA, META_DATA) {
x <- DATA[, META_DATA$varName[META_DATA$varCategory == "PB"]]
return(PBx = x)
}
"""
)
def rdef_C3():
robjects.r(
"""
#' Title C3
#'
#' @param PBx data matrix
#' @param newy new assigned clusters, results from C2.
#' @param feature_selection_method method for the feature selection of the Potential Bio-Markers
#' @param classification_method method for classification using the potential bio-markers
#'
#' @return a list of two variables:
#' 1) vector with the names of important variables chosen
#' 2) classification result for each observation
#' @export
#'
#' @examples
#' C3(PBx, newy, feature_selection_method='RF', classification_method='RF')
#'
C3 <- function(PBx, newy, feature_selection_method, classification_method) {
# Feature selection if(!factor(newy)){ newy <- as.factor(newy) }
imp_var <- feature_selection(PBx, newy, method = feature_selection_method)
sub_PBx <- PBx[, imp_var]
# Classification
classification <- classification_fun(PBx, newy, method = classification_method)
return(list(imp_var, unname(classification)))
}
"""
)
def rdef_classification_fun():
robjects.r(
"""
####################################### Potential biomarkers classification #
#' Title Classification for the potential Biomarkers
#'
#' @param PBx data matrix
#' @param newy New assigned clusters
#' @param method Classification method for the function to use
#'
#' @return Predicted values for each observation
#'
#' @export
#'
#' @examples
#' # classification_fun(PBx, newy, method='RF')
classification_fun <- function(PBx, newy, method = "RF") {
if (method == "RF") {
output <- RF_classify(PBx, newy)
}
if (method == "RF_downsampling") {
output <- RF_one_by_one(PBx, newy)
}
if (method == "CART_information") {
output <- cart_function(PBx, newy, criteria = "information")
}
if (method == "CART_gini") {
output <- cart_function(PBx, newy, criteria = "gini")
}
return(output)
}
"""
)
def rdef_RF_classify():
robjects.r(
"""
### Random Forest Without Down Sampling ###
#' Title Classification Using Random Forest Without Down Sampling
#'
#' @param PBx data matrix
#' @param newy New assigned clusters
#'
#' @return The predicted values for each observation
#'
#' @export
#'
#' @examples
#' # RF_classify(PBx, newy)
library(randomForest)
RF_classify <- function(PBx, newy) {
if (!is.factor(newy)) {
warning("y is not a factor - but was coerced into one.")
newy <- as.factor(newy)
}
fulldata <- data.frame(PBx, newy)
rf_clus_PB <- randomForest(newy ~ ., data = fulldata, ntree = 50)
model <<- rf_clus_PB
return(rf_clus_PB$predicted)
}
"""
)
def rdef_RF_one_by_one():
robjects.r(
"""
### Random forest with down sampling ###
#' Title Classification Using Random Forest Without Down Sampling
#'
#' @param PBx data matrix
#' @param newy New assigned clusters
#'
#' @return a list of two variables: the hclust function description and a summary table
#' of the distribution to clusters
#' @export
#'
#' @examples
#' # RF_one_by_one(PBx, newy)
RF_one_by_one <- function(PBx, newy) {
if (!is.factor(newy)) {
warning("y is not a factor - but was coerced into one.")
newy <- as.numeric(as.factor(newy))
}
rflist_names <- paste("cluster", c(1:length(unique(newy))))
rflist <- sapply(rflist_names, function(x) NULL)
for (i in 1:length(unique(newy))) {
class_2 <- ifelse(newy == i, 1, 0)
nmin <- sum(class_2 == 1)
rflist[[i]] <- randomForest(factor(class_2) ~ ., data = PBx, ntree = 1000,
importance = TRUE, proximity = TRUE, sampsize = rep(nmin, 2))
}
return(rflist)
}
"""
)
def rdef_cart_function():
robjects.r(
"""
#' # cart_function(PBx, newy, 'information')
### CART ###
#' Title Classification Using CART
#'
#' @param PBx data matrix
#' @param newy New assigned clusters
#' @param criteria gini or information
#'
#' @return a list of two variables: the hclust function description and a summary table
#' of the distribution to clusters
#' @export
#'
#' @examples
cart_function <- function(PBx, newy, criteria = "gini") {
fulldata <- data.frame(PBx, newy)
cart <- rpart(newy ~ ., data = fulldata, method = "class", parms = list(split = criteria))
model <<- cart
pred <- predict(cart, type = "class")
return(pred)
}
"""
)
if __name__ == "__main__":
import time
from mipframework import create_runner
algorithm_args = [
"-y",
"lefthippocampus, righthippocampus, leftcaudate",
"-x",
"gender, agegroup",
"-pathology",
"dementia",
"-dataset",
"edsd, ppmi",
"-filter",
"",
"-dx",
"alzheimerbroadcategory",
"-c2_feature_selection_method",
"RF",
"-c2_num_clusters_method",
"Euclidean",
"-c2_num_clusters",
"6",
"-c2_clustering_method",
"Euclidean",
"-c3_feature_selection_method",
"RF",
"-c3_classification_method",
"RF",
]
runner = create_runner(ThreeC, algorithm_args=algorithm_args, num_workers=1,)
start = time.time()
runner.run()
end = time.time()
print("Completed in ", end - start)
| mit | 6,020,439,716,612,034,000 | 34.63506 | 179 | 0.55087 | false |
WaveBlocks/WaveBlocks | src/WaveBlocks/SimulationLoopFourier.py | 1 | 6326 | """The WaveBlocks Project
This file contains the main simulation loop
for the Fourier propagator.
@author: R. Bourquin
@copyright: Copyright (C) 2010, 2011 R. Bourquin
@license: Modified BSD License
"""
import numpy as np
import scipy as sp
from PotentialFactory import PotentialFactory as PF
from WaveFunction import WaveFunction
from HagedornWavepacket import HagedornWavepacket
from FourierPropagator import FourierPropagator
from SimulationLoop import SimulationLoop
from IOManager import IOManager
class SimulationLoopFourier(SimulationLoop):
r"""
This class acts as the main simulation loop. It owns a propagator that
propagates a set of initial values during a time evolution. All values are
read from the ``Parameters.py`` file.
"""
def __init__(self, parameters):
r"""
Create a new simulation loop instance.
"""
# Keep a reference to the simulation parameters
self.parameters = parameters
#: The time propagator instance driving the simulation.
self.propagator = None
#: A ``IOManager`` instance for saving simulation results.
self.IOManager = None
#: The number of time steps we will perform.
self.nsteps = parameters["nsteps"]
# Set up serializing of simulation data
self.IOManager = IOManager()
self.IOManager.create_file(self.parameters)
self.IOManager.create_block()
def prepare_simulation(self):
r"""
Set up a Fourier propagator for the simulation loop. Set the
potential and initial values according to the configuration.
:raise ValueError: For invalid or missing input data.
"""
# Compute the position space grid points
nodes = self.parameters["f"] * sp.pi * sp.arange(-1, 1, 2.0/self.parameters["ngn"], dtype=np.complexfloating)
# The potential instance
potential = PF().create_potential(self.parameters)
# Check for enough initial values
if not self.parameters.has_key("initial_values"):
if len(self.parameters["parameters"]) < potential.get_number_components():
raise ValueError("Too few initial states given. Parameters are missing.")
if len(self.parameters["coefficients"]) < potential.get_number_components():
raise ValueError("Too few initial states given. Coefficients are missing.")
# Calculate the initial values sampled from a hagedorn wave packet
d = dict([("ncomponents", 1), ("basis_size", self.parameters["basis_size"]), ("eps", self.parameters["eps"])])
# Initial values given in the "fourier" specific format
if self.parameters.has_key("initial_values"):
initialvalues = [ np.zeros(nodes.shape, dtype=np.complexfloating) for i in xrange(self.parameters["ncomponents"]) ]
for level, params, coeffs in self.parameters["initial_values"]:
hwp = HagedornWavepacket(d)
hwp.set_parameters(params)
for index, value in coeffs:
hwp.set_coefficient(0, index, value)
iv = hwp.evaluate_at(nodes, component=0, prefactor=True)
initialvalues[level] = initialvalues[level] + iv
# Initial value read in compatibility mode to the packet algorithms
else:
# See if we have a list of parameter tuples or just a single 5-tuple
# This is for compatibility with the inhomogeneous case.
try:
# We have a list of parameter tuples this is ok for the loop below
len(self.parameters["parameters"][0])
parameters = self.parameters["parameters"]
except TypeError:
# We have just a single 5-tuple of parameters, we need to replicate for looping
parameters = [ self.parameters["parameters"] for i in xrange(self.parameters["ncomponents"]) ]
initialvalues = []
for level, item in enumerate(parameters):
hwp = HagedornWavepacket(d)
hwp.set_parameters(item)
# Set the coefficients of the basis functions
for index, value in self.parameters["coefficients"][level]:
hwp.set_coefficient(0, index, value)
iv = hwp.evaluate_at(nodes, component=0, prefactor=True)
initialvalues.append(iv)
# Project the initial values to the canonical basis
initialvalues = potential.project_to_canonical(nodes, initialvalues)
# Store the initial values in a WaveFunction object
IV = WaveFunction(self.parameters)
IV.set_grid(nodes)
IV.set_values(initialvalues)
# Finally create and initialize the propagator instace
self.propagator = FourierPropagator(potential, IV, self.parameters)
# Which data do we want to save
tm = self.parameters.get_timemanager()
slots = tm.compute_number_saves()
print(tm)
self.IOManager.add_grid(self.parameters, blockid="global")
self.IOManager.add_fourieroperators(self.parameters)
self.IOManager.add_wavefunction(self.parameters, timeslots=slots)
# Write some initial values to disk
self.IOManager.save_grid(nodes, blockid="global")
self.IOManager.save_fourieroperators(self.propagator.get_operators())
self.IOManager.save_wavefunction(IV.get_values(), timestep=0)
def run_simulation(self):
r"""
Run the simulation loop for a number of time steps. The number of steps is calculated in the ``initialize`` function.
"""
tm = self.parameters.get_timemanager()
# Run the simulation for a given number of timesteps
for i in xrange(1, self.nsteps+1):
print(" doing timestep "+str(i))
self.propagator.propagate()
# Save some simulation data
if tm.must_save(i):
self.IOManager.save_wavefunction(self.propagator.get_wavefunction().get_values(), timestep=i)
def end_simulation(self):
r"""
Do the necessary cleanup after a simulation. For example request the
IOManager to write the data and close the output files.
"""
self.IOManager.finalize()
| bsd-3-clause | 8,183,372,517,161,207,000 | 37.339394 | 127 | 0.64638 | false |
dshean/pygeotools | pygeotools/replace_ndv.py | 1 | 1731 | #! /usr/bin/env python
#David Shean
#[email protected]
import sys
import os
import argparse
import numpy as np
from osgeo import gdal
from pygeotools.lib import iolib
#Can use ASP image_calc for multithreaded ndv replacement of huge images
#image_calc -o ${1%.*}_ndv.tif -c 'var_0' --output-nodata-value $2 $1
def getparser():
parser = argparse.ArgumentParser(description="Replace raster NoData value")
parser.add_argument('-overwrite', action='store_true', help='Overwrite original file')
parser.add_argument('src_fn', type=str, help='Input raster filename')
parser.add_argument('new_ndv', type=str, help='New NoData value (e.g., -9999)')
return parser
def main():
parser = getparser()
args = parser.parse_args()
src_fn = args.src_fn
new_ndv = args.new_ndv
#Input argument is a string, which is not recognized by set_fill_value
#Must use np.nan object
if new_ndv == 'nan' or new_ndv == 'np.nan':
new_ndv = np.nan
else:
new_ndv = float(new_ndv)
#Output filename will have ndv appended
if args.overwrite:
out_fn = src_fn
else:
out_fn = os.path.splitext(src_fn)[0]+'_ndv.tif'
ds = gdal.Open(src_fn)
b = ds.GetRasterBand(1)
#Extract old ndv
old_ndv = iolib.get_ndv_b(b)
print(src_fn)
print("Replacing old ndv %s with new ndv %s" % (old_ndv, new_ndv))
#Load masked array
bma = iolib.ds_getma(ds)
#Handle cases with input ndv of nan
#if old_ndv == np.nan:
bma = np.ma.fix_invalid(bma)
#Set new fill value
bma.set_fill_value(new_ndv)
#Fill ma with new value and write out
iolib.writeGTiff(bma.filled(), out_fn, ds, ndv=new_ndv)
if __name__ == '__main__':
main()
| mit | -3,940,889,173,752,229,400 | 25.227273 | 90 | 0.644714 | false |
xhair/TopOdoo_Addons | ToproERP_Wechat_Enterprises/models/wechat_enterprise_basic.py | 1 | 29875 | # -*- coding:utf-8 -*-
import json
import requests
import urllib
import hashlib
from json import *
from xml.dom import minidom, Node
import WXBizMsgCrypt
from openerp.http import request
from wechat_sdk.messages import MESSAGE_TYPES, UnknownMessage
from wechat_sdk.exceptions import ParseError, NeedParseError, NeedParamError, OfficialAPIError
import time
import logging
_logger = logging.getLogger(__name__)
from wechat_sdk.reply import TextReply, ImageReply, VoiceReply, VideoReply, MusicReply, Article, ArticleReply
class ErrorCode(object):
SUCCESS = 0
class WeChatEnterprise(object):
def __init__(self, corpid,corpsecret,agentid=1,Token=None,AESKey=None):
"""
document address: http://qydev.weixin.qq.com/wiki/index.php?title=%E9%A6%96%E9%A1%B5
"""
# self.corpid = self._get_corpid()
# self.corpsecret = self._get_corpsecret()
# self.agentid = agentid
# self.url_prefix = "https://qyapi.weixin.qq.com/cgi-bin"
# self.access_token = self.__get_access_token()
# self.Token = self._get_token()
# self.EncodingAESKey = self._get_EncodingAESKey()
# self.__message = None
self.corpid = corpid
self.corpsecret = corpsecret
self.agentid = agentid
self.url_prefix = "https://qyapi.weixin.qq.com/cgi-bin"
self.access_token = self.__get_access_token()
self.Token = Token
self.EncodingAESKey = AESKey
self.__message = None
def __get_access_token(self):
# access_token 有效期为 7200秒
# todo 缓存access_token
url = "%s/gettoken?corpid=%s&corpsecret=%s" % (self.url_prefix, self.corpid, self.corpsecret)
res = requests.get(url)
access_token = res.json().get("access_token")
return access_token
# def _get_corpid(self):
# result = request.env['wechat.enterprise.config'].sudo().search([],limit=1)
# return result.corp_id
#
# def _get_corpsecret(self):
# result = request.env['wechat.enterprise.config'].sudo().search([],limit=1)
# return result.corp_secret
#
# def _get_token(self):
# result = request.env['wechat.enterprise.app'].sudo().search([("agentid","=",self.agentid)],limit=1)
# return result.Token
#
# def _get_EncodingAESKey(self):
# result = request.env['wechat.enterprise.app'].sudo().search([("agentid","=",self.agentid)],limit=1)
# return result.EncodingAESKey
@staticmethod
def __response(res):
errcode = res.get("errcode")
# errmsg = res.get("errmsg")
if errcode is ErrorCode.SUCCESS:
return True, res
else:
return False, res
def __post(self, url, data):
_logger.debug(u"the url is:%s" % url)
res = requests.post(url, data=json.dumps(data).decode('unicode-escape').encode("utf-8")).json()
return self.__response(res)
def __get(self, url):
_logger.debug(u"the url is:%s" % url)
res = requests.get(url).json()
return self.__response(res)
def __post_file(self, url, media_file):
res = requests.post(url, file=media_file).json()
return self.__response(res)
# 部门管理
def create_department(self, name, parentid=1,department_id=None):
"""
创建部门
name : 部门名称。长度限制为1~64个字符
parentid: 父亲部门id。根部门id为1
order : 在父部门中的次序。从1开始,数字越大排序越靠后
"""
url = "%s/department/create?access_token=%s" % (self.url_prefix, self.access_token)
data = {
"name": name,
"parentid": parentid,
}
if department_id is not None:
data["id"] = int(department_id)
status, res = self.__post(url, data)
return status, res
def update_department(self, department_id, name=None, parentid=None, **kwargs):
"""
更新部门
参数 必须 说明
access_token 是 调用接口凭证
id 是 部门id
name 否 更新的部门名称。长度限制为1~64个字符。修改部门名称时指定该参数
parentid 否 父亲部门id。根部门id为1
order 否 在父部门中的次序。从1开始,数字越大排序越靠后
"""
url = "%s/department/update?access_token=%s" % (self.url_prefix, self.access_token)
data = {
"id": department_id,
}
if name is not None:
data["name"] = name
if parentid is not None:
data["parentid"] = parentid
data.update(kwargs)
status, res = self.__post(url, data)
return status, res
def delete_department(self, department_id):
"""
删除部门
参数 必须 说明
access_token 是 调用接口凭证
id 是 部门id。(注:不能删除根部门;不能删除含有子部门、成员的部门)
"""
url = "%s/department/delete?access_token=%s&id=%s" % (self.url_prefix, self.access_token, department_id)
status, res = self.__get(url)
return status, res
def get_department_list(self):
"""
获取部门列表
参数 必须 说明
access_token 是 调用接口凭证
"""
url = "%s/department/list?access_token=%s" % (self.url_prefix, self.access_token)
status, res = self.__get(url)
return status, res
# 成员管理
def create_user(self, data):
"""
创建用户
参数 必须 说明
access_token 是 调用接口凭证
userid 是 员工UserID。对应管理端的帐号,企业内必须唯一。长度为1~64个字符
name 是 成员名称。长度为1~64个字符
department 是 成员所属部门id列表。注意,每个部门的直属员工上限为1000个
position 否 职位信息。长度为0~64个字符
mobile 否 手机号码。企业内必须唯一,mobile/weixinid/email三者不能同时为空
email 否 邮箱。长度为0~64个字符。企业内必须唯一
weixinid 否 微信号。企业内必须唯一。(注意:是微信号,不是微信的名字)
extattr 否 扩展属性。扩展属性需要在WEB管理端创建后才生效,否则忽略未知属性的赋值
"""
url = "%s/user/create?access_token=%s" % (self.url_prefix, self.access_token)
if data.get("userid") and data.get("name"):
status, res = self.__post(url, data)
else:
status = False
res = u"userid 或者 name 为空"
return status, res
def update_user(self, data):
"""
更新成员
参数 必须 说明
access_token 是 调用接口凭证
userid 是 员工UserID。对应管理端的帐号,企业内必须唯一。长度为1~64个字符
name 否 成员名称。长度为0~64个字符
department 否 成员所属部门id列表。注意,每个部门的直属员工上限为1000个
position 否 职位信息。长度为0~64个字符
mobile 否 手机号码。企业内必须唯一,mobile/weixinid/email三者不能同时为空
email 否 邮箱。长度为0~64个字符。企业内必须唯一
weixinid 否 微信号。企业内必须唯一。(注意:是微信号,不是微信的名字)
enable 否 启用/禁用成员。1表示启用成员,0表示禁用成员
extattr 否 扩展属性。扩展属性需要在WEB管理端创建后才生效,否则忽略未知属性的赋值
"""
url = "%s/user/update?access_token=%s" % (self.url_prefix, self.access_token)
if data.get("userid") and data.get("name"):
status, res = self.__post(url, data)
else:
status = False
res = u"userid 或者 name 为空"
return status, res
def delete_user(self, userid):
"""
删除成员
参数 必须 说明
access_token 是 调用接口凭证
userid 是 员工UserID。对应管理端的帐号
"""
url = "%s/user/delete?access_token=%s&userid=%s" % (self.url_prefix, self.access_token, userid)
status, res = self.__get(url)
return status, res
def multi_delete_user(self, useridlist):
"""
批量删除成员
参数 必须 说明
access_token 是 调用接口凭证
useridlist 是 员工UserID列表。对应管理端的帐号
"""
url = "%s/user/batchdelete?access_token=%s" % (self.url_prefix, self.access_token)
data = {"useridlist": useridlist}
status, res = self.__post(url, data=data)
return status, res
def get_user(self, userid):
"""
获取成员
参数 必须 说明
access_token 是 调用接口凭证
userid 是 员工UserID。对应管理端的帐号
"""
url = "%s/user/get?access_token=%s&userid=%s" % (self.url_prefix, self.access_token, userid)
status, res = self.__get(url)
return status, res
def get_users_in_department(self, department_id, fetch_child=0, status=0):
"""
获取部门成员
参数 必须 说明
access_token 是 调用接口凭证
department_id 是 获取的部门id
fetch_child 否 1/0:是否递归获取子部门下面的成员
status 否 0获取全部员工,1获取已关注成员列表,2获取禁用成员列表,4获取未关注成员列表。status可叠加
"""
url = "%s/user/simplelist?access_token=%s&department_id=%s&fetch_child=%s&status=%s" \
% (self.url_prefix, self.access_token, department_id, fetch_child, status)
status, res = self.__get(url)
return status, res
def get_users_in_department_detail(self, department_id, fetch_child=0, status=0):
"""
获取部门成员(详情)
参数 必须 说明
access_token 是 调用接口凭证
department_id 是 获取的部门id
fetch_child 否 1/0:是否递归获取子部门下面的成员
status 否 0获取全部员工,1获取已关注成员列表,2获取禁用成员列表,4获取未关注成员列表。status可叠加
"""
url = "%s/user/list?access_token=%s&department_id=%s&fetch_child=%s&status=%s" \
% (self.url_prefix, self.access_token, department_id, fetch_child, status)
status, res = self.__get(url)
return status, res
def invite_attention_to_user(self, userid, invite_tips=None):
"""
邀请用户关注
参数 必须 说明
access_token 是 调用接口凭证
userid 是 用户的userid
invite_tips 否 推送到微信上的提示语(只有认证号可以使用)。当使用微信推送时,该字段默认为“请关注XXX企业号”,邮件邀请时,该字段无效。
"""
url = "%s/invite/send?access_token=%s" % (self.url_prefix, self.access_token)
data = {
"userid": userid
}
if invite_tips is not None:
data["invite_tips"] = invite_tips
status, res = self.__post(url, data)
return status, res
# 管理标签
def create_tag(self, tagname,tagid=None):
"""
创建标签
参数 必须 说明
access_token 是 调用接口凭证
tagname 是 标签名称。长度为1~64个字符,标签不可与其他同组的标签重名,也不可与全局标签重名
"""
url = "%s/tag/create?access_token=%s" % (self.url_prefix, self.access_token)
data = {}
data['tagname'] = tagname
if tagid:
data['tagid'] = tagid
status, res = self.__post(url, data)
return status, res
def update_tag(self, tagid, tagname):
"""
更新标签名字
参数 必须 说明
access_token 是 调用接口凭证
tagid 是 标签ID
tagname 是 标签名称。长度为1~64个字符,标签不可与其他同组的标签重名,也不可与全局标签重名
"""
url = "%s/tag/update?access_token=%s" % (self.url_prefix, self.access_token)
data = {"tagid": tagid, "tagname": tagname}
status, res = self.__post(url, data)
return status, res
def delete_tag(self, tagid):
"""
删除标签
参数 必须 说明
access_token 是 调用接口凭证
tagid 是 标签ID
"""
url = "%s/tag/delete?access_token=%s&tagid=%s" % (self.url_prefix, self.access_token, tagid)
status, res = self.__get(url)
return status, res
def get_user_from_tag(self, tagid):
"""
获取标签成员
参数 必须 说明
access_token 是 调用接口凭证
tagid 是 标签ID
"""
url = "%s/tag/get?access_token=%s&tagid=%s" % (self.url_prefix, self.access_token, tagid)
status, res = self.__get(url)
return status, res
def add_users_to_tag(self, data):
"""
增加标签成员
参数 必须 说明
access_token 是 调用接口凭证
tagid 是 标签ID
userlist 否 企业员工ID列表,注意:userlist、partylist不能同时为空
partylist 否 企业部门ID列表,注意:userlist、partylist不能同时为空
"""
url = "%s/tag/addtagusers?access_token=%s" % (self.url_prefix, self.access_token)
status, res = self.__post(url, data=data)
return status, res
def delete_user_in_tag(self, tagid, userlist, partylist):
"""
删除标签成员
参数 必须 说明
access_token 是 调用接口凭证
tagid 是 标签ID
userlist 否 企业员工ID列表,注意:userlist、partylist不能同时为空
partylist 否 企业部门ID列表,注意:userlist、partylist不能同时为空
"""
url = "%s/tag/deltagusers?access_token=%s" % (self.url_prefix, self.access_token)
data = {"tagid": tagid, "userlist": userlist, "partylist": partylist}
status, res = self.__post(url, data=data)
return status, res
def get_tag_list(self):
"""
获取标签列表
参数 必须 说明
access_token 是 调用接口凭证
"""
url = "%s/tag/list?access_token=%s" % (self.url_prefix, self.access_token)
status, res = self.__get(url)
return status, res
# 管理多媒体文件
def upload_media(self, media_type, media_file):
"""
上传媒体文件
参数 必须 说明
access_token 是 调用接口凭证
type 是 媒体文件类型,分别有图片(image)、语音(voice)、视频(video),普通文件(file)
media 是 form-data中媒体文件标识,有filename、filelength、content-type等信息
"""
url = "%s/media/upload?access_token=%s&type=%s" % (self.url_prefix, self.access_token, media_type)
data = {"media": media_file}
status, res = self.__post_file(url, data)
return status, res
def get_media(self, media_id):
"""
获取媒体文件
参数 必须 说明
access_token 是 调用接口凭证
media_id 是 媒体文件id
"""
url = "%s/media/get?access_token=%s&media_id=%s" % (self.url_prefix, self.access_token, media_id)
media_file = requests.get(url)
return media_file
# 发送消息
def send_msg_to_user(self, datas):
"""
发送消息到用户
text消息
参数 必须 说明
touser 否 员工ID列表(消息接收者,多个接收者用‘|’分隔)。特殊情况:指定为@all,则向关注该企业应用的全部成员发送
toparty 否 部门ID列表,多个接收者用‘|’分隔。当touser为@all时忽略本参数
totag 否 标签ID列表,多个接收者用‘|’分隔。当touser为@all时忽略本参数
msgtype 是 消息类型,此时固定为:text
agentid 是 企业应用的id,整型。可在应用的设置页面查看
content 是 消息内容
safe 否 表示是否是保密消息,0表示否,1表示是,默认0
其他消息参考: http://qydev.weixin.qq.com/wiki/index.php?
title=%E6%B6%88%E6%81%AF%E7%B1%BB%E5%9E%8B%E5%8F%8A%E6%95%B0%E6%8D%AE%E6%A0%BC%E5%BC%8F
"""
url = "%s/message/send?access_token=%s" % (self.url_prefix, self.access_token)
data = {
"msgtype": datas.get('msgtype'),
"agentid": datas.get('agentid')
}
if datas.get('msgtype') != "news":
data["safe"] = datas.get('safe')
if datas.get('msgtype') == "text":
data["text"] = {"content": datas.get('content')}
if datas.get('msgtype') == "image":
data["image"] = {"media_id": datas.get("media_id")}
if datas.get('msgtype') == "voice":
data["voice"] = {"media_id": datas.get("media_id")}
if datas.get('msgtype') == "video":
data["video"] = {
"media_id": datas.get("media_id"),
"title": datas.get("title"),
"description": datas.get("description")
}
if datas.get('msgtype') == "file":
data["file"] = {
"media_id": datas.get("media_id")
}
if datas.get('msgtype') == "news":
data["news"] = {
"articles": [
{
"title": datas.get("title"),
"description": datas.get("description"),
"url": datas.get("url"),
"picurl": datas.get("picurl")
}
]
}
# if datas.get['msgtype'] == "mpnews":
#{
# "articles":[
# {
# "title": "Title",
# "thumb_media_id": "id",
# "author": "Author",
# "content_source_url": "URL",
# "content": "Content",
# "digest": "Digest description",
# "show_cover_pic": "0"
# },
# {
# "title": "Title",
# "thumb_media_id": "id",
# "author": "Author",
# "content_source_url": "URL",
# "content": "Content",
# "digest": "Digest description",
# "show_cover_pic": "0"
# }
# ]
#}
# data["mpnews"] = kwargs
if datas.get("touser") is None:
to_user = "@all"
else:
# to_user = '|'.join(touser)
to_user = datas.get("touser")
data["touser"] = to_user
if datas.get("toparty") is not None:
data["toparty"] = datas.get("toparty")
if datas.get("totag") is not None:
data["totag"] = datas.get("totag")
status, res = self.__post(url, data)
return status, res
# 二次验证
def second_validation(self, userid):
"""
二次验证
参数 必须 说明
access_token 是 调用接口凭证
userid 是 员工UserID
"""
url = "https://qyapi.weixin.qq.com/cgi-bin/user/authsucc?access_token=%s&userid=%s" \
% (self.access_token, userid)
status, res = self.__get(url)
return status, res
# 以下是自己添加的部分功能
# 将userid转化为openid
def convert_to_openid(self,userid):
url ="https://qyapi.weixin.qq.com/cgi-bin/user/convert_to_openid?access_token=%s"% self.access_token
data = {}
data['userid'] = userid
content = self.__post(url, data)
if content:
return content[1]['openid']
# 根据code取得用户的企业通讯录账号
def get_enterprise_account_by_code(self, code):
"""
根据code取得用户的企业通讯录账号
参数 必须 说明
code 是 调用接口凭证
"""
url = "https://qyapi.weixin.qq.com/cgi-bin/user/getuserinfo?access_token=%s&code=%s" \
% (self.access_token, code)
# print u'根据code取得用户的企业通讯录UserId的URL:%s' % url
content = self.__get(url)
if content[1].get('errcode'):
return "0"
else:
return content[1]['UserId']
# 根据要验证的url生成微信验证的url
def get_authorize_url(self, redircet_uri,state='toproerp'):
"""
根据要验证的url生成微信验证的url
参数 必须 说明
redircet_uri 是 验证的url
"""
parts = {'redirect_uri':redircet_uri}
timestamp = time.time()
url = "https://open.weixin.qq.com/connect/oauth2/authorize?appid=%s&%s&response_type=code&scope=snsapi_base&state=%s#wechat_redirect" \
% (self.corpid, urllib.urlencode(parts),state)
# print u"获取code的链接:%s"%url
return url
# 获取微信企业号上的app列表
def get_app_lists(self):
"""
获取app列表
参数 必须 说明
access_token 是 调用接口凭证
"""
url = "%s/agent/list?access_token=%s"%(self.url_prefix, self.access_token)
status, res = self.__get(url)
return status, res
# 配置微信企业号上的app
def create_app(self,data):
"""
配置app
参数 必须 说明
access_token 是 调用接口凭证
agentid 是 企业应用的id
report_location_flag 是 企业应用是否打开地理位置上报 0:不上报;1:进入会话上报;2:持续上报
logo_mediaid 否 企业应用头像的mediaid,通过多媒体接口上传图片获得mediaid,上传后会自动裁剪成方形和圆形两个头像
name 是 企业应用名称
description 否 企业应用详情
redirect_domain 是 企业应用可信域名
isreportuser 否 是否接收用户变更通知。0:不接收;1:接收。主页型应用无需该参数
isreportenter 否 是否上报用户进入应用事件。0:不接收;1:接收。主页型应用无需该参数
home_url 否 主页型应用url。url必须以http或者https开头。消息型应用无需该参数
"""
url = "%s/agent/set?access_token=%s"(self.url_prefix, self.access_token)
if data.get("agentid") and data.get("name") and data.get("redirect_domain")\
and data.get("report_location_flag"):
status, res = self.__post(url, data)
else:
status = False
res = u"参数不完整"
return status, res
# 获取企业号app的详细资料
def get_app_details(self,agentid):
"""
获取app详细资料
参数 必须 说明
access_token 是 调用接口凭证
agentid 是 授权方应用id
"""
url = "%s/agent/get?access_token=%s&agentid=%s"%(self.url_prefix, self.access_token,agentid)
status, res = self.__get(url)
return status, res
# 删除应用菜单
def delete_app_menu(self):
"""
删除应用菜单
参数 必须 说明
access_token 是 调用接口凭证
agentid 是 授权方应用id
"""
url = "%s/menu/delete?access_token=%s&agentid=%s"%(self.url_prefix, self.access_token, self.agentid)
status, res = self.__get(url)
return status, res
# 更新菜单至应用
def update_app_menu(self, data):
"""
更新菜单至应用
参数 必须 说明
access_token 是 调用接口凭证
agentid 是 授权方应用id
"""
url = "%s/menu/create?access_token=%s&agentid=%s"%(self.url_prefix, self.access_token,self.agentid)
status, res = self.__post(url, data)
return status, res
def check_signature(self, sVerifyMsgSig, sVerifyTimeStamp, sVerifyNonce,sVerifyEchoStr):
"""
验证微信消息真实性
:param signature: 微信加密签名
:param timestamp: 时间戳
:param nonce: 随机数
:return: 通过验证返回 True, 未通过验证返回 False
"""
# self._check_token()
# print msg_signature
# if not msg_signature or not timestamp or not nonce:
# return False
#
# tmp_list = [self.access_token, timestamp, nonce]
# tmp_list.sort()
# tmp_str = ''.join(tmp_list)
# print hashlib.sha1(tmp_str.encode('utf-8')).hexdigest()
# if msg_signature == hashlib.sha1(tmp_str.encode('utf-8')).hexdigest():
# print 222
# return True
# else:
# return False
wxcpt=WXBizMsgCrypt.WXBizMsgCrypt(self.Token,self.EncodingAESKey,self.corpid)
return wxcpt.VerifyURL(sVerifyMsgSig, sVerifyTimeStamp,sVerifyNonce,sVerifyEchoStr)
def _check_token(self):
"""
检查 Token 是否存在
:raises NeedParamError: Token 参数没有在初始化的时候提供
"""
if not self.access_token:
raise NeedParamError('Please provide Token parameter in the construction of class.')
def parse_data(self, data):
"""
解析微信服务器发送过来的数据并保存类中
:param data: HTTP Request 的 Body 数据
:raises ParseError: 解析微信服务器数据错误, 数据不合法
"""
result = {}
if type(data) == unicode:
data = data.encode('utf-8')
elif type(data) == str:
pass
else:
raise ParseError()
try:
xml = XMLStore(xmlstring=data)
except Exception:
raise ParseError()
result = xml.xml2dict
result['raw'] = data
result['type'] = result.pop('MsgType').lower()
message_type = MESSAGE_TYPES.get(result['type'], UnknownMessage)
self.__message = message_type(result)
self.__is_parse = True
class NeedParamError(Exception):
"""
构造参数提供不全异常
"""
pass
class ParseError(Exception):
"""
解析微信服务器数据异常
"""
pass
class XMLStore(object):
"""
XML 存储类,可方便转换为 Dict
"""
def __init__(self, xmlstring):
self._raw = xmlstring
self._doc = minidom.parseString(xmlstring)
@property
def xml2dict(self):
"""
将 XML 转换为 dict
"""
self._remove_whitespace_nodes(self._doc.childNodes[0])
return self._element2dict(self._doc.childNodes[0])
def _element2dict(self, parent):
"""
将单个节点转换为 dict
"""
d = {}
for node in parent.childNodes:
if not isinstance(node, minidom.Element):
continue
if not node.hasChildNodes():
continue
if node.childNodes[0].nodeType == minidom.Node.ELEMENT_NODE:
try:
d[node.tagName]
except KeyError:
d[node.tagName] = []
d[node.tagName].append(self._element2dict(node))
elif len(node.childNodes) == 1 and node.childNodes[0].nodeType in [minidom.Node.CDATA_SECTION_NODE, minidom.Node.TEXT_NODE]:
d[node.tagName] = node.childNodes[0].data
return d
def _remove_whitespace_nodes(self, node, unlink=True):
"""
删除空白无用节点
"""
remove_list = []
for child in node.childNodes:
if child.nodeType == Node.TEXT_NODE and not child.data.strip():
remove_list.append(child)
elif child.hasChildNodes():
self._remove_whitespace_nodes(child, unlink)
for node in remove_list:
node.parentNode.removeChild(node)
if unlink:
node.unlink()
def get_message(self):
"""
获取解析好的 WechatMessage 对象
:return: 解析好的 WechatMessage 对象
"""
self._check_parse()
return self.__message
def _check_parse(self):
"""
检查是否成功解析微信服务器传来的数据
:raises NeedParseError: 需要解析微信服务器传来的数据
"""
if not self.__is_parse:
raise NeedParseError()
def get_message(self):
"""
获取解析好的 WechatMessage 对象
:return: 解析好的 WechatMessage 对象
"""
self._check_parse()
return self.__message | agpl-3.0 | -5,521,188,693,000,287,000 | 32.457181 | 143 | 0.53436 | false |
charles-cooper/raiden | raiden/tests/fixtures/api.py | 1 | 4289 | # -*- coding: utf-8 -*-
# pylint: disable=too-many-arguments,redefined-outer-name
import copy
import os
import pytest
import psutil
import gevent
from gevent import Greenlet
from raiden.app import App
from raiden.api.rest import RestAPI, APIServer
from raiden.api.python import RaidenAPI
from raiden.raiden_service import RaidenService
from raiden.network.discovery import Discovery
from raiden.tests.utils.apitestcontext import ApiTestContext
def wait_for_listening_port(port_number, tries=10, sleep=0.1):
for _ in range(tries):
gevent.sleep(sleep)
connections = psutil.net_connections()
for conn in connections:
if conn.status == 'LISTEN' and conn.laddr[1] == port_number:
return
raise RuntimeError('{port} is not bound'.format(port_number))
# TODO: Figure out why this fixture can't work as session scoped
# What happens is that after one test is done, in the next one
# the server is no longer running even though the teardown has not
# been invoked.
@pytest.fixture
def api_backend(rest_api_port_number):
# Initializing it without raiden_service.api here since that is a
# function scope fixture. We will inject it to rest_api object later
rest_api = RestAPI(None)
api_server = APIServer(rest_api)
api_server.flask_app.config['SERVER_NAME'] = 'localhost:{}'.format(rest_api_port_number)
# TODO: Find out why tests fail with debug=True
server = Greenlet.spawn(
api_server.run,
rest_api_port_number,
debug=False,
use_evalex=False,
)
# Fixes flaky test, were requests are done prior to the server initializing
# the listening socket.
# https://github.com/raiden-network/raiden/issues/389#issuecomment-305551563
wait_for_listening_port(rest_api_port_number)
yield api_server, rest_api
server.kill(block=True, timeout=10)
@pytest.fixture
def api_raiden_service(
monkeypatch,
api_backend,
api_test_context,
blockchain_services,
transport_class,
max_unresponsive_time,
send_ping_time,
reveal_timeout,
raiden_udp_ports,
tmpdir):
blockchain = blockchain_services[0]
config = copy.deepcopy(App.default_config)
config['port'] = raiden_udp_ports[0]
config['host'] = '127.0.0.1'
config['privatekey_hex'] = blockchain.private_key.encode('hex')
config['send_ping_time'] = send_ping_time
config['max_unresponsive_time'] = max_unresponsive_time
config['reveal_timeout'] = reveal_timeout
config['database_path'] = os.path.join(tmpdir.strpath, 'database.db')
raiden_service = RaidenService(
blockchain,
blockchain.private_key,
transport_class,
Discovery(),
config
)
api = RaidenAPI(raiden_service)
monkeypatch.setattr(api, 'get_channel_list', api_test_context.query_channels)
monkeypatch.setattr(api, 'get_tokens_list', api_test_context.query_tokens)
monkeypatch.setattr(api, 'open', api_test_context.open_channel)
monkeypatch.setattr(api, 'deposit', api_test_context.deposit)
monkeypatch.setattr(api, 'close', api_test_context.close)
monkeypatch.setattr(api, 'settle', api_test_context.settle)
monkeypatch.setattr(api, 'get_channel', api_test_context.get_channel)
monkeypatch.setattr(api, 'get_network_events', api_test_context.get_network_events)
monkeypatch.setattr(api, 'get_token_network_events', api_test_context.get_token_network_events)
monkeypatch.setattr(api, 'get_channel_events', api_test_context.get_channel_events)
monkeypatch.setattr(api, 'transfer', api_test_context.transfer)
monkeypatch.setattr(api, 'token_swap', api_test_context.token_swap)
monkeypatch.setattr(api, 'expect_token_swap', api_test_context.expect_token_swap)
monkeypatch.setattr(api, 'connect_token_network', api_test_context.connect)
monkeypatch.setattr(api, 'leave_token_network', api_test_context.leave)
# also make sure that the test server's raiden_api uses this mock
# raiden service
_, raiden_api = api_backend
monkeypatch.setattr(raiden_api, 'raiden_api', api)
return raiden_service
@pytest.fixture
def api_test_context(reveal_timeout):
return ApiTestContext(reveal_timeout)
| mit | 9,159,316,229,175,237,000 | 35.974138 | 99 | 0.701562 | false |
JaspervanBlokland/SandyApp | src/Toon/SlowFashionCover.py | 1 | 15210 | # -----------------------------------------------------------------------------
# Copyright (c) 2016+ Buro Petr van Blokland + Claudia Mens & Font Bureau
# www.pagebot.io
#
# P A G E B O T
#
# Licensed under MIT conditions
# Made for usage in DrawBot, www.drawbot.com
# -----------------------------------------------------------------------------
#
#     SlowFashionCover.py
#
from __future__ import division
from datetime import datetime # Make date on magazine cover fit today.
import pagebot
from pagebot import newFS, Gradient, Shadow
from pagebot.style import getRootStyle, LEFT, TOP, RIGHT, A4Letter
from pagebot.elements import *
from pagebot.conditions import *
from pagebot.document import Document
from pagebot.composer import Composer
from pagebot.typesetter import Typesetter
from pagebot.toolbox.transformer import s2Color, int2Color, lighter
# Import a view class other than the default, to show double-page spreads.
from pagebot.elements.views.spreadview import SpreadView
from pagebot.fonttoolbox.variablefontbuilder import getVariableFont, Font
W, H = A4Letter # Vertical Letter size, horizontal A4.
PADDING = (24, 24, 40, 24) # General page padding.
MD_PATH = 'slowFashionStories.md' # Get text here, if not using blurb content.
EXPORT_PATH = '_export/SlowFashionCover.png' # Export path of the document.
COVER_IMAGE_PATH1 = 'images/IMG_8914.jpg' # Path of the cover image.
# Use this color to show "error" debugging, e.g. to show bounding box of an element.
debugColor = (1, 1, 0, 0.5)
# Set some values of the default template (as already generated by the document).
# Make sequential unique names for the flow boxes inside the templates.
MAIN_FLOW = 'main' # Element id of the text box on pages that hold the main text flow.
FLOWID1 = MAIN_FLOW+'1'
FLOWID2 = MAIN_FLOW+'2'
FLOWID3 = MAIN_FLOW+'3'
# Get the root path of open source fonts, enclosed in PageBot.
ROOT_PATH = pagebot.getRootPath()
# Main Variable Font for all text in the magazine. Change this line to build with
# another Variable Font. Using Optical Size (opsz), Weight (wght) and Width (wdth) axes.
FONT_PATH = ROOT_PATH + '/Fonts/fontbureau/AmstelvarAlpha-VF.ttf'
# Open the font, so we can query values that are not available in standard DrawBot functions,
# such as stem width, overshoot of roundings, etc.
f = Font(FONT_PATH)
#print f.axes # Uncomment to see the available axes printed.
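# A minimal sketch of the kind of queries this Font object allows, kept as
# comments so the script output stays unchanged. The attribute names below
# follow other PageBot examples and are assumptions here, not guaranteed API
# of this PageBot version.
#print sorted(f.axes.keys()) # Axis tags usable in getVariableFont locations.
#print f.info.familyName, f.info.styleName # Basic naming info of the font.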
# Pre-calculate instances of locations in the Variable Font.
LIGHT72 = getVariableFont(FONT_PATH, dict(wght=0.5, wdth=0.6, opsz=72))
BOOK_LIGHT = getVariableFont(FONT_PATH, dict(wght=0.5, wdth=0.7))
BOOK_CONDENSED = getVariableFont(FONT_PATH, dict(wght=0.7, wdth=0.4))
BOOK = getVariableFont(FONT_PATH, dict(wght=0.25, wdth=0))
BOOK_ITALIC = getVariableFont(FONT_PATH, dict(wght=0.25, wdth=1))
MEDIUM = getVariableFont(FONT_PATH, dict(wght=0.40, wdth=0))
SEMIBOLD = getVariableFont(FONT_PATH, dict(wght=0.40, wdth=1))
SEMIBOLD_CONDENSED = getVariableFont(FONT_PATH, dict(wght=0.40, wdth=0.5))
BOLD = getVariableFont(FONT_PATH, dict(wght=0.70, wdth=1))
BOLD_ITALIC = getVariableFont(FONT_PATH, dict(wght=0.7, wdth=1))
shadow = Shadow(offset=(6, -6), blur=10, color=(0.2, 0.2, 0.2, 0.5))
def makeCoverTemplate(imagePath, w, h):
bleed = 0
textColor = 1
# Make styles
# TODO: Make this fit, using size/wdth axis combination of Amstelvar
coverTitleSize = 160
# Not optical size yet. Play more with the axes
coverTitleFont = getVariableFont(FONT_PATH,
dict(wght=0.9, wdth=0.02))#, opsz=coverTitleSize))
coverTitleStyle = dict(font=coverTitleFont.installedName, fontSize=coverTitleSize,
textShadow=shadow, textFill=textColor, tracking=-3)
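    # A possible approach to the TODO above, left as a commented sketch so the
    # fixed values keep working: walk the wdth axis until the rendered title
    # fits the width between the paddings. It only uses getVariableFont, newFS
    # and textSize, already used in this script; the step size and the assumed
    # (top, right, bottom, left) order of PADDING are my own assumptions.
    #fitWidth = w - PADDING[1] - PADDING[3] # Available width inside the side paddings.
    #trialWdth = 1
    #while trialWdth > 0.02:
    #    trialFont = getVariableFont(FONT_PATH, dict(wght=0.9, wdth=trialWdth))
    #    tw, _ = textSize(newFS('Fashion', style=dict(font=trialFont.installedName,
    #        fontSize=coverTitleSize, tracking=-3)))
    #    if tw <= fitWidth:
    #        break # Keep the widest instance that still fits.
    #    trialWdth -= 0.02
    #coverTitleFont = trialFont # Would replace the fixed instance above.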
coverSubTitleSize = 80
# Not optical size yet. Play more with the axes
coverSubTitleFont = getVariableFont(FONT_PATH, dict(wght=0.6, wdth=0.02)) #opsz=coverSubTitleSize))
coverSubTitleStyle = dict(font=coverSubTitleFont.installedName, fontSize=coverSubTitleSize,
textFill=(1, 1, 1, 0.3), tracking=0)
# Cover
coverTemplate = Template(w=w, h=h, padding=PADDING) # Cover template of the magazine.
newImage(imagePath, parent=coverTemplate, conditions=[Fit2WidthSides(), Bottom2BottomSide()])
# Title of the magazine cover.
coverTitle = newFS('Fashion', style=coverTitleStyle)
    # Calculate width of a single "F" for now, to align "Slow".
# TODO: Change in example to go through the coverTitle to get positions and widths.
FWidth, _ = textSize(newFS('F', style=coverTitleStyle))
coversubTitle = newFS('Slow', style=coverSubTitleStyle)
newTextBox(coversubTitle, parent=coverTemplate, pl=FWidth*0.5,
conditions=[Left2Left(), Fit2Width(), Top2TopSide()])
tw, th = textSize(coverTitle)
newText(coverTitle, parent=coverTemplate, z=20, h=th*0.4,
textShadow=shadow, conditions=[Fit2Width(), Top2TopSide()])
# Make actual date in top-right with magazine title. Draw a bit transparant on background photo.
dt = datetime.now()
d = dt.strftime("%B %Y")
fs = newFS(d, style=dict(font=MEDIUM.installedName, fontSize=17,
textFill=(1, 1, 1, 0.6), tracking=0.5))
    # TODO: padding right could come from right stem of the "n"
newTextBox(fs, parent=coverTemplate, xTextAlign=RIGHT, pr=10, pt=6, conditions=[Top2Top(), Right2Right()])
    # Titles could come automatically from chapters in the magazine.
fs = newFS('$6.95', style=dict(font=BOOK.installedName, fontSize=12,
textFill=textColor, tracking=1, leading=12 ))
newText(fs, parent=coverTemplate, mt=8, conditions=[Top2Bottom(), Right2Right()])
makeCoverTitles(coverTemplate)
return coverTemplate
def makeCoverTitles(coverTemplate):
u"""Build the text box elements in the coverTemplate, containing the chapter titles
of the magazine."""
    # TODO: Titles should come automatically from random blurb chapter titles in the magazine.
    pl = 8 # Generic offset as padding left from the page padding to align with cover title.
fs = newFS('Skirts &\nScarves', style=dict(font=BOOK_CONDENSED.installedName,
fontSize=64, textFill=1, tracking=0.5, leading=0, rLeading=0.9))
newTextBox(fs, z=20, pl=15, pt=-40, parent=coverTemplate,
conditions=[Left2Left(), Fit2Width(), Float2Top()])
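    # One way to realize the TODO above, left as a commented sketch: pull random
    # chapter titles from the Filibuster blurb generator that ships with PageBot.
    # The import path and the 'news_headline' content key follow other PageBot
    # examples and are assumptions here, not verified against this version.
    #from pagebot.contributions.filibuster.blurb import Blurb
    #blurb = Blurb()
    #chapterTitle = blurb.getBlurb('news_headline') # Random headline-like string.
    #fs = newFS(chapterTitle, style=dict(font=BOOK_CONDENSED.installedName,
    #    fontSize=64, textFill=1, tracking=0.5, leading=0, rLeading=0.9))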
    # TODO: Titles should come automatically from random blurb chapter titles in the magazine.
fs = newFS('Ideal style:\n', style=dict(font=MEDIUM.installedName, fontSize=32,
textFill=1, tracking=0.5, leading=50))
fs += newFS('The almost nothing', style=dict(font=BOOK.installedName,
fontSize=45, textFill=1, tracking=0.5, leading=48))
newTextBox(fs, z=20, pl=8, w=400, pt=0, parent=coverTemplate,
textShadow=shadow,
conditions=[Left2Left(), Float2Top()])
    # TODO: Titles should come automatically from random blurb chapter titles in the magazine.
fs = newFS('Findings\non vineyard island', style=dict(font=BOOK_LIGHT.installedName,
fontSize=72, textFill=1, tracking=0.5, leading=74))
newTextBox(fs, z=20, pl=8, pt=40, parent=coverTemplate,
style=dict(shadowOffset=(4, -4), shadowBlur=20, shadowFill=(0,0,0,0.6)),
textShadow=shadow,
conditions=[Left2Left(), Fit2Width(), Float2Top()])
    # TODO: Titles should come automatically from random blurb chapter titles in the magazine.
c = (1, 1, 0, 1) #lighter(int2Color(0x99CBE9)) # Pick from light spot in the photo
fs = newFS('Exclusive:\n', style=dict(font=MEDIUM.installedName, fontSize=32,
textFill=c, tracking=0.5, lineHeight=34))
fs += newFS('Interview with Pepper+Tom ', style=dict(font=BOOK.installedName,
fontSize=32, textFill=c, tracking=0.5, lineHeight=34))
newTextBox(fs, z=20, pl=pl, pt=20, parent=coverTemplate,
style=dict(shadowOffset=(4, -4), shadowBlur=20, shadowFill=(0,0,0,0.6)),
textShadow=shadow,
conditions=[Left2Left(), Fit2Width(), Float2Bottom()])
def makeTemplate1(w, h):
# Template 16
template = Template(w=w, h=h, padding=PADDING) # Create template of main size. Front page only.
# Show grid columns and margins if rootStyle.showGrid or rootStyle.showGridColumns are True
"""
# Create empty image place holders. To be filled by running content on the page.
template.cContainer(2, -0.7, 5, 4) # Empty image element, cx, cy, cw, ch
template.cContainer(0, 5, 2, 3)
# Create linked text boxes. Note the "nextPage" to keep on the same page or to next.
template.cTextBox('', 0, 0, 2, 5, eId=FLOWID1, nextBox=FLOWID2, nextPage=0, fill=BOX_COLOR)
template.cTextBox('', 2, 3, 2, 5, eId=FLOWID2, nextBox=FLOWID3, nextPage=0, fill=BOX_COLOR)
template.cTextBox('', 4, 3, 2, 5, eId=FLOWID3, nextBox=FLOWID1, nextPage=1, fill=BOX_COLOR)
# Create page number box. Pattern pageNumberMarker is replaced by actual page number.
template.text(rs['pageIdMarker'], (template.css('w',0)-template.css('mr',0), 20), style=rs, font=BOOK, fontSize=12, fill=BOX_COLOR, align='right')
"""
return template
def makeTemplate2(w, h):
# Template 2
template = Template(w=w, h=h, padding=PADDING) # Create second template. This is for the main pages.
# Show grid columns and margins if rootStyle.showGrid or rootStyle.showGridColumns are True
"""
template.cContainer(4, 0, 2, 3) # Empty image element, cx, cy, cw, ch
template.cContainer(0, 5, 2, 3)
template.cContainer(2, 2, 2, 2)
template.cContainer(2, 0, 2, 2)
template.cContainer(4, 6, 2, 2)
template.cTextBox('', 0, 0, 2, 5, eId=FLOWID1, nextBox=FLOWID2, nextPage=0, fill=BOX_COLOR)
template.cTextBox('', 2, 4, 2, 4, eId=FLOWID2, nextBox=FLOWID3, nextPage=0, fill=BOX_COLOR)
template.cTextBox('', 4, 3, 2, 3, eId=FLOWID3, nextBox=FLOWID1, nextPage=1, fill=BOX_COLOR)
# Create page number box. Pattern pageNumberMarker is replaced by actual page number.
template.text(rs['pageIdMarker'], (template.css('w',0) - template.css('mr',0), 20), style=rs, font=BOOK, fontSize=12, fill=BOX_COLOR, align='right')
"""
return template
# -----------------------------------------------------------------
def makeDocument():
u"""Demo page composer."""
coverTemplate1 = makeCoverTemplate(COVER_IMAGE_PATH1, W, H)
template1 = makeTemplate1(W, H)
template2 = makeTemplate2(W, H)
# Create new document with (w,h) and fixed amount of pages.
# Make number of pages with default document size, start a page=1 to make SpreadView work.
# Initially make all pages default with template2.
# Oversized document (docW, docH) is defined in the rootStyle.
doc = Document(title=EXPORT_PATH, w=W, h=H, autoPages=1, originTop=False,
template=template1, startPage=1)
# TODO Will be expanded with more pages later.
view = doc.getView()
#view = SpreadView(parent=doc) # Show as spread, not a single pages.
view.padding = 40
view.showPageCropMarks = True
view.showPageRegistrationMarks = True
view.showPageFrame = False
view.showPagePadding = False
view.showElementOrigin = False
view.showElementDimensions = False
# Cache some values from the root style that we need multiple time to create the tag styles.
"""
fontSize = rs['fontSize']
leading = rs['leading']
rLeading = rs['rLeading']
listIndent = rs['listIndent']
language = rs['language']
# Add styles for whole document and text flows.
# Note that some values are defined here for clarity, even if their default root values
# are the same.
doc.newStyle(name='chapter', font=BOOK)
doc.newStyle(name='title', fontSize=3*fontSize, font=BOLD)
doc.newStyle(name='subtitle', fontSize=2.6*fontSize, font=BOOK_ITALIC)
doc.newStyle(name='author', fontSize=2*fontSize, font=BOOK, fill=(1, 0, 0))
doc.newStyle(name='h1', fontSize=3.85*fontSize, font=SEMIBOLD_CONDENSED, textFill=(1, 0, 0),
leading=2.5*leading, tracking=H1_TRACK, postfix='\n')
doc.newStyle(name='h2', fontSize=1.5*fontSize, font=SEMIBOLD, textStroke=None,
fill=(0, 0, 1), leading=1*leading, rLeading=0, tracking=H2_TRACK,
prefix='', postfix='\n')
doc.newStyle(name='h3', fontSize=1.1*fontSize, font=MEDIUM, textFill=(1, 0, 0), textStroke=None,
leading=leading, rLeading=0, rNeedsBelow=2*rLeading, tracking=H3_TRACK,
prefix='\n', postfix='\n')
doc.newStyle(name='h4', fontSize=1.1*fontSize, font=BOOK, textFill=(0, 1, 0), textStroke=None,
leading=leading, rLeading=0, rNeedsBelow=2*rLeading, tracking=H3_TRACK,
paragraphTopSpacing=U, paragraphBottomSpacing=U, prefix='\n', postfix='\n')
# Spaced paragraphs.
doc.newStyle(name='p', fontSize=fontSize, font=BOOK, textFill=0.1, prefix='', postfix='\n',
rTracking=P_TRACK, leading=14, rLeading=0, align=LEFT_ALIGN, hyphenation=True)
doc.newStyle(name='b', font=SEMIBOLD)
doc.newStyle(name='em', font=BOOK_ITALIC)
doc.newStyle(name='hr', stroke=(1, 0, 0), strokeWidth=4)
doc.newStyle(name='br', postfix='\n') # Simplest way to make <br/> show newline
doc.newStyle(name='a', prefix='', postfix='')
doc.newStyle(name='img', leading=leading, fontSize=fontSize, font=BOOK)
# Footnote reference index.
doc.newStyle(name='sup', font=MEDIUM, rBaselineShift=0.6, prefix='', postfix=' ',
fontSize=0.6*fontSize)
doc.newStyle(name='li', fontSize=fontSize, font=BOOK,
tracking=P_TRACK, leading=leading, hyphenation=True,
# Lists need to copy the listIndex over to the regalar style value.
tabs=[(listIndent, LEFT_ALIGN)], indent=listIndent,
firstLineIndent=1, postfix='\n')
doc.newStyle(name='ul', prefix='', postfix='')
doc.newStyle(name='literatureref', fill=0.5, rBaselineShift=0.2, fontSize=0.8*fontSize)
doc.newStyle(name='footnote', fill=(1, 0, 0), fontSize=0.8*U, font=BOOK)
doc.newStyle(name='caption', tracking=P_TRACK, language=language, fill=0.2,
leading=leading*0.8, fontSize=0.8*fontSize, font=BOOK_ITALIC,
indent=U/2, tailIndent=-U/2, hyphenation=True)
"""
# Change template of page 1
page1 = doc[0]
page1.applyTemplate(coverTemplate1)
if 0: #NOT NOW
page2 = doc[2] # Default is template1, as defined in Document creation.
page2.applyTemplate(template1)
page3 = doc[3] # Default is template1, as defined in Document creation.
page3.applyTemplate(template2)
# Show thumbnail of entire paga4 on cover.
# TODO: Needs to be masked still.
# TODO: Scale should not be attribute of style, but part of placement instead.
#page3.style['scaleX'] = page3.style['scaleY'] = 0.1
#page1.place(page3, 500, 48)# sx, sy)
"""
# Create main Galley for this page, for pasting the sequence of elements.
g = Galley()
t = Typesetter(g)
t.typesetFile(MD_PATH)
# Fill the main flow of text boxes with the ML-->XHTML formatted text.
c = Composer(doc)
c.compose(g, page2, FLOWID1)
"""
doc.solve()
return doc
d = makeDocument()
d.export(EXPORT_PATH, viewId=SpreadView.viewId)
| mit | 3,673,058,861,442,119,000 | 48.543974 | 152 | 0.678961 | false |
philrosenfield/TPAGB-calib | model_plots.py | 1 | 26550 | import sfh_tests_multi_proc as sfh_tests
import os
import numpy as np
import matplotlib.pylab as plt
from TPAGBparams import snap_src, research_path
import brewer2mpl
import ResolvedStellarPops as rsp
import fileIO
import galaxy_tests
color_scheme = ['#d73027', '#fc8d59', '#fee090', '#669966', '#e0f3f8', '#4575b4']
fontlarge = 24
fontmid = 20
fontsmall = 18
def translate_model_name(model, small=False):
if 'oct' in model.lower():
name = 'R75'
if 'nov13eta' in model.lower():
name = '\eta=0'
if model.lower() == 'nov13':
name = 'mSC05'
if 'feb' in model.lower():
name = 'FEB14'
if small is True:
new_model = r'$\dot M_{\rm pre\!-\!dust}^{\rm %s}$' % name
else:
new_model = r'$\dot{M}_{\rm pre\!-\!dust}=%s$' % name
return new_model
def compare_agb_lifetimes():
import glob
track_loc = research_path + \
'TP-AGBcalib/AGBTracks/plots_for_paperI/agbz001_3dup/'
models = ['NOV13', 'NOV13eta0', 'OCT13']
model_name = translate_model_name('nov13')
# these two have to line up:
search_formats = ['*dL0.0*', '*dL0.50*', '*dL2*']
labels = [r'%s' % model_name,
r'%s: $0.5\lambda$' % model_name,
r'%s: $2\lambda$' % model_name]
track_sets = [rsp.fileIO.get_files(track_loc, sf) for sf in search_formats]
cols1 = ['k', '#d73027', '#fee090', '#e0f3f8', '#91bfdb', '#4575b4']
bmap = brewer2mpl.get_map('Blues', 'Sequential', 9)
cols2 = bmap.mpl_colors[3::2]
fig, axs = plt.subplots(ncols=2, figsize=(10, 5), sharex=True, sharey=True)
for i, track_set in enumerate(track_sets):
tracks = np.array([fileIO.get_numeric_data(t) for t in track_set])
tracks = tracks[np.argsort([t.mass for t in tracks])]
masses = np.array([t.mass for t in tracks])
taus = np.array([np.sum(t.data_array['dt']) for t in tracks])
plt_kw = {'lw': 3, 'label': labels[i], 'color': cols1[i]}
if i == 0:
for ax in axs:
ax.plot(masses, taus/1e6, lw=4, color='k')
plt_kw['color'] = cols2[0]
ax.plot(masses, taus/1e6, **plt_kw)
else:
axs[1].plot(masses, taus/1e6, lw=4, color='k')
axs[1].plot(masses, taus/1e6, **plt_kw)
for j in range(len(models)):
model_name = models[j].replace('.dat', '').split('_')[-1]
if models[j].lower() == 'nov13':
continue
base = research_path + \
'TP-AGBcalib/AGBTracks/CAF09/S_%s' % model_name
agb_track_loc = os.path.join(base, glob.glob1(base, '*0.001*')[0])
track_names = [os.path.join(agb_track_loc, a)
for a in os.listdir(agb_track_loc)
if a.startswith('agb_') and not 'b_1.75' in a
and not 'b_1.80' in a]
tracks = [fileIO.get_numeric_data(t) for t in track_names]
tracks = [t for t in tracks if not t == -1 and t.data_array.size > 1]
tracks = np.array(tracks)[np.argsort([t.mass for t in tracks])]
masses = np.array([t.mass for t in tracks])
taus = np.array([np.sum(t.data_array['dt']) for t in tracks])
model_name = translate_model_name(model_name)
plt_kw = {'lw': 3, 'label': model_name, 'color': cols2[j]}
axs[0].plot(masses, taus/1e6, lw=4, color='k')
axs[0].plot(masses, taus/1e6, **plt_kw)
for ax in axs:
ax.legend(loc=0, frameon=False, fontsize=fontsmall)
ax.set_xlim(1, 2.95)
ax.set_ylim(.25, 3.7)
ax.set_xlabel(r'${\rm Initial\ Mass\ (M_\odot)}$', fontsize=fontlarge)
ax.tick_params(labelsize=fontmid)
fig.subplots_adjust(left=0.08, right=0.98, bottom=0.15, top=0.95,
wspace=0.01)
axs[0].set_ylabel(r'${\rm Lifetime\ (Myr)}$', fontsize=fontlarge)
plt.savefig('lambda_plot.png', dpi=150)
return axs
def agb_lifetimes(models, z=0.002):
import glob
tauss = []
btauss = []
for j in range(len(models)):
print models[j]
print
fig, ax = plt.subplots()
fig2, ax2 = plt.subplots()
model_name = models[j].replace('.dat', '').split('_')[-1]
agb_track_loc = research_path + \
'TP-AGBcalib/AGBTracks/CAF09/S_%s/' % model_name
base = research_path + \
'TP-AGBcalib/AGBTracks/CAF09/S_%s' % model_name
if z == 'all':
zs = np.array([d.split('_')[1].replace('Z','')
for d in glob.glob1(base, '*')], dtype=float)
else:
zs = [z]
zs = np.array([i for i in zs if i <= 0.008])
if len(zs) > 8.:
zs = zs[::2]
print zs
cnum = np.max([len(zs), 3])
bmap = brewer2mpl.get_map('Blues', 'Sequential', cnum + 1)
cols = bmap.mpl_colors[1:]
for i, z in enumerate(np.sort(zs)):
try:
agb_track_loc = os.path.join(base, glob.glob1(base, '*%g*' % z)[0])
except IndexError:
print 'no Z=%g tracks in %s' % (z, base)
continue
if not os.path.isdir(agb_track_loc) is True:
print model_name, 'no agb tracks found'
model_name = translate_model_name(models[j])
agb_track_names = [os.path.join(agb_track_loc, a)
for a in os.listdir(agb_track_loc)
if a.startswith('agb_')]
tracks = [fileIO.get_numeric_data(agb_track)
for agb_track in agb_track_names]
tracks = [t for t in tracks if not t == -1 and t.data_array.size > 1]
masses = np.array([t.mass for t in tracks])
sort = np.argsort(masses)
masses = masses[sort]
tracks = np.array(tracks)[sort]
logls = np.array([t.get_col('L_star') for t in tracks])
brights = np.array([np.nonzero(logl > 3.4)[0] for logl in logls])
#m_cs = np.array([t.get_col('M_c')[0] for t in tracks])
#ax2.plot(masses, m_cs, lw=2, color='black')
taus = np.array([np.sum(t.data_array['dt']) for t in tracks])
btaus = np.array([np.sum(t.data_array['dt'][b])
for t, b in zip(tracks, brights)])
tauss.append(taus)
btauss.append(btaus)
plt_kw = {'lw': 3, 'label': '$Z=%g$' % z, 'color': cols[i]}
ax.plot(masses, taus/1e6, lw=4, color='k')
ax2.plot(masses, btaus/1e6, lw=4, color='k')
ax.plot(masses, taus/1e6, **plt_kw)
ax2.plot(masses, btaus/1e6, **plt_kw)
with open('tpagb_lifetimes_S_%s_Z%g.dat' % (models[j], z), 'w') as out:
out.write('# mass tpagb_tau tpagb_tau_bright \n')
np.savetxt(out, np.array([masses, taus, btaus]).T, fmt='%.3f')
for ax in [ax, ax2]:
ax.set_xlabel('${\\rm Initial\ Mass\ (M_\odot)}$', fontsize=fontlarge)
ax.set_ylabel('${\\rm Lifetime\ (Myr)}$', fontsize=fontlarge)
ax.legend(loc=0, frameon=False)
ax.set_xlim(0, 5)
ax.set_ylim(0, 5)
ax2.annotate('$\log L/L_\odot > 3.4$', (0.03, 0.97), xycoords='axes fraction',
fontsize=fontlarge, va='top')
ax2.set_ylabel(ax2.get_ylabel().replace('(Myr)', '(Myr)'))
#ax2.set_ylabel('${\\rm Pre\!-\!Flash\ Core\ Mass\ (M_\odot)}$', fontsize=24)
fig.savefig('tpagb_lifetime_%s.png' % (models[j]), dpi=150)
fig2.savefig('tpagb_lifetime_bright_%s.png' % (models[j]), dpi=150)
return
def load_plot_limits(filename='default'):
if filename == 'default':
filename = snap_src + '/tables/cmd_plot_limits.dat'
dtype = [('target', '|S16'),
('opt_cmdmin', '<f8'),
('opt_cmdmax', '<f8'),
('opt_lfmin', '<f8'),
('opt_lfmax', '<f8'),
('ir_cmdmin', '<f8'),
('ir_cmdmax', '<f8'),
('ir_lfmin', '<f8'),
('ir_lfmax', '<f8'),
('opt_offset', '<f8'),
('ir_offset', '<f8')]
lims = np.genfromtxt(filename, dtype=dtype)
return lims.view(np.recarray)
def plot_lf_with_stages(target, trilegal_output):
outfile_dir = snap_src + '/models/varysfh/match-hmc/'
vSFH, vsfh_kws = sfh_tests.prepare_vsfh_run([target], ['cmd_input_CAF09_S_NOV13.dat'], 50,
vsfh_kw={'outfile_loc': outfile_dir,
'extra_str': ''},
default_kw=None)
pl = sfh_tests.Plotting(vSFH[0])
cols = color_scheme
cols.append('#9966cc')
del cols[2]
del cols[3]
kw = {'trilegal_output': trilegal_output,
'narratio': False,
'add_stage_lfs': 'all',
'plot_data': False,
'plot_models': False,
'cols': cols,
'stage_lf_kw': {'lw': 3, 'label': translate_model_name('nov13')}}
ax1, ax2 = pl.compare_to_gal(target, **kw)
lims = load_plot_limits()
row = lims[np.nonzero(lims['target'] == target)[0]]
for ax, band in zip([ax1, ax2], ['opt', 'ir']):
ax.set_xlim(row['%s_cmdmin' % band], row['%s_cmdmax' % band])
ax.set_ylim(row['%s_lfmin' % band], row['%s_lfmax' % band])
base_dir = snap_src + '/models/varysfh/match-hmc'
model = 'caf09_s_nov13'
file_loc = os.path.join(base_dir, target, model, 'mc')
best_tri_out, = rsp.fileIO.get_files(file_loc, '*opt_best*')
filter1 = sfh_tests.get_filter1(target)
sgal = rsp.Galaxies.simgalaxy(best_tri_out, filter1=filter1,
filter2='F814W')
return ax1, ax2
def plot_lfs():
pl = sfh_tests.Plotting()
outfile_loc='/home/phil/Dropbox/research/varysfh/'
cmd_inputs = ['CAF09_S_NOV13'.lower(),
'CAF09_S_NOV13eta0'.lower(),
'CAF09_S_OCT13'.lower()]
targets = ['ddo71', 'hs117', 'kkh37', 'ngc2976-deep', 'ngc404', 'ddo78']
one_plot = True
lims = load_plot_limits()
for target in targets:
print target
if one_plot is True:
fig, axs = plt.subplots(ncols=2, figsize=(12,6))
plt.subplots_adjust(right=0.95, left=0.05, wspace=0.1)
cols = ['black', 'navy', 'darkgreen']
narratio=False
else:
cols = ['black'] * len(cmd_inputs)
axs = None
narratio=True
plot_data = True
for i, cmd_input in enumerate(cmd_inputs):
if i > 0 and one_plot is True:
plot_data = False
narratio_file_name = os.path.join(outfile_loc,
'%s_%s_narratio.dat' %
(cmd_input, target.lower()))
opt_lf_file = os.path.join(outfile_loc,
'%s_%s_opt_lf.dat' %
(cmd_input, target.lower()))
ir_lf_file = os.path.join(outfile_loc,
'%s_%s_ir_lf.dat' %
(cmd_input, target.lower()))
ax1, ax2 = pl.compare_to_gal(target, opt_lf_file=opt_lf_file,
ir_lf_file=ir_lf_file,
hist_it_up=False, outfile_loc=outfile_loc,
narratio_file_name=narratio_file_name,
extra_str=cmd_input.split('_')[-1]+'_',
axs=axs, plt_kw={'color': cols[i]},
narratio=narratio, plot_data=plot_data)
#ax1.set_title(cmd_input.replace('_', '\ '))
lab = cmd_input.split('_')[-1]
[ax.plot([0,0], [0,0], lw=3, color=cols[i], label='$%s$' % lab) for ax in [ax1, ax2]]
row = lims[lims['target'] == target]
ax1.set_xlim(row['opt_xmin'], row['opt_xmax'])
ax2.set_xlim(row['ir_xmin'], row['ir_xmax'])
ax1.set_ylim(row['opt_ymin'], row['opt_ymax'])
ax2.set_ylim(row['ir_ymin'], row['ir_ymax'])
[ax.legend(loc=0, frameon=False) for ax in [ax1, ax2]]
figtitle = '%s%s_lfs.png' % (cmd_input.split('_')[-1]+'_', target)
outfile = os.path.join(outfile_loc, figtitle)
def compare_mass_loss(masses=1.0, z=0.001, sets=['NOV13', 'OCT13', 'NOV13eta0'],
paola=False):
'''
made to plot a comparison between several mass prescriptions.
Labels for the plot are set up stupidly, maybe in in_dict or labels arg...
'''
from matplotlib.ticker import NullFormatter
teff_max = 3.5
track_files = None
if paola is True:
# hack to use specific tracks from paola
track_dir = research_path + '/TP-AGBcalib/AGBTracks/plots_for_paperI/'
file_end = '_Mc0.00_dMc0.00_Tbd6.40_L0.00_dL0.00_C0.00_Nr3.00_rates0_KOPv_KMOLv.dat'
if masses == 2.0:
track_files = \
[track_dir + 'agb_2.00_Z0.00100000_Mdot50_eta0.00' + file_end,
track_dir + 'agb_2.00_Z0.00100000_Mdot49_eta0.40' + file_end,
track_dir + 'agb_2.00_Z0.00100000_Mdot48_eta8.00' + file_end,
track_dir + 'agb_2.00_Z0.00100000_Mdot50_eta0.40' + file_end]
teff_max = 3.4
if masses == 1.0:
track_files = \
[track_dir + 'agb_1.00_Z0.00100000_Mdot50_eta0.00' + file_end,
track_dir + 'agb_1.00_Z0.00100000_Mdot49_eta0.40' + file_end,
track_dir + 'agb_1.00_Z0.00100000_Mdot48_eta8.00' + file_end,
track_dir + 'agb_1.00_Z0.00100000_Mdot50_eta0.40' + file_end]
teff_max = 3.4
labels = ['$\\dot{M}_{\\rm{pre-dust}}=0.0$','$\\rm{R75}$',
'$\\rm{SC05}$', '$\\rm{mSC05}$']
if track_files is not None:
nrows = len(track_files)
else:
nrows = len(sets)
fig, axs = plt.subplots(nrows=nrows, ncols=2, figsize=(8, 8))
anorm = 1e6
xlab0 = '\\rm{Age}\ (10^6\ \\rm{yr})'
ylab0 = '\log\ \dot{M}\ (\\rm{M_\odot\ yr}^{-1})'
ylab1 = '\log\ L\ (L_\odot)'
xlab1 = '\log\ T_{\\rm eff}\ (\\rm{K})'
agb_tracks_dir = research_path + 'TP-AGBcalib/AGBTracks/CAF09'
if type(masses) is not list:
masses = [masses]
cols = ['k']
for j, mass in enumerate(masses):
if track_files is None:
tnames = []
labels = []
for tset in sets:
label = translate_model_name(tset)
direc = os.path.join(agb_tracks_dir, 'S_' + tset)
direc, = [os.path.join(direc, d)
for d in os.listdir(direc) if str(z) in d]
tname = rsp.fileIO.get_files(direc, 'agb_%.2f*' % mass)[0]
tnames.append(tname)
labels.append('$%s$' % label)
tracks = [fileIO.get_numeric_data(t) for t in tnames]
else:
tracks = [fileIO.get_numeric_data(t) for t in track_files]
for i in range(len(tracks)):
axs[i][0].plot(tracks[i].data_array['ageyr']/anorm,
tracks[i].data_array['dMdt'],
label='$M=%g\ M_\odot$' % mass, lw=1, color=cols[j])
axs[i][0].plot(tracks[i].data_array['ageyr'][tracks[i].cstar]/anorm,
tracks[i].data_array['dMdt'][tracks[i].cstar],
lw=1, color='darkred')
axs[i][1].plot(tracks[i].data_array['T_star'],
tracks[i].data_array['L_star'],
label='$M=%g\ M_\odot$' % mass, lw=1, color=cols[j])
axs[i][1].plot(tracks[i].data_array['T_star'][tracks[i].cstar],
tracks[i].data_array['L_star'][tracks[i].cstar],
lw=1, color='darkred')
axs[i][0].annotate(labels[i], (0.03, 0.96), fontsize=fontlarge,
xycoords='axes fraction', va='top')
axs[-1, 0].set_xlabel('$%s$' % xlab0, fontsize=fontlarge)
axs[-1, 1].set_xlabel('$%s$' % xlab1, fontsize=fontlarge)
plt.annotate('$%s$' % ylab0, (0.03, 0.5), fontsize=fontlarge, va='center',
xycoords='figure fraction', rotation='vertical')
plt.annotate('$%s$' % ylab1, (0.95, 0.5), fontsize=fontlarge, va='center',
xycoords='figure fraction', rotation='vertical')
[ax.yaxis.tick_right() for ax in axs.flatten()[1::2]]
[ax.xaxis.set_major_formatter(NullFormatter())
for ax in axs.flatten()[:-2]]
# mass loss
[ax.set_ylim(-11.5, -4.5) for ax in axs[:, 0]]
# log l
[ax.set_ylim(2.81, 4.25) for ax in axs[:, 1]]
# log te
[ax.set_xlim(3.66, teff_max) for ax in axs[:, 1]]
# age Myr
[ax.set_xlim(0, 2.45) for ax in axs[:, 0]]
# top left plot only
if paola is False:
[ax.legend(loc=4, fontsize=fontlarge, frameon=False)
for ax in [axs.flatten()[0]]]
fig.subplots_adjust(wspace=0.02, hspace=0.02)
plt.savefig('compare_massloss_M%g_Z%g.png' % (masses[0], z), dpi=150)
return axs
def tpagb_mass_histograms(chi2_location='draft_run', band='opt', dry_run=True,
model='nov13', model_src='default', force=False,
cumsum=True):
'''
plot a histogram of the scaled number of tpagb stars for the best fitting
model in each model chi2 file in the chi2_location
all args besides chi2_location are passed to tpagb_masses.
the trilegal output files can be in a difference location with directories
model_src/target/model/mc see tpagb_masses.
plot colors are fixed at 6: errors for more than 6 targets.
'''
if chi2_location == 'draft_run':
chi2_location = snap_src + '/models/varysfh/match-hmc/'
chi2files = rsp.fileIO.get_files(chi2_location, '*%s_*chi2.dat' % model)
# I do gaussian chi2 too, not just poisson...
chi2files = [c for c in chi2files if not 'gauss' in c][::-1]
# get the tpagb masses
(masses, norm) = zip(*[tpagb_masses(c, band=band, dry_run=dry_run,
model_src=model_src, force=force)
for c in chi2files])
norm = np.array(norm)
ts = [os.path.split(c)[1].split('_')[3] for c in chi2files]
targets = galaxy_tests.ancients()
tinds = [ts.index(t.lower()) for t in targets]
targets = np.array(ts)[tinds]
labels = ['$%s$' % t.upper().replace('-DEEP', '').replace('-', '\!-\!')
for t in targets]
# get the hist made nice and stacked, and then scale it down.
hists, bins, pps = plt.hist(masses, stacked=True, align='left',
histtype='step', bins=50, visible=False)
plt.close()
# scaled histograms
norm_hists = [hists[i] * norm[i] for i in range(len(hists))]
# actual plot... norm scales the histogram.
# set up plot
cols = color_scheme
if cumsum is True:
fig, ax = plt.subplots()
axs = [ax]
else:
fig, axs = plt.subplots(nrows=len(hists), sharex=True)
# mask 0 values so there is a vertical line on the plot
for i in range(len(hists)):
norm_hists[i][norm_hists[i]==0] = 1e-5
if cumsum is True:
yplot = np.cumsum(norm_hists[i]) / np.sum(norm_hists[i])
else:
yplot = norm_hists[i]
axs[i].plot(bins[:-1], yplot, linestyle='steps-pre', color='grey', lw=4)
axs[i].plot(bins[:-1], yplot, linestyle='steps-pre', color=cols[i],
lw=2, label=labels[i], alpha=.9)
#ax.plot(bins[:-1], np.sum(norm_hists, axis=0), linestyle='steps-pre',
# color='darkgrey', lw=3, label=r'$\rm{Total}$')
axs[i].tick_params(labelsize=fontmid)
#ax.set_yscale('log')
#ax.set_ylim(3, 10**3)
axs[-1].set_xlabel(r'$\rm{Mass\ M_\odot}$', fontsize=fontlarge)
if cumsum is True:
axs[0].legend(loc=0, frameon=False)
axs[0].set_ylabel(r'$\rm{Cumulative\ Fraction\ of\ {TP\!-\!AGB}\ Stars}$', fontsize=fontlarge)
axs[0].set_xlim(0.6, 3)
fname = 'tpagb_mass_hist_%s_%s.png' % (band, model)
else:
[ax.set_xlim(0.8, 3) for ax in axs]
plt.annotate(r'$\rm{\#\ of\ {TP\!-\!AGB}\ Stars}$', (0.03, 0.5),
fontsize=fontlarge, va='center',
xycoords='figure fraction', rotation='vertical')
fname = 'tpagb_mass_hist_%s_%s_nocumsum.png' % (band, model)
[ax.locator_params(axis='y', nbins=3) for ax in axs]
fig.subplots_adjust(hspace=0.001)
plt.savefig(fname, dpi=150)
return axs
def tpagb_masses(chi2file, band='opt', model_src='default', dry_run=False,
mass=True, old=False, force=False):
'''
using the chi2file run trilegal with the best fit sfh and return the
normalization and tp-agb masses (scaled simulation)
'''
if model_src == 'default':
model_src = snap_src + '/models/varysfh/'
components = os.path.split(chi2file)[1].split('_')
model = '_'.join(components[:3])
target = components[3]
model_loc = os.path.join(model_src, target, model, 'mc')
if force is False:
# read chi2 file
chi2_data = rsp.fileIO.readfile(chi2file)
# best fitting chi2 run in band
ibest_fit = np.argmin(chi2_data['%s_chi2' % band])
# should work out to isfr == ibest_fit, but just in case:
isfr = chi2_data['sfr'][ibest_fit]
# associated input file for best run
tri_inp, = rsp.fileIO.get_files(model_loc, '*%03d.dat' % isfr)
tri_outp = tri_inp.replace('.dat', '_%s_best.dat' % band).replace('inp', 'outp')
else:
tri_inp, = rsp.fileIO.get_files(model_loc, '*best.dat')
tri_outp = tri_inp.replace('inp', 'outp')
rsp.fileIO.ensure_file(tri_inp)
# run trilegal with best run
cmd_input = 'cmd_input_%s.dat' % model.upper()
rsp.TrilegalUtils.run_trilegal(cmd_input, tri_inp, tri_outp,
dry_run=dry_run)
# load trilegal run and do the normalization (I should really save that...)
# all that's needed here is opt_norm and ir_norm.
filter1 = sfh_tests.get_filter1(target)
ags=sfh_tests.load_default_ancient_galaxies()
files = sfh_tests.FileIO()
files.read_trilegal_catalog(tri_outp, filter1=filter1)
files.load_data_for_normalization(target=target, ags=ags)
files.load_trilegal_data()
sopt_rgb, sir_rgb, sopt_agb, sir_agb = \
sfh_tests.rgb_agb_regions(files.sgal, files.opt_offset,
files.opt_trgb, files.opt_trgb_err,
ags, files.ir_offset,
files.ir_trgb, files.ir_trgb_err,
files.opt_mag, files.ir_mag)
opt_norm, ir_norm, opt_rgb, ir_rgb, opt_agb, ir_agb = \
sfh_tests.normalize_simulation(files.opt_mag, files.ir_mag,
files.nopt_rgb, files.nir_rgb,
sopt_rgb, sir_rgb, sopt_agb,
sir_agb)
print target, opt_norm, ir_norm
with open(tri_inp.replace('.dat', '_norms.dat'), 'w') as out:
out.write('# opt_norm ir_norm\n')
out.write('%.6f %.6f \n' % (opt_norm, ir_norm))
if band == 'opt':
norm = opt_norm
if band == 'ir':
norm = ir_norm
# load tp-agb stars
files.sgal.all_stages('TPAGB')
# it's crazy to believe there is a tp-agb star bluer than the color cut
# but to be consistent with the rest of the method, will do a color cut.
cut_inds = files.__getattribute__('%s_color_cut' % band)
itpagb = list(set(files.sgal.itpagb) & set(cut_inds))
# grab TP-AGB masses
if mass is True:
mass = files.sgal.data.get_col('m_ini')
ret_val = mass[itpagb]
else:
met = files.sgal.data.get_col('[M/H]')
if old is True:
olds, = np.nonzero(files.sgal.data.get_col('logAge') > 8.5)
itpagb = list(set(files.sgal.itpagb) & set(cut_inds) & set(olds))
ret_val = met[itpagb]
return ret_val, norm
def trilegal_metals(chi2_location='draft_run', band='opt', dry_run=False,
model='nov13', model_src='default', old=False, feh=False):
if chi2_location == 'draft_run':
chi2_location = snap_src + '/models/varysfh/match-hmc/'
chi2files = rsp.fileIO.get_files(chi2_location, '*%s_*chi2.dat' % model)
# I do gaussian chi2 too, not just poisson...
chi2files = [c for c in chi2files if not 'gauss' in c][::-1]
# get the tpagb masses
(mhs, norm) = zip(*[tpagb_masses(c, band=band, dry_run=dry_run,
model_src=model_src, mass=False,
old=old) for c in chi2files])
ts = [os.path.split(c)[1].split('_')[3] for c in chi2files]
targets = galaxy_tests.ancients()
tinds = [ts.index(t.lower()) for t in targets]
targets = np.array(ts)[tinds]
from ResolvedStellarPops.convertz import convertz
if feh is True:
ind = 4
else:
ind = 1
zs = np.array([convertz(mh=i)[ind] for i in mhs])
for i, target in enumerate(targets):
print '%.4f %.4f %.4f %s ' % (np.min(zs[i]), np.median(zs[i]),
np.max(zs[i]), target)
def plot_random_sfhs(targets='ancients'):
'''
plot the random sfr arrays with the data sfr arrays.
Hard coded file locations. So read the code.
'''
targets = galaxy_tests.load_targets(targets)
for target in targets:
target = target.lower()
# it doesn't matter which model so it is hard coded...
sfr_file_loc = os.path.join(snap_src, 'models', 'varysfh', target,
'caf09_s_nov13', 'mc')
hmc_file_loc = os.path.join(snap_src, 'data', 'sfh_parsec')
target = target.replace('-deep', '')
outfile = '%s_random_sfr.png' % target
hmc_file, = rsp.fileIO.get_files(hmc_file_loc, '%s*sfh' % target)
sfh = sfh_tests.StarFormationHistories(hmc_file, 'match-hmc',
sfr_file_loc=sfr_file_loc,
sfr_file_search_fmt='*sfr')
sfh.plot_sfh('sfr', plot_random_arrays_kw={'from_files': True},
outfile=outfile, zoom=True)
return
| bsd-3-clause | -6,129,775,246,758,989,000 | 40.099071 | 102 | 0.530132 | false |
prov-suite/interop-test-harness | prov_interop/provtranslator/converter.py | 1 | 4897 | """Manages invocation of ProvTranslator service.
"""
# Copyright (c) 2015 University of Southampton
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os.path
import requests
from prov_interop import http
from prov_interop import standards
from prov_interop.component import ConfigError
from prov_interop.component import RestComponent
from prov_interop.converter import ConversionError
from prov_interop.converter import Converter
class ProvTranslatorConverter(Converter, RestComponent):
"""Manages invocation of ProvTranslator service."""
CONTENT_TYPES = {
standards.PROVN: "text/provenance-notation",
standards.TTL: "text/turtle",
standards.TRIG: "application/trig",
standards.PROVX: "application/provenance+xml",
standards.JSON: "application/json"
}
"""dict: mapping from :mod:`prov_service_tests.standards` formats to
content types understood by ProvTranslator
"""
def __init__(self):
"""Create converter.
"""
super(ProvTranslatorConverter, self).__init__()
def configure(self, config):
"""Configure converter. The configuration must hold:
- :class:`prov_interop.converter.Converter` configuration
- :class:`prov_interop.component.RestComponent` configuration
A valid configuration is::
{
"url": "https://provenance.ecs.soton.ac.uk/validator/provapi/documents/"
"input-formats": ["provn", "ttl", "trig", "provx", "json"]
"output-formats": ["provn", "ttl", "trig", "provx", "json"]
}
:param config: Configuration
:type config: dict
:raises ConfigError: if `config` does not hold the above entries
"""
super(ProvTranslatorConverter, self).configure(config)
def convert(self, in_file, out_file):
"""Convert input file into output file.
- Input and output formats are derived from `in_file` and
`out_file` file extensions.
- A check is done to see that `in_file` exists and that the input
and output format are in ``input-formats`` and ``output-formats``
respectively.
- The input and output formats are used to set HTTP ``Content-type``
and ``Accept`` header values, respectively
- The contents of `in_file` are loaded and used to create a
ProvTranslator-compliant HTTP POST request which is submitted to
``url``, to convert the document.
- The HTTP status is checked to to be 200 OK.
- The HTTP response is parsed to get the converted document, and
this is saved to `out_file`.
:param in_file: Input file
:type in_file: str or unicode
:param out_file: Output file
:type out_file: str or unicode
:raises ConversionError: if the input file cannot be found, or the
HTTP response is not 200
:raises requests.exceptions.ConnectionError: if there are
problems executing the request e.g. the URL cannot be found
"""
super(ProvTranslatorConverter, self).convert(in_file, out_file)
in_format = os.path.splitext(in_file)[1][1:]
out_format = os.path.splitext(out_file)[1][1:]
super(ProvTranslatorConverter, self).check_formats(in_format, out_format)
with open(in_file, "r") as f:
doc_str = f.read()
content_type = ProvTranslatorConverter.CONTENT_TYPES[in_format]
accept_type = ProvTranslatorConverter.CONTENT_TYPES[out_format]
headers = {http.CONTENT_TYPE: content_type,
http.ACCEPT: accept_type}
response = requests.post(self._url,
headers=headers,
data=doc_str)
if (response.status_code != requests.codes.ok): # 200 OK
raise ConversionError(self._url + " POST returned " +
str(response.status_code))
with open(out_file, "w") as f:
f.write(response.text)
| mit | -2,496,572,172,419,691,000 | 39.808333 | 80 | 0.69757 | false |
kdiduk/pytoons | pytoons/states/level_state.py | 1 | 1073 | #!/usr/bin/env python3
# vim:fileencoding=utf-8
from pytoons.camera import Camera
from pytoons.common import Size
from pytoons.controls import Key
from pytoons.level import Level
from pytoons.states import BouncingBallState
from pytoons.states import State
import logging
from pytmx import load_pygame
from pygame import display as pg_display
class LevelState(State):
def __init__(self, game, filepath):
super().__init__()
logging.debug('Entering LevelState with level %s' % filepath)
self._game = game
self._camera = Camera(game, Size(800, 600))
self._level = Level(self._screen, self._camera, load_pygame(filepath))
def update(self, elapsed):
super().update(elapsed)
if self._game.get_controller().is_pressed(Key.ENTER):
next_state = BouncingBallState(self._game)
self._game.set_state(next_state)
return
self._level.update(elapsed)
self._camera.update(elapsed)
def render(self):
self._level.render()
pg_display.update()
# eof
| mit | 8,656,680,493,530,159,000 | 26.512821 | 78 | 0.665424 | false |
lovelydev/django-yabackup | YaBackup/models.py | 1 | 1634 | # -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Backup(models.Model):
title = models.CharField(blank=True, max_length=128, verbose_name=_('Title'), help_text=_('Just a title'))
file_name = models.CharField(max_length=64, verbose_name=_('File name'), help_text=_('Date and time will be added to the file name automagically. Example: backup.zip'))
output_directory = models.CharField(max_length=256, verbose_name=_('Output directory'), help_text=_('Where to store files localy? Example: /home/user/backups/'))
upload = models.BooleanField(default=False, verbose_name=_('Upload to Yandex.Disk'))
delete_after_upload = models.BooleanField(default=False, verbose_name=_('Delete after upload'))
mysqldump = models.BooleanField(default=False, verbose_name=_('MySQL dump'), help_text=_("Create a backup of the projects's database"))
pub_date = models.DateTimeField(blank=True, auto_now_add=True, verbose_name=_('Published'))
description = models.TextField(blank=True, verbose_name=_('Description'))
class Meta:
verbose_name = _('Backup')
verbose_name_plural = _('Backups')
def __str__(self):
return self.title
class Path(models.Model):
backup = models.ForeignKey('Backup', related_name='paths', on_delete=models.CASCADE)
path = models.CharField(max_length=256, verbose_name=_('Path'), help_text=_('Absolute path to file or folder'))
class Meta:
verbose_name = _('file or folder')
verbose_name_plural = _('Files and folders')
def __str__(self):
return self.path
| mit | -3,670,743,920,608,373,000 | 45.685714 | 172 | 0.684823 | false |
clark800/pystarch | backend/type_objects.py | 1 | 4330 |
class EqualityMixin(object):
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.__dict__ == other.__dict__)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(str(self))
class BasicMixin(object):
def __str__(self):
return self.__class__.__name__
class ItemTypeMixin(object):
def __str__(self):
return '{0}({1})'.format(self.__class__.__name__, str(self.item_type))
class TupleMixin(object):
def __str__(self):
return '{0}({1})'.format(self.__class__.__name__,
','.join([str(x) for x in self.item_types]))
class CallableMixin(object):
def __str__(self):
return '{0}({1} -> {2})'.format(self.__class__.__name__,
self.signature, self.return_type)
class Unknown(EqualityMixin, BasicMixin):
def example(self):
return object()
class NoneType(EqualityMixin, BasicMixin):
def example(self):
return None
class Bool(EqualityMixin, BasicMixin):
def example(self):
return True
class Num(EqualityMixin, BasicMixin):
def example(self):
return 1
class Str(EqualityMixin, BasicMixin):
def example(self):
return 'a'
class List(EqualityMixin, ItemTypeMixin):
def __init__(self, item_type):
self.item_type = item_type
def example(self):
return [self.item_type.example()]
# hack to allow testing for arbitrary-length tuple
class BaseTuple(EqualityMixin, BasicMixin):
def example(self):
return tuple()
class Tuple(EqualityMixin, TupleMixin):
def __init__(self, item_types):
self.item_types = item_types
def example(self):
return tuple(x.example() for x in self.item_types)
class Set(EqualityMixin, ItemTypeMixin):
def __init__(self, item_type):
self.item_type = item_type
def example(self):
return {self.item_type.example()}
class Dict(EqualityMixin):
def __init__(self, key_type, value_type):
self.key_type = key_type
self.value_type = value_type
def example(self):
return {self.key_type.example(): self.value_type.example()}
def __str__(self):
return '{0}({1},{2})'.format(self.__class__.__name__,
self.key_type, self.value_type)
class Function(EqualityMixin, CallableMixin):
def __init__(self, signature, return_type, evaluator, instance=None):
assert evaluator is not None
self.signature = signature
self.return_type = return_type
self.evaluator = evaluator
self.instance = instance
def example(self):
return object()
# set class_name to __import__ for imports
class Instance(EqualityMixin):
def __init__(self, class_name, attributes):
self.class_name = class_name
self.attributes = attributes # Scope object
self.initialized = False
def example(self):
return object()
def __str__(self):
return '{0}({1})'.format(self.__class__.__name__, self.class_name)
# a Class is a Function that returns an Instance plus static methods/attrs
class Class(EqualityMixin, CallableMixin):
def __init__(self, name, signature, return_type, evaluator, attributes):
self.name = name
self.signature = signature
self.return_type = return_type
self.evaluator = evaluator
# only contains class methods and class attributes
self.attributes = attributes
def example(self):
return object()
def __str__(self):
return self.name
class Maybe(EqualityMixin):
def __init__(self, subtype):
assert subtype is not None
self.subtype = subtype
def example(self):
return self.subtype.example()
def __str__(self):
return '{0}({1})'.format(self.__class__.__name__, self.subtype)
class Union(EqualityMixin):
def __init__(self, *subtypes):
assert len(subtypes) > 0
assert not any(isinstance(x, list) for x in subtypes)
self.subtypes = list(subtypes)
def example(self):
return self.subtypes[0].example()
def __str__(self):
return 'Union({0})'.format(','.join([str(x) for x in self.subtypes]))
| mit | -5,775,183,472,630,599,000 | 24.621302 | 78 | 0.598845 | false |
heiths/allura | ForgeSVN/forgesvn/tests/model/test_svnimplementation.py | 1 | 5826 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from mock import Mock, patch
from nose.tools import assert_equal
from pylons import app_globals as g
from alluratest.controller import setup_unit_test
from allura.model.repository import Commit
from forgesvn.model.svn import SVNImplementation
class TestSVNImplementation(object):
def setUp(self):
setup_unit_test()
def test_compute_tree_new(self):
self._test_compute_tree_new('/trunk/foo/')
self._test_compute_tree_new('/trunk/foo')
self._test_compute_tree_new('trunk/foo/')
self._test_compute_tree_new('trunk/foo')
@patch('allura.model.repository.LastCommitDoc.m.update_partial')
@patch('allura.model.repository.Tree.upsert')
@patch('allura.model.repository.Tree.query.get')
def _test_compute_tree_new(self, path, tree_get, tree_upsert, lcd_partial):
repo = Mock(fs_path=g.tmpdir + '/')
repo.name = 'code'
impl = SVNImplementation(repo)
impl._svn.info2 = Mock()
impl._svn.info2.return_value = [('foo', Mock())]
tree_get.return_value = None # no existing tree
commit = Commit()
commit._id = '5057636b9c1040636b81e4b1:6'
tree_upsert.return_value = (Mock(), True)
tree_id = impl.compute_tree_new(commit, path)
assert_equal(impl._svn.info2.call_args[0]
[0], 'file://' + g.tmpdir + '/code/trunk/foo')
assert lcd_partial.called
def test_last_commit_ids(self):
self._test_last_commit_ids('/trunk/foo/')
self._test_last_commit_ids('/trunk/foo')
self._test_last_commit_ids('trunk/foo/')
self._test_last_commit_ids('trunk/foo')
def _test_last_commit_ids(self, path):
repo = Mock(fs_path=g.tmpdir + '/')
repo.name = 'code'
repo._id = '5057636b9c1040636b81e4b1'
impl = SVNImplementation(repo)
impl._svn.info2 = Mock()
impl._svn.info2.return_value = [('trunk', Mock()), ('foo', Mock())]
impl._svn.info2.return_value[1][1].last_changed_rev.number = '1'
commit = Commit()
commit._id = '5057636b9c1040636b81e4b1:6'
entries = impl.last_commit_ids(commit, [path])
assert_equal(entries, {path.strip('/'): '5057636b9c1040636b81e4b1:1'})
assert_equal(impl._svn.info2.call_args[0]
[0], 'file://' + g.tmpdir + '/code/trunk')
@patch('forgesvn.model.svn.svn_path_exists')
def test__path_to_root(self, path_exists):
repo = Mock(fs_path=g.tmpdir + '/')
repo.name = 'code'
repo._id = '5057636b9c1040636b81e4b1'
impl = SVNImplementation(repo)
path_exists.return_value = False
# edge cases
assert_equal(impl._path_to_root(None), '')
assert_equal(impl._path_to_root(''), '')
assert_equal(impl._path_to_root('/some/path/'), '')
assert_equal(impl._path_to_root('some/path'), '')
# tags
assert_equal(impl._path_to_root('/some/path/tags/1.0/some/dir'),
'some/path/tags/1.0')
assert_equal(impl._path_to_root('/some/path/tags/1.0/'),
'some/path/tags/1.0')
assert_equal(impl._path_to_root('/some/path/tags/'), '')
# branches
assert_equal(impl._path_to_root('/some/path/branches/b1/dir'),
'some/path/branches/b1')
assert_equal(impl._path_to_root('/some/path/branches/b1/'),
'some/path/branches/b1')
assert_equal(impl._path_to_root('/some/path/branches/'), '')
# trunk
assert_equal(impl._path_to_root('/some/path/trunk/some/dir/'),
'some/path/trunk')
assert_equal(impl._path_to_root('/some/path/trunk'), 'some/path/trunk')
# with fallback to trunk
path_exists.return_value = True
assert_equal(impl._path_to_root(''), 'trunk')
assert_equal(impl._path_to_root('/some/path/'), 'trunk')
assert_equal(impl._path_to_root('/tags/'), 'trunk')
assert_equal(impl._path_to_root('/branches/'), 'trunk')
assert_equal(impl._path_to_root('/tags/1.0'), 'tags/1.0')
assert_equal(impl._path_to_root('/branches/branch'), 'branches/branch')
@patch('forgesvn.model.svn.svn_path_exists')
def test_update_checkout_url(self, svn_path_exists):
impl = SVNImplementation(Mock())
opts = impl._repo.app.config.options = {}
svn_path_exists.side_effect = lambda path: False
opts['checkout_url'] = 'invalid'
impl.update_checkout_url()
assert_equal(opts['checkout_url'], '')
svn_path_exists.side_effect = lambda path: path.endswith('trunk')
opts['checkout_url'] = 'invalid'
impl.update_checkout_url()
assert_equal(opts['checkout_url'], 'trunk')
svn_path_exists.side_effect = lambda path: path.endswith('trunk')
opts['checkout_url'] = ''
impl.update_checkout_url()
assert_equal(opts['checkout_url'], 'trunk')
| apache-2.0 | -1,980,516,689,807,777,000 | 42.155556 | 79 | 0.61174 | false |
iproduct/course-social-robotics | 11-dnn-keras/venv/Lib/site-packages/pandas/tests/arithmetic/test_timedelta64.py | 1 | 79354 | # Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import OutOfBoundsDatetime, PerformanceWarning
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
NaT,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
timedelta_range,
)
import pandas._testing as tm
from pandas.tests.arithmetic.common import (
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
def assert_dtype(obj, expected_dtype):
"""
Helper to check the dtype for a Series, Index, or single-column DataFrame.
"""
if isinstance(obj, DataFrame):
dtype = obj.dtypes.iat[0]
else:
dtype = obj.dtype
assert dtype == expected_dtype
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
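        # comparisons on Index/ExtensionArray boxes come back as plain bool
        # ndarrays, so the expected result is boxed differently from the inputs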
xbox = (
box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray
)
tdi = pd.timedelta_range("2H", periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
msg = "Invalid comparison between dtype"
with pytest.raises(TypeError, match=msg):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
@pytest.mark.parametrize(
"td_scalar",
[timedelta(days=1), Timedelta(days=1), Timedelta(days=1).to_timedelta64()],
)
def test_compare_timedeltalike_scalar(self, box_with_array, td_scalar):
# regression test for GH#5963
box = box_with_array
xbox = box if box not in [pd.Index, pd.array] else np.ndarray
ser = Series([timedelta(days=1), timedelta(days=2)])
ser = tm.box_expected(ser, box)
actual = ser > td_scalar
expected = Series([False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(actual, expected)
@pytest.mark.parametrize("invalid", [345600000000000, "a"])
def test_td64_comparisons_invalid(self, box_with_array, invalid):
# GH#13624 for str
box = box_with_array
rng = timedelta_range("1 days", periods=10)
obj = tm.box_expected(rng, box)
assert_invalid_comparison(obj, invalid, box)
@pytest.mark.parametrize(
"other",
[
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.date_range("1970-01-01", periods=10, tz="UTC").array,
np.array(pd.date_range("1970-01-01", periods=10)),
list(pd.date_range("1970-01-01", periods=10)),
pd.date_range("1970-01-01", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_td64arr_cmp_arraylike_invalid(self, other):
        # We don't parametrize this over box_with_array because a listlike
        # `other` plays poorly with assert_invalid_comparison's reversed checks
rng = timedelta_range("1 days", periods=10)._data
assert_invalid_comparison(rng, other, tm.to_array)
def test_td64arr_cmp_mixed_invalid(self):
rng = timedelta_range("1 days", periods=5)._data
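        # mixed object-dtype array: only the element equal to rng[3] should match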
other = np.array([0, 1, 2, rng[3], Timestamp.now()])
result = rng == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = rng != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
rng < other
with pytest.raises(TypeError, match=msg):
rng > other
with pytest.raises(TypeError, match=msg):
rng <= other
with pytest.raises(TypeError, match=msg):
rng >= other
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
@pytest.mark.parametrize("dtype", [None, object])
def test_comp_nat(self, dtype):
left = TimedeltaIndex([Timedelta("1 days"), pd.NaT, Timedelta("3 days")])
right = TimedeltaIndex([pd.NaT, pd.NaT, Timedelta("3 days")])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = TimedeltaIndex(
[
"1 day",
pd.NaT,
"1 day 00:00:01",
pd.NaT,
"1 day 00:00:01",
"5 day 00:00:03",
]
)
tdidx2 = TimedeltaIndex(
["2 day", "2 day", pd.NaT, pd.NaT, "1 day 00:00:02", "5 days 00:00:03"]
)
tdarr = np.array(
[
np.timedelta64(2, "D"),
np.timedelta64(2, "D"),
np.timedelta64("nat"),
np.timedelta64("nat"),
np.timedelta64(1, "D") + np.timedelta64(2, "s"),
np.timedelta64(5, "D") + np.timedelta64(3, "s"),
]
)
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
        # Check that pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range("1 days", periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(["2H", "4H", "6H", "8H", "10H"], freq="2H", name="x")
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(["4H", "8H", "12H", "16H", "20H"], freq="4H", name="x")
tm.assert_index_equal(result, exp)
assert result.freq == "4H"
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(["1H", "2H", "3H", "4H", "5H"], freq="H", name="x")
tm.assert_index_equal(result, exp)
assert result.freq == "H"
idx = TimedeltaIndex(["2H", "4H", "6H", "8H", "10H"], freq="2H", name="x")
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(
["-2H", "-4H", "-6H", "-8H", "-10H"], freq="-2H", name="x"
)
tm.assert_index_equal(result, exp)
assert result.freq == "-2H"
idx = TimedeltaIndex(["-2H", "-1H", "0H", "1H", "2H"], freq="H", name="x")
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(["2H", "1H", "0H", "1H", "2H"], freq=None, name="x")
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(["1 days", pd.NaT, "2 days"], name="foo")
dti = pd.date_range("20130101", periods=3, name="bar")
td = Timedelta("1 days")
dt = Timestamp("20130101")
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = r"unsupported operand type\(s\) for -"
with pytest.raises(TypeError, match=msg):
td - dt
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"], name="bar")
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(["0 days", "1 days", "2 days"], name="bar")
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(["0 days", pd.NaT, "1 days"], name="foo")
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(["0 days", pd.NaT, "-1 days"], name="foo")
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
["20121231", "20130101", "20130102"], freq="D", name="bar"
)
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(["20121231", pd.NaT, "20121230"], name="foo")
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range("20130101", periods=3)
ts = Timestamp("20130101")
dt = ts.to_pydatetime()
dti_tz = pd.date_range("20130101", periods=3).tz_localize("US/Eastern")
ts_tz = Timestamp("20130101").tz_localize("US/Eastern")
ts_tz2 = Timestamp("20130101").tz_localize("CET")
dt_tz = ts_tz.to_pydatetime()
td = Timedelta("1 days")
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta("0 days")
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta("0 days")
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta("0 days")
_check(result, expected)
# tz mismatches
msg = "Timestamp subtraction must have the same timezones or no timezones"
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = "Timestamp subtraction must have the same timezones or no timezones"
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = "Timestamp subtraction must have the same timezones or no timezones"
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(["0 days", "1 days", "2 days"])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(["0 days", "1 days", "2 days"])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta("0 days")
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(["20121231", "20130101", "20130102"], tz="US/Eastern")
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(["1 days", pd.NaT, "2 days"], name="foo")
dti = pd.date_range("20130101", periods=3, name="bar")
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(["0 days", pd.NaT, "0 days"], name="foo")
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(["2 days", pd.NaT, "4 days"], name="foo")
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(["20121231", pd.NaT, "20130101"])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(["1 days", pd.NaT, "2 days"], name="foo")
dti = pd.date_range("20130101", periods=3, name="bar")
td = Timedelta("1 days")
dt = Timestamp("20130101")
result = tdi + dt
expected = DatetimeIndex(["20130102", pd.NaT, "20130103"], name="foo")
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(["20130102", pd.NaT, "20130103"], name="foo")
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(["2 days", pd.NaT, "3 days"], name="foo")
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(["2 days", pd.NaT, "3 days"], name="foo")
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
msg = "Addition/subtraction of integers and integer-arrays"
with pytest.raises(TypeError, match=msg):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : pd.Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(["20130102", pd.NaT, "20130105"])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(["20130102", pd.NaT, "20130105"])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp("20130102")
assert result == expected
result = td + dt
expected = Timestamp("20130102")
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize("freq", ["D", "B"])
def test_timedelta(self, freq):
index = pd.date_range("1/1/2000", periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
back = back._with_freq("infer")
tm.assert_index_equal(index, back)
if freq == "D":
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range("2013", "2014")
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
assert result1.freq == rng.freq
result1 = result1._with_freq(None)
tm.assert_index_equal(result1, result4)
assert result3.freq == rng.freq
result3 = result3._with_freq(None)
tm.assert_index_equal(result2, result3)
def test_tda_add_sub_index(self):
# Check that TimedeltaArray defers to Index on arithmetic ops
tdi = TimedeltaIndex(["1 days", pd.NaT, "2 days"])
tda = tdi.array
dti = pd.date_range("1999-12-31", periods=3, freq="D")
result = tda + dti
expected = tdi + dti
tm.assert_index_equal(result, expected)
result = tda + tdi
expected = tdi + tdi
tm.assert_index_equal(result, expected)
result = tda - tdi
expected = tdi - tdi
tm.assert_index_equal(result, expected)
def test_tda_add_dt64_object_array(self, box_with_array, tz_naive_fixture):
# Result should be cast back to DatetimeArray
box = box_with_array
dti = pd.date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
dti = dti._with_freq(None)
tdi = dti - dti
obj = tm.box_expected(tdi, box)
other = tm.box_expected(dti, box)
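        # object-dtype datetime values force an elementwise (object) fallback,
        # which pandas reports with a PerformanceWarning; the tz-aware DataFrame
        # case is the one combination that does not warn here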
warn = None
if box is not pd.DataFrame or tz_naive_fixture is None:
warn = PerformanceWarning
with tm.assert_produces_warning(warn):
result = obj + other.astype(object)
tm.assert_equal(result, other)
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and timedelta-like
def test_tdi_iadd_timedeltalike(self, two_hours, box_with_array):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range("1 days", "10 days")
expected = timedelta_range("1 days 02:00:00", "10 days 02:00:00", freq="D")
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
orig_rng = rng
rng += two_hours
tm.assert_equal(rng, expected)
if box_with_array is not pd.Index:
# Check that operation is actually inplace
tm.assert_equal(orig_rng, expected)
def test_tdi_isub_timedeltalike(self, two_hours, box_with_array):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range("1 days", "10 days")
expected = timedelta_range("0 days 22:00:00", "9 days 22:00:00")
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
orig_rng = rng
rng -= two_hours
tm.assert_equal(rng, expected)
if box_with_array is not pd.Index:
# Check that operation is actually inplace
tm.assert_equal(orig_rng, expected)
# -------------------------------------------------------------
def test_tdi_ops_attributes(self):
rng = timedelta_range("2 days", periods=5, freq="2D", name="x")
result = rng + 1 * rng.freq
exp = timedelta_range("4 days", periods=5, freq="2D", name="x")
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
result = rng - 2 * rng.freq
exp = timedelta_range("-2 days", periods=5, freq="2D", name="x")
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
result = rng * 2
exp = timedelta_range("4 days", periods=5, freq="4D", name="x")
tm.assert_index_equal(result, exp)
assert result.freq == "4D"
result = rng / 2
exp = timedelta_range("1 days", periods=5, freq="D", name="x")
tm.assert_index_equal(result, exp)
assert result.freq == "D"
result = -rng
exp = timedelta_range("-2 days", periods=5, freq="-2D", name="x")
tm.assert_index_equal(result, exp)
assert result.freq == "-2D"
rng = pd.timedelta_range("-2 days", periods=5, freq="D", name="x")
result = abs(rng)
exp = TimedeltaIndex(
["2 days", "1 days", "0 days", "1 days", "2 days"], name="x"
)
tm.assert_index_equal(result, exp)
assert result.freq is None
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(["24658 days 11:15:00", "NaT"])
tsneg = Timestamp("1950-01-01")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
# TODO: Make raised error message more informative and test
with pytest.raises(OutOfBoundsDatetime, match="10155196800000000000"):
pd.to_timedelta(106580, "D") + Timestamp("2000")
with pytest.raises(OutOfBoundsDatetime, match="10155196800000000000"):
Timestamp("2000") + pd.to_timedelta(106580, "D")
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], "D") + Timestamp("2000")
with pytest.raises(OverflowError, match=msg):
Timestamp("2000") + pd.to_timedelta([106580], "D")
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta("1 days")
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(["5 days", _NaT]) - Timedelta("1 days")
with pytest.raises(OverflowError, match=msg):
(
pd.to_timedelta([_NaT, "5 days", "1 hours"])
- pd.to_timedelta(["7 seconds", _NaT, "4 hours"])
)
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = pd.to_timedelta([pd.NaT]) - Timedelta("1 days")
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(["4 days", pd.NaT])
result = pd.to_timedelta(["5 days", pd.NaT]) - Timedelta("1 days")
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, "5 hours"])
result = pd.to_timedelta([pd.NaT, "5 days", "1 hours"]) + pd.to_timedelta(
["7 seconds", pd.NaT, "4 hours"]
)
tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(["00:00:01"]))
s2 = pd.to_timedelta(Series(["00:00:02"]))
msg = r"dtype datetime64\[ns\] cannot be converted to timedelta64\[ns\]"
with pytest.raises(TypeError, match=msg):
# Passing datetime64-dtype data to TimedeltaIndex is no longer
# supported GH#29794
pd.to_timedelta(Series([pd.NaT]))
sn = pd.to_timedelta(Series([pd.NaT], dtype="m8[ns]"))
df1 = DataFrame(["00:00:01"]).apply(pd.to_timedelta)
df2 = DataFrame(["00:00:02"]).apply(pd.to_timedelta)
with pytest.raises(TypeError, match=msg):
# Passing datetime64-dtype data to TimedeltaIndex is no longer
# supported GH#29794
DataFrame([pd.NaT]).apply(pd.to_timedelta)
dfn = DataFrame([pd.NaT.value]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta("00:00:01")
scalar2 = pd.to_timedelta("00:00:02")
timedelta_NaT = pd.to_timedelta("NaT")
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
msg = "unsupported operand type"
with pytest.raises(TypeError, match=msg):
s1 + np.nan
with pytest.raises(TypeError, match=msg):
np.nan + s1
with pytest.raises(TypeError, match=msg):
s1 - np.nan
with pytest.raises(TypeError, match=msg):
-np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
msg = "cannot subtract a datelike from|unsupported operand type"
with pytest.raises(TypeError, match=msg):
df1 + np.nan
with pytest.raises(TypeError, match=msg):
df1 - np.nan
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
def test_operators_timedelta64(self):
# series ops
v1 = pd.date_range("2012-1-1", periods=3, freq="D")
v2 = pd.date_range("2012-1-2", periods=3, freq="D")
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24, rs.index).astype("int64").astype("timedelta64[ns]")
tm.assert_series_equal(rs, xp)
assert rs.dtype == "timedelta64[ns]"
df = DataFrame({"A": v1})
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == "timedelta64[ns]"
# series on the rhs
result = df["A"] - df["A"].shift()
assert result.dtype == "timedelta64[ns]"
result = df["A"] + td
assert result.dtype == "M8[ns]"
# scalar Timestamp on rhs
maxa = df["A"].max()
assert isinstance(maxa, Timestamp)
resultb = df["A"] - df["A"].max()
assert resultb.dtype == "timedelta64[ns]"
# timestamp on lhs
result = resultb + df["A"]
values = [Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
expected = Series(values, name="A")
tm.assert_series_equal(result, expected)
# datetimes on rhs
result = df["A"] - datetime(2001, 1, 1)
expected = Series([timedelta(days=4017 + i) for i in range(3)], name="A")
tm.assert_series_equal(result, expected)
assert result.dtype == "m8[ns]"
d = datetime(2001, 1, 1, 3, 4)
resulta = df["A"] - d
assert resulta.dtype == "m8[ns]"
# roundtrip
resultb = resulta + d
tm.assert_series_equal(df["A"], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df["A"] + td
resultb = resulta - td
tm.assert_series_equal(resultb, df["A"])
assert resultb.dtype == "M8[ns]"
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df["A"] + td
resultb = resulta - td
tm.assert_series_equal(df["A"], resultb)
assert resultb.dtype == "M8[ns]"
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
assert rs[2] == value
def test_timedelta64_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta("1s")])
nat_series_dtype_timedelta = Series([NaT, NaT], dtype="timedelta64[ns]")
single_nat_dtype_timedelta = Series([NaT], dtype="timedelta64[ns]")
# subtraction
tm.assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
tm.assert_series_equal(-NaT + timedelta_series, nat_series_dtype_timedelta)
tm.assert_series_equal(
timedelta_series - single_nat_dtype_timedelta, nat_series_dtype_timedelta
)
tm.assert_series_equal(
-single_nat_dtype_timedelta + timedelta_series, nat_series_dtype_timedelta
)
# addition
tm.assert_series_equal(
nat_series_dtype_timedelta + NaT, nat_series_dtype_timedelta
)
tm.assert_series_equal(
NaT + nat_series_dtype_timedelta, nat_series_dtype_timedelta
)
tm.assert_series_equal(
nat_series_dtype_timedelta + single_nat_dtype_timedelta,
nat_series_dtype_timedelta,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timedelta,
nat_series_dtype_timedelta,
)
tm.assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)
tm.assert_series_equal(
timedelta_series + single_nat_dtype_timedelta, nat_series_dtype_timedelta
)
tm.assert_series_equal(
single_nat_dtype_timedelta + timedelta_series, nat_series_dtype_timedelta
)
tm.assert_series_equal(
nat_series_dtype_timedelta + NaT, nat_series_dtype_timedelta
)
tm.assert_series_equal(
NaT + nat_series_dtype_timedelta, nat_series_dtype_timedelta
)
tm.assert_series_equal(
nat_series_dtype_timedelta + single_nat_dtype_timedelta,
nat_series_dtype_timedelta,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timedelta,
nat_series_dtype_timedelta,
)
# multiplication
tm.assert_series_equal(
nat_series_dtype_timedelta * 1.0, nat_series_dtype_timedelta
)
tm.assert_series_equal(
1.0 * nat_series_dtype_timedelta, nat_series_dtype_timedelta
)
tm.assert_series_equal(timedelta_series * 1, timedelta_series)
tm.assert_series_equal(1 * timedelta_series, timedelta_series)
tm.assert_series_equal(timedelta_series * 1.5, Series([NaT, Timedelta("1.5s")]))
tm.assert_series_equal(1.5 * timedelta_series, Series([NaT, Timedelta("1.5s")]))
tm.assert_series_equal(timedelta_series * np.nan, nat_series_dtype_timedelta)
tm.assert_series_equal(np.nan * timedelta_series, nat_series_dtype_timedelta)
# division
tm.assert_series_equal(timedelta_series / 2, Series([NaT, Timedelta("0.5s")]))
tm.assert_series_equal(timedelta_series / 2.0, Series([NaT, Timedelta("0.5s")]))
tm.assert_series_equal(timedelta_series / np.nan, nat_series_dtype_timedelta)
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box_with_array):
idx = TimedeltaIndex(["1 day", "2 day"])
idx = tm.box_expected(idx, box_with_array)
msg = (
"cannot subtract a datelike from|"
"Could not operate|"
"cannot perform operation"
)
with pytest.raises(TypeError, match=msg):
idx - Timestamp("2011-01-01")
def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
# GH#23215
# TODO: parametrize over scalar datetime types?
tz = tz_naive_fixture
other = Timestamp("2011-01-01", tz=tz)
idx = TimedeltaIndex(["1 day", "2 day"])
expected = DatetimeIndex(["2011-01-02", "2011-01-03"], tz=tz)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx + other
tm.assert_equal(result, expected)
result = other + idx
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"ts",
[
Timestamp("2012-01-01"),
Timestamp("2012-01-01").to_pydatetime(),
Timestamp("2012-01-01").to_datetime64(),
],
)
def test_td64arr_add_sub_datetimelike_scalar(self, ts, box_with_array):
# GH#11925, GH#29558
tdi = timedelta_range("1 day", periods=3)
expected = pd.date_range("2012-01-02", periods=3)
tdarr = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(ts + tdarr, expected)
tm.assert_equal(tdarr + ts, expected)
expected2 = pd.date_range("2011-12-31", periods=3, freq="-1D")
expected2 = tm.box_expected(expected2, box_with_array)
tm.assert_equal(ts - tdarr, expected2)
tm.assert_equal(ts + (-tdarr), expected2)
msg = "cannot subtract a datelike"
with pytest.raises(TypeError, match=msg):
tdarr - ts
def test_tdi_sub_dt64_array(self, box_with_array):
dti = pd.date_range("2016-01-01", periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
msg = "cannot subtract a datelike from"
with pytest.raises(TypeError, match=msg):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_with_array):
dti = pd.date_range("2016-01-01", periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_add_datetime64_nat(self, box_with_array):
# GH#23215
other = np.datetime64("NaT")
tdi = timedelta_range("1 day", periods=3)
expected = DatetimeIndex(["NaT", "NaT", "NaT"])
tdser = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(tdser + other, expected)
tm.assert_equal(other + tdser, expected)
# ------------------------------------------------------------------
# Invalid __add__/__sub__ operations
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("tdi_freq", [None, "H"])
def test_td64arr_sub_periodlike(self, box_with_array, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(["1 hours", "2 hours"], freq=tdi_freq)
dti = Timestamp("2018-03-07 17:16:40") + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box_with_array)
msg = "cannot subtract|unsupported operand type"
with pytest.raises(TypeError, match=msg):
tdi - pi
# GH#13078 subtraction of Period scalar not supported
with pytest.raises(TypeError, match=msg):
tdi - pi[0]
@pytest.mark.parametrize(
"other",
[
# GH#12624 for str case
"a",
# GH#19123
1,
1.5,
np.array(2),
],
)
def test_td64arr_addsub_numeric_scalar_invalid(self, box_with_array, other):
        # vector-like others are tested in test_td64arr_addsub_numeric_arr_invalid
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
tdarr = tm.box_expected(tdser, box_with_array)
assert_invalid_addsub_type(tdarr, other)
@pytest.mark.parametrize(
"vec",
[
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3]),
DataFrame([[1, 2, 3]]),
],
ids=lambda x: type(x).__name__,
)
def test_td64arr_addsub_numeric_arr_invalid(
self, box_with_array, vec, any_real_dtype
):
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
tdarr = tm.box_expected(tdser, box_with_array)
vector = vec.astype(any_real_dtype)
assert_invalid_addsub_type(tdarr, vector)
def test_td64arr_add_sub_int(self, box_with_array, one):
# Variants of `one` for #19012, deprecated GH#22535
rng = timedelta_range("1 days 09:00:00", freq="H", periods=10)
tdarr = tm.box_expected(rng, box_with_array)
msg = "Addition/subtraction of integers"
assert_invalid_addsub_type(tdarr, one, msg)
        # TODO: get inplace ops into assert_invalid_addsub_type
with pytest.raises(TypeError, match=msg):
tdarr += one
with pytest.raises(TypeError, match=msg):
tdarr -= one
def test_td64arr_add_sub_integer_array(self, box_with_array):
# GH#19959, deprecated GH#22535
# GH#22696 for DataFrame case, check that we don't dispatch to numpy
# implementation, which treats int64 as m8[ns]
box = box_with_array
xbox = np.ndarray if box is pd.array else box
rng = timedelta_range("1 days 09:00:00", freq="H", periods=3)
tdarr = tm.box_expected(rng, box)
other = tm.box_expected([4, 3, 2], xbox)
msg = "Addition/subtraction of integers and integer-arrays"
assert_invalid_addsub_type(tdarr, other, msg)
def test_td64arr_addsub_integer_array_no_freq(self, box_with_array):
# GH#19959
box = box_with_array
xbox = np.ndarray if box is pd.array else box
tdi = TimedeltaIndex(["1 Day", "NaT", "3 Hours"])
tdarr = tm.box_expected(tdi, box)
other = tm.box_expected([14, -1, 16], xbox)
msg = "Addition/subtraction of integers"
assert_invalid_addsub_type(tdarr, other, msg)
# ------------------------------------------------------------------
# Operations with timedelta-like others
def test_td64arr_add_td64_array(self, box_with_array):
box = box_with_array
dti = pd.date_range("2016-01-01", periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box_with_array):
box = box_with_array
dti = pd.date_range("2016-01-01", periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
def test_td64arr_add_sub_tdi(self, box_with_array, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
box = box_with_array
if box is pd.DataFrame and names[1] != names[0]:
pytest.skip(
"Name propagation for DataFrame does not behave like "
"it does for Index/Series"
)
tdi = TimedeltaIndex(["0 days", "1 day"], name=names[0])
tdi = np.array(tdi) if box in [tm.to_array, pd.array] else tdi
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series(
[Timedelta(hours=3), Timedelta(days=1, hours=4)], name=names[2]
)
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
assert_dtype(result, "timedelta64[ns]")
result = ser + tdi
tm.assert_equal(result, expected)
assert_dtype(result, "timedelta64[ns]")
expected = Series(
[Timedelta(hours=-3), Timedelta(days=1, hours=-4)], name=names[2]
)
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
assert_dtype(result, "timedelta64[ns]")
result = ser - tdi
tm.assert_equal(result, -expected)
assert_dtype(result, "timedelta64[ns]")
def test_td64arr_add_sub_td64_nat(self, box_with_array):
# GH#23320 special handling for timedelta64("NaT")
box = box_with_array
tdi = TimedeltaIndex([NaT, Timedelta("1s")])
other = np.timedelta64("NaT")
expected = TimedeltaIndex(["NaT"] * 2)
obj = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
result = other - obj
tm.assert_equal(result, expected)
def test_td64arr_sub_NaT(self, box_with_array):
# GH#18808
box = box_with_array
ser = Series([NaT, Timedelta("1s")])
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, two_hours, box_with_array):
# only test adding/sub offsets as + is now numeric
# GH#10699 for Tick cases
box = box_with_array
rng = timedelta_range("1 days", "10 days")
expected = timedelta_range("1 days 02:00:00", "10 days 02:00:00", freq="D")
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + two_hours
tm.assert_equal(result, expected)
result = two_hours + rng
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, two_hours, box_with_array):
# only test adding/sub offsets as - is now numeric
# GH#10699 for Tick cases
box = box_with_array
rng = timedelta_range("1 days", "10 days")
expected = timedelta_range("0 days 22:00:00", "9 days 22:00:00")
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - two_hours
tm.assert_equal(result, expected)
result = two_hours - rng
tm.assert_equal(result, -expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
def test_td64arr_add_offset_index(self, names, box_with_array):
# GH#18849, GH#19744
box = box_with_array
if box is pd.DataFrame and names[1] != names[0]:
pytest.skip(
"Name propagation for DataFrame does not behave like "
"it does for Index/Series"
)
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], name=names[1])
other = np.array(other) if box in [tm.to_array, pd.array] else other
expected = TimedeltaIndex(
[tdi[n] + other[n] for n in range(len(tdi))], freq="infer", name=names[2]
)
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box_with_array):
# GH#18849
box = box_with_array
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex(
[tdi[n] + other[n] for n in range(len(tdi))], freq="infer"
)
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
def test_td64arr_sub_offset_index(self, names, box_with_array):
# GH#18824, GH#19744
box = box_with_array
xbox = box if box not in [tm.to_array, pd.array] else pd.Index
exname = names[2] if box not in [tm.to_array, pd.array] else names[1]
if box is pd.DataFrame and names[1] != names[0]:
pytest.skip(
"Name propagation for DataFrame does not behave like "
"it does for Index/Series"
)
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], name=names[1])
expected = TimedeltaIndex(
[tdi[n] - other[n] for n in range(len(tdi))], freq="infer", name=exname
)
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, xbox)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_with_array):
# GH#18824
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex(
[tdi[n] - other[n] for n in range(len(tdi))], freq="infer"
)
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_with_offset_series(self, names, box_with_array):
# GH#18849
box = box_with_array
box2 = Series if box in [pd.Index, tm.to_array, pd.array] else box
if box is pd.DataFrame:
# Since we are operating with a DataFrame and a non-DataFrame,
# the non-DataFrame is cast to Series and its name ignored.
exname = names[0]
elif box in [tm.to_array, pd.array]:
exname = names[1]
else:
exname = names[2]
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))], name=exname)
obj = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = obj + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + obj
tm.assert_equal(res2, expected_add)
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))], name=exname)
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = obj - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize("obox", [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_with_array):
# GH#18824
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"])
tdi = tm.box_expected(tdi, box_with_array)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
msg = "has incorrect type|cannot add the type MonthEnd"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
# ------------------------------------------------------------------
# Unsorted
def test_td64arr_add_sub_object_array(self, box_with_array):
box = box_with_array
xbox = np.ndarray if box is pd.array else box
tdi = pd.timedelta_range("1 day", periods=3, freq="D")
tdarr = tm.box_expected(tdi, box)
other = np.array(
[Timedelta(days=1), pd.offsets.Day(2), Timestamp("2000-01-04")]
)
with tm.assert_produces_warning(PerformanceWarning):
result = tdarr + other
expected = pd.Index(
[Timedelta(days=2), Timedelta(days=4), Timestamp("2000-01-07")]
)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
msg = "unsupported operand type|cannot subtract a datelike"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
tdarr - other
with tm.assert_produces_warning(PerformanceWarning):
result = other - tdarr
expected = pd.Index([Timedelta(0), Timedelta(0), Timestamp("2000-01-01")])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
class TestTimedeltaArraylikeMulDivOps:
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, two_hours, box_with_array):
rng = timedelta_range("1 days", "10 days", name="foo")
rng = tm.box_expected(rng, box_with_array)
msg = "argument must be an integer|cannot use operands with types dtype"
with pytest.raises(TypeError, match=msg):
rng * two_hours
def test_tdi_mul_int_array_zerodim(self, box_with_array):
rng5 = np.arange(5, dtype="int64")
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * np.array(5, dtype="int64")
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_with_array):
rng5 = np.arange(5, dtype="int64")
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * rng5
tm.assert_equal(result, expected)
def test_tdi_mul_int_series(self, box_with_array):
box = box_with_array
xbox = pd.Series if box in [pd.Index, tm.to_array, pd.array] else box
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
expected = TimedeltaIndex(np.arange(5, dtype="int64") ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, xbox)
result = idx * Series(np.arange(5, dtype="int64"))
tm.assert_equal(result, expected)
def test_tdi_mul_float_series(self, box_with_array):
box = box_with_array
xbox = pd.Series if box in [pd.Index, tm.to_array, pd.array] else box
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box)
rng5f = np.arange(5, dtype="float64")
expected = TimedeltaIndex(rng5f * (rng5f + 1.0))
expected = tm.box_expected(expected, xbox)
result = idx * Series(rng5f + 1.0)
tm.assert_equal(result, expected)
# TODO: Put Series/DataFrame in others?
@pytest.mark.parametrize(
"other",
[
np.arange(1, 11),
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11),
],
ids=lambda x: type(x).__name__,
)
def test_tdi_rmul_arraylike(self, other, box_with_array):
box = box_with_array
xbox = get_upcast_box(box, other)
tdi = TimedeltaIndex(["1 Day"] * 10)
expected = timedelta_range("1 days", "10 days")
expected._data.freq = None
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, xbox)
result = other * tdi
tm.assert_equal(result, expected)
commute = tdi * other
tm.assert_equal(commute, expected)
# ------------------------------------------------------------------
# __div__, __rdiv__
def test_td64arr_div_nat_invalid(self, box_with_array):
# don't allow division by NaT (maybe could in the future)
rng = timedelta_range("1 days", "10 days", name="foo")
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError, match="unsupported operand type"):
rng / pd.NaT
with pytest.raises(TypeError, match="Cannot divide NaTType by"):
pd.NaT / rng
def test_td64arr_div_td64nat(self, box_with_array):
# GH#23829
box = box_with_array
xbox = np.ndarray if box is pd.array else box
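        # division results for pd.array-boxed inputs come back as plain ndarrays,
        # so the expected values are boxed accordingly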
rng = timedelta_range("1 days", "10 days")
rng = tm.box_expected(rng, box)
other = np.timedelta64("NaT")
expected = np.array([np.nan] * 10)
expected = tm.box_expected(expected, xbox)
result = rng / other
tm.assert_equal(result, expected)
result = other / rng
tm.assert_equal(result, expected)
def test_td64arr_div_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
result = idx / 1
tm.assert_equal(result, idx)
with pytest.raises(TypeError, match="Cannot divide"):
# GH#23829
1 / idx
def test_td64arr_div_tdlike_scalar(self, two_hours, box_with_array):
# GH#20088, GH#22163 ensure DataFrame returns correct dtype
box = box_with_array
xbox = np.ndarray if box is pd.array else box
rng = timedelta_range("1 days", "10 days", name="foo")
expected = pd.Float64Index((np.arange(10) + 1) * 12, name="foo")
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, xbox)
result = rng / two_hours
tm.assert_equal(result, expected)
result = two_hours / rng
expected = 1 / expected
tm.assert_equal(result, expected)
@pytest.mark.parametrize("m", [1, 3, 10])
@pytest.mark.parametrize("unit", ["D", "h", "m", "s", "ms", "us", "ns"])
def test_td64arr_div_td64_scalar(self, m, unit, box_with_array):
box = box_with_array
xbox = np.ndarray if box is pd.array else box
startdate = Series(pd.date_range("2013-01-01", "2013-01-03"))
enddate = Series(pd.date_range("2013-03-01", "2013-03-03"))
ser = enddate - startdate
ser[2] = np.nan
flat = ser
ser = tm.box_expected(ser, box)
# op
expected = Series([x / np.timedelta64(m, unit) for x in flat])
expected = tm.box_expected(expected, xbox)
result = ser / np.timedelta64(m, unit)
tm.assert_equal(result, expected)
# reverse op
expected = Series([Timedelta(np.timedelta64(m, unit)) / x for x in flat])
expected = tm.box_expected(expected, xbox)
result = np.timedelta64(m, unit) / ser
tm.assert_equal(result, expected)
def test_td64arr_div_tdlike_scalar_with_nat(self, two_hours, box_with_array):
box = box_with_array
xbox = np.ndarray if box is pd.array else box
rng = TimedeltaIndex(["1 days", pd.NaT, "2 days"], name="foo")
expected = pd.Float64Index([12, np.nan, 24], name="foo")
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, xbox)
result = rng / two_hours
tm.assert_equal(result, expected)
result = two_hours / rng
expected = 1 / expected
tm.assert_equal(result, expected)
def test_td64arr_div_td64_ndarray(self, box_with_array):
# GH#22631
box = box_with_array
xbox = np.ndarray if box is pd.array else box
rng = TimedeltaIndex(["1 days", pd.NaT, "2 days"])
expected = pd.Float64Index([12, np.nan, 24])
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, xbox)
other = np.array([2, 4, 2], dtype="m8[h]")
result = rng / other
tm.assert_equal(result, expected)
result = rng / tm.box_expected(other, box)
tm.assert_equal(result, expected)
result = rng / other.astype(object)
tm.assert_equal(result, expected)
result = rng / list(other)
tm.assert_equal(result, expected)
# reversed op
expected = 1 / expected
result = other / rng
tm.assert_equal(result, expected)
result = tm.box_expected(other, box) / rng
tm.assert_equal(result, expected)
result = other.astype(object) / rng
tm.assert_equal(result, expected)
result = list(other) / rng
tm.assert_equal(result, expected)
def test_tdarr_div_length_mismatch(self, box_with_array):
rng = TimedeltaIndex(["1 days", pd.NaT, "2 days"])
mismatched = [1, 2, 3, 4]
rng = tm.box_expected(rng, box_with_array)
msg = "Cannot divide vectors|Unable to coerce to Series"
for obj in [mismatched, mismatched[:2]]:
# one shorter, one longer
for other in [obj, np.array(obj), pd.Index(obj)]:
with pytest.raises(ValueError, match=msg):
rng / other
with pytest.raises(ValueError, match=msg):
other / rng
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
def test_td64arr_floordiv_td64arr_with_nat(self, box_with_array):
# GH#35529
box = box_with_array
xbox = np.ndarray if box is pd.array else box
left = Series([1000, 222330, 30], dtype="timedelta64[ns]")
right = Series([1000, 222330, None], dtype="timedelta64[ns]")
left = tm.box_expected(left, box)
right = tm.box_expected(right, box)
expected = np.array([1.0, 1.0, np.nan], dtype=np.float64)
expected = tm.box_expected(expected, xbox)
result = left // right
tm.assert_equal(result, expected)
# case that goes through __rfloordiv__ with arraylike
result = np.asarray(left) // right
tm.assert_equal(result, expected)
def test_td64arr_floordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
box = box_with_array
xbox = np.ndarray if box is pd.array else box
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box, transpose=False)
expected = tm.box_expected(expected, xbox, transpose=False)
result = td1 // scalar_td
tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
box = box_with_array
xbox = np.ndarray if box is pd.array else box
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box, transpose=False)
expected = tm.box_expected(expected, xbox, transpose=False)
result = scalar_td // td1
tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar_explicit(self, box_with_array, scalar_td):
# GH#18831
box = box_with_array
xbox = np.ndarray if box is pd.array else box
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box, transpose=False)
expected = tm.box_expected(expected, xbox, transpose=False)
# We can test __rfloordiv__ using this syntax,
# see `test_timedelta_rfloordiv`
result = td1.__rfloordiv__(scalar_td)
tm.assert_equal(result, expected)
def test_td64arr_floordiv_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
result = idx // 1
tm.assert_equal(result, idx)
pattern = "floor_divide cannot use operands|Cannot divide int by Timedelta*"
with pytest.raises(TypeError, match=pattern):
1 // idx
def test_td64arr_floordiv_tdlike_scalar(self, two_hours, box_with_array):
box = box_with_array
xbox = np.ndarray if box is pd.array else box
tdi = timedelta_range("1 days", "10 days", name="foo")
expected = pd.Int64Index((np.arange(10) + 1) * 12, name="foo")
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, xbox)
result = tdi // two_hours
tm.assert_equal(result, expected)
# TODO: Is this redundant with test_td64arr_floordiv_tdlike_scalar?
@pytest.mark.parametrize(
"scalar_td",
[
timedelta(minutes=10, seconds=7),
Timedelta("10m7s"),
Timedelta("10m7s").to_timedelta64(),
],
ids=lambda x: type(x).__name__,
)
def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box_with_array):
# GH#19125
box = box_with_array
xbox = np.ndarray if box_with_array is pd.array else box_with_array
tdi = TimedeltaIndex(["00:05:03", "00:05:03", pd.NaT], freq=None)
expected = pd.Index([2.0, 2.0, np.nan])
tdi = tm.box_expected(tdi, box, transpose=False)
expected = tm.box_expected(expected, xbox, transpose=False)
res = tdi.__rfloordiv__(scalar_td)
tm.assert_equal(res, expected)
expected = pd.Index([0.0, 0.0, np.nan])
expected = tm.box_expected(expected, xbox, transpose=False)
res = tdi // (scalar_td)
tm.assert_equal(res, expected)
# ------------------------------------------------------------------
# mod, divmod
# TODO: operations with timedelta-like arrays, numeric arrays,
# reversed ops
def test_td64arr_mod_tdscalar(self, box_with_array, three_days):
tdi = timedelta_range("1 Day", "9 days")
tdarr = tm.box_expected(tdi, box_with_array)
expected = TimedeltaIndex(["1 Day", "2 Days", "0 Days"] * 3)
expected = tm.box_expected(expected, box_with_array)
result = tdarr % three_days
tm.assert_equal(result, expected)
warn = None
if box_with_array is pd.DataFrame and isinstance(three_days, pd.DateOffset):
warn = PerformanceWarning
with tm.assert_produces_warning(warn):
result = divmod(tdarr, three_days)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], tdarr // three_days)
def test_td64arr_mod_int(self, box_with_array):
tdi = timedelta_range("1 ns", "10 ns", periods=10)
tdarr = tm.box_expected(tdi, box_with_array)
expected = TimedeltaIndex(["1 ns", "0 ns"] * 5)
expected = tm.box_expected(expected, box_with_array)
result = tdarr % 2
tm.assert_equal(result, expected)
msg = "Cannot divide int by"
with pytest.raises(TypeError, match=msg):
2 % tdarr
result = divmod(tdarr, 2)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], tdarr // 2)
def test_td64arr_rmod_tdscalar(self, box_with_array, three_days):
tdi = timedelta_range("1 Day", "9 days")
tdarr = tm.box_expected(tdi, box_with_array)
expected = ["0 Days", "1 Day", "0 Days"] + ["3 Days"] * 6
expected = TimedeltaIndex(expected)
expected = tm.box_expected(expected, box_with_array)
result = three_days % tdarr
tm.assert_equal(result, expected)
result = divmod(three_days, tdarr)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], three_days // tdarr)
# ------------------------------------------------------------------
# Operations with invalid others
def test_td64arr_mul_tdscalar_invalid(self, box_with_array, scalar_td):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box_with_array)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = "operate|unsupported|cannot|not supported"
with pytest.raises(TypeError, match=pattern):
td1 * scalar_td
with pytest.raises(TypeError, match=pattern):
scalar_td * td1
def test_td64arr_mul_too_short_raises(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
msg = (
"cannot use operands with types dtype|"
"Cannot multiply with unequal lengths|"
"Unable to coerce to Series"
)
with pytest.raises(TypeError, match=msg):
# length check before dtype check
idx * idx[:3]
with pytest.raises(ValueError, match=msg):
idx * np.array([1, 2])
def test_td64arr_mul_td64arr_raises(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
msg = "cannot use operands with types dtype"
with pytest.raises(TypeError, match=msg):
idx * idx
# ------------------------------------------------------------------
# Operations with numeric others
def test_td64arr_mul_numeric_scalar(self, box_with_array, one):
# GH#4521
# divide/multiply by integers
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
expected = Series(["-59 Days", "-59 Days", "NaT"], dtype="timedelta64[ns]")
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdser * (-one)
tm.assert_equal(result, expected)
result = (-one) * tdser
tm.assert_equal(result, expected)
expected = Series(["118 Days", "118 Days", "NaT"], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
result = tdser * (2 * one)
tm.assert_equal(result, expected)
result = (2 * one) * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize("two", [2, 2.0, np.array(2), np.array(2.0)])
def test_td64arr_div_numeric_scalar(self, box_with_array, two):
# GH#4521
# divide/multiply by integers
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
expected = Series(["29.5D", "29.5D", "NaT"], dtype="timedelta64[ns]")
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdser / two
tm.assert_equal(result, expected)
with pytest.raises(TypeError, match="Cannot divide"):
two / tdser
@pytest.mark.parametrize(
"vector",
[np.array([20, 30, 40]), pd.Index([20, 30, 40]), Series([20, 30, 40])],
ids=lambda x: type(x).__name__,
)
def test_td64arr_rmul_numeric_array(self, box_with_array, vector, any_real_dtype):
# GH#4521
# divide/multiply by integers
xbox = get_upcast_box(box_with_array, vector)
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
vector = vector.astype(any_real_dtype)
expected = Series(["1180 Days", "1770 Days", "NaT"], dtype="timedelta64[ns]")
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, xbox)
result = tdser * vector
tm.assert_equal(result, expected)
result = vector * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"vector",
[np.array([20, 30, 40]), pd.Index([20, 30, 40]), Series([20, 30, 40])],
ids=lambda x: type(x).__name__,
)
def test_td64arr_div_numeric_array(self, box_with_array, vector, any_real_dtype):
# GH#4521
# divide/multiply by integers
xbox = get_upcast_box(box_with_array, vector)
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
vector = vector.astype(any_real_dtype)
expected = Series(["2.95D", "1D 23H 12m", "NaT"], dtype="timedelta64[ns]")
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, xbox)
result = tdser / vector
tm.assert_equal(result, expected)
pattern = (
"true_divide'? cannot use operands|"
"cannot perform __div__|"
"cannot perform __truediv__|"
"unsupported operand|"
"Cannot divide"
)
with pytest.raises(TypeError, match=pattern):
vector / tdser
if not isinstance(vector, pd.Index):
# Index.__rdiv__ won't try to operate elementwise, just raises
result = tdser / vector.astype(object)
if box_with_array is pd.DataFrame:
expected = [tdser.iloc[0, n] / vector[n] for n in range(len(vector))]
else:
expected = [tdser[n] / vector[n] for n in range(len(tdser))]
expected = pd.Index(expected) # do dtype inference
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
with pytest.raises(TypeError, match=pattern):
vector.astype(object) / tdser
def test_td64arr_mul_int_series(self, box_with_array, names, request):
# GH#19042 test for correct name attachment
box = box_with_array
if box_with_array is pd.DataFrame and names[2] is None:
reason = "broadcasts along wrong axis, but doesn't raise"
request.node.add_marker(pytest.mark.xfail(reason=reason))
exname = names[2] if box not in [tm.to_array, pd.array] else names[1]
tdi = TimedeltaIndex(
["0days", "1day", "2days", "3days", "4days"], name=names[0]
)
# TODO: Should we be parametrizing over types for `ser` too?
ser = Series([0, 1, 2, 3, 4], dtype=np.int64, name=names[1])
expected = Series(
["0days", "1day", "4days", "9days", "16days"],
dtype="timedelta64[ns]",
name=exname,
)
tdi = tm.box_expected(tdi, box)
xbox = (
Series
if (box is pd.Index or box is tm.to_array or box is pd.array)
else box
)
expected = tm.box_expected(expected, xbox)
result = ser * tdi
tm.assert_equal(result, expected)
# The direct operation tdi * ser still needs to be fixed.
result = ser.__rmul__(tdi)
if box is pd.DataFrame:
assert result is NotImplemented
else:
tm.assert_equal(result, expected)
# TODO: Should we be parametrizing over types for `ser` too?
def test_float_series_rdiv_td64arr(self, box_with_array, names):
# GH#19042 test for correct name attachment
# TODO: the direct operation TimedeltaIndex / Series still
# needs to be fixed.
box = box_with_array
tdi = TimedeltaIndex(
["0days", "1day", "2days", "3days", "4days"], name=names[0]
)
ser = Series([1.5, 3, 4.5, 6, 7.5], dtype=np.float64, name=names[1])
xname = names[2] if box not in [tm.to_array, pd.array] else names[1]
expected = Series(
[tdi[n] / ser[n] for n in range(len(ser))],
dtype="timedelta64[ns]",
name=xname,
)
xbox = box
if box in [pd.Index, tm.to_array, pd.array] and type(ser) is Series:
xbox = Series
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, xbox)
result = ser.__rtruediv__(tdi)
if box is pd.DataFrame:
# TODO: Should we skip this case sooner or test something else?
assert result is NotImplemented
else:
tm.assert_equal(result, expected)
class TestTimedelta64ArrayLikeArithmetic:
# Arithmetic tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all arithmetic
# tests will eventually end up here.
def test_td64arr_pow_invalid(self, scalar_td, box_with_array):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box_with_array)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = "operate|unsupported|cannot|not supported"
with pytest.raises(TypeError, match=pattern):
scalar_td ** td1
with pytest.raises(TypeError, match=pattern):
td1 ** scalar_td
def test_add_timestamp_to_timedelta():
# GH: 35897
timestamp = Timestamp.now()
result = timestamp + pd.timedelta_range("0s", "1s", periods=31)
expected = DatetimeIndex(
[
timestamp
+ (
pd.to_timedelta("0.033333333s") * i
+ pd.to_timedelta("0.000000001s") * divmod(i, 3)[0]
)
for i in range(31)
]
)
tm.assert_index_equal(result, expected)
| gpl-2.0 | 7,952,615,038,926,929,000 | 35.086403 | 88 | 0.578585 | false |
amenonsen/ansible | lib/ansible/modules/network/fortios/fortios_firewall_ssh_local_key.py | 1 | 10210 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_ssh_local_key
short_description: SSH proxy local keys in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
      user to set and modify the firewall_ssh feature and local_key category.
      Examples include all parameters and values which need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
            - Indicates if the requests towards FortiGate must use the HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
            - Ensures the FortiGate certificate is verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
version_added: 2.9
firewall_ssh_local_key:
description:
- SSH proxy local keys.
default: null
type: dict
suboptions:
name:
description:
- SSH proxy local key name.
required: true
type: str
password:
description:
- Password for SSH private key.
type: str
private_key:
description:
- SSH proxy private key, encrypted with a password.
type: str
public_key:
description:
- SSH proxy public key.
type: str
source:
description:
- SSH proxy local key source type.
type: str
choices:
- built-in
- user
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: SSH proxy local keys.
fortios_firewall_ssh_local_key:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
firewall_ssh_local_key:
name: "default_name_3"
password: "<your_own_value>"
private_key: "<your_own_value>"
public_key: "<your_own_value>"
source: "built-in"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_firewall_ssh_local_key_data(json):
option_list = ['name', 'password', 'private_key',
'public_key', 'source']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for elem in data:
elem = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def firewall_ssh_local_key(data, fos):
vdom = data['vdom']
state = data['state']
firewall_ssh_local_key_data = data['firewall_ssh_local_key']
filtered_data = underscore_to_hyphen(filter_firewall_ssh_local_key_data(firewall_ssh_local_key_data))
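    # module argument names use underscores, while the FortiOS API expects
    # hyphenated keys, hence the conversion above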
if state == "present":
return fos.set('firewall.ssh',
'local-key',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('firewall.ssh',
'local-key',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
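    # a 404 in response to a DELETE means the object is already absent,
    # which is counted as success to keep removals idempotent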
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_firewall_ssh(data, fos):
if data['firewall_ssh_local_key']:
resp = firewall_ssh_local_key(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"firewall_ssh_local_key": {
"required": False, "type": "dict", "default": None,
"options": {
"name": {"required": True, "type": "str"},
"password": {"required": False, "type": "str"},
"private_key": {"required": False, "type": "str"},
"public_key": {"required": False, "type": "str"},
"source": {"required": False, "type": "str",
"choices": ["built-in", "user"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_firewall_ssh(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_firewall_ssh(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 | 2,207,557,786,850,089,500 | 29.029412 | 105 | 0.59001 | false |
lae/simplemona | manage.py | 1 | 6346 | import os
import logging
import datetime
import sqlalchemy
from flask.ext.script import Manager, Shell
from flask.ext.migrate import Migrate, MigrateCommand
from simplecoin import create_app, db, coinserv
app = create_app()
manager = Manager(app)
migrate = Migrate(app, db)
root = os.path.abspath(os.path.dirname(__file__) + '/../')
from bitcoinrpc.authproxy import AuthServiceProxy
from simplecoin.tasks import (cleanup, payout, server_status,
update_online_workers, update_pplns_est,
cache_user_donation)
from simplecoin.models import (Transaction, Threshold, DonationPercent,
BonusPayout, OneMinuteType, FiveMinuteType,
Block)
from simplecoin.utils import setfee_command
from flask import current_app, _request_ctx_stack
root = logging.getLogger()
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(message)s'))
root.addHandler(ch)
root.setLevel(logging.DEBUG)
hdlr = logging.FileHandler(app.config.get('manage_log_file', 'manage.log'))
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
root.addHandler(hdlr)
root.setLevel(logging.DEBUG)
@manager.command
def init_db():
""" Resets entire database to empty state """
with app.app_context():
db.session.commit()
db.drop_all()
db.create_all()
@manager.command
def update_minimum_fee():
""" Sets all custom fees in the database to be at least the minimum. Should
be run after changing the minimum. """
min_fee = current_app.config['minimum_perc']
DonationPercent.query.filter(DonationPercent.perc < min_fee).update(
{DonationPercent.perc: min_fee}, synchronize_session=False)
db.session.commit()
@manager.option('fee')
@manager.option('user')
def set_fee(user, fee):
""" Manually sets a fee percentage. """
setfee_command(user, fee)
@manager.option('blockhash', help="The blockhash that needs to mature for payout to occur")
@manager.option('description', help="A plaintext description of the bonus payout")
@manager.option('amount', help="The amount in satoshi")
@manager.option('user', help="The users address")
def give_bonus(user, amount, description, blockhash):
""" Manually create a BonusPayout for a user """
block = Block.query.filter_by(hash=blockhash).one()
BonusPayout.create(user, amount, description, block)
db.session.commit()
@manager.command
def list_fee_perc():
""" Gives a summary of number of users at each fee amount """
summ = {}
warn = False
for entry in DonationPercent.query.all():
summ.setdefault(entry.perc, 0)
summ[entry.perc] += 1
if entry.perc < current_app.config['minimum_perc']:
warn = True
if warn:
print("WARNING: A user is below the minimum configured value! "
"Run update_minimum_fee command to resolve.")
print "User fee summary"
print "\n".join(["{0:+3d}% Fee: {1}".format(k, v) for k, v in sorted(summ.items())])
@manager.option('-s', '--simulate', dest='simulate', default=True)
def cleanup_cmd(simulate):
""" Manually runs old share cleanup in simulate mode by default. """
simulate = simulate != "0"
cleanup(simulate=simulate)
@manager.option('-t', '--txid', dest='transaction_id')
def confirm_trans(transaction_id):
""" Manually confirms a transaction. """
trans = Transaction.query.filter_by(txid=transaction_id).first()
trans.confirmed = True
db.session.commit()
@manager.command
def reload_cached():
""" Recomputes all the cached values that normally get refreshed by tasks.
    Good to run if celery has been down, the site was just set up, etc. """
update_pplns_est()
update_online_workers()
cache_user_donation()
server_status()
@manager.command
def test_email():
""" Sends a testing email to the send address """
thresh = Threshold(emails=[current_app.config['email']['send_address']])
thresh.report_condition("Test condition")
@manager.option('-b', '--blockheight', dest='blockheight', type=int,
help='blockheight to start working backward from')
def historical_update(blockheight):
""" Very long running task. Fills out the network difficulty values for all
blocks before the site was running (or potentially recording block diff). """
def add_one_minute_diff(diff, time):
try:
m = OneMinuteType(typ='netdiff', value=diff, time=time)
db.session.add(m)
db.session.commit()
except sqlalchemy.exc.IntegrityError:
db.session.rollback()
slc = OneMinuteType.query.with_lockmode('update').filter_by(
time=time, typ='netdiff').one()
            # just average the diff of two blocks that occurred in the same second.
slc.value = (diff + slc.value) / 2
db.session.commit()
for ht in xrange(blockheight, 0, -1):
hsh = coinserv.getblockhash(ht)
info = coinserv.getblock(hsh)
add_one_minute_diff(info['difficulty'] * 1000,
datetime.datetime.utcfromtimestamp(info['time']))
current_app.logger.info("Processed block height {}".format(ht))
db.session.commit()
OneMinuteType.compress()
db.session.commit()
FiveMinuteType.compress()
db.session.commit()
@manager.option('-s', '--simulate', dest='simulate', default=True)
def payout_cmd(simulate):
""" Runs the payout task manually. Simulate mode is default. """
simulate = simulate != "0"
payout(simulate=simulate)
def make_context():
""" Setup a coinserver connection fot the shell context """
app = _request_ctx_stack.top.app
conn = AuthServiceProxy(
"http://{0}:{1}@{2}:{3}/"
.format(app.config['coinserv']['username'],
app.config['coinserv']['password'],
app.config['coinserv']['address'],
app.config['coinserv']['port']))
return dict(app=app, conn=conn)
manager.add_command("shell", Shell(make_context=make_context))
manager.add_command('db', MigrateCommand)
@manager.command
def runserver():
current_app.run(debug=True, host='0.0.0.0')
if __name__ == "__main__":
manager.run()
| mit | -8,849,437,957,526,354,000 | 32.935829 | 91 | 0.657895 | false |
margamanterola/Cinnamon | files/usr/share/cinnamon/cinnamon-settings/modules/cs_user.py | 1 | 17256 | #!/usr/bin/env python2
from SettingsWidgets import *
import gi
gi.require_version('AccountsService', '1.0')
from gi.repository import AccountsService, GLib
try:
import PAM
except:
import pam as PAM
import pexpect
import time
from random import randint
import shutil
import PIL
import os
import subprocess
class Module:
name = "user"
category = "prefs"
comment = _("Change your user preferences and password")
def __init__(self, content_box):
keywords = _("user, account, information, details")
sidePage = SidePage(_("Account details"), "cs-user", keywords, content_box, module=self)
self.sidePage = sidePage
def on_module_selected(self):
if not self.loaded:
print "Loading User module"
page = SettingsPage()
self.sidePage.add_widget(page)
settings = page.add_section(_("Account details"))
self.face_button = PictureChooserButton(num_cols=4, button_picture_size=64, menu_pictures_size=64)
self.face_button.set_alignment(0.0, 0.5)
self.face_button.set_tooltip_text(_("Click to change your picture"))
self.face_photo_menuitem = Gtk.MenuItem.new_with_label(_("Take a photo..."))
self.face_photo_menuitem.connect('activate', self._on_face_photo_menuitem_activated)
self.face_browse_menuitem = Gtk.MenuItem.new_with_label(_("Browse for more pictures..."))
self.face_browse_menuitem.connect('activate', self._on_face_browse_menuitem_activated)
face_dirs = ["/usr/share/cinnamon/faces"]
for face_dir in face_dirs:
if os.path.exists(face_dir):
pictures = sorted(os.listdir(face_dir))
for picture in pictures:
path = os.path.join(face_dir, picture)
self.face_button.add_picture(path, self._on_face_menuitem_activated)
widget = SettingsWidget()
label = Gtk.Label.new(_("Picture"))
widget.pack_start(label, False, False, 0)
widget.pack_end(self.face_button, False, False, 0)
settings.add_row(widget)
size_group = Gtk.SizeGroup.new(Gtk.SizeGroupMode.HORIZONTAL)
widget = SettingsWidget()
label = Gtk.Label.new(_("Name"))
widget.pack_start(label, False, False, 0)
self.realname_entry = EditableEntry()
size_group.add_widget(self.realname_entry)
self.realname_entry.connect("changed", self._on_realname_changed)
self.realname_entry.set_tooltip_text(_("Click to change your name"))
widget.pack_end(self.realname_entry, False, False, 0)
settings.add_row(widget)
widget = SettingsWidget()
label = Gtk.Label.new(_("Password"))
widget.pack_start(label, False, False, 0)
password_mask = Gtk.Label.new(u'\u2022\u2022\u2022\u2022\u2022\u2022')
password_mask.set_alignment(0.9, 0.5)
self.password_button = Gtk.Button()
size_group.add_widget(self.password_button)
self.password_button.add(password_mask)
self.password_button.set_relief(Gtk.ReliefStyle.NONE)
self.password_button.set_tooltip_text(_("Click to change your password"))
self.password_button.connect('activate', self._on_password_button_clicked)
self.password_button.connect('released', self._on_password_button_clicked)
widget.pack_end(self.password_button, False, False, 0)
settings.add_row(widget)
current_user = GLib.get_user_name()
self.accountService = AccountsService.UserManager.get_default().get_user(current_user)
self.accountService.connect('notify::is-loaded', self.load_user_info)
self.face_button.add_separator()
# Video devices assumed to be webcams
import glob
webcam_detected = len(glob.glob("/dev/video*")) > 0
if webcam_detected:
self.face_button.add_menuitem(self.face_photo_menuitem)
self.face_button.add_menuitem(self.face_browse_menuitem)
def update_preview_cb (self, dialog, preview):
filename = dialog.get_preview_filename()
dialog.set_preview_widget_active(False)
if filename is not None and os.path.isfile(filename):
try:
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(filename, 128, 128)
if pixbuf is not None:
preview.set_from_pixbuf (pixbuf)
dialog.set_preview_widget_active(True)
except:
pass
def _on_face_photo_menuitem_activated(self, menuitem):
        # streamer captures -t frames (here 8) from /dev/video0
if 0 != subprocess.call(["streamer", "-j90", "-t8", "-s800x600", "-o", "/tmp/temp-account-pic00.jpeg"]):
print "Error: Webcam not available"
return
# Use the 8th frame (the webcam takes a few frames to "lighten up")
path = "/tmp/temp-account-pic07.jpeg"
# Crop the image to thumbnail size
image = PIL.Image.open(path)
width, height = image.size
if width > height:
new_width = height
new_height = height
elif height > width:
new_width = width
new_height = width
else:
new_width = width
new_height = height
left = (width - new_width) / 2
top = (height - new_height) / 2
right = (width + new_width) / 2
bottom = (height + new_height) / 2
image = image.crop((left, top, right, bottom))
image.thumbnail((96, 96), PIL.Image.ANTIALIAS)
face_path = os.path.join(self.accountService.get_home_dir(), ".face")
image.save(face_path, "png")
self.accountService.set_icon_file(face_path)
self.face_button.set_picture_from_file(face_path)
def _on_face_browse_menuitem_activated(self, menuitem):
dialog = Gtk.FileChooserDialog(None, None, Gtk.FileChooserAction.OPEN, (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
dialog.set_current_folder(self.accountService.get_home_dir())
filter = Gtk.FileFilter()
filter.set_name(_("Images"))
filter.add_mime_type("image/*")
dialog.add_filter(filter)
preview = Gtk.Image()
dialog.set_preview_widget(preview);
dialog.connect("update-preview", self.update_preview_cb, preview)
dialog.set_use_preview_label(False)
response = dialog.run()
if response == Gtk.ResponseType.OK:
path = dialog.get_filename()
image = PIL.Image.open(path)
width, height = image.size
if width > height:
new_width = height
new_height = height
elif height > width:
new_width = width
new_height = width
else:
new_width = width
new_height = height
left = (width - new_width)/2
top = (height - new_height)/2
right = (width + new_width)/2
bottom = (height + new_height)/2
image = image.crop((left, top, right, bottom))
image.thumbnail((96, 96), PIL.Image.ANTIALIAS)
face_path = os.path.join(self.accountService.get_home_dir(), ".face")
image.save(face_path, "png")
self.accountService.set_icon_file(face_path)
self.face_button.set_picture_from_file(face_path)
dialog.destroy()
def _on_face_menuitem_activated(self, path):
if os.path.exists(path):
self.accountService.set_icon_file(path)
shutil.copy(path, os.path.join(self.accountService.get_home_dir(), ".face"))
return True
def load_user_info(self, user, param):
self.realname_entry.set_text(user.get_real_name())
for path in [os.path.join(self.accountService.get_home_dir(), ".face"), user.get_icon_file(), "/usr/share/cinnamon/faces/user-generic.png"]:
if os.path.exists(path):
self.face_button.set_picture_from_file(path)
break
def _on_realname_changed(self, widget, text):
self.accountService.set_real_name(text)
def _on_password_button_clicked(self, widget):
dialog = PasswordDialog()
response = dialog.run()
class PasswordDialog(Gtk.Dialog):
def __init__ (self):
super(PasswordDialog, self).__init__()
self.correct_current_password = False # Flag to remember if the current password is correct or not
self.set_modal(True)
self.set_skip_taskbar_hint(True)
self.set_skip_pager_hint(True)
self.set_title(_("Change Password"))
table = Gtk.Table(6, 3)
table.set_border_width(6)
table.set_row_spacings(8)
table.set_col_spacings(15)
label = Gtk.Label.new(_("Current password"))
label.set_alignment(1, 0.5)
table.attach(label, 0, 1, 0, 1)
label = Gtk.Label.new(_("New password"))
label.set_alignment(1, 0.5)
table.attach(label, 0, 1, 1, 2)
label = Gtk.Label.new(_("Confirm password"))
label.set_alignment(1, 0.5)
table.attach(label, 0, 1, 3, 4)
self.current_password = Gtk.Entry()
self.current_password.set_visibility(False)
self.current_password.connect("focus-out-event", self._on_current_password_changed)
table.attach(self.current_password, 1, 3, 0, 1)
self.new_password = Gtk.Entry()
self.new_password.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, "reload")
self.new_password.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY, _("Generate a password"))
self.new_password.connect("icon-release", self._on_new_password_icon_released)
self.new_password.connect("changed", self._on_passwords_changed)
table.attach(self.new_password, 1, 3, 1, 2)
self.strengh_indicator = Gtk.ProgressBar()
self.strengh_indicator.set_tooltip_text(_("Your new password needs to be at least 8 characters long"))
self.strengh_indicator.set_fraction(0.0)
table.attach(self.strengh_indicator, 1, 2, 2, 3, xoptions=Gtk.AttachOptions.EXPAND|Gtk.AttachOptions.FILL)
self.strengh_indicator.set_size_request(-1, 1)
self.strengh_label = Gtk.Label()
self.strengh_label.set_tooltip_text(_("Your new password needs to be at least 8 characters long"))
self.strengh_label.set_alignment(1, 0.5)
table.attach(self.strengh_label, 2, 3, 2, 3)
self.confirm_password = Gtk.Entry()
self.confirm_password.connect("changed", self._on_passwords_changed)
table.attach(self.confirm_password, 1, 3, 3, 4)
self.show_password = Gtk.CheckButton(_("Show password"))
self.show_password.connect('toggled', self._on_show_password_toggled)
table.attach(self.show_password, 1, 3, 4, 5)
self.set_border_width(6)
box = self.get_content_area()
box.add(table)
self.show_all()
self.infobar = Gtk.InfoBar()
self.infobar.set_message_type(Gtk.MessageType.ERROR)
        label = Gtk.Label.new(_("An error occurred. Your password was not changed."))
content = self.infobar.get_content_area()
content.add(label)
table.attach(self.infobar, 0, 3, 5, 6)
self.add_buttons(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, _("Change"), Gtk.ResponseType.OK, )
self.set_passwords_visibility()
self.set_response_sensitive(Gtk.ResponseType.OK, False)
self.infobar.hide()
self.connect("response", self._on_response)
def _on_response(self, dialog, response_id):
if response_id == Gtk.ResponseType.OK:
self.change_password()
else:
self.destroy()
def change_password(self):
oldpass = self.current_password.get_text()
newpass = self.new_password.get_text()
passwd = pexpect.spawn("/usr/bin/passwd")
time.sleep(0.5)
passwd.sendline(oldpass)
time.sleep(0.5)
passwd.sendline(newpass)
time.sleep(0.5)
passwd.sendline(newpass)
time.sleep(0.5)
passwd.close()
if passwd.exitstatus is None or passwd.exitstatus > 0:
self.infobar.show()
else:
self.destroy()
def set_passwords_visibility(self):
visible = self.show_password.get_active()
self.new_password.set_visibility(visible)
self.confirm_password.set_visibility(visible)
def _on_new_password_icon_released(self, widget, icon_pos, event):
self.infobar.hide()
self.show_password.set_active(True)
characters = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_-"
newpass = ""
for i in range (8):
index = randint(0, len(characters) -1)
newpass = newpass + characters[index]
self.new_password.set_text(newpass)
self.confirm_password.set_text(newpass)
self.check_passwords()
def _on_show_password_toggled(self, widget):
self.set_passwords_visibility()
def _on_current_password_changed(self, widget, event):
self.infobar.hide()
if self.current_password.get_text() != "":
auth = PAM.pam()
auth.start('passwd')
auth.set_item(PAM.PAM_USER, GLib.get_user_name())
auth.set_item(PAM.PAM_CONV, self.pam_conv)
try:
auth.authenticate()
auth.acct_mgmt()
except PAM.error, resp:
self.current_password.set_icon_from_stock(Gtk.EntryIconPosition.SECONDARY, Gtk.STOCK_DIALOG_WARNING)
self.current_password.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY, _("Wrong password"))
self.correct_current_password = False
except:
print 'Internal error'
else:
self.current_password.set_icon_from_stock(Gtk.EntryIconPosition.SECONDARY, None)
self.correct_current_password = True
self.check_passwords()
# Based on setPasswordStrength() in Mozilla Seamonkey, which is tri-licensed under MPL 1.1, GPL 2.0, and LGPL 2.1.
# Forked from Ubiquity validation.py
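    # Worked example (hypothetical password, for illustration only):
    # "Abc123!x" -> length capped at 4, digit=3, upper=1, symbol=1
    # strength = (4*0.1 - 0.2) + 3*0.1 + 1*0.15 + 1*0.1 = 0.75 ("Good" in the UI)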
def password_strength(self, password):
upper = lower = digit = symbol = 0
for char in password:
if char.isdigit():
digit += 1
elif char.islower():
lower += 1
elif char.isupper():
upper += 1
else:
symbol += 1
length = len(password)
length = min(length,4)
digit = min(digit,3)
upper = min(upper,3)
symbol = min(symbol,3)
strength = (
((length * 0.1) - 0.2) +
(digit * 0.1) +
(symbol * 0.15) +
(upper * 0.1))
if strength > 1:
strength = 1
if strength < 0:
strength = 0
return strength
def _on_passwords_changed(self, widget):
self.infobar.hide()
new_password = self.new_password.get_text()
confirm_password = self.confirm_password.get_text()
strength = self.password_strength(new_password)
if new_password != confirm_password:
self.confirm_password.set_icon_from_stock(Gtk.EntryIconPosition.SECONDARY, Gtk.STOCK_DIALOG_WARNING)
self.confirm_password.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY, _("Passwords do not match"))
else:
self.confirm_password.set_icon_from_stock(Gtk.EntryIconPosition.SECONDARY, None)
if len(new_password) < 8:
self.strengh_label.set_text(_("Too short"))
self.strengh_indicator.set_fraction(0.0)
elif strength < 0.6:
self.strengh_label.set_text(_("Weak"))
self.strengh_indicator.set_fraction(0.2)
elif strength < 0.75:
self.strengh_label.set_text(_("Fair"))
self.strengh_indicator.set_fraction(0.4)
elif strength < 0.9:
self.strengh_label.set_text(_("Good"))
self.strengh_indicator.set_fraction(0.6)
else:
self.strengh_label.set_text(_("Strong"))
self.strengh_indicator.set_fraction(1.0)
self.check_passwords()
def check_passwords(self):
if self.correct_current_password:
new_password = self.new_password.get_text()
confirm_password = self.confirm_password.get_text()
if len(new_password) >= 8 and new_password == confirm_password:
self.set_response_sensitive(Gtk.ResponseType.OK, True)
else:
self.set_response_sensitive(Gtk.ResponseType.OK, False)
def pam_conv(self, auth, query_list, userData):
resp = []
for i in range(len(query_list)):
query, type = query_list[i]
val = self.current_password.get_text()
resp.append((val, 0))
return resp
| gpl-2.0 | 1,584,468,635,814,905,300 | 38.577982 | 160 | 0.598343 | false |
SophieIPP/openfisca-france | openfisca_france/model/prestations/prestations_familiales/af.py | 1 | 15020 | # -*- coding: utf-8 -*-
from __future__ import division
from numpy import round, maximum as max_, logical_not as not_, logical_or as or_, vectorize, where
from ...base import * # noqa analysis:ignore
from .base_ressource import nb_enf
class af_enfant_a_charge(Variable):
column = BoolCol
entity_class = Individus
label = u"Enfant à charge au sens des allocations familiales"
def function(self, simulation, period):
period = period.this_month
est_enfant_dans_famille = simulation.calculate('est_enfant_dans_famille', period)
smic55 = simulation.calculate('smic55', period)
age = simulation.calculate('age', period)
rempli_obligation_scolaire = simulation.calculate('rempli_obligation_scolaire', period)
pfam = simulation.legislation_at(period.start).fam
condition_enfant = ((age >= pfam.enfants.age_minimal) * (age < pfam.enfants.age_intermediaire) *
rempli_obligation_scolaire)
condition_jeune = (age >= pfam.enfants.age_intermediaire) * (age < pfam.af.age3) * not_(smic55)
return period, or_(condition_enfant, condition_jeune) * est_enfant_dans_famille
class af_nbenf(Variable):
column = IntCol
entity_class = Familles
label = u"Nombre d'enfants dans la famille au sens des allocations familiales"
def function(self, simulation, period):
period_mois = period.this_month
af_enfant_a_charge_holder = simulation.compute('af_enfant_a_charge', period_mois)
af_nbenf = self.sum_by_entity(af_enfant_a_charge_holder)
return period, af_nbenf
class af_coeff_garde_alternee(DatedVariable):
column = FloatCol(default = 1)
entity_class = Familles
label = u"Coefficient à appliquer aux af pour tenir compte de la garde alternée"
@dated_function(start = date(2007, 5, 1))
def function_2007(self, simulation, period):
period = period.this_month
nb_enf = simulation.calculate('af_nbenf', period)
garde_alternee = simulation.compute('garde_alternee', period)
af_enfant_a_charge = simulation.compute('af_enfant_a_charge', period)
        # Number of dependent children in shared custody, i.e. those with af_enfant_a_charge = true and garde_alternee = true
nb_enf_garde_alternee = self.sum_by_entity(garde_alternee.array * af_enfant_a_charge.array)
# Avoid division by zero. If nb_enf == 0, necessarily nb_enf_garde_alternee = 0 so coeff = 1
coeff = 1 - (nb_enf_garde_alternee / (nb_enf + (nb_enf == 0))) * 0.5
return period, coeff
class af_forf_nbenf(Variable):
column = IntCol
entity_class = Familles
label = u"Nombre d'enfants dans la famille éligibles à l'allocation forfaitaire des AF"
def function(self, simulation, period):
period = period.this_month
age_holder = simulation.compute('age', period)
age = self.split_by_roles(age_holder, roles = ENFS)
smic55_holder = simulation.compute('smic55', period)
smic55 = self.split_by_roles(smic55_holder, roles = ENFS)
pfam = simulation.legislation_at(period.start).fam.af
af_forf_nbenf = nb_enf(age, smic55, pfam.age3, pfam.age3)
return period, af_forf_nbenf
class af_eligibilite_base(Variable):
column = BoolCol
entity_class = Familles
label = u"Allocations familiales - Éligibilité pour la France métropolitaine sous condition de ressources"
def function(self, simulation, period):
period = period.this_month
residence_dom = simulation.calculate('residence_dom', period)
af_nbenf = simulation.calculate('af_nbenf', period)
return period, not_(residence_dom) * (af_nbenf >= 2)
class af_eligibilite_dom(Variable):
column = BoolCol
entity_class = Familles
label = u"Allocations familiales - Éligibilité pour les DOM (hors Mayotte) sous condition de ressources"
def function(self, simulation, period):
period = period.this_month
residence_dom = simulation.calculate('residence_dom', period)
residence_mayotte = simulation.calculate('residence_mayotte', period)
af_nbenf = simulation.calculate('af_nbenf', period)
return period, residence_dom * not_(residence_mayotte) * (af_nbenf >= 1)
class af_base(Variable):
column = FloatCol
entity_class = Familles
label = u"Allocations familiales - allocation de base"
    # family benefits (gross of CRDS)
def function(self, simulation, period):
period = period.this_month
eligibilite_base = simulation.calculate('af_eligibilite_base', period)
eligibilite_dom = simulation.calculate('af_eligibilite_dom', period)
af_nbenf = simulation.calculate('af_nbenf', period)
pfam = simulation.legislation_at(period.start).fam.af
eligibilite = or_(eligibilite_base, eligibilite_dom)
un_seul_enfant = eligibilite_dom * (af_nbenf == 1) * pfam.taux.enf_seul
plus_de_deux_enfants = (af_nbenf >= 2) * pfam.taux.enf2
plus_de_trois_enfants = max_(af_nbenf - 2, 0) * pfam.taux.enf3
taux_total = un_seul_enfant + plus_de_deux_enfants + plus_de_trois_enfants
montant_base = eligibilite * round(pfam.bmaf * taux_total, 2)
coeff_garde_alternee = simulation.calculate('af_coeff_garde_alternee', period)
montant_base = montant_base * coeff_garde_alternee
af_taux_modulation = simulation.calculate('af_taux_modulation', period)
montant_base_module = montant_base * af_taux_modulation
return period, montant_base_module
class af_taux_modulation(DatedVariable):
column = FloatCol(default = 1)
entity_class = Familles
label = u"Taux de modulation à appliquer au montant des AF depuis 2015"
@dated_function(start = date(2015, 7, 1))
def function_2015(self, simulation, period):
period = period.this_month
af_nbenf = simulation.calculate('af_nbenf', period)
pfam = simulation.legislation_at(period.start).fam.af
br_pf = simulation.calculate('br_pf', period)
modulation = pfam.modulation
plafond1 = modulation.plafond1 + af_nbenf * modulation.enfant_supp
plafond2 = modulation.plafond2 + af_nbenf * modulation.enfant_supp
taux = (
(br_pf <= plafond1) * 1 +
(br_pf > plafond1) * (br_pf <= plafond2) * modulation.taux1 +
(br_pf > plafond2) * modulation.taux2
)
return period, taux
class af_forf_taux_modulation(DatedVariable):
column = FloatCol(default = 1)
entity_class = Familles
label = u"Taux de modulation à appliquer depuis 2007 à l'allocation forfaitaire des AF depuis 2015"
@dated_function(start = date(2015, 7, 1))
def function_2015(self, simulation, period):
period = period.this_month
pfam = simulation.legislation_at(period.start).fam.af
af_nbenf = simulation.calculate('af_nbenf', period)
af_forf_nbenf = simulation.calculate('af_forf_nbenf', period)
nb_enf_tot = af_nbenf + af_forf_nbenf
br_pf = simulation.calculate('br_pf', period)
modulation = pfam.modulation
plafond1 = modulation.plafond1 + nb_enf_tot * modulation.enfant_supp
plafond2 = modulation.plafond2 + nb_enf_tot * modulation.enfant_supp
taux = (
(br_pf <= plafond1) * 1 +
(br_pf > plafond1) * (br_pf <= plafond2) * modulation.taux1 +
(br_pf > plafond2) * modulation.taux2
)
return period, taux
class af_age_aine(Variable):
column = IntCol
entity_class = Familles
label = u"Allocations familiales - Âge de l'aîné des enfants éligibles"
def function(self, simulation, period):
period = period.this_month
age_holder = simulation.compute('age', period)
age_enfants = self.split_by_roles(age_holder, roles = ENFS)
af_enfant_a_charge_holder = simulation.compute('af_enfant_a_charge', period)
af_enfants_a_charge = self.split_by_roles(af_enfant_a_charge_holder, roles = ENFS)
pfam = simulation.legislation_at(period.start).fam
        # Compute the age of the eldest child
age_aine = -9999
for key, age in age_enfants.iteritems():
a_charge = af_enfants_a_charge[key] * (age <= pfam.af.age2)
aine_potentiel = a_charge * (age > age_aine)
age_aine = aine_potentiel * age + not_(aine_potentiel) * age_aine
return period, age_aine
class af_majoration_enfant(Variable):
column = FloatCol
entity_class = Individus
label = u"Allocations familiales - Majoration pour âge applicable à l'enfant"
def function(self, simulation, period):
period = period.this_month
af_enfant_a_charge = simulation.calculate('af_enfant_a_charge', period)
age = simulation.calculate('age', period)
garde_alternee = simulation.calculate('garde_alternee', period)
age_aine_holder = simulation.compute('af_age_aine', period)
age_aine = self.cast_from_entity_to_roles(age_aine_holder, roles = ENFS)
af_nbenf_holder = simulation.compute('af_nbenf', period)
af_nbenf = self.cast_from_entity_to_roles(af_nbenf_holder, roles = ENFS)
af_base_holder = simulation.compute('af_base', period)
af_base = self.cast_from_entity_to_roles(af_base_holder, roles = ENFS)
pfam = simulation.legislation_at(period.start).fam
montant_enfant_seul = pfam.af.bmaf * (
(pfam.af.maj_age_un_enfant.age1 <= age) * (age < pfam.af.maj_age_un_enfant.age2) * pfam.af.maj_age_un_enfant.taux1 +
(pfam.af.maj_age_un_enfant.age2 <= age) * pfam.af.maj_age_un_enfant.taux2
)
montant_plusieurs_enfants = pfam.af.bmaf * (
(pfam.af.maj_age_deux_enfants.age1 <= age) * (age < pfam.af.maj_age_deux_enfants.age2) * pfam.af.maj_age_deux_enfants.taux1 +
(pfam.af.maj_age_deux_enfants.age2 <= age) * pfam.af.maj_age_deux_enfants.taux2
)
montant = (af_nbenf == 1) * montant_enfant_seul + (af_nbenf > 1) * montant_plusieurs_enfants
        # Warning: does not work for children of the same age (typically twins...)
pas_aine = or_(af_nbenf != 2, (af_nbenf == 2) * not_(age == age_aine))
coeff_garde_alternee = where(garde_alternee, pfam.af.facteur_garde_alternee, 1)
return period, af_enfant_a_charge * (af_base > 0) * pas_aine * montant * coeff_garde_alternee
class af_majo(Variable):
column = FloatCol
entity_class = Familles
label = u"Allocations familiales - majoration pour âge"
def function(self, simulation, period):
period = period.this_month
af_majoration_enfant_holder = simulation.compute('af_majoration_enfant', period)
af_majoration_enfants = self.sum_by_entity(af_majoration_enfant_holder, roles = ENFS)
af_taux_modulation = simulation.calculate('af_taux_modulation', period)
af_majoration_enfants_module = af_majoration_enfants * af_taux_modulation
return period, af_majoration_enfants_module
class af_complement_degressif(DatedVariable):
column = FloatCol
entity_class = Familles
label = u"AF - Complément dégressif en cas de dépassement du plafond"
@dated_function(start = date(2015, 7, 1))
def function_2015(self, simulation, period):
period = period.this_month
af_nbenf = simulation.calculate('af_nbenf', period)
br_pf = simulation.calculate('br_pf', period)
af_base = simulation.calculate('af_base', period)
af_majo = simulation.calculate('af_majo', period)
pfam = simulation.legislation_at(period.start).fam.af
modulation = pfam.modulation
plafond1 = modulation.plafond1 + af_nbenf * modulation.enfant_supp
plafond2 = modulation.plafond2 + af_nbenf * modulation.enfant_supp
depassement_plafond1 = max_(0, br_pf - plafond1)
depassement_plafond2 = max_(0, br_pf - plafond2)
depassement_mensuel = (
(depassement_plafond2 == 0) * depassement_plafond1 +
(depassement_plafond2 > 0) * depassement_plafond2
) / 12
af = af_base + af_majo
return period, max_(0, af - depassement_mensuel) * (depassement_mensuel > 0)
class af_forf_complement_degressif(DatedVariable):
column = FloatCol
entity_class = Familles
label = u"AF - Complément dégressif pour l'allocation forfaitaire en cas de dépassement du plafond"
@dated_function(start = date(2015, 7, 1))
def function_2015(self, simulation, period):
period = period.this_month
af_nbenf = simulation.calculate('af_nbenf', period)
af_forf_nbenf = simulation.calculate('af_forf_nbenf', period)
pfam = simulation.legislation_at(period.start).fam.af
nb_enf_tot = af_nbenf + af_forf_nbenf
br_pf = simulation.calculate('br_pf', period)
af_forf = simulation.calculate('af_forf', period)
modulation = pfam.modulation
plafond1 = modulation.plafond1 + nb_enf_tot * modulation.enfant_supp
plafond2 = modulation.plafond2 + nb_enf_tot * modulation.enfant_supp
depassement_plafond1 = max_(0, br_pf - plafond1)
depassement_plafond2 = max_(0, br_pf - plafond2)
depassement_mensuel = (
(depassement_plafond2 == 0) * depassement_plafond1 +
(depassement_plafond2 > 0) * depassement_plafond2
) / 12
return period, max_(0, af_forf - depassement_mensuel) * (depassement_mensuel > 0)
class af_forf(Variable):
column = FloatCol
entity_class = Familles
label = u"Allocations familiales - forfait"
def function(self, simulation, period):
period = period.this_month
af_nbenf = simulation.calculate('af_nbenf', period)
af_forf_nbenf = simulation.calculate('af_forf_nbenf', period)
P = simulation.legislation_at(period.start).fam.af
bmaf = P.bmaf
af_forfait = round(bmaf * P.taux.forfait, 2)
af_forf = ((af_nbenf >= 2) * af_forf_nbenf) * af_forfait
af_forf_taux_modulation = simulation.calculate('af_forf_taux_modulation', period)
af_forf_module = af_forf * af_forf_taux_modulation
return period, af_forf_module
class af(Variable):
calculate_output = calculate_output_add
column = FloatCol
entity_class = Familles
label = u"Allocations familiales - total des allocations"
def function(self, simulation, period):
period = period.this_month
af_base = simulation.calculate('af_base', period)
af_majo = simulation.calculate('af_majo', period)
af_forf = simulation.calculate('af_forf', period)
af_complement_degressif = simulation.calculate('af_complement_degressif', period)
af_forf_complement_degressif = simulation.calculate('af_forf_complement_degressif', period)
return period, af_base + af_majo + af_forf + af_complement_degressif + af_forf_complement_degressif
| agpl-3.0 | -1,809,114,468,212,156,400 | 39.612466 | 137 | 0.658882 | false |
mendrugory/reactive-architecture-python | resources-analyzer/resources-analyzer/analyzer.py | 1 | 1567 | #!/usr/bin/python3
import pika
import json
import settings
import time
time.sleep(20)
connection = pika.BlockingConnection(pika.ConnectionParameters(host=settings.RABBITMQ_HOST))
channel = connection.channel()
channel.exchange_declare(exchange=settings.RABBITMQ_EXCHANGE,
exchange_type=settings.RABBITMQ_EXCHANGE_TYPE)
result = channel.queue_declare(exclusive=True)
queue_name = result.method.queue
channel.queue_bind(exchange=settings.RABBITMQ_EXCHANGE,
queue=queue_name,
routing_key=settings.RABBITMQ_SUB)
def callback(ch, method, properties, body):
machine = method.routing_key.split(".")[0]
result = analyze(json.loads(body.decode('utf-8'))['cpu_percentage'])
connection = pika.BlockingConnection(pika.ConnectionParameters(
host=settings.RABBITMQ_HOST))
channel = connection.channel()
channel.exchange_declare(exchange=settings.RABBITMQ_EXCHANGE,
exchange_type=settings.RABBITMQ_EXCHANGE_TYPE)
routing_key = settings.RABBITMQ_PUB.format(machine)
channel.basic_publish(exchange=settings.RABBITMQ_EXCHANGE,
routing_key=routing_key,
body=json.dumps({'cpu_result': result}))
connection.close()
def analyze(percentage):
if percentage < 25:
result = 'LOW'
elif percentage < 75:
result = 'MEDIUM'
else:
result = 'HIGH'
return result
channel.basic_consume(callback, queue=queue_name, no_ack=True)
channel.start_consuming()
| mit | -5,733,706,653,143,237,000 | 30.34 | 92 | 0.672623 | false |
jeremi/couchdbkit | couchdbkit/loaders.py | 1 | 19439 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2009 Benoit Chesneau <[email protected]>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Loaders are a simple way to manage design docs in your Python application.
Loaders are compatible with the couchapp script (http://github.com/couchapp/couchapp),
which means you can simply use couchdbkit as a drop-in replacement in your Python
applications while keeping the advantages of the couchdbkit client. Compatibility
with couchapp also means you can use macros to include javascript code or design
doc members in your views, shows & lists.
The current loaders are FileSystemDocsLoader and FileSystemDocLoader. The first
one takes a directory and retrieves all design docs before sending them to
CouchDB. The second allows you to send only one design doc.
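A minimal end-to-end sketch (the server URL, database name and design path
below are assumptions for illustration, not values defined by this module)::
    from couchdbkit import Server
    from couchdbkit.loaders import FileSystemDocsLoader
    server = Server('http://127.0.0.1:5984/')
    db = server.get_or_create_db('myapp')
    loader = FileSystemDocsLoader('/path/to/_design')
    loader.sync(db, verbose=True)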
"""
from __future__ import with_statement
import base64
import copy
import httplib
import mimetypes
import os
import socket
import sys
from couchdbkit.exceptions import DocsPathNotFound
from couchdbkit.resource import ResourceNotFound
from couchdbkit.utils import *
from couchdbkit.macros import *
class BaseDocsLoader(object):
"""Baseclass for all doc loaders. Subclass this and override `get_docs` to
implement a custom loading mechanism. You can then sync docs and design docs
to the db with the `sync` function.
A very basic example for a loader that looks up a json file on the
filesystem could look like this::
from couchdbkit import BaseDocsLoader
import os
import simplejson as json
        class MyDocsLoader(BaseDocsLoader):
            def __init__(self, path):
                self.path = path
            def get_docs(self):
                if not os.path.exists(self.path):
                    raise DocsPathNotFound
                with open(self.path) as f:
                    source = json.loads(f.read().decode('utf-8'))
                return source
"""
def get_docs(self):
raise NotImplementedError
def sync(self, dbs, atomic=True, verbose=False):
if not isinstance(dbs, (list, tuple)):
dbs = [dbs]
doc_or_docs = self.get_docs()
if not isinstance(doc_or_docs, (list, tuple,)):
doc_or_docs = [doc_or_docs]
for doc in doc_or_docs:
docid = doc['_id']
new_doc = copy.deepcopy(doc)
couchapp = doc.get('couchapp', {})
if not couchapp:
new_doc['couchapp'] = {}
# we process attachments later
del new_doc['_attachments']
if 'signatures' in new_doc['couchapp']:
del new_doc['couchapp']['signatures']
for db in dbs:
if docid in db:
try:
current = db.get(docid)
except ResourceNotFound:
current = {}
_app_meta = current.get('couchapp', {})
if docid.startswith('_design'):
new_doc['couchapp'] ['signatures'] = _app_meta.get('signatures', {})
new_doc['_attachments'] = current.get('_attachments', {})
if '_rev' in current:
new_doc['_rev'] = current.get('_rev')
if not atomic:
db[docid] = new_doc
if docid.startswith('_design/'):
self.send_attachments(db, doc, verbose=verbose)
else:
if docid.startswith('_design/'):
self.encode_attachments(db, doc, new_doc, verbose=verbose)
db[docid] = new_doc
def _put_attachment(self, db, doc, content, filename, content_length=None,
verbose=False):
if hasattr(content, 'read') and content_length is None:
content = content.read()
nb_try = 0
while True:
error = False
try:
db.put_attachment(doc, content, filename, content_length=content_length)
except (socket.error, httplib.BadStatusLine):
time.sleep(0.4)
error = True
nb_try = nb_try +1
if not error:
break
if nb_try > 3:
if verbose >= 2:
print >>sys.stderr, "%s file not uploaded, sorry." % filename
break
def encode_attachments(self, db, design_doc,new_doc, verbose=False):
# init vars
all_signatures = {}
if not 'couchapp' in design_doc:
design_doc['couchapp'] = {}
_signatures = design_doc['couchapp'].get('signatures', {})
_length = design_doc['couchapp'].get('length', {})
_attachments = design_doc.get('_attachments', {})
docid = design_doc['_id']
attachments = _attachments.copy()
current_design = {}
try:
current_design = db[docid]
except ResourceNotFound:
pass
new_attachments = current_design.get('_attachments', {})
metadata = current_design.get('couchapp', {})
if 'signatures' in metadata:
all_signatures = metadata['signatures'].copy()
for filename in metadata['signatures'].iterkeys():
                if filename not in _signatures:
del new_attachments[filename]
del all_signatures[filename]
elif _signatures[filename] == metadata['signatures'][filename]:
del attachments[filename]
for filename, value in attachments.iteritems():
content_length = _length.get(filename, None)
if verbose:
print "Attaching %s (%s)" % (filename, content_length)
with open(value, "rb") as f:
new_attachments[filename] = {
"content_type": ';'.join(filter(None, mimetypes.guess_type(filename))),
"data": base64.b64encode(f.read()),
}
# update signatures
if not 'couchapp' in new_doc:
new_doc['couchapp'] = {}
all_signatures.update(_signatures)
new_doc['couchapp'].update({'signatures': _signatures})
new_doc['_attachments'] = new_attachments
def send_attachments(self, db, design_doc, verbose=False):
# init vars
all_signatures = {}
if not 'couchapp' in design_doc:
design_doc['couchapp'] = {}
_signatures = design_doc['couchapp'].get('signatures', {})
_length = design_doc['couchapp'].get('length', {})
_attachments = design_doc.get('_attachments', {})
docid = design_doc['_id']
# detect attachments to be removed and keep
# only new version attachments to update.
current_design = db[docid]
metadata = current_design.get('couchapp', {})
attachments = _attachments.copy()
if 'signatures' in metadata:
all_signatures = metadata['signatures'].copy()
for filename in metadata['signatures'].iterkeys():
if filename not in _signatures:
db.delete_attachment(current_design, filename)
elif _signatures[filename] == metadata['signatures'][filename]:
del attachments[filename]
for filename, value in attachments.iteritems():
content_length = _length.get(filename, None)
if verbose:
print "Attaching %s (%s)" % (filename, content_length)
with open(value, "rb") as f:
# fix issue with httplib that raises BadStatusLine
# error because it didn't close the connection
self._put_attachment(db, current_design, f, filename,
content_length=content_length, verbose=verbose)
# update signatures
current_design = db[docid]
if not 'couchapp' in current_design:
current_design['couchapp'] = {}
all_signatures.update(_signatures)
current_design['couchapp'].update({'signatures': all_signatures})
db[docid] = current_design
class FileSystemDocsLoader(BaseDocsLoader):
""" Load docs from the filesystem. This loader can find docs
in folders on the filesystem and is the preferred way to load them.
    The loader takes the path to the design docs as a string or, if multiple
    locations are wanted, a list of them, which is then looked up in the
    given order:
>>> loader = FileSystemDocsLoader('/path/to/templates')
>>> loader = FileSystemDocsLoader(['/path/to/templates', '/other/path'])
    You can do the same to load regular docs through the docpath argument.
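    A minimal sketch (the paths and the db object are assumptions for illustration):
    >>> loader = FileSystemDocsLoader('/path/to/_design', docpath='/path/to/docs')
    >>> loader.sync(db, verbose=True)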
"""
def __init__(self, designpath, docpath=None):
if isinstance(designpath, basestring):
designpath = [designpath]
docpath = docpath or []
if isinstance(docpath, basestring):
docpath = [docpath]
self.designpath = list(designpath)
self.docpath = list(docpath)
def get_docs(self, verbose=False):
docs = []
for docpath in self.docpath:
if not os.path.isdir(docpath):
raise DocsPathNotFound("%s doesn't exist" % docpath)
for name in os.listdir(docpath):
if name.startswith('.'):
continue
elif os.path.isfile(name):
fpath = os.path.join(docpath, name)
try:
doc = read_file(fpath)
except UnicodeDecodeError, e:
print >>sys.stderr, str(e)
raise
if name.endswith('.json'):
try:
doc = read_json(fpath)
except ValueError:
pass
doc.update({ '_id': name })
docs.append(doc)
else:
doc = { '_id': name }
manifest = []
app_dir = os.path.join(docpath, name)
doc.update(self.dir_to_fields(app_dir, app_dir,
manifest=manifest, verbose=verbose))
if not 'couchapp' in doc:
doc['couchapp'] = {}
doc['couchapp'].update({ 'manifest': manifest })
docs.append(doc)
for designpath in self.designpath:
if not os.path.isdir(designpath):
raise DocsPathNotFound("%s doesn't exist" % designpath)
for name in os.listdir(designpath):
ddoc = self.get_designdoc(designpath, name, verbose=verbose)
if ddoc:
docs.append(ddoc)
return docs
def get_designdoc(self, root, name, design_name=None, verbose=False):
design_doc = {}
if not name.startswith('.') and not os.path.isfile(name):
manifest = []
objects = {}
if design_name is None:
docid = design_doc['_id'] = "_design/%s" % name
else:
docid = design_doc['_id'] = "_design/%s" % design_name
app_dir = os.path.join(root, name)
attach_dir = os.path.join(app_dir, '_attachments')
design_doc.update(self.dir_to_fields(app_dir, manifest=manifest,
verbose=verbose))
if not 'couchapp' in design_doc:
design_doc['couchapp'] = {}
if 'shows' in design_doc:
package_shows(design_doc, design_doc['shows'], app_dir, objects, verbose=verbose)
if 'lists' in design_doc:
package_shows(design_doc, design_doc['lists'], app_dir, objects, verbose=verbose)
if 'views' in design_doc:
package_views(design_doc, design_doc["views"], app_dir, objects, verbose=verbose)
couchapp = design_doc.get('couchapp', False)
design_doc.update({
'couchapp': {
'manifest': manifest,
'objects': objects
}
})
self.attach(design_doc, attach_dir, docid, verbose=verbose)
self.attach_vendors(design_doc, app_dir, docid, verbose=verbose)
return design_doc
def dir_to_fields(self, app_dir, current_dir='', depth=0,
manifest=[], verbose=False):
fields={}
if not current_dir:
current_dir = app_dir
for name in os.listdir(current_dir):
current_path = os.path.join(current_dir, name)
rel_path = relpath(current_path, app_dir)
if name.startswith('.'):
continue
elif name.startswith('_'):
# files starting with "_" are always "special"
continue
elif depth == 0 and name in ('couchapp', 'couchapp.json'):
# we are in app_meta
if name == "couchapp":
manifest.append('%s/' % rel_path)
content = self.dir_to_fields(app_dir, current_path,
depth=depth+1, manifest=manifest)
else:
manifest.append(rel_path)
content = read_json(current_path)
if not isinstance(content, dict):
content = { "meta": content }
if 'signatures' in content:
del content['signatures']
if 'manifest' in content:
del content['manifest']
if 'objects' in content:
del content['objects']
if 'couchapp' in fields:
fields['couchapp'].update(content)
else:
fields['couchapp'] = content
elif os.path.isdir(current_path):
manifest.append('%s/' % rel_path)
fields[name] = self.dir_to_fields(app_dir, current_path,
depth=depth+1, manifest=manifest,
verbose=verbose)
else:
if verbose >= 2:
print >>sys.stderr, "push %s" % rel_path
content = ''
try:
content = read_file(current_path)
except UnicodeDecodeError, e:
if verbose >= 2:
print >>sys.stderr, "%s isn't encoded in utf8" % current_path
content = self.ui.read(current_path, utf8=False)
try:
content.encode('utf-8')
except UnicodeError, e:
print >>sys.stderr, "plan B didn't work, %s is a binary" % current_path
print >>sys.stderr, "use plan C: encode to base64"
content = "base64-encoded;%s" % base64.b64encode(content)
if name.endswith('.json'):
try:
content = json.loads(content)
except ValueError:
if verbose >= 2:
print >>sys.stderr, "Json invalid in %s" % current_path
# remove extension
name, ext = os.path.splitext(name)
if name in fields:
if verbose >= 2:
print >>sys.stderr, "%(name)s is already in properties. Can't add (%(name)s%(ext)s)" % {
"name": name,
"ext": ext
}
else:
manifest.append(rel_path)
fields[name] = content
return fields
def attach_vendors(self, design_doc, app_dir, docid, verbose):
vendor_dir = os.path.join(app_dir, 'vendor')
if not os.path.isdir(vendor_dir):
return
for name in os.listdir(vendor_dir):
current_path = os.path.join(vendor_dir, name)
if os.path.isdir(current_path):
attach_dir = os.path.join(current_path, '_attachments')
if os.path.isdir(attach_dir):
self.push_directory(design_doc, attach_dir, docid, verbose,
vendor=name)
def attach(self, doc, attach_dir, docid, verbose=False, vendor=None):
# get attachments
_signatures = {}
_attachments = {}
_length = {}
all_signatures = {}
for root, dirs, files in os.walk(attach_dir):
if files:
for filename in files:
if filename.startswith('.'):
continue
else:
file_path = os.path.join(root, filename)
name = relpath(file_path, attach_dir)
if vendor is not None:
name = os.path.join('vendor', vendor, name)
_signatures[name] = sign_file(file_path)
_attachments[name] = file_path
_length[name] = int(os.path.getsize(file_path))
for prop in ('couchapp', '_attachments'):
if not prop in doc:
doc[prop] = {}
if not 'signatures' in doc['couchapp']:
doc['couchapp']['signatures'] = {}
if not 'length' in doc['couchapp']:
doc['couchapp']['length'] = {}
doc['_attachments'].update(_attachments)
doc['couchapp']['signatures'].update(_signatures)
doc['couchapp']['length'].update(_length)
class FileSystemDocLoader(FileSystemDocsLoader):
""" Load only one design doc from a path on the filesystem.
        >>> loader = FileSystemDocLoader("/path/to/designdocfolder", "nameofdesigndoc")
"""
def __init__(self, designpath, name, design_name=None):
self.designpath = designpath
self.name = name
self.design_name = design_name
def get_docs(self, verbose=False):
docs = []
ddoc = self.get_designdoc(self.designpath, self.name,
design_name=self.design_name, verbose=verbose)
if ddoc:
docs.append(ddoc)
return docs
| isc | -4,943,099,272,784,033,000 | 39.413721 | 112 | 0.519111 | false |
rushter/MLAlgorithms | mla/tests/test_classification_accuracy.py | 1 | 3370 | from sklearn.metrics import roc_auc_score
from mla.ensemble import RandomForestClassifier
from mla.ensemble.gbm import GradientBoostingClassifier
from mla.knn import KNNClassifier
from mla.linear_models import LogisticRegression
from mla.metrics import accuracy
from mla.naive_bayes import NaiveBayesClassifier
from mla.neuralnet import NeuralNet
from mla.neuralnet.constraints import MaxNorm
from mla.neuralnet.layers import Activation, Dense, Dropout
from mla.neuralnet.optimizers import Adadelta
from mla.neuralnet.parameters import Parameters
from mla.neuralnet.regularizers import L2
from mla.svm.kernerls import RBF, Linear
from mla.svm.svm import SVM
from mla.utils import one_hot
try:
from sklearn.model_selection import train_test_split
except ImportError:
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_classification
# Generate a random classification problem
X, y = make_classification(
n_samples=750, n_features=10, n_informative=8, random_state=1111, n_classes=2, class_sep=2.5, n_redundant=0
)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.12, random_state=1111)
# All classifiers except convnet, RNN, LSTM.
def test_linear_model():
model = LogisticRegression(lr=0.01, max_iters=500, penalty="l1", C=0.01)
model.fit(X_train, y_train)
predictions = model.predict(X_test)
assert roc_auc_score(y_test, predictions) >= 0.95
def test_random_forest():
model = RandomForestClassifier(n_estimators=10, max_depth=4)
model.fit(X_train, y_train)
predictions = model.predict(X_test)[:, 1]
assert roc_auc_score(y_test, predictions) >= 0.95
def test_svm_classification():
y_signed_train = (y_train * 2) - 1
y_signed_test = (y_test * 2) - 1
for kernel in [RBF(gamma=0.05), Linear()]:
model = SVM(max_iter=500, kernel=kernel)
model.fit(X_train, y_signed_train)
predictions = model.predict(X_test)
assert accuracy(y_signed_test, predictions) >= 0.8
def test_mlp():
y_train_onehot = one_hot(y_train)
y_test_onehot = one_hot(y_test)
model = NeuralNet(
layers=[
Dense(256, Parameters(init="uniform", regularizers={"W": L2(0.05)})),
Activation("relu"),
Dropout(0.5),
Dense(128, Parameters(init="normal", constraints={"W": MaxNorm()})),
Activation("relu"),
Dense(2),
Activation("softmax"),
],
loss="categorical_crossentropy",
optimizer=Adadelta(),
metric="accuracy",
batch_size=64,
max_epochs=25,
)
model.fit(X_train, y_train_onehot)
predictions = model.predict(X_test)
assert roc_auc_score(y_test_onehot[:, 0], predictions[:, 0]) >= 0.95
def test_gbm():
model = GradientBoostingClassifier(n_estimators=25, max_depth=3, max_features=5, learning_rate=0.1)
model.fit(X_train, y_train)
predictions = model.predict(X_test)
assert roc_auc_score(y_test, predictions) >= 0.95
def test_naive_bayes():
model = NaiveBayesClassifier()
model.fit(X_train, y_train)
predictions = model.predict(X_test)[:, 1]
assert roc_auc_score(y_test, predictions) >= 0.95
def test_knn():
clf = KNNClassifier(k=5)
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
assert accuracy(y_test, predictions) >= 0.95
| mit | 6,136,310,670,250,007,000 | 31.403846 | 111 | 0.683976 | false |
saurabh6790/frappe | frappe/automation/doctype/assignment_rule/test_assignment_rule.py | 1 | 9130 | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from frappe.utils import random_string
from frappe.test_runner import make_test_records
class TestAutoAssign(unittest.TestCase):
def setUp(self):
make_test_records("User")
days = [
dict(day = 'Sunday'),
dict(day = 'Monday'),
dict(day = 'Tuesday'),
dict(day = 'Wednesday'),
dict(day = 'Thursday'),
dict(day = 'Friday'),
dict(day = 'Saturday'),
]
self.days = days
self.assignment_rule = get_assignment_rule([days, days])
clear_assignments()
def test_round_robin(self):
note = make_note(dict(public=1))
# check if auto assigned to first user
self.assertEqual(frappe.db.get_value('ToDo', dict(
reference_type = 'Note',
reference_name = note.name,
status = 'Open'
), 'owner'), '[email protected]')
note = make_note(dict(public=1))
# check if auto assigned to second user
self.assertEqual(frappe.db.get_value('ToDo', dict(
reference_type = 'Note',
reference_name = note.name,
status = 'Open'
), 'owner'), '[email protected]')
clear_assignments()
note = make_note(dict(public=1))
# check if auto assigned to third user, even if
		# previous assignments were closed
self.assertEqual(frappe.db.get_value('ToDo', dict(
reference_type = 'Note',
reference_name = note.name,
status = 'Open'
), 'owner'), '[email protected]')
# check loop back to first user
note = make_note(dict(public=1))
self.assertEqual(frappe.db.get_value('ToDo', dict(
reference_type = 'Note',
reference_name = note.name,
status = 'Open'
), 'owner'), '[email protected]')
def test_load_balancing(self):
self.assignment_rule.rule = 'Load Balancing'
self.assignment_rule.save()
for _ in range(30):
note = make_note(dict(public=1))
		# check if each user has 10 assignments
for user in ('[email protected]', '[email protected]', '[email protected]'):
self.assertEqual(len(frappe.get_all('ToDo', dict(owner = user, reference_type = 'Note'))), 10)
# clear 5 assignments for first user
# can't do a limit in "delete" since postgres does not support it
for d in frappe.get_all('ToDo', dict(reference_type = 'Note', owner = '[email protected]'), limit=5):
frappe.db.sql("delete from tabToDo where name = %s", d.name)
# add 5 more assignments
for i in range(5):
make_note(dict(public=1))
# check if each user still has 10 assignments
for user in ('[email protected]', '[email protected]', '[email protected]'):
self.assertEqual(len(frappe.get_all('ToDo', dict(owner = user, reference_type = 'Note'))), 10)
def test_based_on_field(self):
self.assignment_rule.rule = 'Based on Field'
self.assignment_rule.field = 'owner'
self.assignment_rule.save()
frappe.set_user('[email protected]')
note = make_note(dict(public=1))
# check if auto assigned to doc owner, [email protected]
self.assertEqual(frappe.db.get_value('ToDo', dict(
reference_type = 'Note',
reference_name = note.name,
status = 'Open'
), 'owner'), '[email protected]')
frappe.set_user('[email protected]')
note = make_note(dict(public=1))
# check if auto assigned to doc owner, [email protected]
self.assertEqual(frappe.db.get_value('ToDo', dict(
reference_type = 'Note',
reference_name = note.name,
status = 'Open'
), 'owner'), '[email protected]')
frappe.set_user('Administrator')
def test_assign_condition(self):
# check condition
note = make_note(dict(public=0))
self.assertEqual(frappe.db.get_value('ToDo', dict(
reference_type = 'Note',
reference_name = note.name,
status = 'Open'
), 'owner'), None)
def test_clear_assignment(self):
note = make_note(dict(public=1))
# check if auto assigned to first user
todo = frappe.get_list('ToDo', dict(
reference_type = 'Note',
reference_name = note.name,
status = 'Open'
))[0]
todo = frappe.get_doc('ToDo', todo['name'])
self.assertEqual(todo.owner, '[email protected]')
# test auto unassign
note.public = 0
note.save()
todo.load_from_db()
# check if todo is cancelled
self.assertEqual(todo.status, 'Cancelled')
def test_close_assignment(self):
note = make_note(dict(public=1, content="valid"))
# check if auto assigned
todo = frappe.get_list('ToDo', dict(
reference_type = 'Note',
reference_name = note.name,
status = 'Open'
))[0]
todo = frappe.get_doc('ToDo', todo['name'])
self.assertEqual(todo.owner, '[email protected]')
note.content="Closed"
note.save()
todo.load_from_db()
# check if todo is closed
self.assertEqual(todo.status, 'Closed')
# check if closed todo retained assignment
self.assertEqual(todo.owner, '[email protected]')
def check_multiple_rules(self):
note = make_note(dict(public=1, notify_on_login=1))
# check if auto assigned to test3 (2nd rule is applied, as it has higher priority)
self.assertEqual(frappe.db.get_value('ToDo', dict(
reference_type = 'Note',
reference_name = note.name,
status = 'Open'
), 'owner'), '[email protected]')
def check_assignment_rule_scheduling(self):
frappe.db.sql("DELETE FROM `tabAssignment Rule`")
days_1 = [dict(day = 'Sunday'), dict(day = 'Monday'), dict(day = 'Tuesday')]
days_2 = [dict(day = 'Wednesday'), dict(day = 'Thursday'), dict(day = 'Friday'), dict(day = 'Saturday')]
get_assignment_rule([days_1, days_2], ['public == 1', 'public == 1'])
frappe.flags.assignment_day = "Monday"
note = make_note(dict(public=1))
self.assertIn(frappe.db.get_value('ToDo', dict(
reference_type = 'Note',
reference_name = note.name,
status = 'Open'
), 'owner'), ['[email protected]', '[email protected]', '[email protected]'])
frappe.flags.assignment_day = "Friday"
note = make_note(dict(public=1))
self.assertIn(frappe.db.get_value('ToDo', dict(
reference_type = 'Note',
reference_name = note.name,
status = 'Open'
), 'owner'), ['[email protected]'])
def test_assignment_rule_condition(self):
frappe.db.sql("DELETE FROM `tabAssignment Rule`")
# Add expiry_date custom field
from frappe.custom.doctype.custom_field.custom_field import create_custom_field
df = dict(fieldname='expiry_date', label='Expiry Date', fieldtype='Date')
create_custom_field('Note', df)
assignment_rule = frappe.get_doc(dict(
name = 'Assignment with Due Date',
doctype = 'Assignment Rule',
document_type = 'Note',
assign_condition = 'public == 0',
due_date_based_on = 'expiry_date',
assignment_days = self.days,
users = [
dict(user = '[email protected]'),
]
)).insert()
expiry_date = frappe.utils.add_days(frappe.utils.nowdate(), 2)
note1 = make_note({'expiry_date': expiry_date})
note2 = make_note({'expiry_date': expiry_date})
note1_todo = frappe.get_all('ToDo', filters=dict(
reference_type = 'Note',
reference_name = note1.name,
status = 'Open'
))[0]
note1_todo_doc = frappe.get_doc('ToDo', note1_todo.name)
self.assertEqual(frappe.utils.get_date_str(note1_todo_doc.date), expiry_date)
# due date should be updated if the reference doc's date is updated.
note1.expiry_date = frappe.utils.add_days(expiry_date, 2)
note1.save()
note1_todo_doc.reload()
self.assertEqual(frappe.utils.get_date_str(note1_todo_doc.date), note1.expiry_date)
# saving one note's expiry should not update other note todo's due date
note2_todo = frappe.get_all('ToDo', filters=dict(
reference_type = 'Note',
reference_name = note2.name,
status = 'Open'
), fields=['name', 'date'])[0]
self.assertNotEqual(frappe.utils.get_date_str(note2_todo.date), note1.expiry_date)
self.assertEqual(frappe.utils.get_date_str(note2_todo.date), expiry_date)
assignment_rule.delete()
def clear_assignments():
frappe.db.sql("delete from tabToDo where reference_type = 'Note'")
def get_assignment_rule(days, assign=None):
frappe.delete_doc_if_exists('Assignment Rule', 'For Note 1')
if not assign:
assign = ['public == 1', 'notify_on_login == 1']
assignment_rule = frappe.get_doc(dict(
name = 'For Note 1',
doctype = 'Assignment Rule',
priority = 0,
document_type = 'Note',
assign_condition = assign[0],
unassign_condition = 'public == 0 or notify_on_login == 1',
close_condition = '"Closed" in content',
rule = 'Round Robin',
assignment_days = days[0],
users = [
dict(user = '[email protected]'),
dict(user = '[email protected]'),
dict(user = '[email protected]'),
]
)).insert()
frappe.delete_doc_if_exists('Assignment Rule', 'For Note 2')
# 2nd rule
frappe.get_doc(dict(
name = 'For Note 2',
doctype = 'Assignment Rule',
priority = 1,
document_type = 'Note',
assign_condition = assign[1],
unassign_condition = 'notify_on_login == 0',
rule = 'Round Robin',
assignment_days = days[1],
users = [
dict(user = '[email protected]')
]
)).insert()
return assignment_rule
def make_note(values=None):
note = frappe.get_doc(dict(
doctype = 'Note',
title = random_string(10),
content = random_string(20)
))
if values:
note.update(values)
note.insert()
return note
| mit | 7,478,623,259,708,553,000 | 28.076433 | 106 | 0.66966 | false |
cwisecarver/osf.io | api_tests/nodes/views/test_node_sparse_fieldsets.py | 1 | 8100 | # -*- coding: utf-8 -*-
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from osf_tests.factories import (
ProjectFactory,
AuthUserFactory,
)
from api_tests.nodes.views.test_view_only_query_parameter import ViewOnlyTestCase
class TestNodeSparseFieldsList(ApiTestCase):
def setUp(self):
super(TestNodeSparseFieldsList, self).setUp()
self.user = AuthUserFactory()
self.non_contrib = AuthUserFactory()
self.deleted = ProjectFactory(is_deleted=True)
self.private = ProjectFactory(is_public=False, creator=self.user)
self.public = ProjectFactory(is_public=True, creator=self.user)
self.url = '/{}nodes/?fields[nodes]='.format(API_BASE)
def test_empty_fields_returns_no_attributes(self):
res = self.app.get(self.url)
node_json = res.json['data'][0]
assert node_json['attributes'] == {}
assert set(node_json.keys()) == set(['links', 'type', 'id', 'attributes'])
def test_sparse_fields_includes_relationships(self):
res = self.app.get(self.url + 'children')
node_json = res.json['data'][0]
assert node_json['attributes'] == {}
assert set(node_json.keys()) == set(['links', 'type', 'id', 'attributes', 'relationships'])
assert node_json['relationships']['children']['links']['related']['href'].endswith('/{}nodes/{}/children/'.format(API_BASE, self.public._id))
def test_returns_expected_nodes(self):
res = self.app.get(self.url + 'title')
node_json = res.json['data']
ids = [each['id'] for each in node_json]
assert self.public._id in ids
assert self.deleted._id not in ids
assert self.private._id not in ids
assert len(node_json) == 1
node_json = node_json[0]
assert node_json['attributes']['title'] == self.public.title
assert len(node_json['attributes']) == 1
assert set(node_json.keys()) == set(['links', 'type', 'id', 'attributes'])
def test_filtering_by_id(self):
url = '/{}nodes/?filter[id]={}&fields[nodes]='.format(API_BASE, self.public._id)
res = self.app.get(url)
assert [each['id'] for each in res.json['data']] == [self.public._id]
node_json = res.json['data'][0]
assert set(node_json.keys()) == set(['links', 'type', 'id', 'attributes'])
assert node_json['attributes'] == {}
def test_filtering_by_excluded_field(self):
url = '/{}nodes/?filter[title]={}&fields[nodes]='.format(API_BASE, self.public.title)
res = self.app.get(url)
assert [each['id'] for each in res.json['data']] == [self.public._id]
node_json = res.json['data'][0]
assert set(node_json.keys()) == set(['links', 'type', 'id', 'attributes'])
assert node_json['attributes'] == {}
def test_create_with_sparse_fields(self):
payload = {
'data': {
'type': 'nodes',
'attributes':
{
'title': 'New Title',
'description': 'What a test',
'category': 'project',
'public': True,
}
}
}
res = self.app.post_json_api(self.url, payload, auth=self.user.auth)
assert res.status_code == 201
assert set(res.json['data'].keys()) == set(['links', 'type', 'id', 'attributes'])
assert res.json['data']['attributes'] == {}
class TestNodeSparseFieldsDetail(ApiTestCase):
def setUp(self):
super(TestNodeSparseFieldsDetail, self).setUp()
self.user = AuthUserFactory()
self.node = ProjectFactory(is_public=True, creator=self.user)
self.url = '/{}nodes/{}/'.format(API_BASE, self.node._id)
def test_empty_fields_returns_no_attributes(self):
res = self.app.get(self.url + '?fields[nodes]=')
node_json = res.json['data']
assert node_json['attributes'] == {}
assert set(node_json.keys()) == set(['links', 'type', 'id', 'attributes'])
def test_embed_sparse_same_type(self):
child = ProjectFactory(parent=self.node, is_public=True, creator=self.user)
url = '{}?embed=children&fields[nodes]=title,children'.format(self.url)
res = self.app.get(url)
node_json = res.json['data']
assert set(node_json.keys()) == set(['links', 'type', 'id', 'attributes', 'relationships', 'embeds'])
assert node_json['attributes'].keys() == ['title']
assert set(node_json['embeds']['children']['data'][0].keys()) == set(['links', 'type', 'id', 'attributes', 'relationships'])
assert node_json['embeds']['children']['data'][0]['attributes'].keys() == ['title']
assert node_json['embeds']['children']['data'][0]['attributes']['title'] == child.title
def test_embed_sparse_different_types(self):
url = '{}?embed=contributors&fields[nodes]=title,contributors'.format(self.url)
res = self.app.get(url)
node_json = res.json['data']
assert set(node_json.keys()) == set(['links', 'type', 'id', 'attributes', 'embeds', 'relationships'])
assert node_json['attributes'].keys() == ['title']
assert len(node_json['embeds']['contributors']['data']) == 1
assert node_json['embeds']['contributors']['data'][0]['id'] == '{}-{}'.format(self.node._id, self.user._id)
assert len(node_json['embeds']['contributors']['data'][0]['attributes']) > 1
def test_sparse_embedded_type(self):
url = '{}?embed=contributors&fields[contributors]='.format(self.url)
res = self.app.get(url)
node_json = res.json['data']
assert set(node_json.keys()) == set(['links', 'type', 'id', 'attributes', 'embeds', 'relationships'])
assert len(node_json['attributes'].keys()) > 1
assert len(node_json['embeds']['contributors']['data']) == 1
assert node_json['embeds']['contributors']['data'][0]['id'] == '{}-{}'.format(self.node._id, self.user._id)
assert len(node_json['embeds']['contributors']['data'][0]['attributes']) == 0
def test_multiple_sparse_types(self):
url = '{}?fields[nodes]=contributors,title&embed=contributors&fields[contributors]=bibliographic'.format(self.url)
res = self.app.get(url)
node_json = res.json['data']
assert set(node_json.keys()) == set(['links', 'type', 'id', 'attributes', 'embeds', 'relationships'])
assert node_json['attributes'].keys() == ['title']
assert len(node_json['embeds']['contributors']['data']) == 1
assert node_json['embeds']['contributors']['data'][0]['id'] == '{}-{}'.format(self.node._id, self.user._id)
assert node_json['embeds']['contributors']['data'][0]['attributes'].keys() == ['bibliographic']
def test_update_with_sparse_fields(self):
url = '{}?fields[nodes]='.format(self.url)
old_title = self.node.title
payload = {'data': {
'id': self.node._id,
'type': 'nodes',
'attributes': {
'title': 'new title'
}
}}
res = self.app.patch_json_api(url, payload, auth=self.user.auth)
assert res.status_code == 200
assert res.json['data']['attributes'] == {}
self.node.reload()
assert self.node.title != old_title
class TestSparseViewOnlyLinks(ViewOnlyTestCase):
def test_sparse_fields_with_anonymous_link(self):
res = self.app.get(self.private_node_one_url, {
'view_only': self.private_node_one_anonymous_link.key,
'fields[nodes]': 'title,current_user_can_comment,contributors',
'fields[contributors]': 'id',
'embed': 'contributors'
}) # current_user_can_comment is an anonymized field, should be removed
assert res.status_code == 200
assert res.json['data']['attributes'].keys() == ['title']
for contrib in res.json['data']['embeds']['contributors']['data']:
assert contrib['id'] == ''
assert contrib['attributes'] == {}
| apache-2.0 | 8,490,037,284,475,921,000 | 43.505495 | 149 | 0.58358 | false |
gralog/gralog | gralog-fx/src/main/java/gralog/gralogfx/piping/scripts/Gralog.py | 1 | 47242 | #!/usr/bin/env python3
import sys
from random import randint
import os
try:
import networkx as nx
except:
print("gPrint#-1#" + "netwrokx not installed for " + sys.executable)
sys.stdout.flush()
try:
import igraph as ig
except:
print("gPrint#-1#" + "igraph not installed for " + sys.executable)
import xml.etree.cElementTree as ET
import math
# debugging = False
class Vertex:
def __init__(self, graph, vid):
self.sourced = False
self.id = int(vid)
self.graph = graph
self.properties = dict()
self.properties["id"] = None
self.properties["label"] = None
self.properties["color"] = None
self.properties["strokeColor"] = None
self.properties["shape"] = None
self.properties["coordinates"] = None
self.incomingEdges = []
self.outgoingEdges = []
self.incidentEdges = []
self.wasSourced = False
def sourceProperties(self, stringFromGralog):
self.sourced = True
strings = stringFromGralog.split("#")
for string in strings:
propVal = string.split("=")
valueType = ""
try:
prop = propVal[0]
valueType = propVal[1]
except:
pass
try:
valueType = valueType.split("|")
val = valueType[0]
typ = valueType[1]
castedValue = self.graph.castValueToType(val, typ)
self.properties[prop] = castedValue
except:
pass
def getId(self):
return self.id
def getLabel(self):
if not self.wasSourced:
self.source()
return self.properties["label"]
def setLabel(self, label):
label = str(label)
self.properties["label"] = label
self.graph.setVertexLabel(self.id, label)
def setCoordinates(self, coordinates):
co = self.properties["coordinates"]
x = coordinates[0]
y = coordinates[1]
if co == None:
co = (None, None)
if x == None:
x = co[0]
if y == None:
y = co[1]
newCoordinates = (x, y)
self.properties["coordinates"] = newCoordinates
self.graph.setVertexCoordinates(self.id, newCoordinates)
def setFillColor(self, colorHex=-1, colorRGB=-1):
self.setColor(colorHex, colorRGB)
def getFillColor(self):
return self.getColor()
def getColor(self):
if not self.wasSourced:
self.source()
return self.properties["color"]
def setColor(self, colorHex=-1, colorRGB=-1):
if colorHex != -1:
self.properties["fillColor"] = colorHex
elif colorRGB != -1:
self.properties["fillColor"] = colorRGB
else:
return
self.graph.setVertexFillColor(self.id, colorHex, colorRGB)
def setStrokeColor(self, colorHex=-1, colorRGB=-1):
if colorHex != -1:
self.properties["strokeColor"] = colorHex
elif colorRGB != -1:
self.properties["strokeColor"] = colorRGB
else:
return
self.graph.setVertexStrokeColor(self.id, colorHex, colorRGB)
def getStrokeColor(self):
if not self.sourced:
self.source()
return self.properties["strokeColor"]
def setRadius(self, radius):
self.properties["radius"] = radius
self.properties["width"] = radius
self.properties["height"] = radius
self.graph.setVertexRadius(self.id, radius)
def setWidth(self, width):
self.properties["width"] = width
self.graph.setVertexWidth(self.getId(), width)
def setHeight(self, height):
self.properties["height"] = height
self.graph.setVertexHeight(self.getId(), height)
def setShape(self, shape):
self.properties["shape"] = shape
self.graph.setVertexShape(self.id, shape)
def setProperty(self, otherProperty, value):
self.properties[otherProperty] = value
self.graph.setVertexProperty(self.id, otherProperty, value)
def getProperty(self, otherProperty):
if not self.sourced:
self.source()
return self.properties[otherProperty]
def get(self, prop):
if not self.sourced:
self.source()
return self.properties[prop]
def getNeighbours(self):
return self.graph.getNeighbours(self.id)
def getOutgoingNeighbours(self):
return self.graph.getOutgoingNeighbours(self.id)
def getIncomingNeighbours(self):
return self.graph.getIncomingNeighbours(self.id)
def getOutgoingEdges(self):
return self.graph.getOutgoingEdges(self.id)
def getIncomingEdges(self):
return self.graph.getIncomingEdges(self.id)
def getIncidentEdges(self):
return self.graph.getIncidentEdges(self.id)
def delete(self):
return self.graph.deleteVertex(self)
def connect(self, v1, edgeId=-1):
return self.graph.addEdge(self, v1, edgeId)
def getAllEdgesBetween(self, vertex2):
return self.graph.getAllEdgesBetween((self.id, vertex2))
def source(self):
return self.graph.getVertex(self)
def __str__(self):
return str(self.getId())
# what if i want to get a vertex? should i also get all its neighbours? how about incident edges? This is all very laborious and leads to the paradigm by which we just store the graph in python???
class Edge:
# private methods
def __init__(self, graph, eid):
self.sourced = False
self.id = int(eid) #if -2, then imported without id like in TGF
self.graph = graph
self.properties = dict()
self.properties["id"] = None
self.properties["label"] = None
self.properties["color"] = None
self.properties["weight"] = None
self.properties["contour"] = None
self.properties["source"] = None
self.properties["target"] = None
self.wasSourced = False
def sourceProperties(self, stringFromGralog):
self.sourced = True
strings = stringFromGralog.split("#")
for string in strings:
propVal = string.split("=")
try:
prop = propVal[0]
valueType = propVal[1]
valueType = valueType.split("|")
val = valueType[0]
typ = valueType[1]
self.properties[prop] = self.graph.castValueToType(val, typ)
except:
pass
def setTarget(self, target): # don't use!!
self.properties["target"] = target
def setSource(self, source):
self.properties["source"] = source
# public methods
def getId(self):
return self.id
def setLabel(self, label):
label = str(label)
self.properties["label"] = label
self.graph.setEdgeLabel(self.id, label)
def getLabel(self):
if not self.sourced:
self.source()
return self.properties["label"]
def setColor(self, colorHex=-1, colorRGB=-1):
if colorHex != -1:
self.properties["color"] = colorHex
elif colorRGB != -1:
self.properties["color"] = colorRGB
else:
return
self.graph.setEdgeColor(self.id, colorHex, colorRGB)
def getColor(self):
if not self.sourced:
self.source()
return self.properties["color"]
def setWeight(self, weight):
self.properties["weight"] = float(weight)
self.graph.setEdgeWeight(self.id, weight)
def getWeight(self):
if not self.sourced:
self.source()
return self.properties["weight"]
def setThickness(self, thickness):
self.properties["thickness"] = float(thickness)
self.graph.setEdgeThickness(self.id, thickness)
def getThickness(self):
if not self.sourced:
self.source()
return self.properties["thickness"]
def setContour(self, contour):
self.properties["contour"] = contour
self.graph.setEdgeContour(self.id, contour)
def getContour(self):
if not self.sourced:
self.source()
return self.properties["contour"]
def getSource(self):
if not self.sourced:
self.source()
return self.properties["source"]
def getTarget(self):
if not self.sourced:
self.source()
return self.properties["target"]
def setProperty(self, otherProperty, value):
self.properties[otherProperty] = value
self.graph.setEdgeProperty(self, otherProperty, value)
def getProperty(self, otherProperty):
if not self.sourced:
self.source()
return self.properties[otherProperty]
def get(self, prop):
self.source()
return self.properties[prop]
def delete(self):
return self.graph.deleteEdge(self.id)
def source(self):
return self.graph.getEdge(self)
def getAdjacentEdges(self):
return self.graph.getAdjacentEdges(self.id)
def __str__(self):
v = self.getId()
v_str = str(v)
source = self.getSource().getId()
target = self.getTarget().getId()
return "({:},{:})".format(source, target)
def rgbFormatter(colorRGB):
r = colorRGB[0]
g = colorRGB[1]
b = colorRGB[2]
s = "rgb"
s += "(" + str(r).rstrip() + "," + \
str(g).rstrip() + "," + str(b).rstrip() + ")"
return s.rstrip()
def hexFormatter(colorHex):
s = "hex"
if colorHex[0] == "#":
colorHex = colorHex[1:]
s += "("+str(colorHex).rstrip() + ")"
return s.rstrip()
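# For illustration: rgbFormatter((255, 0, 0)) yields "rgb(255,0,0)" and
# hexFormatter("#ff0000") yields "hex(ff0000)"; these are the color strings
# passed on to the set*Color commands further below.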
def vertexId(vertex):
if isinstance(vertex, Vertex):
return vertex.getId()
return vertex
def edgeId(edge):
if isinstance(edge, Edge):
return edge.getId()
return edge
def extractIdFromProperties(stringFromGralog):
strings = stringFromGralog.split(",")
for string in strings:
propVal = string.split("=")
if propVal[0] == "id":
return propVal[1]
return None
def edgeSplitter(edge):
if type(edge) == tuple and len(edge) == 2: # edge as defined by start, end nodes
return str(vertexId(edge[0])).rstrip()+","+str(vertexId(edge[1])).rstrip()
if type(edge) == int: # edge is given by id
return str(edge).rstrip()
return str(edge.getId()).rstrip()#edge has type Edge
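# Graph mirrors a graph held by Gralog itself. Its methods do not only update
# the local id_to_vertex/id_to_edge caches: they print '#'-separated commands
# such as "addVertex#<graphId>#..." to stdout and, whenever an answer is
# expected, read it back from stdin.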
class Graph:
def __init__(self, format="Undirected Graph"):
# perform analysis of graph
self.id_to_vertex = dict()
self.id_to_edge = dict()
self.lastIndex = -1
self.id = -1
self.variablesToTrack = dict()
if format == None or format.lower() == "none":
# we want a new graph
print("useCurrentGraph")
sys.stdout.flush()
self.lastIndex = -1
self.id = sys.stdin.readline()
self.getGraph("gtgf")
else:
print(format)
sys.stdout.flush()
self.id = sys.stdin.readline()
# helper functions
def castValueToType(self, val, typ):
if typ == "float":
return float(val)
if typ == "int":
return int(val)
if typ == "bool":
return bool(val)
if typ == "string":
return str(val)
if typ == "vertex":
return self.getVertexOrNew(val)
return val
def getVertexOrNew(self, currId):
v = currId
if (isinstance(currId, str)):
currId = int(currId)
if (isinstance(currId, int)):
if currId in self.id_to_vertex:
v=self.id_to_vertex[currId]
else:
v=Vertex(self, currId)
self.id_to_vertex[currId] = v
return v
def getEdgeOrNew(self, currId):
if type(currId) == tuple:
e = self.getEdgeIdByEndpoints(currId)
return e
e = currId
if not (isinstance(currId, Edge)):
try:
e = self.id_to_edge[int(currId)]
except:
e = Edge(self, currId)
else:
gPrint("Error (getEdgeOrNew()): the argument \
is neither an edge id nor a pair of vertices.")
return e
def termToEdge(self, term):
endpoints = term.split(",")
eid = int(endpoints[0])
e = self.id_to_edge[eid]
e.sourceProperties(endpoints[0])
sourceId = int(endpoints[1])
source = self.getVertexOrNew(sourceId)
targetId = int(endpoints[2])
target = self.getVertexOrNew(targetId)
e.setSource(source)
e.setTarget(target)
return e
def representsInt(s):
try:
int(s)
return True
except ValueError:
return False
def edgifyTGFCommand(self, line):
line = line.strip()
endpoints = line.split(" ")
v1String = endpoints[0]
v1 = self.getVertexOrNew(int(v1String))
v2String = endpoints[1]
v2 = self.getVertexOrNew(int(v2String))
e = self.getEdgeOrNew(-2)
e.setSource(v1)
e.setTarget(v2)
def vertexifyTGFCommand(self, line):
line = line.strip()
vString = line[0]
v = self.getVertexOrNew(int(vString))
self.vertices[v.getId()] = v
def edgifyGTGFCommand(self, line):
line = line.strip()
endpoints = line.split(" ")
v1String = endpoints[0]
v1 = self.getVertexOrNew(int(v1String))
v2String = endpoints[1]
v2 = self.getVertexOrNew(int(v2String))
eid = int(endpoints[2])
e = self.getEdgeOrNew(eid)
e.setSource(v1)
e.setTarget(v2)
self.id_to_edge[eid] = e
def vertexifyGTGFCommand(self, line):
self.vertexifyTGFCommand(line)
def getEdgeIdByEndpoints(self, endpoints):
line = "getEdgeIdByEndpoints#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(endpoints)
print(line.rstrip())
sys.stdout.flush()
edgeId = sys.stdin.readline().rstrip()
return edgeId
def getVertex(self, vertex):
line = "getVertex#"+str(self.id).rstrip() + "#"
line = line + str(vertex).rstrip()
print (line.rstrip())
sys.stdout.flush()
vertexTuple = sys.stdin.readline().rstrip()
vertex.sourceProperties(vertexTuple)
return vertex
def getEdge(self, edge):
line = "getEdge#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
print (line.rstrip())
sys.stdout.flush()
edgeTuple = sys.stdin.readline().rstrip()
edge.sourceProperties(edgeTuple)
return edge
# end helper functions
# Graph Manipulating Functions
def addVertex(self, vertexId=-1, pos=(None, None)):
# return: Vertex object with id
line = "addVertex#" + str(self.id).rstrip()
x = -1
y = -1
vertexIdSwap = False
if type(vertexId) == tuple and pos == (None, None):
x = vertexId[0]
y = vertexId[1]
vertexId = -1
else:
x = pos[0]
y = pos[1]
if vertexId != -1:
line += "#"+str(vertexId).rstrip()
if x != None and y != None:
line += "#" + str(x).rstrip() + "#" + str(y).rstrip()
print(line)
sys.stdout.flush()
vid = sys.stdin.readline()
v = Vertex(self, vid)
self.id_to_vertex[v.getId()] = v
return v
def deleteVertex(self, v):
edges = self.getIncidentEdges(v)
for e in edges:
del self.id_to_edge[e.getId()]
v = vertexId(v)
del self.id_to_vertex[v]
print("deleteVertex#" + str(self.id).rstrip() + "#" + str(v))
sys.stdout.flush()
def addEdge(self, sourceVertex, targetVertex, edgeId = -1):
# return: Edge object with id only
sourceVertex = vertexId(sourceVertex)
targetVertex = vertexId(targetVertex)
idSubString = ""
if not edgeId == -1:
idSubString = "#"+str(edgeId)
line = "addEdge#"+str(self.id).rstrip() + "#" + str(sourceVertex).rstrip() + \
"#" + str(targetVertex).rstrip() + idSubString.rstrip()
print(line.rstrip())
sys.stdout.flush()
eid = sys.stdin.readline()
if eid != "\n": # it's possible that the edge cannot be added (e.g., a new selfloop)
e = Edge(self, eid)
self.id_to_edge[e.getId()] = e
return e
return None
def existsEdge(self, edge):
line = "existsEdge#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
print(line.rstrip())
sys.stdout.flush()
thereExistsAnEdge = sys.stdin.readline().rstrip()
return thereExistsAnEdge.lower() == "true"
def existsVertex(self, vertex):
line = "existsVertex#"+str(self.id).rstrip() + "#"
line = line + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
thereExistsAVertex = sys.stdin.readline().rstrip()
return thereExistsAVertex.lower() == "true"
def deleteEdge(self, edge):
del self.id_to_edge[edge.getId()]
line = "deleteEdge#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
print(line.rstrip())
sys.stdout.flush()
def getAllEdgesBetween(self, vertexPair):
line = "getAllEdgesBetween#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(vertexPair)
print(line.rstrip())
sys.stdout.flush()
endpointList = sys.stdin.readline()
endpointList = endpointList.split("#")
edges = []
for i in range(len(endpointList)):
term = endpointList[i].rstrip()
term = term[1:-1]
e = self.termToEdge(term)
if e != None:
edges.append(e)
return edges
    # creates a random Erdos-Renyi graph with n vertices and edge probability p
def generateRandomGraph(self, vertexCount, p):
if not isinstance(vertexCount, int):
gPrint("Cannot generate a random graph, wrong parameter: \
vertex number must be an int.")
if vertexCount < 0:
gPrint("Cannot generate a random graph, wrong parameter: \
vertex number cannot be less than 0.")
if not isinstance(p, float) or p < 0 or p > 1.0:
gPrint("Cannot generate a random graph, wrong parameter: \
probability of an edge must be a float in [0,1].")
if vertexCount == 0:
return
vertices = []
coordinates = dict()
for id in range(vertexCount):
coordinates[id] = (10*math.cos(2*id*math.pi/vertexCount),
10*math.sin(2*id*math.pi/vertexCount))
nxgraph = nx.fast_gnp_random_graph(vertexCount, p)
d = dict()
id = 0
for nxV in nxgraph.nodes():
d[id] = nxV
id += 1
nxEdges = nxgraph.edges()
id = 0
for x in range(vertexCount):
vertices.append(self.addVertex(id, coordinates[id]))
id += 1
for x in vertices:
for y in vertices:
if x.getId() < y.getId():
if (d[x.getId()], d[y.getId()]) in nxEdges:
x.connect(y)
    # end manipulating functions
# setter functions
# begin: best for private use!
def setVertexFillColor(self, vertex, colorHex=-1, colorRGB=-1):
vertex = vertexId(vertex)
line = "setVertexFillColor#" + str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#"
if not (colorHex == -1):
line = line + hexFormatter(str(colorHex))
elif not (colorRGB == -1):
try:
line = line + rgbFormatter(colorRGB)
except:
self.sendErrorToGralog("the rgb color: " + str(colorRGB).rstrip() + " is not properly formatted!")
else:
self.sendErrorToGralog("neither Hex nor RGB color specified!")
print(line.rstrip())
sys.stdout.flush()
def setVertexStrokeColor(self, vertex, colorHex=-1, colorRGB=-1):
vertex = vertexId(vertex)
# print("colorhex: " + str(colorHex))
line = "setVertexStrokeColor#"+str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#"
if not (colorHex == -1):
line = line + hexFormatter(str(colorHex))
elif not (colorRGB == -1) and len(colorRGB) == 3:
line = line + rgbFormatter(colorRGB)
print(line.rstrip())
sys.stdout.flush()
def setVertexCoordinates(self, vertex, coordinates):
line = "setVertexCoordinates#" + str(self.id).rstrip()+"#" + str(vertexId(vertex)).rstrip()
x = -1
y = -1
x = coordinates[0]
y = coordinates[1]
if x == None:
x = "empty"
if y == None:
y = "empty"
line += "#" + str(x).rstrip() + "#" + str(y).rstrip()
print(line)
sys.stdout.flush()
def setEdgeContour(self, edge, contour):
        line = "setEdgeContour#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
line = line + "#" + str(contour).rstrip()
print(line)
sys.stdout.flush()
def setEdgeColor(self, edge, colorHex=-1, colorRGB=-1):
line = "setEdgeColor#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
line = line + "#"
if not (colorHex == -1):
line = line + hexFormatter(colorHex)
elif not (colorRGB == -1) and len(colorRGB) == 3:
line = line + rgbFormatter(colorRGB)
print(line.rstrip())
sys.stdout.flush()
def setVertexRadius(self, vertex, newRadius):
self.setVertexDimension(vertex, newRadius, "radius")
def setVertexHeight(self, vertex, newHeight):
self.setVertexDimension(vertex, newHeight, "height")
def setVertexWidth(self, vertex, newWidth):
self.setVertexDimension(vertex, newWidth, "width")
def setVertexDimension(self, vertex, newDimension, dimension):
vertex = vertexId(vertex)
line = "setVertexDimension#"+str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#" + str(newDimension).rstrip()+"#" + dimension.rstrip()
print(line.rstrip())
sys.stdout.flush()
def setVertexShape(self, vertex, shape):
vertex = vertexId(vertex)
line = "setVertexShape#" + str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#" + str(shape).rstrip()
print(line.rstrip())
sys.stdout.flush()
def setEdgeWeight(self, edge, weight):
self.setEdgeProperty(edge, "weight", weight)
def setEdgeThickness(self, edge, thickness):
self.setEdgeProperty(edge, "thickness", thickness)
def setEdgeProperty(self, edge, propertyName, value):
line = "setEdgeProperty#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
line = line + "#" + propertyName.rstrip().lower() + "#" + str(value).rstrip().lower()
print(line.rstrip())
sys.stdout.flush()
def setVertexProperty(self, vertex, propertyName, value):
line = "setVertexProperty#"+str(self.id).rstrip() + "#"
line = line + str(vertexId(vertex)).rstrip()
line = line + "#" + propertyName.rstrip().lower() + "#" + str(value).rstrip().lower()
print(line.rstrip())
sys.stdout.flush()
def setEdgeLabel(self, edge, label):
line = "setEdgeLabel#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
line = line + "#" + label
print(line.rstrip())
sys.stdout.flush()
def setVertexLabel(self, vertex, label):
vertex = vertexId(vertex)
line = "setVertexLabel#" + str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#" + label
print(line.rstrip())
sys.stdout.flush()
# end: best for private use!
def setGraph(self, graphFormat, graphString = "hello_world"):
graphFormat = graphFormat.lower()
line = "setGraph#"+str(self.id).rstrip() + "#" + graphFormat.rstrip()+"#"
if graphFormat == "gtgf" or graphFormat == "tgf":
line += "$$\n"
line += graphString
if graphFormat == "gtgf" or graphFormat == "tgf":
line += "$\n"
print(line)
sys.stdout.flush()
# TODO: implement this
# end setter functions
# getter functions
def toIgraph(self):
grlgML_file = open("tmp.graphml", "w")
grlgML_file.write(self.toXml())
grlgML_file.close()
g_ig = ig.Graph.Read_GraphML("tmp.graphml")
os.remove("tmp.graphml")
return g_ig
def toNx(self):
grlgML_file = open("tmp.graphml", "w")
grlgML_file.write(self.toXml())
grlgML_file.close()
g_nx = nx.read_graphml("tmp.graphml")
os.remove("tmp.graphml")
return g_nx
def toElementTree(self):
grlgML_file = open("tmp.graphml", "w")
grlgML_file.write(self.toXml())
grlgML_file.close()
g_ET = ET.parse("tmp.graphml")
os.remove("tmp.graphml")
return g_ET
def toXml(self):
return self.getGraph("xml")
def getGraph(self, graphFormat):
# warning!! importing as pure TGF will mean edge id's will
# be lost. This will result in errors on the Gralog side.
line = "getGraph#"+str(self.id).rstrip() + "#" + graphFormat.rstrip()
print(line.rstrip())
i = 0
sys.stdout.flush()
line = sys.stdin.readline()
graphString = ""
if graphFormat.lower() == "tgf" or graphFormat.lower() == "gtgf":
tgf = graphFormat.lower() == "tgf"
multiline = False
first = False
if line[0] == line[1] == '$':
multiline = True
if tgf:
first = True
line = sys.stdin.readline()
hashtagSeen = False
if not multiline:
return graphString
while line[0] != '$':
# gPrint("line: " + line +" and line[0]: " + line[0] + " and line[0]!='$': " + str(line[0] != '$'))
graphString += line
if line[0] == '#':
hashtagSeen = True
else:
if not first:
if hashtagSeen:
if tgf:
self.edgifyTGFCommand(line)
else:
self.edgifyGTGFCommand(line)
else:
if tgf:
self.vertexifyTGFCommand(line)
else:
self.vertexifyGTGFCommand(line)
line = sys.stdin.readline()
i += 1
first = False
return graphString
if graphFormat.lower() == "xml":
return line
def getAllVertices(self):
# return: list of Vertex objects with id
line = "getAllVertices#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
vertexIdStringList = (sys.stdin.readline()).split("#")
vertexList = []
for vertexIdString in vertexIdStringList:
if representsInt(vertexIdString):
v = self.getVertexOrNew(vertexIdString)
vertexList.append(v)
return vertexList
def getVertices(self):
return(self.getAllVertices())
def getAllEdges(self):
# return: list of fully sourced Edge objects with fully sourced endpoint Vertices
line = "getAllEdges#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
endpointList = sys.stdin.readline()
endpointList = endpointList.split("#")
edges = []
if len(endpointList) == 1 and endpointList[-1] == "\n":
endpointList = []
for i in range(len(endpointList)):
term = endpointList[i].rstrip()
term = term[1:-1]
e = self.termToEdge(term)
if e != None:
edges.append(e)
return edges
def getEdges(self):
return(self.getAllEdges())
# start: best for private use!
def getNeighbours(self, vertex):
# return: list of Vertex objects with id
vertex = vertexId(vertex)
line = "getNeighbours#" + str(self.id).rstrip() + "#" + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
neighbourIdStringList = (sys.stdin.readline()).split("#")
neighboursList = []
for neighbourIdString in neighbourIdStringList:
if representsInt(neighbourIdString):
v = self.getVertexOrNew(neighbourIdString)
neighboursList.append(v)
return neighboursList
def getOutgoingNeighbours(self, vertex):
# return: list of Vertex objects with id
vertex = vertexId(vertex)
line = "getOutgoingNeighbours#" + str(self.id).rstrip() + "#" + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
outgoingNeighbourIdStringList = (sys.stdin.readline()).split("#")
outgoingNeighboursList = []
for outgoingNeighbourIdString in outgoingNeighbourIdStringList:
if representsInt(outgoingNeighbourIdString):
v = self.getVertexOrNew(outgoingNeighbourIdString)
outgoingNeighboursList.append(v)
return outgoingNeighboursList
def getIncomingNeighbours(self, vertex):
# return: list of Vertex objects with id
vertex = vertexId(vertex)
line = "getIncomingNeighbours#"+str(self.id).rstrip() + "#" + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
incomingNeighbourIdStringList = (sys.stdin.readline()).split("#")
incomingNeighboursList = []
for incomingNeighbourIdString in incomingNeighbourIdStringList:
if representsInt(incomingNeighbourIdString):
v = self.getVertexOrNew(incomingNeighbourIdString)
incomingNeighboursList.append(v)
return incomingNeighboursList
def getIncidentEdges(self, vertex):
# return: list of Edge objects with id's only
vertex = vertexId(vertex)
line = "getIncidentEdges#" + str(self.id).rstrip() + "#" + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
endpointList = sys.stdin.readline()
endpointList = endpointList.split("#")
edges = []
for i in range(len(endpointList)):
term = endpointList[i].rstrip()
term = term[1:-1]
e = self.termToEdge(term)
if e != None:
edges.append(e)
return edges
def getAdjacentEdges(self, edge):
# return: list of Edge objects with id's only
line = "getAdjacentEdges#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
print(line.rstrip())
sys.stdout.flush()
endpointList = sys.stdin.readline()
endpointList = endpointList.split("#")
edges = []
for i in range(len(endpointList)):
term = endpointList[i].rstrip()
term = term[1:-1]
e = self.termToEdge(term)
if e != None:
edges.append(e)
return edges
def getOutgoingEdges(self, vertex):
# return: list of Edge objects with id's only
vertex = vertexId(vertex)
line = "getOutgoingEdges#" + str(self.id).rstrip() + "#" + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
endpointList = sys.stdin.readline()
endpointList = endpointList.split("#")
edges = []
for i in range(len(endpointList)):
term = endpointList[i].rstrip()
term = term[1:-1]
e = self.termToEdge(term)
if e != None:
edges.append(e)
return edges
def getIncomingEdges(self, vertex):
# return: list of Edge objects with id's only
vertex = vertexId(vertex)
line = "getIncomingEdges#" + str(self.id).rstrip() + "#" + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
endpointList = sys.stdin.readline()
endpointList = endpointList.split("#")
edges = []
for i in range(len(endpointList)):
term = endpointList[i].rstrip()
term = term[1:-1]
e = self.termToEdge(term)
if e != None:
edges.append(e)
return edges
def getEdgeWeight(self, edge):
return self.getEdgeProperty(edge, "weight")
def getEdgeLabel(self, edge):
return self.getEdgeProperty(edge, "label")
def getEdgeProperty(self, edge, prop):
# internally: fill edge property dictionary
# return: String representing queried property
line = "getEdgeProperty#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
line = line + "#" + prop.rstrip().lower()
print(line.rstrip())
sys.stdout.flush()
edgeTuple = sys.stdin.readline().rstrip()
edge.sourceProperties(edgeTuple)
return edge.getProperty(prop)
def getVertexProperty(self, vertex, prop):
# internally: fill edge property dictionary
# return: String representing queried property
vid = vertexId(vertex)
line = "getVertexProperty#"+str(self.id).rstrip() + "#"
line = line + vid
line = line + "#" + prop.rstrip().lower()
print(line.rstrip())
sys.stdout.flush()
vertexTuple = sys.stdin.readline().rstrip()
vertex.sourceProperties(vertexTuple)
return vertex.getProperty(prop)
# end: best use privately!
def requestVertex(self):
line = "requestVertex#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
vid = sys.stdin.readline().rstrip()
vertex = self.getVertexOrNew(vid)
return vertex
def requestRandomVertex(self):
line = "requestRandomVertex#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
vid = sys.stdin.readline().rstrip()
vertex = self.getVertexOrNew(vid)
return vertex
def requestEdge(self):
line = "requestEdge#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
vid = sys.stdin.readline().rstrip()
edge = self.getEdgeOrNew(vid)
return edge
def requestRandomEdge(self):
line = "requestRandomEdge#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
eid = sys.stdin.readline().rstrip()
edge = self.getEdgeOrNew(eid)
return edge
def requestInteger(self):
line = "requestInteger#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
i = sys.stdin.readline().rstrip()
return int(i)
def requestFloat(self):
line = "requestFloat#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
d = sys.stdin.readline().rstrip()
return float(d)
def requestString(self):
line = "requestString#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
st = sys.stdin.readline().rstrip()
return str(st)
# runtime changer functions
def pauseUntilSpacePressed(self, *args):
line = "pauseUntilSpacePressed"
rank = None
try:
rank = int(args[0])
except:
pass
if len(args) > 0 and rank != None:
rank = int(args[0])
args = args[1:]
argString = ""
if rank != None:
argString += "#"+str(rank).rstrip()
for key in sorted(self.variablesToTrack.keys()):
term = "#("+str(key).rstrip()+"=" + \ str(self.variablesToTrack[key]).rstrip()+")"
argString = argString + term.rstrip()
for x in args:
if len(x) != 2:
argString = "#(syntax=pauseUntilSpacePressed((key, val)))"
break
if (type(x) == list):
for each in x:
term = "#("+"arrayyyy"+str(each[0])+"="+str(each[1])+")"
argString = argString + term
else:
term = "#("+str(x[0])+"="+str(x[1])+")"
argString = argString + term.rstrip()
line = line + argString
print(line)
sys.stdout.flush()
toSkip = sys.stdin.readline()
def track(self, name, var):
# ideally, something like this:
self.variablesToTrack[name] = var # if this is a pointer, it will work
# if it is an int or str, or some other non-reference type, it will not
def unTrack(self, name):
del self.variablesToTrack[name]
def sendMessage(self, toSend):
print(toSend)
sys.stdout.flush()
def message(self, message):
print("message#"+str(self.id).rstrip() + "#"+str(message).rstrip())
sys.stdout.flush()
def sendErrorToGralog(self, toSend):
print("error#"+str(self.id).rstrip() + "#"+str(toSend).rstrip())
sys.stdout.flush()
exit()
def mistakeLine(self):
print("wubbadubdub 3 men in a tub")
sys.stdout.flush()
sys.stdin.readline()
def pause(self, *args):
self.pauseUntilSpacePressed(*args)
# end runtime changer functions
def __str__(self):
vertices = [str(v) for v in self.id_to_vertex]
vertices.sort()
edges = [str(e) for e in self.getEdges()]
edges.sort()
return "VERTICES: " + " ".join(vertices) + "\nEDGES: " + " ".join(edges)
def gPrint(message):
if not message: # empty: print nothing except the new line (hacked with \t; <space> doesn't work)
print("gPrint#-1#" + "\t")
sys.stdout.flush()
else:
message = str(message)
lines = message.split('\n')
for line in lines:
print("gPrint#-1#" + line)
sys.stdout.flush()
def representsInt(s):
try:
int(s)
return True
except ValueError:
return False
| gpl-3.0 | -6,769,466,513,858,263,000 | 34.898176 | 203 | 0.458914 | false |
ExcaliburZero/jekyll-helper | jekyll_helper/AboutJekyllHelperDialog.py | 1 | 1842 | # -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2015 <Christopher Wells> <[email protected]>
# Copyright (C) 2015 <Rui914>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
### END LICENSE
from locale import gettext as _
import logging
logger = logging.getLogger('jekyll_helper')
from jekyll_helper_lib.AboutDialog import AboutDialog
# See jekyll_helper_lib.AboutDialog.py for more details about how this class works.
class AboutJekyllHelperDialog(AboutDialog):
__gtype_name__ = "AboutJekyllHelperDialog"
def finish_initializing(self, builder): # pylint: disable=E1002
"""Set up the about dialog"""
super(AboutJekyllHelperDialog, self).finish_initializing(builder)
# Code for other initialization actions should be added here.
| mit | -6,735,981,486,307,116,000 | 45.05 | 83 | 0.757329 | false |
balghane/py4frc | py4frc.py | 1 | 21828 | #-------------------------------------------------------------------------------
# Name: FRC
# Purpose: Module to simplify getting standings, match results, and OPRs
# from the FIRST web pages
# Author: BaselA
#-------------------------------------------------------------------------------
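# Rough usage sketch (assumes network access to the FIRST/frclinks pages and a
# 2014 event name or code accepted by getCode(), e.g. "casj"/"Silicon Valley"):
#   teams = getTeamlist("casj")
#   quals, elims = getMatches("Silicon Valley")
#   standings = getStandings("casj")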
from BeautifulSoup import BeautifulSoup
import urllib2
from re import search
from re import sub
from re import compile
from numpy.linalg import solve
from numpy.linalg import cholesky
import numpy
eventInfo = {'code': {'week': '6', 'code': 'code', 'name': 'Colorado'},
'mdba': {'week': '6', 'code': 'mdba', 'name': 'Chesapeake'},
'ctgro': {'week': '2', 'code': 'ctgro', 'name': 'Groton'},
'waamv': {'week': '1', 'code': 'waamv', 'name': 'Auburn Mountainview'},
'wamou': {'week': '3', 'code': 'wamou', 'name': 'Mt. Vernon'},
'washo': {'week': '4', 'code': 'washo', 'name': 'Shorewood'},
'vari': {'week': '4', 'code': 'vari', 'name': 'Virginia'},
'mitry': {'week': '6', 'code': 'mitry', 'name': 'Troy'},
'lake': {'week': '6', 'code': 'lake', 'name': 'Bayou'},
'njbri': {'week': '5', 'code': 'njbri', 'name': 'Bridgewater-Raritan'},
'nytr': {'week': '3', 'code': 'nytr', 'name': 'New York Tech Valley'},
'nyli': {'week': '5', 'code': 'nyli', 'name': 'SBPLI Long Island'},
'okok': {'week': '5', 'code': 'okok', 'name': 'Oklahoma'},
'onwi': {'week': '6', 'code': 'onwi', 'name': 'Windsor Essex Great Lakes'},
'azch': {'week': '4', 'code': 'azch', 'name': 'Arizona'},
'ilch': {'week': '6', 'code': 'ilch', 'name': 'Midwest'},
'txho': {'week': '6', 'code': 'txho', 'name': 'Lone Star'},
'mndu': {'week': '2', 'code': 'mndu', 'name': 'Lake Superior'},
'gadu': {'week': '5', 'code': 'gadu', 'name': 'Peachtree'},
'ncre': {'week': '3', 'code': 'ncre', 'name': 'North Carolina'},
'onwa': {'week': '4', 'code': 'onwa', 'name': 'Waterloo'},
'misou': {'week': '1', 'code': 'misou', 'name': 'Southfield'},
'mrcmp': {'week': '7', 'code': 'mrcmp', 'name': 'Mid-Atlantic Robotics FRC Region Championship'},
'melew': {'week': '6', 'code': 'melew', 'name': 'Pine Tree'},
'milan': {'week': '6', 'code': 'milan', 'name': 'Lansing'},
'mxmc': {'week': '3', 'code': 'mxmc', 'name': 'Mexico City'},
'nyny': {'week': '6', 'code': 'nyny', 'name': 'New York City'},
'arfa': {'week': '2', 'code': 'arfa', 'name': 'Arkansas'},
'qcmo': {'week': '4', 'code': 'qcmo', 'name': 'Festival de Robotique FRC a Montreal'},
'miwmi': {'week': '4', 'code': 'miwmi', 'name': 'West Michigan'},
'scmb': {'week': '1', 'code': 'scmb', 'name': 'Palmetto'},
'onnb': {'week': '5', 'code': 'onnb', 'name': 'North Bay'},
'mosl': {'week': '3', 'code': 'mosl', 'name': 'St. Louis'},
'orosu': {'week': '6', 'code': 'orosu', 'name': 'Oregon State University'},
'pahat': {'week': '1', 'code': 'pahat', 'name': 'Hatboro-Horsham'},
'dcwa': {'week': '5', 'code': 'dcwa', 'name': 'Greater DC'},
'wache': {'week': '3', 'code': 'wache', 'name': 'Eastern Washington University'},
'ctsou': {'week': '4', 'code': 'ctsou', 'name': 'Southington'},
'nhnas': {'week': '1', 'code': 'nhnas', 'name': 'Granite State'},
'onto2': {'week': '1', 'code': 'onto2', 'name': 'Greater Toronto West'},
'orore': {'week': '2', 'code': 'orore', 'name': 'Oregon City'},
'nhdur': {'week': '2', 'code': 'nhdur', 'name': 'UNH'},
'inwl': {'week': '4', 'code': 'inwl', 'name': 'Boilermaker'},
'misjo': {'week': '5', 'code': 'misjo', 'name': 'St. Joseph'},
'rismi': {'week': '4', 'code': 'rismi', 'name': 'Rhode Island'},
'onto': {'week': '2', 'code': 'onto', 'name': 'Greater Toronto East'},
'necmp': {'week': '7', 'code': 'necmp', 'name': 'New England FRC Region Championship'},
'mitvc': {'week': '4', 'code': 'mitvc', 'name': 'Traverse City'},
'mawor': {'week': '3', 'code': 'mawor', 'name': 'WPI'},
'inth': {'week': '2', 'code': 'inth', 'name': 'Crossroads'},
'mndu2': {'week': '2', 'code': 'mndu2', 'name': 'Northern Lights'},
'flfo': {'week': '6', 'code': 'flfo', 'name': 'South Florida'},
'miket': {'week': '2', 'code': 'miket', 'name': 'Kettering University'},
'mihow': {'week': '3', 'code': 'mihow', 'name': 'Howell'},
'waell': {'week': '5', 'code': 'waell', 'name': 'Central Washington University'},
'wimi': {'week': '4', 'code': 'wimi', 'name': 'Wisconsin'},
'calb': {'week': '4', 'code': 'calb', 'name': 'Los Angeles'},
'casd': {'week': '2', 'code': 'casd', 'name': 'San Diego'},
'miliv': {'week': '5', 'code': 'miliv', 'name': 'Livonia'},
'casa': {'week': '3', 'code': 'casa', 'name': 'Sacramento'},
'casb': {'week': '1', 'code': 'casb', 'name': 'Inland Empire'},
'mabos': {'week': '5', 'code': 'mabos', 'name': 'Northeastern University'},
'casj': {'week': '6', 'code': 'casj', 'name': 'Silicon Valley'},
'txlu': {'week': '2', 'code': 'txlu', 'name': 'Hub City'},
'mibed': {'week': '6', 'code': 'mibed', 'name': 'Bedford'},
'txsa': {'week': '1', 'code': 'txsa', 'name': 'Alamo'},
'nvlv': {'week': '6', 'code': 'nvlv', 'name': 'Las Vegas'},
'txda': {'week': '3', 'code': 'txda', 'name': 'Dallas'},
'migul': {'week': '2', 'code': 'migul', 'name': 'Gull Lake'},
'abca': {'week': '6', 'code': 'abca', 'name': 'Western Canada'},
'pncmp': {'week': '7', 'code': 'pncmp', 'name': 'Autodesk FRC Championship'},
'orwil': {'week': '4', 'code': 'orwil', 'name': 'Wilsonville'},
'utwv': {'week': '3', 'code': 'utwv', 'name': 'Utah'},
'wasno': {'week': '2', 'code': 'wasno', 'name': 'Glacier Peak'},
'njfla': {'week': '1', 'code': 'njfla', 'name': 'Mt. Olive'},
'ista': {'week': '6', 'code': 'ista', 'name': 'Israel'},
'nyro': {'week': '5', 'code': 'nyro', 'name': 'Finger Lakes'},
'ilil': {'week': '1', 'code': 'ilil', 'name': 'Central Illinois'},
'mnmi': {'week': '5', 'code': 'mnmi', 'name': 'Minnesota 10000 Lakes'},
'njtab': {'week': '4', 'code': 'njtab', 'name': 'Lenape-Seneca'},
'miwat': {'week': '5', 'code': 'miwat', 'name': 'Waterford'},
'hiho': {'week': '5', 'code': 'hiho', 'name': 'Hawaii'},
'njcli': {'week': '3', 'code': 'njcli', 'name': 'Clifton'},
'papi': {'week': '5', 'code': 'papi', 'name': 'Greater Pittsburgh'},
'ohci': {'week': '5', 'code': 'ohci', 'name': 'Queen City'},
'ohcl': {'week': '4', 'code': 'ohcl', 'name': 'Buckeye'},
'miesc': {'week': '3', 'code': 'miesc', 'name': 'Escanaba'},
'tnkn': {'week': '5', 'code': 'tnkn', 'name': 'Smoky Mountains'},
'mokc': {'week': '3', 'code': 'mokc', 'name': 'Greater Kansas City'},
'cthar': {'week': '5', 'code': 'cthar', 'name': 'Hartford'},
'flor': {'week': '3', 'code': 'flor', 'name': 'Orlando'},
'paphi': {'week': '3', 'code': 'paphi', 'name': 'Springside Chestnut Hill'},
'micen': {'week': '1', 'code': 'micen', 'name': 'Center Line'},
'mimid': {'week': '4', 'code': 'mimid', 'name': 'Great Lakes Bay Region'},
'mnmi2': {'week': '5', 'code': 'mnmi2', 'name': 'Minnesota North Star'},
'micmp': {'week': '7', 'code': 'micmp', 'name': 'Michigan FRC State Championship'},
'cama': {'week': '2', 'code': 'cama', 'name': 'Central Valley'}}
codes =['abca','arc','arfa','azch','calb','cama','casa','casb','casd','casj',
'cmp','code','ctgro','cthar','ctsou','cur','dcwa','flfo','flor','gadu','gal',
'hiho','ilch','ilil','inth','inwl','ista','lake','mabos','mawor','mdba','melew',
'mibed','micen','micmp','miesc','migul','mihow','miket','milan','miliv','mimid',
'misjo','misou','mitry','mitvc','miwat','miwmi','mndu','mndu2','mnmi','mnmi2',
'mokc','mosl','mrcmp','mxmc','ncre','necmp','new','nhdur','nhnas','njbri','njcli',
'njfla','njtab','nvlv','nyli','nyny','nyro','nytr','ohcl','ohci','okok','onnb',
'onto','onto2','onwa','onwi','orore','orosu','orwil','pahat','paphi','papi',
'pncmp','qcmo','rismi','scmb','tnkn','txda','txho','txlu','txsa','utwv','vari',
'waahs','waamv','wache','waell','wamou','washo','wasno','wimi']
abbreviations = {'abca':'abca','wcr':'abca','westcanada':'abca','westerncanada':'abca',
'westerncanadian':'abca','arc':'arc','archimedes':'arc','arfa':'arfa','razorback':'arfa',
'arkansas':'arfa','azch':'azch','phoenix':'azch','arizona':'azch','calb':'calb',
'losangeles':'calb','LA':'calb','cama':'cama','centralvalley':'cama','cvr':'cama',
'casa':'casa','sacramento':'casa','casb':'casb','inlandempire':'casb','casd':'casd',
'sandiego':'casd','casj':'casj','siliconvalley':'casj','svr':'casj','championship':'cmp',
'cmp':'cmp','einstein':'cmp','code':'code','colorado':'code','groton':'ctgro','ctgro':'ctgro',
'connecticut':'cthar','cthar':'cthar','hartford':'cthar','ctha':'cthar','southington':'ctsou',
'ctsou':'ctsou','cur':'cur','curie':'cur','dc':'dcwa','dcwa':'dcwa','washington':'dcwa',
'washingtondc':'dcwa','flbr':'flfo','southflorida':'flfo','flor':'flor','orlando':'flor',
'gadu':'gadu','peachtree':'gadu','gal':'gal','galileo':'gal','hawaii':'hiho','hiho':'hiho',
'ilch':'ilch','midwest':'ilch','ilil':'ilil','centralillinois':'ilil','centralil':'ilil',
'centillinois':'ilil','centil':'ilil','crossroads':'inth','inth':'inth','bmr':'inwl',
'boilermaker':'inwl','inwl':'inwl','israel':'ista','isreal':'ista','ista':'ista',
'bayou':'lake','lake':'lake','boston':'mabos','mabos':'mabos','mabo':'mabos',
'northeastern':'mabos','mawo':'mawor','wpi':'mawor','mawor':'mawor','chesapeake':'mdba',
'mdba':'mdba','mele':'melew','pine tree':'melew','ptr':'melew','melew':'melew',
'bedford':'mibed','mibed':'mibed','centerline':'micen','micen':'micen',
'michiganstatechampionship':'micmp','micmp':'micmp','msc':'micmp','escanaba':'miesc',
'miesc':'miesc','gulllake':'migul','migul':'migul','howell':'mihow','mihow':'mihow',
'kettering':'miket','ketteringuniversity':'miket','miket':'miket','lansing':'milan',
'milan':'milan','livonia':'miliv','miliv':'miliv','mimid':'mimid','greatlakesbay':'mimid',
'greatlakesbayregion':'mimid','greatlakes':'mimid','misou':'misou','Southfield':'misou',
'misjo':'misjo','stjoe':'misjo','stjoseph':'misjo','mitry':'mitry','troy':'mitry',
'mitvc':'mitvc','tc':'mitvc','traversecity':'mitvc','miwfd':'miwat','waterford':'miwat',
'miwat':'miwat','miwmi':'miwmi','westmichigan':'miwmi','wmr':'miwmi','lakesuperior':'mndu',
'mndu':'mndu','mndu2':'mndu2','northernlights':'mndu2','10000lakes':'mnmi',
'10klakes':'mnmi','mnmi':'mnmi','minnesotanorthstar':'mnmi2','mnmi2':'mnmi2',
'northstar':'mnmi2','greaterkansascity':'mokc','kansascity':'mokc','kc':'mokc',
'kcr':'mokc','mokc':'mokc','mosl':'mosl','stlouis':'mosl','lehigh':'mrcmp',
'mar':'mrcmp','marcmp':'mrcmp','mrcmp':'mrcmp','mexico':'mxmc','mexicocity':'mxmc',
'mxmc':'mxmc','ncr':'ncre','ncre':'ncre','northcarolina':'ncre','new':'new',
'newton':'new','newenglandcmp':'necmp','newenglandchampionship':'necmp',
'necmp':'necmp','nechampionship':'necmp','ne':'necmp','nhdur':'nhdur','unh':'nhdur',
'bae':'nhnas','baegranitestate':'nhnas','granitestate':'nhnas','gsr':'nhnas',
'nhma':'nhnas','nhnas':'nhnas','br':'njbri','bridgewater':'njbri',
'bridgewaterraritan':'njbri','njbrg':'njbri','njbri':'njbri','clifton':'njcli',
'njcli':'njcli','flanders':'njfla','mountolive':'njfla','mtolive':'njfla',
'njfla':'njfla','lenape':'njtab','lenapeseneca':'njtab','njlen':'njtab',
'njtab':'njtab','lasvegas':'nvlv','lvr':'nvlv','nvlv':'nvlv','vegas':'nvlv',
'longisland':'nyli','nyli':'nyli','sbplilongisland':'nyli','sbpli':'nyli',
'newyorkcity':'nyny','nyc':'nyny','nyny':'nyny','fingerlakes':'nyro','flr':'nyro',
'nyro':'nyro','newyorktechvalley':'nytr','techvalley':'nytr','nytr':'nytr',
'ohic':'ohci','qcr':'ohci','queencity':'ohci','ohci':'ohci','buckeye':'ohcl',
'ohcl':'ohcl','oklahoma':'okok','okok':'okok','okc':'okok','northbay':'onnb',
'onnb':'onnb','greatertorontoeast':'onto','gtre':'onto','onto':'onto',
'torontoeast':'onto','greatertorontowest':'onto2','gtrw':'onto2','onto2':'onto2',
'torontowest':'onto2','onwa':'onwa','waterloo':'onwa','onwi':'onwi','windsor':'onwi',
'windsoressex':'onwi','oregoncity':'orore','orore':'orore','oregonstate':'orosu',
'orosu':'orosu','wilsonville':'orwil','orwil':'orwil','hatborohorsham':'pahat',
'hh':'pahat','pahat':'pahat','chestnuthill':'paphi','paphi':'paphi','springside':'paphi',
'springsidechestnuthill':'paphi','papi':'papi','pittsburgh':'papi','pnw':'pncmp',
'pacificcmp':'pncmp','pacificnorthwestcmp':'pncmp','pnwcmp':'pncmp',
'pncmp':'pncmp','montreal':'qcmo','qcmo':'qcmo','rhodeisland':'rismi',
'rismi':'rismi','palmetto':'scmb','scmb':'scmb','smokymountains':'tnkn',
'smr':'tnkn','tnkn':'tnkn','dallas':'txda','txda':'txda','lonestar':'txho',
'lsr':'txho','txho':'txho','hubcity':'txlu','txlu':'txlu','alamo':'txsa',
'txsa':'txsa','utah':'utwv','utwv':'utwv','vari':'vari','virginia':'vari',
'auburn':'waahs','waahs':'waahs','auburnmtn':'waamv','auburnmountainview':'waamv',
'waamv':'waamv','centralwash':'waell','centralwashington':'waell','waell':'waell',
'mtvernon':'wamou','wamou':'wamou','spokane':'wache','wach':'wache','wasche':'wache',
'eastwash':'wache','eastwashington':'wache','easternwash':'wache','easternwashington':'wache',
'wache':'wache','shorewood':'washo','washo':'washo','glacierpeak':'wasno',
'wasno':'wasno','wimi':'wimi','wisconsin':'wimi'}
def getCode(phrase):
phrase = phrase.lower()
for code in codes:
if phrase == code:
return code
while search('[ -\.]', phrase):
phrase = sub("[ -\.]", "", phrase)
phrase = sub("district", "", sub("regional", "", phrase))
for abbrev in abbreviations:
if phrase == abbrev:
return abbreviations[abbrev]
def frclinksTo(code, whatDoYouWant, year):
code = getCode(code)
if whatDoYouWant == "None":
url = "http://frclinks.frclinks.com/e/"+str(code)
elif whatDoYouWant == "m" or whatDoYouWant == "r":
url = "http://frclinks.frclinks.com/e/"+str(whatDoYouWant)+"/"+str(code)
if year != 2014:
url = url + "/"+str(year)
soup = BeautifulSoup(urllib2.urlopen(url))
return soup.findAll('script')[2].getText()[19:-2]
def getTeamlist(code):
code = getCode(code)
soup = BeautifulSoup(urllib2.urlopen(frclinksTo(code, "None", 2014)))
teams = []
for team in soup.body.center.table.tr.td.p.center.table.tr.td.findAll('a')[5:]:
teams.append(team.getText())
return teams
def getTeamStandings(code):
code = getCode(code)
soup = BeautifulSoup(urllib2.urlopen(frclinksTo(code, "r", 2014)))
teams = []
for team in soup.findAll('tr', {"style":"background-color:#FFFFFF;"}):
for i,value in enumerate(team):
if i == 3:
try:
teams.append(value.getText())
except AttributeError:
pass
teams = list(str(z) for z in sorted(int(x) for x in teams))
return teams
def removeBlanks(array):
while True:
try:
if array[-1][9] == "" or array[-1][9] == " ":
array.pop()
else:
return array
except IndexError:
try:
array.pop()
except IndexError:
return []
def getMatches(code):
code = getCode(code)
soup = BeautifulSoup(urllib2.urlopen(frclinksTo(code,"m", 2014)))
values = []
quals= []
for match in soup.findAll('tr', {"style":"background-color:#FFFFFF;"}) + soup.findAll('tr', {"style": compile('mso-yfti-irow:[0-9]')})[6:-3]:
if search('(E|D|T)', match.td.getText()[0]):
pass
else:
for value in match:
if isinstance(value, basestring):
pass
else:
values.append(value.getText())
quals.append(values)
values = []
elims = []
try:
quals[-1][1]
except IndexError:
del(quals[-1])
if code == "cmp":
while len(quals)>0:
try:
if search('(i|t)', quals[-1][1]):
elims.append(quals.pop())
except:
pass
else:
while search('(i|t)', quals[-1][1]):
elims.append(quals.pop())
elims.reverse()
for match in elims:
del match[1]
quals = removeBlanks(quals)
elims = removeBlanks(elims)
return quals, elims
def getStandings(code):
code = getCode(code)
soup = BeautifulSoup(urllib2.urlopen(frclinksTo(code, "r", 2014)))
standings = []
for team in soup.findAll('tr', {"style":"background-color:#FFFFFF;"}):
values = []
for value in team:
try:
values.append(value.getText())
except AttributeError:
pass
values[6]=float(values[6])-float(values[5])-float(values[3])
standings.append(values)
return standings
def getOprMatrix(code):
code = getCode(code)
teamList = getTeamStandings(code)
teamDict={}
for team in teamList:
otherTeamList = {"totalScore":0}
for otherTeam in teamList:
otherTeamList[otherTeam] = 0
teamDict[team] = otherTeamList
alliances=[]
scores =[]
for matchNum, match in enumerate(getMatches(code)[0]):
redData=match[2:5]
scores.append(match[8])
blueData=match[5:8]
scores.append(match[9])
alliances.append(redData)
alliances.append(blueData)
for allianceNum, alliance in enumerate(alliances):
for team in alliance:
teamDict[team][alliance[0]] = teamDict[team][alliance[0]] + 1
teamDict[team][alliance[1]] = teamDict[team][alliance[1]] + 1
teamDict[team][alliance[2]] = teamDict[team][alliance[2]] + 1
teamDict[team]["totalScore"] = teamDict[team]["totalScore"] + int(scores[allianceNum])
oprMatrix =[]
teamScores = []
for team in teamList:
oprMatrixLine = []
for otherTeam in teamList:
oprMatrixLine.append(teamDict[team][otherTeam])
oprMatrix.append(oprMatrixLine)
teamScores.append(teamDict[team]["totalScore"])
return oprMatrix, teamScores
def calcOPR(oprMatrix, scores):
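    # OPR (offensive power rating) is the solution x of the linear system
    # A*x = b, where A[i][j] counts how often teams i and j shared an alliance
    # (A[i][i] is matches played) and b[i] is the sum of team i's alliance
    # scores; the Cholesky factorization A = L*L^T yields x via two
    # triangular solves.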
try:
L = cholesky(oprMatrix)
y = solve(L, scores)
OPR = solve(L.T.conj(), y)
except numpy.linalg.LinAlgError:
return []
return OPR
def getRegOpr(code):
code = getCode(code)
oprMatrix, scores = getOprMatrix(code)
OPR = calcOPR(oprMatrix, scores)
if OPR == []:
return OPR
for i in range(len(OPR)):
OPR[i] = round(float(OPR[i]), 2)
return OPR
def getAllOprs(code):
code = getCode(code)
oprMatrix, totalScores = getOprMatrix(code)
teamDict={}
autoScores = []
assistScores = []
trussScores = []
foulScores = []
teamlist = getTeamStandings(code)
for teamNum, team in enumerate(getStandings(code)):
teamDict[team[1]] = {"autoScore":team[4], "assistScore":team[3], "trussScore":team[5], "foulScore":team[6]}
for team in teamlist:
autoScores.append(teamDict[team]["autoScore"])
assistScores.append(teamDict[team]["assistScore"])
trussScores.append(teamDict[team]["trussScore"])
foulScores.append(teamDict[team]["foulScore"])
totOPR = calcOPR(oprMatrix, totalScores)
autoOPR = calcOPR(oprMatrix, autoScores)
assistOPR = calcOPR(oprMatrix, assistScores)
trussOPR = calcOPR(oprMatrix, trussScores)
foulOPR = calcOPR(oprMatrix, foulScores)
oprDict={}
try:
for teamNum, team in enumerate(getTeamStandings(code)):
oprDict[team] = {"OPR":totOPR[teamNum], "autoOPR":autoOPR[teamNum], "assistOPR":assistOPR[teamNum], "trussOPR":trussOPR[teamNum], "foulOPR":foulOPR[teamNum]}
for team in oprDict:
shift = oprDict[team]["OPR"]-(oprDict[team]["autoOPR"]+oprDict[team]["assistOPR"]+oprDict[team]["trussOPR"]+oprDict[team]["foulOPR"])
oprSum = abs(oprDict[team]["autoOPR"])+abs(oprDict[team]["assistOPR"])+abs(oprDict[team]["trussOPR"])+abs(oprDict[team]["foulOPR"])
for oprType in ["autoOPR", "assistOPR", "foulOPR"]:
oprDict[team][oprType] +=(shift/oprSum)*abs(oprDict[team][oprType])
for team in oprDict:
for value in oprDict[team]:
oprDict[team][value] = round(float(oprDict[team][value]),2)
except IndexError:
pass
return oprDict
def main():
while True:
print "To get the teamlist for an event, type 'teams'"
print "To get the team OPRs for an event, type 'opr'"
print "To get all the team OPR subtypes for an event, type 'allopr'"
print "To get the standings for an event, type 'standings'"
want = raw_input("What do you want? ").lower()
while search('[ -\.]', want):
want = sub("[ -\.]", "", want)
if search("allopr", want):
code = getCode(raw_input("What event? "))
opr = getAllOprs(code)
teams = getTeamStandings(code)
print "\t".join(["Team", "OPR ", "autoOPR", "assistOPR", "trussOPR", "foulOPR"])
for team in teams:
print "\t".join([team+int(4-len(team))*" ", str(opr[team]["OPR"]), str(opr[team]["autoOPR"]), str(opr[team]["teleOPR"]), str(opr[team]["climbOPR"])])
elif search("opr", want):
code = getCode(raw_input("What event? "))
opr = getRegOpr(code)
for i,team in enumerate(getTeamStandings(code)):
print team, opr[i]
elif search("team(s|list)", want):
code = getCode(raw_input("What event? "))
teams = getTeamlist(code)
for team in teams:
print team
elif search("(standing|ranking)", want):
code = getCode(raw_input("What event? "))
standings = getStandings(code)
print "\t".join(["Rank", "Team", "QP ","AP ", "CP ", "TP ", "Record", "DQ", "Played"])
for team in standings:
team[0] += int(4-len(team[0]))*" "
if len(team[1]) < 4:
team[1] += int(4-len(team[1]))*" "
print "\t".join(team)
else:
print "I'm not sure what you mean. Try again?"
if __name__ == "__main__":
main()
| mit | 4,354,438,116,930,774,500 | 47.941704 | 169 | 0.573255 | false |
mrocklin/streams | streamz/tests/test_sources.py | 1 | 2787 | from flaky import flaky
import pytest
from streamz import Source
from streamz.utils_test import wait_for, await_for, gen_test
import socket
@flaky(max_runs=3, min_passes=1)
def test_tcp():
port = 9876
s = Source.from_tcp(port)
out = s.sink_to_list()
s.start()
wait_for(lambda: s.server is not None, 2, period=0.02)
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("localhost", port))
sock.send(b'data\n')
sock.close()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("localhost", port))
sock.send(b'data\n')
sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock2.connect(("localhost", port))
sock2.send(b'data2\n')
wait_for(lambda: out == [b'data\n', b'data\n', b'data2\n'], 2,
period=0.01)
finally:
s.stop()
sock.close()
sock2.close()
@flaky(max_runs=3, min_passes=1)
@gen_test(timeout=60)
def test_tcp_async():
port = 9876
s = Source.from_tcp(port)
out = s.sink_to_list()
s.start()
yield await_for(lambda: s.server is not None, 2, period=0.02)
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("localhost", port))
sock.send(b'data\n')
sock.close()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("localhost", port))
sock.send(b'data\n')
sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock2.connect(("localhost", port))
sock2.send(b'data2\n')
yield await_for(lambda: out == [b'data\n', b'data\n', b'data2\n'], 2,
period=0.01)
finally:
s.stop()
sock.close()
sock2.close()
def test_http():
requests = pytest.importorskip('requests')
port = 9875
s = Source.from_http_server(port)
out = s.sink_to_list()
s.start()
wait_for(lambda: s.server is not None, 2, period=0.02)
r = requests.post('http://localhost:%i/' % port, data=b'data')
wait_for(lambda: out == [b'data'], 2, period=0.01)
assert r.ok
r = requests.post('http://localhost:%i/other' % port, data=b'data2')
wait_for(lambda: out == [b'data', b'data2'], 2, period=0.01)
assert r.ok
s.stop()
with pytest.raises(requests.exceptions.RequestException):
requests.post('http://localhost:%i/other' % port, data=b'data2')
@flaky(max_runs=3, min_passes=1)
@gen_test(timeout=60)
def test_process():
cmd = ["python", "-c", "for i in range(4): print(i)"]
s = Source.from_process(cmd)
out = s.sink_to_list()
s.start()
yield await_for(lambda: out == [b'0\n', b'1\n', b'2\n', b'3\n'], timeout=5)
s.stop()
| bsd-3-clause | 1,888,732,176,384,832,300 | 27.731959 | 79 | 0.587729 | false |
eugenekolo/project-euler | eulerlib.py | 1 | 3556 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
################################################################################
# Commonly used functions for Euler Project (www.projecteuler.com)
# Author: Eugene Kolo 2014
# Contact: www.eugenekolo.com
################################################################################
import math, sys, os
from collections import defaultdict
# Simple prime check for number n
def isPrime(n):
if n == 2 or n == 3: return True # 2 or 3
if n < 2 or n%2 == 0: return False # 1, negative or even
for i in range(3,int(math.sqrt(n))+1,2):
if n%i == 0:
return False
return True
# Sieve of Eratosthenes, finds prime #s up to n in O(nloglogn)
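# e.g. PrimeSieve(20) -> [2, 3, 5, 7, 11, 13, 17, 19]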
def PrimeSieve(n):
# Assume [0,n) are all primes
primes = [True for i in range(0,n)]
for i in range(2,int(math.ceil(math.sqrt(n)))):
if primes[i] is True:
a = 0
while (i**2 + a*i < n): # Remove every multiple of i
primes[i**2 + a*i] = False
a += 1
return [i for i in range(2,n) if primes[i] is True]
# Return the nth fibonacci number
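# e.g. fib(1) == fib(2) == 1, fib(10) == 55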
def fib(n):
counter = 2 # Start at fib(2) as F_1 = 1, F_2 = 1
last, cur = 1,1
while(counter < n):
tmpcur = cur
cur = cur + last
last = tmpcur
counter += 1
return cur
# Return the next power of 10 at or above n (2->10, 82->100, 255->1000)
# [TODO] Optimize in some bit twiddling and basic integer math instead?
def next10(n):
return 10**int(math.ceil(math.log(n,10)))
# Sum of the proper divisiors of n
def sumDivisors(n):
    total = 1 # n always has at least 1 as a proper divisor
for i in range(2,int(math.sqrt(n))+1):
if n % i == 0:
# Add the divisor pair, except only 1 of a perfect square
            if (i != n//i):
                total += (i + n//i)
else:
total += i
return total
# Check if input string is a palindrome
def isPalindrome(string):
for i in range(0,len(string)//2):
if (string[i] != string[len(string)-1-i]):
return False
return True
# Concatenated product of n and mylist
# n = 192, mylist = [1 2 3], output = '192'+'384'+'576' = 192384576
def concatProd(n, mylist):
prods = [n * i for i in mylist]
return int("".join([str(prod) for prod in prods]))
# Returns True if n is a pandigital number
def isPanDig(n):
n = str(n)
if (len(n) <= 9 and len(n) > 0): # Number must be 1 to 9 digits long
for i in range(1,len(n)+1): # Range of all the digits
if n.find(str(i)) == -1: # Digit not found
return False
else:
return False
return True
# Returns a dictionary of factors->amount
# n = 644 -> {(2,2),(7,1),(23,1)}
def factor(n):
d = defaultdict(int)
# Make n odd and get rid of the prime 2's
while n%2 == 0:
        n = n//2
d[2] += 1
# Factor n down by trial divison
# This works because once an odd divisor is found, it must be prime
# otherwise a previous one would have already occured
for i in range(3, int(math.sqrt(n))+1, 2):
while n % i == 0:
            n = n//i
            d[i] += 1
# n is prime
if (n > 2):
d[n] += 1
return d
def num(s):
try:
return int(s)
except ValueError:
return float(s)
#[TODO] Make this a singleton
class stdoutToggle:
actualstdout = sys.stdout
def on(self):
sys.stdout = self.actualstdout
def off(self):
sys.stdout = open(os.devnull,'w')
| mit | 5,236,229,105,911,455,000 | 28.147541 | 80 | 0.541901 | false |
jjdmol/LOFAR | CEP/GSM/bremen/src/pipeline.py | 1 | 8042 | #!/usr/bin/env python
import logging
import math
import os
import monetdb.sql as db
from src.errors import SourceException, ImageStateError
from src.gsmconnectionmanager import GSMConnectionManager
from src.gsmlogger import get_gsm_logger
from src.sqllist import get_sql, get_svn_version, GLOBALS
from src.grouper import Grouper
from src.updater import run_update
from src.utils import get_pixels, load_parameters
from src.matcher import MatcherF90, MatcherSQL
from src.resolveFlux import FluxResolver
from src.resolveQuad import QuadResolver
from src.resolveSimple import SimpleResolver
class GSMPipeline(object):
"""
General pipeline class.
"""
def __init__(self, custom_cm=None, use_monet=None,
profile=False,
**params):
"""
@param custom_cm: allows to pass an object to be used as connection
manager.
"""
self.log = get_gsm_logger('pipeline', 'pipeline.log')
self.use_monet = use_monet
if not custom_cm:
if use_monet != None:
self.conn_manager = GSMConnectionManager(use_monet=use_monet)
else:
self.conn_manager = GSMConnectionManager()
else:
self.conn_manager = custom_cm
try:
self.conn = self.conn_manager.get_connection(**params)
if profile:
self.conn.profile = True
self.conn.log.setLevel(logging.DEBUG)
self.conn.commit()
except db.Error as exc:
self.log.error("Failed to connect: %s" % exc)
raise exc
self.options = load_parameters('%s/settings.ini' %
os.path.dirname(__file__))
self.log.debug('Pipeline parameters: %s' % self.options)
self.log.info('Pipeline started.')
def reopen_connection(self, **params):
"""
Reopen connection in case it was closed.
"""
if not self.conn or not self.conn.established():
try:
self.conn = self.conn_manager.get_connection(**params)
self.log.info('Pipeline connection reopened.')
except db.Error as exc:
self.log.error("Failed to connect: %s" % exc)
raise exc
def read_image(self, source):
"""
Read image and detections from a given source.
"""
if source:
source.read_and_store_data(self.conn)
else:
raise SourceException('No source specified.')
def run_parset(self, parset):
"""
Process single parset file.
"""
self.conn.start()
parset.process(self.conn)
self.parset = parset
self.process_image(parset.image_id, parset.run_id)
self.log.info('Parset %s done.' % parset.filename)
return parset.image_id
def run_grouper(self):
"""
Detect/update and store groups of sources for later processing.
"""
#Update groups by merging overlapping patches.
cursor = self.conn.get_cursor(get_sql("GroupFinder"))
grouper = Grouper(cursor.fetchall())
while grouper.is_completed():
grouper.one_cycle()
self.conn.execute_set(get_sql("GroupUpdate",
grouper.group,
",".join(map(str, grouper.runcatset))))
grouper.cleanup()
for resolver in [SimpleResolver]:
self.run_resolver(resolver)
self.conn.execute(get_sql("GroupFill"))
def run_resolver(self, resolve_class):
#Running resolver
resolver = resolve_class(self.conn)
for group_id in self.conn.get_cursor(get_sql("GroupCycle")):
if not resolver.run_resolve(group_id[0]):
#Failed to resolve
self.log.debug("Group id %s not resolved by %s." %
(group_id[0], resolver.__class__.__name__))
self.conn.log.debug("Group id %s not resolved." % group_id[0])
self.conn.execute_set(get_sql("GroupUpdate runcat",
group_id[0]))
else:
self.log.debug("Group id %s resolved by %s." %
(group_id[0], resolver.__class__.__name__))
self.conn.log.debug("Group id %s resolved." % group_id[0])
def update_image_pointing(self, image_id):
"""
Update image pointing to average ra/decl of all sources.
"""
avg_x, avg_y, avg_z, count = self.conn.exec_return(
get_sql('Image properties selector', image_id),
single_column=False)
avg_x, avg_y, avg_z = avg_x / count, avg_y / count, avg_z / count
decl = math.asin(avg_z)
ra = math.atan2(avg_x, avg_y)
self.conn.execute(get_sql('Image properties updater',
ra, decl, image_id))
def process_image(self, image_id, run_id=None, sources_loaded=False):
"""
Process single image.
@sources_loaded: True if there are records in the extractedsources
already.
"""
self.conn.start()
status, band, stokes, fov_radius, \
centr_ra, centr_decl, run_loaded, bmaj = \
self.conn.exec_return("""
select status, band, stokes, fov_radius,
centr_ra, centr_decl, run_id, bmaj
from images
where imageid = %s;""" % image_id, single_column=False)
if not run_id:
run_id = run_loaded
if status == 1:
raise ImageStateError('Image %s in state 1 (Ok). Cannot process' %
image_id)
GLOBALS.update({'i': image_id, 'r': run_id,
'b': band, 's': stokes})
if not sources_loaded:
self.conn.execute(get_sql('insert_extractedsources'))
self.conn.execute(get_sql('insert dummysources'))
if bmaj:
max_assoc = float(bmaj)
else:
max_assoc = float(self.options.get('maximum_association_distance'))
self.log.debug('Using options: %s' % self.options)
self.log.debug('Final max_assoc_dist %s' % max_assoc)
#Now do the matching!
if self.options.get('matcher') == 'F90':
matcher_class = MatcherF90
else:
matcher_class = MatcherSQL
matcher = matcher_class(self.conn, max_assoc,
self.options.get('match_distance'),
self.options.get('match_distance_extended'),
get_pixels(centr_ra, centr_decl, fov_radius + 0.5))
matcher.match(image_id)
self.conn.call_procedure("fill_temp_assoc_kind(%s);" % image_id)
#Process many-to-many;
self.run_grouper()
# Process one-to-one associations;
self.conn.execute(get_sql('add 1 to 1'))
#process one-to-many associations;
self.conn.execute(get_sql('add 1 to N'))
self.conn.execute_set(get_sql('update flux_fraction'))
#process many-to-one associations;
self.conn.execute_set(get_sql('add N to 1'))
#updating runningcatalog
run_update(self.conn, 'update runningcatalog')
run_update(self.conn, 'update runningcatalog extended')
self.conn.execute(get_sql('update runningcatalog XYZ'))
#First update, then insert new (!!!)
run_update(self.conn, 'update runningcatalog_fluxes')
self.conn.execute(get_sql('insert new bands for point sources'))
#inserting new sources
self.conn.execute_set(get_sql('Insert new sources'))
self.conn.execute_set(get_sql('Join extended'))
#update image status and save current svn verion.
self.conn.execute_set(get_sql('Cleanup', get_svn_version()))
if self.parset.recalculate_pointing:
self.update_image_pointing(image_id)
self.conn.commit()
| gpl-3.0 | -943,373,641,355,774,800 | 39.21 | 79 | 0.567645 | false |
SBillion/timetableasy | src/Course.py | 1 | 10584 | # -*- coding: utf-8 -*-
import gtk
import os
from random import randint
from db import db
from Timetableasy import app
global interface_course
class CourseInterface(object):
def __init__(self):
from GtkMapper import GtkMapper
mapper = GtkMapper('graphics/dialog_course.glade', self, app.debug)
self.action_add = mapper.glade.menu.add
self.dialog_confirmation = gtk.MessageDialog(None,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_QUESTION,
gtk.BUTTONS_OK_CANCEL,
None)
def add(self, obj):
"""connect buttons, set_title and display dialog_course"""
self.current_obj = obj
self.dialog.name.set_text("")
self.dialog.c_elearning.set_value(0)
self.dialog.c_classroom.set_value(0)
self.dialog.c_practice.set_value(0)
self.dialog.e_practice.set_value(0)
self.dialog.e_exam.set_value(0)
self.dialog.e_oral.set_value(0)
self.dialog.obj.set_title("Création d'un cours")
self.dialog.valid.connect("clicked", self.on_dialog_add)
result = self.dialog.obj.run()
self.dialog.obj.hide()
self.current_obj = None
return result
def change(self, obj):
"""connect buttons, set new title, fill fields with
object fields, and display dialog_course"""
self.current_obj = obj
self.dialog.name.set_text(obj.name)
self.dialog.c_elearning.set_value(obj.c_elearning)
self.dialog.c_classroom.set_value(obj.c_classroom)
self.dialog.c_practice.set_value(obj.c_practice)
self.dialog.e_practice.set_value(obj.e_practice)
self.dialog.e_exam.set_value(obj.e_exam)
self.dialog.e_oral.set_value(obj.e_oral)
self.dialog.obj.set_title("Modification du cours " + obj.name)
self.dialog.valid.connect("clicked", self.on_dialog_change)
result = self.dialog.obj.run()
self.dialog.obj.hide()
self.current_obj = None
return result
def duplicate(self, obj):
"""fill all dialog fields with obj fields, set new title
and display dialog_course """
self.current_obj = obj
self.dialog.name.set_text("")
self.dialog.c_elearning.set_value(obj.c_elearning)
self.dialog.c_classroom.set_value(obj.c_classroom)
self.dialog.c_practice.set_value(obj.c_practice)
self.dialog.e_practice.set_value(obj.e_practice)
self.dialog.e_exam.set_value(obj.e_exam)
self.dialog.e_oral.set_value(obj.e_oral)
self.dialog.obj.set_title("Duplication du cours " + obj.name)
self.dialog.valid.connect("clicked", self.on_dialog_add)
result = self.dialog.obj.run()
self.dialog.obj.hide()
self.current_obj = None
return result
def menu_display(self, obj, event):
self.current_obj = obj
self.menu.show_all()
self.menu.popup(None, None, None, event.button, event.time)
def get_dialog_fields(self):
"""get field of the dialog_course and fill course
object with that"""
obj = self.current_obj
obj.name = self.dialog.name.get_text()
obj.c_elearning = int(self.dialog.c_elearning.get_value())
obj.c_classroom = int(self.dialog.c_classroom.get_value())
obj.c_practice = int(self.dialog.c_practice.get_value())
obj.e_practice = int(self.dialog.e_practice.get_value())
obj.e_exam = int(self.dialog.e_exam.get_value())
obj.e_oral = int(self.dialog.e_oral.get_value())
# dialog callbacks
def on_dialog_cancel(self, widget):
"""close dialog_course"""
self.dialog.obj.response(gtk.RESPONSE_CANCEL)
def on_dialog_add(self, widget):
"""call get_dialog_fields, and call db.session_query,
response gtk.RESPONSE_OK"""
def query(db, obj):
"""callback for db.session_query, this query
add obj to session and call db.session_try_commit"""
db.session.add(obj)
db.session_try_commit()
obj = self.current_obj
self.get_dialog_fields()
db.session_query(query, obj,
str("insert course " + obj.name +
" (e-learning:" + str(obj.c_elearning) +
" ,classroom:" + str(obj.c_classroom) +
" ,practice:" + str(obj.c_practice) +
" ,eval practice:" + str(obj.e_practice) +
" ,exam:" + str(obj.e_exam) +
" ,oral:" + str(obj.e_oral) + ") in db"))
self.dialog.obj.response(gtk.RESPONSE_OK)
def on_dialog_change(self, widget):
"""call get_dialog_fields, call db.session_try_commit,
response gtk.RESPONSE_OK"""
self.get_dialog_fields()
db.session_try_commit()
self.dialog.obj.response(gtk.RESPONSE_OK)
# menu callbacks
def on_menu_add(self, widget):
"""initialize course, call keep all links, call duplicate
and call replug for update object entries"""
obj = self.current_obj
course = Course()
course.period = obj.period
course.teachers = obj.teachers
course.name = obj.name
course.c_elearning = obj.c_elearning
course.c_classroom = obj.c_classroom
course.c_practice = obj.c_practice
course.e_practice = obj.e_practice
course.e_exam = obj.e_exam
course.e_oral = obj.e_oral
if self.duplicate(course) == gtk.RESPONSE_OK:
for id in obj.row:
obj.row[id].tree.replug(obj.row[id].obj_parent)
else:
course.period = None
course.teachers = []
self.current_obj = None
def on_menu_edit(self, widget):
"""call self.change, and call replug for update object
entries"""
obj = self.current_obj
if self.change(obj) == gtk.RESPONSE_OK:
for id in obj.row:
obj.row[id].tree.replug(obj.row[id].obj_parent)
self.current_obj = None
def on_menu_delete(self, widget):
"""call dialog_confirmation for confirmation, if validate delete
object of the session, call db.session_try_commit and
call replug for update object entries, and destroy
dialog_confirmation"""
obj = self.current_obj
self.dialog_confirmation.set_markup("Etes vous sûre de vouloir supprimer le cours " + obj.name + " ?")
if self.dialog_confirmation.run() == gtk.RESPONSE_OK:
db.session.delete(obj)
db.session_try_commit()
for id in obj.row:
obj.row[id].tree.replug(obj.row[id].obj_parent)
self.dialog_confirmation.hide()
self.current_obj = None
class Course(object):
ADD_ACTION = 1
# TreeMgmt Callbacks
def cb_tree_pixbuf(self, tree, id):
"""return the pixbuf for the tree"""
image = gtk.Image()
if self.row[id].action == self.ADD_ACTION:
image.set_from_file(os.path.normpath('graphics/images/course_add.png'))
else:
image.set_from_file(os.path.normpath('graphics/images/course.png'))
return image.get_pixbuf()
def cb_tree_name(self, tree, id):
"""return the name for the tree"""
if self.row[id].action == self.ADD_ACTION:
return "Nouveau cours"
return self.name
def cb_tree_tooltip_text(self, tree, id):
"""return the tooltip text for the tree"""
if self.row[id].action == self.ADD_ACTION:
return "double clic pour ajouter un cours"
c = self.c_elearning + self.c_classroom + self.c_practice
		e = self.e_practice + self.e_exam + self.e_oral
event_c = 0
event_e = 0
for event in self.events:
if (event.modality == 'lesson_elearning' or
event.modality == 'lesson_classroom' or
event.modality == 'lesson_practice'):
event_c += event.time_length
elif (event.modality == 'evaluation_practice' or
event.modality == 'evaluation_exam' or
event.modality == 'evaluation_oral'):
event_e += event.time_length
# XXX color based on quota (ex: red if over event_c > c )
return str("Cours: " + str(event_c) + "h/" + str(c) + "h, " +
"Evaluations: " + str(event_e) + "h/" + str(e) + "h")
def on_tree_rightclick(self, tree, event, id):
"""call when the user do a right click on the tree row"""
if not self.row[id].action:
interface_course.menu_display(self, event)
def on_tree_selected(self, tree, id):
"""call when the user double click on the tree row"""
if self.row[id].action == self.ADD_ACTION:
self.period = self.row[id].obj_parent
if interface_course.add(self) == gtk.RESPONSE_OK:
for id in self.row:
self.row[id].tree.replug(self.row[id].obj_parent)
else:
self.period = None
# db_fill callback
def cb_fill(self, prefix):
"""callback for fill the db"""
def fill_insert(course, i, hours):
from Event import Event
hours_free = hours
course.name = prefix+str(i)
course.e_practice = randint(2, 6)
course.e_exam = randint(2, 4)
course.e_exam -= course.e_exam % 2
course.e_oral = randint(2, 4)
hours_free -= (course.e_practice + course.e_exam +
course.e_oral)
course.c_elearning = randint(1, max([6, hours_free]))
hours_free -= course.c_elearning
course.c_classroom = hours_free / 2
hours_free -= course.c_classroom
course.c_practice = hours_free
db.session.add(course)
hours_free = 700
i = 0
while hours_free >= 60:
course = Course()
course.period = self.period
hours = randint(24, 60)
fill_insert(course, i, hours)
hours_free -= hours
i += 1
fill_insert(self, i, hours_free)
def init():
""" initialize graphics interface_course define table course
and mapping"""
global interface_course
interface_course = CourseInterface()
# Database definition
from sqlalchemy import types, orm
from sqlalchemy.schema import Column, Table, Sequence, ForeignKey
from sqlalchemy.orm import relationship, backref, relation, mapper
# Dependencies
from Period import Period
from User import User
from Class import Class
t_course = Table('course', db.metadata,
Column('id', types.Integer,
Sequence('course_seq_id', optional = True),
nullable = False,
primary_key = True),
Column('name', types.VARCHAR(255),
nullable = False,
unique = True),
Column('c_elearning', types.Integer,
nullable = False),
Column('c_classroom', types.Integer,
nullable = False),
Column('c_practice', types.Integer,
nullable = False),
Column('e_practice', types.Integer,
nullable = False),
Column('e_exam', types.Integer,
nullable = False),
Column('e_oral', types.Integer,
nullable = False),
Column('id_period', types.Integer,
ForeignKey('period.id'),
nullable = False),
)
t_user_class_course = Table('user_class_course', db.metadata,
Column('id_user', types.Integer,
ForeignKey('user.id'),
nullable = False),
Column('id_class', types.Integer,
ForeignKey('class.id'),
nullable = False),
Column('id_course', types.Integer,
ForeignKey('course.id'),
nullable = False),
)
mapper(Course, t_course, properties = {
'period' : relationship(Period,
backref = backref('courses',
cascade = "all, delete-orphan",
order_by = t_course.c.name.desc())),
'teachers' : relationship(User,
secondary = t_user_class_course,
backref = 'courses'),
'classes' : relationship(Class,
secondary = t_user_class_course,
backref = 'courses'),
})
| agpl-3.0 | -1,121,114,700,832,665,100 | 29.148148 | 104 | 0.679928 | false |
Vagab0nd/SiCKRAGE | lib3/sqlalchemy/dialects/postgresql/base.py | 1 | 125610 | # postgresql/base.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: postgresql
:name: PostgreSQL
.. _postgresql_sequences:
Sequences/SERIAL/IDENTITY
-------------------------
PostgreSQL supports sequences, and SQLAlchemy uses these as the default means
of creating new primary key values for integer-based primary key columns. When
creating tables, SQLAlchemy will issue the ``SERIAL`` datatype for
integer-based primary key columns, which generates a sequence and server side
default corresponding to the column.
To specify a specific named sequence to be used for primary key generation,
use the :func:`~sqlalchemy.schema.Sequence` construct::
Table('sometable', metadata,
Column('id', Integer, Sequence('some_id_seq'), primary_key=True)
)
When SQLAlchemy issues a single INSERT statement, to fulfill the contract of
having the "last insert identifier" available, a RETURNING clause is added to
the INSERT statement which specifies the primary key columns should be
returned after the statement completes. The RETURNING functionality only takes
place if PostgreSQL 8.2 or later is in use. As a fallback approach, the
sequence, whether specified explicitly or implicitly via ``SERIAL``, is
executed independently beforehand, the returned value to be used in the
subsequent insert. Note that when an
:func:`~sqlalchemy.sql.expression.insert()` construct is executed using
"executemany" semantics, the "last inserted identifier" functionality does not
apply; no RETURNING clause is emitted nor is the sequence pre-executed in this
case.
To force the usage of RETURNING by default off, specify the flag
``implicit_returning=False`` to :func:`_sa.create_engine`.
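For example (the connection URL shown is illustrative only)::
    engine = create_engine(
        "postgresql+psycopg2://scott:tiger@localhost/test",
        implicit_returning=False
    )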
PostgreSQL 10 IDENTITY columns
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
PostgreSQL 10 has a new IDENTITY feature that supersedes the use of SERIAL.
Built-in support for rendering of IDENTITY is not available yet, however the
following compilation hook may be used to replace occurrences of SERIAL with
IDENTITY::
from sqlalchemy.schema import CreateColumn
from sqlalchemy.ext.compiler import compiles
@compiles(CreateColumn, 'postgresql')
def use_identity(element, compiler, **kw):
text = compiler.visit_create_column(element, **kw)
text = text.replace("SERIAL", "INT GENERATED BY DEFAULT AS IDENTITY")
return text
Using the above, a table such as::
t = Table(
't', m,
Column('id', Integer, primary_key=True),
Column('data', String)
)
Will generate on the backing database as::
CREATE TABLE t (
id INT GENERATED BY DEFAULT AS IDENTITY NOT NULL,
data VARCHAR,
PRIMARY KEY (id)
)
.. _postgresql_isolation_level:
Transaction Isolation Level
---------------------------
All PostgreSQL dialects support setting of transaction isolation level
both via a dialect-specific parameter
:paramref:`_sa.create_engine.isolation_level` accepted by
:func:`_sa.create_engine`,
as well as the :paramref:`.Connection.execution_options.isolation_level`
argument as passed to :meth:`_engine.Connection.execution_options`.
When using a non-psycopg2 dialect, this feature works by issuing the command
``SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL <level>`` for
each new connection. For the special AUTOCOMMIT isolation level,
DBAPI-specific techniques are used.
To set isolation level using :func:`_sa.create_engine`::
engine = create_engine(
"postgresql+pg8000://scott:tiger@localhost/test",
isolation_level="READ UNCOMMITTED"
)
To set using per-connection execution options::
connection = engine.connect()
connection = connection.execution_options(
isolation_level="READ COMMITTED"
)
Valid values for ``isolation_level`` include:
* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``AUTOCOMMIT`` - on psycopg2 / pg8000 only
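As a brief sketch, the AUTOCOMMIT level may be applied to an individual
connection via the same execution option; the statement shown is only
illustrative of a command that must run outside of a transaction block::
    with engine.connect() as conn:
        conn = conn.execution_options(isolation_level="AUTOCOMMIT")
        conn.execute("VACUUM")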
.. seealso::
:ref:`dbapi_autocommit`
:ref:`psycopg2_isolation_level`
:ref:`pg8000_isolation_level`
.. _postgresql_schema_reflection:
Remote-Schema Table Introspection and PostgreSQL search_path
------------------------------------------------------------
**TL;DR;**: keep the ``search_path`` variable set to its default of ``public``,
name schemas **other** than ``public`` explicitly within ``Table`` definitions.
The PostgreSQL dialect can reflect tables from any schema. The
:paramref:`_schema.Table.schema` argument, or alternatively the
:paramref:`.MetaData.reflect.schema` argument determines which schema will
be searched for the table or tables. The reflected :class:`_schema.Table`
objects will in all cases retain this ``.schema`` attribute as was specified.
However, with regards to tables which these :class:`_schema.Table` objects
refer to via foreign key constraint, a decision must be made as to how the
``.schema`` is represented in those remote tables, in the case where that
remote schema name is also a member of the current
`PostgreSQL search path
<http://www.postgresql.org/docs/current/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_.
By default, the PostgreSQL dialect mimics the behavior encouraged by
PostgreSQL's own ``pg_get_constraintdef()`` builtin procedure. This function
returns a sample definition for a particular foreign key constraint,
omitting the referenced schema name from that definition when the name is
also in the PostgreSQL schema search path. The interaction below
illustrates this behavior::
test=> CREATE TABLE test_schema.referred(id INTEGER PRIMARY KEY);
CREATE TABLE
test=> CREATE TABLE referring(
test(> id INTEGER PRIMARY KEY,
test(> referred_id INTEGER REFERENCES test_schema.referred(id));
CREATE TABLE
test=> SET search_path TO public, test_schema;
test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM
test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n
test-> ON n.oid = c.relnamespace
test-> JOIN pg_catalog.pg_constraint r ON c.oid = r.conrelid
test-> WHERE c.relname='referring' AND r.contype = 'f'
test-> ;
pg_get_constraintdef
---------------------------------------------------
FOREIGN KEY (referred_id) REFERENCES referred(id)
(1 row)
Above, we created a table ``referred`` as a member of the remote schema
``test_schema``, however when we added ``test_schema`` to the
PG ``search_path`` and then asked ``pg_get_constraintdef()`` for the
``FOREIGN KEY`` syntax, ``test_schema`` was not included in the output of
the function.
On the other hand, if we set the search path back to the typical default
of ``public``::
test=> SET search_path TO public;
SET
The same query against ``pg_get_constraintdef()`` now returns the fully
schema-qualified name for us::
test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM
test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n
test-> ON n.oid = c.relnamespace
test-> JOIN pg_catalog.pg_constraint r ON c.oid = r.conrelid
test-> WHERE c.relname='referring' AND r.contype = 'f';
pg_get_constraintdef
---------------------------------------------------------------
FOREIGN KEY (referred_id) REFERENCES test_schema.referred(id)
(1 row)
SQLAlchemy will by default use the return value of ``pg_get_constraintdef()``
in order to determine the remote schema name. That is, if our ``search_path``
were set to include ``test_schema``, and we invoked a table
reflection process as follows::
>>> from sqlalchemy import Table, MetaData, create_engine
>>> engine = create_engine("postgresql://scott:tiger@localhost/test")
>>> with engine.connect() as conn:
... conn.execute("SET search_path TO test_schema, public")
... meta = MetaData()
... referring = Table('referring', meta,
... autoload=True, autoload_with=conn)
...
<sqlalchemy.engine.result.ResultProxy object at 0x101612ed0>
The above process would deliver to the :attr:`_schema.MetaData.tables`
collection a ``referred`` table named **without** the schema::
>>> meta.tables['referred'].schema is None
True
To alter the behavior of reflection such that the referred schema is
maintained regardless of the ``search_path`` setting, use the
``postgresql_ignore_search_path`` option, which can be specified as a
dialect-specific argument to both :class:`_schema.Table` as well as
:meth:`_schema.MetaData.reflect`::
>>> with engine.connect() as conn:
... conn.execute("SET search_path TO test_schema, public")
... meta = MetaData()
... referring = Table('referring', meta, autoload=True,
... autoload_with=conn,
... postgresql_ignore_search_path=True)
...
<sqlalchemy.engine.result.ResultProxy object at 0x1016126d0>
We will now have ``test_schema.referred`` stored as schema-qualified::
>>> meta.tables['test_schema.referred'].schema
'test_schema'
.. sidebar:: Best Practices for PostgreSQL Schema reflection
The description of PostgreSQL schema reflection behavior is complex, and
is the product of many years of dealing with widely varied use cases and
user preferences. But in fact, there's no need to understand any of it if
you just stick to the simplest use pattern: leave the ``search_path`` set
to its default of ``public`` only, never refer to the name ``public`` as
an explicit schema name otherwise, and refer to all other schema names
explicitly when building up a :class:`_schema.Table` object. The options
described here are only for those users who can't, or prefer not to, stay
within these guidelines.
Note that **in all cases**, the "default" schema is always reflected as
``None``. The "default" schema on PostgreSQL is that which is returned by the
PostgreSQL ``current_schema()`` function. On a typical PostgreSQL
installation, this is the name ``public``. So a table that refers to another
which is in the ``public`` (i.e. default) schema will always have the
``.schema`` attribute set to ``None``.
.. versionadded:: 0.9.2 Added the ``postgresql_ignore_search_path``
dialect-level option accepted by :class:`_schema.Table` and
:meth:`_schema.MetaData.reflect`.
.. seealso::
`The Schema Search Path
<http://www.postgresql.org/docs/9.0/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_
- on the PostgreSQL website.
INSERT/UPDATE...RETURNING
-------------------------
The dialect supports PG 8.2's ``INSERT..RETURNING``, ``UPDATE..RETURNING`` and
``DELETE..RETURNING`` syntaxes. ``INSERT..RETURNING`` is used by default
for single-row INSERT statements in order to fetch newly generated
primary key identifiers. To specify an explicit ``RETURNING`` clause,
use the :meth:`._UpdateBase.returning` method on a per-statement basis::
# INSERT..RETURNING
result = table.insert().returning(table.c.col1, table.c.col2).\
values(name='foo')
print(result.fetchall())
# UPDATE..RETURNING
result = table.update().returning(table.c.col1, table.c.col2).\
where(table.c.name=='foo').values(name='bar')
print(result.fetchall())
# DELETE..RETURNING
result = table.delete().returning(table.c.col1, table.c.col2).\
where(table.c.name=='foo')
print(result.fetchall())
.. _postgresql_insert_on_conflict:
INSERT...ON CONFLICT (Upsert)
------------------------------
Starting with version 9.5, PostgreSQL allows "upserts" (update or insert) of
rows into a table via the ``ON CONFLICT`` clause of the ``INSERT`` statement. A
candidate row will only be inserted if that row does not violate any unique
constraints. In the case of a unique constraint violation, a secondary action
can occur which can be either "DO UPDATE", indicating that the data in the
target row should be updated, or "DO NOTHING", which indicates to silently skip
this row.
Conflicts are determined using existing unique constraints and indexes. These
constraints may be identified either using their name as stated in DDL,
or they may be *inferred* by stating the columns and conditions that comprise
the indexes.
SQLAlchemy provides ``ON CONFLICT`` support via the PostgreSQL-specific
:func:`_postgresql.insert()` function, which provides
the generative methods :meth:`~.postgresql.Insert.on_conflict_do_update`
and :meth:`~.postgresql.Insert.on_conflict_do_nothing`::
from sqlalchemy.dialects.postgresql import insert
insert_stmt = insert(my_table).values(
id='some_existing_id',
data='inserted value')
do_nothing_stmt = insert_stmt.on_conflict_do_nothing(
index_elements=['id']
)
conn.execute(do_nothing_stmt)
do_update_stmt = insert_stmt.on_conflict_do_update(
constraint='pk_my_table',
set_=dict(data='updated value')
)
conn.execute(do_update_stmt)
Both methods supply the "target" of the conflict using either the
named constraint or by column inference:
* The :paramref:`.Insert.on_conflict_do_update.index_elements` argument
specifies a sequence containing string column names, :class:`_schema.Column`
objects, and/or SQL expression elements, which would identify a unique
index::
do_update_stmt = insert_stmt.on_conflict_do_update(
index_elements=['id'],
set_=dict(data='updated value')
)
do_update_stmt = insert_stmt.on_conflict_do_update(
index_elements=[my_table.c.id],
set_=dict(data='updated value')
)
* When using :paramref:`.Insert.on_conflict_do_update.index_elements` to
  infer an index, a partial index can be inferred by also specifying the
  :paramref:`.Insert.on_conflict_do_update.index_where` parameter::
from sqlalchemy.dialects.postgresql import insert
stmt = insert(my_table).values(user_email='[email protected]', data='inserted data')
stmt = stmt.on_conflict_do_update(
index_elements=[my_table.c.user_email],
index_where=my_table.c.user_email.like('%@gmail.com'),
set_=dict(data=stmt.excluded.data)
)
conn.execute(stmt)
* The :paramref:`.Insert.on_conflict_do_update.constraint` argument is
used to specify an index directly rather than inferring it. This can be
the name of a UNIQUE constraint, a PRIMARY KEY constraint, or an INDEX::
do_update_stmt = insert_stmt.on_conflict_do_update(
constraint='my_table_idx_1',
set_=dict(data='updated value')
)
do_update_stmt = insert_stmt.on_conflict_do_update(
constraint='my_table_pk',
set_=dict(data='updated value')
)
* The :paramref:`.Insert.on_conflict_do_update.constraint` argument may
also refer to a SQLAlchemy construct representing a constraint,
e.g. :class:`.UniqueConstraint`, :class:`.PrimaryKeyConstraint`,
:class:`.Index`, or :class:`.ExcludeConstraint`. In this use,
if the constraint has a name, it is used directly. Otherwise, if the
constraint is unnamed, then inference will be used, where the expressions
and optional WHERE clause of the constraint will be spelled out in the
construct. This use is especially convenient
to refer to the named or unnamed primary key of a :class:`_schema.Table`
using the
:attr:`_schema.Table.primary_key` attribute::
do_update_stmt = insert_stmt.on_conflict_do_update(
constraint=my_table.primary_key,
set_=dict(data='updated value')
)
``ON CONFLICT...DO UPDATE`` is used to perform an update of the already
existing row, using any combination of new values as well as values
from the proposed insertion. These values are specified using the
:paramref:`.Insert.on_conflict_do_update.set_` parameter. This
parameter accepts a dictionary which consists of direct values
for UPDATE::
from sqlalchemy.dialects.postgresql import insert
stmt = insert(my_table).values(id='some_id', data='inserted value')
do_update_stmt = stmt.on_conflict_do_update(
index_elements=['id'],
set_=dict(data='updated value')
)
conn.execute(do_update_stmt)
.. warning::
The :meth:`_expression.Insert.on_conflict_do_update`
method does **not** take into
account Python-side default UPDATE values or generation functions, e.g.
those specified using :paramref:`_schema.Column.onupdate`.
These values will not be exercised for an ON CONFLICT style of UPDATE,
unless they are manually specified in the
:paramref:`.Insert.on_conflict_do_update.set_` dictionary.
In order to refer to the proposed insertion row, the special alias
:attr:`~.postgresql.Insert.excluded` is available as an attribute on
the :class:`_postgresql.Insert` object; this object is a
:class:`_expression.ColumnCollection` containing all columns of the
target table, referring to the special ``EXCLUDED`` alias in the
rendered SQL::
from sqlalchemy.dialects.postgresql import insert
stmt = insert(my_table).values(
id='some_id',
data='inserted value',
author='jlh')
do_update_stmt = stmt.on_conflict_do_update(
index_elements=['id'],
set_=dict(data='updated value', author=stmt.excluded.author)
)
conn.execute(do_update_stmt)
The :meth:`_expression.Insert.on_conflict_do_update` method also accepts
a WHERE clause using the :paramref:`.Insert.on_conflict_do_update.where`
parameter, which will limit those rows which receive an UPDATE::
from sqlalchemy.dialects.postgresql import insert
stmt = insert(my_table).values(
id='some_id',
data='inserted value',
author='jlh')
on_update_stmt = stmt.on_conflict_do_update(
index_elements=['id'],
        set_=dict(data='updated value', author=stmt.excluded.author),
where=(my_table.c.status == 2)
)
conn.execute(on_update_stmt)
``ON CONFLICT`` may also be used to skip inserting a row entirely
if any conflict with a unique or exclusion constraint occurs; below
this is illustrated using the
:meth:`~.postgresql.Insert.on_conflict_do_nothing` method::
from sqlalchemy.dialects.postgresql import insert
stmt = insert(my_table).values(id='some_id', data='inserted value')
stmt = stmt.on_conflict_do_nothing(index_elements=['id'])
conn.execute(stmt)
If ``DO NOTHING`` is used without specifying any columns or constraint,
it has the effect of skipping the INSERT for any unique or exclusion
constraint violation which occurs::
from sqlalchemy.dialects.postgresql import insert
stmt = insert(my_table).values(id='some_id', data='inserted value')
stmt = stmt.on_conflict_do_nothing()
conn.execute(stmt)
.. versionadded:: 1.1 Added support for PostgreSQL ON CONFLICT clauses
.. seealso::
`INSERT .. ON CONFLICT
<http://www.postgresql.org/docs/current/static/sql-insert.html#SQL-ON-CONFLICT>`_
- in the PostgreSQL documentation.
.. _postgresql_match:
Full Text Search
----------------
SQLAlchemy makes available the PostgreSQL ``@@`` operator via the
:meth:`_expression.ColumnElement.match`
method on any textual column expression.
On a PostgreSQL dialect, an expression like the following::
select([sometable.c.text.match("search string")])
will emit to the database::
SELECT text @@ to_tsquery('search string') FROM table
The PostgreSQL text search functions such as ``to_tsquery()``
and ``to_tsvector()`` are available
explicitly using the standard :data:`.func` construct. For example::
select([
func.to_tsvector('fat cats ate rats').match('cat & rat')
])
Emits the equivalent of::
SELECT to_tsvector('fat cats ate rats') @@ to_tsquery('cat & rat')
The :class:`_postgresql.TSVECTOR` type can provide for explicit CAST::
from sqlalchemy.dialects.postgresql import TSVECTOR
from sqlalchemy import select, cast
select([cast("some text", TSVECTOR)])
produces a statement equivalent to::
SELECT CAST('some text' AS TSVECTOR) AS anon_1
Full Text Searches in PostgreSQL are influenced by a combination of: the
PostgreSQL setting of ``default_text_search_config``, the ``regconfig`` used
to build the GIN/GiST indexes, and the ``regconfig`` optionally passed in
during a query.
When performing a Full Text Search against a column that has a GIN or
GiST index that is already pre-computed (which is common on full text
searches) one may need to explicitly pass in a particular PostgreSQL
``regconfig`` value to ensure the query-planner utilizes the index and does
not re-compute the column on demand.
In order to provide for this explicit query planning, or to use different
search strategies, the ``match`` method accepts a ``postgresql_regconfig``
keyword argument::
select([mytable.c.id]).where(
mytable.c.title.match('somestring', postgresql_regconfig='english')
)
Emits the equivalent of::
SELECT mytable.id FROM mytable
WHERE mytable.title @@ to_tsquery('english', 'somestring')
One can also specifically pass in a `'regconfig'` value to the
``to_tsvector()`` command as the initial argument::
select([mytable.c.id]).where(
func.to_tsvector('english', mytable.c.title )\
.match('somestring', postgresql_regconfig='english')
)
produces a statement equivalent to::
SELECT mytable.id FROM mytable
WHERE to_tsvector('english', mytable.title) @@
to_tsquery('english', 'somestring')
It is recommended that you use the ``EXPLAIN ANALYZE...`` tool from
PostgreSQL to ensure that you are generating queries with SQLAlchemy that
take full advantage of any indexes you may have created for full text search.
FROM ONLY ...
-------------
The dialect supports PostgreSQL's ONLY keyword for targeting only a particular
table in an inheritance hierarchy. This can be used to produce the
``SELECT ... FROM ONLY``, ``UPDATE ONLY ...``, and ``DELETE FROM ONLY ...``
syntaxes. It uses SQLAlchemy's hints mechanism::
# SELECT ... FROM ONLY ...
result = table.select().with_hint(table, 'ONLY', 'postgresql')
print(result.fetchall())
# UPDATE ONLY ...
table.update(values=dict(foo='bar')).with_hint('ONLY',
dialect_name='postgresql')
# DELETE FROM ONLY ...
table.delete().with_hint('ONLY', dialect_name='postgresql')
.. _postgresql_indexes:
PostgreSQL-Specific Index Options
---------------------------------
Several extensions to the :class:`.Index` construct are available, specific
to the PostgreSQL dialect.
.. _postgresql_partial_indexes:
Partial Indexes
^^^^^^^^^^^^^^^
Partial indexes add criterion to the index definition so that the index is
applied to a subset of rows. These can be specified on :class:`.Index`
using the ``postgresql_where`` keyword argument::
Index('my_index', my_table.c.id, postgresql_where=my_table.c.value > 10)
Operator Classes
^^^^^^^^^^^^^^^^
PostgreSQL allows the specification of an *operator class* for each column of
an index (see
http://www.postgresql.org/docs/8.3/interactive/indexes-opclass.html).
The :class:`.Index` construct allows these to be specified via the
``postgresql_ops`` keyword argument::
Index(
'my_index', my_table.c.id, my_table.c.data,
postgresql_ops={
'data': 'text_pattern_ops',
'id': 'int4_ops'
})
Note that the keys in the ``postgresql_ops`` dictionary are the "key" name of
the :class:`_schema.Column`, i.e. the name used to access it from the ``.c``
collection of :class:`_schema.Table`,
which can be configured to be different than
the actual name of the column as expressed in the database.
If ``postgresql_ops`` is to be used against a complex SQL expression such
as a function call, then to apply to the column it must be given a label
that is identified in the dictionary by name, e.g.::
Index(
'my_index', my_table.c.id,
func.lower(my_table.c.data).label('data_lower'),
postgresql_ops={
'data_lower': 'text_pattern_ops',
'id': 'int4_ops'
})
Index Types
^^^^^^^^^^^
PostgreSQL provides several index types: B-Tree, Hash, GiST, and GIN, as well
as the ability for users to create their own (see
http://www.postgresql.org/docs/8.3/static/indexes-types.html). These can be
specified on :class:`.Index` using the ``postgresql_using`` keyword argument::
Index('my_index', my_table.c.data, postgresql_using='gin')
The value passed to the keyword argument will be simply passed through to the
underlying CREATE INDEX command, so it *must* be a valid index type for your
version of PostgreSQL.
.. _postgresql_index_storage:
Index Storage Parameters
^^^^^^^^^^^^^^^^^^^^^^^^
PostgreSQL allows storage parameters to be set on indexes. The storage
parameters available depend on the index method used by the index. Storage
parameters can be specified on :class:`.Index` using the ``postgresql_with``
keyword argument::
Index('my_index', my_table.c.data, postgresql_with={"fillfactor": 50})
.. versionadded:: 1.0.6
PostgreSQL allows to define the tablespace in which to create the index.
The tablespace can be specified on :class:`.Index` using the
``postgresql_tablespace`` keyword argument::
Index('my_index', my_table.c.data, postgresql_tablespace='my_tablespace')
.. versionadded:: 1.1
Note that the same option is available on :class:`_schema.Table` as well.
.. _postgresql_index_concurrently:
Indexes with CONCURRENTLY
^^^^^^^^^^^^^^^^^^^^^^^^^
The PostgreSQL index option CONCURRENTLY is supported by passing the
flag ``postgresql_concurrently`` to the :class:`.Index` construct::
tbl = Table('testtbl', m, Column('data', Integer))
idx1 = Index('test_idx1', tbl.c.data, postgresql_concurrently=True)
The above index construct will render DDL for CREATE INDEX, assuming
PostgreSQL 8.2 or higher is detected or for a connection-less dialect, as::
CREATE INDEX CONCURRENTLY test_idx1 ON testtbl (data)
For DROP INDEX, assuming PostgreSQL 9.2 or higher is detected or for
a connection-less dialect, it will emit::
DROP INDEX CONCURRENTLY test_idx1
.. versionadded:: 1.1 support for CONCURRENTLY on DROP INDEX. The
CONCURRENTLY keyword is now only emitted if a high enough version
of PostgreSQL is detected on the connection (or for a connection-less
dialect).
When using CONCURRENTLY, the PostgreSQL database requires that the statement
be invoked outside of a transaction block. The Python DBAPI enforces that
even for a single statement, a transaction is present, so to use this
construct, the DBAPI's "autocommit" mode must be used::
metadata = MetaData()
table = Table(
"foo", metadata,
Column("id", String))
index = Index(
"foo_idx", table.c.id, postgresql_concurrently=True)
with engine.connect() as conn:
with conn.execution_options(isolation_level='AUTOCOMMIT'):
table.create(conn)
.. seealso::
:ref:`postgresql_isolation_level`
.. _postgresql_index_reflection:
PostgreSQL Index Reflection
---------------------------
The PostgreSQL database creates a UNIQUE INDEX implicitly whenever the
UNIQUE CONSTRAINT construct is used. When inspecting a table using
:class:`_reflection.Inspector`, the :meth:`_reflection.Inspector.get_indexes`
and the :meth:`_reflection.Inspector.get_unique_constraints`
will report on these
two constructs distinctly; in the case of the index, the key
``duplicates_constraint`` will be present in the index entry if it is
detected as mirroring a constraint. When performing reflection using
``Table(..., autoload=True)``, the UNIQUE INDEX is **not** returned
in :attr:`_schema.Table.indexes` when it is detected as mirroring a
:class:`.UniqueConstraint` in the :attr:`_schema.Table.constraints` collection.
.. versionchanged:: 1.0.0 - :class:`_schema.Table` reflection now includes
:class:`.UniqueConstraint` objects present in the
:attr:`_schema.Table.constraints`
collection; the PostgreSQL backend will no longer include a "mirrored"
:class:`.Index` construct in :attr:`_schema.Table.indexes`
if it is detected
as corresponding to a unique constraint.
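As a brief sketch of working with the ``duplicates_constraint`` key (the
connection URL and table name here are illustrative)::
    from sqlalchemy import create_engine, inspect
    engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test")
    insp = inspect(engine)
    for index in insp.get_indexes("some_table"):
        if "duplicates_constraint" in index:
            print("index %s mirrors constraint %s" %
                  (index["name"], index["duplicates_constraint"]))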
Special Reflection Options
--------------------------
The :class:`_reflection.Inspector`
used for the PostgreSQL backend is an instance
of :class:`.PGInspector`, which offers additional methods::
from sqlalchemy import create_engine, inspect
engine = create_engine("postgresql+psycopg2://localhost/test")
insp = inspect(engine) # will be a PGInspector
print(insp.get_enums())
.. autoclass:: PGInspector
:members:
.. _postgresql_table_options:
PostgreSQL Table Options
------------------------
Several options for CREATE TABLE are supported directly by the PostgreSQL
dialect in conjunction with the :class:`_schema.Table` construct:
* ``TABLESPACE``::
Table("some_table", metadata, ..., postgresql_tablespace='some_tablespace')
The above option is also available on the :class:`.Index` construct.
* ``ON COMMIT``::
Table("some_table", metadata, ..., postgresql_on_commit='PRESERVE ROWS')
* ``WITH OIDS``::
Table("some_table", metadata, ..., postgresql_with_oids=True)
* ``WITHOUT OIDS``::
Table("some_table", metadata, ..., postgresql_with_oids=False)
* ``INHERITS``::
Table("some_table", metadata, ..., postgresql_inherits="some_supertable")
Table("some_table", metadata, ..., postgresql_inherits=("t1", "t2", ...))
.. versionadded:: 1.0.0
* ``PARTITION BY``::
Table("some_table", metadata, ...,
postgresql_partition_by='LIST (part_column)')
.. versionadded:: 1.2.6
.. seealso::
`PostgreSQL CREATE TABLE options
<http://www.postgresql.org/docs/current/static/sql-createtable.html>`_
Table values, Row and Tuple objects
-----------------------------------
Row Types
^^^^^^^^^
Built-in support for rendering a ``ROW`` is not available yet; however, the
:func:`_expression.tuple_` construct may be used in its place. Another
alternative is to use the :attr:`_sa.func` generator with ``func.ROW``::
table.select().where(
tuple_(table.c.id, table.c.fk) > (1,2)
).where(func.ROW(table.c.id, table.c.fk) < func.ROW(3, 7))
Will generate the row-wise comparison::
SELECT *
FROM table
WHERE (id, fk) > (1, 2)
AND ROW(id, fk) < ROW(3, 7)
.. seealso::
`PostgreSQL Row Constructors
<https://www.postgresql.org/docs/current/sql-expressions.html#SQL-SYNTAX-ROW-CONSTRUCTORS>`_
`PostgreSQL Row Constructor Comparison
<https://www.postgresql.org/docs/current/functions-comparisons.html#ROW-WISE-COMPARISON>`_
Table Types
^^^^^^^^^^^
PostgreSQL also supports passing a table as an argument to a function. This
is not available yet in SQLAlchemy; however, the
:func:`_expression.literal_column` function with the name of the table may be
used in its place::
select(['*']).select_from(func.my_function(literal_column('my_table')))
Will generate the SQL::
SELECT *
FROM my_function(my_table)
ARRAY Types
-----------
The PostgreSQL dialect supports arrays, both as multidimensional column types
and as array literals; a short usage sketch follows this list:
* :class:`_postgresql.ARRAY` - ARRAY datatype
* :class:`_postgresql.array` - array literal
* :func:`_postgresql.array_agg` - ARRAY_AGG SQL function
* :class:`_postgresql.aggregate_order_by` - helper for PG's ORDER BY aggregate
function syntax.
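As a brief sketch, a one-dimensional column used together with an array
literal in a filter (table and column names below are illustrative only)::
    from sqlalchemy import Column, Integer, MetaData, Table, select
    from sqlalchemy.dialects.postgresql import ARRAY, array
    metadata = MetaData()
    tbl = Table(
        "example_tbl", metadata,
        Column("id", Integer, primary_key=True),
        Column("scores", ARRAY(Integer, dimensions=1)),
    )
    stmt = select([tbl.c.id]).where(tbl.c.scores.contains(array([1, 2])))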
JSON Types
----------
The PostgreSQL dialect supports both the JSON and JSONB datatypes, including
psycopg2's native support and support for all of PostgreSQL's special
operators; a short usage sketch follows this list:
* :class:`_postgresql.JSON`
* :class:`_postgresql.JSONB`
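As a brief sketch, the index and ``astext`` accessors render the native JSON
operators (table and column names below are illustrative only)::
    from sqlalchemy import Column, Integer, MetaData, Table, select
    from sqlalchemy.dialects.postgresql import JSONB
    metadata = MetaData()
    docs = Table(
        "example_docs", metadata,
        Column("id", Integer, primary_key=True),
        Column("doc", JSONB),
    )
    stmt = select([docs.c.id]).where(docs.c.doc["status"].astext == "active")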
HSTORE Type
-----------
The PostgreSQL HSTORE type as well as hstore literals are supported; a short
usage sketch follows this list:
* :class:`_postgresql.HSTORE` - HSTORE datatype
* :class:`_postgresql.hstore` - hstore literal
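As a brief sketch, an HSTORE column queried with ``has_key()`` alongside an
hstore literal (table and column names below are illustrative only)::
    from sqlalchemy import Column, Integer, MetaData, Table, select
    from sqlalchemy.dialects.postgresql import HSTORE, hstore
    metadata = MetaData()
    kv = Table(
        "example_kv", metadata,
        Column("id", Integer, primary_key=True),
        Column("attrs", HSTORE),
    )
    stmt = select([kv.c.id]).where(kv.c.attrs.has_key("color"))
    color_literal = hstore("color", "blue")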
ENUM Types
----------
PostgreSQL has an independently creatable TYPE structure which is used
to implement an enumerated type. This approach introduces significant
complexity on the SQLAlchemy side in terms of when this type should be
CREATED and DROPPED. The type object is also an independently reflectable
entity. The following sections should be consulted:
* :class:`_postgresql.ENUM` - DDL and typing support for ENUM.
* :meth:`.PGInspector.get_enums` - retrieve a listing of current ENUM types
* :meth:`.postgresql.ENUM.create` , :meth:`.postgresql.ENUM.drop` - individual
CREATE and DROP commands for ENUM.
.. _postgresql_array_of_enum:
Using ENUM with ARRAY
^^^^^^^^^^^^^^^^^^^^^
The combination of ENUM and ARRAY is not directly supported by backend
DBAPIs at this time. Prior to SQLAlchemy 1.3.17, a special workaround
was needed in order to allow this combination to work, described below.
.. versionchanged:: 1.3.17 The combination of ENUM and ARRAY is now directly
handled by SQLAlchemy's implementation without any workarounds needed.
.. sourcecode:: python
    import re
    import sqlalchemy as sa
    from sqlalchemy import TypeDecorator
    from sqlalchemy.dialects.postgresql import ARRAY
class ArrayOfEnum(TypeDecorator):
impl = ARRAY
def bind_expression(self, bindvalue):
return sa.cast(bindvalue, self)
def result_processor(self, dialect, coltype):
super_rp = super(ArrayOfEnum, self).result_processor(
dialect, coltype)
def handle_raw_string(value):
inner = re.match(r"^{(.*)}$", value).group(1)
return inner.split(",") if inner else []
def process(value):
if value is None:
return None
return super_rp(handle_raw_string(value))
return process
E.g.::
Table(
'mydata', metadata,
Column('id', Integer, primary_key=True),
        Column('data', ArrayOfEnum(ENUM('a', 'b', 'c', name='myenum')))
)
This type is not included as a built-in type as it would be incompatible
with a DBAPI that suddenly decides to support ARRAY of ENUM directly in
a new version.
.. _postgresql_array_of_json:
Using JSON/JSONB with ARRAY
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Similar to using ENUM, prior to SQLAlchemy 1.3.17, an ARRAY of JSON/JSONB
required rendering the appropriate CAST. Current psycopg2 drivers accommodate
the result set correctly without any special steps.
.. versionchanged:: 1.3.17 The combination of JSON/JSONB and ARRAY is now
directly handled by SQLAlchemy's implementation without any workarounds
needed.
.. sourcecode:: python
    import sqlalchemy as sa
    from sqlalchemy.dialects.postgresql import ARRAY
    class CastingArray(ARRAY):
        def bind_expression(self, bindvalue):
            return sa.cast(bindvalue, self)
E.g.::
Table(
'mydata', metadata,
Column('id', Integer, primary_key=True),
Column('data', CastingArray(JSONB))
)
"""
from collections import defaultdict
import datetime as dt
import re
from . import array as _array
from . import hstore as _hstore
from . import json as _json
from . import ranges as _ranges
from ... import exc
from ... import schema
from ... import sql
from ... import util
from ...engine import default
from ...engine import reflection
from ...sql import compiler
from ...sql import elements
from ...sql import expression
from ...sql import sqltypes
from ...sql import util as sql_util
from ...types import BIGINT
from ...types import BOOLEAN
from ...types import CHAR
from ...types import DATE
from ...types import FLOAT
from ...types import INTEGER
from ...types import NUMERIC
from ...types import REAL
from ...types import SMALLINT
from ...types import TEXT
from ...types import VARCHAR
try:
from uuid import UUID as _python_UUID # noqa
except ImportError:
_python_UUID = None
IDX_USING = re.compile(r"^(?:btree|hash|gist|gin|[\w_]+)$", re.I)
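# textual statements matching these keywords trigger an autocommit when
# executed without an explicit transaction; see should_autocommit_text()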
AUTOCOMMIT_REGEXP = re.compile(
r"\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER|GRANT|REVOKE|"
"IMPORT FOREIGN SCHEMA|REFRESH MATERIALIZED VIEW|TRUNCATE)",
re.I | re.UNICODE,
)
RESERVED_WORDS = set(
[
"all",
"analyse",
"analyze",
"and",
"any",
"array",
"as",
"asc",
"asymmetric",
"both",
"case",
"cast",
"check",
"collate",
"column",
"constraint",
"create",
"current_catalog",
"current_date",
"current_role",
"current_time",
"current_timestamp",
"current_user",
"default",
"deferrable",
"desc",
"distinct",
"do",
"else",
"end",
"except",
"false",
"fetch",
"for",
"foreign",
"from",
"grant",
"group",
"having",
"in",
"initially",
"intersect",
"into",
"leading",
"limit",
"localtime",
"localtimestamp",
"new",
"not",
"null",
"of",
"off",
"offset",
"old",
"on",
"only",
"or",
"order",
"placing",
"primary",
"references",
"returning",
"select",
"session_user",
"some",
"symmetric",
"table",
"then",
"to",
"trailing",
"true",
"union",
"unique",
"user",
"using",
"variadic",
"when",
"where",
"window",
"with",
"authorization",
"between",
"binary",
"cross",
"current_schema",
"freeze",
"full",
"ilike",
"inner",
"is",
"isnull",
"join",
"left",
"like",
"natural",
"notnull",
"outer",
"over",
"overlaps",
"right",
"similar",
"verbose",
]
)
_DECIMAL_TYPES = (1231, 1700)
_FLOAT_TYPES = (700, 701, 1021, 1022)
_INT_TYPES = (20, 21, 23, 26, 1005, 1007, 1016)
class BYTEA(sqltypes.LargeBinary):
__visit_name__ = "BYTEA"
class DOUBLE_PRECISION(sqltypes.Float):
__visit_name__ = "DOUBLE_PRECISION"
class INET(sqltypes.TypeEngine):
__visit_name__ = "INET"
PGInet = INET
class CIDR(sqltypes.TypeEngine):
__visit_name__ = "CIDR"
PGCidr = CIDR
class MACADDR(sqltypes.TypeEngine):
__visit_name__ = "MACADDR"
PGMacAddr = MACADDR
class MONEY(sqltypes.TypeEngine):
"""Provide the PostgreSQL MONEY type.
.. versionadded:: 1.2
"""
__visit_name__ = "MONEY"
class OID(sqltypes.TypeEngine):
"""Provide the PostgreSQL OID type.
.. versionadded:: 0.9.5
"""
__visit_name__ = "OID"
class REGCLASS(sqltypes.TypeEngine):
"""Provide the PostgreSQL REGCLASS type.
.. versionadded:: 1.2.7
"""
__visit_name__ = "REGCLASS"
class TIMESTAMP(sqltypes.TIMESTAMP):
def __init__(self, timezone=False, precision=None):
super(TIMESTAMP, self).__init__(timezone=timezone)
self.precision = precision
class TIME(sqltypes.TIME):
def __init__(self, timezone=False, precision=None):
super(TIME, self).__init__(timezone=timezone)
self.precision = precision
class INTERVAL(sqltypes.NativeForEmulated, sqltypes._AbstractInterval):
"""PostgreSQL INTERVAL type.
The INTERVAL type may not be supported on all DBAPIs.
It is known to work on psycopg2 and not pg8000 or zxjdbc.
"""
__visit_name__ = "INTERVAL"
native = True
def __init__(self, precision=None, fields=None):
"""Construct an INTERVAL.
:param precision: optional integer precision value
:param fields: string fields specifier. allows storage of fields
to be limited, such as ``"YEAR"``, ``"MONTH"``, ``"DAY TO HOUR"``,
etc.
.. versionadded:: 1.2
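        A brief sketch (the column name below is illustrative only)::
            from sqlalchemy import Column
            from sqlalchemy.dialects.postgresql import INTERVAL
            Column("wait_time", INTERVAL(fields="DAY TO HOUR"))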
"""
self.precision = precision
self.fields = fields
@classmethod
def adapt_emulated_to_native(cls, interval, **kw):
return INTERVAL(precision=interval.second_precision)
@property
def _type_affinity(self):
return sqltypes.Interval
@property
def python_type(self):
return dt.timedelta
PGInterval = INTERVAL
class BIT(sqltypes.TypeEngine):
__visit_name__ = "BIT"
def __init__(self, length=None, varying=False):
if not varying:
# BIT without VARYING defaults to length 1
self.length = length or 1
else:
# but BIT VARYING can be unlimited-length, so no default
self.length = length
self.varying = varying
PGBit = BIT
class UUID(sqltypes.TypeEngine):
"""PostgreSQL UUID type.
Represents the UUID column type, interpreting
data either as natively returned by the DBAPI
or as Python uuid objects.
The UUID type may not be supported on all DBAPIs.
It is known to work on psycopg2 and not pg8000.
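    A brief usage sketch (the column name below is illustrative only)::
        from sqlalchemy import Column
        from sqlalchemy.dialects.postgresql import UUID
        Column("uuid_data", UUID(as_uuid=True))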
"""
__visit_name__ = "UUID"
def __init__(self, as_uuid=False):
"""Construct a UUID type.
:param as_uuid=False: if True, values will be interpreted
as Python uuid objects, converting to/from string via the
DBAPI.
"""
if as_uuid and _python_UUID is None:
raise NotImplementedError(
"This version of Python does not support "
"the native UUID type."
)
self.as_uuid = as_uuid
def bind_processor(self, dialect):
if self.as_uuid:
def process(value):
if value is not None:
value = util.text_type(value)
return value
return process
else:
return None
def result_processor(self, dialect, coltype):
if self.as_uuid:
def process(value):
if value is not None:
value = _python_UUID(value)
return value
return process
else:
return None
PGUuid = UUID
class TSVECTOR(sqltypes.TypeEngine):
"""The :class:`_postgresql.TSVECTOR` type implements the PostgreSQL
text search type TSVECTOR.
It can be used to do full text queries on natural language
documents.
.. versionadded:: 0.9.0
.. seealso::
:ref:`postgresql_match`
"""
__visit_name__ = "TSVECTOR"
class ENUM(sqltypes.NativeForEmulated, sqltypes.Enum):
"""PostgreSQL ENUM type.
This is a subclass of :class:`_types.Enum` which includes
support for PG's ``CREATE TYPE`` and ``DROP TYPE``.
When the builtin type :class:`_types.Enum` is used and the
:paramref:`.Enum.native_enum` flag is left at its default of
True, the PostgreSQL backend will use a :class:`_postgresql.ENUM`
type as the implementation, so the special create/drop rules
will be used.
    The create/drop behavior of ENUM is necessarily intricate, due to the
    awkward relationship the ENUM type has with its parent table, in that
    it may be "owned" by just a single table, or may be shared among many
    tables.
When using :class:`_types.Enum` or :class:`_postgresql.ENUM`
in an "inline" fashion, the ``CREATE TYPE`` and ``DROP TYPE`` is emitted
corresponding to when the :meth:`_schema.Table.create` and
:meth:`_schema.Table.drop`
methods are called::
table = Table('sometable', metadata,
Column('some_enum', ENUM('a', 'b', 'c', name='myenum'))
)
table.create(engine) # will emit CREATE ENUM and CREATE TABLE
table.drop(engine) # will emit DROP TABLE and DROP ENUM
To use a common enumerated type between multiple tables, the best
practice is to declare the :class:`_types.Enum` or
:class:`_postgresql.ENUM` independently, and associate it with the
:class:`_schema.MetaData` object itself::
my_enum = ENUM('a', 'b', 'c', name='myenum', metadata=metadata)
t1 = Table('sometable_one', metadata,
Column('some_enum', myenum)
)
t2 = Table('sometable_two', metadata,
Column('some_enum', myenum)
)
When this pattern is used, care must still be taken at the level
of individual table creates. Emitting CREATE TABLE without also
specifying ``checkfirst=True`` will still cause issues::
t1.create(engine) # will fail: no such type 'myenum'
If we specify ``checkfirst=True``, the individual table-level create
operation will check for the ``ENUM`` and create if not exists::
# will check if enum exists, and emit CREATE TYPE if not
t1.create(engine, checkfirst=True)
When using a metadata-level ENUM type, the type will always be created
and dropped if either the metadata-wide create/drop is called::
metadata.create_all(engine) # will emit CREATE TYPE
metadata.drop_all(engine) # will emit DROP TYPE
The type can also be created and dropped directly::
my_enum.create(engine)
my_enum.drop(engine)
.. versionchanged:: 1.0.0 The PostgreSQL :class:`_postgresql.ENUM` type
now behaves more strictly with regards to CREATE/DROP. A metadata-level
ENUM type will only be created and dropped at the metadata level,
not the table level, with the exception of
``table.create(checkfirst=True)``.
The ``table.drop()`` call will now emit a DROP TYPE for a table-level
enumerated type.
"""
native_enum = True
def __init__(self, *enums, **kw):
"""Construct an :class:`_postgresql.ENUM`.
Arguments are the same as that of
:class:`_types.Enum`, but also including
the following parameters.
:param create_type: Defaults to True.
Indicates that ``CREATE TYPE`` should be
emitted, after optionally checking for the
presence of the type, when the parent
table is being created; and additionally
that ``DROP TYPE`` is called when the table
is dropped. When ``False``, no check
will be performed and no ``CREATE TYPE``
or ``DROP TYPE`` is emitted, unless
:meth:`~.postgresql.ENUM.create`
or :meth:`~.postgresql.ENUM.drop`
are called directly.
Setting to ``False`` is helpful
when invoking a creation scheme to a SQL file
without access to the actual database -
the :meth:`~.postgresql.ENUM.create` and
:meth:`~.postgresql.ENUM.drop` methods can
be used to emit SQL to a target bind.
"""
self.create_type = kw.pop("create_type", True)
super(ENUM, self).__init__(*enums, **kw)
@classmethod
def adapt_emulated_to_native(cls, impl, **kw):
"""Produce a PostgreSQL native :class:`_postgresql.ENUM` from plain
:class:`.Enum`.
"""
kw.setdefault("validate_strings", impl.validate_strings)
kw.setdefault("name", impl.name)
kw.setdefault("schema", impl.schema)
kw.setdefault("inherit_schema", impl.inherit_schema)
kw.setdefault("metadata", impl.metadata)
kw.setdefault("_create_events", False)
kw.setdefault("values_callable", impl.values_callable)
return cls(**kw)
def create(self, bind=None, checkfirst=True):
"""Emit ``CREATE TYPE`` for this
:class:`_postgresql.ENUM`.
If the underlying dialect does not support
PostgreSQL CREATE TYPE, no action is taken.
:param bind: a connectable :class:`_engine.Engine`,
:class:`_engine.Connection`, or similar object to emit
SQL.
:param checkfirst: if ``True``, a query against
the PG catalog will be first performed to see
if the type does not exist already before
creating.
"""
if not bind.dialect.supports_native_enum:
return
if not checkfirst or not bind.dialect.has_type(
bind, self.name, schema=self.schema
):
bind.execute(CreateEnumType(self))
def drop(self, bind=None, checkfirst=True):
"""Emit ``DROP TYPE`` for this
:class:`_postgresql.ENUM`.
If the underlying dialect does not support
PostgreSQL DROP TYPE, no action is taken.
:param bind: a connectable :class:`_engine.Engine`,
:class:`_engine.Connection`, or similar object to emit
SQL.
:param checkfirst: if ``True``, a query against
the PG catalog will be first performed to see
if the type actually exists before dropping.
"""
if not bind.dialect.supports_native_enum:
return
if not checkfirst or bind.dialect.has_type(
bind, self.name, schema=self.schema
):
bind.execute(DropEnumType(self))
def _check_for_name_in_memos(self, checkfirst, kw):
"""Look in the 'ddl runner' for 'memos', then
note our name in that collection.
This to ensure a particular named enum is operated
upon only once within any kind of create/drop
sequence without relying upon "checkfirst".
"""
if not self.create_type:
return True
if "_ddl_runner" in kw:
ddl_runner = kw["_ddl_runner"]
if "_pg_enums" in ddl_runner.memo:
pg_enums = ddl_runner.memo["_pg_enums"]
else:
pg_enums = ddl_runner.memo["_pg_enums"] = set()
present = (self.schema, self.name) in pg_enums
pg_enums.add((self.schema, self.name))
return present
else:
return False
def _on_table_create(self, target, bind, checkfirst=False, **kw):
if (
checkfirst
or (
not self.metadata
and not kw.get("_is_metadata_operation", False)
)
and not self._check_for_name_in_memos(checkfirst, kw)
):
self.create(bind=bind, checkfirst=checkfirst)
def _on_table_drop(self, target, bind, checkfirst=False, **kw):
if (
not self.metadata
and not kw.get("_is_metadata_operation", False)
and not self._check_for_name_in_memos(checkfirst, kw)
):
self.drop(bind=bind, checkfirst=checkfirst)
def _on_metadata_create(self, target, bind, checkfirst=False, **kw):
if not self._check_for_name_in_memos(checkfirst, kw):
self.create(bind=bind, checkfirst=checkfirst)
def _on_metadata_drop(self, target, bind, checkfirst=False, **kw):
if not self._check_for_name_in_memos(checkfirst, kw):
self.drop(bind=bind, checkfirst=checkfirst)
colspecs = {
sqltypes.ARRAY: _array.ARRAY,
sqltypes.Interval: INTERVAL,
sqltypes.Enum: ENUM,
sqltypes.JSON.JSONPathType: _json.JSONPathType,
sqltypes.JSON: _json.JSON,
}
ischema_names = {
"_array": _array.ARRAY,
"hstore": _hstore.HSTORE,
"json": _json.JSON,
"jsonb": _json.JSONB,
"int4range": _ranges.INT4RANGE,
"int8range": _ranges.INT8RANGE,
"numrange": _ranges.NUMRANGE,
"daterange": _ranges.DATERANGE,
"tsrange": _ranges.TSRANGE,
"tstzrange": _ranges.TSTZRANGE,
"integer": INTEGER,
"bigint": BIGINT,
"smallint": SMALLINT,
"character varying": VARCHAR,
"character": CHAR,
'"char"': sqltypes.String,
"name": sqltypes.String,
"text": TEXT,
"numeric": NUMERIC,
"float": FLOAT,
"real": REAL,
"inet": INET,
"cidr": CIDR,
"uuid": UUID,
"bit": BIT,
"bit varying": BIT,
"macaddr": MACADDR,
"money": MONEY,
"oid": OID,
"regclass": REGCLASS,
"double precision": DOUBLE_PRECISION,
"timestamp": TIMESTAMP,
"timestamp with time zone": TIMESTAMP,
"timestamp without time zone": TIMESTAMP,
"time with time zone": TIME,
"time without time zone": TIME,
"date": DATE,
"time": TIME,
"bytea": BYTEA,
"boolean": BOOLEAN,
"interval": INTERVAL,
"tsvector": TSVECTOR,
}
class PGCompiler(compiler.SQLCompiler):
def visit_array(self, element, **kw):
return "ARRAY[%s]" % self.visit_clauselist(element, **kw)
def visit_slice(self, element, **kw):
return "%s:%s" % (
self.process(element.start, **kw),
self.process(element.stop, **kw),
)
def visit_json_getitem_op_binary(
self, binary, operator, _cast_applied=False, **kw
):
if (
not _cast_applied
and binary.type._type_affinity is not sqltypes.JSON
):
kw["_cast_applied"] = True
return self.process(sql.cast(binary, binary.type), **kw)
kw["eager_grouping"] = True
return self._generate_generic_binary(
binary, " -> " if not _cast_applied else " ->> ", **kw
)
def visit_json_path_getitem_op_binary(
self, binary, operator, _cast_applied=False, **kw
):
if (
not _cast_applied
and binary.type._type_affinity is not sqltypes.JSON
):
kw["_cast_applied"] = True
return self.process(sql.cast(binary, binary.type), **kw)
kw["eager_grouping"] = True
return self._generate_generic_binary(
binary, " #> " if not _cast_applied else " #>> ", **kw
)
def visit_getitem_binary(self, binary, operator, **kw):
return "%s[%s]" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_aggregate_order_by(self, element, **kw):
return "%s ORDER BY %s" % (
self.process(element.target, **kw),
self.process(element.order_by, **kw),
)
def visit_match_op_binary(self, binary, operator, **kw):
if "postgresql_regconfig" in binary.modifiers:
regconfig = self.render_literal_value(
binary.modifiers["postgresql_regconfig"], sqltypes.STRINGTYPE
)
if regconfig:
return "%s @@ to_tsquery(%s, %s)" % (
self.process(binary.left, **kw),
regconfig,
self.process(binary.right, **kw),
)
return "%s @@ to_tsquery(%s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_ilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return "%s ILIKE %s" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
) + (
" ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape
else ""
)
def visit_notilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return "%s NOT ILIKE %s" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
) + (
" ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape
else ""
)
def visit_empty_set_expr(self, element_types):
# cast the empty set to the type we are comparing against. if
# we are comparing against the null type, pick an arbitrary
# datatype for the empty set
return "SELECT %s WHERE 1!=1" % (
", ".join(
"CAST(NULL AS %s)"
% self.dialect.type_compiler.process(
INTEGER() if type_._isnull else type_
)
for type_ in element_types or [INTEGER()]
),
)
def render_literal_value(self, value, type_):
value = super(PGCompiler, self).render_literal_value(value, type_)
if self.dialect._backslash_escapes:
value = value.replace("\\", "\\\\")
return value
def visit_sequence(self, seq, **kw):
return "nextval('%s')" % self.preparer.format_sequence(seq)
def limit_clause(self, select, **kw):
text = ""
if select._limit_clause is not None:
text += " \n LIMIT " + self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
if select._limit_clause is None:
text += " \n LIMIT ALL"
text += " OFFSET " + self.process(select._offset_clause, **kw)
return text
def format_from_hint_text(self, sqltext, table, hint, iscrud):
if hint.upper() != "ONLY":
raise exc.CompileError("Unrecognized hint: %r" % hint)
return "ONLY " + sqltext
def get_select_precolumns(self, select, **kw):
if select._distinct is not False:
if select._distinct is True:
return "DISTINCT "
elif isinstance(select._distinct, (list, tuple)):
return (
"DISTINCT ON ("
+ ", ".join(
[self.process(col, **kw) for col in select._distinct]
)
+ ") "
)
else:
return (
"DISTINCT ON ("
+ self.process(select._distinct, **kw)
+ ") "
)
else:
return ""
def for_update_clause(self, select, **kw):
if select._for_update_arg.read:
if select._for_update_arg.key_share:
tmp = " FOR KEY SHARE"
else:
tmp = " FOR SHARE"
elif select._for_update_arg.key_share:
tmp = " FOR NO KEY UPDATE"
else:
tmp = " FOR UPDATE"
if select._for_update_arg.of:
tables = util.OrderedSet()
for c in select._for_update_arg.of:
tables.update(sql_util.surface_selectables_only(c))
tmp += " OF " + ", ".join(
self.process(table, ashint=True, use_schema=False, **kw)
for table in tables
)
if select._for_update_arg.nowait:
tmp += " NOWAIT"
if select._for_update_arg.skip_locked:
tmp += " SKIP LOCKED"
return tmp
def returning_clause(self, stmt, returning_cols):
columns = [
self._label_select_column(None, c, True, False, {})
for c in expression._select_iterables(returning_cols)
]
return "RETURNING " + ", ".join(columns)
def visit_substring_func(self, func, **kw):
s = self.process(func.clauses.clauses[0], **kw)
start = self.process(func.clauses.clauses[1], **kw)
if len(func.clauses.clauses) > 2:
length = self.process(func.clauses.clauses[2], **kw)
return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length)
else:
return "SUBSTRING(%s FROM %s)" % (s, start)
def _on_conflict_target(self, clause, **kw):
if clause.constraint_target is not None:
target_text = "ON CONSTRAINT %s" % clause.constraint_target
elif clause.inferred_target_elements is not None:
target_text = "(%s)" % ", ".join(
(
self.preparer.quote(c)
if isinstance(c, util.string_types)
else self.process(c, include_table=False, use_schema=False)
)
for c in clause.inferred_target_elements
)
if clause.inferred_target_whereclause is not None:
target_text += " WHERE %s" % self.process(
clause.inferred_target_whereclause,
include_table=False,
use_schema=False,
)
else:
target_text = ""
return target_text
def visit_on_conflict_do_nothing(self, on_conflict, **kw):
target_text = self._on_conflict_target(on_conflict, **kw)
if target_text:
return "ON CONFLICT %s DO NOTHING" % target_text
else:
return "ON CONFLICT DO NOTHING"
def visit_on_conflict_do_update(self, on_conflict, **kw):
clause = on_conflict
target_text = self._on_conflict_target(on_conflict, **kw)
action_set_ops = []
set_parameters = dict(clause.update_values_to_set)
# create a list of column assignment clauses as tuples
insert_statement = self.stack[-1]["selectable"]
cols = insert_statement.table.c
for c in cols:
col_key = c.key
if col_key in set_parameters:
value = set_parameters.pop(col_key)
if elements._is_literal(value):
value = elements.BindParameter(None, value, type_=c.type)
else:
if (
isinstance(value, elements.BindParameter)
and value.type._isnull
):
value = value._clone()
value.type = c.type
value_text = self.process(value.self_group(), use_schema=False)
key_text = self.preparer.quote(col_key)
action_set_ops.append("%s = %s" % (key_text, value_text))
# check for names that don't match columns
if set_parameters:
util.warn(
"Additional column names not matching "
"any column keys in table '%s': %s"
% (
self.statement.table.name,
(", ".join("'%s'" % c for c in set_parameters)),
)
)
for k, v in set_parameters.items():
key_text = (
self.preparer.quote(k)
if isinstance(k, util.string_types)
else self.process(k, use_schema=False)
)
value_text = self.process(
elements._literal_as_binds(v), use_schema=False
)
action_set_ops.append("%s = %s" % (key_text, value_text))
action_text = ", ".join(action_set_ops)
if clause.update_whereclause is not None:
action_text += " WHERE %s" % self.process(
clause.update_whereclause, include_table=True, use_schema=False
)
return "ON CONFLICT %s DO UPDATE SET %s" % (target_text, action_text)
def update_from_clause(
self, update_stmt, from_table, extra_froms, from_hints, **kw
):
return "FROM " + ", ".join(
t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw)
for t in extra_froms
)
def delete_extra_from_clause(
self, delete_stmt, from_table, extra_froms, from_hints, **kw
):
"""Render the DELETE .. USING clause specific to PostgreSQL."""
return "USING " + ", ".join(
t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw)
for t in extra_froms
)
class PGDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column)
impl_type = column.type.dialect_impl(self.dialect)
if isinstance(impl_type, sqltypes.TypeDecorator):
impl_type = impl_type.impl
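        # an integer primary key which is also the table's autoincrement
        # column, and which has no explicit default (or only an optional
        # Sequence), is rendered using the SERIAL / BIGSERIAL / SMALLSERIAL
        # pseudo-types; SMALLSERIAL only where the server supports it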
if (
column.primary_key
and column is column.table._autoincrement_column
and (
self.dialect.supports_smallserial
or not isinstance(impl_type, sqltypes.SmallInteger)
)
and (
column.default is None
or (
isinstance(column.default, schema.Sequence)
and column.default.optional
)
)
):
if isinstance(impl_type, sqltypes.BigInteger):
colspec += " BIGSERIAL"
elif isinstance(impl_type, sqltypes.SmallInteger):
colspec += " SMALLSERIAL"
else:
colspec += " SERIAL"
else:
colspec += " " + self.dialect.type_compiler.process(
column.type,
type_expression=column,
identifier_preparer=self.preparer,
)
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if column.computed is not None:
colspec += " " + self.process(column.computed)
if not column.nullable:
colspec += " NOT NULL"
return colspec
def visit_check_constraint(self, constraint):
if constraint._type_bound:
typ = list(constraint.columns)[0].type
if (
isinstance(typ, sqltypes.ARRAY)
and isinstance(typ.item_type, sqltypes.Enum)
and not typ.item_type.native_enum
):
raise exc.CompileError(
"PostgreSQL dialect cannot produce the CHECK constraint "
"for ARRAY of non-native ENUM; please specify "
"create_constraint=False on this Enum datatype."
)
return super(PGDDLCompiler, self).visit_check_constraint(constraint)
def visit_drop_table_comment(self, drop):
return "COMMENT ON TABLE %s IS NULL" % self.preparer.format_table(
drop.element
)
def visit_create_enum_type(self, create):
type_ = create.element
return "CREATE TYPE %s AS ENUM (%s)" % (
self.preparer.format_type(type_),
", ".join(
self.sql_compiler.process(sql.literal(e), literal_binds=True)
for e in type_.enums
),
)
def visit_drop_enum_type(self, drop):
type_ = drop.element
return "DROP TYPE %s" % (self.preparer.format_type(type_))
def visit_create_index(self, create):
preparer = self.preparer
index = create.element
self._verify_index_table(index)
text = "CREATE "
if index.unique:
text += "UNIQUE "
text += "INDEX "
if self.dialect._supports_create_index_concurrently:
concurrently = index.dialect_options["postgresql"]["concurrently"]
if concurrently:
text += "CONCURRENTLY "
text += "%s ON %s " % (
self._prepared_index_name(index, include_schema=False),
preparer.format_table(index.table),
)
using = index.dialect_options["postgresql"]["using"]
if using:
text += (
"USING %s "
% self.preparer.validate_sql_phrase(using, IDX_USING).lower()
)
ops = index.dialect_options["postgresql"]["ops"]
text += "(%s)" % (
", ".join(
[
self.sql_compiler.process(
expr.self_group()
if not isinstance(expr, expression.ColumnClause)
else expr,
include_table=False,
literal_binds=True,
)
+ (
(" " + ops[expr.key])
if hasattr(expr, "key") and expr.key in ops
else ""
)
for expr in index.expressions
]
)
)
withclause = index.dialect_options["postgresql"]["with"]
if withclause:
text += " WITH (%s)" % (
", ".join(
[
"%s = %s" % storage_parameter
for storage_parameter in withclause.items()
]
)
)
tablespace_name = index.dialect_options["postgresql"]["tablespace"]
if tablespace_name:
text += " TABLESPACE %s" % preparer.quote(tablespace_name)
whereclause = index.dialect_options["postgresql"]["where"]
if whereclause is not None:
where_compiled = self.sql_compiler.process(
whereclause, include_table=False, literal_binds=True
)
text += " WHERE " + where_compiled
return text
def visit_drop_index(self, drop):
index = drop.element
text = "\nDROP INDEX "
if self.dialect._supports_drop_index_concurrently:
concurrently = index.dialect_options["postgresql"]["concurrently"]
if concurrently:
text += "CONCURRENTLY "
text += self._prepared_index_name(index, include_schema=True)
return text
def visit_exclude_constraint(self, constraint, **kw):
text = ""
if constraint.name is not None:
text += "CONSTRAINT %s " % self.preparer.format_constraint(
constraint
)
elements = []
for expr, name, op in constraint._render_exprs:
kw["include_table"] = False
elements.append(
"%s WITH %s" % (self.sql_compiler.process(expr, **kw), op)
)
text += "EXCLUDE USING %s (%s)" % (
self.preparer.validate_sql_phrase(
constraint.using, IDX_USING
).lower(),
", ".join(elements),
)
if constraint.where is not None:
text += " WHERE (%s)" % self.sql_compiler.process(
constraint.where, literal_binds=True
)
text += self.define_constraint_deferrability(constraint)
return text
def post_create_table(self, table):
table_opts = []
pg_opts = table.dialect_options["postgresql"]
inherits = pg_opts.get("inherits")
if inherits is not None:
if not isinstance(inherits, (list, tuple)):
inherits = (inherits,)
table_opts.append(
"\n INHERITS ( "
+ ", ".join(self.preparer.quote(name) for name in inherits)
+ " )"
)
if pg_opts["partition_by"]:
table_opts.append("\n PARTITION BY %s" % pg_opts["partition_by"])
if pg_opts["with_oids"] is True:
table_opts.append("\n WITH OIDS")
elif pg_opts["with_oids"] is False:
table_opts.append("\n WITHOUT OIDS")
if pg_opts["on_commit"]:
on_commit_options = pg_opts["on_commit"].replace("_", " ").upper()
table_opts.append("\n ON COMMIT %s" % on_commit_options)
if pg_opts["tablespace"]:
tablespace_name = pg_opts["tablespace"]
table_opts.append(
"\n TABLESPACE %s" % self.preparer.quote(tablespace_name)
)
return "".join(table_opts)
def visit_computed_column(self, generated):
if generated.persisted is False:
raise exc.CompileError(
"PostrgreSQL computed columns do not support 'virtual' "
"persistence; set the 'persisted' flag to None or True for "
"PostgreSQL support."
)
return "GENERATED ALWAYS AS (%s) STORED" % self.sql_compiler.process(
generated.sqltext, include_table=False, literal_binds=True
)
class PGTypeCompiler(compiler.GenericTypeCompiler):
def visit_TSVECTOR(self, type_, **kw):
return "TSVECTOR"
def visit_INET(self, type_, **kw):
return "INET"
def visit_CIDR(self, type_, **kw):
return "CIDR"
def visit_MACADDR(self, type_, **kw):
return "MACADDR"
def visit_MONEY(self, type_, **kw):
return "MONEY"
def visit_OID(self, type_, **kw):
return "OID"
def visit_REGCLASS(self, type_, **kw):
return "REGCLASS"
def visit_FLOAT(self, type_, **kw):
if not type_.precision:
return "FLOAT"
else:
return "FLOAT(%(precision)s)" % {"precision": type_.precision}
def visit_DOUBLE_PRECISION(self, type_, **kw):
return "DOUBLE PRECISION"
def visit_BIGINT(self, type_, **kw):
return "BIGINT"
def visit_HSTORE(self, type_, **kw):
return "HSTORE"
def visit_JSON(self, type_, **kw):
return "JSON"
def visit_JSONB(self, type_, **kw):
return "JSONB"
def visit_INT4RANGE(self, type_, **kw):
return "INT4RANGE"
def visit_INT8RANGE(self, type_, **kw):
return "INT8RANGE"
def visit_NUMRANGE(self, type_, **kw):
return "NUMRANGE"
def visit_DATERANGE(self, type_, **kw):
return "DATERANGE"
def visit_TSRANGE(self, type_, **kw):
return "TSRANGE"
def visit_TSTZRANGE(self, type_, **kw):
return "TSTZRANGE"
def visit_datetime(self, type_, **kw):
return self.visit_TIMESTAMP(type_, **kw)
def visit_enum(self, type_, **kw):
if not type_.native_enum or not self.dialect.supports_native_enum:
return super(PGTypeCompiler, self).visit_enum(type_, **kw)
else:
return self.visit_ENUM(type_, **kw)
def visit_ENUM(self, type_, identifier_preparer=None, **kw):
if identifier_preparer is None:
identifier_preparer = self.dialect.identifier_preparer
return identifier_preparer.format_type(type_)
def visit_TIMESTAMP(self, type_, **kw):
return "TIMESTAMP%s %s" % (
"(%d)" % type_.precision
if getattr(type_, "precision", None) is not None
else "",
(type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE",
)
def visit_TIME(self, type_, **kw):
return "TIME%s %s" % (
"(%d)" % type_.precision
if getattr(type_, "precision", None) is not None
else "",
(type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE",
)
def visit_INTERVAL(self, type_, **kw):
text = "INTERVAL"
if type_.fields is not None:
text += " " + type_.fields
if type_.precision is not None:
text += " (%d)" % type_.precision
return text
def visit_BIT(self, type_, **kw):
if type_.varying:
compiled = "BIT VARYING"
if type_.length is not None:
compiled += "(%d)" % type_.length
else:
compiled = "BIT(%d)" % type_.length
return compiled
def visit_UUID(self, type_, **kw):
return "UUID"
def visit_large_binary(self, type_, **kw):
return self.visit_BYTEA(type_, **kw)
def visit_BYTEA(self, type_, **kw):
return "BYTEA"
def visit_ARRAY(self, type_, **kw):
# TODO: pass **kw?
inner = self.process(type_.item_type)
return re.sub(
r"((?: COLLATE.*)?)$",
(
r"%s\1"
% (
"[]"
* (type_.dimensions if type_.dimensions is not None else 1)
)
),
inner,
count=1,
)
class PGIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
def _unquote_identifier(self, value):
if value[0] == self.initial_quote:
value = value[1:-1].replace(
self.escape_to_quote, self.escape_quote
)
return value
def format_type(self, type_, use_schema=True):
if not type_.name:
raise exc.CompileError("PostgreSQL ENUM type requires a name.")
name = self.quote(type_.name)
effective_schema = self.schema_for_object(type_)
if (
not self.omit_schema
and use_schema
and effective_schema is not None
):
name = self.quote_schema(effective_schema) + "." + name
return name
class PGInspector(reflection.Inspector):
def __init__(self, conn):
reflection.Inspector.__init__(self, conn)
def get_table_oid(self, table_name, schema=None):
"""Return the OID for the given table name."""
return self.dialect.get_table_oid(
self.bind, table_name, schema, info_cache=self.info_cache
)
def get_enums(self, schema=None):
"""Return a list of ENUM objects.
Each member is a dictionary containing these fields:
* name - name of the enum
* schema - the schema name for the enum.
* visible - boolean, whether or not this enum is visible
in the default search path.
* labels - a list of string labels that apply to the enum.
:param schema: schema name. If None, the default schema
(typically 'public') is used. May also be set to '*' to
indicate load enums for all schemas.
.. versionadded:: 1.0.0
"""
schema = schema or self.default_schema_name
return self.dialect._load_enums(self.bind, schema)
def get_foreign_table_names(self, schema=None):
"""Return a list of FOREIGN TABLE names.
Behavior is similar to that of
:meth:`_reflection.Inspector.get_table_names`,
except that the list is limited to those tables that report a
``relkind`` value of ``f``.
.. versionadded:: 1.0.0
"""
schema = schema or self.default_schema_name
return self.dialect._get_foreign_table_names(self.bind, schema)
def get_view_names(self, schema=None, include=("plain", "materialized")):
"""Return all view names in `schema`.
:param schema: Optional, retrieve names from a non-default schema.
For special quoting, use :class:`.quoted_name`.
:param include: specify which types of views to return. Passed
as a string value (for a single type) or a tuple (for any number
of types). Defaults to ``('plain', 'materialized')``.
.. versionadded:: 1.1
"""
return self.dialect.get_view_names(
self.bind, schema, info_cache=self.info_cache, include=include
)
class CreateEnumType(schema._CreateDropBase):
__visit_name__ = "create_enum_type"
class DropEnumType(schema._CreateDropBase):
__visit_name__ = "drop_enum_type"
class PGExecutionContext(default.DefaultExecutionContext):
def fire_sequence(self, seq, type_):
return self._execute_scalar(
(
"select nextval('%s')"
% self.dialect.identifier_preparer.format_sequence(seq)
),
type_,
)
def get_insert_default(self, column):
if column.primary_key and column is column.table._autoincrement_column:
if column.server_default and column.server_default.has_argument:
# pre-execute passive defaults on primary key columns
return self._execute_scalar(
"select %s" % column.server_default.arg, column.type
)
elif column.default is None or (
column.default.is_sequence and column.default.optional
):
# execute the sequence associated with a SERIAL primary
# key column. for non-primary-key SERIAL, the ID just
# generates server side.
try:
seq_name = column._postgresql_seq_name
except AttributeError:
tab = column.table.name
col = column.name
tab = tab[0 : 29 + max(0, (29 - len(col)))]
col = col[0 : 29 + max(0, (29 - len(tab)))]
name = "%s_%s_seq" % (tab, col)
column._postgresql_seq_name = seq_name = name
if column.table is not None:
effective_schema = self.connection.schema_for_object(
column.table
)
else:
effective_schema = None
if effective_schema is not None:
exc = 'select nextval(\'"%s"."%s"\')' % (
effective_schema,
seq_name,
)
else:
exc = "select nextval('\"%s\"')" % (seq_name,)
return self._execute_scalar(exc, column.type)
return super(PGExecutionContext, self).get_insert_default(column)
def should_autocommit_text(self, statement):
return AUTOCOMMIT_REGEXP.match(statement)
class PGDialect(default.DefaultDialect):
name = "postgresql"
supports_alter = True
max_identifier_length = 63
supports_sane_rowcount = True
supports_native_enum = True
supports_native_boolean = True
supports_smallserial = True
supports_sequences = True
sequences_optional = True
preexecute_autoincrement_sequences = True
postfetch_lastrowid = False
supports_comments = True
supports_default_values = True
supports_empty_insert = False
supports_multivalues_insert = True
default_paramstyle = "pyformat"
ischema_names = ischema_names
colspecs = colspecs
statement_compiler = PGCompiler
ddl_compiler = PGDDLCompiler
type_compiler = PGTypeCompiler
preparer = PGIdentifierPreparer
execution_ctx_cls = PGExecutionContext
inspector = PGInspector
isolation_level = None
construct_arguments = [
(
schema.Index,
{
"using": False,
"where": None,
"ops": {},
"concurrently": False,
"with": {},
"tablespace": None,
},
),
(
schema.Table,
{
"ignore_search_path": False,
"tablespace": None,
"partition_by": None,
"with_oids": None,
"on_commit": None,
"inherits": None,
},
),
]
reflection_options = ("postgresql_ignore_search_path",)
_backslash_escapes = True
_supports_create_index_concurrently = True
_supports_drop_index_concurrently = True
def __init__(
self,
isolation_level=None,
json_serializer=None,
json_deserializer=None,
**kwargs
):
default.DefaultDialect.__init__(self, **kwargs)
self.isolation_level = isolation_level
self._json_deserializer = json_deserializer
self._json_serializer = json_serializer
def initialize(self, connection):
super(PGDialect, self).initialize(connection)
self.implicit_returning = self.server_version_info > (
8,
2,
) and self.__dict__.get("implicit_returning", True)
self.supports_native_enum = self.server_version_info >= (8, 3)
if not self.supports_native_enum:
self.colspecs = self.colspecs.copy()
# pop base Enum type
self.colspecs.pop(sqltypes.Enum, None)
# psycopg2, others may have placed ENUM here as well
self.colspecs.pop(ENUM, None)
# http://www.postgresql.org/docs/9.3/static/release-9-2.html#AEN116689
self.supports_smallserial = self.server_version_info >= (9, 2)
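        # backslashes act as escapes in string literals when the server
        # predates standard_conforming_strings (< 8.2) or has it turned off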
self._backslash_escapes = (
self.server_version_info < (8, 2)
or connection.scalar("show standard_conforming_strings") == "off"
)
self._supports_create_index_concurrently = (
self.server_version_info >= (8, 2)
)
self._supports_drop_index_concurrently = self.server_version_info >= (
9,
2,
)
def on_connect(self):
if self.isolation_level is not None:
def connect(conn):
self.set_isolation_level(conn, self.isolation_level)
return connect
else:
return None
_isolation_lookup = set(
[
"SERIALIZABLE",
"READ UNCOMMITTED",
"READ COMMITTED",
"REPEATABLE READ",
]
)
def set_isolation_level(self, connection, level):
level = level.replace("_", " ")
if level not in self._isolation_lookup:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s"
% (level, self.name, ", ".join(self._isolation_lookup))
)
cursor = connection.cursor()
cursor.execute(
"SET SESSION CHARACTERISTICS AS TRANSACTION "
"ISOLATION LEVEL %s" % level
)
cursor.execute("COMMIT")
cursor.close()
def get_isolation_level(self, connection):
cursor = connection.cursor()
cursor.execute("show transaction isolation level")
val = cursor.fetchone()[0]
cursor.close()
return val.upper()
def do_begin_twophase(self, connection, xid):
self.do_begin(connection.connection)
def do_prepare_twophase(self, connection, xid):
connection.execute("PREPARE TRANSACTION '%s'" % xid)
def do_rollback_twophase(
self, connection, xid, is_prepared=True, recover=False
):
if is_prepared:
if recover:
# FIXME: ugly hack to get out of transaction
# context when committing recoverable transactions
# Must find out a way how to make the dbapi not
# open a transaction.
connection.execute("ROLLBACK")
connection.execute("ROLLBACK PREPARED '%s'" % xid)
connection.execute("BEGIN")
self.do_rollback(connection.connection)
else:
self.do_rollback(connection.connection)
def do_commit_twophase(
self, connection, xid, is_prepared=True, recover=False
):
if is_prepared:
if recover:
connection.execute("ROLLBACK")
connection.execute("COMMIT PREPARED '%s'" % xid)
connection.execute("BEGIN")
self.do_rollback(connection.connection)
else:
self.do_commit(connection.connection)
def do_recover_twophase(self, connection):
resultset = connection.execute(
sql.text("SELECT gid FROM pg_prepared_xacts")
)
return [row[0] for row in resultset]
def _get_default_schema_name(self, connection):
return connection.scalar("select current_schema()")
def has_schema(self, connection, schema):
query = (
"select nspname from pg_namespace " "where lower(nspname)=:schema"
)
cursor = connection.execute(
sql.text(query).bindparams(
sql.bindparam(
"schema",
util.text_type(schema.lower()),
type_=sqltypes.Unicode,
)
)
)
return bool(cursor.first())
def has_table(self, connection, table_name, schema=None):
# seems like case gets folded in pg_class...
if schema is None:
cursor = connection.execute(
sql.text(
"select relname from pg_class c join pg_namespace n on "
"n.oid=c.relnamespace where "
"pg_catalog.pg_table_is_visible(c.oid) "
"and relname=:name"
).bindparams(
sql.bindparam(
"name",
util.text_type(table_name),
type_=sqltypes.Unicode,
)
)
)
else:
cursor = connection.execute(
sql.text(
"select relname from pg_class c join pg_namespace n on "
"n.oid=c.relnamespace where n.nspname=:schema and "
"relname=:name"
).bindparams(
sql.bindparam(
"name",
util.text_type(table_name),
type_=sqltypes.Unicode,
),
sql.bindparam(
"schema",
util.text_type(schema),
type_=sqltypes.Unicode,
),
)
)
return bool(cursor.first())
def has_sequence(self, connection, sequence_name, schema=None):
if schema is None:
cursor = connection.execute(
sql.text(
"SELECT relname FROM pg_class c join pg_namespace n on "
"n.oid=c.relnamespace where relkind='S' and "
"n.nspname=current_schema() "
"and relname=:name"
).bindparams(
sql.bindparam(
"name",
util.text_type(sequence_name),
type_=sqltypes.Unicode,
)
)
)
else:
cursor = connection.execute(
sql.text(
"SELECT relname FROM pg_class c join pg_namespace n on "
"n.oid=c.relnamespace where relkind='S' and "
"n.nspname=:schema and relname=:name"
).bindparams(
sql.bindparam(
"name",
util.text_type(sequence_name),
type_=sqltypes.Unicode,
),
sql.bindparam(
"schema",
util.text_type(schema),
type_=sqltypes.Unicode,
),
)
)
return bool(cursor.first())
def has_type(self, connection, type_name, schema=None):
if schema is not None:
query = """
SELECT EXISTS (
SELECT * FROM pg_catalog.pg_type t, pg_catalog.pg_namespace n
WHERE t.typnamespace = n.oid
AND t.typname = :typname
AND n.nspname = :nspname
)
"""
query = sql.text(query)
else:
query = """
SELECT EXISTS (
SELECT * FROM pg_catalog.pg_type t
WHERE t.typname = :typname
AND pg_type_is_visible(t.oid)
)
"""
query = sql.text(query)
query = query.bindparams(
sql.bindparam(
"typname", util.text_type(type_name), type_=sqltypes.Unicode
)
)
if schema is not None:
query = query.bindparams(
sql.bindparam(
"nspname", util.text_type(schema), type_=sqltypes.Unicode
)
)
cursor = connection.execute(query)
return bool(cursor.scalar())
def _get_server_version_info(self, connection):
v = connection.execute("select version()").scalar()
m = re.match(
r".*(?:PostgreSQL|EnterpriseDB) "
r"(\d+)\.?(\d+)?(?:\.(\d+))?(?:\.\d+)?(?:devel|beta)?",
v,
)
if not m:
raise AssertionError(
"Could not determine version from string '%s'" % v
)
return tuple([int(x) for x in m.group(1, 2, 3) if x is not None])
@reflection.cache
def get_table_oid(self, connection, table_name, schema=None, **kw):
"""Fetch the oid for schema.table_name.
Several reflection methods require the table oid. The idea for using
this method is that it can be fetched one time and cached for
subsequent calls.
"""
table_oid = None
if schema is not None:
schema_where_clause = "n.nspname = :schema"
else:
schema_where_clause = "pg_catalog.pg_table_is_visible(c.oid)"
query = (
"""
SELECT c.oid
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE (%s)
AND c.relname = :table_name AND c.relkind in
('r', 'v', 'm', 'f', 'p')
"""
% schema_where_clause
)
# Since we're binding to unicode, table_name and schema_name must be
# unicode.
table_name = util.text_type(table_name)
if schema is not None:
schema = util.text_type(schema)
s = sql.text(query).bindparams(table_name=sqltypes.Unicode)
s = s.columns(oid=sqltypes.Integer)
if schema:
s = s.bindparams(sql.bindparam("schema", type_=sqltypes.Unicode))
c = connection.execute(s, table_name=table_name, schema=schema)
table_oid = c.scalar()
if table_oid is None:
raise exc.NoSuchTableError(table_name)
return table_oid
@reflection.cache
def get_schema_names(self, connection, **kw):
result = connection.execute(
sql.text(
"SELECT nspname FROM pg_namespace "
"WHERE nspname NOT LIKE 'pg_%' "
"ORDER BY nspname"
).columns(nspname=sqltypes.Unicode)
)
return [name for name, in result]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
result = connection.execute(
sql.text(
"SELECT c.relname FROM pg_class c "
"JOIN pg_namespace n ON n.oid = c.relnamespace "
"WHERE n.nspname = :schema AND c.relkind in ('r', 'p')"
).columns(relname=sqltypes.Unicode),
schema=schema if schema is not None else self.default_schema_name,
)
return [name for name, in result]
@reflection.cache
def _get_foreign_table_names(self, connection, schema=None, **kw):
result = connection.execute(
sql.text(
"SELECT c.relname FROM pg_class c "
"JOIN pg_namespace n ON n.oid = c.relnamespace "
"WHERE n.nspname = :schema AND c.relkind = 'f'"
).columns(relname=sqltypes.Unicode),
schema=schema if schema is not None else self.default_schema_name,
)
return [name for name, in result]
@reflection.cache
def get_view_names(
self, connection, schema=None, include=("plain", "materialized"), **kw
):
include_kind = {"plain": "v", "materialized": "m"}
try:
kinds = [include_kind[i] for i in util.to_list(include)]
except KeyError:
raise ValueError(
"include %r unknown, needs to be a sequence containing "
"one or both of 'plain' and 'materialized'" % (include,)
)
if not kinds:
raise ValueError(
"empty include, needs to be a sequence containing "
"one or both of 'plain' and 'materialized'"
)
result = connection.execute(
sql.text(
"SELECT c.relname FROM pg_class c "
"JOIN pg_namespace n ON n.oid = c.relnamespace "
"WHERE n.nspname = :schema AND c.relkind IN (%s)"
% (", ".join("'%s'" % elem for elem in kinds))
).columns(relname=sqltypes.Unicode),
schema=schema if schema is not None else self.default_schema_name,
)
return [name for name, in result]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
view_def = connection.scalar(
sql.text(
"SELECT pg_get_viewdef(c.oid) view_def FROM pg_class c "
"JOIN pg_namespace n ON n.oid = c.relnamespace "
"WHERE n.nspname = :schema AND c.relname = :view_name "
"AND c.relkind IN ('v', 'm')"
).columns(view_def=sqltypes.Unicode),
schema=schema if schema is not None else self.default_schema_name,
view_name=view_name,
)
return view_def
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
table_oid = self.get_table_oid(
connection, table_name, schema, info_cache=kw.get("info_cache")
)
generated = (
"a.attgenerated as generated"
if self.server_version_info >= (12,)
else "NULL as generated"
)
SQL_COLS = (
"""
SELECT a.attname,
pg_catalog.format_type(a.atttypid, a.atttypmod),
(SELECT pg_catalog.pg_get_expr(d.adbin, d.adrelid)
FROM pg_catalog.pg_attrdef d
WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum
AND a.atthasdef)
AS DEFAULT,
a.attnotnull, a.attnum, a.attrelid as table_oid,
pgd.description as comment,
%s
FROM pg_catalog.pg_attribute a
LEFT JOIN pg_catalog.pg_description pgd ON (
pgd.objoid = a.attrelid AND pgd.objsubid = a.attnum)
WHERE a.attrelid = :table_oid
AND a.attnum > 0 AND NOT a.attisdropped
ORDER BY a.attnum
"""
% generated
)
s = (
sql.text(SQL_COLS)
.bindparams(sql.bindparam("table_oid", type_=sqltypes.Integer))
.columns(attname=sqltypes.Unicode, default=sqltypes.Unicode)
)
c = connection.execute(s, table_oid=table_oid)
rows = c.fetchall()
# dictionary with (name, ) if default search path or (schema, name)
# as keys
domains = self._load_domains(connection)
# dictionary with (name, ) if default search path or (schema, name)
# as keys
enums = dict(
((rec["name"],), rec)
if rec["visible"]
else ((rec["schema"], rec["name"]), rec)
for rec in self._load_enums(connection, schema="*")
)
# format columns
columns = []
for (
name,
format_type,
default_,
notnull,
attnum,
table_oid,
comment,
generated,
) in rows:
column_info = self._get_column_info(
name,
format_type,
default_,
notnull,
domains,
enums,
schema,
comment,
generated,
)
columns.append(column_info)
return columns
def _get_column_info(
self,
name,
format_type,
default,
notnull,
domains,
enums,
schema,
comment,
generated,
):
def _handle_array_type(attype):
return (
# strip '[]' from integer[], etc.
re.sub(r"\[\]$", "", attype),
attype.endswith("[]"),
)
# strip (*) from character varying(5), timestamp(5)
# with time zone, geometry(POLYGON), etc.
attype = re.sub(r"\(.*\)", "", format_type)
# strip '[]' from integer[], etc. and check if an array
attype, is_array = _handle_array_type(attype)
# strip quotes from case sensitive enum or domain names
enum_or_domain_key = tuple(util.quoted_token_parser(attype))
nullable = not notnull
charlen = re.search(r"\(([\d,]+)\)", format_type)
if charlen:
charlen = charlen.group(1)
args = re.search(r"\((.*)\)", format_type)
if args and args.group(1):
args = tuple(re.split(r"\s*,\s*", args.group(1)))
else:
args = ()
kwargs = {}
if attype == "numeric":
if charlen:
prec, scale = charlen.split(",")
args = (int(prec), int(scale))
else:
args = ()
elif attype == "double precision":
args = (53,)
elif attype == "integer":
args = ()
elif attype in ("timestamp with time zone", "time with time zone"):
kwargs["timezone"] = True
if charlen:
kwargs["precision"] = int(charlen)
args = ()
elif attype in (
"timestamp without time zone",
"time without time zone",
"time",
):
kwargs["timezone"] = False
if charlen:
kwargs["precision"] = int(charlen)
args = ()
elif attype == "bit varying":
kwargs["varying"] = True
if charlen:
args = (int(charlen),)
else:
args = ()
elif attype.startswith("interval"):
field_match = re.match(r"interval (.+)", attype, re.I)
if charlen:
kwargs["precision"] = int(charlen)
if field_match:
kwargs["fields"] = field_match.group(1)
attype = "interval"
args = ()
elif charlen:
args = (int(charlen),)
while True:
# looping here to suit nested domains
if attype in self.ischema_names:
coltype = self.ischema_names[attype]
break
elif enum_or_domain_key in enums:
enum = enums[enum_or_domain_key]
coltype = ENUM
kwargs["name"] = enum["name"]
if not enum["visible"]:
kwargs["schema"] = enum["schema"]
args = tuple(enum["labels"])
break
elif enum_or_domain_key in domains:
domain = domains[enum_or_domain_key]
attype = domain["attype"]
attype, is_array = _handle_array_type(attype)
# strip quotes from case sensitive enum or domain names
enum_or_domain_key = tuple(util.quoted_token_parser(attype))
# A table can't override whether the domain is nullable.
nullable = domain["nullable"]
if domain["default"] and not default:
# It can, however, override the default
# value, but can't set it to null.
default = domain["default"]
continue
else:
coltype = None
break
if coltype:
coltype = coltype(*args, **kwargs)
if is_array:
coltype = self.ischema_names["_array"](coltype)
else:
util.warn(
"Did not recognize type '%s' of column '%s'" % (attype, name)
)
coltype = sqltypes.NULLTYPE
# If a zero byte (''), then not a generated column.
# Otherwise, s = stored. (Other values might be added in the future.)
if generated:
computed = dict(sqltext=default, persisted=generated == "s")
default = None
else:
computed = None
# adjust the default value
autoincrement = False
if default is not None:
match = re.search(r"""(nextval\(')([^']+)('.*$)""", default)
if match is not None:
if issubclass(coltype._type_affinity, sqltypes.Integer):
autoincrement = True
# the default is related to a Sequence
sch = schema
if "." not in match.group(2) and sch is not None:
# unconditionally quote the schema name. this could
# later be enhanced to obey quoting rules /
# "quote schema"
default = (
match.group(1)
+ ('"%s"' % sch)
+ "."
+ match.group(2)
+ match.group(3)
)
column_info = dict(
name=name,
type=coltype,
nullable=nullable,
default=default,
autoincrement=autoincrement,
comment=comment,
)
if computed is not None:
column_info["computed"] = computed
return column_info
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
table_oid = self.get_table_oid(
connection, table_name, schema, info_cache=kw.get("info_cache")
)
if self.server_version_info < (8, 4):
PK_SQL = """
SELECT a.attname
FROM
pg_class t
join pg_index ix on t.oid = ix.indrelid
join pg_attribute a
on t.oid=a.attrelid AND %s
WHERE
t.oid = :table_oid and ix.indisprimary = 't'
ORDER BY a.attnum
""" % self._pg_index_any(
"a.attnum", "ix.indkey"
)
else:
# unnest() and generate_subscripts() both introduced in
# version 8.4
PK_SQL = """
SELECT a.attname
FROM pg_attribute a JOIN (
SELECT unnest(ix.indkey) attnum,
generate_subscripts(ix.indkey, 1) ord
FROM pg_index ix
WHERE ix.indrelid = :table_oid AND ix.indisprimary
) k ON a.attnum=k.attnum
WHERE a.attrelid = :table_oid
ORDER BY k.ord
"""
t = sql.text(PK_SQL).columns(attname=sqltypes.Unicode)
c = connection.execute(t, table_oid=table_oid)
cols = [r[0] for r in c.fetchall()]
PK_CONS_SQL = """
SELECT conname
FROM pg_catalog.pg_constraint r
WHERE r.conrelid = :table_oid AND r.contype = 'p'
ORDER BY 1
"""
t = sql.text(PK_CONS_SQL).columns(conname=sqltypes.Unicode)
c = connection.execute(t, table_oid=table_oid)
name = c.scalar()
return {"constrained_columns": cols, "name": name}
@reflection.cache
def get_foreign_keys(
self,
connection,
table_name,
schema=None,
postgresql_ignore_search_path=False,
**kw
):
preparer = self.identifier_preparer
table_oid = self.get_table_oid(
connection, table_name, schema, info_cache=kw.get("info_cache")
)
FK_SQL = """
SELECT r.conname,
pg_catalog.pg_get_constraintdef(r.oid, true) as condef,
n.nspname as conschema
FROM pg_catalog.pg_constraint r,
pg_namespace n,
pg_class c
WHERE r.conrelid = :table AND
r.contype = 'f' AND
c.oid = confrelid AND
n.oid = c.relnamespace
ORDER BY 1
"""
# http://www.postgresql.org/docs/9.0/static/sql-createtable.html
FK_REGEX = re.compile(
r"FOREIGN KEY \((.*?)\) REFERENCES (?:(.*?)\.)?(.*?)\((.*?)\)"
r"[\s]?(MATCH (FULL|PARTIAL|SIMPLE)+)?"
r"[\s]?(ON UPDATE "
r"(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?"
r"[\s]?(ON DELETE "
r"(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?"
r"[\s]?(DEFERRABLE|NOT DEFERRABLE)?"
r"[\s]?(INITIALLY (DEFERRED|IMMEDIATE)+)?"
)
t = sql.text(FK_SQL).columns(
conname=sqltypes.Unicode, condef=sqltypes.Unicode
)
c = connection.execute(t, table=table_oid)
fkeys = []
for conname, condef, conschema in c.fetchall():
m = re.search(FK_REGEX, condef).groups()
(
constrained_columns,
referred_schema,
referred_table,
referred_columns,
_,
match,
_,
onupdate,
_,
ondelete,
deferrable,
_,
initially,
) = m
if deferrable is not None:
deferrable = True if deferrable == "DEFERRABLE" else False
constrained_columns = [
preparer._unquote_identifier(x)
for x in re.split(r"\s*,\s*", constrained_columns)
]
if postgresql_ignore_search_path:
# when ignoring search path, we use the actual schema
# provided it isn't the "default" schema
if conschema != self.default_schema_name:
referred_schema = conschema
else:
referred_schema = schema
elif referred_schema:
# referred_schema is the schema that we regexp'ed from
# pg_get_constraintdef(). If the schema is in the search
# path, pg_get_constraintdef() will give us None.
referred_schema = preparer._unquote_identifier(referred_schema)
elif schema is not None and schema == conschema:
# If the actual schema matches the schema of the table
# we're reflecting, then we will use that.
referred_schema = schema
referred_table = preparer._unquote_identifier(referred_table)
referred_columns = [
preparer._unquote_identifier(x)
for x in re.split(r"\s*,\s", referred_columns)
]
fkey_d = {
"name": conname,
"constrained_columns": constrained_columns,
"referred_schema": referred_schema,
"referred_table": referred_table,
"referred_columns": referred_columns,
"options": {
"onupdate": onupdate,
"ondelete": ondelete,
"deferrable": deferrable,
"initially": initially,
"match": match,
},
}
fkeys.append(fkey_d)
return fkeys
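    # Hedged illustration (editor addition) of what FK_REGEX extracts from a
    # pg_get_constraintdef() string:
    #
    #   condef = "FOREIGN KEY (user_id) REFERENCES public.users(id) ON DELETE CASCADE"
    #   groups = re.search(FK_REGEX, condef).groups()
    #   # constrained columns -> "user_id", referred schema -> "public",
    #   # referred table -> "users", referred columns -> "id",
    #   # ON DELETE action -> "CASCADE"; clauses that are absent come back as None.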
def _pg_index_any(self, col, compare_to):
if self.server_version_info < (8, 1):
# http://www.postgresql.org/message-id/[email protected]
# "In CVS tip you could replace this with "attnum = ANY (indkey)".
# Unfortunately, most array support doesn't work on int2vector in
# pre-8.1 releases, so I think you're kinda stuck with the above
# for now.
# regards, tom lane"
return "(%s)" % " OR ".join(
"%s[%d] = %s" % (compare_to, ind, col) for ind in range(0, 10)
)
else:
return "%s = ANY(%s)" % (col, compare_to)
@reflection.cache
def get_indexes(self, connection, table_name, schema, **kw):
table_oid = self.get_table_oid(
connection, table_name, schema, info_cache=kw.get("info_cache")
)
# cast indkey as varchar since it's an int2vector,
# returned as a list by some drivers such as pypostgresql
if self.server_version_info < (8, 5):
IDX_SQL = """
SELECT
i.relname as relname,
ix.indisunique, ix.indexprs, ix.indpred,
a.attname, a.attnum, NULL, ix.indkey%s,
%s, %s, am.amname,
NULL as indnkeyatts
FROM
pg_class t
join pg_index ix on t.oid = ix.indrelid
join pg_class i on i.oid = ix.indexrelid
left outer join
pg_attribute a
on t.oid = a.attrelid and %s
left outer join
pg_am am
on i.relam = am.oid
WHERE
t.relkind IN ('r', 'v', 'f', 'm')
and t.oid = :table_oid
and ix.indisprimary = 'f'
ORDER BY
t.relname,
i.relname
""" % (
                # The version 8.3 check here is based on observing that the
                # cast does not work in PG 8.2.4 but does work in 8.3.0;
                # nothing in the PG changelogs mentions this.
"::varchar" if self.server_version_info >= (8, 3) else "",
"ix.indoption::varchar"
if self.server_version_info >= (8, 3)
else "NULL",
"i.reloptions"
if self.server_version_info >= (8, 2)
else "NULL",
self._pg_index_any("a.attnum", "ix.indkey"),
)
else:
IDX_SQL = """
SELECT
i.relname as relname,
ix.indisunique, ix.indexprs, ix.indpred,
a.attname, a.attnum, c.conrelid, ix.indkey::varchar,
ix.indoption::varchar, i.reloptions, am.amname,
%s as indnkeyatts
FROM
pg_class t
join pg_index ix on t.oid = ix.indrelid
join pg_class i on i.oid = ix.indexrelid
left outer join
pg_attribute a
on t.oid = a.attrelid and a.attnum = ANY(ix.indkey)
left outer join
pg_constraint c
on (ix.indrelid = c.conrelid and
ix.indexrelid = c.conindid and
c.contype in ('p', 'u', 'x'))
left outer join
pg_am am
on i.relam = am.oid
WHERE
t.relkind IN ('r', 'v', 'f', 'm', 'p')
and t.oid = :table_oid
and ix.indisprimary = 'f'
ORDER BY
t.relname,
i.relname
""" % (
"ix.indnkeyatts"
if self.server_version_info >= (11, 0)
else "NULL",
)
t = sql.text(IDX_SQL).columns(
relname=sqltypes.Unicode, attname=sqltypes.Unicode
)
c = connection.execute(t, table_oid=table_oid)
indexes = defaultdict(lambda: defaultdict(dict))
sv_idx_name = None
for row in c.fetchall():
(
idx_name,
unique,
expr,
prd,
col,
col_num,
conrelid,
idx_key,
idx_option,
options,
amname,
indnkeyatts,
) = row
if expr:
if idx_name != sv_idx_name:
util.warn(
"Skipped unsupported reflection of "
"expression-based index %s" % idx_name
)
sv_idx_name = idx_name
continue
if prd and not idx_name == sv_idx_name:
util.warn(
"Predicate of partial index %s ignored during reflection"
% idx_name
)
sv_idx_name = idx_name
has_idx = idx_name in indexes
index = indexes[idx_name]
if col is not None:
index["cols"][col_num] = col
if not has_idx:
idx_keys = idx_key.split()
# "The number of key columns in the index, not counting any
# included columns, which are merely stored and do not
# participate in the index semantics"
if indnkeyatts and idx_keys[indnkeyatts:]:
util.warn(
"INCLUDE columns for covering index %s "
"ignored during reflection" % (idx_name,)
)
idx_keys = idx_keys[:indnkeyatts]
index["key"] = [int(k.strip()) for k in idx_keys]
# (new in pg 8.3)
# "pg_index.indoption" is list of ints, one per column/expr.
# int acts as bitmask: 0x01=DESC, 0x02=NULLSFIRST
sorting = {}
for col_idx, col_flags in enumerate(
(idx_option or "").split()
):
col_flags = int(col_flags.strip())
col_sorting = ()
# try to set flags only if they differ from PG defaults...
if col_flags & 0x01:
col_sorting += ("desc",)
if not (col_flags & 0x02):
col_sorting += ("nullslast",)
else:
if col_flags & 0x02:
col_sorting += ("nullsfirst",)
if col_sorting:
sorting[col_idx] = col_sorting
if sorting:
index["sorting"] = sorting
index["unique"] = unique
if conrelid is not None:
index["duplicates_constraint"] = idx_name
if options:
index["options"] = dict(
[option.split("=") for option in options]
)
# it *might* be nice to include that this is 'btree' in the
# reflection info. But we don't want an Index object
# to have a ``postgresql_using`` in it that is just the
# default, so for the moment leaving this out.
if amname and amname != "btree":
index["amname"] = amname
result = []
for name, idx in indexes.items():
entry = {
"name": name,
"unique": idx["unique"],
"column_names": [idx["cols"][i] for i in idx["key"]],
}
if "duplicates_constraint" in idx:
entry["duplicates_constraint"] = idx["duplicates_constraint"]
if "sorting" in idx:
entry["column_sorting"] = dict(
(idx["cols"][idx["key"][i]], value)
for i, value in idx["sorting"].items()
)
if "options" in idx:
entry.setdefault("dialect_options", {})[
"postgresql_with"
] = idx["options"]
if "amname" in idx:
entry.setdefault("dialect_options", {})[
"postgresql_using"
] = idx["amname"]
result.append(entry)
return result
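    # Hedged sketch (editor addition): each entry appended above looks roughly like
    #
    #   {"name": "ix_user_email", "unique": True, "column_names": ["email"],
    #    "column_sorting": {"email": ("desc",)},          # only for non-default ordering
    #    "dialect_options": {"postgresql_using": "gin"}}  # only for non-btree / WITH options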
@reflection.cache
def get_unique_constraints(
self, connection, table_name, schema=None, **kw
):
table_oid = self.get_table_oid(
connection, table_name, schema, info_cache=kw.get("info_cache")
)
UNIQUE_SQL = """
SELECT
cons.conname as name,
cons.conkey as key,
a.attnum as col_num,
a.attname as col_name
FROM
pg_catalog.pg_constraint cons
join pg_attribute a
on cons.conrelid = a.attrelid AND
a.attnum = ANY(cons.conkey)
WHERE
cons.conrelid = :table_oid AND
cons.contype = 'u'
"""
t = sql.text(UNIQUE_SQL).columns(col_name=sqltypes.Unicode)
c = connection.execute(t, table_oid=table_oid)
uniques = defaultdict(lambda: defaultdict(dict))
for row in c.fetchall():
uc = uniques[row.name]
uc["key"] = row.key
uc["cols"][row.col_num] = row.col_name
return [
{"name": name, "column_names": [uc["cols"][i] for i in uc["key"]]}
for name, uc in uniques.items()
]
@reflection.cache
def get_table_comment(self, connection, table_name, schema=None, **kw):
table_oid = self.get_table_oid(
connection, table_name, schema, info_cache=kw.get("info_cache")
)
COMMENT_SQL = """
SELECT
pgd.description as table_comment
FROM
pg_catalog.pg_description pgd
WHERE
pgd.objsubid = 0 AND
pgd.objoid = :table_oid
"""
c = connection.execute(sql.text(COMMENT_SQL), table_oid=table_oid)
return {"text": c.scalar()}
@reflection.cache
def get_check_constraints(self, connection, table_name, schema=None, **kw):
table_oid = self.get_table_oid(
connection, table_name, schema, info_cache=kw.get("info_cache")
)
CHECK_SQL = """
SELECT
cons.conname as name,
pg_get_constraintdef(cons.oid) as src
FROM
pg_catalog.pg_constraint cons
WHERE
cons.conrelid = :table_oid AND
cons.contype = 'c'
"""
c = connection.execute(sql.text(CHECK_SQL), table_oid=table_oid)
ret = []
for name, src in c:
# samples:
# "CHECK (((a > 1) AND (a < 5)))"
# "CHECK (((a = 1) OR ((a > 2) AND (a < 5))))"
# "CHECK (((a > 1) AND (a < 5))) NOT VALID"
# "CHECK (some_boolean_function(a))"
# "CHECK (((a\n < 1)\n OR\n (a\n >= 5))\n)"
m = re.match(
r"^CHECK *\((.+)\)( NOT VALID)?$", src, flags=re.DOTALL
)
if not m:
util.warn("Could not parse CHECK constraint text: %r" % src)
sqltext = ""
else:
sqltext = re.compile(
r"^[\s\n]*\((.+)\)[\s\n]*$", flags=re.DOTALL
).sub(r"\1", m.group(1))
entry = {"name": name, "sqltext": sqltext}
if m and m.group(2):
entry["dialect_options"] = {"not_valid": True}
ret.append(entry)
return ret
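    # Hedged illustration (editor addition) of the CHECK parsing above:
    #
    #   src = "CHECK (((a > 1) AND (a < 5))) NOT VALID"
    #   # the outer regex keeps "((a > 1) AND (a < 5))" and flags NOT VALID,
    #   # the inner sub() strips one more paren layer, so the reflected entry is
    #   # {"name": ..., "sqltext": "(a > 1) AND (a < 5)",
    #   #  "dialect_options": {"not_valid": True}}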
def _load_enums(self, connection, schema=None):
schema = schema or self.default_schema_name
if not self.supports_native_enum:
return {}
# Load data types for enums:
SQL_ENUMS = """
SELECT t.typname as "name",
-- no enum defaults in 8.4 at least
-- t.typdefault as "default",
pg_catalog.pg_type_is_visible(t.oid) as "visible",
n.nspname as "schema",
e.enumlabel as "label"
FROM pg_catalog.pg_type t
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
LEFT JOIN pg_catalog.pg_enum e ON t.oid = e.enumtypid
WHERE t.typtype = 'e'
"""
if schema != "*":
SQL_ENUMS += "AND n.nspname = :schema "
# e.oid gives us label order within an enum
SQL_ENUMS += 'ORDER BY "schema", "name", e.oid'
s = sql.text(SQL_ENUMS).columns(
attname=sqltypes.Unicode, label=sqltypes.Unicode
)
if schema != "*":
s = s.bindparams(schema=schema)
c = connection.execute(s)
enums = []
enum_by_name = {}
for enum in c.fetchall():
key = (enum["schema"], enum["name"])
if key in enum_by_name:
enum_by_name[key]["labels"].append(enum["label"])
else:
enum_by_name[key] = enum_rec = {
"name": enum["name"],
"schema": enum["schema"],
"visible": enum["visible"],
"labels": [],
}
if enum["label"] is not None:
enum_rec["labels"].append(enum["label"])
enums.append(enum_rec)
return enums
def _load_domains(self, connection):
# Load data types for domains:
SQL_DOMAINS = """
SELECT t.typname as "name",
pg_catalog.format_type(t.typbasetype, t.typtypmod) as "attype",
not t.typnotnull as "nullable",
t.typdefault as "default",
pg_catalog.pg_type_is_visible(t.oid) as "visible",
n.nspname as "schema"
FROM pg_catalog.pg_type t
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
WHERE t.typtype = 'd'
"""
s = sql.text(SQL_DOMAINS).columns(attname=sqltypes.Unicode)
c = connection.execute(s)
domains = {}
for domain in c.fetchall():
# strip (30) from character varying(30)
attype = re.search(r"([^\(]+)", domain["attype"]).group(1)
# 'visible' just means whether or not the domain is in a
# schema that's on the search path -- or not overridden by
# a schema with higher precedence. If it's not visible,
# it will be prefixed with the schema-name when it's used.
if domain["visible"]:
key = (domain["name"],)
else:
key = (domain["schema"], domain["name"])
domains[key] = {
"attype": attype,
"nullable": domain["nullable"],
"default": domain["default"],
}
return domains
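    # Hedged illustration (editor addition): the mapping built above is keyed by
    # (name,) for visible domains and (schema, name) otherwise, e.g.
    #
    #   {("positive_int",): {"attype": "integer", "nullable": False, "default": "0"}}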
| gpl-3.0 | 1,956,788,487,594,982,000 | 32.531767 | 96 | 0.56548 | false |
zeitkunst/FluidNexus | FluidNexus/ui/FluidNexusAboutUI.py | 1 | 8437 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'FluidNexus/ui/FluidNexusAbout.ui'
#
# Created: Sun Nov 13 17:16:26 2011
# by: PyQt4 UI code generator 4.8.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_FluidNexusAbout(object):
def setupUi(self, FluidNexusAbout):
FluidNexusAbout.setObjectName(_fromUtf8("FluidNexusAbout"))
FluidNexusAbout.resize(372, 533)
self.buttonBox = QtGui.QDialogButtonBox(FluidNexusAbout)
self.buttonBox.setGeometry(QtCore.QRect(20, 490, 341, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.verticalLayoutWidget = QtGui.QWidget(FluidNexusAbout)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 351, 470))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.AboutDialogIcon = QtGui.QLabel(self.verticalLayoutWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.AboutDialogIcon.sizePolicy().hasHeightForWidth())
self.AboutDialogIcon.setSizePolicy(sizePolicy)
self.AboutDialogIcon.setText(_fromUtf8(""))
self.AboutDialogIcon.setPixmap(QtGui.QPixmap(_fromUtf8(":/icons/icons/fluid_nexus_icon.png")))
self.AboutDialogIcon.setObjectName(_fromUtf8("AboutDialogIcon"))
self.horizontalLayout.addWidget(self.AboutDialogIcon)
self.verticalLayout_2 = QtGui.QVBoxLayout()
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.AboutDialogTitle = QtGui.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(20)
font.setWeight(75)
font.setBold(True)
self.AboutDialogTitle.setFont(font)
self.AboutDialogTitle.setObjectName(_fromUtf8("AboutDialogTitle"))
self.verticalLayout_2.addWidget(self.AboutDialogTitle)
self.AboutDialogVersion = QtGui.QLabel(self.verticalLayoutWidget)
self.AboutDialogVersion.setObjectName(_fromUtf8("AboutDialogVersion"))
self.verticalLayout_2.addWidget(self.AboutDialogVersion)
self.AboutDialogLink = QtGui.QLabel(self.verticalLayoutWidget)
self.AboutDialogLink.setOpenExternalLinks(True)
self.AboutDialogLink.setObjectName(_fromUtf8("AboutDialogLink"))
self.verticalLayout_2.addWidget(self.AboutDialogLink)
self.horizontalLayout.addLayout(self.verticalLayout_2)
self.verticalLayout.addLayout(self.horizontalLayout)
self.tabWidget = QtGui.QTabWidget(self.verticalLayoutWidget)
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.AboutDialogAboutTab = QtGui.QWidget()
self.AboutDialogAboutTab.setObjectName(_fromUtf8("AboutDialogAboutTab"))
self.AboutDialogAboutText = QtGui.QTextBrowser(self.AboutDialogAboutTab)
self.AboutDialogAboutText.setGeometry(QtCore.QRect(10, 10, 311, 281))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.NoRole, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.NoRole, brush)
brush = QtGui.QBrush(QtGui.QColor(244, 244, 244))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.NoRole, brush)
self.AboutDialogAboutText.setPalette(palette)
self.AboutDialogAboutText.setFrameShape(QtGui.QFrame.NoFrame)
self.AboutDialogAboutText.setOpenExternalLinks(True)
self.AboutDialogAboutText.setObjectName(_fromUtf8("AboutDialogAboutText"))
self.tabWidget.addTab(self.AboutDialogAboutTab, _fromUtf8(""))
self.AboutDialogCreditsTab = QtGui.QWidget()
self.AboutDialogCreditsTab.setObjectName(_fromUtf8("AboutDialogCreditsTab"))
self.AboutDialogCreditsText = QtGui.QTextBrowser(self.AboutDialogCreditsTab)
self.AboutDialogCreditsText.setGeometry(QtCore.QRect(10, 10, 311, 281))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.NoRole, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.NoRole, brush)
brush = QtGui.QBrush(QtGui.QColor(244, 244, 244))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.NoRole, brush)
self.AboutDialogCreditsText.setPalette(palette)
self.AboutDialogCreditsText.setFrameShape(QtGui.QFrame.NoFrame)
self.AboutDialogCreditsText.setOpenExternalLinks(True)
self.AboutDialogCreditsText.setObjectName(_fromUtf8("AboutDialogCreditsText"))
self.tabWidget.addTab(self.AboutDialogCreditsTab, _fromUtf8(""))
self.verticalLayout.addWidget(self.tabWidget)
self.retranslateUi(FluidNexusAbout)
self.tabWidget.setCurrentIndex(0)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), FluidNexusAbout.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), FluidNexusAbout.reject)
QtCore.QMetaObject.connectSlotsByName(FluidNexusAbout)
def retranslateUi(self, FluidNexusAbout):
FluidNexusAbout.setWindowTitle(QtGui.QApplication.translate("FluidNexusAbout", "About Fluid Nexus", None, QtGui.QApplication.UnicodeUTF8))
self.AboutDialogTitle.setText(QtGui.QApplication.translate("FluidNexusAbout", "Fluid Nexus", None, QtGui.QApplication.UnicodeUTF8))
self.AboutDialogVersion.setText(QtGui.QApplication.translate("FluidNexusAbout", "Version 0.1 Alpha", None, QtGui.QApplication.UnicodeUTF8))
self.AboutDialogLink.setText(QtGui.QApplication.translate("FluidNexusAbout", "<a href=\"http://fluidnexus.net\">http://fluidnexus.net</a>", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.AboutDialogAboutTab), QtGui.QApplication.translate("FluidNexusAbout", "About", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.AboutDialogCreditsTab), QtGui.QApplication.translate("FluidNexusAbout", "Credits", None, QtGui.QApplication.UnicodeUTF8))
import FluidNexus_rc
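# Hedged usage sketch (editor addition, not produced by pyuic4): generated Ui_*
# classes are normally attached to a live dialog along these lines:
#
#   app = QtGui.QApplication([])
#   dialog = QtGui.QDialog()
#   ui = Ui_FluidNexusAbout()
#   ui.setupUi(dialog)
#   dialog.show()
#   app.exec_()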
| gpl-3.0 | -3,181,977,033,139,788,000 | 59.697842 | 186 | 0.73391 | false |
cobbler/cobbler | cobbler/items/item.py | 1 | 25418 | """
Copyright 2006-2009, Red Hat, Inc and Others
Michael DeHaan <michael.dehaan AT gmail>
This software may be freely redistributed under the terms of the GNU
general public license.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA.
"""
import copy
import enum
import fnmatch
import logging
import pprint
import re
import uuid
from typing import List, Union
import yaml
from cobbler import utils, enums
from cobbler.cexceptions import CX
RE_OBJECT_NAME = re.compile(r'[a-zA-Z0-9_\-.:]*$')
class Item:
"""
An Item is a serializable thing that can appear in a Collection
"""
# Constants
TYPE_NAME = "generic"
COLLECTION_TYPE = "generic"
# Class instance variables
converted_cache = {}
@classmethod
def get_from_cache(cls, ref):
"""
        Get an object from the cache. This may contain changes that have not been persisted yet.
        :param ref: The object which is in the cache.
        :return: The object if present, otherwise ``None``.
"""
return cls.converted_cache.get(ref.COLLECTION_TYPE, {}).get(ref.uid)
@classmethod
def set_cache(cls, ref, value):
"""
Add an object to the cache.
:param ref: An object to identify where to add the item to the cache.
:param value: The object to add to the cache.
"""
if ref.COLLECTION_TYPE not in cls.converted_cache:
cls.converted_cache[ref.COLLECTION_TYPE] = {}
cls.converted_cache[ref.COLLECTION_TYPE][ref.uid] = value
@classmethod
def remove_from_cache(cls, ref):
"""
Remove an item from the cache.
:param ref: The object reference id to identify the object.
"""
cls.converted_cache.get(ref.COLLECTION_TYPE, {}).pop(ref.uid, None)
@classmethod
def __find_compare(cls, from_search, from_obj):
"""
        Only one of the two parameters should be given to this method. If both are given, ``from_obj`` is preferred.
:param from_search: Tries to parse this str in the format as a search result string.
:param from_obj: Tries to parse this str in the format of an obj str.
:return: True if the comparison succeeded, False otherwise.
:raises CX
"""
if isinstance(from_obj, str):
            # FIXME: fnmatch is only used for string-to-string comparisons, which should cover the most common usage;
            # if not, this deserves fixing
from_obj_lower = from_obj.lower()
from_search_lower = from_search.lower()
# It's much faster to not use fnmatch if it's not needed
if '?' not in from_search_lower and '*' not in from_search_lower and '[' not in from_search_lower:
match = from_obj_lower == from_search_lower
else:
match = fnmatch.fnmatch(from_obj_lower, from_search_lower)
return match
else:
if isinstance(from_search, str):
if isinstance(from_obj, list):
from_search = utils.input_string_or_list(from_search)
for x in from_search:
if x not in from_obj:
return False
return True
if isinstance(from_obj, dict):
(junk, from_search) = utils.input_string_or_dict(from_search, allow_multiples=True)
for x in list(from_search.keys()):
y = from_search[x]
if x not in from_obj:
return False
if not (y == from_obj[x]):
return False
return True
if isinstance(from_obj, bool):
if from_search.lower() in ["true", "1", "y", "yes"]:
inp = True
else:
inp = False
if inp == from_obj:
return True
return False
raise TypeError("find cannot compare type: %s" % type(from_obj))
def __init__(self, api, is_subobject: bool = False):
"""
Constructor. Requires a back reference to the CollectionManager object.
NOTE: is_subobject is used for objects that allow inheritance in their trees. This inheritance refers to
conceptual inheritance, not Python inheritance. Objects created with is_subobject need to call their
setter for parent immediately after creation and pass in a value of an object of the same type. Currently this
is only supported for profiles. Subobjects blend their data with their parent objects and only require a valid
parent name and a name for themselves, so other required options can be gathered from items further up the
Cobbler tree.
distro
profile
profile <-- created with is_subobject=True
system <-- created as normal
For consistency, there is some code supporting this in all object types, though it is only usable
(and only should be used) for profiles at this time. Objects that are children of
objects of the same type (i.e. subprofiles) need to pass this in as True. Otherwise, just
use False for is_subobject and the parent object will (therefore) have a different type.
:param api: The Cobbler API object which is used for resolving information.
:param is_subobject: See above extensive description.
"""
self._parent = ''
self._depth = 0
self._children = []
self._ctime = 0.0
self._mtime = 0.0
self._uid = uuid.uuid4().hex
self._name = ""
self._comment = ""
self._kernel_options = {}
self._kernel_options_post = {}
self._autoinstall_meta = {}
self._fetchable_files = {}
self._boot_files = {}
self._template_files = {}
self._last_cached_mtime = 0
self._owners = []
self._cached_dict = ""
self._mgmt_classes = []
self._mgmt_parameters = {}
self._conceptual_parent = None
self._is_subobject = is_subobject
self.logger = logging.getLogger()
self.api = api
def __eq__(self, other):
"""
Comparison based on the uid for our items.
:param other: The other Item to compare.
:return: True if uid is equal, otherwise false.
"""
if isinstance(other, Item):
return self._uid == other.uid
return False
@property
def uid(self) -> str:
"""
The uid is the internal unique representation of a Cobbler object. It should never be used twice, even after an
object was deleted.
:return:
"""
return self._uid
@uid.setter
def uid(self, uid: str):
"""
Setter for the uid of the item.
:param uid: The new uid.
"""
self._uid = uid
@property
def ctime(self) -> float:
"""
        The creation time of the object as a UNIX timestamp.
        :return: The creation time as a float.
"""
return self._ctime
@ctime.setter
def ctime(self, ctime: float):
"""
        Setter for the creation time of the object.
        :param ctime: The new creation time as a float timestamp.
"""
if not isinstance(ctime, float):
raise TypeError("ctime needs to be of type float")
self._ctime = ctime
@property
def name(self):
"""
The objects name.
:return: The name of the object
"""
return self._name
@name.setter
def name(self, name: str):
"""
The objects name.
:param name: object name string
"""
if not isinstance(name, str):
raise TypeError("name must of be type str")
if not RE_OBJECT_NAME.match(name):
raise ValueError("Invalid characters in name: '%s'" % name)
self._name = name
@property
def comment(self) -> str:
"""
For every object you are able to set a unique comment which will be persisted on the object.
        :return: The comment or an empty string.
"""
return self._comment
@comment.setter
def comment(self, comment: str):
"""
Setter for the comment of the item.
        :param comment: The new comment. If ``None`` the comment will be set to an empty string.
"""
self._comment = comment
@property
def owners(self):
"""
        The list of owners of this item, used by the ownership-based authorization modules.
        :return: The list of owners.
"""
return self._owners
@owners.setter
def owners(self, owners: list):
"""
        Setter for the owners of this item.
        :param owners: The new owners, either as a list or as a string that can be split into one.
"""
self._owners = utils.input_string_or_list(owners)
@property
def kernel_options(self) -> dict:
"""
        The kernel options for this item.
        :return: The kernel options as a dict.
"""
return self._kernel_options
@kernel_options.setter
def kernel_options(self, options):
"""
Kernel options are a space delimited list, like 'a=b c=d e=f g h i=j' or a dict.
:param options: The new kernel options as a space delimited list.
:raises CX
"""
(success, value) = utils.input_string_or_dict(options, allow_multiples=True)
if not success:
raise ValueError("invalid kernel options")
else:
self._kernel_options = value
@property
def kernel_options_post(self) -> dict:
"""
        The post-install kernel options for this item.
        :return: The post-install kernel options as a dict.
"""
return self._kernel_options_post
@kernel_options_post.setter
def kernel_options_post(self, options):
"""
Post kernel options are a space delimited list, like 'a=b c=d e=f g h i=j' or a dict.
:param options: The new kernel options as a space delimited list.
:raises CX
"""
(success, value) = utils.input_string_or_dict(options, allow_multiples=True)
if not success:
raise ValueError("invalid post kernel options")
else:
self._kernel_options_post = value
@property
def autoinstall_meta(self) -> dict:
"""
Automatic Installation Template Metadata
:return: The metadata or an empty dict.
"""
return self._autoinstall_meta
@autoinstall_meta.setter
def autoinstall_meta(self, options: dict):
"""
A comma delimited list of key value pairs, like 'a=b,c=d,e=f' or a dict.
The meta tags are used as input to the templating system to preprocess automatic installation template files.
:param options: The new options for the automatic installation meta options.
        :raises ValueError: If the options could not be parsed.
"""
(success, value) = utils.input_string_or_dict(options, allow_multiples=True)
if not success:
raise ValueError("invalid options given for autoinstall meta")
else:
self._autoinstall_meta = value
@property
def mgmt_classes(self) -> list:
"""
For external config management
:return: An empty list or the list of mgmt_classes.
"""
return self._mgmt_classes
@mgmt_classes.setter
def mgmt_classes(self, mgmt_classes: list):
"""
Assigns a list of configuration management classes that can be assigned to any object, such as those used by
Puppet's external_nodes feature.
:param mgmt_classes: The new options for the management classes of an item.
"""
self._mgmt_classes = utils.input_string_or_list(mgmt_classes)
@property
def mgmt_parameters(self):
"""
Parameters which will be handed to your management application (Must be a valid YAML dictionary)
:return: The mgmt_parameters or an empty dict.
"""
return self._mgmt_parameters
@mgmt_parameters.setter
def mgmt_parameters(self, mgmt_parameters: Union[str, dict]):
"""
A YAML string which can be assigned to any object, this is used by Puppet's external_nodes feature.
:param mgmt_parameters: The management parameters for an item.
:raises TypeError: In case the parsed YAML isn't of type dict afterwards.
"""
if not isinstance(mgmt_parameters, (str, dict)):
raise TypeError("mgmt_parameters must be of type str or dict")
if isinstance(mgmt_parameters, str):
if mgmt_parameters == enums.VALUE_INHERITED:
self._mgmt_parameters = enums.VALUE_INHERITED
else:
mgmt_parameters = yaml.safe_load(mgmt_parameters)
if not isinstance(mgmt_parameters, dict):
raise TypeError("Input YAML in Puppet Parameter field must evaluate to a dictionary.")
self._mgmt_parameters = mgmt_parameters
@property
def template_files(self) -> dict:
"""
File mappings for built-in configuration management
:return:
"""
return self._template_files
@template_files.setter
def template_files(self, template_files: dict):
"""
        A comma separated list of source=destination templates that should be generated during a sync.
:param template_files: The new value for the template files which are used for the item.
:raises ValueError: In case the conversion from non dict values was not successful.
"""
(success, value) = utils.input_string_or_dict(template_files, allow_multiples=False)
if not success:
raise ValueError("template_files should be of type dict")
else:
self._template_files = value
@property
def boot_files(self) -> dict:
"""
Files copied into tftpboot beyond the kernel/initrd
:return:
"""
return self._boot_files
@boot_files.setter
def boot_files(self, boot_files: dict):
"""
A comma separated list of req_name=source_file_path that should be fetchable via tftp.
:param boot_files: The new value for the boot files used by the item.
"""
if not isinstance(boot_files, dict):
raise TypeError("boot_files needs to be of type dict")
self._boot_files = boot_files
@property
def fetchable_files(self) -> dict:
"""
        A comma separated list of ``virt_name=path_to_template`` that should be fetchable via tftp or a webserver
:return:
"""
return self._fetchable_files
@fetchable_files.setter
def fetchable_files(self, fetchable_files: Union[str, dict]):
"""
Setter for the fetchable files.
:param fetchable_files: Files which will be made available to external users.
"""
(success, value) = utils.input_string_or_dict(fetchable_files, allow_multiples=False)
if not success:
raise TypeError("fetchable_files were handed wrong values")
else:
self._fetchable_files = value
@property
def depth(self) -> int:
"""
        The depth of this item in the inheritance tree.
        :return: The depth as an int.
"""
return self._depth
@depth.setter
def depth(self, depth: int):
"""
Setter for depth.
:param depth: The new value for depth.
"""
if not isinstance(depth, int):
raise TypeError("depth needs to be of type int")
self._depth = depth
@property
def mtime(self) -> float:
"""
Represents the last modification time of the object via the API.
:return: The float which can be fed into a Python time object.
"""
return self._mtime
@mtime.setter
def mtime(self, mtime: float):
"""
Setter for the modification time of the object.
:param mtime: The new modification time.
"""
if not isinstance(mtime, float):
raise TypeError("mtime needs to be of type float")
self._mtime = mtime
@property
def parent(self):
"""
        The parent of this item. The base Item has no parent, so this always returns ``None``; subclasses override it.
        :return: ``None`` for the base Item.
"""
return None
@parent.setter
def parent(self, parent: str):
"""
Set the parent object for this object.
:param parent: The new parent object. This needs to be a descendant in the logical inheritance chain.
"""
@property
def children(self) -> list:
"""
        The logical children of this item. The base Item has none, so this always returns an empty list.
        :return: An empty list.
"""
return []
@children.setter
def children(self, value):
"""
This is an empty setter to not throw on setting it accidentally.
:param value:
"""
self.logger.warning("Tried to set the children property on object \"%s\" without logical children.", self.name)
def get_children(self, sort_list: bool = False) -> List[str]:
"""
        Get a copy of the list of children, optionally sorted.
        :param sort_list: Whether to sort the returned list or not.
        :return: A (possibly sorted) copy of the children list.
"""
result = copy.deepcopy(self.children)
if sort_list:
result.sort()
return result
@property
def descendants(self) -> list:
"""
Get objects that depend on this object, i.e. those that would be affected by a cascading delete, etc.
:return: This is a list of all descendants. May be empty if none exist.
"""
results = []
kids = self.children
for kid in kids:
# FIXME: Get kid objects
grandkids = kid.descendants
results.extend(grandkids)
return results
@property
def is_subobject(self) -> bool:
"""
        Whether this object is a subobject of another object of the same type (e.g. a subprofile).
:return: True in case the object is a subobject, False otherwise.
"""
return self._is_subobject
@is_subobject.setter
def is_subobject(self, value: bool):
"""
        Setter for the is_subobject flag.
:param value: The boolean value whether this is a subobject or not.
"""
if not isinstance(value, bool):
raise TypeError("Field is_subobject of object item needs to be of type bool!")
self._is_subobject = value
def get_conceptual_parent(self):
"""
The parent may just be a superclass for something like a subprofile. Get the first parent of a different type.
:return: The first item which is conceptually not from the same type.
"""
mtype = type(self)
parent = self.parent
while parent is not None:
ptype = type(parent)
if mtype != ptype:
self._conceptual_parent = parent
return parent
parent = parent.parent
return None
def sort_key(self, sort_fields: list = None):
"""
Convert the item to a dict and sort the data after specific given fields.
:param sort_fields: The fields to sort the data after.
:return: The sorted data.
"""
data = self.to_dict()
return [data.get(x, "") for x in sort_fields]
def find_match(self, kwargs, no_errors=False):
"""
Find from a given dict if the item matches the kv-pairs.
:param kwargs: The dict to match for in this item.
:param no_errors: How strict this matching is.
:return: True if matches or False if the item does not match.
"""
# used by find() method in collection.py
data = self.to_dict()
for (key, value) in list(kwargs.items()):
# Allow ~ to negate the compare
if value is not None and value.startswith("~"):
res = not self.find_match_single_key(data, key, value[1:], no_errors)
else:
res = self.find_match_single_key(data, key, value, no_errors)
if not res:
return False
return True
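    # Hedged usage sketch (editor addition, not original cobbler code):
    #
    #   item.find_match({"name": "web*"})        # fnmatch-style compare on strings
    #   item.find_match({"comment": "~legacy"})  # a leading "~" negates the match
    #
    # Keys are matched against the item's to_dict() representation.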
def find_match_single_key(self, data, key, value, no_errors: bool = False) -> bool:
"""
Look if the data matches or not. This is an alternative for ``find_match()``.
:param data: The data to search through.
:param key: The key to look for int the item.
:param value: The value for the key.
:param no_errors: How strict this matching is.
:return: Whether the data matches or not.
"""
# special case for systems
key_found_already = False
if "interfaces" in data:
if key in ["mac_address", "ip_address", "netmask", "virt_bridge", "dhcp_tag", "dns_name", "static_routes",
"interface_type", "interface_master", "bonding_opts", "bridge_opts", "interface"]:
key_found_already = True
for (name, interface) in list(data["interfaces"].items()):
if value == name:
return True
if value is not None and key in interface:
if self.__find_compare(interface[key], value):
return True
if key not in data:
if not key_found_already:
if not no_errors:
# FIXME: removed for 2.0 code, shouldn't cause any problems to not have an exception here?
# raise CX("searching for field that does not exist: %s" % key)
return False
else:
if value is not None: # FIXME: new?
return False
if value is None:
return True
else:
return self.__find_compare(value, data[key])
def dump_vars(self, formatted_output: bool = True):
"""
Dump all variables.
:param formatted_output: Whether to format the output or not.
:return: The raw or formatted data.
"""
raw = utils.blender(self.api, False, self)
if formatted_output:
return pprint.pformat(raw)
else:
return raw
def check_if_valid(self):
"""
Raise exceptions if the object state is inconsistent
:raises CX
"""
if not self.name:
raise CX("Name is required")
def make_clone(self):
"""
Must be defined in any subclass
"""
raise NotImplementedError("Must be implemented in a specific Item")
@classmethod
def _remove_depreacted_dict_keys(cls, dictionary: dict):
"""
        This method removes keys which should not be deserialized and are only there for API compatibility in
``to_dict()``.
:param dictionary: The dict to update
"""
if "ks_meta" in dictionary:
dictionary.pop("ks_meta")
if "kickstart" in dictionary:
dictionary.pop("kickstart")
def from_dict(self, dictionary: dict):
"""
Modify this object to take on values in ``dictionary``.
:param dictionary: This should contain all values which should be updated.
"""
result = copy.deepcopy(dictionary)
for key in dictionary:
lowered_key = key.lower()
# The following also works for child classes because self is a child class at this point and not only an
# Item.
if hasattr(self, "_" + lowered_key):
try:
setattr(self, lowered_key, dictionary[key])
except AttributeError as error:
raise AttributeError("Attribute \"%s\" could not be set!" % lowered_key) from error
result.pop(key)
if len(result) > 0:
            raise KeyError("The following keys supplied could not be set: %s" % result.keys())
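    # Hedged round-trip sketch (editor addition, not original cobbler code):
    #
    #   data = other_item.to_dict()
    #   Item._remove_depreacted_dict_keys(data)  # drop the legacy ks_meta/kickstart aliases
    #   item.from_dict(data)
    #
    # Any key that does not map onto a matching private attribute raises KeyError.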
def to_dict(self) -> dict:
"""
This converts everything in this object to a dictionary.
:return: A dictionary with all values present in this object.
"""
value = Item.get_from_cache(self)
if value is None:
value = {}
for key in self.__dict__:
if key.startswith("_") and not key.startswith("__"):
if key in ("_conceptual_parent", "_last_cached_mtime", "_cached_dict", "_supported_boot_loaders"):
continue
new_key = key[1:].lower()
if isinstance(self.__dict__[key], enum.Enum):
value[new_key] = self.__dict__[key].value
elif new_key == "interfaces":
                    # This is the special interfaces dict. Let's fix it before it gets to the normal process.
serialized_interfaces = {}
interfaces = self.__dict__[key]
for interface_key in interfaces:
serialized_interfaces[interface_key] = interfaces[interface_key].to_dict()
value[new_key] = serialized_interfaces
elif isinstance(self.__dict__[key], (list, dict)):
value[new_key] = copy.deepcopy(self.__dict__[key])
else:
value[new_key] = self.__dict__[key]
self.set_cache(self, value)
if "autoinstall" in value:
value.update({"kickstart": value["autoinstall"]})
if "autoinstall_meta" in value:
value.update({"ks_meta": value["autoinstall_meta"]})
return value
| gpl-2.0 | -5,420,789,716,813,880,000 | 32.400788 | 119 | 0.568888 | false |
khedron/Plot | graph/line.py | 1 | 1811 | from PyQt4.QtCore import QObject, pyqtSignal, QVariant
from base.property import prop_sig
from style.style import LineStyle
from graph.type import Type
class Column(QObject):
name, name_changed = prop_sig(str, "name")
type, type_changed = prop_sig(QVariant.Type, "type")
data, data_changed = prop_sig(list, "data")
changed = pyqtSignal()
def __init__(self, name, type, data):
object.__init__(self)
self.name = name
self.type = type
self.data = data
for signal in (self.name_changed,
self.type_changed, self.data_changed):
signal.connect(self.changed)
class Line(QObject):
point_styles = ["cross"]
changed = pyqtSignal()
style, style_changed = prop_sig(LineStyle, "style")
point_style, point_style_changed = prop_sig(str, "point_style", point_styles[0])
point_size, point_size_changed = prop_sig(float, "point_size", 1)
x_data_changed = pyqtSignal(int, "QList<QVariant>")
y_data_changed = pyqtSignal(int, "QList<QVariant>")
def __init__(self, column_x, column_y):
QObject.__init__(self)
self.x = column_x
self.x.changed.connect(self.emit_x)
self.y = column_y
self.y.changed.connect(self.emit_y)
for signal in (self.style_changed, self.point_style_changed,
self.point_size_changed, self.x.changed, self.y.changed):
signal.connect(self.changed)
def emit_x(self):
self.x_data_changed.emit(id(self), self.x.data)
def emit_y(self):
self.y_data_changed.emit(id(self), self.y.data)
# Data change signalling possibilities:
# - line has data_changed(QVariantList)
# - line container must handle adding/removing/changing lines
# and keep a copy of the axes around to notify them of changes.
# How to handle accessing and changing the data?
# In GUI it will be a table view exposing a custom editor;
# maybe see spreadsheet example for details.
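# Hedged construction sketch (editor addition, not part of the original module):
#
#   from PyQt4.QtCore import QVariant
#   xs = Column("time", QVariant.Double, [0.0, 1.0, 2.0])
#   ys = Column("value", QVariant.Double, [3.5, 2.1, 4.8])
#   line = Line(xs, ys)
#   line.changed.connect(some_slot)  # some_slot is a hypothetical callable
#
# Reassigning xs.data afterwards re-emits x_data_changed with this line's id().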
| gpl-3.0 | -4,145,049,646,966,871,000 | 27.746032 | 81 | 0.712314 | false |
Mazuh/Minerva | views/forms/content.py | 1 | 14357 | """
Forms about content editing.
"""
from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField,\
TextAreaField, DateTimeField, SubmitField,\
SelectField, DateField, BooleanField
from flask_wtf.file import FileField, FileRequired
from wtforms.fields.html5 import EmailField, URLField
from wtforms.validators import DataRequired, Email, URL
class FindClass(FlaskForm):
"""
Form for finding the classes in a period and year
"""
year = IntegerField('Ano:', validators=[
DataRequired('Informe o ano da turma.')
])
period = SelectField('Período:', choices=[('1','1 Período'),
('2', '2 Período'), ('3', '3 Período'), ('4', '4 Período')], validators=[
DataRequired('Informe o período da turma.')
])
create = SubmitField('Pesquisar')
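# Hedged usage sketch (editor addition, not part of the original views): these
# WTForms classes are meant to be validated inside a Flask view, e.g.
#
#   form = FindClass()
#   if form.validate_on_submit():
#       year, period = form.year.data, form.period.data  # hypothetical handler code
#
# Field labels and messages stay in Portuguese on purpose, matching the app's UI.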
class StudentCoordinatorForm(FlaskForm):
"""
Form for adding and editing coordinators
"""
registration = IntegerField('Matricula:', validators=[
DataRequired('Informe a matricula do estudante')
])
coordinator = StringField('Nome do coordenador:')
create = SubmitField('Editar');
class NewsForm(FlaskForm):
"""
Form for adding and editing news
"""
title = StringField('Titúlo da notícia:', validators=[
DataRequired('Digite um titúlo para a notícia')
])
headLine = StringField('Resumo básico/Manchete:', validators=[
DataRequired('Digite a manchete')
])
body = TextAreaField('Notícia:', validators=[
DataRequired('Insira a notícia.')
])
index = IntegerField()
create = SubmitField('Adicionar')
class ParticipationsInEventsForm(FlaskForm):
"""
Form for the list of participations in events.
"""
title = StringField('Instituição visitada:', validators=[
DataRequired('Digite uma chamada para o intercâmbio.')
])
description = TextAreaField('Resumo do envolvimento:', validators=[
DataRequired('Insira um breve resumo sobre a participação.')
])
year = IntegerField('Ano:', validators=[
DataRequired('Informe qual o ano do evento.')
])
location = StringField('Cidade e país:', validators=[
DataRequired('Falta localizar a cidade e país.')
])
index = IntegerField()
create = SubmitField('Adicionar')
class SubjectsForm(FlaskForm):
"""
Form for list of subjects.
"""
name = StringField('Nome da disciplina:', validators=[
DataRequired('Digite o nome da disciplina.')
])
description = TextAreaField('Resumo da disciplina:', validators=[
DataRequired('Insira um breve resumo sobre a disciplina.')
])
workload_in_hours = IntegerField('Carga Horária:', validators=[
DataRequired('Informe a carga horária da disciplina.')
])
credits = IntegerField('Creditos:', validators=[
DataRequired('Informe quantos créditos a disciplina dá.')
])
requirement = SelectField('Tipo de disciplina', choices=[('Obrigatórias','Obrigatórias'), ('Eletivas','Eletivas')], validators = [
DataRequired('Insira o tipo da disciplina')
])
course_type = StringField('Você não devia estar vendo isso', validators=[
DataRequired('Houve um erro por favor tente fechar o navegador e abrir essa página novamente')
])
index = IntegerField()
create = SubmitField('Adicionar')
class StaffForm(FlaskForm):
"""
Form for list of staff.
"""
name = StringField('Nome do servidor:', validators=[
DataRequired('Digite o nome do servidor.')
])
rank = StringField('Posição do servidor:', validators=[
DataRequired('Digite a posição do servidor.')
])
abstract = TextAreaField('Resumo da formação caso da coordenação, e descrição do trabalho caso secretário:', validators=[
DataRequired('Insira um breve resumo sobre a formação acadêmica do servidor.')
])
function = SelectField('Tipo de servidor', choices=[('coordination','Coordenação'), ('secretariat','Secretáriado')], validators = [
DataRequired('Insira o tipo de servidor')
])
photo = URLField('Foto do servidor')
index = IntegerField()
create = SubmitField('Adicionar')
class InstitutionsWithCovenantsForm(FlaskForm):
"""
Form for the list of institutions with covenants.
"""
name = StringField('Instituição com convênio:', validators=[
DataRequired('Digite o nome da instituição.')
])
initials = StringField('Sigla da Instituição:', validators=[
DataRequired('Digite a sigla da instituição.')
])
logo = FileField(validators=[
DataRequired('Por favor insira um logo em formato .jpeg ou .png')
])
create = SubmitField('Adicionar')
class EditInstitutionsWithCovenantsForm(FlaskForm):
"""
Form for editing list of institutions with covenants.
"""
name = StringField('Instituição com convênio:', validators=[
DataRequired('Digite o nome da instituição.')
])
initials = StringField('Sigla da Instituição:', validators=[
DataRequired('Digite a sigla da instituição.')
])
logo = FileField()
index = IntegerField()
create = SubmitField('Editar')
class ScheduledReportForm(FlaskForm):
"""
Scheduled report form.
"""
time = DateTimeField('Data e hora:', format='%d/%m/%Y %H:%M')
title = StringField('Título do trabalho:', validators=[
DataRequired('Digite o título do trabalho.')
])
author = StringField('Autoria:', validators=[
DataRequired('Digite o nome do(s) autor(es).')
])
location = StringField('Localização:', validators=[
DataRequired('Digite a localização.')
])
index = IntegerField()
create = SubmitField('Agendar')
class CalendarForm(FlaskForm):
"""
Calendar event form
"""
title = StringField('Título do evento:', validators=[
DataRequired('Digite o título do evento.')
])
initial_date = DateField('Data inicial:', format='%d/%m/%Y', validators=[
DataRequired('Escolha a data de começo do evento.')
])
final_date = StringField('Data final(Se existir):')
hour = StringField('Hora de começo e termino do evento(Se existir)')
link = URLField('Link para mais informações(Se existir)')
index = IntegerField()
create = SubmitField('Adicionar')
class ProfessorForm(FlaskForm):
"""
Form for adding professors to database
"""
name = StringField('Nome do professor(a):', validators=[
DataRequired('Digite o nome do professor(a).')
])
rank = StringField('Rank do professor(a):', validators=[
DataRequired('Digite o rank do professor(a).')
])
lattes = StringField('Link para Lattes do professor(a):')
email = EmailField('Email do professor(a):', validators=[
DataRequired('Digite o Email do professor(a).'), Email()
])
index = IntegerField()
create = SubmitField('Adicionar')
class AttendanceForm(FlaskForm):
"""
Form for adding attendance information to database
"""
building = StringField('Prédio onde a unidade se localiza:', validators=[
DataRequired('Digite o nome do prédio.')
])
floor = StringField('Digite o andar onde a unidade se localiza:', validators=[
DataRequired('Digite o andar.')
])
room = StringField('Sala onde a unidade se localiza:', validators=[
DataRequired('Digite o nome da sala.')
])
calendar = StringField('Calendário acadêmico:', validators=[
DataRequired('Digite o link para o calendário.')
])
email = EmailField('Email da unidade:', validators=[
DataRequired('Digite o email.')
])
opening = StringField('Horário de funcionamento:', validators=[
DataRequired('Digite o horário de funcionamento.')
])
type1 = StringField('Tipo do telefone:', validators=[
DataRequired('Digite o tipo do telefone.')
])
phone1 = StringField('Telefone:', validators=[
DataRequired('Digite o telefone para contato.')
])
type2 = StringField('Tipo do telefone:')
phone2 = StringField('Telefone:')
type3 = StringField('Tipo do telefone:')
phone3 = StringField('Telefone:')
attendance_id = StringField(validators=[
DataRequired()
])
create = SubmitField('Editar')
class DocumentForm(FlaskForm):
"""
Form for upload of document
"""
title = StringField('Titulo do documento:', validators=[
DataRequired('Digite o título do documento.')
])
cod = StringField('Código:', validators=[
DataRequired('Informe qual o código do documento.')
])
category = SelectField('Categoria', choices=[
('resolucao','Resolução'),('ata','ATA'),('outros','Outros')], validators=[
DataRequired('Especifique o tipo de documento.')
])
document = FileField(validators=[
DataRequired('Por favor carregue um documento valido')
])
create = SubmitField('Adicionar')
class EditDocumentForm(FlaskForm):
"""
Form for edit and delete document
"""
title = StringField('Titulo do documento:', validators=[
DataRequired('Digite o título do documento.')
])
cod = StringField('Código:', validators=[
DataRequired('Informe qual o código do documento.')
])
category = SelectField('Categoria', choices=[
('resolucao','Resolução'),('ata','ATA'),('outros','Outros')], validators=[
DataRequired('Especifique o tipo de documento.')
])
document = FileField()
document_id = StringField(validators=[
DataRequired()
])
create = SubmitField('Adicionar')
class BookForm(FlaskForm):
"""
Form for books
"""
title = StringField('Titulo do livro:', validators=[
DataRequired('Digite o título do livro.')
])
subtitle = StringField('Subtitulo do livro(se houver):')
authors = StringField('Nome do autor(es):', validators=[
DataRequired('Digite o nome dos autor(es)')
])
edition = IntegerField('Número da edição:', validators=[
DataRequired('Digite o número da edição')
])
location = StringField('Local de impressão:')
publisher = StringField('Editora:')
year = IntegerField('Ano da publicação:')
index = IntegerField()
create = SubmitField('Adicionar')
class ArticleForm(FlaskForm):
"""
Form for articles
"""
title = StringField('Titulo do artigo:', validators=[
DataRequired('Digite o título do artigo.')
])
subtitle = StringField('Subtitulo do artigo(se houver):')
authors = StringField('Nome do autor(es):', validators=[
DataRequired('Digite o nome dos autor(es)')
])
edition = IntegerField('Número da edição:', validators=[
DataRequired('Digite o número da edição')
])
pages = StringField('Número das páginas:', validators=[
DataRequired('Digite o número das páginas')
])
number = IntegerField('Número:')
location = StringField('Local de impressão:')
publisher = StringField('Editora:')
date = StringField('Data:')
index = IntegerField()
create = SubmitField('Adicionar')
class ProjectForm(FlaskForm):
"""
Form for projects
"""
title = StringField('Titulo do projeto:', validators=[
DataRequired('Digite o título do projeto')
])
subtitle = StringField('Subtitulo do projeto:')
description = TextAreaField('Descrição do projeto:')
situation = SelectField('Situação', choices=[
('Renovado', 'Renovado'), ('Em execução', 'Em execução'), ('Encerrado com pendências', 'Encerrado com pendências'), ('Finalizado', 'Finalizado'), ('Necessita correção', 'Necessita correção')], validators=[
DataRequired('Escolha a situação do projeto')
])
year = IntegerField('Ano do projeto', validators=[
DataRequired('Digite o ano do projeto')
])
email = EmailField('Email para contato', validators=[
DataRequired('Por favor digite um email válido para contato')
])
dt_init = StringField('Data de início do projeto', validators=[
DataRequired('Digite a data de início do projeto')
])
dt_end = StringField('Data esperada pra o final do projeto', validators=[
DataRequired('Digite a data esperada para finalização do projeto')
])
project_id = StringField()
create = SubmitField('Adicionar')
class MemberOfProjectForm(FlaskForm):
"""
Form for members inside project
"""
name = StringField('Nome do membro:', validators=[
DataRequired('Digite o nome do membro do projeto')
])
project_role = SelectField('Categoria', choices=[
('Colaborador(a)', 'Colaborador(a)'), ('Coordenador(a)', 'Coordenador(a)')
])
general_role = SelectField('Tipo', choices=[
('Discente', 'Discente'), ('Externo', 'Externo'), ('Coordenador(a)', 'Coordenador(a)')
])
project_id = StringField(validators=[
DataRequired('Houve um erro')
])
index = IntegerField()
create = SubmitField('Adicionar')
class ChapterForm(FlaskForm):
"""
Form for chapters in books
"""
book_title = StringField('Titulo do livro:', validators=[
DataRequired('Digite o título do livro.')
])
book_authors = StringField('Nome do autor(es) do livro:', validators=[
DataRequired('Digite o nome dos autor(es)')
])
chapter_title = StringField('Titulo do capitulo:', validators=[
DataRequired('Digite o título do capitulo.')
])
chapter_authors = StringField('Nome do autor(es) do capitulo:', validators=[
DataRequired('Digite o nome dos autor(es)')
])
edition = IntegerField('Número da edição:', validators=[
DataRequired('Digite o número da edição')
])
location = StringField('Local de impressão:')
publisher = StringField('Editora:')
year = IntegerField('Ano da publicação:')
pages = StringField('Número das páginas:', validators=[
DataRequired('Digite o número das páginas')
])
index = IntegerField()
create = SubmitField('Adicionar')
| gpl-3.0 | -3,430,461,533,547,064,000 | 26.796477 | 213 | 0.64334 | false |
TechRunner2/i3-gaps-rice | .config/i3/bar/tests/util.py | 1 | 3718 | # pylint: disable=C0103,C0111,W0613
import json
import shlex
import subprocess
from bumblebee.output import Widget
def assertWidgetAttributes(test, widget):
test.assertTrue(isinstance(widget, Widget))
test.assertTrue(hasattr(widget, "full_text"))
def assertPopen(output, cmd):
res = shlex.split(cmd)
output.assert_any_call(res,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
def assertStateContains(test, module, state):
for widget in module.widgets():
widget.link_module(module)
module.update(module.widgets())
test.assertTrue(state in module.widgets()[0].state())
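# Hedged example (editor addition): a module test would typically combine these
# helpers roughly like
#
#   engine = MockEngine()
#   module = somemodule.Module(engine, MockConfig())  # "somemodule" is hypothetical
#   assertWidgetAttributes(self, module.widgets()[0])
#   assertStateContains(self, module, "state-default")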
class MockEpoll(object):
def register(self, fileno, event):
pass
def poll(self, timeout):
return [(1,2)]
def unregister(self, fileno):
pass
def close(self):
pass
def assertMouseEvent(mock_input, mock_output, mock_select, engine, module, button, cmd, instance_id=None):
mock_input.readline.return_value = json.dumps({
"name": module.id if module else "test",
"button": button,
"instance": instance_id
})
mock_input.fileno.return_value = 1
mock_select.return_value = MockEpoll()
engine.input.start()
engine.input.stop()
mock_input.readline.assert_any_call()
if cmd:
assertPopen(mock_output, cmd)
class MockInput(object):
def __init__(self):
self._callbacks = {}
def start(self):
pass
def stop(self):
pass
def get_callback(self, uid):
return self._callbacks.get(uid, None)
def register_callback(self, obj, button, cmd):
if not obj:
return
self._callbacks[obj.id] = {
"button": button,
"command": cmd,
}
class MockEngine(object):
def __init__(self):
self.input = MockInput()
class MockConfig(object):
def __init__(self):
self._data = {}
def get(self, name, default):
if name in self._data:
return self._data[name]
return default
def set(self, name, value):
self._data[name] = value
class MockOutput(object):
def start(self):
pass
def stop(self):
pass
def draw(self, widget, engine, module):
engine.stop()
def begin(self):
pass
def flush(self):
pass
def end(self):
pass
class MockModule(object):
def __init__(self, engine=None, config=None):
self.id = None
class MockWidget(Widget):
def __init__(self, text):
super(MockWidget, self).__init__(text)
self._text = text
self.module = None
self.attr_state = ["state-default"]
self.id = "none"
def state(self):
return self.attr_state
def update(self, widgets):
pass
def full_text(self):
return self._text
class MockTheme(object):
def __init__(self):
self.attr_prefix = None
self.attr_suffix = None
self.attr_fg = None
self.attr_bg = None
self.attr_separator = None
self.attr_separator_block_width = 0
def padding(self, widget):
return ""
def reset(self):
pass
def separator_block_width(self, widget):
return self.attr_separator_block_width
def separator(self, widget):
return self.attr_separator
def prefix(self, widget, default=None):
return self.attr_prefix
def suffix(self, widget, default=None):
return self.attr_suffix
def fg(self, widget):
return self.attr_fg
def bg(self, widget):
return self.attr_bg
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| gpl-2.0 | -8,222,575,388,547,440,000 | 21.950617 | 106 | 0.595212 | false |
devalfrz/django-activity-logger | setup.py | 1 | 2759 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import shutil
import sys
from setuptools import setup
def get_version(package):
"""
    Return package version as listed in `__version__` in `__init__.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
def get_packages(package):
"""
Return root package and all sub-packages.
"""
return [dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, '__init__.py'))]
def get_package_data(package):
"""
Return all files under the root package, that are not in a
package themselves.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return {package: filepaths}
version = get_version('activity_logger')
if sys.argv[-1] == 'publish':
if os.system("pip freeze | grep wheel"):
print("wheel not installed.\nUse `pip install wheel`.\nExiting.")
sys.exit()
if os.system("pip freeze | grep twine"):
print("twine not installed.\nUse `pip install twine`.\nExiting.")
sys.exit()
os.system("python setup.py sdist bdist_wheel")
os.system("twine upload dist/*")
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (version, version))
print(" git push --tags")
shutil.rmtree('dist')
shutil.rmtree('build')
shutil.rmtree('django-activity-logger.egg-info')
sys.exit()
setup(
name='django-activity-logger',
version=version,
url='https://github.com/devalfrz/django-activity-logger',
license='BSD',
description='Simple tool for logging activity in Django.',
author='Alfredo Rius',
author_email='[email protected]',
packages=get_packages('activity_logger'),
package_data=get_package_data('activity_logger'),
include_package_data=True,
install_requires=['urllib3',],
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP',
]
)
| bsd-2-clause | -3,930,844,943,590,981,000 | 29.318681 | 75 | 0.614715 | false |
pescobar/easybuild-framework | easybuild/framework/easyconfig/style.py | 1 | 6388 | ##
# Copyright 2016-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Style tests for easyconfig files using pycodestyle.
:author: Ward Poelmans (Ghent University)
"""
import re
import sys
from easybuild.base import fancylogger
from easybuild.framework.easyconfig.easyconfig import EasyConfig
from easybuild.tools.build_log import EasyBuildError, print_msg
from easybuild.tools.py2vs3 import reload, string_type
from easybuild.tools.utilities import only_if_module_is_available
try:
import pycodestyle
from pycodestyle import StyleGuide, register_check, trailing_whitespace
except ImportError:
try:
# fallback to importing from 'pep8', which was renamed to pycodestyle in 2016
import pep8
from pep8 import StyleGuide, register_check, trailing_whitespace
except ImportError:
pass
_log = fancylogger.getLogger('easyconfig.style', fname=False)
EB_CHECK = '_eb_check_'
COMMENT_REGEX = re.compile(r'^\s*#')
PARAM_DEF_REGEX = re.compile(r"^(?P<key>[a-z_]+)\s*=\s*")
MAX_LINE_LENGTH = 120
# Any function starting with _eb_check_ (see EB_CHECK variable) will be
# added to the tests if the test number is added to the select list.
#
# Note: only functions that have a first argument named 'physical_line' or 'logical_line'
# will actually be used!
#
# The test number is defined as WXXX and EXXX (for warnings and errors)
# where XXX is a 3 digit number.
#
# It should be mentioned in the docstring as a single word.
# Read the pycodestyle docs to understand the arguments of these functions:
# https://pycodestyle.readthedocs.io or more specifically:
# https://pycodestyle.readthedocs.io/en/latest/developer.html#contribute
def _eb_check_trailing_whitespace(physical_line, lines, line_number, checker_state): # pylint:disable=unused-argument
"""
W299
Warn about trailing whitespace, except for the description and comments.
This differs from the standard trailing whitespace check as that
will warn for any trailing whitespace.
The arguments are explained at
https://pycodestyle.readthedocs.io/en/latest/developer.html#contribute
"""
# apparently this is not the same as physical_line line?!
line = lines[line_number-1]
if COMMENT_REGEX.match(line):
return None
result = trailing_whitespace(line)
if result:
result = (result[0], result[1].replace('W291', 'W299'))
# keep track of name of last parameter that was defined
param_def = PARAM_DEF_REGEX.search(line)
if param_def:
checker_state['eb_last_key'] = param_def.group('key')
# if the warning is about the multiline string of description
# we will not issue a warning
if checker_state.get('eb_last_key') == 'description':
result = None
return result
@only_if_module_is_available(('pycodestyle', 'pep8'))
def check_easyconfigs_style(easyconfigs, verbose=False):
"""
Check the given list of easyconfigs for style
    :param easyconfigs: list of file paths to easyconfigs
    :param verbose: print out statistics and be verbose about the errors and warnings
:return: the number of warnings and errors
"""
# importing autopep8 changes some pep8 functions.
# We reload it to be sure to get the real pep8 functions.
if 'pycodestyle' in sys.modules:
reload(pycodestyle)
else:
reload(pep8)
# register the extra checks before using pep8:
# any function in this module starting with `_eb_check_` will be used.
cands = globals()
for check_function in sorted([cands[f] for f in cands if callable(cands[f]) and f.startswith(EB_CHECK)]):
_log.debug("Adding custom style check %s", check_function)
register_check(check_function)
styleguide = StyleGuide(quiet=False, config_file=None)
options = styleguide.options
# we deviate from standard pep8 and allow 120 chars
# on a line: the default of 79 is too narrow.
options.max_line_length = MAX_LINE_LENGTH
# we ignore some tests
# note that W291 has been replaced by our custom W299
options.ignore = (
'W291', # replaced by W299
)
options.verbose = int(verbose)
result = styleguide.check_files(easyconfigs)
if verbose:
result.print_statistics()
return result.total_errors
def cmdline_easyconfigs_style_check(ecs):
"""
Run easyconfigs style check of each of the specified easyconfigs, triggered from 'eb' command line
:param ecs: list of easyconfigs to check, could be either file paths or EasyConfig instances
:return: True when style check passed on all easyconfig files, False otherwise
"""
print_msg("\nRunning style check on %d easyconfig(s)...\n" % len(ecs), prefix=False)
style_check_passed = True
for ec in ecs:
# if an EasyConfig instance is provided, just grab the corresponding file path
if isinstance(ec, EasyConfig):
path = ec.path
elif isinstance(ec, string_type):
path = ec
else:
raise EasyBuildError("Value of unknown type encountered in cmdline_easyconfigs_style_check: %s (type: %s)",
ec, type(ec))
if check_easyconfigs_style([path]) == 0:
res = 'PASS'
else:
res = 'FAIL'
style_check_passed = False
print_msg('[%s] %s' % (res, path), prefix=False)
return style_check_passed
| gpl-2.0 | -1,648,767,824,103,083,800 | 35.502857 | 119 | 0.69975 | false |
Stupeflix/robust-graphite-client | setup.py | 1 | 1370 | #!/usr/bin/env python
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
setup_requires = []
if 'test' in sys.argv:
setup_requires.append('pytest')
tests_requires = [
'pytest',
'pytest-cov>=1.8.1',
'pytest-localserver',
]
dev_requires = [
'sphinx',
]
install_requires = [
'requests>=2.4.0',
]
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['robgracli']
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
setup(
name='robust-graphite-client',
version='1.1.0',
license='MIT',
author='Luper Rouch',
author_email='[email protected]',
url='https://github.com/Stupeflix/robust-graphite-client',
description='A simple graphite querying library with workarounds on '
'some rare bugs',
long_description=open('README.rst').read(),
packages=find_packages(),
install_requires=install_requires,
extras_require={
'tests': tests_requires,
'dev': dev_requires,
},
tests_require=tests_requires,
cmdclass={'test': PyTest},
zip_safe=False,
include_package_data=True,
)
| mit | 2,416,794,560,243,820,000 | 23.035088 | 73 | 0.645985 | false |
beniwohli/apm-agent-python | elasticapm/metrics/base_metrics.py | 1 | 13902 | # BSD 3-Clause License
#
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import threading
import time
from collections import defaultdict
from elasticapm.conf import constants
from elasticapm.utils import compat
from elasticapm.utils.logging import get_logger
from elasticapm.utils.module_import import import_string
from elasticapm.utils.threading import IntervalTimer, ThreadManager
logger = get_logger("elasticapm.metrics")
DISTINCT_LABEL_LIMIT = 1000
class MetricsRegistry(ThreadManager):
def __init__(self, client, tags=None):
"""
Creates a new metric registry
:param client: client instance
:param tags:
"""
self.client = client
self._metricsets = {}
self._tags = tags or {}
self._collect_timer = None
super(MetricsRegistry, self).__init__()
def register(self, class_path):
"""
Register a new metric set
:param class_path: a string with the import path of the metricset class
"""
if class_path in self._metricsets:
return
else:
try:
class_obj = import_string(class_path)
self._metricsets[class_path] = class_obj(self)
except ImportError as e:
logger.warning("Could not register %s metricset: %s", class_path, compat.text_type(e))
def get_metricset(self, class_path):
try:
return self._metricsets[class_path]
except KeyError:
raise MetricSetNotFound(class_path)
def collect(self):
"""
Collect metrics from all registered metric sets and queues them for sending
:return:
"""
if self.client.config.is_recording:
logger.debug("Collecting metrics")
for _, metricset in compat.iteritems(self._metricsets):
for data in metricset.collect():
self.client.queue(constants.METRICSET, data)
def start_thread(self, pid=None):
super(MetricsRegistry, self).start_thread(pid=pid)
if self.client.config.metrics_interval:
self._collect_timer = IntervalTimer(
self.collect, self.collect_interval, name="eapm metrics collect timer", daemon=True
)
logger.debug("Starting metrics collect timer")
self._collect_timer.start()
def stop_thread(self):
if self._collect_timer and self._collect_timer.is_alive():
logger.debug("Cancelling collect timer")
self._collect_timer.cancel()
self._collect_timer = None
@property
def collect_interval(self):
return self.client.config.metrics_interval / 1000.0
@property
def ignore_patterns(self):
return self.client.config.disable_metrics or []
class MetricsSet(object):
def __init__(self, registry):
self._lock = threading.Lock()
self._counters = {}
self._gauges = {}
self._timers = {}
self._registry = registry
self._label_limit_logged = False
def counter(self, name, reset_on_collect=False, **labels):
"""
Returns an existing or creates and returns a new counter
:param name: name of the counter
:param reset_on_collect: indicate if the counter should be reset to 0 when collecting
:param labels: a flat key/value map of labels
:return: the counter object
"""
return self._metric(self._counters, Counter, name, reset_on_collect, labels)
def gauge(self, name, reset_on_collect=False, **labels):
"""
Returns an existing or creates and returns a new gauge
:param name: name of the gauge
        :param reset_on_collect: indicate if the gauge should be reset to 0 when collecting
:param labels: a flat key/value map of labels
:return: the gauge object
"""
return self._metric(self._gauges, Gauge, name, reset_on_collect, labels)
def timer(self, name, reset_on_collect=False, **labels):
"""
Returns an existing or creates and returns a new timer
:param name: name of the timer
:param reset_on_collect: indicate if the timer should be reset to 0 when collecting
:param labels: a flat key/value map of labels
:return: the timer object
"""
return self._metric(self._timers, Timer, name, reset_on_collect, labels)
def _metric(self, container, metric_class, name, reset_on_collect, labels):
"""
Returns an existing or creates and returns a metric
:param container: the container for the metric
:param metric_class: the class of the metric
:param name: name of the metric
:param reset_on_collect: indicate if the metric should be reset to 0 when collecting
:param labels: a flat key/value map of labels
:return: the metric object
"""
labels = self._labels_to_key(labels)
key = (name, labels)
with self._lock:
if key not in container:
if any(pattern.match(name) for pattern in self._registry.ignore_patterns):
metric = noop_metric
elif len(self._gauges) + len(self._counters) + len(self._timers) >= DISTINCT_LABEL_LIMIT:
if not self._label_limit_logged:
self._label_limit_logged = True
logger.warning(
"The limit of %d metricsets has been reached, no new metricsets will be created."
% DISTINCT_LABEL_LIMIT
)
metric = noop_metric
else:
metric = metric_class(name, reset_on_collect=reset_on_collect)
container[key] = metric
return container[key]
def collect(self):
"""
Collects all metrics attached to this metricset, and returns it as a generator
with one or more elements. More than one element is returned if labels are used.
The format of the return value should be
{
"samples": {"metric.name": {"value": some_float}, ...},
"timestamp": unix epoch in microsecond precision
}
"""
self.before_collect()
timestamp = int(time.time() * 1000000)
samples = defaultdict(dict)
if self._counters:
# iterate over a copy of the dict to avoid threading issues, see #717
for (name, labels), c in compat.iteritems(self._counters.copy()):
if c is not noop_metric:
val = c.val
if val or not c.reset_on_collect:
samples[labels].update({name: {"value": val}})
if c.reset_on_collect:
c.reset()
if self._gauges:
for (name, labels), g in compat.iteritems(self._gauges.copy()):
if g is not noop_metric:
val = g.val
if val or not g.reset_on_collect:
samples[labels].update({name: {"value": val}})
if g.reset_on_collect:
g.reset()
if self._timers:
for (name, labels), t in compat.iteritems(self._timers.copy()):
if t is not noop_metric:
val, count = t.val
if val or not t.reset_on_collect:
samples[labels].update({name + ".sum.us": {"value": int(val * 1000000)}})
samples[labels].update({name + ".count": {"value": count}})
if t.reset_on_collect:
t.reset()
if samples:
for labels, sample in compat.iteritems(samples):
result = {"samples": sample, "timestamp": timestamp}
if labels:
result["tags"] = {k: v for k, v in labels}
yield self.before_yield(result)
def before_collect(self):
"""
A method that is called right before collection. Can be used to gather metrics.
:return:
"""
pass
def before_yield(self, data):
return data
def _labels_to_key(self, labels):
return tuple((k, compat.text_type(v)) for k, v in sorted(compat.iteritems(labels)))
class SpanBoundMetricSet(MetricsSet):
def before_yield(self, data):
tags = data.get("tags", None)
if tags:
span_type, span_subtype = tags.pop("span.type", None), tags.pop("span.subtype", "")
if span_type or span_subtype:
data["span"] = {"type": span_type, "subtype": span_subtype}
transaction_name, transaction_type = tags.pop("transaction.name", None), tags.pop("transaction.type", None)
if transaction_name or transaction_type:
data["transaction"] = {"name": transaction_name, "type": transaction_type}
return data
class Counter(object):
__slots__ = ("name", "_lock", "_initial_value", "_val", "reset_on_collect")
def __init__(self, name, initial_value=0, reset_on_collect=False):
"""
Creates a new counter
:param name: name of the counter
:param initial_value: initial value of the counter, defaults to 0
"""
self.name = name
self._lock = threading.Lock()
self._val = self._initial_value = initial_value
self.reset_on_collect = reset_on_collect
def inc(self, delta=1):
"""
Increments the counter. If no delta is provided, it is incremented by one
:param delta: the amount to increment the counter by
:returns the counter itself
"""
with self._lock:
self._val += delta
return self
def dec(self, delta=1):
"""
Decrements the counter. If no delta is provided, it is decremented by one
:param delta: the amount to decrement the counter by
:returns the counter itself
"""
with self._lock:
self._val -= delta
return self
def reset(self):
"""
Reset the counter to the initial value
:returns the counter itself
"""
with self._lock:
self._val = self._initial_value
return self
@property
def val(self):
"""Returns the current value of the counter"""
return self._val
class Gauge(object):
__slots__ = ("name", "_val", "reset_on_collect")
def __init__(self, name, reset_on_collect=False):
"""
Creates a new gauge
:param name: label of the gauge
"""
self.name = name
self._val = None
self.reset_on_collect = reset_on_collect
@property
def val(self):
return self._val
@val.setter
def val(self, value):
self._val = value
def reset(self):
self._val = 0
class Timer(object):
__slots__ = ("name", "_val", "_count", "_lock", "reset_on_collect")
def __init__(self, name=None, reset_on_collect=False):
self.name = name
self._val = 0
self._count = 0
self._lock = threading.Lock()
self.reset_on_collect = reset_on_collect
def update(self, duration, count=1):
with self._lock:
self._val += duration
self._count += count
def reset(self):
with self._lock:
self._val = 0
self._count = 0
@property
def val(self):
with self._lock:
return self._val, self._count
class NoopMetric(object):
"""
A no-op metric that implements the "interface" of both Counter and Gauge.
Note that even when using a no-op metric, the value itself will still be calculated.
"""
def __init__(self, label, initial_value=0):
return
@property
def val(self):
return None
@val.setter
def val(self, value):
return
def inc(self, delta=1):
return
def dec(self, delta=-1):
return
def update(self, duration, count=1):
return
def reset(self):
return
noop_metric = NoopMetric("noop")
class MetricSetNotFound(LookupError):
def __init__(self, class_path):
super(MetricSetNotFound, self).__init__("%s metric set not found" % class_path)
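# --- Illustrative usage sketch (not part of the original module). ---
# It shows how the Counter/Gauge/Timer helpers and MetricsSet.collect() fit
# together. The stub registry below is an assumption: it only provides the
# `ignore_patterns` attribute that MetricsSet reads; in the real agent the
# registry is a MetricsRegistry bound to an elasticapm Client, and metric
# sets are registered by dotted class path via MetricsRegistry.register().
class _StubRegistry(object):
    ignore_patterns = []
def _demo_metrics_set():
    metrics = MetricsSet(_StubRegistry())
    metrics.counter("requests.count", task="demo").inc()
    metrics.gauge("queue.depth").val = 3
    metrics.timer("request.duration", task="demo").update(0.25)
    # Each yielded item looks like:
    # {"samples": {...}, "timestamp": <epoch in microseconds>, "tags": {...}}
    return list(metrics.collect())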
| bsd-3-clause | -679,619,616,388,255,400 | 34.194937 | 119 | 0.593512 | false |
RuthAngus/K2rotation | tests/fap.py | 1 | 1301 | import numpy as np
# calculate the false alarm probability
def fap(x, y, basis, fs, N, plot=False, sig=False):
amp2s, s2n, _ = K2pgram(x, y, basis, fs) # 1st pgram
if sig: power = s2n
else: power = amp2s
mf, ms2n = peak_detect(fs, power) # find peak
AT = np.concatenate((basis, np.ones((3, len(y)))), axis=0)
ATA = np.dot(AT, AT.T)
# compute trends
_, _, trends = eval_freq(x, y, mf, AT, ATA, compute_trends=True)
if plot:
plt.clf()
plt.plot(1./fs, power, "k")
peak_heights = []
for n in range(N):
detrended_y = y - trends # remove trends
detrended_y = np.random.choice(detrended_y, len(y)) # shuffle
# add trends back in
amp2s, s2n, _ = K2pgram(x, detrended_y + trends, basis, fs)
if sig: power = s2n
else: power = amp2s
mx, my = peak_detect(fs, power)
peak_heights.append(my)
if plot:
plt.plot(1./fs, power, alpha=.2)
fap95 = np.percentile(peak_heights, 95)
fap90 = np.percentile(peak_heights, 90)
fap85 = np.percentile(peak_heights, 85)
fap50 = np.percentile(peak_heights, 50)
if plot:
plt.axhline(fap95, color=".5")
plt.savefig("fap")
# print fap95, fap90, fap85, fap50
return fap95, fap90, fap85, fap50
| mit | -6,241,883,900,449,100,000 | 35.138889 | 70 | 0.58186 | false |
tomchristie/apistar | apistar/compat.py | 1 | 1613 | import collections
import sys
if sys.version_info < (3, 6):
dict_type = collections.OrderedDict
else:
dict_type = dict
try:
import jinja2
except ImportError:
jinja2 = None
try:
import pygments
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
def pygments_highlight(text, lang, style):
lexer = get_lexer_by_name(lang, stripall=False)
formatter = HtmlFormatter(nowrap=True, style=style)
return pygments.highlight(text, lexer, formatter)
def pygments_css(style):
formatter = HtmlFormatter(style=style)
return formatter.get_style_defs('.highlight')
except ImportError:
pygments = None
def pygments_highlight(text, lang, style):
return text
def pygments_css(style):
return None
try:
# Ideally we subclass `_TemporaryFileWrapper` to present a clear __repr__
# for downloaded files.
from tempfile import _TemporaryFileWrapper
class DownloadedFile(_TemporaryFileWrapper):
basename = None
def __repr__(self):
state = "closed" if self.closed else "open"
mode = "" if self.closed else " '%s'" % self.file.mode
return "<DownloadedFile '%s', %s%s>" % (self.name, state, mode)
def __str__(self):
return self.__repr__()
except ImportError:
# On some platforms (eg GAE) the private _TemporaryFileWrapper may not be
# available, just use the standard `NamedTemporaryFile` function
# in this case.
import tempfile
DownloadedFile = tempfile.NamedTemporaryFile
| bsd-3-clause | -1,164,033,854,142,004,500 | 25.016129 | 77 | 0.66026 | false |
absalon-james/python-blueflood | bluefloodclient/client.py | 1 | 9568 | import copy
import json
import logging
import pprint
import requests
import utils
from urlparse import urljoin
class Datapoint(dict):
"""
Models a datapoint to be ingested into blueflood.
"""
logger = logging.getLogger('blueflood.client.Datapoint')
def __init__(self, name, value, collection_time=None,
ttl_seconds=None, unit=None):
"""
Inits the datapoint
@param name - String name of the metric
@param value - Value of the metric
@param collection_time - Time of collection
@param ttl_seconds - Number of seconds for datapoint to live
@param unit - String unit of the metric
"""
self['metricValue'] = value
self['metricName'] = name
if collection_time is None:
self.logger.debug("No collection time provided. Generating now")
collection_time = utils.time_in_ms()
self['collectionTime'] = collection_time
# Set ttl
if not ttl_seconds:
ttl_seconds = 60 * 60 * 24 * 180
ttl_seconds = max(ttl_seconds, 0)
self['ttlInSeconds'] = ttl_seconds
# Set units
if unit:
self['unit'] = unit
self.logger.debug("Created datapoint:\n%s" % pprint.pformat(self))
class Blueflood(object):
"""
Blueflood client.
"""
logger = logging.getLogger('blueflood.client.Blueflood')
base_headers = {
'Accept': 'application/json',
'Content-Type': 'application/json'
}
# Options available for selection on some GET requests
selectables = [
'average',
'min',
'max',
'numPoints',
'variance'
]
def __init__(self, auth_url=None, apikey=None,
username=None, region='IAD',
ingest_url=None, read_url=None):
"""
Inits the client.
@param auth_url - String url for authentication
@param apikey - String api key for authentication
@param username - String username for authentication
@param region - String region name
"""
self.auth_url = auth_url
self._read_url = read_url
self._ingest_url = ingest_url
self.apikey = apikey
self.username = username
self._token = None
self.read_service = None
self.ingest_service = None
self.region = region
self.get_token()
def selects(self, **kwargs):
"""
Generates the parameter for select queries on certain GET
requests.
@param **kwargs - Dictionary containing selectables.
@return - String - comma separated list of selectables
"""
return ','.join([s for s in self.selectables
if s in kwargs and kwargs.get(s)])
def read_params(self, start, stop, points=None, resolution=None):
"""
Sets up a dictionary with basic read parameters for certain
GET requests.
@param start - Float time in seconds
@param stop - Float time in seconds
@param points - Integer number of points
@return - Dictionary
"""
params = {
'from': start,
'to': stop,
}
if resolution:
params['resolution'] = resolution
elif points:
params['points'] = points
return params
def invalidate_token(self):
"""
Unsets the token.
"""
self.logger.debug("Invalidating token")
self._token = None
def get_token(self):
"""
Returns the current token if exists. Gets a new one otherwise.
Also updates the service catalog.
@return string
"""
# Return token if we have it
if self._token is not None:
return self._token
# We want json
headers = copy.copy(self.base_headers)
# Credential payload
data = {
'auth': {
'RAX-KSKEY:apiKeyCredentials': {
'username': self.username,
'apiKey': self.apikey
}
}
}
resp = requests.post(
urljoin(self.auth_url, 'tokens'),
data=json.dumps(data),
headers=headers
)
resp.raise_for_status()
resp_json = resp.json()
self._token = resp_json['access']['token']['id']
self.update_catalog(resp_json['access']['serviceCatalog'])
return self._token
def update_catalog(self, service_catalog):
"""
Sets the read and ingest service
@param service_catalog - List of dicts from 'serviceCatalog'
"""
ingest_name = 'cloudMetricsIngest'
read_name = 'cloudMetrics'
for s in service_catalog:
if s['name'] == ingest_name:
self.ingest_service = s
elif s['name'] == read_name:
self.read_service = s
def read_url(self, region='IAD'):
"""
Returns the url for reading metrics
@param region - String region name
@return String|None
"""
if self._read_url is not None:
return self._read_url
if self.read_service is not None:
return self.url_for_region(self.read_service, region)
raise Exception("No read service found")
def ingest_url(self, region="IAD"):
"""
Returns the url for ingesting metrics
@param region - String name of the region
@return String|None
"""
if self._ingest_url is not None:
return self._ingest_url
if self.ingest_service is not None:
return self.url_for_region(self.ingest_service, region)
raise Exception("No ingest service found")
def url_for_region(self, service, region):
"""
Returns a url from a service for a region
@param service - Dictionary with endpoints, name, type
@param region - String region name
@return String
"""
for e in service.get('endpoints', []):
if region == e.get('region'):
return e['publicURL']
def request(self, url, method='get', data=None, headers=None, params=None):
"""
Base request method.
Get a token if it doesn't exist
Make a request.
Check for 401.
Reauth one time if needed.
Return object if one is provided.
@param url - String url
@param method - String should be one of (get, post, put, delete)
        @param data - Object to be dumped into json
@param headers - Dictionary of headers
@param params - Dictionary of query string parameters
@return - JSON object
"""
func = getattr(requests, method)
_headers = copy.copy(self.base_headers)
_headers.update({
'X-Auth-Token': self.get_token()
})
kwargs = {'headers': _headers}
if params is not None:
kwargs['params'] = params
if headers is not None:
kwargs['headers'].update(headers)
if data is not None:
kwargs['data'] = json.dumps(data)
self.logger.debug("Request method: %s" % method)
self.logger.debug("Request url: %s" % url)
self.logger.debug("Params:\n%s" % pprint.pformat(params))
self.logger.debug("Headers:\n%s" % pprint.pformat(headers))
self.logger.debug("Data: \n%s" % pprint.pformat(data))
resp = func(url, **kwargs)
if resp.status_code == 401:
self.invalidate_token()
kwargs['headers']['X-Auth-Token'] = self.get_token()
resp = func(url, **kwargs)
resp.raise_for_status()
try:
resp_json = resp.json()
self.logger.debug("Response:\n%s" % pprint.pformat(resp_json))
return resp_json
except ValueError:
pass
def find_metrics(self, query='*'):
"""
Returns a list of metric names.
        @param query - String metric name query.
@return - List
"""
params = {'query': query}
url = "%s/%s" % (self.read_url(), 'metrics/search')
return self.request(url, method='get', params=params)
def ingest(self, data):
"""
Expects a list of dictionaries representing metric points.
@param data - List of point dictionaries.
"""
if not isinstance(data, list):
data = [data]
url = '%s/ingest' % self.ingest_url()
return self.request(url, method='post', data=data)
def get_metrics(self, start, stop, metrics,
points=None, resolution=None, **kwargs):
"""
Returns multiple metrics
@param start - Integer time in seconds
@param stop - Integer time in seconds
@param metrics - String list of metric names
@param points - Integer number of points
@param resolution - One of FULL|MIN5|MIN20|MIN60|MIN240|MIN1440
@param kwargs - Remaining keyword arguments should be selectables.
@return - Dictionary
"""
url = '%s/views' % self.read_url()
params = self.read_params(start, stop,
points=points, resolution=resolution)
selects = self.selects(**kwargs) if kwargs else None
if selects:
params['select'] = selects
return self.request(url, method='post', params=params, data=metrics)
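# --- Illustrative usage sketch (not part of the original module). ---
# The auth URL, username, apikey and metric name below are placeholders;
# note that constructing Blueflood() immediately authenticates against
# `auth_url`, so this function performs real HTTP requests when called.
def _demo_blueflood_usage():
    client = Blueflood(auth_url='https://identity.api.rackspacecloud.com/v2.0/',
                       username='my-user', apikey='my-api-key')
    client.ingest(Datapoint('demo.cpu.load', 0.75, unit='percent'))
    now = utils.time_in_ms()
    # Last hour of data, 60 points, with the 'average' and 'max' selectables.
    return client.get_metrics(now - 3600 * 1000, now, ['demo.cpu.load'],
                              points=60, average=True, max=True)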
| apache-2.0 | -3,208,775,314,037,163,000 | 29.182965 | 79 | 0.559887 | false |
betrisey/home-assistant | homeassistant/config.py | 1 | 11429 | """Module to help with parsing and generating configuration files."""
import asyncio
import logging
import os
import shutil
from types import MappingProxyType
# pylint: disable=unused-import
from typing import Any, Tuple # NOQA
import voluptuous as vol
from homeassistant.const import (
CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, CONF_UNIT_SYSTEM,
CONF_TIME_ZONE, CONF_CUSTOMIZE, CONF_ELEVATION, CONF_UNIT_SYSTEM_METRIC,
CONF_UNIT_SYSTEM_IMPERIAL, CONF_TEMPERATURE_UNIT, TEMP_CELSIUS,
__version__)
from homeassistant.core import valid_entity_id
from homeassistant.exceptions import HomeAssistantError
from homeassistant.util.yaml import load_yaml
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import set_customize
from homeassistant.util import dt as date_util, location as loc_util
from homeassistant.util.unit_system import IMPERIAL_SYSTEM, METRIC_SYSTEM
_LOGGER = logging.getLogger(__name__)
YAML_CONFIG_FILE = 'configuration.yaml'
VERSION_FILE = '.HA_VERSION'
CONFIG_DIR_NAME = '.homeassistant'
DEFAULT_CORE_CONFIG = (
# Tuples (attribute, default, auto detect property, description)
(CONF_NAME, 'Home', None, 'Name of the location where Home Assistant is '
'running'),
(CONF_LATITUDE, 0, 'latitude', 'Location required to calculate the time'
' the sun rises and sets'),
(CONF_LONGITUDE, 0, 'longitude', None),
(CONF_ELEVATION, 0, None, 'Impacts weather/sunrise data'
' (altitude above sea level in meters)'),
(CONF_UNIT_SYSTEM, CONF_UNIT_SYSTEM_METRIC, None,
'{} for Metric, {} for Imperial'.format(CONF_UNIT_SYSTEM_METRIC,
CONF_UNIT_SYSTEM_IMPERIAL)),
(CONF_TIME_ZONE, 'UTC', 'time_zone', 'Pick yours from here: http://en.wiki'
'pedia.org/wiki/List_of_tz_database_time_zones'),
) # type: Tuple[Tuple[str, Any, Any, str], ...]
DEFAULT_CONFIG = """
# Show links to resources in log and frontend
introduction:
# Enables the frontend
frontend:
http:
# Uncomment this to add a password (recommended!)
# api_password: PASSWORD
# Checks for available updates
updater:
# Discover some devices automatically
discovery:
# Allows you to issue voice commands from the frontend in enabled browsers
conversation:
# Enables support for tracking state changes over time.
history:
# View all events in a logbook
logbook:
# Track the sun
sun:
# Weather Prediction
sensor:
platform: yr
"""
def _valid_customize(value):
"""Config validator for customize."""
if not isinstance(value, dict):
raise vol.Invalid('Expected dictionary')
for key, val in value.items():
if not valid_entity_id(key):
raise vol.Invalid('Invalid entity ID: {}'.format(key))
if not isinstance(val, dict):
raise vol.Invalid('Value of {} is not a dictionary'.format(key))
return value
CORE_CONFIG_SCHEMA = vol.Schema({
CONF_NAME: vol.Coerce(str),
CONF_LATITUDE: cv.latitude,
CONF_LONGITUDE: cv.longitude,
CONF_ELEVATION: vol.Coerce(int),
vol.Optional(CONF_TEMPERATURE_UNIT): cv.temperature_unit,
CONF_UNIT_SYSTEM: cv.unit_system,
CONF_TIME_ZONE: cv.time_zone,
vol.Required(CONF_CUSTOMIZE,
default=MappingProxyType({})): _valid_customize,
})
def get_default_config_dir() -> str:
"""Put together the default configuration directory based on OS."""
data_dir = os.getenv('APPDATA') if os.name == "nt" \
else os.path.expanduser('~')
return os.path.join(data_dir, CONFIG_DIR_NAME)
def ensure_config_exists(config_dir: str, detect_location: bool=True) -> str:
"""Ensure a config file exists in given configuration directory.
Creating a default one if needed.
Return path to the config file.
"""
config_path = find_config_file(config_dir)
if config_path is None:
print("Unable to find configuration. Creating default one in",
config_dir)
config_path = create_default_config(config_dir, detect_location)
return config_path
def create_default_config(config_dir, detect_location=True):
"""Create a default configuration file in given configuration directory.
Return path to new config file if success, None if failed.
This method needs to run in an executor.
"""
config_path = os.path.join(config_dir, YAML_CONFIG_FILE)
version_path = os.path.join(config_dir, VERSION_FILE)
info = {attr: default for attr, default, _, _ in DEFAULT_CORE_CONFIG}
location_info = detect_location and loc_util.detect_location_info()
if location_info:
if location_info.use_metric:
info[CONF_UNIT_SYSTEM] = CONF_UNIT_SYSTEM_METRIC
else:
info[CONF_UNIT_SYSTEM] = CONF_UNIT_SYSTEM_IMPERIAL
for attr, default, prop, _ in DEFAULT_CORE_CONFIG:
if prop is None:
continue
info[attr] = getattr(location_info, prop) or default
if location_info.latitude and location_info.longitude:
info[CONF_ELEVATION] = loc_util.elevation(location_info.latitude,
location_info.longitude)
# Writing files with YAML does not create the most human readable results
# So we're hard coding a YAML template.
try:
with open(config_path, 'w') as config_file:
config_file.write("homeassistant:\n")
for attr, _, _, description in DEFAULT_CORE_CONFIG:
if info[attr] is None:
continue
elif description:
config_file.write(" # {}\n".format(description))
config_file.write(" {}: {}\n".format(attr, info[attr]))
config_file.write(DEFAULT_CONFIG)
with open(version_path, 'wt') as version_file:
version_file.write(__version__)
return config_path
except IOError:
print('Unable to create default configuration file', config_path)
return None
@asyncio.coroutine
def async_hass_config_yaml(hass):
"""Load YAML from hass config File.
    This function allows a component inside the asyncio loop to reload its
    config by itself.
This method is a coroutine.
"""
def _load_hass_yaml_config():
path = find_config_file(hass.config.config_dir)
conf = load_yaml_config_file(path)
return conf
conf = yield from hass.loop.run_in_executor(None, _load_hass_yaml_config)
return conf
def find_config_file(config_dir):
"""Look in given directory for supported configuration files.
Async friendly.
"""
config_path = os.path.join(config_dir, YAML_CONFIG_FILE)
return config_path if os.path.isfile(config_path) else None
def load_yaml_config_file(config_path):
"""Parse a YAML configuration file.
This method needs to run in an executor.
"""
conf_dict = load_yaml(config_path)
if not isinstance(conf_dict, dict):
msg = 'The configuration file {} does not contain a dictionary'.format(
os.path.basename(config_path))
_LOGGER.error(msg)
raise HomeAssistantError(msg)
return conf_dict
def process_ha_config_upgrade(hass):
"""Upgrade config if necessary.
This method needs to run in an executor.
"""
version_path = hass.config.path(VERSION_FILE)
try:
with open(version_path, 'rt') as inp:
conf_version = inp.readline().strip()
except FileNotFoundError:
# Last version to not have this file
conf_version = '0.7.7'
if conf_version == __version__:
return
_LOGGER.info('Upgrading config directory from %s to %s', conf_version,
__version__)
lib_path = hass.config.path('deps')
if os.path.isdir(lib_path):
shutil.rmtree(lib_path)
with open(version_path, 'wt') as outp:
outp.write(__version__)
@asyncio.coroutine
def async_process_ha_core_config(hass, config):
"""Process the [homeassistant] section from the config.
This method is a coroutine.
"""
# pylint: disable=too-many-branches
config = CORE_CONFIG_SCHEMA(config)
hac = hass.config
def set_time_zone(time_zone_str):
"""Helper method to set time zone."""
if time_zone_str is None:
return
time_zone = date_util.get_time_zone(time_zone_str)
if time_zone:
hac.time_zone = time_zone
date_util.set_default_time_zone(time_zone)
else:
_LOGGER.error('Received invalid time zone %s', time_zone_str)
for key, attr in ((CONF_LATITUDE, 'latitude'),
(CONF_LONGITUDE, 'longitude'),
(CONF_NAME, 'location_name'),
(CONF_ELEVATION, 'elevation')):
if key in config:
setattr(hac, attr, config[key])
if CONF_TIME_ZONE in config:
set_time_zone(config.get(CONF_TIME_ZONE))
set_customize(config.get(CONF_CUSTOMIZE) or {})
if CONF_UNIT_SYSTEM in config:
if config[CONF_UNIT_SYSTEM] == CONF_UNIT_SYSTEM_IMPERIAL:
hac.units = IMPERIAL_SYSTEM
else:
hac.units = METRIC_SYSTEM
elif CONF_TEMPERATURE_UNIT in config:
unit = config[CONF_TEMPERATURE_UNIT]
if unit == TEMP_CELSIUS:
hac.units = METRIC_SYSTEM
else:
hac.units = IMPERIAL_SYSTEM
_LOGGER.warning("Found deprecated temperature unit in core config, "
"expected unit system. Replace '%s: %s' with "
"'%s: %s'", CONF_TEMPERATURE_UNIT, unit,
CONF_UNIT_SYSTEM, hac.units.name)
# Shortcut if no auto-detection necessary
if None not in (hac.latitude, hac.longitude, hac.units,
hac.time_zone, hac.elevation):
return
discovered = []
# If we miss some of the needed values, auto detect them
if None in (hac.latitude, hac.longitude, hac.units,
hac.time_zone):
info = yield from hass.loop.run_in_executor(
None, loc_util.detect_location_info)
if info is None:
_LOGGER.error('Could not detect location information')
return
if hac.latitude is None and hac.longitude is None:
hac.latitude, hac.longitude = (info.latitude, info.longitude)
discovered.append(('latitude', hac.latitude))
discovered.append(('longitude', hac.longitude))
if hac.units is None:
hac.units = METRIC_SYSTEM if info.use_metric else IMPERIAL_SYSTEM
discovered.append((CONF_UNIT_SYSTEM, hac.units.name))
if hac.location_name is None:
hac.location_name = info.city
discovered.append(('name', info.city))
if hac.time_zone is None:
set_time_zone(info.time_zone)
discovered.append(('time_zone', info.time_zone))
if hac.elevation is None and hac.latitude is not None and \
hac.longitude is not None:
elevation = yield from hass.loop.run_in_executor(
None, loc_util.elevation, hac.latitude, hac.longitude)
hac.elevation = elevation
discovered.append(('elevation', elevation))
if discovered:
_LOGGER.warning(
'Incomplete core config. Auto detected %s',
', '.join('{}: {}'.format(key, val) for key, val in discovered))
| mit | -4,513,004,092,382,685,000 | 31.46875 | 79 | 0.636976 | false |
zsdonghao/tensorlayer | tensorlayer/layers/extend.py | 1 | 2630 | #! /usr/bin/python
# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorlayer.layers.core import Layer
from tensorlayer import logging
from tensorlayer.decorators import deprecated_alias
__all__ = [
'ExpandDims',
'Tile',
]
class ExpandDims(Layer):
"""
The :class:`ExpandDims` class inserts a dimension of 1 into a tensor's shape,
see `tf.expand_dims() <https://www.tensorflow.org/api_docs/python/tf/expand_dims>`__ .
Parameters
----------
axis : int
The dimension index at which to expand the shape of input.
name : str
A unique layer name.
Examples
--------
>>> import tensorflow as tf
>>> import tensorlayer as tl
>>> x = tf.placeholder(tf.float32, (None, 100))
>>> n = tl.layers.Input(x, name='in')
>>> n = tl.layers.ExpandDims(n, 2)
[None, 100, 1]
"""
@deprecated_alias(layer='prev_layer', end_support_version=1.9) # TODO remove this line for the 1.9 release
def __init__(
self,
prev_layer,
axis,
name='expand_dims',
):
# super(ExpandDims, self).__init__(prev_layer=prev_layer, name=name)
super().__init__(name)
self.axis = axis
logging.info("ExpandDims %s: axis: %d" % (self.name, self.axis))
def build(self, inputs):
pass
def forward(self, inputs):
outputs = tf.expand_dims(inputs, axis=self.axis, name=self.name)
return outputs
class Tile(Layer):
"""
The :class:`Tile` class constructs a tensor by tiling a given tensor,
see `tf.tile() <https://www.tensorflow.org/api_docs/python/tf/tile>`__ .
Parameters
----------
multiples: tensor
Must be one of the following types: int32, int64.
1-D Length must be the same as the number of dimensions in input.
name : None or str
A unique layer name.
Examples
--------
>>> import tensorflow as tf
>>> import tensorlayer as tl
>>> x = tf.placeholder(tf.float32, (None, 100))
>>> n = tl.layers.Input(x, name='in')
>>> n = tl.layers.ExpandDims(n, 2)
>>> n = tl.layers.Tile(n, [-1, 1, 3])
[None, 100, 3]
"""
    def __init__(self, multiples=None, name=None):  # was: name='tile'
# super(Tile, self).__init__(prev_layer=prev_layer, name=name)
super().__init__(name)
self.multiples = multiples
logging.info("Tile %s: multiples: %s" % (self.name, self.multiples))
def build(self, inputs):
pass
def forward(self, inputs):
outputs = tf.tile(inputs, multiples=self.multiples, name=self.name)
return outputs
| apache-2.0 | -4,437,717,608,415,178,000 | 26.113402 | 111 | 0.585932 | false |
googlefonts/fontbakery-dashboard | containers/base/python/worker/diffenator.py | 1 | 4902 | #!/usr/bin/env python
from __future__ import print_function, division, unicode_literals
import os
from .diff_tools_shared import (
DiffWorkerBase
, on_each_matching_font
)
import diffenator
from diffenator.diff import DiffFonts
from diffenator.font import DFont
#################
# START taken from gftools-qa
# https://github.com/googlefonts/gftools/blob/main/bin/gftools-qa.py
#################
DIFFENATOR_THRESHOLDS = {
"weak": dict(
glyphs_thresh=0.02,
marks_thresh=20,
mkmks_thresh=20,
kerns_thresh=30,
render_diffs=True,
),
"normal": dict(
glyphs_thresh=0.01,
marks_thresh=10,
mkmks_thresh=10,
kerns_thresh=15,
render_diffs=True,
),
"strict": dict(
glyphs_thresh=0.00,
marks_thresh=0,
mkmks_thresh=0,
kerns_thresh=1,
render_diffs=True,
)
}
@on_each_matching_font
def run_diffenator(logger, font_before, font_after, out, thresholds=DIFFENATOR_THRESHOLDS['normal']):
logger.debug('run_diffenator with fonts before: %s after: %s'
, font_before, font_after)
font_before = DFont(font_before)
font_after = DFont(font_after)
if font_after.is_variable and not font_before.is_variable:
font_after.set_variations_from_static(font_before)
elif not font_after.is_variable and font_before.is_variable:
font_before.set_variations_from_static(font_after)
elif font_after.is_variable and font_before.is_variable:
# TODO get wdth and slnt axis vals
variations = {"wght": font_before.ttfont["OS/2"].usWeightClass}
font_after.set_variations(variations)
font_before.set_variations(variations)
diff = DiffFonts(font_before, font_after, settings=thresholds)
diff.to_gifs(dst=out)
diff.to_txt(20, os.path.join(out, "report.txt"))
diff.to_md(20, os.path.join(out, "report.md"))
diff.to_html(20, os.path.join(out, "report.html"), image_dir=".")
#################
# /END taken from gftools-qa
#################
class DiffenatorWorker(DiffWorkerBase):
def __init__(self, logging, job, cache, persistence, queue, tmp_directory):
self._workername = 'diffenator'
super().__init__(logging, job, cache, persistence, queue, tmp_directory)
self._answer.preparation_logs.append(
'Diffenator version {}'.format(diffenator.__version__))
def run(self):
self._set_answer_timestamp('started')
fonts = self._prepare(self._cache.get(self._job.cache_key).files, ['before', 'after'])
# all_fonts = reduce(lambda a,b: a+b, fonts.values(),[])
all_files = [os.path.join(dp, f) for dp, dn, fn \
in os.walk(self._tmp_directory) for f in fn]
self._log.debug('Files in Tempdir {}: {}'.format(
self._tmp_directory, all_files))
self._log.info('entering run_diffenator …')
# FIXME: should we collect stdout/stderr here???
run_diffenator(self._log, fonts['before'], fonts['after'], self._out_dir, DIFFENATOR_THRESHOLDS['normal'])
self._log.info('DONE! docid: %s', self._job.docid)
# The intention for main was ever only for debugging/profiling.
# debug_run.py:
# #!/usr/bin/env python3
#
# import logging
# FORMAT = '%(asctime)s:%(name)s:%(levelname)s:%(message)s'
# logging.basicConfig(format=FORMAT)
#
# import sys
# print('python version:', sys.version)
#
# from worker.diffenator import main
# main()
# with memory profiling:
# base/python$ mprof run debug_python.py
def main():
import logging
FORMAT = '%(asctime)s:%(name)s:%(levelname)s:%(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger('DIFFENATOR_WORKER')
import importlib
wl = importlib.import_module('worker-launcher')
setLoglevel = wl.setLoglevel
getSetup = wl.getSetup
setup = getSetup()
setLoglevel(logger, setup.log_level)
# DEBUG is a lot of output!
# setLoglevel(logging.getLogger('fontdiffenator'), 'INFO')
setLoglevel(logging.getLogger('fontdiffenator'), setup.log_level)
logger.info('loglevel: ' + setup.log_level)
fonts = {'before': [], 'after': []}
tmp = '/var/python/debug_vollkorn'
out_dir = os.path.join(tmp, 'result')
os.mkdir(out_dir)
# just collect the fonts
for sub in fonts.keys():
dirname = os.path.join(tmp, sub)
fonts[sub] = [os.path.join(dirname, filename)\
for filename in next(os.walk(dirname))[2]\
if filename.endswith('.ttf')]
logger.info('fonts before:\n%s', '\n'.join(fonts['before']))
logger.info('fonts after:\n%s', '\n'.join(fonts['after']))
run_diffenator(logger, fonts['before'], fonts['after'], out_dir, DIFFENATOR_THRESHOLDS['normal'])
| apache-2.0 | -2,122,081,485,672,597,200 | 33.027778 | 110 | 0.618776 | false |
apache/chemistry-cmislib | src/cmislib/exceptions.py | 2 | 2436 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module contains exceptions used throughout the API.
"""
class CmisException(Exception):
"""
Common base class for all exceptions.
"""
def __init__(self, status=None, url=None):
Exception.__init__(self, "Error %s at %s" % (status, url))
self.status = status
self.url = url
class InvalidArgumentException(CmisException):
""" InvalidArgumentException """
pass
class ObjectNotFoundException(CmisException):
""" ObjectNotFoundException """
pass
class NotSupportedException(CmisException):
""" NotSupportedException """
pass
class PermissionDeniedException(CmisException):
""" PermissionDeniedException """
pass
class RuntimeException(CmisException):
""" RuntimeException """
pass
class ConstraintException(CmisException):
""" ConstraintException """
pass
class ContentAlreadyExistsException(CmisException):
"""ContentAlreadyExistsException """
pass
class FilterNotValidException(CmisException):
"""FilterNotValidException """
pass
class NameConstraintViolationException(CmisException):
"""NameConstraintViolationException """
pass
class StorageException(CmisException):
"""StorageException """
pass
class StreamNotSupportedException(CmisException):
""" StreamNotSupportedException """
pass
class UpdateConflictException(CmisException):
""" UpdateConflictException """
pass
class VersioningException(CmisException):
""" VersioningException """
pass
| apache-2.0 | -5,310,385,305,240,446,000 | 18.333333 | 67 | 0.694581 | false |
selkhateeb/tic | docs/conf.py | 1 | 8718 | # -*- coding: utf-8 -*-
#
# Tic Toolkit documentation build configuration file, created by
# sphinx-quickstart on Sun Jan 15 11:39:05 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import logging
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../src'))
from tic.development.appengine.server import APPENGINE_LIBS
sys.path[1:1] = APPENGINE_LIBS
sys.path.append(os.path.abspath('_themes'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Tic Toolkit'
copyright = u'2012, NanoSN Cloud Computing Services Inc'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9'
# The full version, including alpha/beta/rc tags.
release = '0.9.alpha1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme_path = ['_themes']
html_theme = 'flask'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'TicToolkitdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'TicToolkit.tex', u'Tic Toolkit Documentation',
u'NanoSN Cloud Computing Services Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tictoolkit', u'Tic Toolkit Documentation',
[u'NanoSN Cloud Computing Services Inc'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'TicToolkit', u'Tic Toolkit Documentation',
u'NanoSN Cloud Computing Services Inc', 'TicToolkit', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for sphinx.ext.todo ------------------------------------------------
# If True, todo and todolist produce output, else produce nothing. The default is False.
todo_include_todos = True
# -- Options for the Flask theme ------------------------------------------------
# filename of a picture in _static to be used as replacement for the h1
# in the index.rst file.
#index_logo = ''
# height of the index logo
#index_logo_height = 120px
#repository name on github for the "fork me" badge
github_fork = 'https://github.com/selkhateeb/tic'
| apache-2.0 | 8,012,013,657,169,049,000 | 32.022727 | 122 | 0.699587 | false |
GyroscopeHQ/grpcat | hello_pb2.py | 1 | 7371 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: hello.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='hello.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\x0bhello.proto\"\x1f\n\x0fGetHelloRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1d\n\nHelloReply\x12\x0f\n\x07message\x18\x01 \x01(\t24\n\x05Hello\x12+\n\x08GetHello\x12\x10.GetHelloRequest\x1a\x0b.HelloReply\"\x00\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_GETHELLOREQUEST = _descriptor.Descriptor(
name='GetHelloRequest',
full_name='GetHelloRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='GetHelloRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=15,
serialized_end=46,
)
_HELLOREPLY = _descriptor.Descriptor(
name='HelloReply',
full_name='HelloReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='message', full_name='HelloReply.message', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=48,
serialized_end=77,
)
DESCRIPTOR.message_types_by_name['GetHelloRequest'] = _GETHELLOREQUEST
DESCRIPTOR.message_types_by_name['HelloReply'] = _HELLOREPLY
GetHelloRequest = _reflection.GeneratedProtocolMessageType('GetHelloRequest', (_message.Message,), dict(
DESCRIPTOR = _GETHELLOREQUEST,
__module__ = 'hello_pb2'
# @@protoc_insertion_point(class_scope:GetHelloRequest)
))
_sym_db.RegisterMessage(GetHelloRequest)
HelloReply = _reflection.GeneratedProtocolMessageType('HelloReply', (_message.Message,), dict(
DESCRIPTOR = _HELLOREPLY,
__module__ = 'hello_pb2'
# @@protoc_insertion_point(class_scope:HelloReply)
))
_sym_db.RegisterMessage(HelloReply)
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class HelloStub(object):
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetHello = channel.unary_unary(
'/Hello/GetHello',
request_serializer=GetHelloRequest.SerializeToString,
response_deserializer=HelloReply.FromString,
)
class HelloServicer(object):
def GetHello(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_HelloServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetHello': grpc.unary_unary_rpc_method_handler(
servicer.GetHello,
request_deserializer=GetHelloRequest.FromString,
response_serializer=HelloReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'Hello', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class BetaHelloServicer(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
def GetHello(self, request, context):
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetaHelloStub(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
def GetHello(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
raise NotImplementedError()
GetHello.future = None
def beta_create_Hello_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_deserializers = {
('Hello', 'GetHello'): GetHelloRequest.FromString,
}
response_serializers = {
('Hello', 'GetHello'): HelloReply.SerializeToString,
}
method_implementations = {
('Hello', 'GetHello'): face_utilities.unary_unary_inline(servicer.GetHello),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_Hello_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_serializers = {
('Hello', 'GetHello'): GetHelloRequest.SerializeToString,
}
response_deserializers = {
('Hello', 'GetHello'): HelloReply.FromString,
}
cardinalities = {
'GetHello': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'Hello', cardinalities, options=stub_options)
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
| mit | -1,346,464,413,731,646,000 | 33.605634 | 256 | 0.715507 | false |
mpekalski/Y8M | video_level_code/xp_video_level_models.py | 1 | 52274 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains model definitions."""
import math
import numpy as np
import models
import tensorflow as tf
import utils
from tensorflow import flags
import tensorflow.contrib.slim as slim
FLAGS = flags.FLAGS
flags.DEFINE_integer(
"MoNN_num_experts", 4,
"The number of mixtures (excluding the dummy 'expert') used for MoNNs.")
#%% helper functions
def weight_variable(shape):
"""Create a weight variable with appropriate initialization."""
initial = tf.truncated_normal(shape, stddev=1.0/np.sqrt(2*shape[0]))
return tf.Variable(initial)
def bias_variable(shape):
"""Create a bias variable with appropriate initialization."""
initial = tf.constant(0.1/shape[0], shape=shape)
return tf.Variable(initial)
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
"""Reusable code for making a simple neural net layer.
It does a matrix multiply, bias add, and then uses relu to nonlinearize.
It also sets up name scoping so that the resultant graph is easy to read,
and adds a number of summary ops.
"""
# Adding a name scope ensures logical grouping of the layers in the graph.
with tf.name_scope(layer_name):
# This Variable will hold the state of the weights for the layer
with tf.name_scope('weights'):
weights = weight_variable([input_dim, output_dim])
variable_summaries(weights)
regularizer = tf.nn.l2_loss(weights)
with tf.name_scope('biases'):
biases = bias_variable([output_dim])
variable_summaries(biases)
with tf.name_scope('Wx_plus_b'):
preactivate = tf.matmul(input_tensor, weights) + biases
tf.summary.histogram('pre_activations', preactivate)
activations = act(preactivate, name='activation')
tf.summary.histogram('activations', activations)
return activations, regularizer
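# Illustrative sketch only (not part of the original model zoo): how nn_layer is
# meant to be composed. The sizes below are made-up placeholders; the real
# models feed the 1024+128 dimensional video+audio feature vector.
def _example_nn_layer_stack(input_tensor, input_dim=1152, num_classes=4716):
  """Stacks two hidden layers with nn_layer and sums their L2 terms."""
  h1, reg1 = nn_layer(input_tensor, input_dim, 256, 'example_h1', act=tf.nn.relu)
  h2, reg2 = nn_layer(h1, 256, 128, 'example_h2', act=tf.nn.relu)
  preds, reg3 = nn_layer(h2, 128, num_classes, 'example_out', act=tf.nn.sigmoid)
  # Callers are expected to scale the summed regularizer by an l2 penalty, as
  # MyNNModel2/MyNNModel3 below do with their "regularization_loss" output.
  return preds, reg1 + reg2 + reg3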
#
# The first part contains the models we actually used;
# further down are models we only tried/experimented with.
#
# MoNN3L
# MoNN2Lw
# MoNN3Lw
# MoNN4Ln
#
class MoNN3L(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-6,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
a1Units = 4096
A1 = slim.fully_connected(
model_input, a1Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_HA1')
a2Units = 4096
A2 = slim.fully_connected(
A1, a2Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_HA2')
a2Units = 4096
A3 = slim.fully_connected(
A2, a2Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_HA3')
expert_activations = slim.fully_connected(
A3,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
final_probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
[-1, vocab_size])
return {"predictions": final_probabilities}
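# Minimal numpy sketch (an illustration that mirrors the reshape/reduce_sum
# logic above): for every (example, label) pair there are num_mixtures expert
# sigmoids plus one implicit dummy expert that always predicts 0; the gate
# softmax weights them, so the dummy expert's contribution simply vanishes.
def _example_mixture_combination(gate_logits, expert_logits):
  """gate_logits: [rows, num_mixtures + 1]; expert_logits: [rows, num_mixtures]."""
  gates = np.exp(gate_logits - gate_logits.max(axis=1, keepdims=True))
  gates /= gates.sum(axis=1, keepdims=True)        # softmax incl. the dummy expert
  experts = 1.0 / (1.0 + np.exp(-expert_logits))   # per-expert sigmoid
  # Dropping the last gate column is equivalent to adding gate_prob * 0.
  return (gates[:, :expert_logits.shape[1]] * experts).sum(axis=1)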
# a wide model hoping to memorize rare labels better
class MoNN2Lw(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="MoN2w_gates")
h1Units = 2305 * 6
A1 = slim.fully_connected(
model_input, h1Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='MoN2w_H1')
h2Units = 2305 * 3
A2 = slim.fully_connected(
A1, h2Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='MoN2w_H2')
#
expert_activations = slim.fully_connected(
A2,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="MoN2_experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
final_probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
[-1, vocab_size])
return {"predictions": final_probabilities}
class MoNN3Lw(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
a1Units = 2305*8
A1 = slim.fully_connected(
model_input, a1Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_HA1')
a2Units = 2305
A2 = slim.fully_connected(
A1, a2Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_HA2')
a2Units = 2305*4
A3 = slim.fully_connected(
A2, a2Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_HA3')
expert_activations = slim.fully_connected(
A3,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
final_probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
[-1, vocab_size])
return {"predictions": final_probabilities}
class MoNN4Ln(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-6,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
a1Units = 2048
A1 = slim.fully_connected(
model_input, a1Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_HA1')
a2Units = 2048
A2 = slim.fully_connected(
A1, a2Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_HA2')
a2Units = 2048
A3 = slim.fully_connected(
A2, a2Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_HA3')
a2Units = 2048
A4 = slim.fully_connected(
A3, a2Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_HA4')
expert_activations = slim.fully_connected(
A4,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
final_probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
[-1, vocab_size])
return {"predictions": final_probabilities}
#
# Abandoned Experiments
#
#%%
class MyNNModel0(models.BaseModel):
"""Logistic model with L2 regularization."""
def create_model(self, model_input, vocab_size, l2_penalty=1e-4, **unused_params):
"""Creates a logistic model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes."""
with tf.name_scope('MyNNModel0'):
h1Units = 2400
a1 = slim.fully_connected(
model_input, h1Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC1')
output = slim.fully_connected(
a1, vocab_size, activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC2')
return {"predictions": output}
#%%
class MyNNModel1(models.BaseModel):
"""A simple NN models (with L2 regularization)."""
def create_model(self, model_input, vocab_size, l2_penalty=1e-4,
is_train=True, **unused_params):
"""Creates a logistic model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes."""
with tf.name_scope('MyNNModel1'):
h1Units = 1152
h2Units = 2248
h3Units = 3096
keep_prob = 0.90
A1 = slim.fully_connected(
model_input, h1Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_H1')
A2 = slim.fully_connected(
A1, h2Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_H2')
A3 = slim.fully_connected(
A2, h3Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_H3')
#A4 = tf.nn.dropout(A3, keep_prob)
output = slim.fully_connected(
A3, vocab_size, activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_P')
return {"predictions": output}
#%%
class MyNNModel2(models.BaseModel):
"""A simple NN models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-4,
**unused_params):
"""Creates a simple one-hidden-layer Neural Network model.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
#A1 = slim.fully_connected(
# model_input, 800, activation_fn=tf.nn.sigmoid,
# weights_regularizer=slim.l2_regularizer(l2_penalty),
# scope='hidden1')
# output = slim.fully_connected(
# A1, vocab_size, activation_fn=tf.nn.sigmoid,
# weights_regularizer=slim.l2_regularizer(l2_penalty))
h1Units = 3600
A1, reg1 = nn_layer(model_input, 1024+128, h1Units, 'Hidden1', act=tf.nn.relu)
h2Units = 3600
A2, reg2 = nn_layer(A1, h1Units, h2Units, 'Hidden2', act=tf.nn.relu)
output, reg3 = nn_layer(A2, h2Units, vocab_size, 'Pred', act=tf.nn.sigmoid)
return {"predictions": output,
"regularization_loss":l2_penalty*(reg1+reg2+reg3)}
#%%
def nn_layer2( input_tensor, input_dim, output_dim, var_scope, act=tf.nn.relu):
with tf.variable_scope(var_scope):
weights = weight_variable([input_dim, output_dim])
regularizer = tf.nn.l2_loss(weights)
biases = bias_variable([output_dim])
preactivate = tf.matmul(input_tensor, weights) + biases
activations = act(preactivate, name='activation')
return activations, regularizer
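# Note: nn_layer2 above differs from nn_layer only in that it opens a
# tf.variable_scope instead of a name_scope and skips the TensorBoard
# summaries; both return the activations together with the L2 term of the
# layer's weight matrix.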
class MyNNModel3(models.BaseModel):
"""A simple NN models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-4,
**unused_params):
"""Creates a simple one-hidden-layer Neural Network model.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
#A1 = slim.fully_connected(
# model_input, 800, activation_fn=tf.nn.sigmoid,
# weights_regularizer=slim.l2_regularizer(l2_penalty),
# scope='hidden1')
# output = slim.fully_connected(
# A1, vocab_size, activation_fn=tf.nn.sigmoid,
# weights_regularizer=slim.l2_regularizer(l2_penalty))
with tf.variable_scope('MyNNModel3'):
h1Units = 3600
A1,reg1 = nn_layer2(model_input, 1024+128, h1Units, 'Hidden1', act=tf.nn.relu)
h2Units = 2400
A2, reg2 = nn_layer2(A1, h1Units, h2Units, 'Hidden2', act=tf.nn.relu)
h3Units = 2400
A3, reg3 = nn_layer2(A2, h2Units, h3Units, 'Hidden3', act=tf.nn.relu)
      output, reg4 = nn_layer(A3, h3Units, vocab_size, 'PredictionLayer', act=tf.nn.sigmoid)
return {"predictions": output,
"regularization_loss":l2_penalty*(reg1+reg2+reg3+reg4)}
#%%
class MoNN2L(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-6,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
h1Units = 4096
A1 = slim.fully_connected(
model_input, h1Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_H1')
h2Units = 4096
A2 = slim.fully_connected(
A1, h2Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_H2')
#
expert_activations = slim.fully_connected(
A2,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
final_probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
[-1, vocab_size])
return {"predictions": final_probabilities}
class MoNN2L_L1(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-6,
**unused_params):
num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
h1Units = 4096
A1 = slim.fully_connected(
model_input, h1Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_H1')
h2Units = 4096
A2 = slim.fully_connected(
A1, h2Units, activation_fn=tf.nn.relu,
        weights_regularizer=slim.l1_l2_regularizer(l2_penalty, l2_penalty),  # pass both scales; the default scale_l2=1.0 would dominate the loss
scope='FC_H2')
#
expert_activations = slim.fully_connected(
A2,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
final_probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
[-1, vocab_size])
return {"predictions": final_probabilities}
from tensorflow import logging
class MoNN2Drop(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
layers_keep_probs,
num_mixtures=None,
l2_penalty=1e-6,
**unused_params):
num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
logging.info("MoNN2Drop " + str(layers_keep_probs))
    # NOTE: this dropout result is never used below; the gates and FC_H1 still
    # read model_input directly.
    drop_out = tf.nn.dropout(model_input, layers_keep_probs[0], name="var_dropout")
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
h1Units = 4096
A1 = slim.fully_connected(
model_input, h1Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_H1')
h2Units = 4096
A1a = tf.nn.dropout(A1, layers_keep_probs[1])
A2 = slim.fully_connected(
A1a, h2Units, activation_fn=tf.nn.crelu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_H2')
A2a = tf.nn.dropout(A2, layers_keep_probs[2])
expert_activations = slim.fully_connected(
A2a,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
final_probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
[-1, vocab_size])
return {"predictions": final_probabilities}
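# Sketch of what the layers_keep_probs argument is assumed to look like, based
# on how it is indexed above (0 gates the input, 1 and 2 the hidden layers).
# For evaluation one would typically pass all ones so dropout is a no-op.
_EXAMPLE_TRAIN_KEEP_PROBS = [0.9, 0.75, 0.75]
_EXAMPLE_EVAL_KEEP_PROBS = [1.0, 1.0, 1.0]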
class MoNN2DropBNorm(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
layers_keep_probs,
num_mixtures=None,
l2_penalty=1e-6,
is_training=True,
**unused_params):
num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
logging.info("MoNN2Drop " + str(layers_keep_probs))
drop_out = tf.nn.dropout(model_input, layers_keep_probs[0],name="input/dropout")
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
model_input_norm = slim.batch_norm(
model_input,
center=True,
scale=True,
is_training=is_training,
scope='input/batch_norm')
h1Units = 4096
A1 = slim.fully_connected(
model_input_norm, h1Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_H1')
h2Units = 4096
A1a = tf.nn.dropout(A1, layers_keep_probs[1], name='layer1/dropout')
A1b = slim.batch_norm(
A1a,
center=True,
scale=True,
is_training=is_training,
scope='layer1/batch_norm')
A2 = slim.fully_connected(
A1b, h2Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_H2')
A2a = tf.nn.dropout(A2, layers_keep_probs[2], name='layer2/dropout')
expert_activations = slim.fully_connected(
A2a,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
final_probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
[-1, vocab_size])
return {"predictions": final_probabilities}
class MoNN2DropBNorm1Crelu(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
layers_keep_probs,
num_mixtures=None,
l2_penalty=1e-6,
is_training=True,
**unused_params):
num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
logging.info("MoNN2Drop " + str(layers_keep_probs))
drop_out = tf.nn.dropout(model_input, layers_keep_probs[0],name="input/dropout")
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
model_input_norm = slim.batch_norm(
model_input,
center=True,
scale=True,
is_training=is_training,
scope='input/batch_norm')
h1Units = 4096
A1 = slim.fully_connected(
        model_input_norm, h1Units, activation_fn=tf.nn.crelu,  # feed the batch-normalized input computed above (it was otherwise unused)
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_H1')
h2Units = 4096
A1a = tf.nn.dropout(A1, layers_keep_probs[1], name='layer1/dropout')
A1b = slim.batch_norm(
A1a,
center=True,
scale=True,
is_training=is_training,
scope='layer1/batch_norm')
A2 = slim.fully_connected(
A1b, h2Units, activation_fn=tf.nn.crelu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_H2')
A2a = tf.nn.dropout(A2, layers_keep_probs[2], name='layer2/dropout')
expert_activations = slim.fully_connected(
A2a,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
final_probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
[-1, vocab_size])
return {"predictions": final_probabilities}
class MoNN4L(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-6,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
a1Units = 4096
A1 = slim.fully_connected(
model_input, a1Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_HA1')
a2Units = 4096
A2 = slim.fully_connected(
A1, a2Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_HA2')
a2Units = 4096
A3 = slim.fully_connected(
A2, a2Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_HA3')
a2Units = 4096
A4 = slim.fully_connected(
A3, a2Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_HA4')
expert_activations = slim.fully_connected(
A4,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
final_probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
[-1, vocab_size])
return {"predictions": final_probabilities}
class MoNN4LDropG2L(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
layers_keep_probs,
num_mixtures=None,
l2_penalty=1e-6,
is_training=True,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
logging.info("MoNN4LDrop " + str(layers_keep_probs))
drop_model_input = tf.nn.dropout(model_input, layers_keep_probs[0])
#
# Added one more layer to gate
#
X1 = slim.fully_connected(
drop_model_input,
vocab_size ,
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates_l1")
gate_activations = slim.fully_connected(
X1,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates_l2")
a1Units = 4096
A1 = slim.fully_connected(
drop_model_input, a1Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_HA1')
a2Units = 4096
A1d = tf.nn.dropout(A1, layers_keep_probs[1])
A2 = slim.fully_connected(
A1d, a2Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_HA2')
a2Units = 4096
    A2d = tf.nn.dropout(A2, layers_keep_probs[2])  # was A1; the dropout should wrap the layer just computed
A3 = slim.fully_connected(
A2d, a2Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_HA3')
a2Units = 4096
A3d = tf.nn.dropout(A3, layers_keep_probs[3])
A4 = slim.fully_connected(
A3d, a2Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_HA4')
expert_activations = slim.fully_connected(
A4,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
final_probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
[-1, vocab_size])
return {"predictions": final_probabilities}
class MoNN4LDropG3L(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
layers_keep_probs,
num_mixtures=None,
l2_penalty=1e-6,
is_training=True,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
logging.info("MoNN4LDrop " + str(layers_keep_probs))
drop_model_input = tf.nn.dropout(model_input, layers_keep_probs[0])
#
# Added one more layer to gate
#
X1 = slim.fully_connected(
drop_model_input,
vocab_size ,
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates_l1")
X2 = slim.fully_connected(
X1,
vocab_size,
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates_l2")
gate_activations = slim.fully_connected(
X2,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates_activation")
a1Units = 4096
A1 = slim.fully_connected(
drop_model_input, a1Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_HA1')
a2Units = 4096
A1d = tf.nn.dropout(A1, layers_keep_probs[1])
A2 = slim.fully_connected(
A1d, a2Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_HA2')
a2Units = 4096
A2d = tf.nn.dropout(A2, layers_keep_probs[2])
A3 = slim.fully_connected(
A2d, a2Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_HA3')
a2Units = 4096
A3d = tf.nn.dropout(A3, layers_keep_probs[3])
A4 = slim.fully_connected(
A3d, a2Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_HA4')
expert_activations = slim.fully_connected(
A4,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
final_probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
[-1, vocab_size])
return {"predictions": final_probabilities}
from tensorflow import logging
class MoNN2a128r1024G1L(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
layers_keep_probs,
num_mixtures=None,
l2_penalty=1e-6,
is_training=True,
**unused_params):
num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
    logging.info("MoNN2a128r1024G1L " + str(layers_keep_probs))
drop_out = tf.nn.dropout(model_input, layers_keep_probs[0],name="var_dropout")
logging.info(model_input.shape)
inputA = model_input[:,0:128]
inputB = model_input[:,128:1152]
inputAd = tf.nn.dropout(inputA, layers_keep_probs[0])
inputBd = tf.nn.dropout(inputB, layers_keep_probs[0])
inputAdn = slim.batch_norm(
inputAd,
center=True,
scale=True,
is_training=is_training,
scope='inputAd/batch_norm')
inputBdn = slim.batch_norm(
inputBd,
center=True,
scale=True,
is_training=is_training,
scope='inputBd/batch_norm')
X1 = slim.fully_connected(
tf.concat([inputAdn,inputBdn],1),
vocab_size ,
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates_l1")
gate_activations = slim.fully_connected(
X1,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
a_h1Units = 512
A1 = slim.fully_connected(
inputAdn, a_h1Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_H1_audio')
a_h2Units = 512
A1n = slim.batch_norm(
A1,
center=True,
scale=True,
is_training=is_training,
scope='A1/batch_norm')
A1a = tf.nn.dropout(A1n, layers_keep_probs[1])
logging.info("A1a")
logging.info(A1a.shape)
A2 = slim.fully_connected(
A1a, a_h2Units, activation_fn=tf.nn.crelu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_H2_audio')
A2n = slim.batch_norm(
A2,
center=True,
scale=True,
is_training=is_training,
scope='A2/batch_norm')
logging.info("A2")
logging.info(A2.shape)
b_h1Units = 2048
B1 = slim.fully_connected(
inputBdn, b_h1Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_H1_rgb')
B1n = slim.batch_norm(
B1,
center=True,
scale=True,
is_training=is_training,
scope='B1/batch_norm')
b_h2Units = 2048
B1a = tf.nn.dropout(B1n, layers_keep_probs[1])
logging.info("B1a")
logging.info(B1a.shape)
B2 = slim.fully_connected(
B1a, b_h2Units, activation_fn=tf.nn.crelu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_H2_rgb')
B2n = slim.batch_norm(
B2,
center=True,
scale=True,
is_training=is_training,
scope='B2/batch_norm')
A2na = tf.nn.dropout(A2n, layers_keep_probs[2])
B2na = tf.nn.dropout(B2n, layers_keep_probs[2])
logging.info(A2.shape)
logging.info(B2.shape)
C3 = tf.concat([inputAdn, inputBdn, A2na, B2na], 1)
h3Units = 4096
C3a = slim.fully_connected(
C3, h3Units, activation_fn=tf.nn.crelu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_H3_concat')
C3ad = tf.nn.dropout(C3a, layers_keep_probs[3])
h4Units = 4096
C4a = slim.fully_connected(
        C3ad, h4Units, activation_fn=tf.nn.crelu,  # use the dropped-out C3ad computed above (previously unused)
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_H4_concat')
expert_activations = slim.fully_connected(
C4a,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
final_probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
[-1, vocab_size])
return {"predictions": final_probabilities}
class MoNN4Lw(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
a1Units = 2305*8
A1 = slim.fully_connected(
model_input, a1Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_HA1')
a2Units = 2305
A2 = slim.fully_connected(
A1, a2Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_HA2')
a2Units = 2305*4
A3 = slim.fully_connected(
A2, a2Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_HA3')
a2Units = 2305*2
A4 = slim.fully_connected(
A3, a2Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_HA4')
expert_activations = slim.fully_connected(
A4,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
final_probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
[-1, vocab_size])
return {"predictions": final_probabilities}
class MoNN1Lvw(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
a1Units = 2305*64
A1 = slim.fully_connected(
model_input, a1Units, activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope='FC_HA1')
expert_activations = slim.fully_connected(
A1,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
final_probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
[-1, vocab_size])
return {"predictions": final_probabilities}
| apache-2.0 | 5,253,891,996,855,866,000 | 35.683509 | 92 | 0.608237 | false |
kholia/pyrpm | pyrpm/database/repodb.py | 1 | 39016 | #
# Copyright (C) 2004, 2005 Red Hat, Inc.
# Author: Phil Knirsch, Thomas Woerner, Florian La Roche
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License as published by
# the Free Software Foundation; version 2 only
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
import lists, types
import sys, re, os, os.path, stat
import memorydb
from pyrpm.base import *
from pyrpm.cache import NetworkCache
from comps import RpmCompsXML
import pyrpm.functions as functions
import pyrpm.package as package
import pyrpm.openpgp as openpgp
from pyrpm.logger import log
from pyrpm.io import PyGZIP
if sys.version_info < (2, 5):
import md5
import sha as sha1
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
else:
from hashlib import md5, sha1, sha256
try:
# python-2.5 layout:
from xml.etree.cElementTree import iterparse
except ImportError:
try:
# often older python versions add this to site-packages:
from cElementTree import iterparse
except ImportError:
try:
# maybe the python-only version is available?
from ElementTree import iterparse
except:
            raise ImportError("No ElementTree parser found. Aborting.")
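# Illustrative helper (not used by the RpmRepoDB class below): shows the
# ("start", "end") event stream produced by iterparse that self._parse()
# consumes, without ever building the full XML tree in memory.
def _example_iterparse_events(xml_path):
    for event, elem in iterparse(open(xml_path), events=("start", "end")):
        if event == "end":
            # Real callers inspect elem.tag/elem.attrib here and then clear the
            # element so memory stays flat even for huge metadata files.
            elem.clear()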
class RpmRepoDB(memorydb.RpmMemoryDB):
"""A (mostly) read-only RPM database storage in repodata XML.
This is not a full implementation of Database: notably the file database
is not populated at all."""
# A mapping between strings and RPMSENSE_* comparison flags
flagmap = { 0 : None,
None: 0,
"EQ": RPMSENSE_EQUAL,
"LT": RPMSENSE_LESS,
"GT": RPMSENSE_GREATER,
"LE": RPMSENSE_EQUAL | RPMSENSE_LESS,
"GE": RPMSENSE_EQUAL | RPMSENSE_GREATER,
RPMSENSE_EQUAL: "EQ",
RPMSENSE_LESS: "LT",
RPMSENSE_GREATER: "GT",
RPMSENSE_EQUAL | RPMSENSE_LESS: "LE",
RPMSENSE_EQUAL | RPMSENSE_GREATER: "GE"}
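    # The table above is deliberately bidirectional: repodata XML spells a
    # dependency comparison as "EQ"/"LT"/"GT"/"LE"/"GE" while rpm headers use
    # RPMSENSE_* bit flags, so the same dict translates in both directions
    # (e.g. flagmap["GE"] == RPMSENSE_EQUAL | RPMSENSE_GREATER, and back).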
def __init__(self, config, source, buildroot='', reponame="default", nc=None):
"""Exclude packages matching whitespace-separated excludes. Use
reponame for cache subdirectory name and pkg["yumreponame"].
Load PGP keys from URLs in key_urls."""
memorydb.RpmMemoryDB.__init__(self, config, source, buildroot)
self.reponame = reponame
self.excludes = self.config.excludes[:]
self.mirrorlist = None
self.baseurls = None
self.yumconf = None
self.key_urls = []
if nc:
self.nc = nc
else:
self.nc = NetworkCache([], self.config.cachedir, self.reponame)
if isinstance(source, types.DictType):
found_urls = False
self.yumconf = source
if self.yumconf.has_key("main"):
sec = self.yumconf["main"]
if sec.has_key("exclude"):
self.excludes.extend(sec["exclude"])
sec = self.yumconf[self.reponame]
if sec.has_key("exclude"):
self.excludes.extend(sec["exclude"])
if sec.has_key("gpgkey"):
self.key_urls = sec["gpgkey"]
if sec.has_key("baseurl"):
self.nc.addCache(sec["baseurl"], self.reponame)
found_urls = True
if sec.has_key("mirrorlist"):
self.mirrorlist = sec["mirrorlist"]
found_urls = True
if not found_urls:
raise ValueError, "yum.conf is missing mirrorlist or baseurl parameter"
else:
self.baseurls = source
self.nc.addCache(self.baseurls, self.reponame)
self.repomd = None
self.filelist_imported = 0
# Files included in primary.xml
self._filerc = re.compile('^(.*bin/.*|/etc/.*|/usr/lib/sendmail)$')
self._dirrc = re.compile('^(.*bin/.*|/etc/.*)$')
self.comps = None
def readMirrorList(self):
if not self.is_read and self.mirrorlist and self.yumconf:
fname = self.nc.cache(self.mirrorlist, 1)
if fname:
lines = open(fname).readlines()
os.unlink(fname)
else:
lines = []
for l in lines:
l = l.strip()
l = l.replace("$ARCH", "$basearch")
l = self.yumconf.replaceVars(l)
if l and l[0] != "#":
self.nc.addCache([l,])
def getExcludes(self):
return self.excludes
def getMirrorList(self):
return self.mirrorlist
def isIdentitySave(self):
"""return if package objects that are added are in the db afterwards
(.__contains__() returns True and the object are return from searches)
"""
return False
def readRepoMD(self):
# First we try and read the repomd file as a starting point.
filename = self.nc.cache("repodata/repomd.xml", 1)
if not filename:
log.error("Couldn't open repomd.xml")
return 0
try:
fd = open(filename)
ip = iterparse(fd, events=("start","end"))
ip = iter(ip)
except IOError:
log.error("Couldn't parse repomd.xml")
return 0
# Create our network cache object
self.repomd = self._parse(ip)
return 1
def readComps(self):
# Try to read a comps.xml file if there is any before we parse the
# primary.xml
if self.repomd.has_key("group"):
if not self.repomd["group"].has_key("location"):
log.error("Couldn't find proper location for comps.xml in repomd")
return 0
comps = self.repomd["group"]["location"]
(csum, destfile) = self.nc.checksum(comps, "sha")
if self.repomd["group"].has_key("checksum") and \
csum == self.repomd["group"]["checksum"]:
filename = destfile
else:
filename = self.nc.cache(comps, 1)
if not filename:
return 0
try:
self.comps = RpmCompsXML(self.config, filename)
self.comps.read()
except IOError:
return 0
return 1
def readPrimary(self):
# If we have either a local cache of the primary.xml.gz file or if
        # it is already local (nfs or local file system) we calculate its
# checksum and compare it with the one from repomd. If they are
# the same we don't need to cache it again and can directly use it.
if self.repomd.has_key("primary"):
if not self.repomd["primary"].has_key("location"):
print "Error primary has no location"
return 0
primary = self.repomd["primary"]["location"]
# if self.repomd["primary"].has_key("checksum"):
# (csum, destfile) = self.nc.checksum(primary, "sha")
# csum == self.repomd["primary"]["checksum"]:
# filename = destfile
# else:
filename = self.nc.cache(primary, 1)
if not filename:
print "Error can't find file for primary: "+primary
return 0
try:
fd = PyGZIP(filename)
ip = iterparse(fd, events=("start","end"))
ip = iter(ip)
except IOError:
log.error("Couldn't parse primary.xml")
print "Error parsing primary.xml"
return 0
self._parse(ip)
return 1
def readPGPKeys(self):
for url in self.key_urls:
filename = self.nc.cache(url, 1)
try:
f = file(filename)
key_data = f.read()
f.close()
except Exception, e:
log.error("Error reading GPG key %s: %s", filename, e)
continue
try:
key_data = openpgp.isolateASCIIArmor(key_data)
keys = openpgp.parsePGPKeys(key_data)
except Exception, e:
log.error("Invalid GPG key %s: %s", url, e)
continue
for k in keys:
self.keyring.addKey(k)
return 1
def read(self):
self.readMirrorList()
#self.is_read = 1 # FIXME: write-only
while True:
if not self.readRepoMD():
print "Error self.readRepoMD()"
break
if not self.readComps():
print "Error self.readComps()"
break
if not self.readPrimary():
print "Error self.readPrimary()"
break
if not self.readPGPKeys():
print "Error self.readPGPKeys()"
break
self.is_read = 1 # FIXME: write-only
return 1
return 0
def getNetworkCache(self):
return self.nc
def addPkg(self, pkg):
if self._isExcluded(pkg):
return 0
return memorydb.RpmMemoryDB.addPkg(self, pkg)
def isFilelistImported(self):
return self.filelist_imported
def importFilelist(self):
"""Parse filelists.xml.gz if it was not parsed before.
Return 1 on success, 0 on failure."""
# We need to have successfully read a repo from one source before we
        # can import its filelist.
if not self.is_read:
return 0
if self.filelist_imported:
return 1
# Same as with primary.xml.gz: If we already have a local version and
# it matches the checksum found in repomd then we don't need to
# download it again.
if self.repomd.has_key("filelists"):
if not self.repomd["filelists"].has_key("location"):
return 0
filelists = self.repomd["filelists"]["location"]
(csum, destfile) = self.nc.checksum(filelists, "sha")
if self.repomd["filelists"].has_key("checksum") and \
csum == self.repomd["filelists"]["checksum"]:
filename = destfile
else:
filename = self.nc.cache(filelists, 1)
if not filename:
return 0
try:
fd = PyGZIP(filename)
ip = iterparse(fd, events=("start","end"))
ip = iter(ip)
except IOError:
log.error("Couldn't parse filelists.xml")
return 0
self._parse(ip)
self.filelist_imported = 1
return 1
def createRepo(self):
"""Create repodata metadata for self.source.
Return 1 on success, 0 on failure. Assumes self.source is a local file
system path without schema prefix."""
import gzip, libxml2
log.info1("Pass 1: Parsing package headers for file requires.")
self.__readDir(self.source, "")
filename = functions._uriToFilename(self.source)
datapath = os.path.join(filename, "repodata")
if not os.path.isdir(datapath):
try:
os.makedirs(datapath)
except OSError, e:
log.error("%s: Couldn't create repodata: %s", filename, e)
return 0
try:
pfd = gzip.GzipFile(os.path.join(datapath, "primary.xml.gz"), "wb")
except IOError:
return 0
try:
ffd = gzip.GzipFile(os.path.join(datapath, "filelists.xml.gz"),
"wb")
except IOError:
return 0
try:
            rfd = open(os.path.join(datapath, "repomd.xml"), "w")
except IOError:
return 0
#try:
# ofd = gzip.GzipFile(os.path.join(datapath, "other.xml.gz"), "wb")
#except IOError:
# return 0
pdoc = libxml2.newDoc("1.0")
proot = pdoc.newChild(None, "metadata", None)
fdoc = libxml2.newDoc("1.0")
froot = fdoc.newChild(None, "filelists", None)
#odoc = libxml2.newDoc("1.0")
#oroot = odoc.newChild(None, "filelists", None)
log.info1("Pass 2: Writing repodata information.")
pfd.write('<?xml version="1.0" encoding="UTF-8"?>\n')
pfd.write('<metadata xmlns="http://linux.duke.edu/metadata/common" xmlns:rpm="http://linux.duke.edu/metadata/rpm" packages="%d">\n' % len(self.getPkgs()))
ffd.write('<?xml version="1.0" encoding="UTF-8"?>\n')
ffd.write('<filelists xmlns:rpm="http://linux.duke.edu/filelists" packages="%d">\n' % len(self.getPkgs()))
for pkg in self.getPkgs():
log.info2("Processing complete data of package %s.",
pkg.getNEVRA())
pkg.header_read = 0
try:
pkg.open()
pkg.read()
except (IOError, ValueError), e:
log.warning("%s: %s", pkg.getNEVRA(), e)
continue
# If it is a source rpm change the arch to "src". Only valid
# for createRepo, never do this anywhere else. ;)
if pkg.isSourceRPM():
pkg["arch"] = "src"
try:
checksum = self.__getChecksum(pkg)
except (IOError, NotImplementedError), e:
log.warning("%s: %s", pkg.getNEVRA(), e)
continue
pkg["yumchecksum"] = checksum
self.__writePrimary(pfd, proot, pkg)
self.__writeFilelists(ffd, froot, pkg)
# self.__writeOther(ofd, oroot, pkg)
try:
pkg.close()
except IOError:
pass # Should not happen when opening for reading anyway
pkg.clear()
pfd.write('</metadata>\n')
ffd.write('</filelists>\n')
pfd.close()
ffd.close()
# Write repomd.xml
rfd.write('<?xml version="1.0" encoding="UTF-8"?>\n')
rfd.write('<repomd xmlns="http://linux.duke.edu/metadata/repo">\n')
rfd.write(' <data type="primary">\n')
rfd.write(' <location href="repodata/primary.xml.gz"/>\n')
rfd.write(' </data>\n')
rfd.write(' <data type="filelists">\n')
rfd.write(' <location href="repodata/filelists.xml.gz"/>\n')
rfd.write(' </data>\n')
# how do we know that there is a comps file?
# rfd.write(' <data type="group">\n')
# rfd.write(' <location href="repodata/comps.xml"/>\n')
# rfd.write(' </data>\n')
rfd.write('</repomd>\n')
rfd.close()
return 1
def _matchesFile(self, fname):
return self._filerc.match(fname) or \
self._dirrc.match(fname)
def _parse(self, ip):
"""Parse <package> tags."""
for event, elem in ip:
tag = elem.tag
if event != "start":
continue
if not tag.endswith("}package") and \
not tag.endswith("}repomd"):
continue
if tag.endswith("}repomd"):
return self.__parseRepomd(ip)
props = elem.attrib
if props.get("type") == "rpm":
try:
pkg = self.__parsePackage(ip)
except ValueError, e:
log.warning("%s: %s", ip, e)
continue
pkg.yumrepo = self
if self.comps != None:
if self.comps.hasType(pkg["name"], "mandatory"):
pkg.compstype = "mandatory"
elif self.comps.hasType(pkg["name"], "default"):
pkg.compstype = "default"
elif self.comps.hasType(pkg["name"], "optional"):
pkg.compstype = "optional"
self.addPkg(pkg)
elif props.has_key("name"):
arch = props.get("arch")
if arch == None:
log.warning("%s: missing arch= in <package>",
pkg.getNEVRA())
continue
self.__parseFilelist(ip, props["name"], arch)
elem.clear()
def _isExcluded(self, pkg):
"""Return True if RpmPackage pkg is excluded by configuration."""
if pkg["arch"] == "src":
return 1
if not self.config.ignorearch and \
(not functions.archCompat(pkg["arch"], self.config.machine) or \
(self.config.archlist != None and not pkg["arch"] in self.config.archlist)) and \
not pkg.isSourceRPM():
log.warning("%s: Package excluded because of arch "
"incompatibility", pkg.getNEVRA())
return 1
index = lists.NevraList()
index.addPkg(pkg)
result = index.search(self.excludes)
return bool(result)
def __escape(self, s):
"""Return escaped string converted to UTF-8"""
if s == None:
return ''
s = s.replace("&", "&")
if isinstance(s, unicode):
return s
try:
x = unicode(s, 'ascii')
return s
except UnicodeError:
encodings = ['utf-8', 'iso-8859-1', 'iso-8859-15', 'iso-8859-2']
for enc in encodings:
try:
x = unicode(s, enc)
except UnicodeError:
pass
else:
if x.encode(enc) == s:
return x.encode('utf-8')
newstring = ''
for char in s:
if ord(char) > 127:
newstring = newstring + '?'
else:
newstring = newstring + char
return re.sub("\n$", '', newstring) # FIXME: not done in other returns
def __parseRepomd(self, ip):
"""Parse repomd.xml for SHA1 checks of the files.
Returns a hash of the form:
name -> {location, checksum, timestamp, open-checksum}"""
rethash = {}
# Make local variables for heavy used functions to speed up this loop
tmphash = {}
fname = None
for event, elem in ip:
isend = (event == "end")
props = elem.attrib
tag = elem.tag
if not isend and tag.endswith("}data"):
fname = props.get("type")
if not fname:
break
tmphash = {}
rethash[fname] = tmphash
if not isend:
continue
if tag.endswith("}repomd"):
break
elif tag.endswith("}location"):
loc = props.get("href")
if loc:
tmphash["location"] = loc
elif tag.endswith("}checksum"):
type = props.get("type")
if type != "sha" and type != "sha256":
log.warning("Unsupported checksum type %s in repomd.xml "
"for file %s", type, fname)
continue
tmphash["checksum"] = elem.text
tmphash["checksum.type"] = type
elif tag.endswith("}timestamp"):
tmphash["timestamp"] = elem.text
elif tag.endswith("}open-checksum"):
type = props.get("type")
if type != "sha" and type != "sha256":
log.warning("Unsupported open-checksum type %s in "
"repomd.xml for file %s", type, fname)
continue
tmphash["open-checksum"] = elem.text
tmphash["open-checksum.type"] = type
return rethash
def __parsePackage(self, ip):
"""Parse a package from current <package> tag.
Raise ValueError on invalid data."""
pkg = package.RpmPackage(self.config, "dummy", db = self)
pkg["signature"] = {}
pkg["signature"]["size_in_sig"] = [0,]
pkg.time_file = None
pname = None
pepoch = None
pversion = None
prelease = None
parch = None
excheck = 0
for event, elem in ip:
tag = elem.tag
if tag.endswith("}format"):
self.__parseFormat(ip, pkg)
isend = (event == "end")
if not isend:
continue
props = elem.attrib
if tag.endswith("}package"):
break
elif not excheck and pname != None and pepoch != None and \
pversion != None and prelease != None and parch != None:
excheck = 1
if tag.endswith("}name"):
pname = elem.text
pkg["name"] = pname
elif tag.endswith("}arch"):
parch = elem.text
pkg["arch"] = parch
if parch != "src":
pkg["sourcerpm"] = ""
elif tag.endswith("}version"):
pversion = props.get("ver")
prelease = props.get("rel")
pepoch = props.get("epoch")
if pversion == None or prelease == None or pepoch == None:
raise ValueError, "Missing attributes of <version>"
pepoch = [int(pepoch),]
pkg["version"] = pversion
pkg["release"] = prelease
pkg["epoch"] = pepoch
elif tag.endswith("}checksum"):
type_ = props.get("type")
if type_ == "md5":
pkg["signature"]["md5"] = elem.text
elif type_ == "sha":
pkg["signature"]["sha1header"] = elem.text
else:
raise ValueError, "Wrong or missing type= in <checksum>"
elif tag.endswith("}location"):
href = props.get("href")
if href == None:
raise ValueError, "Missing href= in <location>"
if self.config.nocache:
pkg.source = os.path.join(self.nc.getBaseURL(self.reponame), href)
else:
pkg.source = href
pkg.yumhref = href
elif tag.endswith("}size"):
size_in_sig = props.get("package")
if size_in_sig == None:
raise ValueError, "Missing package= in <size>"
pkg["signature"]["size_in_sig"][0] += int(size_in_sig)
pkg.sizes = props
elif tag.endswith("}time"):
pkg.time_file = props.get('file')
pkg['buildtime'] = props.get('build')
else:
for pkgtag, xmltag in (("summary", "summary"),
("description", "description"),
("url", "url"),
("packager", "packager")):
if not tag.endswith("}%s" % xmltag):
continue
if elem.text == None or elem.text == '\n ':
pkg[pkgtag] = None # fix for empty tags
else:
pkg[pkgtag] = elem.text
else:
continue
break # break while loop if break in for loop
pkg.header_read = 1
pkg["provides"] = pkg.getProvides()
pkg["requires"] = pkg.getRequires()
pkg["obsoletes"] = pkg.getObsoletes()
pkg["conflicts"] = pkg.getConflicts()
pkg["triggers"] = pkg.getTriggers()
# clean up list
for tag in ("provide", "require", "obsolete",
"conflict", "trigger"):
for suffix in ("name", "flags", "version"):
pkg.pop(tag + suffix, None) # remove if set
return pkg
def __parseFilelist(self, ip, pname, arch):
"""Parse a file list from current <package name=pname> tag.
Raise ValueError on invalid data."""
filelist = []
typelist = []
version, release, epoch = None, None, None
for event, elem in ip:
tag = elem.tag
isend = (event == "end")
if not isend:
continue
props = elem.attrib
if tag.endswith("}file"):
filelist.append(elem.text)
typelist.append(props.get("type", "file"))
elif tag.endswith("}version"):
version = props.get("ver")
release = props.get("rel")
epoch = props.get("epoch")
elif tag.endswith("}package"):
break
elem.clear()
if version is None or release is None or epoch is None:
raise ValueError, "Missing version information"
self._addFilesToPkg(pname, epoch, version, release, arch,
filelist, typelist)
def __parseFormat(self, ip, pkg):
"""Parse data from current <format> tag to RpmPackage pkg.
Raise ValueError on invalid input."""
pkg["oldfilenames"] = []
pkg.filetypelist = []
for event, elem in ip:
tag = elem.tag
isend = (event == "end")
props = elem.attrib
if not isend:
for rtag in ("provide", "require", "obsolete", "conflict"):
if not tag.endswith("}%ss" % rtag):
continue
plist = self.__parseDeps(ip, rtag)
(pkg[rtag + 'name'], pkg[rtag + 'flags'],
pkg[rtag + 'version']) = plist
if not isend:
continue
if tag.endswith("}file"):
pkg.filetypelist.append(props.get("type", "file"))
pkg["oldfilenames"].append(elem.text)
elif tag.endswith("}format"):
break
elif tag.endswith("}header-range"):
header_start = props.get("start")
header_end = props.get("end")
if header_start == None or header_end == None:
raise ValueError, "Missing property in <rpm:header_range>"
header_start = int(header_start)
header_end = int(header_end)
pkg["signature"]["size_in_sig"][0] -= header_start
pkg.range_signature = [96, header_start-96]
pkg.range_header = [header_start, header_end-header_start]
pkg.range_payload = [header_end, None]
else:
for rtag in ("license", "sourcerpm",
"vendor", "buildhost", "group"):
if not tag.endswith("}%s" % rtag):
continue
pkg[rtag] = elem.text
def __parseDeps(self, ip, ename):
"""Parse a dependency list from currrent tag ename.
Return [namelist, flaglist, versionlist]. Raise ValueError on invalid
input."""
plist = [[], [], []]
for event, elem in ip:
tag = elem.tag
isend = (event == "end")
if not isend:
continue
if tag.endswith("}%ss" % ename):
break
props = elem.attrib
if tag.endswith("}entry"):
name = props.get("name")
if name == None:
raise ValueError, "Missing name= in <rpm.entry>"
ver = props.get("ver")
flags = props.get("flags")
if props.has_key("pre"):
prereq = RPMSENSE_PREREQ
else:
prereq = 0
if ver == None:
plist[0].append(name)
plist[1].append(prereq)
plist[2].append("")
continue
epoch = props.get("epoch")
rel = props.get("rel")
if epoch != None:
ver = "%s:%s" % (epoch, ver)
if rel != None and \
(rel != "0" or self.__class__.__name__ != "RhnChannelRepoDB"):
ver = "%s-%s" % (ver, rel)
plist[0].append(name)
try:
flags = self.flagmap[flags]
except KeyError:
raise ValueError, "Unknown flags %s" % flags
plist[1].append(flags + prereq)
plist[2].append(ver)
return plist
def _addFilesToPkg(self, pname, epoch, version, release, arch,
filelist, filetypelist):
nevra = "%s-%s:%s-%s.%s" % (pname, epoch, version, release, arch)
pkgs = self.getPkgsByName(pname)
dhash = {}
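        # The loop below rebuilds the compressed RPM file list representation.
        # For a filelist of ["/usr/bin/foo", "/usr/bin/bar", "/etc/baz"] it
        # yields:
        #   dirnames   = ["/usr/bin/", "/etc/"]
        #   dirindexes = [0, 0, 1]
        #   basenames  = ["foo", "bar", "baz"]
        # (entries for the same directory are assumed to be consecutive).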
for pkg in pkgs:
if pkg.getNEVRA() == nevra:
if len(dhash) == 0:
(didx, dnameold) = (-1, None)
(dnames, dindexes, bnames) = ([], [], [])
for f in filelist:
idx = f.rindex("/")
if idx < 0:
raise ValueError, "Couldn't find '/' in filename from filelist"
dname = f[:idx+1]
fname = f[idx+1:]
dhash.setdefault(dname, []).append(fname)
bnames.append(fname)
if dnameold == dname:
dindexes.append(didx)
else:
dnames.append(dname)
didx += 1
dindexes.append(didx)
dnameold = dname
pkg["dirnames"] = dnames
pkg["dirindexes"] = dindexes
pkg["basenames"] = bnames
if pkg.has_key("oldfilenames"):
del pkg["oldfilenames"]
pkg.filetypelist = filetypelist
# get rid of old dirnames, dirindexes and basenames
#if pkg.has_key("dirnames"):
# del pkg["dirnames"]
#if pkg.has_key("dirindexes"):
# del pkg["dirindexes"]
#if pkg.has_key("basenames"):
# del pkg["basenames"]
#pkg["oldfilenames"] = filelist
def __readDir(self, dir, location):
"""Look for non-excluded *.rpm files under dir and add them to
self.pkglist.
dir must be a local file system path. The remote location prefix
corresponding to dir is location. Set pkg["yumlocation"] to the remote
relative path to the package."""
tmplist = []
functions.readDir(dir, tmplist,
("name", "epoch", "version", "release", "arch",
"sourcerpm", "requirename", "requireflags",
"requireversion"))
for pkg in tmplist:
# FIXME: this is done in createRepo too
# If it is a source rpm change the arch to "src". Only valid
# for createRepo, never do this anywhere else. ;)
if pkg.isSourceRPM():
pkg["arch"] = "src"
nevra = pkg.getNEVRA()
log.info2("Adding %s to repo and checking file requires.", nevra)
pkg["yumlocation"] = location+pkg.source[len(dir):]
self.addPkg(pkg)
def __writePrimary(self, fd, parent, pkg):
"""Write primary.xml data about RpmPackage pkg to fd."""
pkg_node = parent.newChild(None, "package", None)
pkg_node.newProp('type', 'rpm')
pkg_node.newChild(None, 'name', pkg['name'])
pkg_node.newChild(None, 'arch', pkg['arch'])
tnode = pkg_node.newChild(None, 'version', None)
if pkg.has_key('epoch'):
tnode.newProp('epoch', str(pkg['epoch'][0]))
else:
tnode.newProp('epoch', '0')
tnode.newProp('ver', pkg['version'])
tnode.newProp('rel', pkg['release'])
tnode = pkg_node.newChild(None, 'checksum', pkg["yumchecksum"])
tnode.newProp('type', self.config.checksum)
tnode.newProp('pkgid', 'YES')
pkg_node.newChild(None, 'summary', self.__escape(pkg['summary'][0]))
pkg_node.newChild(None, 'description', self.__escape(pkg['description'][0]))
pkg_node.newChild(None, 'packager', self.__escape(pkg['packager']))
pkg_node.newChild(None, 'url', self.__escape(pkg['url']))
tnode = pkg_node.newChild(None, 'time', None)
tnode.newProp('file', str(pkg['buildtime'][0]))
tnode.newProp('build', str(pkg['buildtime'][0]))
tnode = pkg_node.newChild(None, 'size', None)
tnode.newProp('package', str(pkg['signature']['size_in_sig'][0]+pkg.range_signature[0]+pkg.range_signature[1]))
tnode.newProp('installed', str(pkg['size'][0]))
tnode.newProp('archive', str(pkg['signature']['payloadsize'][0]))
tnode = pkg_node.newChild(None, 'location', None)
tnode.newProp('href', pkg["yumlocation"])
fnode = pkg_node.newChild(None, 'format', None)
self.__generateFormat(fnode, pkg)
output = pkg_node.serialize('UTF-8', self.config.pretty)
fd.write(output+"\n")
pkg_node.unlinkNode()
pkg_node.freeNode()
del pkg_node
def __writeFilelists(self, fd, parent, pkg):
"""Write primary.xml data about RpmPackage pkg to fd."""
pkg_node = parent.newChild(None, "package", None)
pkg_node.newProp('pkgid', pkg["yumchecksum"])
pkg_node.newProp('name', pkg["name"])
pkg_node.newProp('arch', pkg["arch"])
tnode = pkg_node.newChild(None, 'version', None)
if pkg.has_key('epoch'):
tnode.newProp('epoch', str(pkg['epoch'][0]))
else:
tnode.newProp('epoch', '0')
tnode.newProp('ver', pkg['version'])
tnode.newProp('rel', pkg['release'])
self.__generateFilelist(pkg_node, pkg, 0)
output = pkg_node.serialize('UTF-8', self.config.pretty)
fd.write(output+"\n")
pkg_node.unlinkNode()
pkg_node.freeNode()
del pkg_node
def __getChecksum(self, pkg):
"""Return checksum of package source of RpmPackage pkg.
Raise IOError, NotImplementedError."""
from pyrpm.io import getRpmIOFactory
io = getRpmIOFactory(pkg.source)
if self.config.checksum == "md5":
s = md5.new()
else:
s = sha1.new()
io.updateDigestFromRange(s, 0, None)
return s.hexdigest()
def __generateFormat(self, node, pkg):
"""Add RPM-specific tags for RpmPackage pkg."""
node.newChild(None, 'rpm:license', self.__escape(pkg['license']))
node.newChild(None, 'rpm:vendor', self.__escape(pkg['vendor']))
node.newChild(None, 'rpm:group', self.__escape(pkg['group'][0]))
node.newChild(None, 'rpm:buildhost', self.__escape(pkg['buildhost']))
node.newChild(None, 'rpm:sourcerpm', self.__escape(pkg['sourcerpm']))
tnode = node.newChild(None, 'rpm:header-range', None)
tnode.newProp('start', str(pkg.range_signature[0] + pkg.range_signature[1]))
tnode.newProp('end', str(pkg.range_payload[0]))
if len(pkg["provides"]) > 0:
self.__generateDeps(node, pkg, "provides")
if len(pkg["requires"]) > 0:
self.__generateDeps(node, pkg, "requires")
if len(pkg["conflicts"]) > 0:
self.__generateDeps(node, pkg, "conflicts")
if len(pkg["obsoletes"]) > 0:
self.__generateDeps(node, pkg, "obsoletes")
self.__generateFilelist(node, pkg)
def __generateDeps(self, node, pkg, name):
"""Add RPM-specific dependency info for
RpmPackage pkg dependencies "name"."""
dnode = node.newChild(None, 'rpm:%s' % name, None)
deps = self.__filterDuplicateDeps(pkg[name])
for dep in deps:
enode = dnode.newChild(None, 'rpm:entry', None)
enode.newProp('name', dep[0])
if dep[1] != "":
if (dep[1] & RPMSENSE_SENSEMASK) != 0:
enode.newProp('flags', self.flagmap[dep[1] & RPMSENSE_SENSEMASK])
if isLegacyPreReq(dep[1]) or isInstallPreReq(dep[1]):
enode.newProp('pre', '1')
if dep[2] != "":
e,v,r = functions.evrSplit(dep[2])
enode.newProp('epoch', e)
enode.newProp('ver', v)
if r != "":
enode.newProp('rel', r)
def __generateFilelist(self, node, pkg, filter=1):
"""Add RPM-specific file list for RpmPackage pkg.
Restrict the output to _dirrc/_filerc or known file requires if
filter."""
files = pkg['filenames']
fileflags = pkg['fileflags']
filemodes = pkg['filemodes']
if files == None or fileflags == None or filemodes == None:
return
(writefile, writedir, writeghost) = ([], [], [])
for (fname, mode, flag) in zip(files, filemodes, fileflags):
if stat.S_ISDIR(mode):
if not filter or \
self._dirrc.match(fname):
writedir.append(fname)
elif not filter or \
self._filerc.match(fname):
if flag & RPMFILE_GHOST:
writeghost.append(fname)
else:
writefile.append(fname)
writefile.sort()
for f in writefile:
tnode = node.newChild(None, "file", self.__escape(f))
writedir.sort()
for f in writedir:
tnode = node.newChild(None, "file", self.__escape(f))
tnode.newProp("type", "dir")
writeghost.sort()
for f in writeghost:
tnode = node.newChild(None, "file", self.__escape(f))
tnode.newProp("type", "ghost")
def __filterDuplicateDeps(self, deps):
"""Return the list of (name, flags, release) dependencies deps with
duplicates (when output by __generateDeps ()) removed."""
fdeps = []
for (name, flags, version) in deps:
flags &= RPMSENSE_SENSEMASK | RPMSENSE_PREREQ
if (name, flags, version) not in fdeps:
fdeps.append((name, flags, version))
fdeps.sort()
return fdeps
# vim:ts=4:sw=4:showmatch:expandtab
| gpl-2.0 | -6,778,407,803,479,843,000 | 38.97541 | 162 | 0.509253 | false |
nerevu/riko | riko/modules/typecast.py | 1 | 4059 | # -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
riko.modules.typecast
~~~~~~~~~~~~~~~~~~~~~
Provides functions for casting fields into specific types.
Examples:
basic usage::
>>> from riko.modules.typecast import pipe
>>>
>>> conf = {'type': 'date'}
>>> next(pipe({'content': '5/4/82'}, conf=conf))['typecast']['year']
1982
Attributes:
OPTS (dict): The default pipe options
DEFAULTS (dict): The default parser options
"""
import pygogo as gogo
from . import processor
from riko.utils import cast
OPTS = {'field': 'content'}
DEFAULTS = {'type': 'text'}
logger = gogo.Gogo(__name__, monolog=True).logger
def parser(content, objconf, skip=False, **kwargs):
""" Parsers the pipe content
Args:
content (scalar): The content to cast
objconf (obj): The pipe configuration (an Objectify instance)
skip (bool): Don't parse the content
kwargs (dict): Keyword arguments
Kwargs:
assign (str): Attribute to assign parsed content (default: typecast)
stream (dict): The original item
Returns:
dict: The item
Examples:
>>> from meza.fntools import Objectify
>>>
>>> item = {'content': '1.0'}
>>> objconf = Objectify({'type': 'int'})
>>> kwargs = {'stream': item, 'assign': 'content'}
>>> parser(item['content'], objconf, **kwargs)
1
"""
return kwargs['stream'] if skip else cast(content, objconf.type)
@processor(DEFAULTS, isasync=True, **OPTS)
def async_pipe(*args, **kwargs):
"""A processor module that asynchronously parses a URL into its components.
Args:
item (dict): The entry to process
kwargs (dict): The keyword arguments passed to the wrapper
Kwargs:
conf (dict): The pipe configuration. May contain the key 'type'.
type (str): The object type to cast to (default: text)
assign (str): Attribute to assign parsed content (default: typecast)
field (str): Item attribute to operate on (default: 'content')
Returns:
Deferred: twisted.internet.defer.Deferred item with type casted content
Examples:
>>> from riko.bado import react
>>> from riko.bado.mock import FakeReactor
>>>
>>> def run(reactor):
... callback = lambda x: print(next(x)['typecast'])
... d = async_pipe({'content': '1.0'}, conf={'type': 'int'})
... return d.addCallbacks(callback, logger.error)
>>>
>>> try:
... react(run, _reactor=FakeReactor())
... except SystemExit:
... pass
...
1
"""
return parser(*args, **kwargs)
@processor(DEFAULTS, **OPTS)
def pipe(*args, **kwargs):
"""A processor that parses a URL into its components.
Args:
item (dict): The entry to process
kwargs (dict): The keyword arguments passed to the wrapper
Kwargs:
conf (dict): The pipe configuration. May contain the key 'type'.
type (str): The object type to cast to (default: text)
assign (str): Attribute to assign parsed content (default: typecast)
field (str): Item attribute to operate on (default: 'content')
Yields:
dict: an item with type casted content
Examples:
>>> from datetime import datetime as dt
>>> next(pipe({'content': '1.0'}, conf={'type': 'int'}))['typecast']
1
>>> item = {'content': '5/4/82'}
>>> conf = {'type': 'date'}
>>> date = next(pipe(item, conf=conf, emit=True))['date']
>>> date.isoformat() == '1982-05-04T00:00:00+00:00'
True
>>> item = {'content': dt(1982, 5, 4).timetuple()}
>>> date = next(pipe(item, conf=conf, emit=True))['date']
>>> date.isoformat() == '1982-05-04T00:00:00+00:00'
True
>>> item = {'content': 'False'}
>>> conf = {'type': 'bool'}
>>> next(pipe(item, conf=conf, emit=True))
False
"""
return parser(*args, **kwargs)
| mit | 2,967,422,950,843,804,700 | 29.75 | 79 | 0.573787 | false |
meisamhe/GPLshared | Programming/MPI — AMath 483 583, Spring 2013 1.0 documentation_files/shortest_path_fewest_edges.py | 1 | 5505 | import bintrees
import sys
import random
import collections
# @include
DistanceWithFewestEdges = collections.namedtuple('DistanceWithFewestEdges',
('distance', 'min_num_edges'))
VertexWithDistance = collections.namedtuple('VertexWithDistance',
('vertex', 'distance'))
class GraphVertex:
def __init__(self, id=0):
self.distance_with_fewest_edges = DistanceWithFewestEdges(
float('inf'), 0)
self.edges = []
self.id = id # The id of this vertex.
self.pred = None # The predecessor in the shortest path.
def __lt__(self, other):
if self.distance_with_fewest_edges != other.distance_with_fewest_edges:
return self.distance_with_fewest_edges < other.distance_with_fewest_edges
return self.id < other.id
# @exclude
def __repr__(self):
return 'id=%d,distance_with_fewest_edges=%s,edge=%s' % (
self.id, str(self.distance_with_fewest_edges),
','.join('%s(%d)' % (x.vertex.id, x.distance) for x in self.edges))
# @include
def dijkstra_shortest_path(s, t):
# Initialization of the distance of starting point.
s.distance_with_fewest_edges = DistanceWithFewestEdges(0, 0)
node_set = bintrees.RBTree([(s, None)])
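    # The RBTree keyed by GraphVertex (ordered by distance, then id -- see
    # GraphVertex.__lt__) doubles as an updatable priority queue: pop_min()
    # extracts the closest vertex, and the discard()/insert() pair below is
    # the decrease-key step.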
while node_set:
# Extracts the minimum distance vertex from heap.
u = node_set.pop_min()[0]
if u.id == t.id:
break
# Relax neighboring vertices of u.
for v in u.edges:
v_distance = u.distance_with_fewest_edges.distance + v.distance
v_num_edges = u.distance_with_fewest_edges.min_num_edges + 1
new_distance = DistanceWithFewestEdges(v_distance, v_num_edges)
if v.vertex.distance_with_fewest_edges > new_distance:
node_set.discard(v.vertex)
v.vertex.pred = u
v.vertex.distance_with_fewest_edges = new_distance
node_set.insert(v.vertex, None)
def output_shortest_path(v):
if v:
output_shortest_path(v.pred)
print(v.id, end=' ')
# Outputs the shortest path with fewest edges.
output_shortest_path(t)
# @exclude
# DBH test
def test():
G = [GraphVertex(i) for i in range(9)]
# G[0] is the source node that connects to 8 other nodes.
G[0].edges.append(VertexWithDistance(G[1], 13)) # 0-1
G[1].edges.append(VertexWithDistance(G[0], 13)) # 1-0
G[0].edges.append(VertexWithDistance(G[2], 24)) # 0-2
G[2].edges.append(VertexWithDistance(G[0], 24)) # 2-0
G[0].edges.append(VertexWithDistance(G[3], 28)) # 0-3
G[3].edges.append(VertexWithDistance(G[0], 28)) # 3-0
G[0].edges.append(VertexWithDistance(G[4], 25)) # 0-4
G[4].edges.append(VertexWithDistance(G[0], 25)) # 4-0
G[0].edges.append(VertexWithDistance(G[5], 30)) # 0-5
G[5].edges.append(VertexWithDistance(G[0], 30)) # 5-0
G[0].edges.append(VertexWithDistance(G[6], 31)) # 0-6
G[6].edges.append(VertexWithDistance(G[0], 31)) # 6-0
G[0].edges.append(VertexWithDistance(G[7], 10)) # 0-7
G[7].edges.append(VertexWithDistance(G[0], 10)) # 7-0
G[0].edges.append(VertexWithDistance(G[8], 29)) # 0-8
G[8].edges.append(VertexWithDistance(G[0], 29)) # 8-0
G[1].edges.append(VertexWithDistance(G[8], 7)) # 1-8
G[8].edges.append(VertexWithDistance(G[1], 7)) # 8-1
G[2].edges.append(VertexWithDistance(G[8], 1)) # 2-8
G[8].edges.append(VertexWithDistance(G[2], 1)) # 8-2
G[7].edges.append(VertexWithDistance(G[8], 16)) # 7-8
G[8].edges.append(VertexWithDistance(G[7], 16)) # 8-7
s = 0 # Source is G[0].
t = 2 # Destination is G[2].
# Minimum distance path should be:
# G[0] => G[1] => G[8] => G[2],
# distance is: 13 + 7 + 1 = 21.
dijkstra_shortest_path(G[s], G[t])
print('\nmin distance:', G[t].distance_with_fewest_edges.distance)
assert G[t].distance_with_fewest_edges.distance == 21
print('number of edges:', G[t].distance_with_fewest_edges.min_num_edges)
assert G[t].distance_with_fewest_edges.min_num_edges == 3
def main():
if len(sys.argv) == 2:
n = int(sys.argv[1])
else:
n = random.randint(2, 1000)
G = [GraphVertex(i) for i in range(n)]
m = random.randint(1, n * (n - 1) // 2)
is_edge_exist = [[False] * n for i in range(n)]
# Make the graph as connected.
for i in range(1, n):
length = random.randint(1, 100)
G[i - 1].edges.append(VertexWithDistance(G[i], length))
G[i].edges.append(VertexWithDistance(G[i - 1], length))
is_edge_exist[i - 1][i] = is_edge_exist[i][i - 1] = True
# Generate edges randomly.
m -= (n - 1)
while m > 0:
m -= 1
while True:
a = random.randrange(n)
b = random.randrange(n)
if a != b and is_edge_exist[a][b] == False:
break
is_edge_exist[a][b] = is_edge_exist[b][a] = True
length = random.randint(1, 100)
G[a].edges.append(VertexWithDistance(G[b], length))
G[b].edges.append(VertexWithDistance(G[a], length))
s = random.randrange(n)
t = random.randrange(n)
print('source = %s, terminal = %s' % (s, t))
dijkstra_shortest_path(G[s], G[t])
print()
print(G[t].distance_with_fewest_edges.distance,
G[t].distance_with_fewest_edges.min_num_edges)
test()
if __name__ == '__main__':
main()
| gpl-3.0 | 8,504,607,108,654,241,000 | 33.622642 | 85 | 0.588374 | false |
paulproteus/tag-sandstorm-tarballs | script.py | 1 | 2150 | #!/usr/bin/python
import datetime
import email.utils
import os
import glob
import pytz
import re
import subprocess
import sys
import tempfile
DRY_RUN = False
if os.environ.get('DRY_RUN', ''):
DRY_RUN = True
def handle_one(tar_filename):
# We're already in a tmpdir, yay. But there might be other
# people's stuff here. So let's make a new tmpdir within the
# current one, and 'cd' in.
new_place = tempfile.mkdtemp(dir=os.getcwd())
os.chdir(new_place)
# Uncompress thing, then throw away output.
subprocess.check_output(['tar', 'xvf', tar_filename])
# Grab the commit ID.
revision_filenames = glob.glob('*/git-revision')
assert len(revision_filenames) == 1
revision_filename = revision_filenames[0]
revision = open(revision_filename).read().strip()
# Make up a nice tag name.
number = re.search(r'sandstorm-(\d+).tar.xz',
tar_filename).group(1)
tag_name = 'v0.%s' % (number,)
make_branch = ['git', 'tag', tag_name, revision, '-m', 'Release %s' % (tag_name,)]
print ' '.join(make_branch)
env = os.environ.copy()
env['GIT_COMMITTER_DATE'] = filename2date(revision_filename)
if not DRY_RUN:
subprocess.check_output(make_branch,
cwd='/home/paulproteus/projects/sandstorm',
env=env,
)
def main():
tar_filenames = [os.path.abspath(f) for f in sys.argv[1:]]
# sanity-check
for f in tar_filenames:
assert os.path.exists(f)
# Print a tmpdir and let the person running the script remove it.
tmpdir = tempfile.mkdtemp()
print 'Created', tmpdir
# Uncompress it, etc.
for tar_filename in tar_filenames:
os.chdir(tmpdir)
handle_one(tar_filename)
def filename2date(f):
mtime_float = os.stat(f).st_mtime
# Localize to Pacific
pacific = pytz.timezone('US/Pacific')
fmt = '%Y-%m-%d %H:%M:%S %Z%z'
utc_dt = pytz.utc.localize(datetime.datetime.utcfromtimestamp(mtime_float))
local_dt = utc_dt.astimezone(pacific)
return local_dt.strftime(fmt)
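# filename2date() produces a string such as "2014-03-01 12:34:56 PST-0800"
# (illustrative value), which is what gets passed to GIT_COMMITTER_DATE above.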
if __name__ == '__main__':
main()
| apache-2.0 | -4,667,197,656,315,441,000 | 26.21519 | 86 | 0.62186 | false |
shanot/imp | modules/domino/test/test_nested_states.py | 2 | 2397 | from __future__ import print_function
import IMP
import IMP.test
import IMP.core
import IMP.algebra
import IMP.atom
import IMP.domino
class Tests(IMP.test.TestCase):
"""Tests for nested RigidBody function"""
def test_nested2(self):
"""Test nested with transformations"""
mdl = IMP.Model()
mhs = []
rbs = []
orig_rfs = []
for i in range(3):
mhs.append(
IMP.atom.read_pdb(
self.get_input_file_name(
"small_protein.pdb"),
mdl))
mhs[-1].set_name("prot" + str(i))
rb = IMP.atom.create_rigid_body(mhs[-1])
print(rb.get_name())
rbs.append(rb)
orig_rfs.append(rb.get_reference_frame())
# set the nesting
for i in range(2):
rbs[i].add_member(rbs[i + 1])
# set domino states
translation = IMP.algebra.Transformation3D(
IMP.algebra.Rotation3D(1., 0., 0., 0.), IMP.algebra.Vector3D(-5., 0., 0.))
root_rf = rbs[0].get_reference_frame()
root_to_global = root_rf.get_transformation_from()
global_to_root = root_rf.get_transformation_to()
pst = IMP.domino.ParticleStatesTable()
for i, rb in enumerate(rbs[1:]):
rb_father = rbs[i]
states = IMP.domino.NestedRigidBodyStates(
[IMP.algebra.get_transformation_from_first_to_second(
rb.get_reference_frame(),
rb_father.get_reference_frame()) * translation])
pst.set_particle_states(rb, states)
# set states to the root
pst.set_particle_states(
rbs[0],
IMP.domino.RigidBodyStates([rbs[0].get_reference_frame()]))
print("sample")
s = IMP.domino.DominoSampler(mdl, pst)
s.set_restraints([])
cs = s.create_sample()
print("number of configurations", cs.get_number_of_configurations())
# TODO - check that the transformations are correct
cs.load_configuration(0)
for i, rb in enumerate(rbs[1:]):
rmsd = IMP.atom.get_rmsd(
IMP.core.XYZs(IMP.core.get_leaves(mhs[0])),
IMP.core.XYZs(IMP.core.get_leaves(mhs[i + 1])))
self.assertAlmostEqual(rmsd, 5 * (i + 1), delta=.05)
if __name__ == '__main__':
IMP.test.main()
| gpl-3.0 | -5,375,942,305,469,811,000 | 35.318182 | 86 | 0.546934 | false |
samdroid-apps/something-for-reddit | redditisgtk/subentry.py | 1 | 11419 | # Copyright 2016 Sam Parkinson <[email protected]>
#
# This file is part of Something for Reddit.
#
# Something for Reddit is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Something for Reddit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Something for Reddit. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject
from redditisgtk.api import (RedditAPI, PREPEND_SUBS,
SPECIAL_SUBS, SORTING_TIMES)
SORTINGS = [
'hot', 'new', 'random', 'top?t=all', 'controversial?t=all'
]
def clean_sub(sub):
'''
Normalize paths to have a leading slash and no trailing slash, like
/hello/world
And normalize /u/ -> /user/
'''
if sub.startswith('http://') or sub.startswith('https://'):
return sub
if sub.endswith('/'):
sub = sub[:-1]
if not sub.startswith('/'):
sub = '/' + sub
if sub.startswith('/u/'):
sub = '/user/' + sub[len('/u/'):]
return sub
def format_sub_for_api(sub):
sub = clean_sub(sub)
empty, *parts = sub.split('/')
if len(parts) == 2 and parts[0] == 'user': # /user/name
parts.append('overview')
if len(parts) == 2 and parts[0] == 'r': # /r/name
parts.append('hot')
if len(parts) == 1 and not parts[0]: # / --> /hot
parts[0] = 'hot'
return '/' + '/'.join(parts)
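# Quick reference for the two helpers above (doctest-style; the values follow
# directly from the normalization rules):
#   >>> clean_sub('r/python/')
#   '/r/python'
#   >>> clean_sub('/u/someone')
#   '/user/someone'
#   >>> format_sub_for_api('/r/python')
#   '/r/python/hot'
#   >>> format_sub_for_api('')
#   '/hot'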
class SubEntry(Gtk.Box):
'''
The thing that goes in the middle of the header bar, and
shows the current subreddit
'''
activate = GObject.Signal('reddit-activate', arg_types=[str])
escape_me = GObject.Signal('escape-me')
def __init__(self, api: RedditAPI, text='/r/all'):
Gtk.Box.__init__(self, orientation=Gtk.Orientation.HORIZONTAL)
self.get_style_context().add_class('linked')
self._entry = Gtk.Entry(text=text)
self._entry.connect('event', self.__event_cb)
self._entry.connect('changed', self.__changed_cb)
self._entry.connect('activate', self.__activate_cb)
self._entry.connect('focus-in-event', self.__focus_in_event_cb)
self._entry.connect('focus-out-event', self.__focus_out_event_cb)
self._entry.set_size_request(300, 0)
self.add(self._entry)
self._entry.show()
self._palette = _ListPalette(api, self)
self._palette.selected.connect(self.__selected_cb)
show_palette = Gtk.MenuButton(popover=self._palette)
show_palette.connect('toggled', self.__show_palette_toggled_cb)
self.add(show_palette)
show_palette.show()
def focus(self):
self._entry.grab_focus()
# When the entry is unfocused, we should make the popover behave in a
# normal way. When it is focused, we make it not modal so that it can
# behave as a suggestions list
def __focus_in_event_cb(self, entry, event):
self._palette.props.modal = False
def __focus_out_event_cb(self, entry, event):
self._palette.props.modal = True
def __event_cb(self, entry, event):
if event.type != Gdk.EventType.KEY_PRESS:
return
if event.keyval == Gdk.KEY_Down:
if self._palette.props.visible:
self._palette.grab_focus()
else:
self._show_palette()
return True
if event.keyval == Gdk.KEY_Escape:
self.escape_me.emit()
def __changed_cb(self, entry):
if entry.is_focus():
self._palette.popup()
self._palette.set_filter(self.current_location)
entry.grab_focus_without_selecting()
def __show_palette_toggled_cb(self, button):
if button.props.active:
# When the user clicks on the button, ensure the palette is empty
self._palette.set_filter(None)
def _show_palette(self):
self._palette.set_filter(None)
self._palette.popup()
def __selected_cb(self, palette, sub):
self._entry.props.text = sub
self.__activate_cb()
def goto(self, sub):
self._entry.props.text = sub
@property
def current_location(self):
return clean_sub(self._entry.props.text)
def __activate_cb(self, entry=None):
text = self._entry.props.text
if text.startswith('http://') or text.startswith('https://'):
self.get_toplevel().goto_reddit_uri(text)
else:
formatted = format_sub_for_api(self._entry.props.text)
self.activate.emit(formatted)
self._palette.popdown()
# If we don't override the selection, the whole text will be selected
# This is confusing - as it makes the entry look :focused
p = len(self._entry.props.text)
self._entry.select_region(p, p)
class VScrollingPopover(Gtk.Popover):
def __init__(self, **kwargs):
Gtk.Popover.__init__(self, vexpand=True, **kwargs)
self._sw = Gtk.ScrolledWindow(
hscrollbar_policy=Gtk.PolicyType.NEVER)
self.add(self._sw)
self._sw.show()
def set_scrolled_child(self, child):
'''
Sets the child of the scrolled window.
Destroys any child if it is already in the scrolled window
'''
c = self._sw.get_child()
if c is not None:
self._sw.remove(c)
c.destroy()
self._sw.add(child)
class _ListPalette(VScrollingPopover):
'''
A nice list of subreddits with headers for different sections
'''
selected = GObject.Signal('selected', arg_types=[str])
def __init__(self, api: RedditAPI, parent, **kwargs):
VScrollingPopover.__init__(self, **kwargs)
self.get_style_context().add_class('subentry-palette')
self._parent = parent
self._filter = None
self._api = api
self._api.subs_changed.connect(self.__changed_cb)
self._api.user_changed.connect(self.__changed_cb)
self._rebuild()
def __changed_cb(self, caller, *args):
self._rebuild()
def set_filter(self, filter):
if filter is not None:
if filter.startswith('https://') or filter.startswith('http://'):
self._show_open_uri(filter)
return
if not filter:
filter = None
else:
filter = clean_sub(filter)
self._filter = filter
self._rebuild()
def _do_filter(self, sub_list):
if self._filter is None:
yield from sub_list
return
for sub in sub_list:
if sub.lower().startswith(self._filter.lower()):
yield sub
def _show_open_uri(self, uri):
button = Gtk.Button(label='Open this reddit.com URI')
button.connect('clicked', self.__open_reddit_uri_cb, uri)
self.set_scrolled_child(button)
button.show()
def __open_reddit_uri_cb(self, button, uri):
self.get_toplevel().goto_reddit_uri(uri)
self.hide()
def _rebuild(self):
self._box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
self.set_scrolled_child(self._box)
self._box.show()
self._add_subs(self._do_filter(PREPEND_SUBS))
# If the user is typing for a subreddit, show them suggestions
if self._filter:
empty, *filter_parts = self._filter.split('/')
# Only show suggestions if it is /r/aaa, not /r/aaa/sort
if filter_parts[0] in ('', 'r') and len(filter_parts) <= 2:
subs = list(self._do_filter(self._api.user_subs))
if subs:
self._add_header('Searching Subscribed')
self._add_subs(subs)
# Show sorting suggestions
current_location = self._parent.current_location
if current_location.startswith('/r/'):
by_slash = current_location.split('/')
name = by_slash[2] # get the /r/[thing]/whatever part
self._add_header('Sorting')
self._add_subs([
'/r/{}/{}'.format(name, x)
for x in ['hot', 'new', 'random']
])
for x in ['top', 'controversial']:
self._add_expander_sub('/r/{}/{}'.format(name, x))
# If there is no filter, show the subscribed subreddits
if not self._filter:
self._add_header('Subscribed')
self._add_subs(self._api.user_subs)
# Show user related stuff last
# This should end up first if you are typing /u/...
user_name = None
if self._filter is not None:
empty, *filter_parts = self._filter.split('/')
if self._filter.startswith('/user'):
user_name = self._filter.split('/')[2]
else:
user_name = self._api.user_name
if user_name is not None:
self._add_header('Profile')
self._add_subs((x.replace('USER', user_name)
for x in SPECIAL_SUBS))
def _add_header(self, header):
l = Gtk.Label(xalign=0, justify=Gtk.Justification.LEFT)
l.get_style_context().add_class('header')
l.set_markup('<b>{}</b>'.format(header))
self._box.add(l)
l.show()
def _add_subs(self, subs, to=None):
for sub in subs:
b = Gtk.Button(label=sub, xalign=0)
b.get_style_context().add_class('full-width')
b.connect('clicked', self.__sub_button_clicked)
if to is None:
self._box.add(b)
else:
to.add(b)
b.show()
def __sub_button_clicked(self, button):
self.selected.emit(button.props.label)
def _add_expander_sub(self, sub):
btn_content = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
btn_content.add(Gtk.Label(label=sub, xalign=0))
btn_content.add(Gtk.Image.new_from_icon_name('pan-down-symbolic',
Gtk.IconSize.BUTTON))
btn_content.show_all()
btn = Gtk.ToggleButton(xalign=0)
btn.add(btn_content)
btn.get_style_context().add_class('full-width')
btn.get_style_context().add_class('subentry-revealer-button')
# we want to pretend it is a normal button, but this class would get
# removed by GTK when we added the custom box content
btn.get_style_context().add_class('text-button')
revealer = Gtk.Revealer(
transition_type=Gtk.RevealerTransitionType.SLIDE_DOWN)
box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
revealer.add(box)
self._add_subs(('{}?t={}'.format(sub, x) for x in SORTING_TIMES),
to=box)
btn.connect('toggled', self.__sub_expander_toggled_cb, revealer)
self._box.add(btn)
self._box.add(revealer)
btn.show()
revealer.show_all()
def __sub_expander_toggled_cb(self, button, revealer):
revealer.props.reveal_child = button.props.active
| gpl-3.0 | 2,170,667,786,260,079,000 | 32.585294 | 79 | 0.584815 | false |
fuziontech/pgshovel | tests/pgshovel/streams/sequences.py | 1 | 2887 | import uuid
import pytest
from pgshovel.interfaces.common_pb2 import Timestamp
from pgshovel.interfaces.streams_pb2 import (
Header,
Message,
)
from pgshovel.streams.sequences import (
InvalidPublisher,
InvalidSequenceStartError,
RepeatedSequenceError,
SequencingError,
validate,
)
timestamp = Timestamp(seconds=0, nanos=0)
def build_header(sequence, publisher=uuid.uuid1().bytes, timestamp=timestamp):
return Header(
publisher=publisher,
sequence=sequence,
timestamp=timestamp,
)
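# Note: the default ``publisher`` above is evaluated once at import time, so
# every header built without an explicit publisher shares the same id -- which
# is what the single-publisher tests below rely on.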
def test_simple_sequence():
messages = [
Message(header=build_header(0)),
Message(header=build_header(1)),
Message(header=build_header(2)),
]
stream = validate(messages)
assert list(stream) == messages
def test_incorrect_sequence_start():
messages = [
Message(header=build_header(1)),
]
stream = validate(messages)
with pytest.raises(InvalidSequenceStartError):
next(stream)
def test_invalid_multiplexed_sequence():
messages = [
Message(header=build_header(0, publisher='a')),
Message(header=build_header(1, publisher='a')),
Message(header=build_header(0, publisher='b')),
Message(header=build_header(2, publisher='a')),
]
stream = validate(messages)
assert next(stream) is messages[0]
assert next(stream) is messages[1]
assert next(stream) is messages[2]
with pytest.raises(InvalidPublisher):
next(stream)
def test_missing_message():
messages = [
Message(header=build_header(0)),
Message(header=build_header(2)),
]
stream = validate(messages)
assert next(stream) is messages[0]
with pytest.raises(SequencingError):
next(stream)
def test_out_of_order_message():
messages = [
Message(header=build_header(0)),
Message(header=build_header(1)),
Message(header=build_header(2)),
Message(header=build_header(1)),
]
stream = validate(messages)
assert next(stream) is messages[0]
assert next(stream) is messages[1]
assert next(stream) is messages[2]
with pytest.raises(SequencingError):
next(stream)
def test_duplicate_message():
messages = [
Message(header=build_header(0)),
Message(header=build_header(1)),
Message(header=build_header(1)),
Message(header=build_header(2)),
]
stream = validate(messages)
assert list(stream) == [messages[0], messages[1], messages[3]]
def test_repeated_sequence():
messages = [
Message(header=build_header(0, timestamp=Timestamp(seconds=0, nanos=0))),
Message(header=build_header(0, timestamp=Timestamp(seconds=1, nanos=0))),
]
stream = validate(messages)
assert next(stream) is messages[0]
with pytest.raises(RepeatedSequenceError):
next(stream)
| apache-2.0 | -3,564,747,311,229,026,300 | 23.260504 | 81 | 0.654659 | false |
josircg/raizcidadanista | raizcidadanista/forum/migrations/0005_auto__add_unique_grupousuario_grupo_usuario.py | 1 | 8110 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'GrupoUsuario', fields ['grupo', 'usuario']
db.create_unique('forum_grupousuario', ['grupo_id', 'usuario_id'])
def backwards(self, orm):
# Removing unique constraint on 'GrupoUsuario', fields ['grupo', 'usuario']
db.delete_unique('forum_grupousuario', ['grupo_id', 'usuario_id'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'forum.conversa': {
'Meta': {'object_name': 'Conversa'},
'arquivo': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'autor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'conversa_pai': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.Conversa']", 'null': 'True', 'blank': 'True'}),
'dt_criacao': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'texto': ('django.db.models.fields.TextField', [], {}),
'topico': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.Topico']"})
},
'forum.conversacurtida': {
'Meta': {'object_name': 'ConversaCurtida'},
'colaborador': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'conversa': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.Conversa']"}),
'curtida': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'forum.grupo': {
'Meta': {'object_name': 'Grupo'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nome': ('django.db.models.fields.CharField', [], {'max_length': '60'})
},
'forum.grupousuario': {
'Meta': {'unique_together': "(('grupo', 'usuario'),)", 'object_name': 'GrupoUsuario'},
'grupo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.Grupo']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'usuario': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'forum.proposta': {
'Meta': {'object_name': 'Proposta', '_ormbases': ['forum.Conversa']},
'conversa_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['forum.Conversa']", 'unique': 'True', 'primary_key': 'True'}),
'dt_encerramento': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'forum.topico': {
'Meta': {'object_name': 'Topico'},
'criador': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'dt_criacao': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dt_ultima_atualizacao': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'grupo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.Grupo']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'titulo': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'visitacoes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'forum.topicoouvinte': {
'Meta': {'object_name': 'TopicoOuvinte'},
'dtentrada': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notificacao': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'ouvinte': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'topico': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.Topico']"})
},
'forum.voto': {
'Meta': {'object_name': 'Voto'},
'eleitor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'proposta': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forum.Proposta']"}),
'voto': ('django.db.models.fields.CharField', [], {'max_length': '1'})
}
}
complete_apps = ['forum'] | gpl-3.0 | -7,466,070,325,261,934,000 | 67.159664 | 182 | 0.545746 | false |
alpha-rain/zeromon | util/networkUtil.py | 1 | 2991 | import datetime
import psutil
# import cpuinfo
import platform
import json
import re
# import tojson
def info():
jsondata = '"network":{"info":{'
jsondata += '},"usage":{'
jsondata += networkConnectionsInfo()
jsondata += '}}'
return jsondata
def networkConnectionsInfo():
networkConnections = psutil.net_connections()
jsondata = toJsonArray(networkConnections, 'network-connections', True, True);
return jsondata
def tupletojson(fields,dataString = False,isnumeric = False):
jsondata = '['
fieldsLength = len(fields);
fieldCount = 0
for field in fields:
if(dataString and (isnumeric == False)):
jsondata += '"'
elif(isnumeric):
if(isinstance(field,float) == False) and (isinstance(field,int) == False):
jsondata += '"'
jsondata +=str(field)
if(dataString and (isnumeric == False)):
jsondata += '"'
elif(isnumeric):
if(isinstance(field,float) == False) and (isinstance(field,int) == False):
jsondata += '"'
fieldCount +=1
if fieldsLength != fieldCount:
jsondata += ','
jsondata += ']'
return jsondata
def toJson(fields,data,dataString = False,isnumeric = False):
jsondata = ''
fieldsLength = len(fields);
fieldCount = 0
for field in fields:
jsondata += '"'
jsondata += field
jsondata += '":'
# print( type(data[fieldCount]))
if(isinstance(data[fieldCount], tuple)):
jsondata += tupletojson(data[fieldCount],dataString,isnumeric)
else:
if(dataString and (isnumeric == False)):
jsondata += '"'
elif(isnumeric):
tests = str(data[fieldCount]).isnumeric()
# if(unicode(str(data[fieldCount])).isnumeric() == False):
if(isinstance(data[fieldCount],float) == False) and (isinstance(data[fieldCount],int) == False):
jsondata += '"'
if(field == 'family'):
jsondata += str(data[fieldCount].real)
else:
jsondata += str(data[fieldCount])
if(dataString and (isnumeric == False)):
jsondata += '"'
elif(isnumeric):
if(isinstance(data[fieldCount],float) == False) and (isinstance(data[fieldCount],int) == False):
jsondata += '"'
fieldCount +=1
if fieldsLength != fieldCount:
jsondata += ','
return jsondata
def toJsonArray(datas, name, dataString=False, isnumeric=False):
    """Serialize a list of named tuples as '"name":[{...},{...}]'."""
    jsondata = '"'
    jsondata += name
    jsondata += '":['
    dataLength = len(datas)
    dataCount = 0
    for data in datas:
        jsondata += '{'
        jsondata += toJson(data._fields, data, dataString, isnumeric)
        # close each object as it is written so an empty list still yields '[]'
        jsondata += '}'
        dataCount += 1
        if dataLength != dataCount:
            jsondata += ','
    jsondata += ']'
    return jsondata | lgpl-3.0 | 3,878,571,183,246,361,000 | 31.879121 | 112 | 0.554998 | false |
ml-slac/deep-jets | viz/performance.py | 2 | 4161 | '''
performance.py
author: Luke de Oliveira ([email protected])
Usage:
>>> weights = np.ones(n_samples)
>>> # -- going to match bkg to signal
>>> weights[signal == True] = get_weights(sig_pt, bkg_pt)
>>> discs = {}
>>> add_curve(r'\tau_{32}', 'red', calculate_roc(signal, tau_32, weights=weights), discs)
>>> fg = ROC_plotter(discs)
>>> fg.savefig('myroc.pdf')
'''
import numpy as np
import matplotlib.pyplot as plt
def get_weights(target, actual, bins=10, cap=10, match=True):
    '''
    re-weights an actual distribution to a target.
    Args:
        target (array/list): observations drawn from target distribution
        actual (array/list): observations drawn from distribution to
            match to the target.
        bins (numeric or list/array of numerics): bins to use to do weighting
        cap (numeric): maximum weight value.
        match (bool): whether to make the sum of weights in actual equal to the
            number of samples in target
    Returns:
        numpy.array: returns array of shape len(actual).
    '''
    target_counts, target_bins = np.histogram(target, bins=bins)
    counts, _ = np.histogram(actual, bins=target_bins)
    counts = (1.0 * counts)
    counts = np.array([max(a, 0.0001) for a in counts])
    multiplier = np.array((target_counts / counts).tolist() + [1.0])
    weights = np.array([min(multiplier[target_bins.searchsorted(point) - 1], cap) for point in actual])
    # weights = np.array([target_bins.searchsorted(point) for point in actual])
    if match:
        weights *= (len(target) / np.sum(weights))
    return weights
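# Minimal sketch of get_weights in action (synthetic, assumed inputs -- not from
# the original module): match a softer background pT spectrum to a harder signal
# spectrum and check the basic invariants.
#
#   >>> sig_pt = np.random.exponential(100., 5000)
#   >>> bkg_pt = np.random.exponential(60., 20000)
#   >>> w = get_weights(sig_pt, bkg_pt, bins=25, cap=10)
#   >>> w.shape == bkg_pt.shape          # one weight per background observation
#   True
#   >>> round(w.sum()) == len(sig_pt)    # match=True rescales the total weight
#   True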
def calculate_roc(labels, discriminant, weights=None, bins=2000):
    '''
    makes a weighted ROC curve
    Args:
        labels (numpy.array): an array of 1/0 representing signal/background
        discriminant (numpy.array): an array that represents the discriminant
        weights: sample weights for each point.
            assert(weights.shape == discriminant.shape)
        bins: binning to use -- can be an int or a list/array of bins.
    Returns:
        tuple: (signal_efficiency, background_rejection) where each are arrays
    '''
    sig_ind = labels == 1
    bkg_ind = labels == 0
    if weights is None:
        bkg_total = np.sum(labels == 0)
        sig_total = np.sum(labels == 1)
    else:
        bkg_total = np.sum(weights[bkg_ind])
        sig_total = np.sum(weights[sig_ind])
    discriminant_bins = np.linspace(np.min(discriminant), np.max(discriminant), bins)
    if weights is None:
        sig, _ = np.histogram(discriminant[sig_ind], discriminant_bins)
        bkd, _ = np.histogram(discriminant[bkg_ind], discriminant_bins)
    else:
        sig, _ = np.histogram(discriminant[sig_ind], discriminant_bins, weights = weights[sig_ind])
        bkd, _ = np.histogram(discriminant[bkg_ind], discriminant_bins, weights = weights[bkg_ind])
    sig_eff = np.add.accumulate(sig[::-1]) / float(sig_total)
    bkg_rej = 1 / (np.add.accumulate(bkd[::-1]) / float(bkg_total))
    return sig_eff, bkg_rej
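# Hedged usage sketch (variable names and the weighting convention are
# assumptions, not from the original module): one reasonable way to combine
# calculate_roc with get_weights is to re-weight background pT to the signal shape.
#
#   >>> weights = np.ones(len(labels))
#   >>> weights[labels == 0] = get_weights(sig_pt, bkg_pt)
#   >>> eff, rej = calculate_roc(labels, tau_32, weights=weights, bins=2000)
#   >>> len(eff) == len(rej) == 1999     # 2000 bin edges give 1999 bins
#   True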
def ROC_plotter(curves, min_eff = 0, max_eff = 1, linewidth = 1.4,
                pp = False, signal = r"$Z\rightarrow t\bar{t}$", background = "QCD",
                title = "Jet Image Tagging Comparison", logscale = True, ymax=10**4, ymin=1):
    fig = plt.figure(figsize=(11.69, 8.27), dpi=100)
    ax = fig.add_subplot(111)
    plt.xlim(min_eff,max_eff)
    plt.grid(b = True, which = 'minor')
    plt.grid(b = True, which = 'major')
    max_ = 0
    for tagger, data in curves.items():
        sel = (data['efficiency'] >= min_eff) & (data['efficiency'] <= max_eff)
        if np.max(data['rejection'][sel]) > max_:
            max_ = np.max(data['rejection'][sel])
        plt.plot(data['efficiency'][sel], data['rejection'][sel], '-', label = r''+tagger, color = data['color'], linewidth=linewidth)
    ax = plt.subplot(1,1,1)
    for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()):
        item.set_fontsize(20)
    if logscale == True:
        plt.ylim(ymin,ymax)
        ax.set_yscale('log')
    ax.set_xlabel(r'$\epsilon_{\mathrm{signal}}$')
    ax.set_ylabel(r"$1 / \epsilon_{\mathrm{bkg}}$")
    plt.legend()
    plt.title(r''+title)
    if pp:
        pp.savefig(fig)
    else:
        plt.show()
    return fig
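# Note (assumption, not from the original code): `pp` is duck-typed -- anything
# with a savefig(fig) method works, e.g. a matplotlib PdfPages object:
#
#   from matplotlib.backends.backend_pdf import PdfPages
#   with PdfPages('roc_curves.pdf') as pp:
#       ROC_plotter(discs, pp=pp)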
def add_curve(name, color, curve_pair, dictref):
    dictref.update(
        {
            name : {
                'efficiency' : curve_pair[0],
                'rejection' : curve_pair[1],
                'color' : color
            }
        }
    )
| mit | -7,226,643,947,570,281,000 | 27.114865 | 128 | 0.66883 | false |