repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
stringlengths 5–100 | stringlengths 4–299 | stringclasses 990 values | stringlengths 4–7 | stringlengths 666–1.03M | stringclasses 15 values | int64 -9,223,351,895,964,839,000 to 9,223,297,778B | float64 3.17–100 | int64 7–1k | float64 0.25–0.98 | bool (1 class)
---|---|---|---|---|---|---|---|---|---|---|
jereze/scikit-learn | sklearn/preprocessing/data.py | 68 | 57385 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
DEPRECATION_MSG_1D = (
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample."
)
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
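# --- Editor's illustration (not part of the original scikit-learn source). ---
# A minimal sketch of what `_handle_zeros_in_scale` guarantees: a zero scale
# (constant feature) is replaced by 1.0 so later divisions are no-ops instead
# of producing NaNs. The numbers are made up for demonstration.
def _example_handle_zeros_in_scale():
    assert _handle_zeros_in_scale(0) == 1.0
    scales = _handle_zeros_in_scale(np.array([2.0, 0.0]))
    assert np.array_equal(scales, np.array([2.0, 1.0]))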
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
        set to False to perform inplace scaling and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
        # broadcasting on the axis in which we are interested
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
                # subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
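# --- Editor's usage sketch (not part of the original scikit-learn source). ---
# Shows the intended behaviour of `scale` on a small dense array with made-up
# values: every column ends up with (approximately) zero mean and unit variance.
def _example_scale_usage():
    X = np.array([[1., -1., 2.],
                  [2., 0., 0.],
                  [0., 1., -1.]])
    X_scaled = scale(X)
    assert np.allclose(X_scaled.mean(axis=0), 0.0)
    assert np.allclose(X_scaled.std(axis=0), 1.0)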
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
    that it is in the given range on the training set, e.g. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
        Set to False to perform inplace scaling and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
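# --- Editor's usage sketch (not part of the original scikit-learn source). ---
# Fits a MinMaxScaler on a tiny made-up training set, checks that every
# feature is mapped onto the requested [0, 1] range, and that
# `inverse_transform` recovers the original values.
def _example_minmax_scaler_usage():
    X_train = np.array([[1., 10.], [2., 20.], [3., 30.]])
    mms = MinMaxScaler(feature_range=(0, 1))
    X_scaled = mms.fit_transform(X_train)
    assert np.allclose(X_scaled.min(axis=0), 0.0)
    assert np.allclose(X_scaled.max(axis=0), 1.0)
    assert np.allclose(mms.inverse_transform(X_scaled), X_train)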
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
    that it is in the given range on the training set, e.g. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
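# --- Editor's usage sketch (not part of the original scikit-learn source). ---
# Fit on training data, then reuse the learned mean/std on new samples; the
# values are made up for illustration only.
def _example_standard_scaler_usage():
    X_train = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
    ss = StandardScaler().fit(X_train)
    X_scaled = ss.transform(X_train)
    assert np.allclose(X_scaled.mean(axis=0), 0.0)
    # New data is scaled with the statistics learned on X_train.
    X_new = ss.transform(np.array([[2., 2.]]))
    assert np.allclose(ss.inverse_transform(X_new), [[2., 2.]])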
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
    This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
            The data used to compute the per-feature maximum absolute value
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
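# --- Editor's usage sketch (not part of the original scikit-learn source). ---
# The per-feature maximum absolute value is learned in fit and used to bring
# every column into [-1, 1]; the numbers are made up.
def _example_maxabs_scaler_usage():
    X = np.array([[1., -2.], [2., 4.], [-4., 1.]])
    mas = MaxAbsScaler().fit(X)
    assert np.allclose(mas.scale_, [4., 4.])
    X_scaled = mas.transform(X)
    assert np.abs(X_scaled).max() <= 1.0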
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
                else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
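# --- Editor's usage sketch (not part of the original scikit-learn source). ---
# Demonstrates the point of median/IQR based scaling: a single large outlier
# barely influences the learned statistics. The values are made up.
def _example_robust_scaler_usage():
    X = np.array([[1.], [2.], [3.], [4.], [1000.]])
    rs = RobustScaler().fit(X)
    assert np.allclose(rs.center_, [3.])
    X_scaled = rs.transform(X)
    # The non-outlier samples stay within a small range around zero.
    assert np.all(np.abs(X_scaled[:4]) < 2.)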
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
        If True, scale the data to the interquartile range.
copy : boolean, optional, default is True
        set to False to perform inplace scaling and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
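# --- Editor's usage sketch (not part of the original scikit-learn source). ---
# Row-wise normalization with the 'l2' and 'l1' norms on made-up data.
def _example_normalize_usage():
    X = np.array([[3., 4.], [1., 0.]])
    X_l2 = normalize(X, norm='l2')
    assert np.allclose(np.sqrt((X_l2 ** 2).sum(axis=1)), 1.0)
    X_l1 = normalize(X, norm='l1')
    assert np.allclose(np.abs(X_l1).sum(axis=1), 1.0)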
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
    classification or clustering. For example, the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
        matrix).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
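# --- Editor's usage sketch (not part of the original scikit-learn source). ---
# Values strictly above the threshold become 1, everything else becomes 0;
# the data and threshold are made up.
def _example_binarize_usage():
    X = np.array([[0.4, -1.2, 2.5], [0.9, 0.0, 0.1]])
    X_bin = binarize(X, threshold=0.5)
    assert np.array_equal(X_bin, np.array([[0., 0., 1.], [1., 0., 0.]]))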
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
    normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
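# --- Editor's usage sketch (not part of the original scikit-learn source). ---
# For a linear kernel K = X X^T, centering the kernel is equivalent to
# centering X first and recomputing the kernel; the data is made up.
def _example_kernel_centerer_usage():
    X = np.array([[1., 2.], [2., 0.], [0., 1.]])
    K = np.dot(X, X.T)
    K_centered = KernelCenterer().fit(K).transform(K)
    X_centered = X - X.mean(axis=0)
    assert np.allclose(K_centered, np.dot(X_centered, X_centered.T))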
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(X))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that are known using fit.
        # i.e. less than n_values_ using mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
| bsd-3-clause | -8,716,250,005,458,972,000 | 34.843223 | 79 | 0.596567 | false |
TNT-Samuel/Coding-Projects | DNS Server/Source/Lib/site-packages/pygments/console.py | 27 | 1809 | # -*- coding: utf-8 -*-
"""
pygments.console
~~~~~~~~~~~~~~~~
Format colored console output.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
esc = "\x1b["
codes = {}
codes[""] = ""
codes["reset"] = esc + "39;49;00m"
codes["bold"] = esc + "01m"
codes["faint"] = esc + "02m"
codes["standout"] = esc + "03m"
codes["underline"] = esc + "04m"
codes["blink"] = esc + "05m"
codes["overline"] = esc + "06m"
dark_colors = ["black", "darkred", "darkgreen", "brown", "darkblue",
"purple", "teal", "lightgray"]
light_colors = ["darkgray", "red", "green", "yellow", "blue",
"fuchsia", "turquoise", "white"]
x = 30
for d, l in zip(dark_colors, light_colors):
codes[d] = esc + "%im" % x
codes[l] = esc + "%i;01m" % x
x += 1
del d, l, x
codes["darkteal"] = codes["turquoise"]
codes["darkyellow"] = codes["brown"]
codes["fuscia"] = codes["fuchsia"]
codes["white"] = codes["bold"]
def reset_color():
return codes["reset"]
def colorize(color_key, text):
return codes[color_key] + text + codes["reset"]
def ansiformat(attr, text):
"""
Format ``text`` with a color and/or some attributes::
color normal color
*color* bold color
_color_ underlined color
+color+ blinking color
"""
result = []
if attr[:1] == attr[-1:] == '+':
result.append(codes['blink'])
attr = attr[1:-1]
if attr[:1] == attr[-1:] == '*':
result.append(codes['bold'])
attr = attr[1:-1]
if attr[:1] == attr[-1:] == '_':
result.append(codes['underline'])
attr = attr[1:-1]
result.append(codes[attr])
result.append(text)
result.append(codes['reset'])
return ''.join(result)
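# --- Editor's usage sketch (not part of the original pygments module). ---
# Illustrates the attribute syntax documented above; the strings are made up.
def _example_ansiformat_usage():
    print(ansiformat('*red*', 'error:') + ' ' + ansiformat('_blue_', 'details'))
    print(colorize('green', 'ok'))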
| gpl-3.0 | 6,352,068,305,438,203,000 | 23.445946 | 70 | 0.545605 | false |
aruppen/xwot.py | xwot/device/lightbulb.py | 1 | 4704 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# xwot.py - Python tools for the extended Web of Things
# Copyright (C) 2015 Alexander Rüedlinger
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
__author__ = 'Alexander Rüedlinger'
from xwot.model import Context as XWOTContext
from xwot.model import Sensor as XWOTSensor
from xwot.model import Device as XWOTDevice
from xwot.model import Model
from xwot.model import BaseModel
class LightBulb(XWOTDevice, BaseModel):
__mutable_props__ = ['name', 'streetAddress', 'roomAddress', 'postalCode', 'addressLocality']
__expose__ = __mutable_props__ + ['description', 'switch', 'sensor']
def __init__(self, name, street_address, postal_code, address_locality, room_address):
super(LightBulb, self).__init__()
self._dic = {
'name': name,
'streetAddress': street_address,
'postalCode': postal_code,
'addressLocality': address_locality,
'roomAddress': room_address
}
self.add_type('xwot-ext:LightBulb')
self.add_link('switch')
self.add_link('sensor')
@property
def resource_path(self):
return '/lightbulb'
@property
def name(self):
return self._dic['name']
@property
def description(self):
return "Hi there my name is %s. I'm a light bulb and currently present in room %s at the location: %s, %s, %s" % \
(self.name, self.roomAddress, self.streetAddress, self.addressLocality, self.postalCode)
@property
def switch(self):
return '/lightbulb/switch'
@property
def sensor(self):
return '/lightbulb/sensor'
@property
def streetAddress(self):
return self._dic['streetAddress']
@property
def postalCode(self):
return self._dic['postalCode']
@property
def addressLocality(self):
return self._dic['addressLocality']
@property
def roomAddress(self):
return self._dic['roomAddress']
from xwot.i2c.adapter import LightBulbAdapter
class Switch(XWOTContext, Model):
__mutable_props__ = ['name', 'state']
__expose__ = __mutable_props__ + ['description', 'lightbulb']
def __init__(self, name, adapter=LightBulbAdapter()):
super(Switch, self).__init__()
self._dic = {
'name': name
}
self._adapter = adapter
self.add_type('xwot-ext:Switch')
self.add_link('lightbulb')
@property
def resource_path(self):
return '/lightbulb/switch'
@property
def description(self):
return "A light switch to turn off or on."
@property
def lightbulb(self):
return '/lightbulb'
@property
def state(self):
return self._adapter.state
@property
def name(self):
return self._dic['name']
def handle_update(self, dic):
if dic.get('state') == 'off':
self._adapter.switch_off()
if dic.get('state') == 'on':
self._adapter.switch_on()
self._dic['name'] = str(dic.get('name', self._dic['name']))
return 200
class Sensor(XWOTSensor, Model):
__expose__ = ['name', 'unit', 'measures', 'description', 'measurement', 'symbol', 'lightbulb']
def __init__(self, adapter=LightBulbAdapter()):
super(Sensor, self).__init__()
self._adapter = adapter
self.add_type('xwot-ext:IlluminanceSensor')
self.add_link('lightbulb')
@property
def resource_path(self):
return '/lightbulb/sensor'
@property
def name(self):
return 'Illuminance sensor'
@property
def lightbulb(self):
return '/lightbulb'
@property
def unit(self):
return 'Lux'
@property
def description(self):
return 'A sensor that measures the illuminance of this light bulb.'
@property
def measures(self):
return 'Illuminance'
@property
def measurement(self):
return self._adapter.illuminance
@property
def symbol(self):
return 'lx'
def handle_update(self, dic):
pass
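# --- Editor's usage sketch (not part of the original xwot module). ---
# Builds the device model with made-up address values; it only assumes the
# xwot.model base classes imported at the top of this file.
def _example_lightbulb_usage():
    bulb = LightBulb(name='bulb-1', street_address='Main Street 1',
                     postal_code='1700', address_locality='Fribourg',
                     room_address='A-101')
    print(bulb.description)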
| gpl-3.0 | 4,317,574,846,848,423,400 | 25.268156 | 122 | 0.624628 | false |
pquerna/cloud-init-debian-pkg-dead | cloudinit/config/cc_disk_setup.py | 6 | 25274 | # vi: ts=4 expandtab
#
# Copyright (C) 2009-2010 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
#
# Author: Ben Howard <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from cloudinit.settings import PER_INSTANCE
from cloudinit import util
import logging
import os
import shlex
frequency = PER_INSTANCE
# Define the commands to use
UDEVADM_CMD = util.which('udevadm')
SFDISK_CMD = util.which("sfdisk")
LSBLK_CMD = util.which("lsblk")
BLKID_CMD = util.which("blkid")
BLKDEV_CMD = util.which("blockdev")
WIPEFS_CMD = util.which("wipefs")
LOG = logging.getLogger(__name__)
def handle(_name, cfg, cloud, log, _args):
"""
See doc/examples/cloud-config_disk-setup.txt for documentation on the
format.
"""
disk_setup = cfg.get("disk_setup")
if isinstance(disk_setup, dict):
update_disk_setup_devices(disk_setup, cloud.device_name_to_device)
log.debug("Partitioning disks: %s", str(disk_setup))
for disk, definition in disk_setup.items():
if not isinstance(definition, dict):
log.warn("Invalid disk definition for %s" % disk)
continue
try:
log.debug("Creating new partition table/disk")
util.log_time(logfunc=LOG.debug,
msg="Creating partition on %s" % disk,
func=mkpart, args=(disk, definition))
except Exception as e:
util.logexc(LOG, "Failed partitioning operation\n%s" % e)
fs_setup = cfg.get("fs_setup")
if isinstance(fs_setup, list):
log.debug("setting up filesystems: %s", str(fs_setup))
update_fs_setup_devices(fs_setup, cloud.device_name_to_device)
for definition in fs_setup:
if not isinstance(definition, dict):
log.warn("Invalid file system definition: %s" % definition)
continue
try:
log.debug("Creating new filesystem.")
device = definition.get('device')
util.log_time(logfunc=LOG.debug,
msg="Creating fs for %s" % device,
func=mkfs, args=(definition,))
except Exception as e:
util.logexc(LOG, "Failed during filesystem operation\n%s" % e)
def update_disk_setup_devices(disk_setup, tformer):
    # update 'disk_setup' dictionary wherever a device may occur
# update it with the response from 'tformer'
for origname in disk_setup.keys():
transformed = tformer(origname)
if transformed is None or transformed == origname:
continue
if transformed in disk_setup:
LOG.info("Replacing %s in disk_setup for translation of %s",
origname, transformed)
del disk_setup[transformed]
disk_setup[transformed] = disk_setup[origname]
disk_setup[transformed]['_origname'] = origname
del disk_setup[origname]
LOG.debug("updated disk_setup device entry '%s' to '%s'",
origname, transformed)
def update_fs_setup_devices(disk_setup, tformer):
    # update 'fs_setup' dictionary wherever a device may occur
# update it with the response from 'tformer'
for definition in disk_setup:
if not isinstance(definition, dict):
LOG.warn("entry in disk_setup not a dict: %s", definition)
continue
origname = definition.get('device')
if origname is None:
continue
(dev, part) = util.expand_dotted_devname(origname)
tformed = tformer(dev)
if tformed is not None:
dev = tformed
LOG.debug("%s is mapped to disk=%s part=%s",
origname, tformed, part)
definition['_origname'] = origname
definition['device'] = tformed
if part and 'partition' in definition:
definition['_partition'] = definition['partition']
definition['partition'] = part
def value_splitter(values, start=None):
"""
Returns the key/value pairs of output sent as string
like: FOO='BAR' HOME='127.0.0.1'
"""
_values = shlex.split(values)
if start:
_values = _values[start:]
    for key, value in [x.split('=', 1) for x in _values]:
yield key, value
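# Illustrative behaviour of value_splitter (a sketch, not from the original
# source):
#   list(value_splitter('NAME="sda1" TYPE="part" FSTYPE="ext4"'))
#     -> [('NAME', 'sda1'), ('TYPE', 'part'), ('FSTYPE', 'ext4')]
# `start` skips leading tokens, e.g. the "/dev/sda:" prefix in blkid output.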
def enumerate_disk(device, nodeps=False):
"""
    Enumerate a device and its child devices.
    Parameters:
        device: the kernel device name
        nodeps <BOOL>: don't enumerate children devices
    Yields a dict describing each entry:
        type: the entry type, i.e. disk or part
fstype: the filesystem type, if it exists
label: file system label, if it exists
name: the device name, i.e. sda
"""
lsblk_cmd = [LSBLK_CMD, '--pairs', '--out', 'NAME,TYPE,FSTYPE,LABEL',
device]
if nodeps:
lsblk_cmd.append('--nodeps')
info = None
try:
info, _err = util.subp(lsblk_cmd)
except Exception as e:
raise Exception("Failed during disk check for %s\n%s" % (device, e))
parts = [x for x in (info.strip()).splitlines() if len(x.split()) > 0]
for part in parts:
d = {'name': None,
'type': None,
'fstype': None,
'label': None,
}
for key, value in value_splitter(part):
d[key.lower()] = value
yield d
def device_type(device):
"""
Return the device type of the device by calling lsblk.
"""
for d in enumerate_disk(device, nodeps=True):
if "type" in d:
return d["type"].lower()
return None
def is_device_valid(name, partition=False):
"""
Check if the device is a valid device.
"""
d_type = ""
try:
d_type = device_type(name)
    except Exception:
        LOG.warn("Query against device %s failed", name)
return False
if partition and d_type == 'part':
return True
elif not partition and d_type == 'disk':
return True
return False
def check_fs(device):
"""
Check if the device has a filesystem on it
Output of blkid is generally something like:
/dev/sda: LABEL="Backup500G" UUID="..." TYPE="ext4"
    Return values are label, fs_type, uuid
"""
out, label, fs_type, uuid = None, None, None, None
blkid_cmd = [BLKID_CMD, '-c', '/dev/null', device]
try:
out, _err = util.subp(blkid_cmd, rcs=[0, 2])
except Exception as e:
raise Exception("Failed during disk check for %s\n%s" % (device, e))
if out:
if len(out.splitlines()) == 1:
for key, value in value_splitter(out, start=1):
if key.lower() == 'label':
label = value
elif key.lower() == 'type':
fs_type = value
elif key.lower() == 'uuid':
uuid = value
return label, fs_type, uuid
def is_filesystem(device):
"""
Returns true if the device has a file system.
"""
_, fs_type, _ = check_fs(device)
return fs_type
def find_device_node(device, fs_type=None, label=None, valid_targets=None,
label_match=True, replace_fs=None):
"""
    Find a device that either matches the spec, or the first suitable
    unused device.
    The return value is (<device>, <bool>) where the device is the
device to use and the bool is whether the device matches the
fs_type and label.
Note: This works with GPT partition tables!
"""
# label of None is same as no label
if label is None:
label = ""
if not valid_targets:
valid_targets = ['disk', 'part']
raw_device_used = False
for d in enumerate_disk(device):
if d['fstype'] == replace_fs and label_match is False:
# We found a device where we want to replace the FS
return ('/dev/%s' % d['name'], False)
if (d['fstype'] == fs_type and
((label_match and d['label'] == label) or not label_match)):
# If we find a matching device, we return that
return ('/dev/%s' % d['name'], True)
if d['type'] in valid_targets:
if d['type'] != 'disk' or d['fstype']:
raw_device_used = True
if d['type'] == 'disk':
                # Skip the raw disk, it's the default
pass
elif not d['fstype']:
return ('/dev/%s' % d['name'], False)
if not raw_device_used:
return (device, False)
LOG.warn("Failed to find device during available device search.")
return (None, False)
def is_disk_used(device):
"""
    Check if the device is currently used. Returns true if the device
    has either a file system or a partition table on it.
    """
    # If the child count is higher than 1, then there are child nodes
    # such as partition or device mapper nodes
    use_count = [x for x in enumerate_disk(device)]
    if len(use_count) > 1:
return True
# If we see a file system, then its used
_, check_fstype, _ = check_fs(device)
if check_fstype:
return True
return False
def get_hdd_size(device):
"""
Returns the hard disk size.
This works with any disk type, including GPT.
"""
size_cmd = [SFDISK_CMD, '--show-size', device]
size = None
try:
size, _err = util.subp(size_cmd)
except Exception as e:
raise Exception("Failed to get %s size\n%s" % (device, e))
return int(size.strip())
def get_dyn_func(*args):
"""
Call the appropriate function.
    The first value is the template for the function name
    The second value is the template replacement
    The remaining values are passed to the function
For example: get_dyn_func("foo_%s", 'bar', 1, 2, 3,)
would call "foo_bar" with args of 1, 2, 3
"""
if len(args) < 2:
raise Exception("Unable to determine dynamic funcation name")
func_name = (args[0] % args[1])
func_args = args[2:]
try:
if func_args:
return globals()[func_name](*func_args)
else:
return globals()[func_name]
except KeyError:
raise Exception("No such function %s to call!" % func_name)
def check_partition_mbr_layout(device, layout):
"""
    Returns true if the partition layout matches the one on the disk.
    Layout should be a list of values. At this time, this only
    verifies that the number of partitions and their type labels are correct.
"""
read_parttbl(device)
prt_cmd = [SFDISK_CMD, "-l", device]
try:
out, _err = util.subp(prt_cmd, data="%s\n" % layout)
except Exception as e:
raise Exception("Error running partition command on %s\n%s" % (
device, e))
found_layout = []
for line in out.splitlines():
_line = line.split()
if len(_line) == 0:
continue
if device in _line[0]:
# We don't understand extended partitions yet
if _line[-1].lower() in ['extended', 'empty']:
continue
# Find the partition types
type_label = None
for x in sorted(range(1, len(_line)), reverse=True):
if _line[x].isdigit() and _line[x] != '/':
type_label = _line[x]
break
found_layout.append(type_label)
if isinstance(layout, bool):
        # if we are using auto partitioning (layout is True), be happy
        # as long as at least one partition exists.
if layout and len(found_layout) >= 1:
return True
return False
else:
if len(found_layout) != len(layout):
return False
else:
# This just makes sure that the number of requested
# partitions and the type labels are right
            for x in range(len(layout)):
                # only [percent, type] entries carry a type label to verify
                if isinstance(layout[x], (tuple, list)):
                    _, part_type = layout[x]
                    if int(found_layout[x]) != int(part_type):
                        return False
return True
return False
def check_partition_layout(table_type, device, layout):
"""
    See if the partition layout matches.
    This is a future-proofing function. In order
to add support for other disk layout schemes, add a
function called check_partition_%s_layout
"""
return get_dyn_func("check_partition_%s_layout", table_type, device,
layout)
def get_partition_mbr_layout(size, layout):
"""
Calculate the layout of the partition table. Partition sizes
are defined as percentage values or a tuple of percentage and
partition type.
For example:
        [ 33, [66, 82] ]
    Defines the first partition to be a size of 1/3 the disk,
    while the remaining 2/3 will be of type Linux Swap (type 82).
"""
if not isinstance(layout, list) and isinstance(layout, bool):
# Create a single partition
return "0,"
if ((len(layout) == 0 and isinstance(layout, list)) or
not isinstance(layout, list)):
raise Exception("Partition layout is invalid")
last_part_num = len(layout)
if last_part_num > 4:
raise Exception("Only simply partitioning is allowed.")
part_definition = []
part_num = 0
for part in layout:
part_type = 83 # Default to Linux
percent = part
part_num += 1
if isinstance(part, list):
if len(part) != 2:
raise Exception("Partition was incorrectly defined: %s" % part)
percent, part_type = part
part_size = int((float(size) * (float(percent) / 100)) / 1024)
if part_num == last_part_num:
part_definition.append(",,%s" % part_type)
else:
part_definition.append(",%s,%s" % (part_size, part_type))
sfdisk_definition = "\n".join(part_definition)
if len(part_definition) > 4:
raise Exception("Calculated partition definition is too big\n%s" %
sfdisk_definition)
return sfdisk_definition
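# Illustrative output (a sketch): get_partition_mbr_layout(size, [33, [66, 82]])
# produces an sfdisk input of two lines, roughly:
#   ,<33% of the disk in MiB>,83
#   ,,82
# The last partition is left unsized so sfdisk extends it to the end of the
# disk; `size` is expected in 1 KiB blocks as reported by `sfdisk --show-size`.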
def purge_disk_ptable(device):
# wipe the first and last megabyte of a disk (or file)
# gpt stores partition table both at front and at end.
null = '\0' # pylint: disable=W1401
start_len = 1024 * 1024
end_len = 1024 * 1024
with open(device, "rb+") as fp:
fp.write(null * (start_len))
fp.seek(-end_len, os.SEEK_END)
fp.write(null * end_len)
fp.flush()
read_parttbl(device)
def purge_disk(device):
"""
    Remove partition table entries
"""
# wipe any file systems first
for d in enumerate_disk(device):
if d['type'] not in ["disk", "crypt"]:
wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d['name']]
try:
LOG.info("Purging filesystem on /dev/%s" % d['name'])
util.subp(wipefs_cmd)
except Exception:
raise Exception("Failed FS purge of /dev/%s" % d['name'])
purge_disk_ptable(device)
def get_partition_layout(table_type, size, layout):
"""
Call the appropriate function for creating the table
definition. Returns the table definition
This is a future proofing function. To add support for
other layouts, simply add a "get_partition_%s_layout"
function.
"""
return get_dyn_func("get_partition_%s_layout", table_type, size, layout)
def read_parttbl(device):
"""
    Re-read the partition table using 'blockdev --rereadpt', wrapped in
    'udevadm settle' so the kernel and udev agree on the new layout.
"""
blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device]
udev_cmd = [UDEVADM_CMD, 'settle']
try:
util.subp(udev_cmd)
util.subp(blkdev_cmd)
util.subp(udev_cmd)
except Exception as e:
util.logexc(LOG, "Failed reading the partition table %s" % e)
def exec_mkpart_mbr(device, layout):
"""
    Break out the mbr partition creation to allow for future partition
types, i.e. gpt
"""
# Create the partitions
prt_cmd = [SFDISK_CMD, "--Linux", "-uM", device]
try:
util.subp(prt_cmd, data="%s\n" % layout)
except Exception as e:
raise Exception("Failed to partition device %s\n%s" % (device, e))
read_parttbl(device)
def exec_mkpart(table_type, device, layout):
"""
Fetches the function for creating the table type.
This allows to dynamically find which function to call.
    Parameters:
table_type: type of partition table to use
device: the device to work on
layout: layout definition specific to partition table
"""
return get_dyn_func("exec_mkpart_%s", table_type, device, layout)
def mkpart(device, definition):
"""
Creates the partition table.
Parameters:
definition: dictionary describing how to create the partition.
The following are supported values in the dict:
overwrite: Should the partition table be created regardless
                of any pre-existing data?
layout: the layout of the partition table
table_type: Which partition table to use, defaults to MBR
device: the device to work on.
"""
LOG.debug("Checking values for %s definition" % device)
overwrite = definition.get('overwrite', False)
layout = definition.get('layout', False)
table_type = definition.get('table_type', 'mbr')
# Check if the default device is a partition or not
LOG.debug("Checking against default devices")
if (isinstance(layout, bool) and not layout) or not layout:
LOG.debug("Device is not to be partitioned, skipping")
return # Device is not to be partitioned
# This prevents you from overwriting the device
LOG.debug("Checking if device %s is a valid device", device)
if not is_device_valid(device):
raise Exception("Device %s is not a disk device!", device)
# Remove the partition table entries
if isinstance(layout, str) and layout.lower() == "remove":
LOG.debug("Instructed to remove partition table entries")
purge_disk(device)
return
LOG.debug("Checking if device layout matches")
if check_partition_layout(table_type, device, layout):
LOG.debug("Device partitioning layout matches")
return True
LOG.debug("Checking if device is safe to partition")
if not overwrite and (is_disk_used(device) or is_filesystem(device)):
LOG.debug("Skipping partitioning on configured device %s" % device)
return
LOG.debug("Checking for device size")
device_size = get_hdd_size(device)
LOG.debug("Calculating partition layout")
part_definition = get_partition_layout(table_type, device_size, layout)
LOG.debug(" Layout is: %s" % part_definition)
LOG.debug("Creating partition table on %s", device)
exec_mkpart(table_type, device, part_definition)
LOG.debug("Partition table created for %s", device)
def lookup_force_flag(fs):
"""
    A force flag might be -F or -f depending on the filesystem; look it up here
"""
flags = {'ext': '-F',
'btrfs': '-f',
'xfs': '-f',
'reiserfs': '-f',
}
if 'ext' in fs.lower():
fs = 'ext'
if fs.lower() in flags:
        return flags[fs.lower()]
LOG.warn("Force flag for %s is unknown." % fs)
return ''
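# Illustrative `fs_cfg` entry consumed by mkfs() below (a sketch based on the
# keys it reads):
#   {'label': 'data',
#    'filesystem': 'ext4',
#    'device': '/dev/xvdb',     # or a cloud-defined name such as ephemeral0
#    'partition': 'auto',       # 'auto', 'any', 'none', or a partition number
#    'overwrite': False}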
def mkfs(fs_cfg):
"""
Create a file system on the device.
label: defines the label to use on the device
fs_cfg: defines how the filesystem is to look
The following values are required generally:
device: which device or cloud defined default_device
filesystem: which file system type
overwrite: indiscriminately create the file system
partition: when device does not define a partition,
setting this to a number will mean
device + partition. When set to 'auto', the
first free device or the first device which
matches both label and type will be used.
'any' means the first filesystem that matches
on the device.
When 'cmd' is provided then no other parameter is required.
"""
label = fs_cfg.get('label')
device = fs_cfg.get('device')
partition = str(fs_cfg.get('partition', 'any'))
fs_type = fs_cfg.get('filesystem')
fs_cmd = fs_cfg.get('cmd', [])
fs_opts = fs_cfg.get('extra_opts', [])
fs_replace = fs_cfg.get('replace_fs', False)
overwrite = fs_cfg.get('overwrite', False)
# This allows you to define the default ephemeral or swap
LOG.debug("Checking %s against default devices", device)
if not partition or partition.isdigit():
# Handle manual definition of partition
if partition.isdigit():
device = "%s%s" % (device, partition)
LOG.debug("Manual request of partition %s for %s",
partition, device)
# Check to see if the fs already exists
LOG.debug("Checking device %s", device)
check_label, check_fstype, _ = check_fs(device)
LOG.debug("Device %s has %s %s", device, check_label, check_fstype)
if check_label == label and check_fstype == fs_type:
LOG.debug("Existing file system found at %s", device)
if not overwrite:
LOG.debug("Device %s has required file system", device)
return
else:
LOG.warn("Destroying filesystem on %s", device)
else:
LOG.debug("Device %s is cleared for formating", device)
elif partition and str(partition).lower() in ('auto', 'any'):
# For auto devices, we match if the filesystem does exist
odevice = device
LOG.debug("Identifying device to create %s filesytem on", label)
# any mean pick the first match on the device with matching fs_type
label_match = True
if partition.lower() == 'any':
label_match = False
device, reuse = find_device_node(device, fs_type=fs_type, label=label,
label_match=label_match,
replace_fs=fs_replace)
LOG.debug("Automatic device for %s identified as %s", odevice, device)
if reuse:
LOG.debug("Found filesystem match, skipping formating.")
return
if not reuse and fs_replace and device:
LOG.debug("Replacing file system on %s as instructed." % device)
if not device:
LOG.debug("No device aviable that matches request. "
"Skipping fs creation for %s", fs_cfg)
return
elif not partition or str(partition).lower() == 'none':
LOG.debug("Using the raw device to place filesystem %s on" % label)
else:
LOG.debug("Error in device identification handling.")
return
LOG.debug("File system %s will be created on %s", label, device)
# Make sure the device is defined
if not device:
LOG.warn("Device is not known: %s", device)
return
# Check that we can create the FS
if not (fs_type or fs_cmd):
raise Exception("No way to create filesystem '%s'. fs_type or fs_cmd "
"must be set.", label)
# Create the commands
if fs_cmd:
fs_cmd = fs_cfg['cmd'] % {'label': label,
'filesystem': fs_type,
'device': device,
}
else:
# Find the mkfs command
mkfs_cmd = util.which("mkfs.%s" % fs_type)
if not mkfs_cmd:
mkfs_cmd = util.which("mk%s" % fs_type)
if not mkfs_cmd:
LOG.warn("Cannot create fstype '%s'. No mkfs.%s command", fs_type,
fs_type)
return
fs_cmd = [mkfs_cmd, device]
if label:
fs_cmd.extend(["-L", label])
        # File systems that support a force flag
        if overwrite or device_type(device) == "disk":
            force_flag = lookup_force_flag(fs_type)
            if force_flag:
                fs_cmd.append(force_flag)
    # Add the extra FS options
if fs_opts:
fs_cmd.extend(fs_opts)
LOG.debug("Creating file system %s on %s", label, device)
LOG.debug(" Using cmd: %s", " ".join(fs_cmd))
try:
util.subp(fs_cmd)
except Exception as e:
raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e))
| gpl-3.0 | 7,936,762,838,936,123,000 | 30.992405 | 79 | 0.586057 | false |
DESHRAJ/fjord | vendor/packages/translate-toolkit/translate/tools/test_pretranslate.py | 3 | 14236 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import warnings
from pytest import mark
from translate.tools import pretranslate
from translate.convert import test_convert
from translate.misc import wStringIO
from translate.storage import po
from translate.storage import xliff
class TestPretranslate:
xliff_skeleton = '''<?xml version="1.0" encoding="utf-8"?>
<xliff version="1.1" xmlns="urn:oasis:names:tc:xliff:document:1.1">
<file original="doc.txt" source-language="en-US">
<body>
%s
</body>
</file>
</xliff>'''
def setup_method(self, method):
warnings.resetwarnings()
def teardown_method(self, method):
warnings.resetwarnings()
def pretranslatepo(self, input_source, template_source=None):
"""helper that converts strings to po source without requiring files"""
input_file = wStringIO.StringIO(input_source)
if template_source:
template_file = wStringIO.StringIO(template_source)
else:
template_file = None
output_file = wStringIO.StringIO()
pretranslate.pretranslate_file(input_file, output_file, template_file)
output_file.seek(0)
return po.pofile(output_file.read())
def pretranslatexliff(self, input_source, template_source=None):
"""helper that converts strings to po source without requiring files"""
input_file = wStringIO.StringIO(input_source)
if template_source:
template_file = wStringIO.StringIO(template_source)
else:
template_file = None
output_file = wStringIO.StringIO()
pretranslate.pretranslate_file(input_file, output_file, template_file)
output_file.seek(0)
return xliff.xlifffile(output_file.read())
def singleunit(self, pofile):
"""checks that the pofile contains a single non-header unit, and
returns it"""
if len(pofile.units) == 2 and pofile.units[0].isheader():
print pofile.units[1]
return pofile.units[1]
else:
print pofile.units[0]
return pofile.units[0]
def test_pretranslatepo_blank(self):
"""checks that the pretranslatepo function is working for a simple file
initialisation"""
input_source = '''#: simple.label%ssimple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr ""\n''' % po.lsep
newpo = self.pretranslatepo(input_source)
assert str(self.singleunit(newpo)) == input_source
def test_merging_simple(self):
"""checks that the pretranslatepo function is working for a simple
merge"""
input_source = '''#: simple.label%ssimple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr ""\n''' % po.lsep
template_source = '''#: simple.label%ssimple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr "&Hart gekoeerde nuwe lyne\\n"\n''' % po.lsep
newpo = self.pretranslatepo(input_source, template_source)
assert str(self.singleunit(newpo)) == template_source
def test_merging_messages_marked_fuzzy(self):
"""test that when we merge PO files with a fuzzy message that it
remains fuzzy"""
input_source = '''#: simple.label%ssimple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr ""\n''' % po.lsep
template_source = '''#: simple.label%ssimple.accesskey\n#, fuzzy\nmsgid "A &hard coded newline.\\n"\nmsgstr "&Hart gekoeerde nuwe lyne\\n"\n''' % po.lsep
newpo = self.pretranslatepo(input_source, template_source)
assert str(self.singleunit(newpo)) == template_source
def test_merging_plurals_with_fuzzy_matching(self):
"""test that when we merge PO files with a fuzzy message that it
remains fuzzy"""
input_source = r'''#: file.cpp:2
msgid "%d manual"
msgid_plural "%d manuals"
msgstr[0] ""
msgstr[1] ""
'''
template_source = r'''#: file.cpp:3
#, fuzzy
msgid "%d manual"
msgid_plural "%d manuals"
msgstr[0] "%d handleiding."
msgstr[1] "%d handleidings."
'''
# The #: comment and msgid's are different between the pot and the po
poexpected = r'''#: file.cpp:2
#, fuzzy
msgid "%d manual"
msgid_plural "%d manuals"
msgstr[0] "%d handleiding."
msgstr[1] "%d handleidings."
'''
newpo = self.pretranslatepo(input_source, template_source)
assert str(self.singleunit(newpo)) == poexpected
@mark.xfail(reason="Not Implemented")
def test_merging_msgid_change(self):
"""tests that if the msgid changes but the location stays the same that
we merge"""
input_source = '''#: simple.label\n#: simple.accesskey\nmsgid "Its &hard coding a newline.\\n"\nmsgstr ""\n'''
template_source = '''#: simple.label\n#: simple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr "&Hart gekoeerde nuwe lyne\\n"\n'''
poexpected = '''#: simple.label\n#: simple.accesskey\n#, fuzzy\nmsgid "Its &hard coding a newline.\\n"\nmsgstr "&Hart gekoeerde nuwe lyne\\n"\n'''
newpo = self.pretranslatepo(input_source, template_source)
print newpo
assert str(newpo) == poexpected
def test_merging_location_change(self):
"""tests that if the location changes but the msgid stays the same that
we merge"""
input_source = '''#: new_simple.label%snew_simple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr ""\n''' % po.lsep
template_source = '''#: simple.label%ssimple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr "&Hart gekoeerde nuwe lyne\\n"\n''' % po.lsep
poexpected = '''#: new_simple.label%snew_simple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr "&Hart gekoeerde nuwe lyne\\n"\n''' % po.lsep
newpo = self.pretranslatepo(input_source, template_source)
print newpo
assert str(newpo) == poexpected
def test_merging_location_and_whitespace_change(self):
"""test that even if the location changes that if the msgid only has
whitespace changes we can still merge"""
input_source = '''#: singlespace.label%ssinglespace.accesskey\nmsgid "&We have spaces"\nmsgstr ""\n''' % po.lsep
template_source = '''#: doublespace.label%sdoublespace.accesskey\nmsgid "&We have spaces"\nmsgstr "&One het spasies"\n''' % po.lsep
poexpected = '''#: singlespace.label%ssinglespace.accesskey\n#, fuzzy\nmsgid "&We have spaces"\nmsgstr "&One het spasies"\n''' % po.lsep
newpo = self.pretranslatepo(input_source, template_source)
print newpo
assert str(newpo) == poexpected
@mark.xfail(reason="Not Implemented")
def test_merging_accelerator_changes(self):
"""test that a change in the accelerator localtion still allows
merging"""
input_source = '''#: someline.c\nmsgid "A&bout"\nmsgstr ""\n'''
template_source = '''#: someline.c\nmsgid "&About"\nmsgstr "&Info"\n'''
poexpected = '''#: someline.c\nmsgid "A&bout"\nmsgstr "&Info"\n'''
newpo = self.pretranslatepo(input_source, template_source)
print newpo
assert str(newpo) == poexpected
@mark.xfail(reason="Not Implemented")
def test_lines_cut_differently(self):
"""Checks that the correct formatting is preserved when pot an po lines
differ."""
input_source = '''#: simple.label\nmsgid "Line split "\n"differently"\nmsgstr ""\n'''
template_source = '''#: simple.label\nmsgid "Line"\n" split differently"\nmsgstr "Lyne verskillend gesny"\n'''
newpo = self.pretranslatepo(input_source, template_source)
newpounit = self.singleunit(newpo)
assert str(newpounit) == template_source
def test_merging_automatic_comments_dont_duplicate(self):
"""ensure that we can merge #. comments correctly"""
input_source = '''#. Row 35\nmsgid "&About"\nmsgstr ""\n'''
template_source = '''#. Row 35\nmsgid "&About"\nmsgstr "&Info"\n'''
newpo = self.pretranslatepo(input_source, template_source)
newpounit = self.singleunit(newpo)
assert str(newpounit) == template_source
def test_merging_automatic_comments_new_overides_old(self):
"""ensure that new #. comments override the old comments"""
input_source = '''#. new comment\n#: someline.c\nmsgid "&About"\nmsgstr ""\n'''
template_source = '''#. old comment\n#: someline.c\nmsgid "&About"\nmsgstr "&Info"\n'''
poexpected = '''#. new comment\n#: someline.c\nmsgid "&About"\nmsgstr "&Info"\n'''
newpo = self.pretranslatepo(input_source, template_source)
newpounit = self.singleunit(newpo)
assert str(newpounit) == poexpected
def test_merging_comments_with_blank_comment_lines(self):
"""test that when we merge a comment that has a blank line we keep the
blank line"""
input_source = '''#: someline.c\nmsgid "About"\nmsgstr ""\n'''
template_source = '''# comment1\n#\n# comment2\n#: someline.c\nmsgid "About"\nmsgstr "Omtrent"\n'''
poexpected = template_source
newpo = self.pretranslatepo(input_source, template_source)
newpounit = self.singleunit(newpo)
assert str(newpounit) == poexpected
def test_empty_commentlines(self):
input_source = '''#: paneSecurity.title
msgid "Security"
msgstr ""
'''
template_source = '''# - Contributor(s):
# -
# - Alternatively, the
# -
#: paneSecurity.title
msgid "Security"
msgstr "Sekuriteit"
'''
poexpected = template_source
newpo = self.pretranslatepo(input_source, template_source)
newpounit = self.singleunit(newpo)
print "expected"
print poexpected
print "got:"
print str(newpounit)
assert str(newpounit) == poexpected
def test_merging_msgidcomments(self):
"""ensure that we can merge msgidcomments messages"""
input_source = r'''#: window.width
msgid ""
"_: Do not translate this.\n"
"36em"
msgstr ""
'''
template_source = r'''#: window.width
msgid ""
"_: Do not translate this.\n"
"36em"
msgstr "36em"
'''
newpo = self.pretranslatepo(input_source, template_source)
newpounit = self.singleunit(newpo)
assert str(newpounit) == template_source
def test_merging_plurals(self):
"""ensure that we can merge plural messages"""
input_source = '''msgid "One"\nmsgid_plural "Two"\nmsgstr[0] ""\nmsgstr[1] ""\n'''
template_source = '''msgid "One"\nmsgid_plural "Two"\nmsgstr[0] "Een"\nmsgstr[1] "Twee"\nmsgstr[2] "Drie"\n'''
newpo = self.pretranslatepo(input_source, template_source)
print newpo
newpounit = self.singleunit(newpo)
assert str(newpounit) == template_source
def test_merging_resurect_obsolete_messages(self):
"""check that we can reuse old obsolete messages if the message comes
back"""
input_source = '''#: resurect.c\nmsgid "&About"\nmsgstr ""\n'''
template_source = '''#~ msgid "&About"\n#~ msgstr "&Omtrent"\n'''
expected = '''#: resurect.c\nmsgid "&About"\nmsgstr "&Omtrent"\n'''
newpo = self.pretranslatepo(input_source, template_source)
print newpo
assert str(newpo) == expected
def test_merging_comments(self):
"""Test that we can merge comments correctly"""
input_source = '''#. Don't do it!\n#: file.py:1\nmsgid "One"\nmsgstr ""\n'''
template_source = '''#. Don't do it!\n#: file.py:2\nmsgid "One"\nmsgstr "Een"\n'''
poexpected = '''#. Don't do it!\n#: file.py:1\nmsgid "One"\nmsgstr "Een"\n'''
newpo = self.pretranslatepo(input_source, template_source)
print newpo
newpounit = self.singleunit(newpo)
assert str(newpounit) == poexpected
def test_merging_typecomments(self):
"""Test that we can merge with typecomments"""
input_source = '''#: file.c:1\n#, c-format\nmsgid "%d pipes"\nmsgstr ""\n'''
template_source = '''#: file.c:2\nmsgid "%d pipes"\nmsgstr "%d pype"\n'''
poexpected = '''#: file.c:1\n#, c-format\nmsgid "%d pipes"\nmsgstr "%d pype"\n'''
newpo = self.pretranslatepo(input_source, template_source)
newpounit = self.singleunit(newpo)
print newpounit
assert str(newpounit) == poexpected
input_source = '''#: file.c:1\n#, c-format\nmsgid "%d computers"\nmsgstr ""\n'''
template_source = '''#: file.c:2\n#, c-format\nmsgid "%s computers "\nmsgstr "%s-rekenaars"\n'''
poexpected = '''#: file.c:1\n#, fuzzy, c-format\nmsgid "%d computers"\nmsgstr "%s-rekenaars"\n'''
newpo = self.pretranslatepo(input_source, template_source)
newpounit = self.singleunit(newpo)
assert newpounit.isfuzzy()
assert newpounit.hastypecomment("c-format")
def test_xliff_states(self):
"""Test correct maintenance of XLIFF states."""
xlf_template = self.xliff_skeleton \
% '''<trans-unit id="1" xml:space="preserve">
<source> File 1 </source>
</trans-unit>'''
xlf_old = self.xliff_skeleton \
% '''<trans-unit id="1" xml:space="preserve" approved="yes">
<source> File 1 </source>
<target> Lêer 1 </target>
</trans-unit>'''
template = xliff.xlifffile.parsestring(xlf_template)
old = xliff.xlifffile.parsestring(xlf_old)
new = self.pretranslatexliff(template, old)
print str(old)
print '---'
print str(new)
assert new.units[0].isapproved()
# Layout might have changed, so we won't compare the serialised
# versions
class TestPretranslateCommand(test_convert.TestConvertCommand, TestPretranslate):
"""Tests running actual pretranslate commands on files"""
convertmodule = pretranslate
def test_help(self):
"""tests getting help"""
options = test_convert.TestConvertCommand.test_help(self)
options = self.help_check(options, "-t TEMPLATE, --template=TEMPLATE")
options = self.help_check(options, "--tm")
options = self.help_check(options, "-s MIN_SIMILARITY, --similarity=MIN_SIMILARITY")
options = self.help_check(options, "--nofuzzymatching", last=True)
| bsd-3-clause | -4,920,748,769,575,958,000 | 44.047468 | 161 | 0.63372 | false |
lombritz/odoo | addons/base_report_designer/plugin/openerp_report_designer/bin/script/compile_all.py | 384 | 1193 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer [email protected]
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import compileall
compileall.compile_dir('package')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 5,717,657,398,113,399,000 | 41.607143 | 81 | 0.65549 | false |
40423136/2016fallcadp_hw | plugin/liquid_tags/audio.py | 277 | 2161 | """
Audio Tag
---------
This implements a Liquid-style audio tag for Pelican,
based on the pelican video plugin [1]_
Syntax
------
{% audio url/to/audio [url/to/audio] [/url/to/audio] %}
Example
-------
{% audio http://example.tld/foo.mp3 http://example.tld/foo.ogg %}
Output
------
<audio controls><source src="http://example.tld/foo.mp3" type="audio/mpeg"><source src="http://example.tld/foo.ogg" type="audio/ogg">Your browser does not support the audio element.</audio>
[1] https://github.com/getpelican/pelican-plugins/blob/master/liquid_tags/video.py
"""
import os
import re
from .mdx_liquid_tags import LiquidTags
SYNTAX = "{% audio url/to/audio [url/to/audio] [/url/to/audio] %}"
AUDIO = re.compile(r'(/\S+|https?:\S+)(?:\s+(/\S+|https?:\S+))?(?:\s+(/\S+|https?:\S+))?')
AUDIO_TYPEDICT = {'.mp3': 'audio/mpeg',
'.ogg': 'audio/ogg',
'.oga': 'audio/ogg',
'.opus': 'audio/ogg',
'.wav': 'audio/wav',
'.mp4': 'audio/mp4'}
def create_html(markup):
match = AUDIO.search(markup)
if match:
groups = match.groups()
audio_files = [g for g in groups if g]
if any(audio_files):
audio_out = '<audio controls>'
for audio_file in audio_files:
base, ext = os.path.splitext(audio_file)
if ext not in AUDIO_TYPEDICT:
raise ValueError("Unrecognized audio extension: "
"{0}".format(ext))
# add audio source
audio_out += '<source src="{}" type="{}">'.format(
audio_file, AUDIO_TYPEDICT[ext])
# close audio tag
audio_out += 'Your browser does not support the audio element.'
audio_out += '</audio>'
        else:
            raise ValueError("Error processing input, "
                             "expected syntax: {0}".format(SYNTAX))
    else:
        # no audio URL recognised; fail loudly rather than return undefined
        raise ValueError("Error processing input, "
                         "expected syntax: {0}".format(SYNTAX))
    return audio_out
@LiquidTags.register('audio')
def audio(preprocessor, tag, markup):
return create_html(markup)
# ---------------------------------------------------
# This import allows image tag to be a Pelican plugin
from liquid_tags import register
| agpl-3.0 | 1,151,745,764,497,600,400 | 27.813333 | 189 | 0.56224 | false |
caisq/tensorflow | tensorflow/contrib/gan/python/features/python/random_tensor_pool.py | 55 | 1552 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A tensor pool stores values from an input tensor and returns a stored one.
See the following papers for more details.
1) `Learning from simulated and unsupervised images through adversarial
training` (https://arxiv.org/abs/1612.07828).
2) `Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial
Networks` (https://arxiv.org/abs/1703.10593).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python.features.python import random_tensor_pool_impl
# pylint: disable=wildcard-import
from tensorflow.contrib.gan.python.features.python.random_tensor_pool_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
__all__ = random_tensor_pool_impl.__all__
remove_undocumented(__name__, __all__)
| apache-2.0 | -4,912,173,702,625,223,000 | 43.342857 | 83 | 0.730026 | false |
sukwon0709/equip | equip/rewriter/simple.py | 1 | 11974 | # -*- coding: utf-8 -*-
"""
equip.rewriter.simple
~~~~~~~~~~~~~~~~~~~~~
A simplified interface (yet the main one) to handle the injection
of instrumentation code.
:copyright: (c) 2014 by Romain Gaucher (@rgaucher)
:license: Apache 2, see LICENSE for more details.
"""
import os
import copy
from ..utils.log import logger
from ..bytecode.decl import ModuleDeclaration, \
MethodDeclaration, \
TypeDeclaration
from ..bytecode.code import BytecodeObject
from ..bytecode.utils import show_bytecode, \
get_debug_code_object_info
from .merger import Merger, RETURN_CANARY_NAME, LOAD_GLOBAL
# A global set tracking which files we added the imports to. This should be refactored
# and we should inspect the module/method for imports.
GLOBAL_IMPORTS_ADDED = set()
EXIT_ENTER_CODE_TEMPLATE = """
if __name__ == '__main__':
%s
"""
class SimpleRewriter(object):
"""
The current main rewriter that works for one ``Declaration`` object. Using this
rewriter will modify the given declaration object by possibly replacing all of
  its associated code objects.
"""
#: List of the parameters that can be used for formatting the code
#: to inject.
#: The values are:
#:
#: * ``method_name``: The name of the method that is being called.
#:
#: * ``lineno``: The start line number of the declaration object being
#: instrumented.
#:
#: * ``file_name``: The file name of the current module.
#:
#: * ``class_name``: The name of the class a method belongs to.
#:
KNOWN_FIELDS = ('method_name', 'lineno', 'file_name', 'class_name',
'arg0', 'arg1', 'arg2', 'arg3', 'arg4',
'arg5', 'arg6', 'arg7', 'arg8', 'arg9',
'arg10', 'arg11', 'arg12', 'arg13', 'arg14',
'arguments', 'return_value')
def __init__(self, decl):
self.decl = decl
self.original_decl = copy.deepcopy(self.decl)
self.module = None
    if isinstance(self.decl, ModuleDeclaration):
self.module = self.decl
else:
self.module = self.decl.parent_module
self.import_lives = set()
def insert_before(self, python_code):
"""
Insert code at the beginning of the method's body.
The submitted code can be formatted using ``fields`` declared in ``KNOWN_FIELDS``.
Since ``string.format`` is used once the values are dumped, the injected code should
be property structured.
:param python_code: The python code to be formatted, compiled, and inserted
at the beginning of the method body.
"""
if not isinstance(self.decl, MethodDeclaration):
raise TypeError('Can only insert before/after in a method')
return self.insert_generic(python_code, location=Merger.BEFORE)
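  # Illustrative usage (a sketch; assumes `decl` is a MethodDeclaration and
  # that a hypothetical `probe` module is importable by the target program):
  #   rewriter = SimpleRewriter(decl)
  #   rewriter.insert_import('import probe')
  #   rewriter.insert_before('probe.enter("{method_name}", [{arguments}])')
  #   rewriter.insert_after('probe.exit("{method_name}", {return_value})')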
def insert_after(self, python_code):
"""
Insert code at each `RETURN_VALUE` opcode. See `insert_before`.
"""
if not isinstance(self.decl, MethodDeclaration):
raise TypeError('Can only insert before/after in a method')
return self.insert_generic(python_code, location=Merger.AFTER)
def insert_generic(self, python_code, location=Merger.UNKNOWN, \
ins_lineno=-1, ins_offset=-1, ins_module=False, ins_import=False):
"""
Generic code injection utils. It first formats the supplied ``python_code``,
compiles it to get the `code_object`, and merge this new `code_object` with
the one of the current declaration object (``decl``). The insertion is done by
the ``Merger``.
When the injection is done, this method will go and recursively update all
references to the old `code_object` in the parents (when a parent changes, it is
as well updated and its new ``code_object`` propagated upwards). This process is
required as Python's code objects are nested in parent's code objects, and they
are all read-only. This process breaks any references that were hold on previously
used code objects (e.g., don't do that when the instrumented code is running).
:param python_code: The code to be formatted and inserted.
:param location: The kind of insertion to perform.
:param ins_lineno: When an insertion should occur at one given line of code,
use this parameter. Defaults to -1.
:param ins_offset: When an insertion should occur at one given bytecode offset,
use this parameter. Defaults to -1.
:param ins_module: Specify the code insertion should happen in the module
itself and not the current declaration.
    :param ins_import: True if the method is called for inserting an import statement.
"""
target_decl = self.decl if not ins_module else self.module
original_decl = self.original_decl
if ins_module and not isinstance(original_decl, ModuleDeclaration):
original_decl = original_decl.parent_module
formatted_code = SimpleRewriter.format_code(target_decl, python_code, location)
injected_co = SimpleRewriter.get_code_object(formatted_code)
if ins_import:
# Parse the import statement to extract the imported names.
bc_import = BytecodeObject.get_parsed_code(injected_co)
import_stmts = BytecodeObject.get_imports_from_bytecode(injected_co, bc_import)
for import_stmt in import_stmts:
self.import_lives = self.import_lives | import_stmt.live_names
self.inspect_all_globals()
working_co = target_decl.code_object
new_co = Merger.merge(working_co,
injected_co,
location,
ins_lineno,
ins_offset,
self.import_lives)
if not new_co:
return self
original_co = target_decl.code_object
target_decl.code_object = new_co
target_decl.has_changes = True
# Recursively apply this to the parent cos
parent = target_decl.parent
original_parent = original_decl.parent
while parent is not None:
# inspect the parent cos and update the consts for
# the original to the current sub-CO
parent.update_nested_code_object(original_co, new_co)
original_co = original_parent.code_object
new_co = parent.code_object
original_parent = original_parent.parent
parent = parent.parent
return self
def insert_import(self, import_code, module_import=True):
"""
Insert an import statement in the current bytecode. The import is added
    in front of all other imports.
"""
logger.debug("Insert import on: %s", self.decl)
if not module_import:
return self.insert_generic(import_code, location=Merger.BEFORE, ins_import=True)
else:
global GLOBAL_IMPORTS_ADDED
if self.module.module_path in GLOBAL_IMPORTS_ADDED:
logger.debug("Already added imports in %s" % self.module.module_path)
return
self.insert_generic(import_code, location=Merger.BEFORE,
ins_module=True, ins_import=True)
GLOBAL_IMPORTS_ADDED.add(self.module.module_path)
return self
def insert_enter_code(self, python_code, import_code=None):
"""
Insert generic code at the beginning of the module. The code is wrapped
in a ``if __name__ == '__main__'`` statement.
:param python_code: The python code to compile and inject.
:param import_code: The import statements, if any, to add before the
insertion of `python_code`. Defaults to None.
"""
return self.insert_enter_exit_code(python_code,
import_code,
location=Merger.MODULE_ENTER)
def insert_exit_code(self, python_code, import_code=None):
"""
Insert generic code at the end of the module. The code is wrapped
in a ``if __name__ == '__main__'`` statement.
:param python_code: The python code to compile and inject.
:param import_code: The import statements, if any, to add before the
insertion of `python_code`. Defaults to None.
"""
return self.insert_enter_exit_code(python_code,
import_code,
location=Merger.MODULE_EXIT)
def insert_enter_exit_code(self, python_code, import_code=None, location=Merger.MODULE_EXIT):
indented_python_code = SimpleRewriter.indent(python_code, indent_level=1)
if import_code:
indented_import_code = SimpleRewriter.indent(import_code, indent_level=1)
indented_python_code = indented_import_code + '\n' + indented_python_code
new_code = EXIT_ENTER_CODE_TEMPLATE % indented_python_code
return self.insert_generic(new_code, location)
def inspect_all_globals(self):
if not self.module:
return
co_module = self.module.code_object
bc_module = BytecodeObject.get_parsed_code(co_module)
for bc_tpl in bc_module:
if bc_tpl[2] == LOAD_GLOBAL:
self.import_lives.add(bc_tpl[3])
@staticmethod
def indent(original_code, indent_level=0):
"""
Lousy helper that indents the supplied python code, so that it will fit under
an if statement.
"""
new_code = []
indent = ' ' * 4 * indent_level
for l in original_code.split('\n'):
new_code.append(indent + l)
return '\n'.join(new_code)
@staticmethod
def get_code_object(python_code):
"""
    Actually compiles the supplied code and returns the ``code_object`` to be
merged with the source ``code_object``.
:param python_code: The python code to compile.
"""
try:
co = compile(python_code, '<string>', 'exec')
return co
except Exception, ex:
logger.error(str(ex))
logger.error('Compilation error:\n%s', python_code)
return None
# We know of some fields in KNOWN_FIELDS, and we inject them
# using the format string
@staticmethod
def format_code(decl, python_code, location):
"""
Formats the supplied ``python_code`` with format string, and values listed
in `KNOWN_FIELDS`.
:param decl: The declaration object (e.g., ``MethodDeclaration``, ``TypeDeclaration``, etc.).
:param python_code: The python code to format.
:param location: The kind of insertion to perform (e.g., ``Merger.BEFORE``).
"""
values = SimpleRewriter.get_formatting_values(decl, location)
return python_code.format(**values)
@staticmethod
def get_formatting_values(decl, location):
"""
Retrieves the dynamic values to be added in the format string. All values
are statically computed, but formal parameters (of methods) are passed by name so
it is possible to dereference them in the inserted code (same for the return value).
:param decl: The declaration object.
:param location: The kind of insertion to perform (e.g., ``Merger.BEFORE``).
"""
values = {}
values['lineno'] = decl.start_lineno
values['file_name'] = os.path.basename(decl.parent_module.module_path) \
if not isinstance(decl, ModuleDeclaration) \
else decl.module_path
values['class_name'] = decl.parent_class.type_name \
if decl.parent_class is not None \
else None
# Method specific arguments
if isinstance(decl, MethodDeclaration):
values['method_name'] = decl.method_name
values['arguments'] = ', '.join(decl.formal_parameters) if decl.formal_parameters else None
values['return_value'] = RETURN_CANARY_NAME if location == Merger.AFTER else None
args = decl.formal_parameters
length = len(args)
for arg_cnt in range(15):
if arg_cnt >= length:
values['arg%d' % arg_cnt] = None
else:
values['arg%d' % arg_cnt] = args[arg_cnt]
return values
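  # Illustrative `values` dict for a method `def foo(a, b)` at line 10 of
  # mod.py with location=Merger.BEFORE (a sketch):
  #   {'lineno': 10, 'file_name': 'mod.py', 'class_name': None,
  #    'method_name': 'foo', 'arguments': 'a, b', 'return_value': None,
  #    'arg0': 'a', 'arg1': 'b', 'arg2': None, ...}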
| apache-2.0 | 698,868,447,245,636,400 | 35.95679 | 99 | 0.638801 | false |
ioos/compliance-checker | compliance_checker/tests/test_feature_detection.py | 2 | 31793 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
compliance_checker/tests/test_feature_detection.py
"""
from unittest import TestCase
from netCDF4 import Dataset
from compliance_checker import cfutil as util
from compliance_checker.tests import resources
from compliance_checker.tests.helpers import MockRaggedArrayRepr
class TestFeatureDetection(TestCase):
"""
Tests the feature type detection of cdftools
"""
def test_point(self):
"""
Ensures point detection works
"""
with Dataset(resources.STATIC_FILES["point"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_point(nc, variable), "{} is point".format(variable)
def test_timeseries(self):
"""
Ensures timeseries detection works
"""
with Dataset(resources.STATIC_FILES["timeseries"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_timeseries(nc, variable), "{} is timeseries".format(
variable
)
def test_multi_timeseries_orthogonal(self):
"""
Ensures multi-timeseries-orthogonal detection works
"""
with Dataset(resources.STATIC_FILES["multi-timeseries-orthogonal"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_multi_timeseries_orthogonal(
nc, variable
), "{} is multi-timeseries orthogonal".format(variable)
def test_multi_timeseries_incomplete(self):
"""
Ensures multi-timeseries-incomplete detection works
"""
with Dataset(resources.STATIC_FILES["multi-timeseries-incomplete"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_multi_timeseries_incomplete(
nc, variable
), "{} is multi-timeseries incomplete".format(variable)
def test_trajectory(self):
"""
Ensures trajectory detection works
"""
with Dataset(resources.STATIC_FILES["trajectory"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_cf_trajectory(nc, variable), "{} is trajectory".format(
variable
)
def test_trajectory_single(self):
"""
Ensures trajectory-single detection works
"""
with Dataset(resources.STATIC_FILES["trajectory-single"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_single_trajectory(
nc, variable
), "{} is trajectory-single".format(variable)
def test_profile_orthogonal(self):
"""
Ensures profile-orthogonal detection works
"""
with Dataset(resources.STATIC_FILES["profile-orthogonal"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_profile_orthogonal(
nc, variable
), "{} is profile-orthogonal".format(variable)
def test_profile_incomplete(self):
"""
Ensures profile-incomplete detection works
"""
with Dataset(resources.STATIC_FILES["profile-incomplete"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_profile_incomplete(
nc, variable
), "{} is profile-incomplete".format(variable)
def test_timeseries_profile_single_station(self):
"""
Ensures timeseries profile single station detection works
"""
with Dataset(resources.STATIC_FILES["timeseries-profile-single-station"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_timeseries_profile_single_station(
nc, variable
), "{} is timeseries-profile-single-station".format(variable)
def test_timeseries_profile_multi_station(self):
"""
Ensures timeseries profile multi station detection works
"""
with Dataset(resources.STATIC_FILES["timeseries-profile-multi-station"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_timeseries_profile_multi_station(
nc, variable
), "{} is timeseries-profile-multi-station".format(variable)
def test_timeseries_profile_single_ortho_time(self):
"""
Ensures timeseries profile single station ortho time detection works
"""
with Dataset(
resources.STATIC_FILES["timeseries-profile-single-ortho-time"]
) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_timeseries_profile_single_ortho_time(
nc, variable
), "{} is timeseries-profile-single-ortho-time".format(variable)
def test_timeseries_profile_multi_ortho_time(self):
"""
Ensures timeseries profile multi station ortho time detection works
"""
with Dataset(
resources.STATIC_FILES["timeseries-profile-multi-ortho-time"]
) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_timeseries_profile_multi_ortho_time(
nc, variable
), "{} is timeseries-profile-multi-ortho-time".format(variable)
def test_timeseries_profile_ortho_depth(self):
"""
Ensures timeseries profile ortho depth detection works
"""
with Dataset(resources.STATIC_FILES["timeseries-profile-ortho-depth"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_timeseries_profile_ortho_depth(
nc, variable
), "{} is timeseries-profile-ortho-depth".format(variable)
def test_timeseries_profile_incomplete(self):
"""
Ensures timeseries profile station incomplete detection works
"""
with Dataset(resources.STATIC_FILES["timeseries-profile-incomplete"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_timeseries_profile_incomplete(
nc, variable
), "{} is timeseries-profile-incomplete".format(variable)
def test_trajectory_profile_orthogonal(self):
"""
Ensures trajectory profile orthogonal detection works
"""
with Dataset(resources.STATIC_FILES["trajectory-profile-orthogonal"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_trajectory_profile_orthogonal(
nc, variable
), "{} is trajectory profile orthogonal".format(variable)
def test_trajectory_profile_incomplete(self):
"""
Ensures trajectory profile incomplete detection works
"""
with Dataset(resources.STATIC_FILES["trajectory-profile-incomplete"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_trajectory_profile_incomplete(
nc, variable
), "{} is trajectory profile incomplete".format(variable)
def test_2d_regular_grid(self):
"""
Ensures 2D Regular Grid detection works
"""
with Dataset(resources.STATIC_FILES["2d-regular-grid"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_2d_regular_grid(
nc, variable
), "{} is 2D regular grid".format(variable)
def test_2d_static_grid(self):
"""
Ensures 2D Static Grid detection works
"""
with Dataset(resources.STATIC_FILES["2d-static-grid"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_2d_static_grid(
nc, variable
), "{} is a 2D static grid".format(variable)
def test_3d_regular_grid(self):
"""
        Ensures 3D Regular Grid detection works
"""
with Dataset(resources.STATIC_FILES["3d-regular-grid"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_3d_regular_grid(
nc, variable
), "{} is 3d regular grid".format(variable)
def test_3d_static_grid(self):
"""
Ensures 3D Static Grid detection works
"""
with Dataset(resources.STATIC_FILES["3d-static-grid"]) as nc:
for variable in util.get_geophysical_variables(nc):
assert util.is_3d_static_grid(
nc, variable
), "{} is a 3D static grid".format(variable)
def test_boundaries(self):
"""
Ensures that boundary variables are not listed as geophysical variables
"""
with Dataset(resources.STATIC_FILES["grid-boundaries"]) as nc:
assert "lat_bnds" not in util.get_geophysical_variables(nc)
assert "lon_bnds" not in util.get_geophysical_variables(nc)
assert "lat_bnds" in util.get_cell_boundary_variables(nc)
assert "lon_bnds" in util.get_cell_boundary_variables(nc)
boundary_map = util.get_cell_boundary_map(nc)
assert boundary_map["lat"] == "lat_bnds"
assert boundary_map["lon"] == "lon_bnds"
def test_climatology(self):
"""
Ensures that climatology variables are identified as climatology variables and not geophysical variables
"""
with Dataset(resources.STATIC_FILES["climatology"]) as nc:
geophysical_variables = util.get_geophysical_variables(nc)
climatology_variable = util.get_climatology_variable(nc)
assert "temperature" in geophysical_variables
assert "climatology_bounds" not in geophysical_variables
assert "climatology_bounds" == climatology_variable
def test_grid_mapping(self):
"""
Ensures that grid mapping variables are properly identified
"""
with Dataset(resources.STATIC_FILES["rotated_pole_grid"]) as nc:
grid_mapping = util.get_grid_mapping_variables(nc)
coordinate_variables = util.get_coordinate_variables(nc)
axis_variables = util.get_axis_variables(nc)
assert "rotated_pole" in grid_mapping
assert set(["rlon", "rlat", "lev"]) == set(coordinate_variables)
assert set(["rlon", "rlat", "lev"]) == set(axis_variables)
assert "lat" == util.get_lat_variable(nc)
assert "lon" == util.get_lon_variable(nc)
def test_auxiliary_coordinates(self):
"""
Ensures variables are classified as auxiliary coordinate variables
"""
with Dataset(resources.STATIC_FILES["bad_units"]) as nc:
coordinate_variables = util.get_coordinate_variables(nc)
assert set(["time"]) == set(coordinate_variables)
aux_coord_vards = util.get_auxiliary_coordinate_variables(nc)
assert set(["lat", "lon"]) == set(aux_coord_vards)
def test_forecast_reference_metadata(self):
"""
Tests variables used for forecast reference metadata to ensure they are
not misclassified as geophysical variables.
"""
with Dataset(resources.STATIC_FILES["forecast_reference"]) as nc:
self.assertFalse(util.is_geophysical(nc, "forecast_reference_time"))
self.assertFalse(util.is_geophysical(nc, "forecast_hour"))
self.assertTrue(util.is_geophysical(nc, "air_temp"))
self.assertFalse(util.is_geophysical(nc, "time"))
assert len(util.get_coordinate_variables(nc)) == 3
assert len(util.get_geophysical_variables(nc)) == 1
def test_rotated_pole_grid(self):
with Dataset(resources.STATIC_FILES["rotated_pole_grid"]) as nc:
latitudes = util.get_latitude_variables(nc)
assert latitudes == ["lat", "rlat"]
assert util.is_mapped_grid(nc, "temperature") is True
def test_vertical_coords(self):
with Dataset(resources.STATIC_FILES["vertical_coords"]) as nc:
vertical = util.get_z_variables(nc)
assert vertical == ["height"]
def test_reduced_grid(self):
with Dataset(resources.STATIC_FILES["reduced_horizontal_grid"]) as nc:
assert util.guess_feature_type(nc, "PS") == "reduced-grid"
def test_global_feature_detection(self):
with Dataset(resources.STATIC_FILES["reduced_horizontal_grid"]) as nc:
assert util.guess_feature_type(nc, "PS") == "reduced-grid"
with Dataset(resources.STATIC_FILES["vertical_coords"]) as nc:
assert util.guess_feature_type(nc, "temperature") == "point"
axis_map = util.get_axis_map(nc, "temperature")
assert axis_map["Z"] == ["height"]
assert axis_map["T"] == ["time"]
with Dataset(resources.STATIC_FILES["2d-regular-grid"]) as nc:
assert util.guess_feature_type(nc, "temperature") == "2d-regular-grid"
axis_map = util.get_axis_map(nc, "temperature")
assert axis_map["T"] == ["time"]
assert axis_map["Z"] == ["z"]
assert axis_map["X"] == ["lon"]
assert axis_map["Y"] == ["lat"]
with Dataset(resources.STATIC_FILES["2dim"]) as nc:
assert util.guess_feature_type(nc, "T") == "mapped-grid"
axis_map = util.get_axis_map(nc, "T")
assert axis_map["Z"] == ["lev"]
assert axis_map["Y"] == ["yc", "lat"]
assert axis_map["X"] == ["xc", "lon"]
with Dataset(resources.STATIC_FILES["3d-regular-grid"]) as nc:
assert util.guess_feature_type(nc, "temperature") == "3d-regular-grid"
axis_map = util.get_axis_map(nc, "temperature")
assert axis_map["T"] == ["time"]
assert axis_map["Z"] == ["z"]
assert axis_map["Y"] == ["lat"]
assert axis_map["X"] == ["lon"]
with Dataset(resources.STATIC_FILES["climatology"]) as nc:
assert util.guess_feature_type(nc, "temperature") == "timeseries"
axis_map = util.get_axis_map(nc, "temperature")
assert axis_map["T"] == ["time"]
assert axis_map["Z"] == []
assert axis_map["Y"] == []
assert axis_map["X"] == []
with Dataset(resources.STATIC_FILES["index_ragged"]) as nc:
assert util.guess_feature_type(nc, "temperature") == "trajectory"
axis_map = util.get_axis_map(nc, "temperature")
assert axis_map["T"] == ["time"]
assert axis_map["Z"] == ["z"]
assert axis_map["Y"] == ["lat"]
assert axis_map["X"] == ["lon"]
with Dataset(resources.STATIC_FILES["mapping"]) as nc:
assert (
util.guess_feature_type(nc, "sea_surface_height")
== "timeseries"
)
axis_map = util.get_axis_map(nc, "sea_surface_height")
assert axis_map["T"] == ["time"]
assert axis_map["Z"] == []
assert axis_map["Y"] == ["lat"]
assert axis_map["X"] == ["lon"]
with Dataset(resources.STATIC_FILES["rotated_pole_grid"]) as nc:
assert util.guess_feature_type(nc, "temperature") == "mapped-grid"
axis_map = util.get_axis_map(nc, "temperature")
assert axis_map["T"] == []
assert axis_map["Z"] == ["lev"]
assert axis_map["Y"] == ["rlat", "lat"]
assert axis_map["X"] == ["rlon", "lon"]
with Dataset(resources.STATIC_FILES["rutgers"]) as nc:
assert util.guess_feature_type(nc, "temperature") == "trajectory"
axis_map = util.get_axis_map(nc, "temperature")
assert axis_map["T"] == ["time"]
assert axis_map["Z"] == ["depth"]
assert axis_map["Y"] == ["lat"]
assert axis_map["X"] == ["lon"]
with Dataset(resources.STATIC_FILES["self-referencing-var"]) as nc:
assert util.guess_feature_type(nc, "TEMP") == "point"
axis_map = util.get_axis_map(nc, "TEMP")
assert axis_map["T"] == ["TIME"]
assert axis_map["Z"] == ["DEPTH"]
assert axis_map["Y"] == []
assert axis_map["X"] == []
with Dataset(resources.STATIC_FILES["2d-static-grid"]) as nc:
assert util.guess_feature_type(nc, "T") == "2d-static-grid"
axis_map = util.get_axis_map(nc, "T")
assert axis_map["X"] == ["lon"]
assert axis_map["Y"] == ["lat"]
assert axis_map["T"] == []
assert axis_map["Z"] == []
with Dataset(resources.STATIC_FILES["3d-static-grid"]) as nc:
assert util.guess_feature_type(nc, "T") == "3d-static-grid"
axis_map = util.get_axis_map(nc, "T")
assert axis_map["X"] == ["lon"]
assert axis_map["Y"] == ["lat"]
assert axis_map["T"] == []
assert axis_map["Z"] == ["depth"]
def test_is_variable_valid_ragged_array_repr_featureType(self):
nc = MockRaggedArrayRepr(
"timeseries",
"indexed"
)
# add a variable that isn't recognized as geophysical
v = nc.createVariable(
"data1",
"d",
("SAMPLE_DIMENSION",),
fill_value=None
)
v.setncattr("cf_role", "blah")
self.assertFalse(util.is_variable_valid_ragged_array_repr_featureType(nc, "data1"))
# add geophysical variable with correct dimension
nc = MockRaggedArrayRepr(
"timeseries",
"indexed"
)
v = nc.createVariable(
"data1",
"d",
("SAMPLE_DIMENSION",),
fill_value=None
)
v.setncattr("standard_name", "sea_water_pressure")
# test the variable
self.assertTrue(util.is_variable_valid_ragged_array_repr_featureType(nc, "data1"))
# add a good variable and another variable, this time with the improper dimension
nc = MockRaggedArrayRepr(
"timeseries",
"indexed"
)
v = nc.createVariable(
"data1",
"d",
("SAMPLE_DIMENSION",),
fill_value=None
)
v.setncattr("standard_name", "sea_water_pressure")
v2 = nc.createVariable(
"data2",
"d",
("INSTANCE_DIMENSION",),
fill_value=None
)
v2.setncattr("standard_name", "sea_water_salinity")
# good variable should pass, second should fail
self.assertTrue(util.is_variable_valid_ragged_array_repr_featureType(nc, "data1"))
self.assertFalse(util.is_variable_valid_ragged_array_repr_featureType(nc, "data2"))
def test_is_dataset_valid_ragged_array_repr_featureType(self):
# first test single featureType
# ----- timeseries, indexed ----- #
nc = MockRaggedArrayRepr(
"timeseries",
"indexed"
)
self.assertTrue(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseries")
)
# we'll add another cf_role variable
nc = MockRaggedArrayRepr(
"timeseries",
"indexed"
)
v = nc.createVariable(
"var2",
"i",
("INSTANCE_DIMENSION",),
fill_value=None)
v.setncattr("cf_role", "yeetyeet_id")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseries")
)
# we'll add another index variable, also bad
nc = MockRaggedArrayRepr(
"timeseries",
"indexed"
)
v = nc.createVariable(
"index_var2",
"i",
("SAMPLE_DIMENSION",),
fill_value=None)
v.setncattr("instance_dimension", "INSTANCE_DIMENSION")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseries")
)
# ----- timeseries, contiguous ----- #
nc = MockRaggedArrayRepr(
"timeseries",
"contiguous"
)
self.assertTrue(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseries")
)
# add another cf_role var, bad
nc = MockRaggedArrayRepr(
"timeseries",
"contiguous"
)
v = nc.createVariable(
"var2",
"i",
("INSTANCE_DIMENSION",),
fill_value=None)
v.setncattr("cf_role", "yeetyeet_id")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseries")
)
# add another count variable, bad
v = nc.createVariable(
"count_var2",
"i",
("INSTANCE_DIMENSION",),
fill_value=None)
v.setncattr("sample_dimension", "SAMPLE_DIMENSION")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseries")
)
# ----- profile, indexed ----- #
nc = MockRaggedArrayRepr(
"profile",
"indexed"
)
self.assertTrue(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "profile")
)
# add another cf_role var
nc = MockRaggedArrayRepr(
"profile",
"indexed"
)
v = nc.createVariable(
"var2",
"i",
("INSTANCE_DIMENSION",),
fill_value=None)
v.setncattr("cf_role", "yeetyeet_id")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "profile")
)
# we'll add another index variable, also bad
nc = MockRaggedArrayRepr(
"profile",
"indexed"
)
v = nc.createVariable(
"index_var2",
"i",
("SAMPLE_DIMENSION",),
fill_value=None)
v.setncattr("instance_dimension", "INSTANCE_DIMENSION")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "profile")
)
# ----- profile, contiguous ----- #
nc = MockRaggedArrayRepr(
"profile",
"contiguous"
)
self.assertTrue(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "profile")
)
# add another cf_role var
nc = MockRaggedArrayRepr(
"profile",
"contiguous"
)
v = nc.createVariable(
"var2",
"i",
("INSTANCE_DIMENSION",),
fill_value=None)
v.setncattr("cf_role", "yeetyeet_id")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "profile")
)
# we'll add another count variable, also bad
nc = MockRaggedArrayRepr(
"profile",
"contiguous"
)
v = nc.createVariable(
"index_var2",
"i",
("INSTANCE_DIMENSION",),
fill_value=None)
v.setncattr("sample_dimension", "SAMPLE_DIMENSION")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "profile")
)
# ----- trajectory, indexed ----- #
nc = MockRaggedArrayRepr(
"trajectory",
"indexed"
)
self.assertTrue(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectory")
)
# add another cf_role var
nc = MockRaggedArrayRepr(
"trajectory",
"indexed"
)
v = nc.createVariable(
"var2",
"i",
("INSTANCE_DIMENSION",),
fill_value=None)
v.setncattr("cf_role", "yeetyeet_id")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectory")
)
# we'll add another index variable, also bad
nc = MockRaggedArrayRepr(
"trajectory",
"indexed"
)
v = nc.createVariable(
"index_var2",
"i",
("SAMPLE_DIMENSION",),
fill_value=None)
v.setncattr("instance_dimension", "INSTANCE_DIMENSION")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectory")
)
# ----- trajectory, contiguous ----- #
nc = MockRaggedArrayRepr(
"trajectory",
"contiguous"
)
self.assertTrue(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectory")
)
# add another cf_role var
nc = MockRaggedArrayRepr(
"trajectory",
"contiguous"
)
v = nc.createVariable(
"var2",
"i",
("INSTANCE_DIMENSION",),
fill_value=None)
v.setncattr("cf_role", "yeetyeet_id")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectory")
)
# we'll add another count variable, also bad
nc = MockRaggedArrayRepr(
"trajectory",
"contiguous"
)
v = nc.createVariable(
"index_var2",
"i",
("INSTANCE_DIMENSION",),
fill_value=None)
v.setncattr("sample_dimension", "SAMPLE_DIMENSION")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectory")
)
# ----- now test compound featureType ----- #
# ----- timeSeriesProfile ----- #
nc = MockRaggedArrayRepr(
"timeSeriesProfile"
)
# NOTE
# has no geophysical vars, so it will fail
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseriesprofile")
)
# add a geophysical variable and test again
nc = MockRaggedArrayRepr(
"timeSeriesProfile"
)
v1 = nc.createVariable(
"data1",
"i",
("SAMPLE_DIMENSION",),
fill_value=None
)
v1.setncattr("standard_name", "pressure")
self.assertTrue(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseriesprofile")
)
nc = MockRaggedArrayRepr(
"timeSeriesProfile"
)
v1 = nc.createVariable(
"data1",
"i",
("SAMPLE_DIMENSION",),
fill_value=None
)
# add a third cf_role variable - this should fail
v = nc.createVariable(
"cf_role_var3",
"i",
("INSTANCE_DIMENSION",),
fill_value=None)
v.setncattr("cf_role", "yeetyeet_id")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseriesprofile")
)
# set the index variable to have an incorrect attr
nc = MockRaggedArrayRepr(
"timeSeriesProfile"
)
v1 = nc.createVariable(
"data1",
"i",
("SAMPLE_DIMENSION",),
fill_value=None
)
nc.variables["station_index_variable"].instance_dimension = "SIKE!"
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseriesprofile")
)
# change the sample_dimension attr on the count variable, bad
nc = MockRaggedArrayRepr(
"timeSeriesProfile"
)
v1 = nc.createVariable(
"data1",
"i",
("SAMPLE_DIMENSION",),
fill_value=None
)
nc.variables["counter_var"].sample_dimension = "SIKE!"
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseriesprofile")
)
# give another geophysical data variable a different dimension
nc = MockRaggedArrayRepr(
"timeSeriesProfile"
)
v1 = nc.createVariable(
"data1",
"i",
("SAMPLE_DIMENSION",),
fill_value=None
)
v1 = nc.createVariable(
"data2",
"i",
("STATION_DIMENSION",), # bad!
fill_value=None
)
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "timeseriesprofile")
)
# ----- trajectoryProfile ----- #
nc = MockRaggedArrayRepr(
"trajectoryProfile"
)
# NOTE
# has no geophysical vars, so it will fail
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectoryprofile")
)
# add a geophysical variable and test again
nc = MockRaggedArrayRepr(
"trajectoryProfile"
)
v1 = nc.createVariable(
"data1",
"i",
("SAMPLE_DIMENSION",),
fill_value=None
)
v1.setncattr("standard_name", "pressure")
self.assertTrue(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectoryprofile")
)
nc = MockRaggedArrayRepr(
"trajectoryProfile"
)
v1 = nc.createVariable(
"data1",
"i",
("SAMPLE_DIMENSION",),
fill_value=None
)
# add a third cf_role variable - this should fail
v = nc.createVariable(
"cf_role_var3",
"i",
("INSTANCE_DIMENSION",),
fill_value=None)
v.setncattr("cf_role", "yeetyeet_id")
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectoryprofile")
)
# set the index variable to have an incorrect attr
nc = MockRaggedArrayRepr(
"trajectoryProfile"
)
v1 = nc.createVariable(
"data1",
"i",
("SAMPLE_DIMENSION",),
fill_value=None
)
nc.variables["station_index_variable"].instance_dimension = "SIKE!"
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectoryprofile")
)
# change the sample_dimension attr on the count variable, bad
nc = MockRaggedArrayRepr(
"trajectoryProfile"
)
v1 = nc.createVariable(
"data1",
"i",
("SAMPLE_DIMENSION",),
fill_value=None
)
nc.variables["counter_var"].sample_dimension = "SIKE!"
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectoryprofile")
)
# give another geophysical data variable a different dimension
nc = MockRaggedArrayRepr(
"trajectoryProfile"
)
v1 = nc.createVariable(
"data1",
"i",
("SAMPLE_DIMENSION",),
fill_value=None
)
v1 = nc.createVariable(
"data2",
"i",
("STATION_DIMENSION",), # bad!
fill_value=None
)
self.assertFalse(
util.is_dataset_valid_ragged_array_repr_featureType(nc, "trajectoryprofile")
)
| apache-2.0 | -6,603,368,347,699,152,000 | 34.091611 | 112 | 0.550278 | false |
wilebeast/FireFox-OS | B2G/external/wpa_supplicant_8/wpa_supplicant/examples/wpas-dbus-new-wps.py | 114 | 2221 | #!/usr/bin/python
import dbus
import sys, os
import time
import gobject
from dbus.mainloop.glib import DBusGMainLoop
WPAS_DBUS_SERVICE = "fi.w1.wpa_supplicant1"
WPAS_DBUS_INTERFACE = "fi.w1.wpa_supplicant1"
WPAS_DBUS_OPATH = "/fi/w1/wpa_supplicant1"
WPAS_DBUS_INTERFACES_INTERFACE = "fi.w1.wpa_supplicant1.Interface"
WPAS_DBUS_WPS_INTERFACE = "fi.w1.wpa_supplicant1.Interface.WPS"
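# Illustrative note (not part of the original script): invoke it with the
# wpa_supplicant interface name as its only argument, e.g. `python
# wpas-dbus-new-wps.py wlan0`; main() below then registers the D-Bus signal
# printers and starts WPS in push-button (PBC) enrollee mode.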
def propertiesChanged(properties):
if properties.has_key("State"):
print "PropertiesChanged: State: %s" % (properties["State"])
def scanDone(success):
print "Scan done: success=%s" % success
def bssAdded(bss, properties):
print "BSS added: %s" % (bss)
def bssRemoved(bss):
print "BSS removed: %s" % (bss)
def wpsEvent(name, args):
print "WPS event: %s" % (name)
print args
def credentials(cred):
print "WPS credentials: %s" % (cred)
def main():
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
global bus
bus = dbus.SystemBus()
wpas_obj = bus.get_object(WPAS_DBUS_SERVICE, WPAS_DBUS_OPATH)
if len(sys.argv) != 2:
print "Missing ifname argument"
os._exit(1)
wpas = dbus.Interface(wpas_obj, WPAS_DBUS_INTERFACE)
bus.add_signal_receiver(scanDone,
dbus_interface=WPAS_DBUS_INTERFACES_INTERFACE,
signal_name="ScanDone")
bus.add_signal_receiver(bssAdded,
dbus_interface=WPAS_DBUS_INTERFACES_INTERFACE,
signal_name="BSSAdded")
bus.add_signal_receiver(bssRemoved,
dbus_interface=WPAS_DBUS_INTERFACES_INTERFACE,
signal_name="BSSRemoved")
bus.add_signal_receiver(propertiesChanged,
dbus_interface=WPAS_DBUS_INTERFACES_INTERFACE,
signal_name="PropertiesChanged")
bus.add_signal_receiver(wpsEvent,
dbus_interface=WPAS_DBUS_WPS_INTERFACE,
signal_name="Event")
bus.add_signal_receiver(credentials,
dbus_interface=WPAS_DBUS_WPS_INTERFACE,
signal_name="Credentials")
ifname = sys.argv[1]
path = wpas.GetInterface(ifname)
if_obj = bus.get_object(WPAS_DBUS_SERVICE, path)
if_obj.Set(WPAS_DBUS_WPS_INTERFACE, 'ProcessCredentials',
dbus.Boolean(1),
dbus_interface=dbus.PROPERTIES_IFACE)
wps = dbus.Interface(if_obj, WPAS_DBUS_WPS_INTERFACE)
wps.Start({'Role': 'enrollee', 'Type': 'pbc'})
gobject.MainLoop().run()
if __name__ == "__main__":
main()
| apache-2.0 | -883,712,869,169,879,800 | 26.7625 | 66 | 0.723998 | false |
jtyr/ansible-modules-extras | messaging/rabbitmq_queue.py | 29 | 9468 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Manuel Sousa <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: rabbitmq_queue
author: "Manuel Sousa (@manuel-sousa)"
version_added: "2.0"
short_description: This module manages rabbitMQ queues
description:
- This module uses rabbitMQ Rest API to create/delete queues
requirements: [ "requests >= 1.0.0" ]
options:
name:
description:
- Name of the queue to create
required: true
state:
description:
- Whether the queue should be present or absent
- Only present is implemented at the moment
choices: [ "present", "absent" ]
required: false
default: present
login_user:
description:
- rabbitMQ user for connection
required: false
default: guest
login_password:
description:
- rabbitMQ password for connection
required: false
default: guest
login_host:
description:
- rabbitMQ host for connection
required: false
default: localhost
login_port:
description:
- rabbitMQ management api port
required: false
default: 15672
vhost:
description:
- rabbitMQ virtual host
required: false
default: "/"
durable:
description:
- whether queue is durable or not
required: false
choices: [ "yes", "no" ]
default: yes
auto_delete:
description:
- if the queue should delete itself when all consumers have unsubscribed from it
required: false
choices: [ "yes", "no" ]
default: no
message_ttl:
description:
- How long a message can live in queue before it is discarded (milliseconds)
required: False
default: forever
auto_expires:
description:
- How long a queue can be unused before it is automatically deleted (milliseconds)
required: false
default: forever
max_length:
description:
- How many messages the queue can contain before it starts rejecting new ones
required: false
default: no limit
dead_letter_exchange:
description:
- Optional name of an exchange to which messages will be republished if they
- are rejected or expire
required: false
default: None
dead_letter_routing_key:
description:
- Optional replacement routing key to use when a message is dead-lettered.
- Original routing key will be used if unset
required: false
default: None
arguments:
description:
- extra arguments for queue. If defined this argument is a key/value dictionary
required: false
default: {}
'''
EXAMPLES = '''
# Create a queue
- rabbitmq_queue: name=myQueue
# Create a queue on remote host
- rabbitmq_queue: name=myRemoteQueue login_user=user login_password=secret login_host=remote.example.org
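# Illustrative extra example (not from the original docs), combining parameters
# documented above: a durable queue with a message TTL and a dead-letter exchange
- rabbitmq_queue: name=myTTLQueue durable=yes message_ttl=60000 dead_letter_exchange=myDLX dead_letter_routing_key=dead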
'''
import requests
import urllib
import json
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['present', 'absent'], type='str'),
name = dict(required=True, type='str'),
login_user = dict(default='guest', type='str'),
login_password = dict(default='guest', type='str', no_log=True),
login_host = dict(default='localhost', type='str'),
login_port = dict(default='15672', type='str'),
vhost = dict(default='/', type='str'),
durable = dict(default=True, type='bool'),
auto_delete = dict(default=False, type='bool'),
message_ttl = dict(default=None, type='int'),
auto_expires = dict(default=None, type='int'),
max_length = dict(default=None, type='int'),
dead_letter_exchange = dict(default=None, type='str'),
dead_letter_routing_key = dict(default=None, type='str'),
arguments = dict(default=dict(), type='dict')
),
supports_check_mode = True
)
url = "http://%s:%s/api/queues/%s/%s" % (
module.params['login_host'],
module.params['login_port'],
urllib.quote(module.params['vhost'],''),
module.params['name']
)
# Check if queue already exists
r = requests.get( url, auth=(module.params['login_user'],module.params['login_password']))
if r.status_code==200:
queue_exists = True
response = r.json()
elif r.status_code==404:
queue_exists = False
response = r.text
else:
module.fail_json(
msg = "Invalid response from RESTAPI when trying to check if queue exists",
details = r.text
)
if module.params['state']=='present':
change_required = not queue_exists
else:
change_required = queue_exists
# Check if attributes change on existing queue
if not change_required and r.status_code==200 and module.params['state'] == 'present':
if not (
response['durable'] == module.params['durable'] and
response['auto_delete'] == module.params['auto_delete'] and
(
( 'x-message-ttl' in response['arguments'] and response['arguments']['x-message-ttl'] == module.params['message_ttl'] ) or
( 'x-message-ttl' not in response['arguments'] and module.params['message_ttl'] is None )
) and
(
( 'x-expires' in response['arguments'] and response['arguments']['x-expires'] == module.params['auto_expires'] ) or
( 'x-expires' not in response['arguments'] and module.params['auto_expires'] is None )
) and
(
( 'x-max-length' in response['arguments'] and response['arguments']['x-max-length'] == module.params['max_length'] ) or
( 'x-max-length' not in response['arguments'] and module.params['max_length'] is None )
) and
(
( 'x-dead-letter-exchange' in response['arguments'] and response['arguments']['x-dead-letter-exchange'] == module.params['dead_letter_exchange'] ) or
( 'x-dead-letter-exchange' not in response['arguments'] and module.params['dead_letter_exchange'] is None )
) and
(
( 'x-dead-letter-routing-key' in response['arguments'] and response['arguments']['x-dead-letter-routing-key'] == module.params['dead_letter_routing_key'] ) or
( 'x-dead-letter-routing-key' not in response['arguments'] and module.params['dead_letter_routing_key'] is None )
)
):
module.fail_json(
msg = "RabbitMQ RESTAPI doesn't support attribute changes for existing queues",
)
# Copy parameters to arguments as used by RabbitMQ
for k,v in {
'message_ttl': 'x-message-ttl',
'auto_expires': 'x-expires',
'max_length': 'x-max-length',
'dead_letter_exchange': 'x-dead-letter-exchange',
'dead_letter_routing_key': 'x-dead-letter-routing-key'
}.items():
if module.params[k]:
module.params['arguments'][v] = module.params[k]
# Exit if check_mode
if module.check_mode:
module.exit_json(
changed= change_required,
name = module.params['name'],
details = response,
arguments = module.params['arguments']
)
# Do changes
if change_required:
if module.params['state'] == 'present':
r = requests.put(
url,
auth = (module.params['login_user'],module.params['login_password']),
headers = { "content-type": "application/json"},
data = json.dumps({
"durable": module.params['durable'],
"auto_delete": module.params['auto_delete'],
"arguments": module.params['arguments']
})
)
elif module.params['state'] == 'absent':
r = requests.delete( url, auth = (module.params['login_user'],module.params['login_password']))
if r.status_code == 204:
module.exit_json(
changed = True,
name = module.params['name']
)
else:
module.fail_json(
msg = "Error creating queue",
status = r.status_code,
details = r.text
)
else:
module.exit_json(
changed = False,
name = module.params['name']
)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 | -6,747,696,595,846,376,000 | 35 | 174 | 0.578792 | false |
hujiajie/chromium-crosswalk | tools/telemetry/telemetry/internal/backends/android_command_line_backend_unittest.py | 13 | 3617 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import unittest
from telemetry import decorators
from telemetry.internal.backends import android_command_line_backend
from telemetry.testing import options_for_unittests
from devil.android import device_utils
class _MockBackendSettings(object):
pseudo_exec_name = 'chrome'
def __init__(self, path):
self._path = path
def GetCommandLineFile(self, _):
return self._path
class AndroidCommandLineBackendTest(unittest.TestCase):
def _GetDeviceForTest(self):
serial = options_for_unittests.GetCopy().device
if serial:
device = device_utils.DeviceUtils(serial)
return device
else:
devices = device_utils.DeviceUtils.HealthyDevices()
if not devices:
return None
return devices[0]
def testQuoteIfNeededNoEquals(self):
string = 'value'
self.assertEqual(string,
android_command_line_backend._QuoteIfNeeded(string))
def testQuoteIfNeededNoSpaces(self):
string = 'key=valueA'
self.assertEqual(string,
android_command_line_backend._QuoteIfNeeded(string))
def testQuoteIfNeededAlreadyQuoted(self):
string = "key='valueA valueB'"
self.assertEqual(string,
android_command_line_backend._QuoteIfNeeded(string))
def testQuoteIfNeeded(self):
string = 'key=valueA valueB'
expected_output = "key='valueA valueB'"
self.assertEqual(expected_output,
android_command_line_backend._QuoteIfNeeded(string))
@decorators.Enabled('android')
def testSetUpCommandLineFlagsCmdRestored(self):
"""Test that a previous command line file is restored.
Requires a device connected to the host.
"""
device = self._GetDeviceForTest()
if not device:
logging.warning('Skip the test because we cannot find any healthy device')
return
cmd_file = '/data/local/tmp/test_cmd2'
backend_settings = _MockBackendSettings(cmd_file)
startup_args = ['--some', '--test', '--args']
try:
device.WriteFile(cmd_file, 'chrome --args --to --save')
self.assertEqual('chrome --args --to --save',
device.ReadFile(cmd_file).strip())
with android_command_line_backend.SetUpCommandLineFlags(
device, backend_settings, startup_args):
self.assertEqual('chrome --some --test --args',
device.ReadFile(cmd_file).strip())
self.assertEqual('chrome --args --to --save',
device.ReadFile(cmd_file).strip())
finally:
device.RunShellCommand(['rm', '-f', cmd_file], check_return=True)
@decorators.Enabled('android')
def testSetUpCommandLineFlagsCmdRemoved(self):
"""Test that the command line file is removed if it did not exist before.
Requires a device connected to the host.
"""
device = self._GetDeviceForTest()
if not device:
logging.warning('Skip the test because we cannot find any healthy device')
return
cmd_file = '/data/local/tmp/test_cmd'
backend_settings = _MockBackendSettings(cmd_file)
startup_args = ['--some', '--test', '--args']
device.RunShellCommand(['rm', '-f', cmd_file], check_return=True)
with android_command_line_backend.SetUpCommandLineFlags(
device, backend_settings, startup_args):
self.assertEqual('chrome --some --test --args',
device.ReadFile(cmd_file).strip())
self.assertFalse(device.FileExists(cmd_file))
| bsd-3-clause | -2,089,787,591,728,052,000 | 34.116505 | 80 | 0.672104 | false |
Cinntax/home-assistant | homeassistant/components/lcn/scene.py | 7 | 2167 | """Support for LCN scenes."""
import pypck
from homeassistant.components.scene import Scene
from homeassistant.const import CONF_ADDRESS
from . import LcnDevice
from .const import (
CONF_CONNECTIONS,
CONF_OUTPUTS,
CONF_REGISTER,
CONF_SCENE,
CONF_TRANSITION,
DATA_LCN,
OUTPUT_PORTS,
)
from .helpers import get_connection
async def async_setup_platform(
hass, hass_config, async_add_entities, discovery_info=None
):
"""Set up the LCN scene platform."""
if discovery_info is None:
return
devices = []
for config in discovery_info:
address, connection_id = config[CONF_ADDRESS]
addr = pypck.lcn_addr.LcnAddr(*address)
connections = hass.data[DATA_LCN][CONF_CONNECTIONS]
connection = get_connection(connections, connection_id)
address_connection = connection.get_address_conn(addr)
devices.append(LcnScene(config, address_connection))
async_add_entities(devices)
class LcnScene(LcnDevice, Scene):
"""Representation of a LCN scene."""
def __init__(self, config, address_connection):
"""Initialize the LCN scene."""
super().__init__(config, address_connection)
self.register_id = config[CONF_REGISTER]
self.scene_id = config[CONF_SCENE]
self.output_ports = []
self.relay_ports = []
for port in config[CONF_OUTPUTS]:
if port in OUTPUT_PORTS:
self.output_ports.append(pypck.lcn_defs.OutputPort[port])
else: # in RELAY_PORTS
self.relay_ports.append(pypck.lcn_defs.RelayPort[port])
if config[CONF_TRANSITION] is None:
self.transition = None
else:
self.transition = pypck.lcn_defs.time_to_ramp_value(config[CONF_TRANSITION])
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
async def async_activate(self):
"""Activate scene."""
self.address_connection.activate_scene(
self.register_id,
self.scene_id,
self.output_ports,
self.relay_ports,
self.transition,
)
| apache-2.0 | -4,493,372,589,906,779,000 | 28.283784 | 88 | 0.630826 | false |
Ultimaker/Uranium | plugins/FileHandlers/STLWriter/STLWriter.py | 1 | 5501 | # Copyright (c) 2016 Ultimaker B.V.
# Copyright (c) 2013 David Braam
# Uranium is released under the terms of the LGPLv3 or higher.
import struct
import time
from UM.Logger import Logger
from UM.Mesh.MeshWriter import MeshWriter
from UM.i18n import i18nCatalog
catalog = i18nCatalog("uranium")
class STLWriter(MeshWriter):
def write(self, stream, nodes, mode = MeshWriter.OutputMode.TextMode):
"""Write the specified sequence of nodes to a stream in the STL format.
:param stream: The output stream to write to.
:param nodes: A sequence of scene nodes to write to the output stream.
:param mode: The output mode to use for writing scene nodes. Text mode
causes the writer to write in STL's ASCII format. Binary mode causes the
writer to write in STL's binary format. Any other mode is invalid.
"""
try:
MeshWriter._meshNodes(nodes).__next__()
except StopIteration:
Logger.log("e", "There is no mesh to write.")
self.setInformation(catalog.i18nc("@error:no mesh", "There is no mesh to write."))
return False # Don't try to write a file if there is no mesh.
if mode == MeshWriter.OutputMode.TextMode:
self._writeAscii(stream, MeshWriter._meshNodes(nodes))
elif mode == MeshWriter.OutputMode.BinaryMode:
self._writeBinary(stream, MeshWriter._meshNodes(nodes))
else:
Logger.log("e", "Unsupported output mode writing STL to stream")
self.setInformation(catalog.i18nc("@error:not supported", "Unsupported output mode writing STL to stream."))
return False
return True
def _writeAscii(self, stream, nodes):
name = "Uranium STLWriter {0}".format(time.strftime("%a %d %b %Y %H:%M:%S"))
stream.write("solid {0}\n".format(name))
for node in nodes:
mesh_data = node.getMeshData().getTransformed(node.getWorldTransformation())
verts = mesh_data.getVertices()
if verts is None:
continue # No mesh data, nothing to do.
if mesh_data.hasIndices():
for face in mesh_data.getIndices():
stream.write("facet normal 0.0 0.0 0.0\n")
stream.write(" outer loop\n")
v1 = verts[face[0]]
v2 = verts[face[1]]
v3 = verts[face[2]]
stream.write(" vertex {0} {1} {2}\n".format(v1[0], -v1[2], v1[1]))
stream.write(" vertex {0} {1} {2}\n".format(v2[0], -v2[2], v2[1]))
stream.write(" vertex {0} {1} {2}\n".format(v3[0], -v3[2], v3[1]))
stream.write(" endloop\n")
stream.write("endfacet\n")
else:
num_verts = mesh_data.getVertexCount()
for index in range(0, num_verts - 2, 3):
stream.write("facet normal 0.0 0.0 0.0\n")
stream.write(" outer loop\n")
v1 = verts[index]
v2 = verts[index + 1]
v3 = verts[index + 2]
stream.write(" vertex {0} {1} {2}\n".format(v1[0], -v1[2], v1[1]))
stream.write(" vertex {0} {1} {2}\n".format(v2[0], -v2[2], v2[1]))
stream.write(" vertex {0} {1} {2}\n".format(v3[0], -v3[2], v3[1]))
stream.write(" endloop\n")
stream.write("endfacet\n")
stream.write("endsolid {0}\n".format(name))
def _writeBinary(self, stream, nodes):
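# For reference, the binary STL layout written below: an 80-byte header, a
# little-endian uint32 triangle count, then one 50-byte record per triangle
# (a normal vector and three vertices as 12 floats, plus a 2-byte attribute count).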
stream.write("Uranium STLWriter {0}".format(time.strftime("%a %d %b %Y %H:%M:%S")).encode().ljust(80, b"\000"))
face_count = 0
nodes = list(nodes)
for node in nodes:
if node.getMeshData().hasIndices():
face_count += node.getMeshData().getFaceCount()
else:
face_count += node.getMeshData().getVertexCount() / 3
stream.write(struct.pack("<I", int(face_count))) #Write number of faces to STL
for node in nodes:
mesh_data = node.getMeshData().getTransformed(node.getWorldTransformation())
if mesh_data.hasIndices():
verts = mesh_data.getVertices()
for face in mesh_data.getIndices():
v1 = verts[face[0]]
v2 = verts[face[1]]
v3 = verts[face[2]]
stream.write(struct.pack("<fff", 0.0, 0.0, 0.0))
stream.write(struct.pack("<fff", v1[0], -v1[2], v1[1]))
stream.write(struct.pack("<fff", v2[0], -v2[2], v2[1]))
stream.write(struct.pack("<fff", v3[0], -v3[2], v3[1]))
stream.write(struct.pack("<H", 0))
else:
num_verts = mesh_data.getVertexCount()
verts = mesh_data.getVertices()
for index in range(0, num_verts - 1, 3):
v1 = verts[index]
v2 = verts[index + 1]
v3 = verts[index + 2]
stream.write(struct.pack("<fff", 0.0, 0.0, 0.0))
stream.write(struct.pack("<fff", v1[0], -v1[2], v1[1]))
stream.write(struct.pack("<fff", v2[0], -v2[2], v2[1]))
stream.write(struct.pack("<fff", v3[0], -v3[2], v3[1]))
stream.write(struct.pack("<H", 0)) | lgpl-3.0 | -7,929,089,762,363,005,000 | 44.098361 | 120 | 0.525177 | false |
dati91/servo | tests/wpt/web-platform-tests/xhr/resources/access-control-basic-preflight-cache.py | 46 | 1709 | def main(request, response):
def fail(message):
response.content = "FAIL " + request.method + ": " + str(message)
response.status = 400
def getState(token):
server_state = request.server.stash.take(token)
if not server_state:
return "Uninitialized"
return server_state
def setState(state, token):
request.server.stash.put(token, state)
response.headers.set("Access-Control-Allow-Origin", request.headers.get("origin"))
response.headers.set("Access-Control-Allow-Credentials", "true")
token = request.GET.first("token", None)
state = getState(token)
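# Summary of the state machine below: the first request must be the CORS preflight
# OPTIONS (Uninitialized -> OPTIONSSent), the actual PUT follows (-> FirstPUTSent),
# and a second PUT should reuse the cached preflight; if a second OPTIONS arrives
# instead, the preflight cache did not work and the test fails.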
if state == "Uninitialized":
if request.method == "OPTIONS":
response.headers.set("Access-Control-Allow-Methods", "PUT")
response.headers.set("Access-Control-Max-Age", 10)
setState("OPTIONSSent", token)
else:
fail(state)
elif state == "OPTIONSSent":
if request.method == "PUT":
response.content = "PASS: First PUT request."
setState("FirstPUTSent", token)
else:
fail(state)
elif state == "FirstPUTSent":
if request.method == "PUT":
response.content = "PASS: Second PUT request. Preflight worked."
elif request.method == "OPTIONS":
response.headers.set("Access-Control-Allow-Methods", "PUT")
setState("FAILSecondOPTIONSSent", token)
else:
fail(state)
elif state == "FAILSecondOPTIONSSent":
if request.method == "PUT":
fail("Second OPTIONS request was sent. Preflight failed.")
else:
fail(state)
else:
fail(state)
| mpl-2.0 | -8,702,085,592,530,500,000 | 35.361702 | 86 | 0.5945 | false |
magnushiie/geopy | geopy/format.py | 24 | 3013 | """
Formatting helpers for degrees, distances and compass directions.
"""
from geopy import units
from geopy.compat import py3k
if py3k:
unichr = chr # pylint: disable=W0622
# Unicode characters for symbols that appear in coordinate strings.
DEGREE = unichr(176)
PRIME = unichr(8242)
DOUBLE_PRIME = unichr(8243)
ASCII_DEGREE = ''
ASCII_PRIME = "'"
ASCII_DOUBLE_PRIME = '"'
LATIN1_DEGREE = chr(176)
HTML_DEGREE = '°'
HTML_PRIME = '′'
HTML_DOUBLE_PRIME = '″'
XML_DECIMAL_DEGREE = '°'
XML_DECIMAL_PRIME = '′'
XML_DECIMAL_DOUBLE_PRIME = '″'
XML_HEX_DEGREE = '&#xB0;'
XML_HEX_PRIME = '&#x2032;'
XML_HEX_DOUBLE_PRIME = '&#x2033;'
ABBR_DEGREE = 'deg'
ABBR_ARCMIN = 'arcmin'
ABBR_ARCSEC = 'arcsec'
DEGREES_FORMAT = (
"%(degrees)d%(deg)s %(minutes)d%(arcmin)s %(seconds)g%(arcsec)s"
)
UNICODE_SYMBOLS = {
'deg': DEGREE,
'arcmin': PRIME,
'arcsec': DOUBLE_PRIME
}
ASCII_SYMBOLS = {
'deg': ASCII_DEGREE,
'arcmin': ASCII_PRIME,
'arcsec': ASCII_DOUBLE_PRIME
}
LATIN1_SYMBOLS = {
'deg': LATIN1_DEGREE,
'arcmin': ASCII_PRIME,
'arcsec': ASCII_DOUBLE_PRIME
}
HTML_SYMBOLS = {
'deg': HTML_DEGREE,
'arcmin': HTML_PRIME,
'arcsec': HTML_DOUBLE_PRIME
}
XML_SYMBOLS = {
'deg': XML_DECIMAL_DEGREE,
'arcmin': XML_DECIMAL_PRIME,
'arcsec': XML_DECIMAL_DOUBLE_PRIME
}
ABBR_SYMBOLS = {
'deg': ABBR_DEGREE,
'arcmin': ABBR_ARCMIN,
'arcsec': ABBR_ARCSEC
}
def format_degrees(degrees, fmt=DEGREES_FORMAT, symbols=None):
"""
Format decimal degrees as degrees, arcminutes and arcseconds, using the given format string and symbol set.
"""
symbols = symbols or ASCII_SYMBOLS
arcminutes = units.arcminutes(degrees=degrees - int(degrees))
arcseconds = units.arcseconds(arcminutes=arcminutes - int(arcminutes))
format_dict = dict(
symbols,
degrees=degrees,
minutes=abs(arcminutes),
seconds=abs(arcseconds)
)
return fmt % format_dict
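# Illustrative example, computed from the defaults above (not part of the original
# module): format_degrees(45.7625) returns 45 45' 45" with the ASCII symbol set.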
DISTANCE_FORMAT = "%(magnitude)s%(unit)s"
DISTANCE_UNITS = {
'km': lambda d: d,
'm': lambda d: units.meters(kilometers=d),
'mi': lambda d: units.miles(kilometers=d),
'ft': lambda d: units.feet(kilometers=d),
'nm': lambda d: units.nautical(kilometers=d),
'nmi': lambda d: units.nautical(kilometers=d)
}
def format_distance(kilometers, fmt=DISTANCE_FORMAT, unit='km'):
"""
Format a distance given in kilometers as a magnitude plus unit string, converting it to the requested unit first.
"""
magnitude = DISTANCE_UNITS[unit](kilometers)
return fmt % {'magnitude': magnitude, 'unit': unit}
_DIRECTIONS = [
('north', 'N'),
('north by east', 'NbE'),
('north-northeast', 'NNE'),
('northeast by north', 'NEbN'),
('northeast', 'NE'),
('northeast by east', 'NEbE'),
('east-northeast', 'ENE'),
('east by north', 'EbN'),
('east', 'E'),
('east by south', 'EbS'),
('east-southeast', 'ESE'),
('southeast by east', 'SEbE'),
('southeast', 'SE'),
('southeast by south', 'SEbS'),
]
DIRECTIONS, DIRECTIONS_ABBR = zip(*_DIRECTIONS)
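# One compass point spans 11.25 degrees (360 / 32); the dicts below map multiples
# of 11.25 to the corresponding direction names and abbreviations.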
ANGLE_DIRECTIONS = {
n * 11.25: d
for n, d
in enumerate(DIRECTIONS)
}
ANGLE_DIRECTIONS_ABBR = {
n * 11.25: d
for n, d
in enumerate(DIRECTIONS_ABBR)
}
| mit | 4,052,557,929,298,188,300 | 22.912698 | 74 | 0.617989 | false |
tkaitchuck/nupic | external/darwin64/lib/python2.6/site-packages/numpy/core/tests/test_errstate.py | 23 | 1783 | # The following exec statement (or something like it) is needed to
# prevent SyntaxError on Python < 2.5. Even though this is a test,
# SyntaxErrors are not acceptable; on Debian systems, they block
# byte-compilation during install and thus cause the package to fail
# to install.
import sys
if sys.version_info[:2] >= (2, 5):
exec """
from __future__ import with_statement
from numpy.core import *
from numpy.random import rand, randint
from numpy.testing import *
class TestErrstate(TestCase):
def test_invalid(self):
with errstate(all='raise', under='ignore'):
a = -arange(3)
# This should work
with errstate(invalid='ignore'):
sqrt(a)
# While this should fail!
try:
sqrt(a)
except FloatingPointError:
pass
else:
self.fail("Did not raise an invalid error")
def test_divide(self):
with errstate(all='raise', under='ignore'):
a = -arange(3)
# This should work
with errstate(divide='ignore'):
a // 0
# While this should fail!
try:
a // 0
except FloatingPointError:
pass
else:
self.fail("Did not raise divide by zero error")
def test_errcall(self):
def foo(*args):
print(args)
olderrcall = geterrcall()
with errstate(call=foo):
assert(geterrcall() is foo), 'call is not foo'
with errstate(call=None):
assert(geterrcall() is None), 'call is not None'
assert(geterrcall() is olderrcall), 'call is not olderrcall'
"""
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 | -5,993,280,319,917,633,000 | 30.280702 | 68 | 0.556927 | false |
boone/ansible-modules-core | cloud/rackspace/rax_scaling_policy.py | 157 | 9070 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_scaling_policy
short_description: Manipulate Rackspace Cloud Autoscale Scaling Policy
description:
- Manipulate Rackspace Cloud Autoscale Scaling Policy
version_added: 1.7
options:
at:
description:
- The UTC time when this policy will be executed. The time must be
formatted according to C(yyyy-MM-dd'T'HH:mm:ss.SSS) such as
C(2013-05-19T08:07:08Z)
change:
description:
- The change, either as a number of servers or as a percentage, to make
in the scaling group. If this is a percentage, you must set
I(is_percent) to C(true) also.
cron:
description:
- The time when the policy will be executed, as a cron entry. For
example, setting this parameter to C(1 0 * * *) executes the policy
daily at one minute past midnight.
cooldown:
description:
- The period of time, in seconds, that must pass before any scaling can
occur after the previous scaling. Must be an integer between 0 and
86400 (24 hrs).
desired_capacity:
description:
- The desired server capacity of the scaling the group; that is, how
many servers should be in the scaling group.
is_percent:
description:
- Whether the value in I(change) is a percent value
default: false
name:
description:
- Name to give the policy
required: true
policy_type:
description:
- The type of policy that will be executed for the current release.
choices:
- webhook
- schedule
required: true
scaling_group:
description:
- Name of the scaling group that this policy will be added to
required: true
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
author: "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
---
- hosts: localhost
gather_facts: false
connection: local
tasks:
- rax_scaling_policy:
credentials: ~/.raxpub
region: ORD
at: '2013-05-19T08:07:08Z'
change: 25
cooldown: 300
is_percent: true
name: ASG Test Policy - at
policy_type: schedule
scaling_group: ASG Test
register: asps_at
- rax_scaling_policy:
credentials: ~/.raxpub
region: ORD
cron: '1 0 * * *'
change: 25
cooldown: 300
is_percent: true
name: ASG Test Policy - cron
policy_type: schedule
scaling_group: ASG Test
register: asp_cron
- rax_scaling_policy:
credentials: ~/.raxpub
region: ORD
cooldown: 300
desired_capacity: 5
name: ASG Test Policy - webhook
policy_type: webhook
scaling_group: ASG Test
register: asp_webhook
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def rax_asp(module, at=None, change=0, cron=None, cooldown=300,
desired_capacity=0, is_percent=False, name=None,
policy_type=None, scaling_group=None, state='present'):
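# Ensure the named policy exists on the scaling group (state=present), updating any
# attribute that differs, or remove it (state=absent). scaling_group may be given
# either as a group UUID or as a group name.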
changed = False
au = pyrax.autoscale
if not au:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
try:
UUID(scaling_group)
except ValueError:
try:
sg = au.find(name=scaling_group)
except Exception, e:
module.fail_json(msg='%s' % e.message)
else:
try:
sg = au.get(scaling_group)
except Exception, e:
module.fail_json(msg='%s' % e.message)
if state == 'present':
policies = filter(lambda p: name == p.name, sg.list_policies())
if len(policies) > 1:
module.fail_json(msg='No unique policy match found by name')
if at:
args = dict(at=at)
elif cron:
args = dict(cron=cron)
else:
args = None
if not policies:
try:
policy = sg.add_policy(name, policy_type=policy_type,
cooldown=cooldown, change=change,
is_percent=is_percent,
desired_capacity=desired_capacity,
args=args)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
else:
policy = policies[0]
kwargs = {}
if policy_type != policy.type:
kwargs['policy_type'] = policy_type
if cooldown != policy.cooldown:
kwargs['cooldown'] = cooldown
if hasattr(policy, 'change') and change != policy.change:
kwargs['change'] = change
if hasattr(policy, 'changePercent') and is_percent is False:
kwargs['change'] = change
kwargs['is_percent'] = False
elif hasattr(policy, 'change') and is_percent is True:
kwargs['change'] = change
kwargs['is_percent'] = True
if hasattr(policy, 'desiredCapacity') and change:
kwargs['change'] = change
elif ((hasattr(policy, 'change') or
hasattr(policy, 'changePercent')) and desired_capacity):
kwargs['desired_capacity'] = desired_capacity
if hasattr(policy, 'args') and args != policy.args:
kwargs['args'] = args
if kwargs:
policy.update(**kwargs)
changed = True
policy.get()
module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy))
else:
try:
policies = filter(lambda p: name == p.name, sg.list_policies())
if len(policies) > 1:
module.fail_json(msg='No unique policy match found by name')
elif not policies:
policy = {}
else:
policy.delete()
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy))
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
at=dict(),
change=dict(type='int'),
cron=dict(),
cooldown=dict(type='int', default=300),
desired_capacity=dict(type='int'),
is_percent=dict(type='bool', default=False),
name=dict(required=True),
policy_type=dict(required=True, choices=['webhook', 'schedule']),
scaling_group=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
mutually_exclusive=[
['cron', 'at'],
['change', 'desired_capacity'],
]
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
at = module.params.get('at')
change = module.params.get('change')
cron = module.params.get('cron')
cooldown = module.params.get('cooldown')
desired_capacity = module.params.get('desired_capacity')
is_percent = module.params.get('is_percent')
name = module.params.get('name')
policy_type = module.params.get('policy_type')
scaling_group = module.params.get('scaling_group')
state = module.params.get('state')
if (at or cron) and policy_type == 'webhook':
module.fail_json(msg='policy_type=schedule is required for a time '
'based policy')
setup_rax_module(module, pyrax)
rax_asp(module, at=at, change=change, cron=cron, cooldown=cooldown,
desired_capacity=desired_capacity, is_percent=is_percent,
name=name, policy_type=policy_type, scaling_group=scaling_group,
state=state)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# invoke the module
main()
| gpl-3.0 | 9,101,954,445,078,879,000 | 31.04947 | 79 | 0.589636 | false |
uni-peter-zheng/tp-libvirt | libvirt/tests/src/virsh_cmd/host/virsh_capabilities.py | 7 | 5687 | import logging
import re
from autotest.client.shared import utils, error
from autotest.client import os_dep
from virttest import libvirt_vm, virsh, utils_libvirtd, utils_misc
from virttest.libvirt_xml import capability_xml
def run(test, params, env):
"""
Test the command virsh capabilities
(1) Call virsh capabilities
(2) Call virsh capabilities with an unexpected option
(3) Call virsh capabilities with libvirtd service stop
"""
def compare_capabilities_xml(source):
cap_xml = capability_xml.CapabilityXML()
cap_xml.xml = source
# Check that host has a non-empty UUID tag.
xml_uuid = cap_xml.uuid
logging.debug("Host UUID (capabilities_xml): %s" % xml_uuid)
if xml_uuid == "":
raise error.TestFail("The host uuid in capabilities_xml is none!")
# Check the host arch.
xml_arch = cap_xml.arch
logging.debug("Host arch (capabilities_xml): %s", xml_arch)
exp_arch = utils.run("arch", ignore_status=True).stdout.strip()
if cmp(xml_arch, exp_arch) != 0:
raise error.TestFail("The host arch in capabilities_xml is expected"
" to be %s, but get %s" % (exp_arch, xml_arch))
# Check the host cpu count.
xml_cpu_count = cap_xml.cpu_count
logging.debug("Host cpus count (capabilities_xml): %s", xml_cpu_count)
cmd = "grep processor /proc/cpuinfo | wc -l"
exp_cpu_count = int(utils.run(cmd, ignore_status=True).stdout.strip())
if xml_cpu_count != exp_cpu_count:
raise error.TestFail("Host cpus count is expected to be %s, but get "
"%s" % (exp_cpu_count, xml_cpu_count))
# Check the arch of guest supported.
guest_capa = cap_xml.get_guest_capabilities()
logging.debug(guest_capa)
try:
img = utils_misc.find_command("qemu-kvm")
except ValueError:
raise error.TestNAError("Cannot find qemu-kvm")
if re.search("ppc", utils.run("arch").stdout):
cmd = img + " --cpu ? | grep ppc"
else:
cmd = img + " --cpu ? | grep qemu"
cmd_result = utils.run(cmd, ignore_status=True)
for guest in cap_xml.xmltreefile.findall('guest'):
guest_wordsize = guest.find('arch').find('wordsize').text
logging.debug("Arch of guest supported (capabilities_xml):%s",
guest_wordsize)
if not re.search(guest_wordsize, cmd_result.stdout.strip()):
raise error.TestFail("The capabilities_xml gives an extra arch "
"of guest to support!")
# Check the type of hypervisor.
first_guest = cap_xml.xmltreefile.findall('guest')[0]
first_domain = first_guest.find('arch').findall('domain')[0]
guest_domain_type = first_domain.get('type')
logging.debug("Hypervisor (capabilities_xml):%s", guest_domain_type)
cmd_result = utils.run("virsh uri", ignore_status=True)
if not re.search(guest_domain_type, cmd_result.stdout.strip()):
raise error.TestFail("The capabilities_xml gives an different "
"hypervisor")
# check power management support.
try:
pm_cmd = os_dep.command('pm-is-supported')
pm_cap_map = {'suspend': 'suspend_mem',
'hibernate': 'suspend_disk',
'suspend-hybrid': 'suspend_hybrid',
}
exp_pms = []
for opt in pm_cap_map:
cmd = '%s --%s' % (pm_cmd, opt)
res = utils.run(cmd, ignore_status=True)
if res.exit_status == 0:
exp_pms.append(pm_cap_map[opt])
pms = cap_xml.power_management_list
if set(exp_pms) != set(pms):
raise error.TestFail("Expected supported PMs are %s, got %s "
"instead." % (exp_pms, pms))
except ValueError:
logging.debug('Power management checking is skipped, since command '
'pm-is-supported is not found.')
connect_uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
"default"))
# Prepare libvirtd service
if "libvirtd" in params:
libvirtd = params.get("libvirtd")
if libvirtd == "off":
utils_libvirtd.libvirtd_stop()
# Run test case
option = params.get("virsh_cap_options")
try:
output = virsh.capabilities(option, uri=connect_uri,
ignore_status=False, debug=True)
status = 0 # good
except error.CmdError:
status = 1 # bad
output = ''
# Recover libvirtd service start
if libvirtd == "off":
utils_libvirtd.libvirtd_start()
# Check status_error
status_error = params.get("status_error")
if status_error == "yes":
if status == 0:
if libvirtd == "off":
raise error.TestFail("Command 'virsh capabilities' succeeded "
"with libvirtd service stopped, incorrect")
else:
raise error.TestFail("Command 'virsh capabilities %s' succeeded "
"(incorrect command)" % option)
elif status_error == "no":
compare_capabilities_xml(output)
if status != 0:
raise error.TestFail("Command 'virsh capabilities %s' failed "
"(correct command)" % option)
| gpl-2.0 | -5,878,921,287,191,791,000 | 42.083333 | 81 | 0.554598 | false |
CasparLi/calibre | src/calibre/ebooks/pdf/render/engine.py | 10 | 14304 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import sys, traceback, math
from collections import namedtuple
from functools import wraps, partial
from future_builtins import map, zip
from PyQt5.Qt import (QPaintEngine, QPaintDevice, Qt, QTransform, QBrush)
from calibre.constants import plugins
from calibre.ebooks.pdf.render.serialize import (PDFStream, Path)
from calibre.ebooks.pdf.render.common import inch, A4, fmtnum
from calibre.ebooks.pdf.render.graphics import convert_path, Graphics
from calibre.utils.fonts.sfnt.container import Sfnt, UnsupportedFont
from calibre.utils.fonts.sfnt.metrics import FontMetrics
Point = namedtuple('Point', 'x y')
ColorState = namedtuple('ColorState', 'color opacity do')
GlyphInfo = namedtuple('GlyphInfo', 'name size stretch positions indices')
def repr_transform(t):
vals = map(fmtnum, (t.m11(), t.m12(), t.m21(), t.m22(), t.dx(), t.dy()))
return '[%s]'%' '.join(vals)
def store_error(func):
@wraps(func)
def errh(self, *args, **kwargs):
try:
func(self, *args, **kwargs)
except:
self.errors_occurred = True
self.errors(traceback.format_exc())
return errh
class Font(FontMetrics):
def __init__(self, sfnt):
FontMetrics.__init__(self, sfnt)
self.glyph_map = {}
class PdfEngine(QPaintEngine):
FEATURES = QPaintEngine.AllFeatures & ~(
QPaintEngine.PorterDuff | QPaintEngine.PerspectiveTransform |
QPaintEngine.ObjectBoundingModeGradients |
QPaintEngine.RadialGradientFill |
QPaintEngine.ConicalGradientFill
)
def __init__(self, file_object, page_width, page_height, left_margin,
top_margin, right_margin, bottom_margin, width, height,
errors=print, debug=print, compress=True,
mark_links=False):
QPaintEngine.__init__(self, self.FEATURES)
self.file_object = file_object
self.compress, self.mark_links = compress, mark_links
self.page_height, self.page_width = page_height, page_width
self.left_margin, self.top_margin = left_margin, top_margin
self.right_margin, self.bottom_margin = right_margin, bottom_margin
self.pixel_width, self.pixel_height = width, height
# Setup a co-ordinate transform that allows us to use co-ords
# from Qt's pixel based co-ordinate system with its origin at the top
# left corner. PDF's co-ordinate system is based on pts and has its
# origin in the bottom left corner. We also have to implement the page
# margins. Therefore, we need to translate, scale and reflect about the
# x-axis.
dy = self.page_height - self.top_margin
dx = self.left_margin
sx = (self.page_width - self.left_margin -
self.right_margin) / self.pixel_width
sy = (self.page_height - self.top_margin -
self.bottom_margin) / self.pixel_height
self.pdf_system = QTransform(sx, 0, 0, -sy, dx, dy)
self.graphics = Graphics(self.pixel_width, self.pixel_height)
self.errors_occurred = False
self.errors, self.debug = errors, debug
self.fonts = {}
self.current_page_num = 1
self.current_page_inited = False
self.qt_hack, err = plugins['qt_hack']
if err:
raise RuntimeError('Failed to load qt_hack with err: %s'%err)
def apply_graphics_state(self):
self.graphics(self.pdf_system, self.painter())
def resolve_fill(self, rect):
self.graphics.resolve_fill(rect, self.pdf_system,
self.painter().transform())
@property
def do_fill(self):
return self.graphics.current_state.do_fill
@property
def do_stroke(self):
return self.graphics.current_state.do_stroke
def init_page(self):
self.pdf.transform(self.pdf_system)
self.pdf.apply_fill(color=(1, 1, 1)) # QPainter has a default background brush of white
self.graphics.reset()
self.pdf.save_stack()
self.current_page_inited = True
def begin(self, device):
if not hasattr(self, 'pdf'):
try:
self.pdf = PDFStream(self.file_object, (self.page_width,
self.page_height), compress=self.compress,
mark_links=self.mark_links,
debug=self.debug)
self.graphics.begin(self.pdf)
except:
self.errors(traceback.format_exc())
self.errors_occurred = True
return False
return True
def end_page(self):
if self.current_page_inited:
self.pdf.restore_stack()
self.pdf.end_page()
self.current_page_inited = False
self.current_page_num += 1
def end(self):
try:
self.end_page()
self.pdf.end()
except:
self.errors(traceback.format_exc())
self.errors_occurred = True
return False
finally:
self.pdf = self.file_object = None
return True
def type(self):
return QPaintEngine.Pdf
def add_image(self, img, cache_key):
if img.isNull():
return
return self.pdf.add_image(img, cache_key)
@store_error
def drawTiledPixmap(self, rect, pixmap, point):
self.apply_graphics_state()
brush = QBrush(pixmap)
bl = rect.topLeft()
color, opacity, pattern, do_fill = self.graphics.convert_brush(
brush, bl-point, 1.0, self.pdf_system,
self.painter().transform())
self.pdf.save_stack()
self.pdf.apply_fill(color, pattern)
self.pdf.draw_rect(bl.x(), bl.y(), rect.width(), rect.height(),
stroke=False, fill=True)
self.pdf.restore_stack()
@store_error
def drawPixmap(self, rect, pixmap, source_rect):
self.apply_graphics_state()
source_rect = source_rect.toRect()
pixmap = (pixmap if source_rect == pixmap.rect() else
pixmap.copy(source_rect))
image = pixmap.toImage()
ref = self.add_image(image, pixmap.cacheKey())
if ref is not None:
self.pdf.draw_image(rect.x(), rect.y(), rect.width(),
rect.height(), ref)
@store_error
def drawImage(self, rect, image, source_rect, flags=Qt.AutoColor):
self.apply_graphics_state()
source_rect = source_rect.toRect()
image = (image if source_rect == image.rect() else
image.copy(source_rect))
ref = self.add_image(image, image.cacheKey())
if ref is not None:
self.pdf.draw_image(rect.x(), rect.y(), rect.width(),
rect.height(), ref)
@store_error
def updateState(self, state):
self.graphics.update_state(state, self.painter())
@store_error
def drawPath(self, path):
self.apply_graphics_state()
p = convert_path(path)
fill_rule = {Qt.OddEvenFill:'evenodd',
Qt.WindingFill:'winding'}[path.fillRule()]
self.pdf.draw_path(p, stroke=self.do_stroke,
fill=self.do_fill, fill_rule=fill_rule)
@store_error
def drawPoints(self, points):
self.apply_graphics_state()
p = Path()
for point in points:
p.move_to(point.x(), point.y())
p.line_to(point.x(), point.y() + 0.001)
self.pdf.draw_path(p, stroke=self.do_stroke, fill=False)
@store_error
def drawRects(self, rects):
self.apply_graphics_state()
with self.graphics:
for rect in rects:
self.resolve_fill(rect)
bl = rect.topLeft()
self.pdf.draw_rect(bl.x(), bl.y(), rect.width(), rect.height(),
stroke=self.do_stroke, fill=self.do_fill)
def create_sfnt(self, text_item):
get_table = partial(self.qt_hack.get_sfnt_table, text_item)
try:
ans = Font(Sfnt(get_table))
except UnsupportedFont as e:
raise UnsupportedFont('The font %s is not a valid sfnt. Error: %s'%(
text_item.font().family(), e))
glyph_map = self.qt_hack.get_glyph_map(text_item)
gm = {}
for uc, glyph_id in enumerate(glyph_map):
if glyph_id not in gm:
gm[glyph_id] = unichr(uc)
ans.full_glyph_map = gm
return ans
@store_error
def drawTextItem(self, point, text_item):
# return super(PdfEngine, self).drawTextItem(point, text_item)
self.apply_graphics_state()
gi = GlyphInfo(*self.qt_hack.get_glyphs(point, text_item))
if not gi.indices:
return
name = hash(gi.name)
if name not in self.fonts:
try:
self.fonts[name] = self.create_sfnt(text_item)
except UnsupportedFont:
return super(PdfEngine, self).drawTextItem(point, text_item)
metrics = self.fonts[name]
for glyph_id in gi.indices:
try:
metrics.glyph_map[glyph_id] = metrics.full_glyph_map[glyph_id]
except (KeyError, ValueError):
pass
glyphs = []
last_x = last_y = 0
for glyph_index, (x, y) in zip(gi.indices, gi.positions):
glyphs.append((x-last_x, last_y - y, glyph_index))
last_x, last_y = x, y
self.pdf.draw_glyph_run([gi.stretch, 0, 0, -1, 0, 0], gi.size, metrics,
glyphs)
@store_error
def drawPolygon(self, points, mode):
self.apply_graphics_state()
if not points:
return
p = Path()
p.move_to(points[0].x(), points[0].y())
for point in points[1:]:
p.line_to(point.x(), point.y())
p.close()
fill_rule = {self.OddEvenMode:'evenodd',
self.WindingMode:'winding'}.get(mode, 'evenodd')
self.pdf.draw_path(p, stroke=True, fill_rule=fill_rule,
fill=(mode in (self.OddEvenMode, self.WindingMode, self.ConvexMode)))
def set_metadata(self, *args, **kwargs):
self.pdf.set_metadata(*args, **kwargs)
def add_outline(self, toc):
self.pdf.links.add_outline(toc)
def add_links(self, current_item, start_page, links, anchors):
for pos in anchors.itervalues():
pos['left'], pos['top'] = self.pdf_system.map(pos['left'], pos['top'])
for link in links:
pos = link[1]
llx = pos['left']
lly = pos['top'] + pos['height']
urx = pos['left'] + pos['width']
ury = pos['top']
llx, lly = self.pdf_system.map(llx, lly)
urx, ury = self.pdf_system.map(urx, ury)
link[1] = pos['column'] + start_page
link.append((llx, lly, urx, ury))
self.pdf.links.add(current_item, start_page, links, anchors)
class PdfDevice(QPaintDevice): # {{{
def __init__(self, file_object, page_size=A4, left_margin=inch,
top_margin=inch, right_margin=inch, bottom_margin=inch,
xdpi=1200, ydpi=1200, errors=print, debug=print,
compress=True, mark_links=False):
QPaintDevice.__init__(self)
self.xdpi, self.ydpi = xdpi, ydpi
self.page_width, self.page_height = page_size
self.body_width = self.page_width - left_margin - right_margin
self.body_height = self.page_height - top_margin - bottom_margin
self.left_margin, self.right_margin = left_margin, right_margin
self.top_margin, self.bottom_margin = top_margin, bottom_margin
self.engine = PdfEngine(file_object, self.page_width, self.page_height,
left_margin, top_margin, right_margin,
bottom_margin, self.width(), self.height(),
errors=errors, debug=debug, compress=compress,
mark_links=mark_links)
self.add_outline = self.engine.add_outline
self.add_links = self.engine.add_links
def paintEngine(self):
return self.engine
def metric(self, m):
if m in (self.PdmDpiX, self.PdmPhysicalDpiX):
return self.xdpi
if m in (self.PdmDpiY, self.PdmPhysicalDpiY):
return self.ydpi
if m == self.PdmDepth:
return 32
if m == self.PdmNumColors:
return sys.maxint
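        # 0.35277777777778 is 25.4 mm/inch divided by 72 points/inch, so the body
        # size (kept in points) converts directly to millimetres below, while the
        # pixel metrics scale the same point values by dpi/72.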
if m == self.PdmWidthMM:
return int(round(self.body_width * 0.35277777777778))
if m == self.PdmHeightMM:
return int(round(self.body_height * 0.35277777777778))
if m == self.PdmWidth:
return int(round(self.body_width * self.xdpi / 72.0))
if m == self.PdmHeight:
return int(round(self.body_height * self.ydpi / 72.0))
return 0
def end_page(self, *args, **kwargs):
self.engine.end_page(*args, **kwargs)
def init_page(self):
self.engine.init_page()
@property
def full_page_rect(self):
page_width = int(math.ceil(self.page_width * self.xdpi / 72.0))
lm = int(math.ceil(self.left_margin * self.xdpi / 72.0))
page_height = int(math.ceil(self.page_height * self.ydpi / 72.0))
tm = int(math.ceil(self.top_margin * self.ydpi / 72.0))
return (-lm, -tm, page_width+1, page_height+1)
@property
def current_page_num(self):
return self.engine.current_page_num
@property
def errors_occurred(self):
return self.engine.errors_occurred
def to_px(self, pt, vertical=True):
return pt * (self.height()/self.page_height if vertical else
self.width()/self.page_width)
def set_metadata(self, *args, **kwargs):
self.engine.set_metadata(*args, **kwargs)
# }}}
| gpl-3.0 | -7,610,861,351,182,947,000 | 36.941645 | 96 | 0.57851 | false |
nmiroshnichenko/scripts | diskinfo/diskinfo.py | 1 | 4642 | #!/usr/bin/env python
import sys
_LINUX_PARTITIONS_FILE_NAME = '/proc/partitions'
_LINUX_PARTITION_SIZE_MULTIPLIER = 1024
_LINUX_DISK_TYPE_NUMBERS = (3, 8)
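# Major device numbers in /proc/partitions: 3 = IDE disks (hd*), 8 = SCSI/SATA
# disks (sd*); only entries with these majors are treated as disks/partitions.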
class DiskInfo(object):
def get_disk_full_list(self):
raise NotImplementedError("Should be called in subclasses")
class DiskInfoLinux(DiskInfo):
def get_disk_full_list(self):
return self._get_linux_disk_list()
@staticmethod
def _get_linux_disk_list():
disk_list = []
with open(_LINUX_PARTITIONS_FILE_NAME) as file:
# omit header and empty line
lines_total = file.readlines()[2:]
hard_disk_number = 0
partition_number = 0
current_hard_disk = None
for line in lines_total:
# fields: major minor #blocks name
fields = line.split()
major = int(fields[0])
if major not in _LINUX_DISK_TYPE_NUMBERS:
continue
size = int(fields[2]) * _LINUX_PARTITION_SIZE_MULTIPLIER
is_partition = fields[3][-1].isdigit()
if is_partition:
partition_number += 1
disk = Disk(partition_number, size, current_hard_disk)
else:
partition_number = 0
hard_disk_number += 1
disk = Disk(hard_disk_number, size, None)
current_hard_disk = disk
disk_list.append(disk)
return disk_list
class DiskInfoWindows(DiskInfo):
def get_disk_full_list(self):
return self._get_windows_disk_list()
@staticmethod
def _get_windows_disk_list():
disk_list = []
try:
import win32com.client
except ImportError:
print 'ERROR: you should install lib: pip install pypiwin32'
sys.exit(66)
strComputer = '.'
objWMIService = win32com.client.Dispatch('WbemScripting.SWbemLocator')
objSWbemServices = objWMIService.ConnectServer(strComputer,'root\cimv2')
colItems = objSWbemServices.ExecQuery('Select * from Win32_DiskDrive')
hd_list = []
for objItem in colItems:
hd_list.append((objItem.DeviceID, objItem.Size))
hd_list.sort()
hard_disk_number = 0
current_hard_disk = None
for hd in hd_list:
partition_number = 0
hard_disk_number += 1
disk = Disk(hard_disk_number, hd[1], None)
current_hard_disk = disk
disk_list.append(disk)
colItems = objSWbemServices.ExecQuery(
'Select * from Win32_DiskPartition where DiskIndex={}'.format(hard_disk_number - 1))
for objItem in colItems:
partition_number += 1
disk = Disk(partition_number, objItem.Size, current_hard_disk)
disk_list.append(disk)
return disk_list
class Disk(object):
def __init__(self, number, size, parent=None):
self.number = int(number)
self.size = int(size)
'''size in bytes'''
self.parent = parent
'''None for hard disk or parent hard disk for partition'''
def __repr__(self):
return {self.number: self.size}.__str__()
def get_disk_full_list():
platform = sys.platform
if platform.lower().startswith('linux'):
return DiskInfoLinux().get_disk_full_list()
elif platform.lower().startswith('win'):
return DiskInfoWindows().get_disk_full_list()
else:
print 'ERROR: unsupported platform: {}'.format(platform)
sys.exit(65)
def print_disk_list(disk_list):
print '\n'.join([str(e) for e in disk_list])
def main():
import argparse
parser = argparse.ArgumentParser(description='Print disk info')
parser.add_argument('hard_disk_number', type=int, nargs='?', help='hard disk number')
args = parser.parse_args()
hd_number = args.hard_disk_number
if hd_number is not None and hd_number < 1:
parser.error('invalid disk number: {}'.format(hd_number))
disk_full_list = get_disk_full_list()
hard_disk_list = [e for e in disk_full_list if e.parent is None]
result_list = []
if args.hard_disk_number is None:
result_list.extend(hard_disk_list)
else:
if hd_number not in [e.number for e in hard_disk_list]:
parser.error('no such disk: {}'.format(hd_number))
result_list.extend(
[e for e in disk_full_list if e.parent is not None and e.parent.number == hd_number])
print_disk_list(result_list)
if __name__ == '__main__':
main()
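# Example invocations (each disk/partition prints as "{number: size_in_bytes}"):
#   ./diskinfo.py        # list physical disks
#   ./diskinfo.py 1      # list the partitions of disk 1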
| mit | -8,487,443,974,152,042,000 | 32.157143 | 100 | 0.582508 | false |
nathanielvarona/airflow | tests/test_utils/perf/perf_kit/sqlalchemy.py | 8 | 8062 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import time
import traceback
from typing import Callable
from sqlalchemy import event
def _pretty_format_sql(text: str):
import pygments
from pygments.formatters.terminal import TerminalFormatter
from pygments.lexers.sql import SqlLexer
text = pygments.highlight(code=text, formatter=TerminalFormatter(), lexer=SqlLexer()).rstrip()
return text
class TraceQueries:
"""
Tracking SQL queries in a code block.
:param display_num: If True, displays the query number.
:param display_time: If True, displays the query execution time.
:param display_trace: If True, displays the simplified (one-line) stack trace
:param display_sql: If True, displays the SQL statements
:param display_parameters: If True, display SQL statement parameters
:param print_fn: The function used to display the text. By default,``builtins.print``
"""
def __init__(
self,
*,
display_num: bool = True,
display_time: bool = True,
display_trace: bool = True,
display_sql: bool = False,
display_parameters: bool = True,
print_fn: Callable[[str], None] = print,
):
self.display_num = display_num
self.display_time = display_time
self.display_trace = display_trace
self.display_sql = display_sql
self.display_parameters = display_parameters
self.print_fn = print_fn
self.query_count = 0
def before_cursor_execute(
self,
conn,
cursor, # pylint: disable=unused-argument
statement, # pylint: disable=unused-argument
parameters, # pylint: disable=unused-argument
context, # pylint: disable=unused-argument
executemany,
): # pylint: disable=unused-argument
"""
Executed before cursor.
:param conn: connection
:param cursor: cursor
:param statement: statement
:param parameters: parameters
:param context: context
:param executemany: whether many statements executed
:return:
"""
conn.info.setdefault("query_start_time", []).append(time.monotonic())
self.query_count += 1
def after_cursor_execute(
self,
conn,
cursor, # pylint: disable=unused-argument
statement,
parameters,
context, # pylint: disable=unused-argument
executemany,
): # pylint: disable=unused-argument
"""
Executed after cursor.
:param conn: connection
:param cursor: cursor
:param statement: statement
:param parameters: parameters
:param context: context
:param executemany: whether many statements executed
:return:
"""
total = time.monotonic() - conn.info["query_start_time"].pop()
file_names = [
f"{f.filename}:{f.name}:{f.lineno}"
for f in traceback.extract_stack()
if "sqlalchemy" not in f.filename
]
file_name = file_names[-1] if file_names else ""
stack = [f for f in traceback.extract_stack() if "sqlalchemy" not in f.filename]
stack_info = " > ".join([f"{f.filename.rpartition('/')[-1]}:{f.name}:{f.lineno}" for f in stack][-7:])
conn.info.setdefault("query_start_time", []).append(time.monotonic())
output_parts = []
if self.display_num:
output_parts.append(f"{self.query_count:>3}")
if self.display_time:
output_parts.append(f"{total:.5f}")
if self.display_trace:
output_parts.extend([f"{file_name}", f"{stack_info}"])
if self.display_sql:
sql_oneline = statement.replace("\n", " ")
output_parts.append(f"{_pretty_format_sql(sql_oneline)}")
if self.display_parameters:
output_parts.append(f"{parameters}")
self.print_fn(" | ".join(output_parts))
def __enter__(self):
import airflow.settings
event.listen(airflow.settings.engine, "before_cursor_execute", self.before_cursor_execute)
event.listen(airflow.settings.engine, "after_cursor_execute", self.after_cursor_execute)
def __exit__(self, type_, value, traceback): # noqa pylint: disable=redefined-outer-name
import airflow.settings
event.remove(airflow.settings.engine, "before_cursor_execute", self.before_cursor_execute)
event.remove(airflow.settings.engine, "after_cursor_execute", self.after_cursor_execute)
trace_queries = TraceQueries # pylint: disable=invalid-name
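# Minimal usage sketch (assumes Airflow settings and a database are initialised;
# `create_session` is only used here for illustration):
#
#   from airflow.utils.session import create_session
#
#   with trace_queries(display_sql=True):
#       with create_session() as session:
#           session.execute("SELECT 1")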
class CountQueriesResult:
"""
Counter for number of queries.
"""
def __init__(self):
self.count = 0
class CountQueries:
"""
Counts the number of queries sent to Airflow Database in a given context.
Does not support multiple processes. When a new process is started in context, its queries will
not be included.
:param print_fn: The function used to display the text. By default, ``builtins.print``
"""
def __init__(self, print_fn: Callable[[str], None] = print):
self.result = CountQueriesResult()
self.print_fn = print_fn
def __enter__(self):
import airflow.settings
event.listen(airflow.settings.engine, "after_cursor_execute", self.after_cursor_execute)
return self.result
def __exit__(self, type_, value, traceback): # noqa pylint: disable=redefined-outer-name
import airflow.settings
event.remove(airflow.settings.engine, "after_cursor_execute", self.after_cursor_execute)
self.print_fn(f"Count SQL queries: {self.result.count}")
def after_cursor_execute(
self,
conn, # pylint: disable=unused-argument
cursor, # pylint: disable=unused-argument
statement, # pylint: disable=unused-argument
parameters, # pylint: disable=unused-argument
context, # pylint: disable=unused-argument
executemany,
): # pylint: disable=unused-argument
"""
Executed after cursor.
:param conn: connection
:param cursor: cursor
:param statement: statement
:param parameters: parameters
:param context: context
:param executemany: whether many statements executed
"""
self.result.count += 1
count_queries = CountQueries # pylint: disable=invalid-name
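# count_queries can wrap any block that talks to the Airflow database, e.g.:
#
#   with count_queries() as result:
#       ...  # ORM / session work
#   # result.count now holds the number of statements that were executed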
if __name__ == "__main__":
# Example:
    def case():
        """Example case: process a generated elastic DAG file while queries are traced and counted."""
import logging
from unittest import mock
from airflow.jobs.scheduler_job import DagFileProcessor
with mock.patch.dict(
"os.environ",
{
"PERF_DAGS_COUNT": "200",
"PERF_TASKS_COUNT": "10",
"PERF_START_AGO": "2d",
"PERF_SCHEDULE_INTERVAL": "None",
"PERF_SHAPE": "no_structure",
},
):
log = logging.getLogger(__name__)
processor = DagFileProcessor(dag_ids=[], log=log)
dag_file = os.path.join(os.path.dirname(__file__), os.path.pardir, "dags", "elastic_dag.py")
processor.process_file(file_path=dag_file, callback_requests=[])
with trace_queries(), count_queries():
case()
| apache-2.0 | 9,090,611,227,884,521,000 | 32.732218 | 110 | 0.629496 | false |
ShownX/incubator-mxnet | example/rcnn/rcnn/processing/nms.py | 41 | 2329 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
from ..cython.cpu_nms import cpu_nms
try:
from ..cython.gpu_nms import gpu_nms
except ImportError:
gpu_nms = None
def py_nms_wrapper(thresh):
def _nms(dets):
return nms(dets, thresh)
return _nms
def cpu_nms_wrapper(thresh):
def _nms(dets):
return cpu_nms(dets, thresh)
return _nms
def gpu_nms_wrapper(thresh, device_id):
def _nms(dets):
return gpu_nms(dets, thresh, device_id)
if gpu_nms is not None:
return _nms
else:
return cpu_nms_wrapper(thresh)
def nms(dets, thresh):
"""
greedily select boxes with high confidence and overlap with current maximum <= thresh
rule out overlap >= thresh
:param dets: [[x1, y1, x2, y2 score]]
:param thresh: retain overlap < thresh
:return: indexes to keep
"""
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
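# Example with made-up boxes in [x1, y1, x2, y2, score] format: the second box
# overlaps the first with IoU ~0.83, so it is suppressed at thresh=0.3.
#
#   dets = np.array([[10., 10., 50., 50., 0.9],
#                    [12., 12., 52., 52., 0.8],
#                    [100., 100., 150., 150., 0.7]])
#   nms(dets, 0.3)  # -> [0, 2]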
| apache-2.0 | -6,590,217,744,179,866,000 | 27.753086 | 89 | 0.625161 | false |
nuxeh/keystone | keystone/trust/routers.py | 28 | 2500 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""WSGI Routers for the Trust service."""
import functools
from keystone.common import json_home
from keystone.common import wsgi
from keystone.trust import controllers
_build_resource_relation = functools.partial(
json_home.build_v3_extension_resource_relation, extension_name='OS-TRUST',
extension_version='1.0')
TRUST_ID_PARAMETER_RELATION = json_home.build_v3_extension_parameter_relation(
'OS-TRUST', '1.0', 'trust_id')
class Routers(wsgi.RoutersBase):
def append_v3_routers(self, mapper, routers):
trust_controller = controllers.TrustV3()
self._add_resource(
mapper, trust_controller,
path='/OS-TRUST/trusts',
get_action='list_trusts',
post_action='create_trust',
rel=_build_resource_relation(resource_name='trusts'))
self._add_resource(
mapper, trust_controller,
path='/OS-TRUST/trusts/{trust_id}',
get_action='get_trust',
delete_action='delete_trust',
rel=_build_resource_relation(resource_name='trust'),
path_vars={
'trust_id': TRUST_ID_PARAMETER_RELATION,
})
self._add_resource(
mapper, trust_controller,
path='/OS-TRUST/trusts/{trust_id}/roles',
get_action='list_roles_for_trust',
rel=_build_resource_relation(resource_name='trust_roles'),
path_vars={
'trust_id': TRUST_ID_PARAMETER_RELATION,
})
self._add_resource(
mapper, trust_controller,
path='/OS-TRUST/trusts/{trust_id}/roles/{role_id}',
get_head_action='get_role_for_trust',
rel=_build_resource_relation(resource_name='trust_role'),
path_vars={
'trust_id': TRUST_ID_PARAMETER_RELATION,
'role_id': json_home.Parameters.ROLE_ID,
})
| apache-2.0 | -3,644,005,041,020,115,500 | 36.313433 | 78 | 0.6324 | false |
pzajda/eloquence | scons-local-2.5.0/SCons/Platform/win32.py | 3 | 14950 | """SCons.Platform.win32
Platform-specific initialization for Win32 systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/win32.py rel_2.5.0:3543:937e55cd78f7 2016/04/09 11:29:54 bdbaddog"
import os
import os.path
import sys
import tempfile
from SCons.Platform.posix import exitvalmap
from SCons.Platform import TempFileMunge
import SCons.Util
try:
import msvcrt
import win32api
import win32con
msvcrt.get_osfhandle
win32api.SetHandleInformation
win32con.HANDLE_FLAG_INHERIT
except ImportError:
parallel_msg = \
"you do not seem to have the pywin32 extensions installed;\n" + \
"\tparallel (-j) builds may not work reliably with open Python files."
except AttributeError:
parallel_msg = \
"your pywin32 extensions do not support file handle operations;\n" + \
"\tparallel (-j) builds may not work reliably with open Python files."
else:
parallel_msg = None
_builtin_file = file
_builtin_open = open
class _scons_file(_builtin_file):
def __init__(self, *args, **kw):
_builtin_file.__init__(self, *args, **kw)
win32api.SetHandleInformation(msvcrt.get_osfhandle(self.fileno()),
win32con.HANDLE_FLAG_INHERIT, 0)
def _scons_open(*args, **kw):
fp = _builtin_open(*args, **kw)
win32api.SetHandleInformation(msvcrt.get_osfhandle(fp.fileno()),
win32con.HANDLE_FLAG_INHERIT,
0)
return fp
file = _scons_file
open = _scons_open
try:
import threading
spawn_lock = threading.Lock()
# This locked version of spawnve works around a Windows
# MSVCRT bug, because its spawnve is not thread-safe.
# Without this, python can randomly crash while using -jN.
# See the python bug at http://bugs.python.org/issue6476
# and SCons issue at
# http://scons.tigris.org/issues/show_bug.cgi?id=2449
def spawnve(mode, file, args, env):
spawn_lock.acquire()
try:
if mode == os.P_WAIT:
ret = os.spawnve(os.P_NOWAIT, file, args, env)
else:
ret = os.spawnve(mode, file, args, env)
finally:
spawn_lock.release()
if mode == os.P_WAIT:
pid, status = os.waitpid(ret, 0)
ret = status >> 8
return ret
except ImportError:
# Use the unsafe method of spawnve.
# Please, don't try to optimize this try-except block
# away by assuming that the threading module is always present.
# In the test test/option-j.py we intentionally call SCons with
# a fake threading.py that raises an import exception right away,
# simulating a non-existent package.
def spawnve(mode, file, args, env):
return os.spawnve(mode, file, args, env)
# The upshot of all this is that, if you are using Python 1.5.2,
# you had better have cmd or command.com in your PATH when you run
# scons.
def piped_spawn(sh, escape, cmd, args, env, stdout, stderr):
# There is no direct way to do that in python. What we do
# here should work for most cases:
# In case stdout (stderr) is not redirected to a file,
# we redirect it into a temporary file tmpFileStdout
# (tmpFileStderr) and copy the contents of this file
# to stdout (stderr) given in the argument
if not sh:
sys.stderr.write("scons: Could not find command interpreter, is it in your PATH?\n")
return 127
else:
# one temporary file for stdout and stderr
tmpFileStdout = os.path.normpath(tempfile.mktemp())
tmpFileStderr = os.path.normpath(tempfile.mktemp())
# check if output is redirected
stdoutRedirected = 0
stderrRedirected = 0
for arg in args:
# are there more possibilities to redirect stdout ?
if (arg.find( ">", 0, 1 ) != -1 or
arg.find( "1>", 0, 2 ) != -1):
stdoutRedirected = 1
# are there more possibilities to redirect stderr ?
if arg.find( "2>", 0, 2 ) != -1:
stderrRedirected = 1
# redirect output of non-redirected streams to our tempfiles
if stdoutRedirected == 0:
args.append(">" + str(tmpFileStdout))
if stderrRedirected == 0:
args.append("2>" + str(tmpFileStderr))
# actually do the spawn
try:
args = [sh, '/C', escape(' '.join(args)) ]
ret = spawnve(os.P_WAIT, sh, args, env)
except OSError, e:
# catch any error
try:
ret = exitvalmap[e[0]]
except KeyError:
sys.stderr.write("scons: unknown OSError exception code %d - %s: %s\n" % (e[0], cmd, e[1]))
if stderr is not None:
stderr.write("scons: %s: %s\n" % (cmd, e[1]))
# copy child output from tempfiles to our streams
# and do clean up stuff
if stdout is not None and stdoutRedirected == 0:
try:
stdout.write(open( tmpFileStdout, "r" ).read())
os.remove( tmpFileStdout )
except (IOError, OSError):
pass
if stderr is not None and stderrRedirected == 0:
try:
stderr.write(open( tmpFileStderr, "r" ).read())
os.remove( tmpFileStderr )
except (IOError, OSError):
pass
return ret
def exec_spawn(l, env):
try:
result = spawnve(os.P_WAIT, l[0], l, env)
except OSError, e:
try:
result = exitvalmap[e[0]]
sys.stderr.write("scons: %s: %s\n" % (l[0], e[1]))
except KeyError:
result = 127
if len(l) > 2:
if len(l[2]) < 1000:
command = ' '.join(l[0:3])
else:
command = l[0]
else:
command = l[0]
sys.stderr.write("scons: unknown OSError exception code %d - '%s': %s\n" % (e[0], command, e[1]))
return result
def spawn(sh, escape, cmd, args, env):
if not sh:
sys.stderr.write("scons: Could not find command interpreter, is it in your PATH?\n")
return 127
return exec_spawn([sh, '/C', escape(' '.join(args))], env)
# Windows does not allow special characters in file names anyway, so no
# need for a complex escape function, we will just quote the arg, except
# that "cmd /c" requires that if an argument ends with a backslash it
# needs to be escaped so as not to interfere with closing double quote
# that we add.
def escape(x):
if x[-1] == '\\':
x = x + '\\'
return '"' + x + '"'
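# For example, an argument whose characters are  C:\build\  comes back as
# "C:\build\\"  - the trailing backslash is doubled so it cannot escape the
# closing double quote that cmd.exe sees.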
# Get the windows system directory name
_system_root = None
def get_system_root():
global _system_root
if _system_root is not None:
return _system_root
    # A reasonable default if we can't read the registry
val = os.environ.get('SystemRoot', "C:\\WINDOWS")
if SCons.Util.can_read_reg:
try:
# Look for Windows NT system root
k=SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
'Software\\Microsoft\\Windows NT\\CurrentVersion')
val, tok = SCons.Util.RegQueryValueEx(k, 'SystemRoot')
except SCons.Util.RegError:
try:
# Okay, try the Windows 9x system root
k=SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
'Software\\Microsoft\\Windows\\CurrentVersion')
val, tok = SCons.Util.RegQueryValueEx(k, 'SystemRoot')
except KeyboardInterrupt:
raise
except:
pass
_system_root = val
return val
# Get the location of the program files directory
def get_program_files_dir():
# Now see if we can look in the registry...
val = ''
if SCons.Util.can_read_reg:
try:
# Look for Windows Program Files directory
k=SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
'Software\\Microsoft\\Windows\\CurrentVersion')
val, tok = SCons.Util.RegQueryValueEx(k, 'ProgramFilesDir')
except SCons.Util.RegError:
val = ''
pass
if val == '':
# A reasonable default if we can't read the registry
# (Actually, it's pretty reasonable even if we can :-)
val = os.path.join(os.path.dirname(get_system_root()),"Program Files")
return val
# Determine which Windows CPU we're running on.
class ArchDefinition(object):
"""
A class for defining architecture-specific settings and logic.
"""
def __init__(self, arch, synonyms=[]):
self.arch = arch
self.synonyms = synonyms
SupportedArchitectureList = [
ArchDefinition(
'x86',
['i386', 'i486', 'i586', 'i686'],
),
ArchDefinition(
'x86_64',
['AMD64', 'amd64', 'em64t', 'EM64T', 'x86_64'],
),
ArchDefinition(
'ia64',
['IA64'],
),
]
SupportedArchitectureMap = {}
for a in SupportedArchitectureList:
SupportedArchitectureMap[a.arch] = a
for s in a.synonyms:
SupportedArchitectureMap[s] = a
def get_architecture(arch=None):
"""Returns the definition for the specified architecture string.
If no string is specified, the system default is returned (as defined
by the PROCESSOR_ARCHITEW6432 or PROCESSOR_ARCHITECTURE environment
variables).
"""
if arch is None:
arch = os.environ.get('PROCESSOR_ARCHITEW6432')
if not arch:
arch = os.environ.get('PROCESSOR_ARCHITECTURE')
return SupportedArchitectureMap.get(arch, ArchDefinition('', ['']))
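# For example, get_architecture('AMD64').arch and get_architecture('amd64').arch
# both resolve to 'x86_64' through the synonym map above.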
def generate(env):
# Attempt to find cmd.exe (for WinNT/2k/XP) or
# command.com for Win9x
cmd_interp = ''
# First see if we can look in the registry...
if SCons.Util.can_read_reg:
try:
# Look for Windows NT system root
k=SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
'Software\\Microsoft\\Windows NT\\CurrentVersion')
val, tok = SCons.Util.RegQueryValueEx(k, 'SystemRoot')
cmd_interp = os.path.join(val, 'System32\\cmd.exe')
except SCons.Util.RegError:
try:
# Okay, try the Windows 9x system root
k=SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
'Software\\Microsoft\\Windows\\CurrentVersion')
val, tok = SCons.Util.RegQueryValueEx(k, 'SystemRoot')
cmd_interp = os.path.join(val, 'command.com')
except KeyboardInterrupt:
raise
except:
pass
# For the special case of not having access to the registry, we
# use a temporary path and pathext to attempt to find the command
# interpreter. If we fail, we try to find the interpreter through
# the env's PATH. The problem with that is that it might not
# contain an ENV and a PATH.
if not cmd_interp:
systemroot = get_system_root()
tmp_path = systemroot + os.pathsep + \
os.path.join(systemroot,'System32')
tmp_pathext = '.com;.exe;.bat;.cmd'
if 'PATHEXT' in os.environ:
tmp_pathext = os.environ['PATHEXT']
cmd_interp = SCons.Util.WhereIs('cmd', tmp_path, tmp_pathext)
if not cmd_interp:
cmd_interp = SCons.Util.WhereIs('command', tmp_path, tmp_pathext)
if not cmd_interp:
cmd_interp = env.Detect('cmd')
if not cmd_interp:
cmd_interp = env.Detect('command')
if 'ENV' not in env:
env['ENV'] = {}
# Import things from the external environment to the construction
# environment's ENV. This is a potential slippery slope, because we
# *don't* want to make builds dependent on the user's environment by
# default. We're doing this for SystemRoot, though, because it's
# needed for anything that uses sockets, and seldom changes, and
# for SystemDrive because it's related.
#
# Weigh the impact carefully before adding other variables to this list.
import_env = [ 'SystemDrive', 'SystemRoot', 'TEMP', 'TMP' ]
for var in import_env:
v = os.environ.get(var)
if v:
env['ENV'][var] = v
if 'COMSPEC' not in env['ENV']:
v = os.environ.get("COMSPEC")
if v:
env['ENV']['COMSPEC'] = v
env.AppendENVPath('PATH', get_system_root() + '\System32')
env['ENV']['PATHEXT'] = '.COM;.EXE;.BAT;.CMD'
env['OBJPREFIX'] = ''
env['OBJSUFFIX'] = '.obj'
env['SHOBJPREFIX'] = '$OBJPREFIX'
env['SHOBJSUFFIX'] = '$OBJSUFFIX'
env['PROGPREFIX'] = ''
env['PROGSUFFIX'] = '.exe'
env['LIBPREFIX'] = ''
env['LIBSUFFIX'] = '.lib'
env['SHLIBPREFIX'] = ''
env['SHLIBSUFFIX'] = '.dll'
env['LIBPREFIXES'] = [ '$LIBPREFIX' ]
env['LIBSUFFIXES'] = [ '$LIBSUFFIX' ]
env['PSPAWN'] = piped_spawn
env['SPAWN'] = spawn
env['SHELL'] = cmd_interp
env['TEMPFILE'] = TempFileMunge
env['TEMPFILEPREFIX'] = '@'
env['MAXLINELENGTH'] = 2048
env['ESCAPE'] = escape
env['HOST_OS'] = 'win32'
env['HOST_ARCH'] = get_architecture().arch
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | -1,893,968,727,766,853,400 | 35.198547 | 109 | 0.597324 | false |
chapellu/pekee2.0 | Python/test_joystick2.py | 1 | 2391 |
import pygame
import sys
import time
import serial
ser = serial.Serial('/dev/ttyACM0',115200)
pygame.init()
pygame.joystick.init()
print (pygame.joystick.get_count())
_joystick = pygame.joystick.Joystick(0)
_joystick.init()
print (_joystick.get_init())
print (_joystick.get_id())
print (_joystick.get_name())
print (_joystick.get_numaxes())
print (_joystick.get_numballs())
print (_joystick.get_numbuttons())
print (_joystick.get_numhats())
print (_joystick.get_axis(0))
axes = [ 0.0 ] * _joystick.get_numaxes()
buttons = [ False ] * _joystick.get_numbuttons()
value_xaxes_old = 0
value_yaxes_old = 0
old_message = "a0000"
message = old_message  # pre-seed so the first pass of the loop cannot reference an undefined name
seuil = 10
arret = False
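# Command format inferred from the messages built below: one letter plus a
# 4-character value, e.g. "a 100" (translation speed), "r -50" (rotation),
# "a0000" (neutral) and "s0000" (emergency stop).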
keep_alive = True
while keep_alive:
event = pygame.event.wait()
pygame.event.clear()
if event.type == pygame.QUIT:
keep_alive = False
elif event.type == pygame.JOYAXISMOTION:
while arret:
event_arret = pygame.event.wait()
pygame.event.clear()
e_arret = event_arret.dict
if 'value' in e_arret.keys():
axes[e_arret['axis']] = e_arret['value']
if axes[0] == 0 and axes[1] == 0:
arret = False
e = event.dict
axes[e['axis']] = e['value']
if abs(axes[1]) > 0.2:
value_axes = int(axes[1]*(axes[3]-1)*50)
if(abs(value_axes - value_yaxes_old) > seuil):
message = "a{:4}".format(value_axes)
value_yaxes_old = value_axes
elif abs(axes[0]) > 0.2:
value_axes = int(axes[0]*(axes[3]-1)*50)
if(abs(value_axes - value_xaxes_old) > seuil):
message = "r{:4}".format(value_axes)
value_xaxes_old = value_axes
else:
message = "a0000"
elif event.type in [pygame.JOYBUTTONUP, pygame.JOYBUTTONDOWN ]:
e = event.dict
buttons[e['button']] ^= True
print(e['button'])
if(buttons[7] == True):
keep_alive = False
buttons[7] = False
if(buttons[0] == True):
arret = True
print("STOP")
message = "s0000"
ser.write(str.encode(message))
buttons[0] = False
    print("SEND")
if (message != old_message and not arret ):
print(message)
ser.write(str.encode(message))
old_message = message
| mit | 4,542,471,297,770,310,700 | 28.8875 | 67 | 0.549979 | false |
mat12/mytest | lib/python/Components/NimManager.py | 1 | 79035 | from time import localtime, mktime
from datetime import datetime
import xml.etree.cElementTree
from os import path
from enigma import eDVBSatelliteEquipmentControl as secClass, \
eDVBSatelliteLNBParameters as lnbParam, \
eDVBSatelliteDiseqcParameters as diseqcParam, \
eDVBSatelliteSwitchParameters as switchParam, \
eDVBSatelliteRotorParameters as rotorParam, \
eDVBResourceManager, eDVBDB, eEnv
from Tools.HardwareInfo import HardwareInfo
from Tools.BoundFunction import boundFunction
from Components.About import about
from config import config, ConfigSubsection, ConfigSelection, ConfigFloat, ConfigSatlist, ConfigYesNo, ConfigInteger, ConfigSubList, ConfigNothing, ConfigSubDict, ConfigOnOff, ConfigDateTime, ConfigText
maxFixedLnbPositions = 0
# LNB65 3601 All satellites 1 (USALS)
# LNB66 3602 All satellites 2 (USALS)
# LNB67 3603 All satellites 3 (USALS)
# LNB68 3604 All satellites 4 (USALS)
# LNB69 3605 Selecting satellites 1 (USALS)
# LNB70 3606 Selecting satellites 2 (USALS)
MAX_LNB_WILDCARDS = 6
MAX_ORBITPOSITION_WILDCARDS = 6
#magic numbers
ORBITPOSITION_LIMIT = 3600
def getConfigSatlist(orbpos, satlist):
default_orbpos = None
for x in satlist:
if x[0] == orbpos:
default_orbpos = orbpos
break
return ConfigSatlist(satlist, default_orbpos)
class SecConfigure:
def getConfiguredSats(self):
return self.configuredSatellites
def addSatellite(self, sec, orbpos):
sec.addSatellite(orbpos)
self.configuredSatellites.add(orbpos)
def addLNBSimple(self, sec, slotid, diseqcmode, toneburstmode = diseqcParam.NO, diseqcpos = diseqcParam.SENDNO, orbpos = 0, longitude = 0, latitude = 0, loDirection = 0, laDirection = 0, turningSpeed = rotorParam.FAST, useInputPower=True, inputPowerDelta=50, fastDiSEqC = False, setVoltageTone = True, diseqc13V = False, CircularLNB = False):
if orbpos is None or orbpos == 3600 or orbpos == 3601:
return
#simple defaults
if sec.addLNB():
			print "No space left on m_lnbs (max No. 144 LNBs exceeded)"
return
tunermask = 1 << slotid
if self.equal.has_key(slotid):
for slot in self.equal[slotid]:
tunermask |= (1 << slot)
if self.linked.has_key(slotid):
for slot in self.linked[slotid]:
tunermask |= (1 << slot)
sec.setLNBSatCR(-1)
sec.setLNBSatCRpositionnumber(1)
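		# LNB local oscillator defaults in kHz: a circular LNB uses a single
		# 10750 MHz LO, a universal LNB uses 9750/10600 MHz with the band switch
		# threshold at 11700 MHz.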
sec.setLNBLOFL(CircularLNB and 10750000 or 9750000)
sec.setLNBLOFH(CircularLNB and 10750000 or 10600000)
sec.setLNBThreshold(CircularLNB and 10750000 or 11700000)
sec.setLNBIncreasedVoltage(False)
sec.setRepeats(0)
sec.setFastDiSEqC(fastDiSEqC)
sec.setSeqRepeat(False)
sec.setCommandOrder(0)
#user values
sec.setDiSEqCMode(3 if diseqcmode == 4 else diseqcmode)
sec.setToneburst(toneburstmode)
sec.setCommittedCommand(diseqcpos)
sec.setUncommittedCommand(0) # SENDNO
if 0 <= diseqcmode < 3:
self.addSatellite(sec, orbpos)
if setVoltageTone:
if diseqc13V:
sec.setVoltageMode(switchParam.HV_13)
else:
sec.setVoltageMode(switchParam.HV)
sec.setToneMode(switchParam.HILO)
else:
# noinspection PyProtectedMember
sec.setVoltageMode(switchParam._14V)
sec.setToneMode(switchParam.OFF)
elif 3 <= diseqcmode < 5: # diseqc 1.2
if self.satposdepends.has_key(slotid):
for slot in self.satposdepends[slotid]:
tunermask |= (1 << slot)
sec.setLatitude(latitude)
sec.setLaDirection(laDirection)
sec.setLongitude(longitude)
sec.setLoDirection(loDirection)
sec.setUseInputpower(useInputPower)
sec.setInputpowerDelta(inputPowerDelta)
sec.setRotorTurningSpeed(turningSpeed)
user_satList = self.NimManager.satList
if diseqcmode == 4:
user_satList = []
if orbpos and isinstance(orbpos, str):
for user_sat in self.NimManager.satList:
if str(user_sat[0]) in orbpos:
user_satList.append(user_sat)
for x in user_satList:
print "Add sat " + str(x[0])
self.addSatellite(sec, int(x[0]))
if diseqc13V:
sec.setVoltageMode(switchParam.HV_13)
else:
sec.setVoltageMode(switchParam.HV)
sec.setToneMode(switchParam.HILO)
sec.setRotorPosNum(0) # USALS
sec.setLNBSlotMask(tunermask)
def setSatposDepends(self, sec, nim1, nim2):
print "tuner", nim1, "depends on satpos of", nim2
sec.setTunerDepends(nim1, nim2)
def linkInternally(self, slotid):
nim = self.NimManager.getNim(slotid)
if nim.internallyConnectableTo is not None:
nim.setInternalLink()
def linkNIMs(self, sec, nim1, nim2):
print "link tuner", nim1, "to tuner", nim2
# for internally connect tuner A to B
if '7356' not in about.getChipSetString() and nim2 == (nim1 - 1):
self.linkInternally(nim1)
elif '7356' in about.getChipSetString():
self.linkInternally(nim1)
sec.setTunerLinked(nim1, nim2)
def getRoot(self, slotid, connto):
visited = []
while self.NimManager.getNimConfig(connto).configMode.value in ("satposdepends", "equal", "loopthrough"):
connto = int(self.NimManager.getNimConfig(connto).connectedTo.value)
if connto in visited: # prevent endless loop
return slotid
visited.append(connto)
return connto
def update(self):
sec = secClass.getInstance()
self.configuredSatellites = set()
for slotid in self.NimManager.getNimListOfType("DVB-S"):
if self.NimManager.nimInternallyConnectableTo(slotid) is not None:
self.NimManager.nimRemoveInternalLink(slotid)
sec.clear() ## this do unlinking NIMs too !!
print "sec config cleared"
self.linked = { }
self.satposdepends = { }
self.equal = { }
nim_slots = self.NimManager.nim_slots
used_nim_slots = [ ]
for slot in nim_slots:
if slot.type is not None:
used_nim_slots.append((slot.slot, slot.description, slot.config.configMode.value != "nothing" and True or False, slot.isCompatible("DVB-S2"), slot.frontend_id is None and -1 or slot.frontend_id))
eDVBResourceManager.getInstance().setFrontendSlotInformations(used_nim_slots)
try:
for slot in nim_slots:
if slot.frontend_id is not None:
types = [type for type in ["DVB-C", "DVB-T", "DVB-T2", "DVB-S", "DVB-S2", "ATSC"] if eDVBResourceManager.getInstance().frontendIsCompatible(slot.frontend_id, type)]
if "DVB-T2" in types:
# DVB-T2 implies DVB-T support
types.remove("DVB-T")
if "DVB-S2" in types:
# DVB-S2 implies DVB-S support
types.remove("DVB-S")
if len(types) > 1:
slot.multi_type = {}
for type in types:
slot.multi_type[str(types.index(type))] = type
except:
pass
for slot in nim_slots:
x = slot.slot
nim = slot.config
if slot.isCompatible("DVB-S"):
# save what nim we link to/are equal to/satposdepends to.
# this is stored in the *value* (not index!) of the config list
if nim.configMode.value == "equal":
connto = self.getRoot(x, int(nim.connectedTo.value))
if not self.equal.has_key(connto):
self.equal[connto] = []
self.equal[connto].append(x)
elif nim.configMode.value == "loopthrough":
self.linkNIMs(sec, x, int(nim.connectedTo.value))
connto = self.getRoot(x, int(nim.connectedTo.value))
if not self.linked.has_key(connto):
self.linked[connto] = []
self.linked[connto].append(x)
elif nim.configMode.value == "satposdepends":
self.setSatposDepends(sec, x, int(nim.connectedTo.value))
connto = self.getRoot(x, int(nim.connectedTo.value))
if not self.satposdepends.has_key(connto):
self.satposdepends[connto] = []
self.satposdepends[connto].append(x)
for slot in nim_slots:
x = slot.slot
nim = slot.config
hw = HardwareInfo()
if slot.isCompatible("DVB-S"):
print "slot: " + str(x) + " configmode: " + str(nim.configMode.value)
if nim.configMode.value in ( "loopthrough", "satposdepends", "nothing" ):
pass
else:
sec.setSlotNotLinked(x)
if nim.configMode.value == "equal":
pass
elif nim.configMode.value == "simple": #simple config
print "diseqcmode: ", nim.diseqcMode.value
if nim.diseqcMode.value == "single": #single
currentCircular = False
if nim.diseqcA.value in ("360", "560"):
currentCircular = nim.simpleDiSEqCSetCircularLNB.value
if nim.simpleSingleSendDiSEqC.value:
self.addLNBSimple(sec, slotid = x, orbpos = nim.diseqcA.orbital_position, toneburstmode = diseqcParam.NO, diseqcmode = diseqcParam.V1_0, diseqcpos = diseqcParam.AA, diseqc13V = nim.diseqc13V.value, CircularLNB = currentCircular)
else:
self.addLNBSimple(sec, slotid = x, orbpos = nim.diseqcA.orbital_position, toneburstmode = diseqcParam.NO, diseqcmode = diseqcParam.NONE, diseqcpos = diseqcParam.SENDNO, diseqc13V = nim.diseqc13V.value, CircularLNB = currentCircular)
elif nim.diseqcMode.value == "toneburst_a_b": #Toneburst A/B
self.addLNBSimple(sec, slotid = x, orbpos = nim.diseqcA.orbital_position, toneburstmode = diseqcParam.A, diseqcmode = diseqcParam.V1_0, diseqcpos = diseqcParam.SENDNO, diseqc13V = nim.diseqc13V.value)
self.addLNBSimple(sec, slotid = x, orbpos = nim.diseqcB.orbital_position, toneburstmode = diseqcParam.B, diseqcmode = diseqcParam.V1_0, diseqcpos = diseqcParam.SENDNO, diseqc13V = nim.diseqc13V.value)
elif nim.diseqcMode.value == "diseqc_a_b": #DiSEqC A/B
fastDiSEqC = nim.simpleDiSEqCOnlyOnSatChange.value
setVoltageTone = nim.simpleDiSEqCSetVoltageTone.value
self.addLNBSimple(sec, slotid = x, orbpos = nim.diseqcA.orbital_position, toneburstmode = diseqcParam.NO, diseqcmode = diseqcParam.V1_0, diseqcpos = diseqcParam.AA, fastDiSEqC = fastDiSEqC, setVoltageTone = setVoltageTone, diseqc13V = nim.diseqc13V.value)
self.addLNBSimple(sec, slotid = x, orbpos = nim.diseqcB.orbital_position, toneburstmode = diseqcParam.NO, diseqcmode = diseqcParam.V1_0, diseqcpos = diseqcParam.AB, fastDiSEqC = fastDiSEqC, setVoltageTone = setVoltageTone, diseqc13V = nim.diseqc13V.value)
elif nim.diseqcMode.value == "diseqc_a_b_c_d": #DiSEqC A/B/C/D
fastDiSEqC = nim.simpleDiSEqCOnlyOnSatChange.value
setVoltageTone = nim.simpleDiSEqCSetVoltageTone.value
self.addLNBSimple(sec, slotid = x, orbpos = nim.diseqcA.orbital_position, toneburstmode = diseqcParam.NO, diseqcmode = diseqcParam.V1_0, diseqcpos = diseqcParam.AA, fastDiSEqC = fastDiSEqC, setVoltageTone = setVoltageTone, diseqc13V = nim.diseqc13V.value)
self.addLNBSimple(sec, slotid = x, orbpos = nim.diseqcB.orbital_position, toneburstmode = diseqcParam.NO, diseqcmode = diseqcParam.V1_0, diseqcpos = diseqcParam.AB, fastDiSEqC = fastDiSEqC, setVoltageTone = setVoltageTone, diseqc13V = nim.diseqc13V.value)
self.addLNBSimple(sec, slotid = x, orbpos = nim.diseqcC.orbital_position, toneburstmode = diseqcParam.NO, diseqcmode = diseqcParam.V1_0, diseqcpos = diseqcParam.BA, fastDiSEqC = fastDiSEqC, setVoltageTone = setVoltageTone, diseqc13V = nim.diseqc13V.value)
self.addLNBSimple(sec, slotid = x, orbpos = nim.diseqcD.orbital_position, toneburstmode = diseqcParam.NO, diseqcmode = diseqcParam.V1_0, diseqcpos = diseqcParam.BB, fastDiSEqC = fastDiSEqC, setVoltageTone = setVoltageTone, diseqc13V = nim.diseqc13V.value)
elif nim.diseqcMode.value in ("positioner", "positioner_select"): #Positioner
current_mode = 3
sat = 0
if nim.diseqcMode.value == "positioner_select":
current_mode = 4
sat = nim.userSatellitesList.value
if nim.latitudeOrientation.value == "north":
laValue = rotorParam.NORTH
else:
laValue = rotorParam.SOUTH
if nim.longitudeOrientation.value == "east":
loValue = rotorParam.EAST
else:
loValue = rotorParam.WEST
inputPowerDelta=nim.powerThreshold.value
useInputPower=False
turning_speed=0
if nim.powerMeasurement.value:
useInputPower=True
turn_speed_dict = { "fast": rotorParam.FAST, "slow": rotorParam.SLOW }
if turn_speed_dict.has_key(nim.turningSpeed.value):
turning_speed = turn_speed_dict[nim.turningSpeed.value]
else:
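								# pack the fast turning window into one integer: begin time
								# (minutes since midnight + 61) in the upper 16 bits, end
								# time in the lower 16 bits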
beg_time = localtime(nim.fastTurningBegin.value)
end_time = localtime(nim.fastTurningEnd.value)
turning_speed = ((beg_time.tm_hour+1) * 60 + beg_time.tm_min + 1) << 16
turning_speed |= (end_time.tm_hour+1) * 60 + end_time.tm_min + 1
self.addLNBSimple(sec, slotid = x, diseqcmode = current_mode,
orbpos = sat,
longitude = nim.longitude.float,
loDirection = loValue,
latitude = nim.latitude.float,
laDirection = laValue,
turningSpeed = turning_speed,
useInputPower = useInputPower,
inputPowerDelta = inputPowerDelta,
diseqc13V = nim.diseqc13V.value)
elif nim.configMode.value == "advanced": #advanced config
self.updateAdvanced(sec, x)
print "sec config completed"
def updateAdvanced(self, sec, slotid):
try:
if config.Nims[slotid].advanced.unicableconnected is not None:
if config.Nims[slotid].advanced.unicableconnected.value:
config.Nims[slotid].advanced.unicableconnectedTo.save_forced = True
self.linkNIMs(sec, slotid, int(config.Nims[slotid].advanced.unicableconnectedTo.value))
connto = self.getRoot(slotid, int(config.Nims[slotid].advanced.unicableconnectedTo.value))
if not self.linked.has_key(connto):
self.linked[connto] = []
self.linked[connto].append(slotid)
else:
config.Nims[slotid].advanced.unicableconnectedTo.save_forced = False
except:
pass
lnbSat = {}
for x in range(1, 71):
lnbSat[x] = []
#wildcard for all satellites ( for rotor )
for x in range(3601, 3605):
lnb = int(config.Nims[slotid].advanced.sat[x].lnb.value)
if lnb != 0:
for x in self.NimManager.satList:
print "add", x[0], "to", lnb
lnbSat[lnb].append(x[0])
#wildcard for user satellites ( for rotor )
for x in range(3605, 3607):
lnb = int(config.Nims[slotid].advanced.sat[x].lnb.value)
if lnb != 0:
for user_sat in self.NimManager.satList:
if str(user_sat[0]) in config.Nims[slotid].advanced.sat[x].userSatellitesList.value:
print "add", user_sat[0], "to", lnb
lnbSat[lnb].append(user_sat[0])
for x in self.NimManager.satList:
lnb = int(config.Nims[slotid].advanced.sat[x[0]].lnb.value)
if lnb != 0:
print "add", x[0], "to", lnb
lnbSat[lnb].append(x[0])
for x in range(1, 71):
if len(lnbSat[x]) > 0:
currLnb = config.Nims[slotid].advanced.lnb[x]
if sec.addLNB():
print "No space left on m_lnbs (max No. 144 LNBs exceeded)"
return
posnum = 1; #default if LNB movable
if x <= maxFixedLnbPositions:
posnum = x;
sec.setLNBSatCRpositionnumber(x) # LNB has fixed Position
else:
sec.setLNBSatCRpositionnumber(0) # or not (movable LNB)
tunermask = 1 << slotid
if self.equal.has_key(slotid):
for slot in self.equal[slotid]:
tunermask |= (1 << slot)
if self.linked.has_key(slotid):
for slot in self.linked[slotid]:
tunermask |= (1 << slot)
if currLnb.lof.value != "unicable":
sec.setLNBSatCR(-1)
if currLnb.lof.value == "universal_lnb":
sec.setLNBLOFL(9750000)
sec.setLNBLOFH(10600000)
sec.setLNBThreshold(11700000)
elif currLnb.lof.value == "unicable":
def setupUnicable(configManufacturer, ProductDict):
manufacturer_name = configManufacturer.value
manufacturer = ProductDict[manufacturer_name]
product_name = manufacturer.product.value
if product_name == "None" and manufacturer.product.saved_value != "None":
product_name = manufacturer.product.value = manufacturer.product.saved_value
manufacturer_scr = manufacturer.scr
manufacturer_positions_value = manufacturer.positions[product_name][0].value
position_idx = (posnum - 1) % manufacturer_positions_value
if product_name in manufacturer_scr:
diction = manufacturer.diction[product_name].value
if diction !="EN50607" or (posnum <= manufacturer_positions_value and x <= maxFixedLnbPositions): #for every allowed position
if diction =="EN50607":
sec.setLNBSatCRformat(1) #JESS
else:
sec.setLNBSatCRformat(0) #DiSEqC
sec.setLNBSatCR(manufacturer_scr[product_name].index)
sec.setLNBSatCRvco(manufacturer.vco[product_name][manufacturer_scr[product_name].index].value*1000)
sec.setLNBSatCRpositions(manufacturer_positions_value)
sec.setLNBLOFL(manufacturer.lofl[product_name][position_idx].value * 1000)
sec.setLNBLOFH(manufacturer.lofh[product_name][position_idx].value * 1000)
sec.setLNBThreshold(manufacturer.loft[product_name][position_idx].value * 1000)
configManufacturer.save_forced = True
manufacturer.product.save_forced = True
manufacturer.vco[product_name][manufacturer_scr[product_name].index].save_forced = True
else: #positionnumber out of range
print "positionnumber out of range"
else:
print "no product in list"
if currLnb.unicable.value == "unicable_user":
#TODO satpositions for satcruser
if currLnb.dictionuser.value == "EN50607":
sec.setLNBSatCRformat(1)
sec.setLNBSatCR(currLnb.satcruserEN50607.index)
sec.setLNBSatCRvco(currLnb.satcrvcouserEN50607[currLnb.satcruserEN50607.index].value*1000)
else:
sec.setLNBSatCRformat(0)
sec.setLNBSatCR(currLnb.satcruserEN50494.index)
sec.setLNBSatCRvco(currLnb.satcrvcouserEN50494[currLnb.satcruserEN50494.index].value*1000)
sec.setLNBLOFL(currLnb.lofl.value * 1000)
sec.setLNBLOFH(currLnb.lofh.value * 1000)
sec.setLNBThreshold(currLnb.threshold.value * 1000)
sec.setLNBSatCRpositions(1)
elif currLnb.unicable.value == "unicable_matrix":
self.reconstructUnicableDate(currLnb.unicableMatrixManufacturer, currLnb.unicableMatrix, currLnb)
setupUnicable(currLnb.unicableMatrixManufacturer, currLnb.unicableMatrix)
elif currLnb.unicable.value == "unicable_lnb":
self.reconstructUnicableDate(currLnb.unicableLnbManufacturer, currLnb.unicableLnb, currLnb)
setupUnicable(currLnb.unicableLnbManufacturer, currLnb.unicableLnb)
elif currLnb.lof.value == "c_band":
sec.setLNBLOFL(5150000)
sec.setLNBLOFH(5150000)
sec.setLNBThreshold(5150000)
elif currLnb.lof.value == "user_defined":
sec.setLNBLOFL(currLnb.lofl.value * 1000)
sec.setLNBLOFH(currLnb.lofh.value * 1000)
sec.setLNBThreshold(currLnb.threshold.value * 1000)
elif currLnb.lof.value == "circular_lnb":
sec.setLNBLOFL(10750000)
sec.setLNBLOFH(10750000)
sec.setLNBThreshold(10750000)
if currLnb.increased_voltage.value:
sec.setLNBIncreasedVoltage(True)
else:
sec.setLNBIncreasedVoltage(False)
dm = currLnb.diseqcMode.value
if dm == "none":
sec.setDiSEqCMode(diseqcParam.NONE)
elif dm == "1_0":
sec.setDiSEqCMode(diseqcParam.V1_0)
elif dm == "1_1":
sec.setDiSEqCMode(diseqcParam.V1_1)
elif dm == "1_2":
sec.setDiSEqCMode(diseqcParam.V1_2)
if self.satposdepends.has_key(slotid):
for slot in self.satposdepends[slotid]:
tunermask |= (1 << slot)
if dm != "none":
if currLnb.toneburst.value == "none":
sec.setToneburst(diseqcParam.NO)
elif currLnb.toneburst.value == "A":
sec.setToneburst(diseqcParam.A)
elif currLnb.toneburst.value == "B":
sec.setToneburst(diseqcParam.B)
# Committed Diseqc Command
cdc = currLnb.commitedDiseqcCommand.value
c = { "none": diseqcParam.SENDNO,
"AA": diseqcParam.AA,
"AB": diseqcParam.AB,
"BA": diseqcParam.BA,
"BB": diseqcParam.BB }
if c.has_key(cdc):
sec.setCommittedCommand(c[cdc])
else:
sec.setCommittedCommand(long(cdc))
sec.setFastDiSEqC(currLnb.fastDiseqc.value)
sec.setSeqRepeat(currLnb.sequenceRepeat.value)
if currLnb.diseqcMode.value == "1_0":
currCO = currLnb.commandOrder1_0.value
sec.setRepeats(0)
else:
currCO = currLnb.commandOrder.value
udc = int(currLnb.uncommittedDiseqcCommand.value)
if udc > 0:
sec.setUncommittedCommand(0xF0|(udc-1))
else:
sec.setUncommittedCommand(0) # SENDNO
sec.setRepeats({"none": 0, "one": 1, "two": 2, "three": 3}[currLnb.diseqcRepeats.value])
setCommandOrder = False
# 0 "committed, toneburst",
# 1 "toneburst, committed",
# 2 "committed, uncommitted, toneburst",
# 3 "toneburst, committed, uncommitted",
# 4 "uncommitted, committed, toneburst"
# 5 "toneburst, uncommitted, commmitted"
order_map = {"ct": 0, "tc": 1, "cut": 2, "tcu": 3, "uct": 4, "tuc": 5}
sec.setCommandOrder(order_map[currCO])
if dm == "1_2":
latitude = currLnb.latitude.float
sec.setLatitude(latitude)
longitude = currLnb.longitude.float
sec.setLongitude(longitude)
if currLnb.latitudeOrientation.value == "north":
sec.setLaDirection(rotorParam.NORTH)
else:
sec.setLaDirection(rotorParam.SOUTH)
if currLnb.longitudeOrientation.value == "east":
sec.setLoDirection(rotorParam.EAST)
else:
sec.setLoDirection(rotorParam.WEST)
if currLnb.powerMeasurement.value:
sec.setUseInputpower(True)
sec.setInputpowerDelta(currLnb.powerThreshold.value)
turn_speed_dict = { "fast": rotorParam.FAST, "slow": rotorParam.SLOW }
if turn_speed_dict.has_key(currLnb.turningSpeed.value):
turning_speed = turn_speed_dict[currLnb.turningSpeed.value]
else:
beg_time = localtime(currLnb.fastTurningBegin.value)
end_time = localtime(currLnb.fastTurningEnd.value)
turning_speed = ((beg_time.tm_hour + 1) * 60 + beg_time.tm_min + 1) << 16
turning_speed |= (end_time.tm_hour + 1) * 60 + end_time.tm_min + 1
sec.setRotorTurningSpeed(turning_speed)
else:
sec.setUseInputpower(False)
sec.setLNBSlotMask(tunermask)
sec.setLNBPrio(int(currLnb.prio.value))
# finally add the orbital positions
for y in lnbSat[x]:
self.addSatellite(sec, y)
if x > maxFixedLnbPositions:
satpos = x > maxFixedLnbPositions and (3606-(70 - x)) or y
else:
satpos = y
currSat = config.Nims[slotid].advanced.sat[satpos]
if currSat.voltage.value == "polarization":
if config.Nims[slotid].diseqc13V.value:
sec.setVoltageMode(switchParam.HV_13)
else:
sec.setVoltageMode(switchParam.HV)
elif currSat.voltage.value == "13V":
# noinspection PyProtectedMember
sec.setVoltageMode(switchParam._14V)
elif currSat.voltage.value == "18V":
# noinspection PyProtectedMember
sec.setVoltageMode(switchParam._18V)
if currSat.tonemode.value == "band":
sec.setToneMode(switchParam.HILO)
elif currSat.tonemode.value == "on":
sec.setToneMode(switchParam.ON)
elif currSat.tonemode.value == "off":
sec.setToneMode(switchParam.OFF)
if not currSat.usals.value and x <= maxFixedLnbPositions:
sec.setRotorPosNum(currSat.rotorposition.value)
else:
sec.setRotorPosNum(0) #USALS
def reconstructUnicableDate(self, configManufacturer, ProductDict, currLnb):
val = currLnb.content.stored_values
if currLnb.unicable.value == "unicable_lnb":
ManufacturerName = val.get('unicableLnbManufacturer', 'none')
SDict = val.get('unicableLnb', None)
elif currLnb.unicable.value == "unicable_matrix":
ManufacturerName = val.get('unicableMatrixManufacturer', 'none')
SDict = val.get('unicableMatrix', None)
else:
return
# print "[reconstructUnicableDate] SDict %s" % SDict
if SDict is None:
return
print "ManufacturerName %s" % ManufacturerName
PDict = SDict.get(ManufacturerName, None) #dict contained last stored device data
if PDict is None:
return
PN = PDict.get('product', None) #product name
if PN is None:
return
		if ManufacturerName in ProductDict.keys(): # manufacturer is listed, use its ConfigSubsection
tmp = ProductDict[ManufacturerName]
if PN in tmp.product.choices.choices:
return
		else: # if manufacturer not in list, then generate a new ConfigSubsection
print "[reconstructUnicableDate] Manufacturer %s not in unicable.xml" % ManufacturerName
tmp = ConfigSubsection()
tmp.scr = ConfigSubDict()
tmp.vco = ConfigSubDict()
tmp.lofl = ConfigSubDict()
tmp.lofh = ConfigSubDict()
tmp.loft = ConfigSubDict()
tmp.diction = ConfigSubDict()
tmp.product = ConfigSelection(choices = [], default = None)
if PN not in tmp.product.choices.choices:
print "[reconstructUnicableDate] Product %s not in unicable.xml" % PN
scrlist = []
			SatCR = int(PDict.get('scr', {PN: 1}).get(PN, 1)) - 1
			vco = int(PDict.get('vco', {}).get(PN, {}).get(str(SatCR), 1))
positionslist=[1,(9750, 10600, 11700)] ##adenin_todo
positions = int(positionslist[0])
tmp.positions = ConfigSubDict()
tmp.positions[PN] = ConfigSubList()
tmp.positions[PN].append(ConfigInteger(default=positions, limits = (positions, positions)))
tmp.vco[PN] = ConfigSubList()
for cnt in range(0,SatCR + 1):
				vcofreq = (cnt == SatCR) and vco or 0 # equivalent to vcofreq = (cnt == SatCR) ? vco : 0
if vcofreq == 0 :
scrlist.append(("%d" %(cnt+1),"SCR %d " %(cnt+1) +_("not used")))
else:
scrlist.append(("%d" %(cnt+1),"SCR %d" %(cnt+1)))
print "vcofreq %d" % vcofreq
tmp.vco[PN].append(ConfigInteger(default=vcofreq, limits = (vcofreq, vcofreq)))
tmp.scr[PN] = ConfigSelection(choices = scrlist, default = scrlist[SatCR][0])
tmp.lofl[PN] = ConfigSubList()
tmp.lofh[PN] = ConfigSubList()
tmp.loft[PN] = ConfigSubList()
for cnt in range(1,positions+1):
lofl = int(positionslist[cnt][0])
lofh = int(positionslist[cnt][1])
loft = int(positionslist[cnt][2])
tmp.lofl[PN].append(ConfigInteger(default=lofl, limits = (lofl, lofl)))
tmp.lofh[PN].append(ConfigInteger(default=lofh, limits = (lofh, lofh)))
tmp.loft[PN].append(ConfigInteger(default=loft, limits = (loft, loft)))
dictionlist = [("EN50494", "Unicable(EN50494)")] ##adenin_todo
tmp.diction[PN] = ConfigSelection(choices = dictionlist, default = dictionlist[0][0])
tmp.product.choices.choices.append(PN)
tmp.product.choices.default = PN
tmp.scr[PN].save_forced = True
tmp.scr.save_forced = True
tmp.vco.save_forced = True
tmp.product.save_forced = True
ProductDict[ManufacturerName] = tmp
if ManufacturerName not in configManufacturer.choices.choices: #check if name in choices list
			configManufacturer.choices.choices.append(ManufacturerName) # add name to choices list
def __init__(self, nimmgr):
self.NimManager = nimmgr
self.configuredSatellites = set()
self.update()
class NIM(object):
def __init__(self, slot, type, description, has_outputs=True, internally_connectable=None, multi_type=None, frontend_id=None, i2c=None, is_empty=False, input_name = None):
if not multi_type: multi_type = {}
self.slot = slot
if type not in ("DVB-S", "DVB-C", "DVB-T", "DVB-S2", "DVB-T2", "DVB-C2", "ATSC", None):
print "warning: unknown NIM type %s, not using." % type
type = None
self.type = type
self.description = description
self.has_outputs = has_outputs
self.internally_connectable = internally_connectable
self.multi_type = multi_type
self.i2c = i2c
self.frontend_id = frontend_id
self.__is_empty = is_empty
self.input_name = input_name
self.compatible = {
None: (None,),
"DVB-S": ("DVB-S", None),
"DVB-C": ("DVB-C", None),
"DVB-T": ("DVB-T", None),
"DVB-S2": ("DVB-S", "DVB-S2", None),
"DVB-C2": ("DVB-C", "DVB-C2", None),
"DVB-T2": ("DVB-T", "DVB-T2", None),
"ATSC": ("ATSC", None),
}
def isCompatible(self, what):
if not self.isSupported():
return False
return what in self.compatible[self.getType()]
def canBeCompatible(self, what):
if not self.isSupported():
return False
if self.isCompatible(what):
return True
for type in self.multi_type.values():
if what in self.compatible[type]:
return True
return False
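# Illustrative example (not from the original source): a plain "DVB-S2" slot
# reports isCompatible("DVB-S") and isCompatible("DVB-S2") as True but
# isCompatible("DVB-T") as False; canBeCompatible() additionally returns True
# for anything one of the slot's multi_type modes could be switched to.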
def getType(self):
try:
if self.isMultiType():
return self.multi_type[self.config.multiType.value]
except:
pass
return self.type
def connectableTo(self):
connectable = {
"DVB-S": ("DVB-S", "DVB-S2"),
"DVB-C": ("DVB-C", "DVB-C2"),
"DVB-T": ("DVB-T","DVB-T2"),
"DVB-S2": ("DVB-S", "DVB-S2"),
"DVB-C2": ("DVB-C", "DVB-C2"),
"DVB-T2": ("DVB-T", "DVB-T2"),
"ATSC": "ATSC",
}
return connectable[self.getType()]
def getSlotInputName(self):
name = self.input_name
if name is None:
name = chr(ord('A') + self.slot)
return name
slot_input_name = property(getSlotInputName)
def getSlotName(self):
# get a friendly description for a slot name.
# we name them "Tuner A/B/C/...", because that's what's usually written on the back
# of the device.
# for DM7080HD "Tuner A1/A2/B/C/..."
descr = _("Tuner ")
return descr + self.getSlotInputName()
slot_name = property(getSlotName)
def getSlotID(self):
return chr(ord('A') + self.slot)
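# Example (illustrative): slot 0 without an explicit input_name yields
# getSlotInputName() == "A" and getSlotID() == "A", so slot_name is "Tuner A".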
def getI2C(self):
return self.i2c
def hasOutputs(self):
return self.has_outputs
def internallyConnectableTo(self):
return self.internally_connectable
def setInternalLink(self):
if self.internally_connectable is not None:
print "setting internal link on frontend id", self.frontend_id
f = open("/proc/stb/frontend/%d/rf_switch" % self.frontend_id, "w")
f.write("internal")
f.close()
def removeInternalLink(self):
if self.internally_connectable is not None:
print "removing internal link on frontend id", self.frontend_id
f = open("/proc/stb/frontend/%d/rf_switch" % self.frontend_id, "w")
f.write("external")
f.close()
def isMultiType(self):
return len(self.multi_type) > 0
def isEmpty(self):
return self.__is_empty
# empty tuners are supported!
def isSupported(self):
return (self.frontend_id is not None) or self.__is_empty
# returns dict {<slotid>: <type>}
def getMultiTypeList(self):
return self.multi_type
slot_id = property(getSlotID)
def getFriendlyType(self):
return {
"DVB-S": "DVB-S",
"DVB-T": "DVB-T",
"DVB-C": "DVB-C",
"DVB-S2": "DVB-S2",
"DVB-T2": "DVB-T2",
"DVB-C2": "DVB-C2",
"ATSC": "ATSC",
None: _("empty")
}[self.getType()]
friendly_type = property(getFriendlyType)
def getFriendlyFullDescription(self):
nim_text = self.slot_name + ": "
if self.empty:
nim_text += _("(empty)")
elif not self.isSupported():
nim_text += self.description + " (" + _("not supported") + ")"
else:
nim_text += self.description + " (" + self.friendly_type + ")"
return nim_text
friendly_full_description = property(getFriendlyFullDescription)
config_mode = property(lambda self: config.Nims[self.slot].configMode.value)
config = property(lambda self: config.Nims[self.slot])
empty = property(lambda self: self.getType() is None)
class NimManager:
def getConfiguredSats(self):
return self.sec.getConfiguredSats()
def getTransponders(self, pos):
if self.transponders.has_key(pos):
return self.transponders[pos]
else:
return []
def getTranspondersCable(self, nim):
nimConfig = config.Nims[nim]
if nimConfig.configMode.value != "nothing" and nimConfig.cable.scan_type.value == "provider":
return self.transponderscable[self.cablesList[nimConfig.cable.scan_provider.index][0]]
return [ ]
def getTranspondersTerrestrial(self, region):
return self.transpondersterrestrial[region]
def getCableDescription(self, nim):
return self.cablesList[config.Nims[nim].scan_provider.index][0]
def getCableFlags(self, nim):
return self.cablesList[config.Nims[nim].scan_provider.index][1]
def getTerrestrialDescription(self, nim):
return self.terrestrialsList[config.Nims[nim].terrestrial.index][0]
def getTerrestrialFlags(self, nim):
return self.terrestrialsList[config.Nims[nim].terrestrial.index][1]
def getSatDescription(self, pos):
return self.satellites[pos]
def sortFunc(self, x):
orbpos = x[0]
if orbpos > 1800:
return orbpos - 3600
else:
return orbpos + 1800
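# Note (assuming the usual enigma2 convention that orbital positions are
# stored in tenths of a degree, with values above 1800 meaning west): this
# key function sorts the satellite list from roughly 180 degrees west
# eastwards through 0 to 180 degrees east.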
def readTransponders(self):
self.satellites = { }
self.transponders = { }
self.transponderscable = { }
self.transpondersterrestrial = { }
self.transpondersatsc = { }
db = eDVBDB.getInstance()
if self.hasNimType("DVB-S"):
print "Reading satellites.xml"
db.readSatellites(self.satList, self.satellites, self.transponders)
self.satList.sort() # sort by orbpos
if self.hasNimType("DVB-C") or self.hasNimType("DVB-T") or self.hasNimType("DVB-T2"):
print "Reading cables.xml"
db.readCables(self.cablesList, self.transponderscable)
print "Reading terrestrial.xml"
db.readTerrestrials(self.terrestrialsList, self.transpondersterrestrial)
def enumerateNIMs(self):
# enum available NIMs. This is currently very dreambox-centric and uses the /proc/bus/nim_sockets interface.
# the result will be stored into nim_slots.
# the content of /proc/bus/nim_sockets looks like:
# NIM Socket 0:
# Type: DVB-S
# Name: BCM4501 DVB-S2 NIM (internal)
# NIM Socket 1:
# Type: DVB-S
# Name: BCM4501 DVB-S2 NIM (internal)
# NIM Socket 2:
# Type: DVB-T
# Name: Philips TU1216
# NIM Socket 3:
# Type: DVB-S
# Name: Alps BSBE1 702A
#
# Type will be either "DVB-S", "DVB-S2", "DVB-T", "DVB-C" or None.
# nim_slots is an array which has exactly one entry for each slot, even for empty ones.
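# Illustrative sketch (values taken from the sample above, not from the
# original code): after parsing, 'entries' roughly looks like
#   {0: {"type": "DVB-S", "name": "BCM4501 DVB-S2 NIM (internal)", "isempty": False}, ...}
# before the defaults below are filled in.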
self.nim_slots = [ ]
try:
nimfile = open("/proc/bus/nim_sockets")
except IOError:
return
current_slot = None
entries = {}
for line in nimfile:
if not line:
break
line = line.strip()
if line.startswith("NIM Socket"):
parts = line.split(" ")
current_slot = int(parts[2][:-1])
entries[current_slot] = {}
elif line.startswith("Type:"):
entries[current_slot]["type"] = str(line[6:])
entries[current_slot]["isempty"] = False
elif line.strip().startswith("Input_Name:"):
entries[current_slot]["input_name"] = str(line.strip()[12:])
elif line.startswith("Name:"):
entries[current_slot]["name"] = str(line[6:])
entries[current_slot]["isempty"] = False
elif line.startswith("Has_Outputs:"):
input = str(line[len("Has_Outputs:") + 1:])
entries[current_slot]["has_outputs"] = (input == "yes")
elif line.startswith("Internally_Connectable:"):
input = int(line[len("Internally_Connectable:") + 1:])
entries[current_slot]["internally_connectable"] = input
elif line.startswith("Frontend_Device:"):
input = int(line[len("Frontend_Device:") + 1:])
entries[current_slot]["frontend_device"] = input
elif line.startswith("Mode"):
# Mode 0: DVB-C
# Mode 1: DVB-T
# "Mode 1: DVB-T" -> ["Mode 1", "DVB-T"]
split = line.split(":")
split[1] = split[1].replace(' ','')
split2 = split[0].split(" ")
modes = entries[current_slot].get("multi_type", {})
modes[split2[1]] = split[1]
entries[current_slot]["multi_type"] = modes
elif line.startswith("I2C_Device:"):
input = int(line[len("I2C_Device:") + 1:])
entries[current_slot]["i2c"] = input
elif line.startswith("empty"):
entries[current_slot]["type"] = None
entries[current_slot]["name"] = _("N/A")
entries[current_slot]["isempty"] = True
nimfile.close()
for id, entry in entries.items():
if not (entry.has_key("name") and entry.has_key("type")):
entry["name"] = _("N/A")
entry["type"] = None
if not (entry.has_key("i2c")):
entry["i2c"] = None
if not (entry.has_key("has_outputs")):
entry["has_outputs"] = True
if entry.has_key("frontend_device"): # check if internally connectable
if path.exists("/proc/stb/frontend/%d/rf_switch" % entry["frontend_device"]):
entry["internally_connectable"] = entry["frontend_device"] - 1
else:
entry["internally_connectable"] = None
else:
entry["frontend_device"] = entry["internally_connectable"] = None
if not (entry.has_key("multi_type")):
if entry["name"] == "DVB-T2/C USB-Stick": # workaround dvbsky hybrit usb stick
entry["multi_type"] = {'0': 'DVB-T'}
entry["multi_type"] = {'1': 'DVB-C'}
else:
entry["multi_type"] = {}
if not (entry.has_key("input_name")):
entry["input_name"] = chr(ord('A') + id)
self.nim_slots.append(NIM(slot = id, description = entry["name"], type = entry["type"], has_outputs = entry["has_outputs"], internally_connectable = entry["internally_connectable"], multi_type = entry["multi_type"], frontend_id = entry["frontend_device"], i2c = entry["i2c"], is_empty = entry["isempty"], input_name = entry.get("input_name", None)))
def hasNimType(self, chktype):
for slot in self.nim_slots:
if slot.isCompatible(chktype):
return True
for type in slot.getMultiTypeList().values():
if chktype == type:
return True
return False
def getNimType(self, slotid):
return self.nim_slots[slotid].type
def getNimDescription(self, slotid):
return self.nim_slots[slotid].friendly_full_description
def getNimName(self, slotid):
return self.nim_slots[slotid].description
def getNimSlotInputName(self, slotid):
# returns just "A", "B", ...
return self.nim_slots[slotid].slot_input_name
def getNim(self, slotid):
return self.nim_slots[slotid]
def getI2CDevice(self, slotid):
return self.nim_slots[slotid].getI2C()
def getNimListOfType(self, type, exception = -1):
# returns a list of indexes for NIMs compatible to the given type, except for 'exception'
list = []
for x in self.nim_slots:
if x.isCompatible(type) and x.slot != exception:
list.append(x.slot)
return list
def __init__(self):
sec = secClass.getInstance()
global maxFixedLnbPositions
maxFixedLnbPositions = sec.getMaxFixedLnbPositions()
self.satList = [ ]
self.cablesList = []
self.terrestrialsList = []
self.atscList = []
self.enumerateNIMs()
self.readTransponders()
InitNimManager(self) #init config stuff
# get a list with the friendly full description
def nimList(self):
list = [ ]
for slot in self.nim_slots:
list.append(slot.friendly_full_description)
return list
def getSlotCount(self):
return len(self.nim_slots)
def hasOutputs(self, slotid):
return self.nim_slots[slotid].hasOutputs()
def nimInternallyConnectableTo(self, slotid):
return self.nim_slots[slotid].internallyConnectableTo()
def nimRemoveInternalLink(self, slotid):
self.nim_slots[slotid].removeInternalLink()
def canConnectTo(self, slotid):
slots = []
if self.nim_slots[slotid].internallyConnectableTo() is not None:
slots.append(self.nim_slots[slotid].internallyConnectableTo())
for type in self.nim_slots[slotid].connectableTo():
for slot in self.getNimListOfType(type, exception = slotid):
if self.hasOutputs(slot):
slots.append(slot)
# remove nims that have a connectedTo reference on this slot
for testnim in slots[:]:
for nim in self.getNimListOfType("DVB-S", slotid):
nimConfig = self.getNimConfig(nim)
if nimConfig.content.items.has_key("configMode") and nimConfig.configMode.value == "loopthrough" and int(nimConfig.connectedTo.value) == testnim:
slots.remove(testnim)
break
slots.sort()
return slots
def canEqualTo(self, slotid):
type = self.getNimType(slotid)
type = type[:5] # DVB-S2 --> DVB-S, DVB-T2 --> DVB-T, DVB-C2 --> DVB-C
nimList = self.getNimListOfType(type, slotid)
for nim in nimList[:]:
mode = self.getNimConfig(nim)
if mode.configMode.value == "loopthrough" or mode.configMode.value == "satposdepends":
nimList.remove(nim)
return nimList
def canDependOn(self, slotid):
type = self.getNimType(slotid)
type = type[:5] # DVB-S2 --> DVB-S, DVB-T2 --> DVB-T, DVB-C2 --> DVB-C
nimList = self.getNimListOfType(type, slotid)
positionerList = []
for nim in nimList[:]:
mode = self.getNimConfig(nim)
nimHaveRotor = mode.configMode.value == "simple" and mode.diseqcMode.value in ("positioner", "positioner_select")
if not nimHaveRotor and mode.configMode.value == "advanced":
for x in range(3601, 3607):
lnb = int(mode.advanced.sat[x].lnb.value)
if lnb != 0:
nimHaveRotor = True
break
if not nimHaveRotor:
for sat in mode.advanced.sat.values():
lnb_num = int(sat.lnb.value)
diseqcmode = lnb_num and mode.advanced.lnb[lnb_num].diseqcMode.value or ""
if diseqcmode == "1_2":
nimHaveRotor = True
break
if nimHaveRotor:
alreadyConnected = False
for testnim in nimList:
testmode = self.getNimConfig(testnim)
if testmode.configMode.value == "satposdepends" and int(testmode.connectedTo.value) == int(nim):
alreadyConnected = True
break
if not alreadyConnected:
positionerList.append(nim)
return positionerList
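# Note (descriptive): canEqualTo() lists tuners this slot could be configured
# "equal to", while canDependOn() lists tuners with a rotor setup that this
# slot could be made "satposdepends" on, skipping tuners already claimed by
# another slot.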
def getNimConfig(self, slotid):
return config.Nims[slotid]
def getSatName(self, pos):
for sat in self.satList:
if sat[0] == pos:
return sat[1]
return _("N/A")
def getSatList(self):
return self.satList
# returns True if something is configured to be connected to this nim
# if slotid == -1, returns if something is connected to ANY nim
def somethingConnected(self, slotid = -1):
if slotid == -1:
connected = False
for id in range(self.getSlotCount()):
if self.somethingConnected(id):
connected = True
return connected
else:
nim = config.Nims[slotid]
configMode = nim.configMode.value
if self.nim_slots[slotid].isCompatible("DVB-S") or self.nim_slots[slotid].isCompatible("DVB-T") or self.nim_slots[slotid].isCompatible("DVB-C"):
return not (configMode == "nothing")
def getSatListForNim(self, slotid):
list = []
if self.nim_slots[slotid].isCompatible("DVB-S"):
nim = config.Nims[slotid]
#print "slotid:", slotid
#print "self.satellites:", self.satList[config.Nims[slotid].diseqcA.index]
#print "diseqcA:", config.Nims[slotid].diseqcA.value
configMode = nim.configMode.value
if configMode == "equal":
slotid = int(nim.connectedTo.value)
nim = config.Nims[slotid]
configMode = nim.configMode.value
elif configMode == "loopthrough":
slotid = self.sec.getRoot(slotid, int(nim.connectedTo.value))
nim = config.Nims[slotid]
configMode = nim.configMode.value
if configMode == "simple":
dm = nim.diseqcMode.value
if dm in ("single", "toneburst_a_b", "diseqc_a_b", "diseqc_a_b_c_d"):
if nim.diseqcA.orbital_position < 3600:
list.append(self.satList[nim.diseqcA.index - 2])
if dm in ("toneburst_a_b", "diseqc_a_b", "diseqc_a_b_c_d"):
if nim.diseqcB.orbital_position < 3600:
list.append(self.satList[nim.diseqcB.index - 2])
if dm == "diseqc_a_b_c_d":
if nim.diseqcC.orbital_position < 3600:
list.append(self.satList[nim.diseqcC.index - 2])
if nim.diseqcD.orbital_position < 3600:
list.append(self.satList[nim.diseqcD.index - 2])
if dm == "positioner":
for x in self.satList:
list.append(x)
if dm == "positioner_select":
for x in self.satList:
if str(x[0]) in nim.userSatellitesList.value:
list.append(x)
elif configMode == "advanced":
for x in range(3601, 3605):
if int(nim.advanced.sat[x].lnb.value) != 0:
for x in self.satList:
list.append(x)
if not list:
for x in self.satList:
if int(nim.advanced.sat[x[0]].lnb.value) != 0:
list.append(x)
for x in range(3605, 3607):
if int(nim.advanced.sat[x].lnb.value) != 0:
for user_sat in self.satList:
if str(user_sat[0]) in nim.advanced.sat[x].userSatellitesList.value and user_sat not in list:
list.append(user_sat)
return list
def getRotorSatListForNim(self, slotid):
list = []
if self.nim_slots[slotid].isCompatible("DVB-S"):
nim = config.Nims[slotid]
configMode = nim.configMode.value
if configMode == "simple":
if nim.diseqcMode.value == "positioner":
for x in self.satList:
list.append(x)
elif nim.diseqcMode.value == "positioner_select":
for x in self.satList:
if str(x[0]) in nim.userSatellitesList.value:
list.append(x)
elif configMode == "advanced":
for x in range(3601, 3605):
if int(nim.advanced.sat[x].lnb.value) != 0:
for x in self.satList:
list.append(x)
if not list:
for x in self.satList:
lnbnum = int(nim.advanced.sat[x[0]].lnb.value)
if lnbnum != 0:
lnb = nim.advanced.lnb[lnbnum]
if lnb.diseqcMode.value == "1_2":
list.append(x)
for x in range(3605, 3607):
if int(nim.advanced.sat[x].lnb.value) != 0:
for user_sat in self.satList:
if str(user_sat[0]) in nim.advanced.sat[x].userSatellitesList.value and user_sat not in list:
list.append(user_sat)
return list
def InitSecParams():
config.sec = ConfigSubsection()
x = ConfigInteger(default=25, limits = (0, 9999))
x.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_AFTER_CONT_TONE_DISABLE_BEFORE_DISEQC, configElement.value))
config.sec.delay_after_continuous_tone_disable_before_diseqc = x
x = ConfigInteger(default=10, limits = (0, 9999))
x.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_AFTER_FINAL_CONT_TONE_CHANGE, configElement.value))
config.sec.delay_after_final_continuous_tone_change = x
x = ConfigInteger(default=10, limits = (0, 9999))
x.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_AFTER_FINAL_VOLTAGE_CHANGE, configElement.value))
config.sec.delay_after_final_voltage_change = x
x = ConfigInteger(default=120, limits = (0, 9999))
x.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_BETWEEN_DISEQC_REPEATS, configElement.value))
config.sec.delay_between_diseqc_repeats = x
x = ConfigInteger(default=100, limits = (0, 9999))
x.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_AFTER_LAST_DISEQC_CMD, configElement.value))
config.sec.delay_after_last_diseqc_command = x
x = ConfigInteger(default=50, limits = (0, 9999))
x.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_AFTER_TONEBURST, configElement.value))
config.sec.delay_after_toneburst = x
x = ConfigInteger(default=75, limits = (0, 9999))
x.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_AFTER_VOLTAGE_CHANGE_BEFORE_SWITCH_CMDS, configElement.value))
config.sec.delay_after_change_voltage_before_switch_command = x
x = ConfigInteger(default=200, limits = (0, 9999))
x.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_AFTER_ENABLE_VOLTAGE_BEFORE_SWITCH_CMDS, configElement.value))
config.sec.delay_after_enable_voltage_before_switch_command = x
x = ConfigInteger(default=700, limits = (0, 9999))
x.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_BETWEEN_SWITCH_AND_MOTOR_CMD, configElement.value))
config.sec.delay_between_switch_and_motor_command = x
x = ConfigInteger(default=500, limits = (0, 9999))
x.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_AFTER_VOLTAGE_CHANGE_BEFORE_MEASURE_IDLE_INPUTPOWER, configElement.value))
config.sec.delay_after_voltage_change_before_measure_idle_inputpower = x
x = ConfigInteger(default=900, limits = (0, 9999))
x.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_AFTER_ENABLE_VOLTAGE_BEFORE_MOTOR_CMD, configElement.value))
config.sec.delay_after_enable_voltage_before_motor_command = x
x = ConfigInteger(default=500, limits = (0, 9999))
x.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_AFTER_MOTOR_STOP_CMD, configElement.value))
config.sec.delay_after_motor_stop_command = x
x = ConfigInteger(default=500, limits = (0, 9999))
x.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_AFTER_VOLTAGE_CHANGE_BEFORE_MOTOR_CMD, configElement.value))
config.sec.delay_after_voltage_change_before_motor_command = x
x = ConfigInteger(default=70, limits = (0, 9999))
x.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_BEFORE_SEQUENCE_REPEAT, configElement.value))
config.sec.delay_before_sequence_repeat = x
x = ConfigInteger(default=360, limits = (0, 9999))
x.addNotifier(lambda configElement: secClass.setParam(secClass.MOTOR_RUNNING_TIMEOUT, configElement.value))
config.sec.motor_running_timeout = x
x = ConfigInteger(default=1, limits = (0, 5))
x.addNotifier(lambda configElement: secClass.setParam(secClass.MOTOR_COMMAND_RETRIES, configElement.value))
config.sec.motor_command_retries = x
x = ConfigInteger(default=50, limits = (0, 9999))
x.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_AFTER_DISEQC_RESET_CMD, configElement.value))
config.sec.delay_after_diseqc_reset_cmd = x
x = ConfigInteger(default=150, limits = (0, 9999))
x.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_AFTER_DISEQC_PERIPHERIAL_POWERON_CMD, configElement.value))
config.sec.delay_after_diseqc_peripherial_poweron_cmd = x
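# Example (illustrative only): every value above becomes a ConfigInteger under
# config.sec, so an individual delay can be tuned like any other setting, e.g.
#   config.sec.delay_after_toneburst.value = 100
#   config.sec.delay_after_toneburst.save()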
# TODO add support for satpos depending nims to advanced nim configuration
# so a second/third/fourth cable from a motorized lnb can be used behind a
# diseqc 1.0 / diseqc 1.1 / toneburst switch
# the C(++) part should be able to handle this
# the configElement should be only visible when diseqc 1.2 is disabled
jess_alias = ("JESS","UNICABLE2","SCD2","EN50607","EN 50607")
lscr = ("scr1","scr2","scr3","scr4","scr5","scr6","scr7","scr8","scr9","scr10",
"scr11","scr12","scr13","scr14","scr15","scr16","scr17","scr18","scr19","scr20",
"scr21","scr22","scr23","scr24","scr25","scr26","scr27","scr28","scr29","scr30",
"scr31","scr32")
def InitNimManager(nimmgr):
hw = HardwareInfo()
addNimConfig = False
try:
config.Nims
except:
addNimConfig = True
if addNimConfig:
InitSecParams()
config.Nims = ConfigSubList()
for x in range(len(nimmgr.nim_slots)):
config.Nims.append(ConfigSubsection())
lnb_choices = {
"universal_lnb": _("Universal LNB"),
"unicable": _("Unicable / JESS"),
"c_band": _("C-Band"),
"circular_lnb": _("Circular LNB"),
"user_defined": _("User defined")}
lnb_choices_default = "universal_lnb"
unicablelnbproducts = {}
unicablematrixproducts = {}
file = open(eEnv.resolve("${datadir}/enigma2/unicable.xml"), 'r')
doc = xml.etree.cElementTree.parse(file)
file.close()
root = doc.getroot()
entry = root.find("lnb")
for manufacturer in entry.getchildren():
m={}
m_update = m.update
for product in manufacturer.getchildren():
p={} #new empty dict for this product
p_update = p.update
scr=[]
scr_append = scr.append
scr_pop = scr.pop
for i in range(len(lscr)):
scr_append(product.get(lscr[i],"0"))
for i in range(len(lscr)):
if scr[len(lscr)-i-1] == "0":
scr_pop()
else:
break
p_update({"frequencies":tuple(scr)}) #add scr frequencies to dict product
diction = product.get("format","EN50494").upper()
if diction in jess_alias:
diction = "EN50607"
else:
diction = "EN50494"
p_update({"diction":tuple([diction])}) #add diction to dict product
positions=[]
positions_append = positions.append
positions_append(int(product.get("positions",1)))
for cnt in range(positions[0]):
lof=[]
lof_append = lof.append
lof_append(int(product.get("lofl",9750)))
lof_append(int(product.get("lofh",10600)))
lof_append(int(product.get("threshold",11700)))
positions_append(tuple(lof))
p_update({"positions":tuple(positions)}) #add positons to dict product
m_update({product.get("name"):p}) #add dict product to dict manufacturer
unicablelnbproducts.update({manufacturer.get("name"):m})
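# Illustrative shape of the structure built above (vendor/product names and
# values are made up):
#   unicablelnbproducts["SomeVendor"]["SomeLNB"] = {
#       "frequencies": ("1210", "1420", "1680", "2040"),
#       "diction": ("EN50494",),
#       "positions": (1, (9750, 10600, 11700)),
#   }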
entry = root.find("matrix")
for manufacturer in entry.getchildren():
m={}
m_update = m.update
for product in manufacturer.getchildren():
p={} #new empty dict for this product
p_update = p.update
scr=[]
scr_append = scr.append
scr_pop = scr.pop
for i in range(len(lscr)):
scr_append(product.get(lscr[i],"0"))
for i in range(len(lscr)):
if scr[len(lscr)-i-1] == "0":
scr_pop()
else:
break
p_update({"frequencies":tuple(scr)}) #add scr frequencies to dict product
diction = product.get("format","EN50494").upper()
if diction in jess_alias:
diction = "EN50607"
else:
diction = "EN50494"
p_update({"diction":tuple([diction])}) #add diction to dict product
positions=[]
positions_append = positions.append
positions_append(int(product.get("positions",1)))
for cnt in range(positions[0]):
lof=[]
lof_append = lof.append
lof_append(int(product.get("lofl",9750)))
lof_append(int(product.get("lofh",10600)))
lof_append(int(product.get("threshold",11700)))
positions_append(tuple(lof))
p_update({"positions":tuple(positions)}) #add positons to dict product
m_update({product.get("name"):p}) #add dict product to dict manufacturer
unicablematrixproducts.update({manufacturer.get("name"):m}) #add dict manufacturer to dict unicablematrixproducts
UnicableLnbManufacturers = unicablelnbproducts.keys()
UnicableLnbManufacturers.sort()
UnicableMatrixManufacturers = unicablematrixproducts.keys()
UnicableMatrixManufacturers.sort()
unicable_choices = {
"unicable_lnb": _("Unicable LNB"),
"unicable_matrix": _("Unicable Matrix"),
"unicable_user": "Unicable "+_("User defined")}
unicable_choices_default = "unicable_lnb"
advanced_lnb_satcr_user_choicesEN50494 = [("1", "SatCR 1"), ("2", "SatCR 2"), ("3", "SatCR 3"), ("4", "SatCR 4"), ("5", "SatCR 5"), ("6", "SatCR 6"), ("7", "SatCR 7"), ("8", "SatCR 8")]
advanced_lnb_satcr_user_choicesEN50607 = [("1", "SatCR 1"), ("2", "SatCR 2"), ("3", "SatCR 3"), ("4", "SatCR 4"), ("5", "SatCR 5"), ("6", "SatCR 6"), ("7", "SatCR 7"), ("8", "SatCR 8"),
("9", "SatCR 9"), ("10", "SatCR 10"), ("11", "SatCR 11"), ("12", "SatCR 12"), ("13", "SatCR 13"), ("14", "SatCR 14"), ("15", "SatCR 15"), ("16", "SatCR 16"),
("17", "SatCR 17"), ("18", "SatCR 18"), ("19", "SatCR 19"), ("20", "SatCR 20"), ("21", "SatCR 21"), ("22", "SatCR 22"), ("23", "SatCR 23"), ("24", "SatCR 24"),
("25", "SatCR 25"), ("26", "SatCR 26"), ("27", "SatCR 27"), ("28", "SatCR 28"), ("29", "SatCR 29"), ("30", "SatCR 30"), ("31", "SatCR 31"), ("32", "SatCR 32")]
advanced_lnb_diction_user_choices = [("EN50494", "Unicable(EN50494)"), ("EN50607", "JESS(EN50607)")]
prio_list = [ ("-1", _("Auto")) ]
for prio in range(65)+range(14000,14065)+range(19000,19065):
description = ""
if prio == 0:
description = _(" (disabled)")
elif 0 < prio < 65:
description = _(" (lower than any auto)")
elif 13999 < prio < 14066:
description = _(" (higher than rotor any auto)")
elif 18999 < prio < 19066:
description = _(" (higher than any auto)")
prio_list.append((str(prio), str(prio) + description))
advanced_lnb_csw_choices = [("none", _("None")), ("AA", _("Port A")), ("AB", _("Port B")), ("BA", _("Port C")), ("BB", _("Port D"))]
advanced_lnb_ucsw_choices = [("0", _("None"))] + [(str(y), "Input " + str(y)) for y in range(1, 17)]
diseqc_mode_choices = [
("single", _("Single")), ("toneburst_a_b", _("Toneburst A/B")),
("diseqc_a_b", "DiSEqC A/B"), ("diseqc_a_b_c_d", "DiSEqC A/B/C/D"),
("positioner", _("Positioner")), ("positioner_select", _("Positioner (selecting satellites)"))]
positioner_mode_choices = [("usals", _("USALS")), ("manual", _("manual"))]
diseqc_satlist_choices = [(3600, _('automatic'), 1), (3601, _('nothing connected'), 1)] + nimmgr.satList
longitude_orientation_choices = [("east", _("East")), ("west", _("West"))]
latitude_orientation_choices = [("north", _("North")), ("south", _("South"))]
turning_speed_choices = [("fast", _("Fast")), ("slow", _("Slow")), ("fast epoch", _("Fast epoch"))]
advanced_satlist_choices = nimmgr.satList + [
(3601, _('All satellites 1 (USALS)'), 1), (3602, _('All satellites 2 (USALS)'), 1),
(3603, _('All satellites 3 (USALS)'), 1), (3604, _('All satellites 4 (USALS)'), 1), (3605, _('Selecting satellites 1 (USALS)'), 1), (3606, _('Selecting satellites 2 (USALS)'), 1)]
advanced_lnb_choices = [("0", _("not configured"))] + [(str(y), "LNB " + str(y)) for y in range(1, (maxFixedLnbPositions+1))]
advanced_voltage_choices = [("polarization", _("Polarization")), ("13V", _("13 V")), ("18V", _("18 V"))]
advanced_tonemode_choices = [("band", _("Band")), ("on", _("On")), ("off", _("Off"))]
advanced_lnb_toneburst_choices = [("none", _("None")), ("A", _("A")), ("B", _("B"))]
advanced_lnb_allsat_diseqcmode_choices = [("1_2", _("1.2"))]
advanced_lnb_diseqcmode_choices = [("none", _("None")), ("1_0", _("1.0")), ("1_1", _("1.1")), ("1_2", _("1.2"))]
advanced_lnb_commandOrder1_0_choices = [("ct", "DiSEqC 1.0, toneburst"), ("tc", "toneburst, DiSEqC 1.0")]
advanced_lnb_commandOrder_choices = [
("ct", "DiSEqC 1.0, toneburst"), ("tc", "toneburst, DiSEqC 1.0"),
("cut", "DiSEqC 1.0, DiSEqC 1.1, toneburst"), ("tcu", "toneburst, DiSEqC 1.0, DiSEqC 1.1"),
("uct", "DiSEqC 1.1, DiSEqC 1.0, toneburst"), ("tuc", "toneburst, DiSEqC 1.1, DiSEqC 1.0")]
advanced_lnb_diseqc_repeat_choices = [("none", _("None")), ("one", _("One")), ("two", _("Two")), ("three", _("Three"))]
advanced_lnb_fast_turning_btime = mktime(datetime(1970, 1, 1, 7, 0).timetuple())
advanced_lnb_fast_turning_etime = mktime(datetime(1970, 1, 1, 19, 0).timetuple())
def configLOFChanged(configElement):
if configElement.value == "unicable":
x = configElement.slot_id
lnb = configElement.lnb_id
nim = config.Nims[x]
lnbs = nim.advanced.lnb
section = lnbs[lnb]
if isinstance(section.unicable, ConfigNothing):
if lnb == 1 or lnb > maxFixedLnbPositions:
section.unicable = ConfigSelection(unicable_choices, unicable_choices_default)
# elif lnb == 2:
else:
section.unicable = ConfigSelection(choices = {"unicable_matrix": _("Unicable Matrix"),"unicable_user": "Unicable "+_("User defined")}, default = "unicable_matrix")
# section.unicable = ConfigSelection(choices = {"unicable_user": _("User defined")}, default = "unicable_user")
if 1==1:
def fillUnicableConf(sectionDict, unicableproducts, vco_null_check):
for manufacturer in unicableproducts:
products = unicableproducts[manufacturer].keys()
products.sort()
products_valide = []
products_valide_append = products_valide.append
tmp = ConfigSubsection()
tmp.scr = ConfigSubDict()
tmp.vco = ConfigSubDict()
tmp.lofl = ConfigSubDict()
tmp.lofh = ConfigSubDict()
tmp.loft = ConfigSubDict()
tmp.positions = ConfigSubDict()
tmp.diction = ConfigSubDict()
for article in products:
positionslist = unicableproducts[manufacturer][article].get("positions")
positions = int(positionslist[0])
dictionlist = [unicableproducts[manufacturer][article].get("diction")]
if lnb <= positions or dictionlist[0][0] !="EN50607":
tmp.positions[article] = ConfigSubList()
tmp.positions[article].append(ConfigInteger(default=positions, limits = (positions, positions)))
tmp.diction[article] = ConfigSelection(choices = dictionlist, default = dictionlist[0][0])
scrlist = []
scrlist_append = scrlist.append
vcolist=unicableproducts[manufacturer][article].get("frequencies")
tmp.vco[article] = ConfigSubList()
for cnt in range(1,len(vcolist)+1):
vcofreq = int(vcolist[cnt-1])
if vcofreq == 0 and vco_null_check:
scrlist_append(("%d" %cnt,"SCR %d " %cnt +_("not used")))
else:
scrlist_append(("%d" %cnt,"SCR %d" %cnt))
tmp.vco[article].append(ConfigInteger(default=vcofreq, limits = (vcofreq, vcofreq)))
tmp.scr[article] = ConfigSelection(choices = scrlist, default = scrlist[0][0])
tmp.lofl[article] = ConfigSubList()
tmp.lofh[article] = ConfigSubList()
tmp.loft[article] = ConfigSubList()
tmp_lofl_article_append = tmp.lofl[article].append
tmp_lofh_article_append = tmp.lofh[article].append
tmp_loft_article_append = tmp.loft[article].append
for cnt in range(1,positions+1):
lofl = int(positionslist[cnt][0])
lofh = int(positionslist[cnt][1])
loft = int(positionslist[cnt][2])
tmp_lofl_article_append(ConfigInteger(default=lofl, limits = (lofl, lofl)))
tmp_lofh_article_append(ConfigInteger(default=lofh, limits = (lofh, lofh)))
tmp_loft_article_append(ConfigInteger(default=loft, limits = (loft, loft)))
products_valide_append(article)
if len(products_valide)==0:
products_valide_append("None")
tmp.product = ConfigSelection(choices = products_valide, default = products_valide[0])
sectionDict[manufacturer] = tmp
print "MATRIX"
section.unicableMatrix = ConfigSubDict()
section.unicableMatrixManufacturer = ConfigSelection(UnicableMatrixManufacturers, UnicableMatrixManufacturers[0])
fillUnicableConf(section.unicableMatrix, unicablematrixproducts, True)
print "LNB"
section.unicableLnb = ConfigSubDict()
section.unicableLnbManufacturer = ConfigSelection(UnicableLnbManufacturers, UnicableLnbManufacturers[0])
fillUnicableConf(section.unicableLnb, unicablelnbproducts, False)
#TODO satpositions for satcruser
section.dictionuser = ConfigSelection(advanced_lnb_diction_user_choices, default="EN50494")
section.satcruserEN50494 = ConfigSelection(advanced_lnb_satcr_user_choicesEN50494, default="1")
section.satcruserEN50607 = ConfigSelection(advanced_lnb_satcr_user_choicesEN50607, default="1")
tmp = ConfigSubList()
tmp.append(ConfigInteger(default=1284, limits = (950, 2150)))
tmp.append(ConfigInteger(default=1400, limits = (950, 2150)))
tmp.append(ConfigInteger(default=1516, limits = (950, 2150)))
tmp.append(ConfigInteger(default=1632, limits = (950, 2150)))
tmp.append(ConfigInteger(default=1748, limits = (950, 2150)))
tmp.append(ConfigInteger(default=1864, limits = (950, 2150)))
tmp.append(ConfigInteger(default=1980, limits = (950, 2150)))
tmp.append(ConfigInteger(default=2096, limits = (950, 2150)))
section.satcrvcouserEN50494 = tmp
tmp.append(ConfigInteger(default=1284, limits = (950, 2150)))
tmp.append(ConfigInteger(default=1400, limits = (950, 2150)))
tmp.append(ConfigInteger(default=1516, limits = (950, 2150)))
tmp.append(ConfigInteger(default=1632, limits = (950, 2150)))
tmp.append(ConfigInteger(default=1748, limits = (950, 2150)))
tmp.append(ConfigInteger(default=1864, limits = (950, 2150)))
tmp.append(ConfigInteger(default=1980, limits = (950, 2150)))
tmp.append(ConfigInteger(default=2096, limits = (950, 2150)))
tmp.append(ConfigInteger(default=1284, limits = (950, 2150)))
tmp.append(ConfigInteger(default=1400, limits = (950, 2150)))
tmp.append(ConfigInteger(default=1516, limits = (950, 2150)))
tmp.append(ConfigInteger(default=1632, limits = (950, 2150)))
tmp.append(ConfigInteger(default=1748, limits = (950, 2150)))
tmp.append(ConfigInteger(default=1864, limits = (950, 2150)))
tmp.append(ConfigInteger(default=1980, limits = (950, 2150)))
tmp.append(ConfigInteger(default=2096, limits = (950, 2150)))
tmp.append(ConfigInteger(default=1284, limits = (950, 2150)))
tmp.append(ConfigInteger(default=1400, limits = (950, 2150)))
tmp.append(ConfigInteger(default=1516, limits = (950, 2150)))
tmp.append(ConfigInteger(default=1632, limits = (950, 2150)))
tmp.append(ConfigInteger(default=1748, limits = (950, 2150)))
tmp.append(ConfigInteger(default=1864, limits = (950, 2150)))
tmp.append(ConfigInteger(default=1980, limits = (950, 2150)))
tmp.append(ConfigInteger(default=2096, limits = (950, 2150)))
section.satcrvcouserEN50607 = tmp
nim.advanced.unicableconnected = ConfigYesNo(default=False)
nim.advanced.unicableconnectedTo = ConfigSelection([(str(id), nimmgr.getNimDescription(id)) for id in nimmgr.getNimListOfType("DVB-S") if id != x])
def configDiSEqCModeChanged(configElement):
section = configElement.section
if configElement.value == "1_2" and isinstance(section.longitude, ConfigNothing):
section.longitude = ConfigFloat(default = [5,100], limits = [(0,359),(0,999)])
section.longitudeOrientation = ConfigSelection(longitude_orientation_choices, "east")
section.latitude = ConfigFloat(default = [50,767], limits = [(0,359),(0,999)])
section.latitudeOrientation = ConfigSelection(latitude_orientation_choices, "north")
section.tuningstepsize = ConfigFloat(default = [0,360], limits = [(0,9),(0,999)])
section.rotorPositions = ConfigInteger(default = 99, limits = [1,999])
section.turningspeedH = ConfigFloat(default = [2,3], limits = [(0,9),(0,9)])
section.turningspeedV = ConfigFloat(default = [1,7], limits = [(0,9),(0,9)])
section.powerMeasurement = ConfigYesNo(default=True)
section.powerThreshold = ConfigInteger(default=hw.get_device_name() == "dm7025" and 50 or 15, limits=(0, 100))
section.turningSpeed = ConfigSelection(turning_speed_choices, "fast")
section.fastTurningBegin = ConfigDateTime(default=advanced_lnb_fast_turning_btime, formatstring = _("%H:%M"), increment = 600)
section.fastTurningEnd = ConfigDateTime(default=advanced_lnb_fast_turning_etime, formatstring = _("%H:%M"), increment = 600)
def configLNBChanged(configElement):
x = configElement.slot_id
nim = config.Nims[x]
if isinstance(configElement.value, tuple):
lnb = int(configElement.value[0])
else:
lnb = int(configElement.value)
lnbs = nim.advanced.lnb
if lnb and lnb not in lnbs:
section = lnbs[lnb] = ConfigSubsection()
section.lofl = ConfigInteger(default=9750, limits = (0, 99999))
section.lofh = ConfigInteger(default=10600, limits = (0, 99999))
section.threshold = ConfigInteger(default=11700, limits = (0, 99999))
section.increased_voltage = ConfigYesNo(False)
section.toneburst = ConfigSelection(advanced_lnb_toneburst_choices, "none")
section.longitude = ConfigNothing()
if lnb > maxFixedLnbPositions:
tmp = ConfigSelection(advanced_lnb_allsat_diseqcmode_choices, "1_2")
tmp.section = section
configDiSEqCModeChanged(tmp)
else:
tmp = ConfigSelection(advanced_lnb_diseqcmode_choices, "none")
tmp.section = section
tmp.addNotifier(configDiSEqCModeChanged)
section.diseqcMode = tmp
section.commitedDiseqcCommand = ConfigSelection(advanced_lnb_csw_choices)
section.fastDiseqc = ConfigYesNo(False)
section.sequenceRepeat = ConfigYesNo(False)
section.commandOrder1_0 = ConfigSelection(advanced_lnb_commandOrder1_0_choices, "ct")
section.commandOrder = ConfigSelection(advanced_lnb_commandOrder_choices, "ct")
section.uncommittedDiseqcCommand = ConfigSelection(advanced_lnb_ucsw_choices)
section.diseqcRepeats = ConfigSelection(advanced_lnb_diseqc_repeat_choices, "none")
section.prio = ConfigSelection(prio_list, "-1")
section.unicable = ConfigNothing()
tmp = ConfigSelection(lnb_choices, lnb_choices_default)
tmp.slot_id = x
tmp.lnb_id = lnb
tmp.addNotifier(configLOFChanged, initial_call = False)
section.lof = tmp
def configModeChanged(configMode):
slot_id = configMode.slot_id
nim = config.Nims[slot_id]
if configMode.value == "advanced" and isinstance(nim.advanced, ConfigNothing):
# advanced config:
nim.advanced = ConfigSubsection()
nim.advanced.sat = ConfigSubDict()
nim.advanced.sats = getConfigSatlist(192, advanced_satlist_choices)
nim.advanced.lnb = ConfigSubDict()
nim.advanced.lnb[0] = ConfigNothing()
for x in nimmgr.satList:
tmp = ConfigSubsection()
tmp.voltage = ConfigSelection(advanced_voltage_choices, "polarization")
tmp.tonemode = ConfigSelection(advanced_tonemode_choices, "band")
tmp.usals = ConfigYesNo(True)
tmp.rotorposition = ConfigInteger(default=1, limits=(1, 255))
lnb = ConfigSelection(advanced_lnb_choices, "0")
lnb.slot_id = slot_id
lnb.addNotifier(configLNBChanged, initial_call = False)
tmp.lnb = lnb
nim.advanced.sat[x[0]] = tmp
for x in range(3601, 3607):
tmp = ConfigSubsection()
tmp.voltage = ConfigSelection(advanced_voltage_choices, "polarization")
tmp.tonemode = ConfigSelection(advanced_tonemode_choices, "band")
tmp.usals = ConfigYesNo(default=True)
tmp.userSatellitesList = ConfigText('[]')
tmp.rotorposition = ConfigInteger(default=1, limits=(1, 255))
lnbnum = maxFixedLnbPositions + x - 3600
lnb = ConfigSelection([("0", _("not configured")), (str(lnbnum), "LNB %d"%(lnbnum))], "0")
lnb.slot_id = slot_id
lnb.addNotifier(configLNBChanged, initial_call = False)
tmp.lnb = lnb
nim.advanced.sat[x] = tmp
def scpcSearchRangeChanged(configElement):
fe_id = configElement.fe_id
slot_id = configElement.slot_id
name = nimmgr.nim_slots[slot_id].description
if path.exists("/proc/stb/frontend/%d/use_scpc_optimized_search_range" % fe_id):
f = open("/proc/stb/frontend/%d/use_scpc_optimized_search_range" % fe_id, "w")
f.write(configElement.value)
f.close()
def toneAmplitudeChanged(configElement):
fe_id = configElement.fe_id
slot_id = configElement.slot_id
if path.exists("/proc/stb/frontend/%d/tone_amplitude" % fe_id):
f = open("/proc/stb/frontend/%d/tone_amplitude" % fe_id, "w")
f.write(configElement.value)
f.close()
def connectedToChanged(slot_id, nimmgr, configElement):
configMode = nimmgr.getNimConfig(slot_id).configMode
if configMode.value == 'loopthrough':
internally_connectable = nimmgr.nimInternallyConnectableTo(slot_id)
dest_slot = configElement.value
if internally_connectable is not None and int(internally_connectable) == int(dest_slot):
configMode.choices.updateItemDescription(configMode.index, _("internally loopthrough to"))
else:
configMode.choices.updateItemDescription(configMode.index, _("externally loopthrough to"))
def createSatConfig(nim, x, empty_slots):
try:
nim.toneAmplitude
except:
nim.toneAmplitude = ConfigSelection([("11", "340mV"), ("10", "360mV"), ("9", "600mV"), ("8", "700mV"), ("7", "800mV"), ("6", "900mV"), ("5", "1100mV")], "7")
nim.toneAmplitude.fe_id = x - empty_slots
nim.toneAmplitude.slot_id = x
nim.toneAmplitude.addNotifier(toneAmplitudeChanged)
nim.scpcSearchRange = ConfigSelection([("0", _("no")), ("1", _("yes"))], "0")
nim.scpcSearchRange.fe_id = x - empty_slots
nim.scpcSearchRange.slot_id = x
nim.scpcSearchRange.addNotifier(scpcSearchRangeChanged)
nim.diseqc13V = ConfigYesNo(False)
nim.diseqcMode = ConfigSelection(diseqc_mode_choices, "single")
nim.connectedTo = ConfigSelection([(str(id), nimmgr.getNimDescription(id)) for id in nimmgr.getNimListOfType("DVB-S") if id != x])
nim.simpleSingleSendDiSEqC = ConfigYesNo(False)
nim.simpleDiSEqCSetVoltageTone = ConfigYesNo(True)
nim.simpleDiSEqCOnlyOnSatChange = ConfigYesNo(False)
nim.simpleDiSEqCSetCircularLNB = ConfigYesNo(True)
nim.diseqcA = ConfigSatlist(list = diseqc_satlist_choices)
nim.diseqcB = ConfigSatlist(list = diseqc_satlist_choices)
nim.diseqcC = ConfigSatlist(list = diseqc_satlist_choices)
nim.diseqcD = ConfigSatlist(list = diseqc_satlist_choices)
nim.positionerMode = ConfigSelection(positioner_mode_choices, "usals")
nim.userSatellitesList = ConfigText('[]')
nim.pressOKtoList = ConfigNothing()
nim.longitude = ConfigFloat(default=[5,100], limits=[(0,359),(0,999)])
nim.longitudeOrientation = ConfigSelection(longitude_orientation_choices, "east")
nim.latitude = ConfigFloat(default=[50,767], limits=[(0,359),(0,999)])
nim.latitudeOrientation = ConfigSelection(latitude_orientation_choices, "north")
nim.tuningstepsize = ConfigFloat(default = [0,360], limits = [(0,9),(0,999)])
nim.rotorPositions = ConfigInteger(default = 99, limits = [1,999])
nim.turningspeedH = ConfigFloat(default = [2,3], limits = [(0,9),(0,9)])
nim.turningspeedV = ConfigFloat(default = [1,7], limits = [(0,9),(0,9)])
nim.powerMeasurement = ConfigYesNo(False)
nim.powerThreshold = ConfigInteger(default=hw.get_device_name() == "dm8000" and 15 or 50, limits=(0, 100))
nim.turningSpeed = ConfigSelection(turning_speed_choices, "fast")
btime = datetime(1970, 1, 1, 7, 0)
nim.fastTurningBegin = ConfigDateTime(default = mktime(btime.timetuple()), formatstring = _("%H:%M"), increment = 900)
etime = datetime(1970, 1, 1, 19, 0)
nim.fastTurningEnd = ConfigDateTime(default = mktime(etime.timetuple()), formatstring = _("%H:%M"), increment = 900)
def createCableConfig(nim, x):
try:
nim.cable
except:
list = [ ]
n = 0
for x in nimmgr.cablesList:
list.append((str(n), x[0]))
n += 1
nim.cable = ConfigSubsection()
nim.cable.scan_networkid = ConfigInteger(default = 0, limits = (0, 99999))
possible_scan_types = [("bands", _("Frequency bands")), ("steps", _("Frequency steps"))]
if n:
possible_scan_types.append(("provider", _("Provider")))
nim.cable.scan_provider = ConfigSelection(default = "0", choices = list)
nim.cable.scan_type = ConfigSelection(default = "provider", choices = possible_scan_types)
nim.cable.scan_band_EU_VHF_I = ConfigYesNo(default = True)
nim.cable.scan_band_EU_MID = ConfigYesNo(default = True)
nim.cable.scan_band_EU_VHF_III = ConfigYesNo(default = True)
nim.cable.scan_band_EU_UHF_IV = ConfigYesNo(default = True)
nim.cable.scan_band_EU_UHF_V = ConfigYesNo(default = True)
nim.cable.scan_band_EU_SUPER = ConfigYesNo(default = True)
nim.cable.scan_band_EU_HYPER = ConfigYesNo(default = True)
nim.cable.scan_band_US_LOW = ConfigYesNo(default = False)
nim.cable.scan_band_US_MID = ConfigYesNo(default = False)
nim.cable.scan_band_US_HIGH = ConfigYesNo(default = False)
nim.cable.scan_band_US_SUPER = ConfigYesNo(default = False)
nim.cable.scan_band_US_HYPER = ConfigYesNo(default = False)
nim.cable.scan_frequency_steps = ConfigInteger(default = 1000, limits = (1000, 10000))
nim.cable.scan_mod_qam16 = ConfigYesNo(default = False)
nim.cable.scan_mod_qam32 = ConfigYesNo(default = False)
nim.cable.scan_mod_qam64 = ConfigYesNo(default = True)
nim.cable.scan_mod_qam128 = ConfigYesNo(default = False)
nim.cable.scan_mod_qam256 = ConfigYesNo(default = True)
nim.cable.scan_sr_6900 = ConfigYesNo(default = True)
nim.cable.scan_sr_6875 = ConfigYesNo(default = True)
nim.cable.scan_sr_ext1 = ConfigInteger(default = 0, limits = (0, 7230))
nim.cable.scan_sr_ext2 = ConfigInteger(default = 0, limits = (0, 7230))
def createTerrestrialConfig(nim, x):
try:
nim.terrestrial
except:
list = []
n = 0
for x in nimmgr.terrestrialsList:
list.append((str(n), x[0]))
n += 1
nim.terrestrial = ConfigSelection(choices = list)
nim.terrestrial_5V = ConfigOnOff()
empty_slots = 0
for slot in nimmgr.nim_slots:
x = slot.slot
nim = config.Nims[x]
if slot.isCompatible("DVB-S"):
createSatConfig(nim, x, empty_slots)
config_mode_choices = [("nothing", _("nothing connected")),
("simple", _("simple")), ("advanced", _("advanced"))]
if len(nimmgr.getNimListOfType(slot.type, exception = x)) > 0:
config_mode_choices.append(("equal", _("equal to")))
config_mode_choices.append(("satposdepends", _("second cable of motorized LNB")))
if len(nimmgr.canConnectTo(x)) > 0:
config_mode_choices.append(("loopthrough", _("loopthrough to")))
nim.advanced = ConfigNothing()
tmp = ConfigSelection(config_mode_choices, "simple")
tmp.slot_id = x
tmp.addNotifier(configModeChanged, initial_call = False)
nim.configMode = tmp
nim.configMode.connectedToChanged = boundFunction(connectedToChanged, x, nimmgr)
nim.connectedTo.addNotifier(boundFunction(connectedToChanged, x, nimmgr), initial_call = False)
elif slot.isCompatible("DVB-C"):
nim.configMode = ConfigSelection(
choices = {
"enabled": _("enabled"),
"nothing": _("nothing connected"),
},
default = "enabled")
createCableConfig(nim, x)
elif slot.isCompatible("DVB-T"):
nim.configMode = ConfigSelection(
choices = {
"enabled": _("enabled"),
"nothing": _("nothing connected"),
},
default = "enabled")
createTerrestrialConfig(nim, x)
else:
empty_slots += 1
nim.configMode = ConfigSelection(choices = { "nothing": _("disabled") }, default="nothing")
if slot.type is not None:
print "pls add support for this frontend type!", slot.type
nimmgr.sec = SecConfigure(nimmgr)
def tunerTypeChanged(nimmgr, configElement):
fe_id = configElement.fe_id
eDVBResourceManager.getInstance().setFrontendType(nimmgr.nim_slots[fe_id].frontend_id, nimmgr.nim_slots[fe_id].getType())
if path.exists("/proc/stb/frontend/%d/mode" % fe_id):
cur_type = int(open("/proc/stb/frontend/%d/mode" % fe_id, "r").read())
if cur_type != int(configElement.value):
print "tunerTypeChanged feid %d from %d to mode %d" % (fe_id, cur_type, int(configElement.value))
try:
oldvalue = open("/sys/module/dvb_core/parameters/dvb_shutdown_timeout", "r").readline()
f = open("/sys/module/dvb_core/parameters/dvb_shutdown_timeout", "w")
f.write("0")
f.close()
except:
print "[info] no /sys/module/dvb_core/parameters/dvb_shutdown_timeout available"
frontend = eDVBResourceManager.getInstance().allocateRawChannel(fe_id).getFrontend()
frontend.closeFrontend()
f = open("/proc/stb/frontend/%d/mode" % fe_id, "w")
f.write(configElement.value)
f.close()
frontend.reopenFrontend()
try:
f = open("/sys/module/dvb_core/parameters/dvb_shutdown_timeout", "w")
f.write(oldvalue)
f.close()
except:
print "[info] no /sys/module/dvb_core/parameters/dvb_shutdown_timeout available"
nimmgr.enumerateNIMs()
else:
print "tuner type is already already %d" %cur_type
empty_slots = 0
for slot in nimmgr.nim_slots:
x = slot.slot
nim = config.Nims[x]
addMultiType = False
try:
nim.multiType
except:
if slot.description.find("Sundtek SkyTV Ultimate III") > -1:
print"[NimManager] Sundtek SkyTV Ultimate III detected, multiType = False"
addMultiType = False
else:
addMultiType = True
if slot.isMultiType() and addMultiType:
typeList = []
for id in slot.getMultiTypeList().keys():
type = slot.getMultiTypeList()[id]
typeList.append((id, type))
nim.multiType = ConfigSelection(typeList, "0")
nim.multiType.fe_id = x - empty_slots
nim.multiType.addNotifier(boundFunction(tunerTypeChanged, nimmgr))
print"[NimManager] slotname = %s, slotdescription = %s, multitype = %s" % (slot.input_name, slot.description,(slot.isMultiType() and addMultiType))
empty_slots = 0
for slot in nimmgr.nim_slots:
x = slot.slot
nim = config.Nims[x]
empty = True
if slot.canBeCompatible("DVB-S"):
createSatConfig(nim, x, empty_slots)
empty = False
if slot.canBeCompatible("DVB-C"):
createCableConfig(nim, x)
empty = False
if slot.canBeCompatible("DVB-T"):
createTerrestrialConfig(nim, x)
empty = False
if empty:
empty_slots += 1
nimmanager = NimManager()
| gpl-2.0 | -572,358,455,112,383,550 | 39.655864 | 352 | 0.684798 | false |
codingisacopingstrategy/aa.core | aacore/settings.py | 1 | 1923 | # This file is part of Active Archives.
# Copyright 2006-2011 the Active Archives contributors (see AUTHORS)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Also add information on how to contact you by electronic and paper mail.
from django.conf import settings
import os.path
EXIFTOOL = getattr(settings, 'AA_EXIFTOOL', 'exiftool')
FFMPEG = getattr(settings, 'AA_FFMPEG', 'ffmpeg')
IDENTIFY = getattr(settings, 'AA_IDENTIFY', 'identify')
CONVERT = getattr(settings, 'AA_CONVERT', 'convert')
USER_AGENT = getattr(settings, 'AA_USER_AGENT', "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1) Gecko/20090624 Firefox/3.5")
DEFAULT_REL_NAMESPACE = getattr(settings, 'AA_DEFAULT_REL_NAMESPACE', "aa")
RDF_STORAGE_NAME = getattr(settings, 'AA_RDF_STORAGE_NAME', "aa")
# FIXME: Change this setting to an absolute path as it throws a redland error
# on production
RDF_STORAGE_DIR = getattr(settings, 'AA_RDF_STORAGE_DIR', ".")
# List of models that are indexed by the RDF Store
INDEXED_MODELS = getattr(settings, 'AA_INDEXED_MODELS', ("aacore.models.Resource",))
RESOURCE_DELEGATES = getattr(settings, 'AA_RESOURCE_DELEGATES', ())
CACHE_DIR = getattr(settings, 'AA_CACHE_DIR', os.path.join(settings.MEDIA_ROOT, "cache"))
CACHE_URL = getattr(settings, 'AA_CACHE_URL', os.path.join(settings.MEDIA_URL, "cache"))
| agpl-3.0 | -1,930,031,849,796,650,800 | 44.785714 | 127 | 0.74571 | false |
shepdl/stream-daemon | twitter_local/stream.py | 1 | 3651 | try:
import urllib.request as urllib_request
import urllib.error as urllib_error
import io
except ImportError:
import urllib2 as urllib_request
import urllib2 as urllib_error
import simplejson as json
from ssl import SSLError
import socket
from .api import TwitterCall, wrap_response
import sys
class TwitterJSONIter(object):
def __init__(self, handle, uri, arg_data, block=True):
self.decoder = json.JSONDecoder()
self.handle = handle
self.buf = b""
self.block = block
def __iter__(self):
sock = self.handle.fp._sock.fp._sock
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if not self.block:
sock.setblocking(False)
while True:
utf8_buf = self.buf.decode('utf8').lstrip()
pos = utf8_buf.find("}\r\n{") + 1
if pos:
to_yield = utf8_buf[0:pos]
self.buf = utf8_buf[pos:].encode('utf8')
if to_yield != "":
yield wrap_response(json.loads(to_yield), self.handle.headers)
continue
else:
if self.block:
pass
else:
yield None
# except urllib_error.HTTPError as e:
# raise TwitterHTTPError(e, uri, self.format, arg_data)
# this is a non-blocking read (ie, it will return if any data is available)
try:
self.buf += sock.recv(1024)
except SSLError as e:
if (not self.block) and (e.errno == 2):
# Apparently this means there was nothing in the socket buf
pass
else:
raise
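# Note (descriptive): the stream is a sequence of JSON objects separated by
# CRLF, so the "}\r\n{" search above marks the boundary between two adjacent
# objects in the buffer; the complete object is yielded and the remainder
# stays buffered for the next iteration.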
def handle_stream_response(req, uri, arg_data, block):
handle = urllib_request.urlopen(req,)
return iter(TwitterJSONIter(handle, uri, arg_data, block))
class TwitterStreamCall(TwitterCall):
def _handle_response(self, req, uri, arg_data, _timeout=None):
return handle_stream_response(req, uri, arg_data, block=True)
class TwitterStreamCallNonBlocking(TwitterCall):
def _handle_response(self, req, uri, arg_data, _timeout=None):
return handle_stream_response(req, uri, arg_data, block=False)
class TwitterStream(TwitterStreamCall):
"""
The TwitterStream object is an interface to the Twitter Stream API
(stream.twitter.com). This can be used pretty much the same as the
Twitter class except the result of calling a method will be an
iterator that yields objects decoded from the stream. For
example::
twitter_stream = TwitterStream(auth=UserPassAuth('joe', 'joespassword'))
iterator = twitter_stream.statuses.sample()
for tweet in iterator:
...do something with this tweet...
The iterator will yield tweets forever and ever (until the stream
breaks at which point it raises a TwitterHTTPError.)
The `block` parameter controls if the stream is blocking. Default
is blocking (True). When set to False, the iterator will
occasionally yield None when there is no available message.
"""
def __init__(
self, domain="stream.twitter.com", secure=True, auth=None,
api_version='1', block=True):
uriparts = ()
uriparts += (str(api_version),)
if block:
call_cls = TwitterStreamCall
else:
call_cls = TwitterStreamCallNonBlocking
TwitterStreamCall.__init__(
self, auth=auth, format="json", domain=domain,
callable_cls=call_cls,
secure=secure, uriparts=uriparts)
| mit | 6,063,538,529,739,189,000 | 34.794118 | 87 | 0.6086 | false |
mccarrmb/moztrap | scripts/update/update.py | 3 | 1527 | """
Deployment for moztrap
Requires commander (https://github.com/oremj/commander) which is installed on
the systems that need it.
"""
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from commander.deploy import task, hostgroups
import commander_settings as settings
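# Deployment flow (summary of the tasks below): pre_update() pulls the tagged
# code and submodules, update() regenerates static assets and applies database
# migrations, and deploy() checks in the changes and touches the WSGI file on
# every web host so the application reloads.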
@task
def update_code(ctx, tag):
with ctx.lcd(settings.SRC_DIR):
ctx.local("git fetch")
ctx.local("git pull origin %s" % tag)
ctx.local("git submodule sync")
ctx.local("git submodule update --init --recursive")
ctx.local("find . -type f -name '.gitignore' -or -name '*.pyc' -delete")
ctx.local("git rev-parse HEAD > media/revision.txt")
@task
def update_assets(ctx):
with ctx.lcd(settings.SRC_DIR):
ctx.local("LANG=en_US.UTF-8 python2.6 vendor-manage.py collectstatic --noinput")
ctx.local("LANG=en_US.UTF-8 python2.6 vendor-manage.py compress")
@task
def database(ctx):
with ctx.lcd(settings.SRC_DIR):
ctx.local("python2.6 vendor-manage.py syncdb --migrate")
@task
def checkin_changes(ctx):
ctx.local(settings.DEPLOY_SCRIPT)
@hostgroups(settings.WEB_HOSTGROUP, remote_kwargs={'ssh_key': settings.SSH_KEY})
def deploy_app(ctx):
ctx.remote(settings.REMOTE_UPDATE_SCRIPT)
ctx.remote("/bin/touch %s" % settings.REMOTE_WSGI)
@task
def pre_update(ctx, ref=settings.UPDATE_REF):
update_code(ref)
@task
def update(ctx):
update_assets()
database()
@task
def deploy(ctx):
checkin_changes()
deploy_app()
| bsd-2-clause | -664,890,487,265,350,000 | 23.238095 | 88 | 0.681074 | false |
supamii/QttpServer | lib/gmock/scripts/generator/cpp/tokenize.py | 679 | 9703 | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenize C++ source code."""
__author__ = '[email protected] (Neal Norwitz)'
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
import sys
from cpp import utils
if not hasattr(builtins, 'set'):
# Nominal support for Python 2.3.
from sets import Set as set
# Add $ as a valid identifier char since so much code uses it.
_letters = 'abcdefghijklmnopqrstuvwxyz'
VALID_IDENTIFIER_CHARS = set(_letters + _letters.upper() + '_0123456789$')
HEX_DIGITS = set('0123456789abcdefABCDEF')
INT_OR_FLOAT_DIGITS = set('01234567890eE-+')
# C++0x string preffixes.
_STR_PREFIXES = set(('R', 'u8', 'u8R', 'u', 'uR', 'U', 'UR', 'L', 'LR'))
# Token types.
UNKNOWN = 'UNKNOWN'
SYNTAX = 'SYNTAX'
CONSTANT = 'CONSTANT'
NAME = 'NAME'
PREPROCESSOR = 'PREPROCESSOR'
# Where the token originated from. This can be used for backtracking.
# It is always set to WHENCE_STREAM in this code.
WHENCE_STREAM, WHENCE_QUEUE = range(2)
class Token(object):
"""Data container to represent a C++ token.
Tokens can be identifiers, syntax char(s), constants, or
pre-processor directives.
start contains the index of the first char of the token in the source
end contains the index of the last char of the token in the source
"""
def __init__(self, token_type, name, start, end):
self.token_type = token_type
self.name = name
self.start = start
self.end = end
self.whence = WHENCE_STREAM
def __str__(self):
if not utils.DEBUG:
return 'Token(%r)' % self.name
return 'Token(%r, %s, %s)' % (self.name, self.start, self.end)
__repr__ = __str__
def _GetString(source, start, i):
i = source.find('"', i+1)
while source[i-1] == '\\':
# Count the trailing backslashes.
backslash_count = 1
j = i - 2
while source[j] == '\\':
backslash_count += 1
j -= 1
# When trailing backslashes are even, they escape each other.
if (backslash_count % 2) == 0:
break
i = source.find('"', i+1)
return i + 1
def _GetChar(source, start, i):
# NOTE(nnorwitz): may not be quite correct, should be good enough.
i = source.find("'", i+1)
while source[i-1] == '\\':
# Need to special case '\\'.
if (i - 2) > start and source[i-2] == '\\':
break
i = source.find("'", i+1)
# Try to handle unterminated single quotes (in a #if 0 block).
if i < 0:
i = start
return i + 1
def GetTokens(source):
"""Returns a sequence of Tokens.
Args:
source: string of C++ source code.
Yields:
Token that represents the next token in the source.
"""
# Cache various valid character sets for speed.
valid_identifier_chars = VALID_IDENTIFIER_CHARS
hex_digits = HEX_DIGITS
int_or_float_digits = INT_OR_FLOAT_DIGITS
int_or_float_digits2 = int_or_float_digits | set('.')
# Only ignore errors while in a #if 0 block.
ignore_errors = False
count_ifs = 0
i = 0
end = len(source)
while i < end:
# Skip whitespace.
while i < end and source[i].isspace():
i += 1
if i >= end:
return
token_type = UNKNOWN
start = i
c = source[i]
if c.isalpha() or c == '_': # Find a string token.
token_type = NAME
while source[i] in valid_identifier_chars:
i += 1
# String and character constants can look like a name if
# they are something like L"".
if (source[i] == "'" and (i - start) == 1 and
source[start:i] in 'uUL'):
        # u, U, and L are valid C++0x character prefixes.
token_type = CONSTANT
i = _GetChar(source, start, i)
elif source[i] == "'" and source[start:i] in _STR_PREFIXES:
token_type = CONSTANT
i = _GetString(source, start, i)
elif c == '/' and source[i+1] == '/': # Find // comments.
i = source.find('\n', i)
if i == -1: # Handle EOF.
i = end
continue
elif c == '/' and source[i+1] == '*': # Find /* comments. */
i = source.find('*/', i) + 2
continue
elif c in ':+-<>&|*=': # : or :: (plus other chars).
token_type = SYNTAX
i += 1
new_ch = source[i]
if new_ch == c:
i += 1
elif c == '-' and new_ch == '>':
i += 1
elif new_ch == '=':
i += 1
elif c in '()[]{}~!?^%;/.,': # Handle single char tokens.
token_type = SYNTAX
i += 1
if c == '.' and source[i].isdigit():
token_type = CONSTANT
i += 1
while source[i] in int_or_float_digits:
i += 1
# Handle float suffixes.
for suffix in ('l', 'f'):
if suffix == source[i:i+1].lower():
i += 1
break
elif c.isdigit(): # Find integer.
token_type = CONSTANT
if c == '0' and source[i+1] in 'xX':
# Handle hex digits.
i += 2
while source[i] in hex_digits:
i += 1
else:
while source[i] in int_or_float_digits2:
i += 1
# Handle integer (and float) suffixes.
for suffix in ('ull', 'll', 'ul', 'l', 'f', 'u'):
size = len(suffix)
if suffix == source[i:i+size].lower():
i += size
break
elif c == '"': # Find string.
token_type = CONSTANT
i = _GetString(source, start, i)
elif c == "'": # Find char.
token_type = CONSTANT
i = _GetChar(source, start, i)
elif c == '#': # Find pre-processor command.
token_type = PREPROCESSOR
got_if = source[i:i+3] == '#if' and source[i+3:i+4].isspace()
if got_if:
count_ifs += 1
elif source[i:i+6] == '#endif':
count_ifs -= 1
if count_ifs == 0:
ignore_errors = False
# TODO(nnorwitz): handle preprocessor statements (\ continuations).
while 1:
i1 = source.find('\n', i)
i2 = source.find('//', i)
i3 = source.find('/*', i)
i4 = source.find('"', i)
# NOTE(nnorwitz): doesn't handle comments in #define macros.
# Get the first important symbol (newline, comment, EOF/end).
i = min([x for x in (i1, i2, i3, i4, end) if x != -1])
# Handle #include "dir//foo.h" properly.
if source[i] == '"':
i = source.find('"', i+1) + 1
assert i > 0
continue
# Keep going if end of the line and the line ends with \.
if not (i == i1 and source[i-1] == '\\'):
if got_if:
condition = source[start+4:i].lstrip()
if (condition.startswith('0') or
condition.startswith('(0)')):
ignore_errors = True
break
i += 1
elif c == '\\': # Handle \ in code.
# This is different from the pre-processor \ handling.
i += 1
continue
elif ignore_errors:
# The tokenizer seems to be in pretty good shape. This
# raise is conditionally disabled so that bogus code
# in an #if 0 block can be handled. Since we will ignore
# it anyways, this is probably fine. So disable the
# exception and return the bogus char.
i += 1
else:
sys.stderr.write('Got invalid token in %s @ %d token:%s: %r\n' %
('?', i, c, source[i-10:i+10]))
raise RuntimeError('unexpected token')
if i <= 0:
print('Invalid index, exiting now.')
return
yield Token(token_type, source[start:i], start, i)
if __name__ == '__main__':
def main(argv):
"""Driver mostly for testing purposes."""
for filename in argv[1:]:
source = utils.ReadFile(filename)
if source is None:
continue
for token in GetTokens(source):
print('%-12s: %s' % (token.token_type, token.name))
# print('\r%6.2f%%' % (100.0 * index / token.end),)
sys.stdout.write('\n')
main(sys.argv)
| mit | -7,603,571,542,110,601,000 | 32.808362 | 79 | 0.495311 | false |
Kast0rTr0y/ansible | lib/ansible/modules/cloud/misc/ovirt.py | 17 | 17885 | #!/usr/bin/python
# (c) 2013, Vincent Van der Kussen <vincent at vanderkussen.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ovirt
author: "Vincent Van der Kussen (@vincentvdk)"
short_description: oVirt/RHEV platform management
description:
- allows you to create new instances, either from scratch or an image, in addition to deleting or stopping instances on the oVirt/RHEV platform
version_added: "1.4"
options:
user:
description:
- the user to authenticate with
default: null
required: true
aliases: []
url:
description:
- the url of the oVirt instance
default: null
required: true
aliases: []
instance_name:
description:
- the name of the instance to use
default: null
required: true
aliases: [ vmname ]
password:
description:
- password of the user to authenticate with
default: null
required: true
aliases: []
image:
description:
- template to use for the instance
default: null
required: false
aliases: []
resource_type:
description:
- whether you want to deploy an image or create an instance from scratch.
default: null
required: false
aliases: []
choices: [ 'new', 'template' ]
zone:
description:
- deploy the image to this oVirt cluster
default: null
required: false
aliases: []
instance_disksize:
description:
- size of the instance's disk in GB
default: null
required: false
aliases: [ vm_disksize]
instance_cpus:
description:
- the instance's number of cpu's
default: 1
required: false
aliases: [ vmcpus ]
instance_nic:
description:
- name of the network interface in oVirt/RHEV
default: null
required: false
aliases: [ vmnic ]
instance_network:
description:
- the logical network the machine should belong to
default: rhevm
required: false
aliases: [ vmnetwork ]
instance_mem:
description:
- the instance's amount of memory in MB
default: null
required: false
aliases: [ vmmem ]
instance_type:
description:
- define if the instance is a server or desktop
default: server
required: false
aliases: [ vmtype ]
choices: [ 'server', 'desktop' ]
disk_alloc:
description:
- define if disk is thin or preallocated
default: thin
required: false
aliases: []
choices: [ 'thin', 'preallocated' ]
disk_int:
description:
- interface type of the disk
default: virtio
required: false
aliases: []
choices: [ 'virtio', 'ide' ]
instance_os:
description:
- type of Operating System
default: null
required: false
aliases: [ vmos ]
instance_cores:
description:
- define the instance's number of cores
default: 1
required: false
aliases: [ vmcores ]
sdomain:
description:
- the Storage Domain where you want to create the instance's disk on.
default: null
required: false
aliases: []
region:
description:
- the oVirt/RHEV datacenter where you want to deploy to
default: null
required: false
aliases: []
instance_dns:
description:
- define the instance's Primary DNS server
required: false
aliases: [ dns ]
version_added: "2.1"
instance_domain:
description:
- define the instance's Domain
required: false
aliases: [ domain ]
version_added: "2.1"
instance_hostname:
description:
- define the instance's Hostname
required: false
aliases: [ hostname ]
version_added: "2.1"
instance_ip:
description:
- define the instance's IP
required: false
aliases: [ ip ]
version_added: "2.1"
instance_netmask:
description:
- define the instance's Netmask
required: false
aliases: [ netmask ]
version_added: "2.1"
instance_rootpw:
description:
- define the instance's Root password
required: false
aliases: [ rootpw ]
version_added: "2.1"
instance_key:
description:
- define the instance's Authorized key
required: false
aliases: [ key ]
version_added: "2.1"
state:
description:
- create, terminate or remove instances
default: 'present'
required: false
aliases: []
    choices: ['present', 'absent', 'shutdown', 'started', 'restart']
requirements:
- "python >= 2.6"
- "ovirt-engine-sdk-python"
'''
EXAMPLES = '''
# Basic example provisioning from image.
ovirt:
user: admin@internal
url: https://ovirt.example.com
instance_name: ansiblevm04
password: secret
image: centos_64
zone: cluster01
    resource_type: template
# Full example to create new instance from scratch
ovirt:
instance_name: testansible
resource_type: new
instance_type: server
user: admin@internal
password: secret
url: https://ovirt.example.com
instance_disksize: 10
zone: cluster01
region: datacenter1
instance_cpus: 1
instance_nic: nic1
instance_network: rhevm
instance_mem: 1000
disk_alloc: thin
sdomain: FIBER01
instance_cores: 1
instance_os: rhel_6x64
    disk_int: virtio
# stopping an instance
ovirt:
instance_name: testansible
state: stopped
user: admin@internal
password: secret
url: https://ovirt.example.com
# starting an instance
ovirt:
instance_name: testansible
state: started
user: admin@internal
password: secret
url: https://ovirt.example.com
# starting an instance with cloud init information
ovirt:
instance_name: testansible
state: started
user: admin@internal
password: secret
url: https://ovirt.example.com
hostname: testansible
domain: ansible.local
ip: 192.0.2.100
netmask: 255.255.255.0
gateway: 192.0.2.1
rootpw: bigsecret
'''
import time  # used by the polling loop in vm_restart()
try:
from ovirtsdk.api import API
from ovirtsdk.xml import params
HAS_OVIRTSDK = True
except ImportError:
HAS_OVIRTSDK = False
# ------------------------------------------------------------------- #
# create connection with API
#
def conn(url, user, password):
api = API(url=url, username=user, password=password, insecure=True)
try:
value = api.test()
except:
raise Exception("error connecting to the oVirt API")
return api
# ------------------------------------------------------------------- #
# Create VM from scratch
def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int):
if vmdisk_alloc == 'thin':
# define VM params
vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))), type_=vmtype)
# define disk params
vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=True, interface=vmdisk_int, type_="System", format='cow',
storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
# define network parameters
network_net = params.Network(name=vmnetwork)
nic_net1 = params.NIC(name='nic1', network=network_net, interface='virtio')
elif vmdisk_alloc == 'preallocated':
# define VM params
vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))) ,type_=vmtype)
# define disk params
vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=False, interface=vmdisk_int, type_="System", format='raw',
storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
# define network parameters
network_net = params.Network(name=vmnetwork)
nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio')
try:
conn.vms.add(vmparams)
except:
raise Exception("Error creating VM with specified parameters")
vm = conn.vms.get(name=vmname)
try:
vm.disks.add(vmdisk)
except:
raise Exception("Error attaching disk")
try:
vm.nics.add(nic_net1)
except:
raise Exception("Error adding nic")
# create an instance from a template
def create_vm_template(conn, vmname, image, zone):
vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), template=conn.templates.get(name=image),disks=params.Disks(clone=True))
try:
conn.vms.add(vmparams)
except:
raise Exception('error adding template %s' % image)
# start instance
def vm_start(conn, vmname, hostname=None, ip=None, netmask=None, gateway=None,
domain=None, dns=None, rootpw=None, key=None):
vm = conn.vms.get(name=vmname)
use_cloud_init = False
nics = None
nic = None
if hostname or ip or netmask or gateway or domain or dns or rootpw or key:
use_cloud_init = True
if ip and netmask and gateway:
ipinfo = params.IP(address=ip, netmask=netmask, gateway=gateway)
nic = params.GuestNicConfiguration(name='eth0', boot_protocol='STATIC', ip=ipinfo, on_boot=True)
nics = params.Nics()
nics = params.GuestNicsConfiguration(nic_configuration=[nic])
initialization=params.Initialization(regenerate_ssh_keys=True, host_name=hostname, domain=domain, user_name='root',
root_password=rootpw, nic_configurations=nics, dns_servers=dns,
authorized_ssh_keys=key)
action = params.Action(use_cloud_init=use_cloud_init, vm=params.VM(initialization=initialization))
vm.start(action=action)
# Stop instance
def vm_stop(conn, vmname):
vm = conn.vms.get(name=vmname)
vm.stop()
# restart instance
def vm_restart(conn, vmname):
state = vm_status(conn, vmname)
vm = conn.vms.get(name=vmname)
vm.stop()
while conn.vms.get(vmname).get_status().get_state() != 'down':
time.sleep(5)
vm.start()
# remove an instance
def vm_remove(conn, vmname):
vm = conn.vms.get(name=vmname)
vm.delete()
# ------------------------------------------------------------------- #
# VM statuses
#
# Get the VMs status
def vm_status(conn, vmname):
status = conn.vms.get(name=vmname).status.state
return status
# Get VM object and return it's name if object exists
def get_vm(conn, vmname):
vm = conn.vms.get(name=vmname)
if vm is None:
name = "empty"
else:
name = vm.get_name()
return name
# ------------------------------------------------------------------- #
# Hypervisor operations
#
# not available yet
# ------------------------------------------------------------------- #
# Main
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['present', 'absent', 'shutdown', 'started', 'restart']),
#name = dict(required=True),
user = dict(required=True),
url = dict(required=True),
instance_name = dict(required=True, aliases=['vmname']),
password = dict(required=True, no_log=True),
image = dict(),
resource_type = dict(choices=['new', 'template']),
zone = dict(),
instance_disksize = dict(aliases=['vm_disksize']),
instance_cpus = dict(default=1, aliases=['vmcpus']),
instance_nic = dict(aliases=['vmnic']),
instance_network = dict(default='rhevm', aliases=['vmnetwork']),
instance_mem = dict(aliases=['vmmem']),
instance_type = dict(default='server', aliases=['vmtype'], choices=['server', 'desktop']),
disk_alloc = dict(default='thin', choices=['thin', 'preallocated']),
disk_int = dict(default='virtio', choices=['virtio', 'ide']),
instance_os = dict(aliases=['vmos']),
instance_cores = dict(default=1, aliases=['vmcores']),
instance_hostname = dict(aliases=['hostname']),
instance_ip = dict(aliases=['ip']),
instance_netmask = dict(aliases=['netmask']),
instance_gateway = dict(aliases=['gateway']),
instance_domain = dict(aliases=['domain']),
instance_dns = dict(aliases=['dns']),
instance_rootpw = dict(aliases=['rootpw']),
instance_key = dict(aliases=['key']),
sdomain = dict(),
region = dict(),
)
)
if not HAS_OVIRTSDK:
module.fail_json(msg='ovirtsdk required for this module')
state = module.params['state']
user = module.params['user']
url = module.params['url']
vmname = module.params['instance_name']
password = module.params['password']
image = module.params['image'] # name of the image to deploy
resource_type = module.params['resource_type'] # template or from scratch
zone = module.params['zone'] # oVirt cluster
vmdisk_size = module.params['instance_disksize'] # disksize
vmcpus = module.params['instance_cpus'] # number of cpu
vmnic = module.params['instance_nic'] # network interface
vmnetwork = module.params['instance_network'] # logical network
vmmem = module.params['instance_mem'] # mem size
vmdisk_alloc = module.params['disk_alloc'] # thin, preallocated
vmdisk_int = module.params['disk_int'] # disk interface virtio or ide
vmos = module.params['instance_os'] # Operating System
vmtype = module.params['instance_type'] # server or desktop
vmcores = module.params['instance_cores'] # number of cores
sdomain = module.params['sdomain'] # storage domain to store disk on
region = module.params['region'] # oVirt Datacenter
hostname = module.params['instance_hostname']
ip = module.params['instance_ip']
netmask = module.params['instance_netmask']
gateway = module.params['instance_gateway']
domain = module.params['instance_domain']
dns = module.params['instance_dns']
rootpw = module.params['instance_rootpw']
key = module.params['instance_key']
#initialize connection
try:
c = conn(url+"/api", user, password)
except Exception as e:
module.fail_json(msg='%s' % e)
if state == 'present':
if get_vm(c, vmname) == "empty":
if resource_type == 'template':
try:
create_vm_template(c, vmname, image, zone)
except Exception as e:
module.fail_json(msg='%s' % e)
module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmname,image))
elif resource_type == 'new':
# FIXME: refactor, use keyword args.
try:
create_vm(c, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int)
except Exception as e:
module.fail_json(msg='%s' % e)
module.exit_json(changed=True, msg="deployed VM %s from scratch" % vmname)
else:
module.exit_json(changed=False, msg="You did not specify a resource type")
else:
module.exit_json(changed=False, msg="VM %s already exists" % vmname)
if state == 'started':
if vm_status(c, vmname) == 'up':
module.exit_json(changed=False, msg="VM %s is already running" % vmname)
else:
#vm_start(c, vmname)
vm_start(c, vmname, hostname, ip, netmask, gateway, domain, dns, rootpw, key)
module.exit_json(changed=True, msg="VM %s started" % vmname)
if state == 'shutdown':
if vm_status(c, vmname) == 'down':
module.exit_json(changed=False, msg="VM %s is already shutdown" % vmname)
else:
vm_stop(c, vmname)
module.exit_json(changed=True, msg="VM %s is shutting down" % vmname)
if state == 'restart':
if vm_status(c, vmname) == 'up':
vm_restart(c, vmname)
module.exit_json(changed=True, msg="VM %s is restarted" % vmname)
else:
module.exit_json(changed=False, msg="VM %s is not running" % vmname)
if state == 'absent':
if get_vm(c, vmname) == "empty":
module.exit_json(changed=False, msg="VM %s does not exist" % vmname)
else:
vm_remove(c, vmname)
module.exit_json(changed=True, msg="VM %s removed" % vmname)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 | -5,253,022,133,488,718,000 | 32.937381 | 268 | 0.612972 | false |
MattCrystal/shiny-octo-happiness | scripts/gcc-wrapper.py | 1276 | 3382 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
allowed_warnings = set([
"return_address.c:62",
])
# Capture the name of the object file, can find it.
ofile = None
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
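# Illustrative match (not from the original script): for the line
#   "drivers/foo.c:123:9: warning: unused variable 'x'"
# m.group(2) is "foo.c:123", which is what gets checked against
# allowed_warnings above.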
def interpret_warning(line):
"""Decode the message from gcc. The messages we care about have a filename, and a warning"""
line = line.rstrip('\n')
m = warning_re.match(line)
if m and m.group(2) not in allowed_warnings:
print "error, forbidden warning:", m.group(2)
# If there is a warning, remove any object if it exists.
if ofile:
try:
os.remove(ofile)
except OSError:
pass
sys.exit(1)
def run_gcc():
args = sys.argv[1:]
# Look for -o
try:
i = args.index('-o')
global ofile
ofile = args[i+1]
except (ValueError, IndexError):
pass
compiler = sys.argv[0]
try:
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
for line in proc.stderr:
print line,
interpret_warning(line)
result = proc.wait()
except OSError as e:
result = e.errno
if result == errno.ENOENT:
print args[0] + ':',e.strerror
print 'Is your PATH set correctly?'
else:
print ' '.join(args), str(e)
return result
if __name__ == '__main__':
status = run_gcc()
sys.exit(status)
| gpl-2.0 | 5,666,666,240,075,491,000 | 34.229167 | 97 | 0.668539 | false |
zero-rp/miniblink49 | v8_7_5/tools/testrunner/local/statusfile.py | 5 | 11573 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# for py2/py3 compatibility
from __future__ import print_function
import os
import re
from variants import ALL_VARIANTS
from utils import Freeze
# Possible outcomes
FAIL = "FAIL"
PASS = "PASS"
TIMEOUT = "TIMEOUT"
CRASH = "CRASH"
# Outcomes only for status file, need special handling
FAIL_OK = "FAIL_OK"
FAIL_SLOPPY = "FAIL_SLOPPY"
# Modifiers
SKIP = "SKIP"
SLOW = "SLOW"
NO_VARIANTS = "NO_VARIANTS"
FAIL_PHASE_ONLY = "FAIL_PHASE_ONLY"
ALWAYS = "ALWAYS"
KEYWORDS = {}
for key in [SKIP, FAIL, PASS, CRASH, SLOW, FAIL_OK, NO_VARIANTS, FAIL_SLOPPY,
ALWAYS, FAIL_PHASE_ONLY]:
KEYWORDS[key] = key
# Support arches, modes to be written as keywords instead of strings.
VARIABLES = {ALWAYS: True}
for var in ["debug", "release", "big", "little", "android",
"android_arm", "android_arm64", "android_ia32", "android_x64",
"arm", "arm64", "ia32", "mips", "mipsel", "mips64", "mips64el",
"x64", "ppc", "ppc64", "s390", "s390x", "macos", "windows",
"linux", "aix", "r1", "r2", "r3", "r5", "r6"]:
VARIABLES[var] = var
# Allow using variants as keywords.
for var in ALL_VARIANTS:
VARIABLES[var] = var
class StatusFile(object):
def __init__(self, path, variables):
"""
_rules: {variant: {test name: [rule]}}
_prefix_rules: {variant: {test name prefix: [rule]}}
"""
with open(path) as f:
self._rules, self._prefix_rules = ReadStatusFile(f.read(), variables)
def get_outcomes(self, testname, variant=None):
"""Merges variant dependent and independent rules."""
outcomes = frozenset()
for key in set([variant or '', '']):
rules = self._rules.get(key, {})
prefix_rules = self._prefix_rules.get(key, {})
if testname in rules:
outcomes |= rules[testname]
for prefix in prefix_rules:
if testname.startswith(prefix):
outcomes |= prefix_rules[prefix]
return outcomes
def warn_unused_rules(self, tests, check_variant_rules=False):
"""Finds and prints unused rules in status file.
Rule X is unused when it doesn't apply to any tests, which can also mean
that all matching tests were skipped by another rule before evaluating X.
Args:
tests: list of pairs (testname, variant)
check_variant_rules: if set variant dependent rules are checked
"""
if check_variant_rules:
variants = list(ALL_VARIANTS)
else:
variants = ['']
used_rules = set()
for testname, variant in tests:
variant = variant or ''
if testname in self._rules.get(variant, {}):
used_rules.add((testname, variant))
if SKIP in self._rules[variant][testname]:
continue
for prefix in self._prefix_rules.get(variant, {}):
if testname.startswith(prefix):
used_rules.add((prefix, variant))
if SKIP in self._prefix_rules[variant][prefix]:
break
for variant in variants:
for rule, value in (
list(self._rules.get(variant, {}).iteritems()) +
list(self._prefix_rules.get(variant, {}).iteritems())):
if (rule, variant) not in used_rules:
if variant == '':
variant_desc = 'variant independent'
else:
variant_desc = 'variant: %s' % variant
print('Unused rule: %s -> %s (%s)' % (rule, value, variant_desc))
def _JoinsPassAndFail(outcomes1, outcomes2):
"""Indicates if we join PASS and FAIL from two different outcome sets and
the first doesn't already contain both.
"""
return (
PASS in outcomes1 and
not (FAIL in outcomes1 or FAIL_OK in outcomes1) and
(FAIL in outcomes2 or FAIL_OK in outcomes2)
)
VARIANT_EXPRESSION = object()
def _EvalExpression(exp, variables):
"""Evaluates expression and returns its result. In case of NameError caused by
undefined "variant" identifier returns VARIANT_EXPRESSION marker.
"""
try:
return eval(exp, variables)
except NameError as e:
identifier = re.match("name '(.*)' is not defined", e.message).group(1)
assert identifier == "variant", "Unknown identifier: %s" % identifier
return VARIANT_EXPRESSION
def _EvalVariantExpression(
condition, section, variables, variant, rules, prefix_rules):
variables_with_variant = dict(variables)
variables_with_variant["variant"] = variant
result = _EvalExpression(condition, variables_with_variant)
assert result != VARIANT_EXPRESSION
if result is True:
_ReadSection(
section,
variables_with_variant,
rules[variant],
prefix_rules[variant],
)
else:
assert result is False, "Make sure expressions evaluate to boolean values"
def _ParseOutcomeList(rule, outcomes, variables, target_dict):
"""Outcome list format: [condition, outcome, outcome, ...]"""
result = set([])
if type(outcomes) == str:
outcomes = [outcomes]
for item in outcomes:
if type(item) == str:
result.add(item)
elif type(item) == list:
condition = item[0]
exp = _EvalExpression(condition, variables)
assert exp != VARIANT_EXPRESSION, (
"Nested variant expressions are not supported")
if exp is False:
continue
# Ensure nobody uses an identifier by mistake, like "default",
# which would evaluate to true here otherwise.
assert exp is True, "Make sure expressions evaluate to boolean values"
for outcome in item[1:]:
assert type(outcome) == str
result.add(outcome)
else:
assert False
if len(result) == 0:
return
if rule in target_dict:
# A FAIL without PASS in one rule has always precedence over a single
# PASS (without FAIL) in another. Otherwise the default PASS expectation
# in a rule with a modifier (e.g. PASS, SLOW) would be joined to a FAIL
# from another rule (which intended to mark a test as FAIL and not as
# PASS and FAIL).
if _JoinsPassAndFail(target_dict[rule], result):
target_dict[rule] -= set([PASS])
if _JoinsPassAndFail(result, target_dict[rule]):
result -= set([PASS])
target_dict[rule] |= result
else:
target_dict[rule] = result
def ReadContent(content):
return eval(content, KEYWORDS)
def ReadStatusFile(content, variables):
"""Status file format
Status file := [section]
section = [CONDITION, section_rules]
section_rules := {path: outcomes}
outcomes := outcome | [outcome, ...]
outcome := SINGLE_OUTCOME | [CONDITION, SINGLE_OUTCOME, SINGLE_OUTCOME, ...]
"""
# Empty defaults for rules and prefix_rules. Variant-independent
# rules are mapped by "", others by the variant name.
rules = {variant: {} for variant in ALL_VARIANTS}
rules[""] = {}
prefix_rules = {variant: {} for variant in ALL_VARIANTS}
prefix_rules[""] = {}
variables.update(VARIABLES)
for conditional_section in ReadContent(content):
assert type(conditional_section) == list
assert len(conditional_section) == 2
condition, section = conditional_section
exp = _EvalExpression(condition, variables)
# The expression is variant-independent and evaluates to False.
if exp is False:
continue
# The expression is variant-independent and evaluates to True.
if exp is True:
_ReadSection(
section,
variables,
rules[''],
prefix_rules[''],
)
continue
# The expression is variant-dependent (contains "variant" keyword)
if exp == VARIANT_EXPRESSION:
# If the expression contains one or more "variant" keywords, we evaluate
# it for all possible variants and create rules for those that apply.
for variant in ALL_VARIANTS:
_EvalVariantExpression(
condition, section, variables, variant, rules, prefix_rules)
continue
assert False, "Make sure expressions evaluate to boolean values"
return Freeze(rules), Freeze(prefix_rules)
def _ReadSection(section, variables, rules, prefix_rules):
assert type(section) == dict
for rule, outcome_list in section.iteritems():
assert type(rule) == str
if rule[-1] == '*':
_ParseOutcomeList(rule[:-1], outcome_list, variables, prefix_rules)
else:
_ParseOutcomeList(rule, outcome_list, variables, rules)
JS_TEST_PATHS = {
'debugger': [[]],
'inspector': [[]],
'intl': [[]],
'message': [[]],
'mjsunit': [[]],
'mozilla': [['data']],
'test262': [['data', 'test'], ['local-tests', 'test']],
'webkit': [[]],
}
def PresubmitCheck(path):
with open(path) as f:
contents = ReadContent(f.read())
basename = os.path.basename(os.path.dirname(path))
root_prefix = basename + "/"
status = {"success": True}
def _assert(check, message): # Like "assert", but doesn't throw.
if not check:
print("%s: Error: %s" % (path, message))
status["success"] = False
try:
for section in contents:
_assert(type(section) == list, "Section must be a list")
_assert(len(section) == 2, "Section list must have exactly 2 entries")
section = section[1]
_assert(type(section) == dict,
"Second entry of section must be a dictionary")
for rule in section:
_assert(type(rule) == str, "Rule key must be a string")
_assert(not rule.startswith(root_prefix),
"Suite name prefix must not be used in rule keys")
_assert(not rule.endswith('.js'),
".js extension must not be used in rule keys.")
_assert('*' not in rule or (rule.count('*') == 1 and rule[-1] == '*'),
"Only the last character of a rule key can be a wildcard")
if basename in JS_TEST_PATHS and '*' not in rule:
_assert(any(os.path.exists(os.path.join(os.path.dirname(path),
*(paths + [rule + ".js"])))
for paths in JS_TEST_PATHS[basename]),
"missing file for %s test %s" % (basename, rule))
return status["success"]
except Exception as e:
print(e)
return False
| apache-2.0 | 4,204,661,375,995,003,000 | 33.443452 | 80 | 0.650912 | false |
florian-dacosta/OpenUpgrade | addons/l10n_fr_hr_payroll/report/__init__.py | 424 | 1091 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import fiche_paye
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -6,726,227,241,592,279,000 | 40.961538 | 80 | 0.615949 | false |
devendermishrajio/nova | nova/api/openstack/compute/legacy_v2/contrib/extended_ips.py | 79 | 3098 | # Copyright 2013 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Extended Ips API extension."""
import itertools
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
authorize = extensions.soft_extension_authorizer('compute', 'extended_ips')
class ExtendedIpsController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(ExtendedIpsController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
def _extend_server(self, context, server, instance):
key = "%s:type" % Extended_ips.alias
networks = common.get_networks_for_instance(context, instance)
for label, network in networks.items():
# NOTE(vish): ips are hidden in some states via the
# hide_server_addresses extension.
if label in server['addresses']:
all_ips = itertools.chain(network["ips"],
network["floating_ips"])
for i, ip in enumerate(all_ips):
server['addresses'][label][i][key] = ip['type']
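    # Illustrative effect (hypothetical values): an address entry such as
    # {"addr": "192.0.2.10", "version": 4} becomes
    # {"addr": "192.0.2.10", "version": 4, "OS-EXT-IPS:type": "fixed"}.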
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'show' method.
self._extend_server(context, server, db_instance)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
servers = list(resp_obj.obj['servers'])
for server in servers:
db_instance = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'detail' method.
self._extend_server(context, server, db_instance)
class Extended_ips(extensions.ExtensionDescriptor):
"""Adds type parameter to the ip list."""
name = "ExtendedIps"
alias = "OS-EXT-IPS"
namespace = ("http://docs.openstack.org/compute/ext/"
"extended_ips/api/v1.1")
updated = "2013-01-06T00:00:00Z"
def get_controller_extensions(self):
controller = ExtendedIpsController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
| apache-2.0 | 6,312,549,496,354,266,000 | 38.717949 | 79 | 0.632021 | false |
KaiSzuttor/espresso | testsuite/python/rotational_inertia.py | 2 | 6230 | # Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import numpy as np
import espressomd
import tests_common
@utx.skipIfMissingFeatures(["MASS", "ROTATIONAL_INERTIA"])
class RotationalInertia(ut.TestCase):
longMessage = True
# Handle for espresso system
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
system.cell_system.skin = 0
# Particle's angular momentum: initial and ongoing
L_0_lab = np.zeros((3))
L_lab = np.zeros((3))
# Angular momentum
def L_body(self, part):
return self.system.part[part].omega_body[:] * \
self.system.part[part].rinertia[:]
# Set the angular momentum
def set_L_0(self, part):
L_0_body = self.L_body(part)
self.L_0_lab = tests_common.convert_vec_body_to_space(
self.system, part, L_0_body)
def set_L(self, part):
L_body = self.L_body(part)
self.L_lab = tests_common.convert_vec_body_to_space(
self.system, part, L_body)
def test_stability(self):
self.system.part.clear()
self.system.part.add(
pos=np.array([0.0, 0.0, 0.0]), id=0, rotation=(1, 1, 1))
# Inertial motion around the stable and unstable axes
tol = 4E-3
        # Anisotropic moment of inertia. The stable axes correspond to J[1]
        # and J[2]; the unstable axis is J[0], the intermediate moment of
        # inertia (J[1] < J[0] < J[2], cf. the intermediate axis theorem).
J = np.array([5, 0.5, 18.5])
self.system.part[0].rinertia = J[:]
# Validation of J[1] stability
# ----------------------------
self.system.time_step = 0.0006
# Stable omega component should be larger than other components.
stable_omega = 57.65
self.system.part[0].omega_body = np.array([0.15, stable_omega, -0.043])
self.set_L_0(0)
for i in range(100):
self.set_L(0)
for k in range(3):
self.assertAlmostEqual(
self.L_lab[k], self.L_0_lab[k], delta=tol,
msg='Inertial motion around stable axis J1: Deviation in '
'angular momentum is too large. Step {0}, coordinate '
'{1}, expected {2}, got {3}'.format(
i, k, self.L_0_lab[k], self.L_lab[k]))
self.assertAlmostEqual(
self.system.part[0].omega_body[1], stable_omega, delta=tol,
msg='Inertial motion around stable axis J1: Deviation in omega '
'is too large. Step {0}, coordinate 1, expected {1}, got {2}'
.format(i, stable_omega, self.system.part[0].omega_body[1]))
self.system.integrator.run(10)
# Validation of J[2] stability
# ----------------------------
self.system.time_step = 0.01
# Stable omega component should be larger than other components.
stable_omega = 3.2
self.system.part[0].omega_body = np.array(
[0.011, -0.043, stable_omega])
self.set_L_0(0)
for i in range(100):
self.set_L(0)
for k in range(3):
self.assertAlmostEqual(
self.L_lab[k], self.L_0_lab[k], delta=tol,
msg='Inertial motion around stable axis J2: Deviation in '
'angular momentum is too large. Step {0}, coordinate '
'{1}, expected {2}, got {3}'.format(
i, k, self.L_0_lab[k], self.L_lab[k]))
self.assertAlmostEqual(
self.system.part[0].omega_body[2], stable_omega, delta=tol,
msg='Inertial motion around stable axis J2: Deviation in omega '
'is too large. Step {0}, coordinate 2, expected {1}, got {2}'
.format(i, stable_omega, self.system.part[0].omega_body[2]))
self.system.integrator.run(10)
# Validation of J[0]
# ------------------
self.system.time_step = 0.001
# Unstable omega component should be larger than other components.
unstable_omega = 5.76
self.system.part[0].omega_body = np.array(
[unstable_omega, -0.043, 0.15])
self.set_L_0(0)
for i in range(100):
self.set_L(0)
for k in range(3):
self.assertAlmostEqual(
self.L_lab[k], self.L_0_lab[k], delta=tol,
msg='Inertial motion around stable axis J0: Deviation in '
'angular momentum is too large. Step {0}, coordinate '
'{1}, expected {2}, got {3}'.format(
i, k, self.L_0_lab[k], self.L_lab[k]))
self.system.integrator.run(10)
def energy(self, p):
return 0.5 * np.dot(p.rinertia, p.omega_body**2)
def momentum(self, p):
return np.linalg.norm(p.rinertia * p.omega_body)
def test_energy_and_momentum_conservation(self):
system = self.system
system.part.clear()
system.thermostat.turn_off()
p = system.part.add(pos=(0, 0, 0), rinertia=(1.1, 1.3, 1.5),
rotation=(1, 1, 1), omega_body=(2, 1, 4))
E0 = self.energy(p)
m0 = self.momentum(p)
system.time_step = 0.001
for _ in range(1000):
system.integrator.run(100)
self.assertAlmostEqual(self.energy(p), E0, places=3)
self.assertAlmostEqual(self.momentum(p), m0, places=3)
if __name__ == '__main__':
ut.main()
| gpl-3.0 | 7,217,527,633,960,234,000 | 39.193548 | 81 | 0.561477 | false |
zirou30/MITMf | core/sslstrip/DnsCache.py | 5 | 1699 | # Copyright (c) 2014-2016 Moxie Marlinspike, Marcello Salvati
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
import logging
mitmf_logger = logging.getLogger('mitmf')
class DnsCache:
'''
The DnsCache maintains a cache of DNS lookups, mirroring the browser experience.
'''
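	# Minimal usage sketch (illustrative, not part of the original module):
	#   cache = DnsCache.getInstance()
	#   cache.cacheResolution('host.example', '198.51.100.7')
	#   cache.getCachedAddress('host.example')  # -> '198.51.100.7'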
_instance = None
def __init__(self):
self.customAddress = None
self.cache = {}
@staticmethod
def getInstance():
if DnsCache._instance == None:
DnsCache._instance = DnsCache()
return DnsCache._instance
def cacheResolution(self, host, address):
self.cache[host] = address
def getCachedAddress(self, host):
if host in self.cache:
return self.cache[host]
return None
def setCustomRes(self, host, ip_address=None):
if ip_address is not None:
self.cache[host] = ip_address
mitmf_logger.debug("[DNSCache] DNS entry set: %s -> %s" %(host, ip_address))
else:
if self.customAddress is not None:
self.cache[host] = self.customAddress
def setCustomAddress(self, ip_address):
self.customAddress = ip_address
| gpl-3.0 | 1,187,270,855,137,955,800 | 27.316667 | 81 | 0.725721 | false |
seecr/meresco-html | meresco/html/login/basichtmlloginform.py | 1 | 17676 | ## begin license ##
#
# "Meresco Html" is a template engine based on generators, and a sequel to Slowfoot.
# It is also known as "DynamicHtml" or "Seecr Html".
#
# Copyright (C) 2012 Meertens Instituut (KNAW) http://meertens.knaw.nl
# Copyright (C) 2012-2018, 2020-2021 Seecr (Seek You Too B.V.) https://seecr.nl
# Copyright (C) 2014 Stichting Bibliotheek.nl (BNL) http://www.bibliotheek.nl
# Copyright (C) 2015, 2020-2021 Stichting Kennisnet https://www.kennisnet.nl
# Copyright (C) 2020-2021 Data Archiving and Network Services https://dans.knaw.nl
# Copyright (C) 2020-2021 SURF https://www.surf.nl
# Copyright (C) 2020-2021 The Netherlands Institute for Sound and Vision https://beeldengeluid.nl
#
# This file is part of "Meresco Html"
#
# "Meresco Html" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Meresco Html" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Meresco Html"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
from meresco.components.http.utils import redirectHttp, CRLF, insertHeader, findCookies, okJson
from xml.sax.saxutils import quoteattr, escape as xmlEscape
from os.path import join
from .securezone import ORIGINAL_PATH
from simplejson import dumps, loads
from meresco.html import PostActions
from .labels import getLabel
from urllib.parse import urlencode
from meresco.html.utils import parse_qs
from time import time
from ._constants import UNAUTHORIZED
TWO_WEEKS = 2*7*24*3600
class BasicHtmlLoginForm(PostActions):
def __init__(self, action, loginPath, home="/", name=None, lang='en', rememberMeCookie=False):
PostActions.__init__(self, name=name)
self._action = action
self._loginPath = loginPath
self._home = home
self.registerAction('changepassword', self.handleChangePassword)
self.registerAction('remove', self.handleRemove)
self.registerAction('newUser', self.handleNewUser)
self.defaultAction(self.handleLogin)
self._lang = lang
self._rememberMeCookie = rememberMeCookie
def handleLogin(self, session=None, Body=None, **kwargs):
accept = kwargs.get("Headers", {}).get("Accept", '')
jsonResponse = 'application/json' in accept
strBody = str(Body, encoding='utf-8')
bodyArgs = {d['name']:[d['value']] for d in loads(strBody)} if jsonResponse else parse_qs(strBody, keep_blank_values=True)
username = bodyArgs.get('username', [None])[0]
password = bodyArgs.get('password', [None])[0]
rememberMe = bodyArgs.get('rememberMe', [None])[0] != None
if self.call.validateUser(username=username, password=password):
user = self.loginAsUser(username)
session[USER] = user
url = session.pop(ORIGINAL_PATH, self._home)
response = redirectHttp
if rememberMe and self._rememberMeCookie:
cookieValues = self.call.createCookie(user)
status, headers = response.split(CRLF, 1)
response = CRLF.join([status, cookieValues['header'], headers])
if jsonResponse:
yield bytes(okJson, encoding="utf-8")
yield dumps(dict(success=True))
return
yield bytes(response % url, encoding='utf-8')
else:
session['BasicHtmlLoginForm.formValues'] = {
'username': username,
'errorMessage': getLabel(self._lang, 'loginForm', 'invalid')
}
if jsonResponse:
yield bytes(okJson, encoding="utf-8")
yield dumps(dict(success=False, message=getLabel(self._lang, 'loginForm', 'invalid')))
return
yield bytes(redirectHttp % self._loginPath, encoding='utf-8')
def getUser(self, username):
return self._checkAndCreateUser(username)
loginAsUser = getUser
def loginForm(self, session, path, lang=None, **kwargs):
lang = lang or self._lang
formValues = session.get('BasicHtmlLoginForm.formValues', {}) if session else {}
yield """<div id="login-form">\n"""
if 'errorMessage' in formValues:
yield ' <p class="error">%s</p>\n' % xmlEscape(formValues['errorMessage'])
values = dict(
username=quoteattr(formValues.get('username', '')),
action=quoteattr(self._action),
formUrl=quoteattr(path),
lblUsername=getLabel(lang, 'loginForm', 'username'),
lblPassword=getLabel(lang, 'loginForm', 'password'),
lblLogin=getLabel(lang, 'loginForm', 'login'),
lblRememberMe=getLabel(lang, 'loginForm', 'rememberMe')
)
yield """
<form method="POST" name="login" action=%(action)s>
<input type="hidden" name="formUrl" value=%(formUrl)s/>
<dl>
<dt>%(lblUsername)s</dt>
<dd><input type="text" name="username" value=%(username)s/></dd>
<dt>%(lblPassword)s</dt>
<dd><input type="password" name="password"/></dd>""" % values
if self._rememberMeCookie:
yield """
<dt> </dt><dd class="rememberMe"><input type="checkbox" name="rememberMe" id="rememberMe" /><label for="rememberMe">%(lblRememberMe)s</label></dd>""" % values
yield """
<dd class="submit"><input type="submit" id="submitLogin" value="%(lblLogin)s"/></dd>
</dl>
</form>
<script type="text/javascript">
document.getElementById("submitLogin").focus()
</script>
</div>""" % values
session.pop('BasicHtmlLoginForm.formValues', None)
def newUserForm(self, session, path, lang=None, extraFields="", **kwargs):
lang = lang or self._lang
formValues = session.get('BasicHtmlLoginForm.newUserFormValues', {}) if session else {}
yield """<div id="login-new-user-form">\n"""
if not USER in session:
yield '<p class="error">Please login to add new users.</p>\n</div>'
return
if 'errorMessage' in formValues:
yield ' <p class="error">%s</p>\n' % xmlEscape(formValues['errorMessage'])
if 'successMessage' in formValues:
yield ' <p class="success">%s</p>\n' % xmlEscape(formValues['successMessage'])
values = dict(
username=quoteattr(formValues.get('username', '')),
action=quoteattr(join(self._action, 'newUser')),
formUrl=quoteattr(path),
returnUrl=quoteattr(kwargs.get('returnUrl', path)),
lblUsername=getLabel(lang, 'newuserForm', 'username'),
lblPassword=getLabel(lang, 'newuserForm', 'password'),
lblPasswordRepeat=getLabel(lang, 'newuserForm', 'password-repeat'),
lblCreate=getLabel(lang, 'newuserForm', 'create'),
extraFields=extraFields
)
yield """
<form method="POST" name="newUser" action=%(action)s>
<input type="hidden" name="formUrl" value=%(formUrl)s/>
<input type="hidden" name="returnUrl" value=%(returnUrl)s/>
<dl>
<dt>%(lblUsername)s</dt>
<dd><input type="text" name="username" value=%(username)s/></dd>
<dt>%(lblPassword)s</dt>
<dd><input type="password" name="password"/></dd>
<dt>%(lblPasswordRepeat)s</dt>
<dd><input type="password" name="retypedPassword"/></dd>
%(extraFields)s
<dd class="submit"><input type="submit" value="%(lblCreate)s"/></dd>
</dl>
</form>
</div>""" % values
session.pop('BasicHtmlLoginForm.newUserFormValues', None)
def handleNewUser(self, session, Body, **kwargs):
handlingUser = session.get(USER)
if handlingUser is None or not handlingUser.canEdit():
yield UNAUTHORIZED
return
bodyArgs = parse_qs(str(Body, encoding='utf-8'), keep_blank_values=True) if Body else {}
username = bodyArgs.get('username', [None])[0]
password = bodyArgs.get('password', [None])[0]
retypedPassword = bodyArgs.get('retypedPassword', [None])[0]
formUrl = bodyArgs.get('formUrl', [self._home])[0]
returnUrl = bodyArgs.get('returnUrl', [formUrl])[0]
targetUrl = formUrl
if password != retypedPassword:
session['BasicHtmlLoginForm.newUserFormValues']={'username': username, 'errorMessage': getLabel(self._lang, "newuserForm", 'dontMatch')}
else:
try:
self.do.addUser(username=username, password=password)
self.do.handleNewUser(username=username, Body=Body)
session['BasicHtmlLoginForm.newUserFormValues']={'successMessage': '%s "%s"' % (getLabel(self._lang, 'newuserForm', 'added'), username)}
targetUrl = returnUrl
except ValueError as e:
session['BasicHtmlLoginForm.newUserFormValues']={'username': username, 'errorMessage': str(e)}
yield redirectHttp % targetUrl.format(username=username)
def handleChangePassword(self, session, Body, **kwargs):
bodyArgs = parse_qs(str(Body, encoding='utf-8'), keep_blank_values=True) if Body else {}
username = bodyArgs.get('username', [None])[0]
oldPassword = bodyArgs.get('oldPassword', [None])[0]
newPassword = bodyArgs.get('newPassword', [None])[0]
retypedPassword = bodyArgs.get('retypedPassword', [None])[0]
formUrl = bodyArgs.get('formUrl', [self._home])[0]
returnUrl = bodyArgs.get('returnUrl', [formUrl])[0]
targetUrl = formUrl
handlingUser = session.get(USER)
if not handlingUser:
session['BasicHtmlLoginForm.formValues']={
'username': username,
'errorMessage': getLabel(self._lang, 'changepasswordForm', 'loginRequired')}
yield redirectHttp % targetUrl
return
if newPassword != retypedPassword:
session['BasicHtmlLoginForm.formValues']={
'username': username,
'errorMessage': getLabel(self._lang, 'changepasswordForm', 'dontMatch')}
else:
if (not oldPassword and handlingUser.canEdit(username) and handlingUser.name != username) or self.call.validateUser(username=username, password=oldPassword):
try:
self.call.setPassword(username, newPassword)
targetUrl = returnUrl
except ValueError:
session['BasicHtmlLoginForm.formValues']={
'username': username,
'errorMessage': getLabel(self._lang, 'changepasswordForm', 'passwordInvalid')}
else:
session['BasicHtmlLoginForm.formValues']={
'username': username,
'errorMessage': getLabel(self._lang, 'changepasswordForm', 'usernamePasswordDontMatch')}
yield redirectHttp % targetUrl.format(username=username)
def changePasswordForm(self, session, path, arguments, user=None, lang=None, onlyNewPassword=False, **kwargs):
lang = lang or self._lang
formValues = session.get('BasicHtmlLoginForm.formValues', {}) if session else {}
yield """<div id="login-change-password-form">\n"""
if not USER in session:
yield '<p class="error">Please login to change password.</p>\n</div>'
return
if 'errorMessage' in formValues:
yield ' <p class="error">%s</p>\n' % xmlEscape(formValues['errorMessage'])
formUrl = path
if arguments:
formUrl += "?" + urlencode(sorted(arguments.items()), doseq=True)
username = session[USER].name if user is None else (user if isinstance(user, str) else user.name)
values = dict(
action=quoteattr(join(self._action, 'changepassword')),
formUrl=quoteattr(formUrl),
returnUrl=quoteattr(kwargs.get('returnUrl', path)),
username=quoteattr(username),
lblOldPassword=getLabel(lang, "changepasswordForm", "old-password"),
lblNewPassword=getLabel(lang, "changepasswordForm", "new-password"),
lblNewPasswordRepeat=getLabel(lang, "changepasswordForm", "new-password-repeat"),
lblChange=getLabel(lang, "changepasswordForm", "change"),
)
yield """<form method="POST" name="changePassword" action=%(action)s>
<input type="hidden" name="formUrl" value=%(formUrl)s/>
<input type="hidden" name="returnUrl" value=%(returnUrl)s/>
<input type="hidden" name="username" value=%(username)s/>
<dl>
""" % values
if not onlyNewPassword:
yield """<dt>%(lblOldPassword)s</dt>
<dd><input type="password" name="oldPassword"/></dd>""" % values
yield """
<dt>%(lblNewPassword)s</dt>
<dd><input type="password" name="newPassword"/></dd>
<dt>%(lblNewPasswordRepeat)s</dt>
<dd><input type="password" name="retypedPassword"/></dd>
<dd class="submit"><input type="submit" value="%(lblChange)s"/></dd>
</dl>
</form>
</div>""" % values
session.pop('BasicHtmlLoginForm.formValues', None)
def userList(self, session, path, userLink=None, **kwargs):
yield """<div id="login-user-list">\n"""
if not USER in session:
yield '<p class="error">Please login to show user list.</p>\n</div>'
return
sessionUser = session[USER]
if sessionUser.canEdit():
yield """<script type="text/javascript">
function deleteUser(username) {
if (confirm("Are you sure?")) {
document.removeUser.username.value = username;
document.removeUser.submit();
}
}
</script>"""
yield """<form name="removeUser" method="POST" action=%s>
<input type="hidden" name="formUrl" value=%s/>
<input type="hidden" name="username"/>""" % (
quoteattr(join(self._action, 'remove')),
quoteattr(path),
)
yield '</form>\n'
yield '<ul>\n'
for user in sorted(self._listUsers(), key=lambda u:u.title()):
yield '<li>'
if userLink:
yield '<a href="%s?user=%s">%s</a>' % (userLink, xmlEscape(user.name), xmlEscape(user.title()))
else:
yield xmlEscape(user.title())
if sessionUser.name != user.name and (
sessionUser.canEdit(user.name)
):
yield """ <a href="javascript:deleteUser('%s');">delete</a>""" % user.name
yield '</li>\n'
yield '</ul>\n'
yield '</div>\n'
def _sessionUserMayDeleteAUser(self, sessionUser, user):
return user is not None and \
sessionUser is not None and \
sessionUser.name != user.name and \
sessionUser.canEdit(user.name)
def handleRemove(self, session, Body, **kwargs):
bodyArgs = parse_qs(str(Body, encoding='utf-8'), keep_blank_values=True) if Body else {}
formUrl = bodyArgs.get('formUrl', [self._home])[0]
sessionUser = session.get(USER)
user = self._checkAndCreateUser(bodyArgs.get('username', [None])[0])
if not self._sessionUserMayDeleteAUser(sessionUser, user):
yield UNAUTHORIZED
return
self.do.removeUser(user.name)
self.do.removeCookies(filter=lambda anObject: anObject.name == user.name if isinstance(anObject, self.User) else False)
self.do.removeCookies(filter=lambda anObject: anObject.get(USER).name == user.name if isinstance(anObject, dict) else False)
yield redirectHttp % formUrl
def logout(self, session, Headers, **ignored):
session.pop(USER, None)
redirectUrl = self._home
response = redirectHttp % redirectUrl
if not self._rememberMeCookie:
yield response
return
cookieName = self.call.cookieName()
for cookie in findCookies(Headers=Headers, name=cookieName):
self.call.removeCookie(cookie)
yield insertHeader([response], 'Set-Cookie: {}=; expires=Thu, 01 Jan 1970 00:00:00 GMT; path=/'.format(cookieName))
def _listUsers(self):
return [self._createUser(username) for username in self.call.listUsernames()]
def _checkAndCreateUser(self, username):
if not self.call.hasUser(username):
return None
return self._createUser(username)
def _createUser(self, username):
user = self.User(username)
user.isValid = lambda: self.call.hasUser(username)
self.do.enrichUser(user)
return user
def _now(self):
return time()
class User(object):
def __init__(inner, name):
inner.name = name
def title(inner):
return inner.name
def isAdmin(inner):
return inner.name == 'admin'
def canEdit(inner, username=None):
username = username.name if hasattr(username, 'name') else username
return inner.isAdmin() or inner.name == username
USER = 'user'
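# --- Illustrative sketch, not part of the original module ---
# A minimal, hypothetical demonstration of the User.canEdit() semantics
# defined above ('admin' may edit anyone, other users only themselves),
# assuming the enclosing class is named BasicHtmlLoginForm, as the session
# keys used earlier suggest:
#
#   admin = BasicHtmlLoginForm.User('admin')
#   johan = BasicHtmlLoginForm.User('johan')
#   assert admin.canEdit('johan')        # admin may edit anyone
#   assert johan.canEdit(johan)          # a user may edit itself
#   assert not johan.canEdit('admin')    # but nobody else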
| gpl-2.0 | 4,736,577,999,649,226,000 | 44.091837 | 175 | 0.609584 | false |
vrieni/orange | Orange/OrangeCanvas/gui/toolgrid.py | 6 | 14520 | """
A widget containing a grid of clickable actions/buttons.
"""
from collections import namedtuple, deque
from PyQt4.QtGui import (
QFrame, QAction, QToolButton, QGridLayout, QFontMetrics,
QSizePolicy, QStyleOptionToolButton, QStylePainter, QStyle
)
from PyQt4.QtCore import Qt, QObject, QSize, QVariant, QEvent, QSignalMapper
from PyQt4.QtCore import pyqtSignal as Signal
from . import utils
_ToolGridSlot = namedtuple(
"_ToolGridSlot",
["button",
"action",
"row",
"column"
]
)
class _ToolGridButton(QToolButton):
def __init__(self, *args, **kwargs):
QToolButton.__init__(self, *args, **kwargs)
self.__text = ""
def actionEvent(self, event):
QToolButton.actionEvent(self, event)
if event.type() == QEvent.ActionChanged or \
event.type() == QEvent.ActionAdded:
self.__textLayout()
def resizeEvent(self, event):
QToolButton.resizeEvent(self, event)
self.__textLayout()
def __textLayout(self):
fm = QFontMetrics(self.font())
text = unicode(self.defaultAction().iconText())
words = deque(text.split())
lines = []
curr_line = ""
curr_line_word_count = 0
option = QStyleOptionToolButton()
option.initFrom(self)
margin = self.style().pixelMetric(QStyle.PM_ButtonMargin, option, self)
width = self.width() - 2 * margin
while words:
w = words.popleft()
if curr_line_word_count:
line_extended = " ".join([curr_line, w])
else:
line_extended = w
line_w = fm.boundingRect(line_extended).width()
if line_w >= width:
if curr_line_word_count == 0 or len(lines) == 1:
# A single word that is too long must be elided.
# Also if the text overflows 2 lines
# Warning: hardcoded max lines
curr_line = fm.elidedText(line_extended, Qt.ElideRight,
width)
curr_line = unicode(curr_line)
else:
# Put the word back
words.appendleft(w)
lines.append(curr_line)
curr_line = ""
curr_line_word_count = 0
if len(lines) == 2:
break
else:
curr_line = line_extended
curr_line_word_count += 1
if curr_line:
lines.append(curr_line)
text = "\n".join(lines)
self.__text = text
def paintEvent(self, event):
p = QStylePainter(self)
opt = QStyleOptionToolButton()
self.initStyleOption(opt)
if self.__text:
# Replace the text
opt.text = self.__text
p.drawComplexControl(QStyle.CC_ToolButton, opt)
p.end()
class ToolGrid(QFrame):
"""
A widget containing a grid of actions/buttons.
Actions can be added using standard :func:`QWidget.addAction(QAction)`
and :func:`QWidget.insertAction(int, QAction)` methods.
Parameters
----------
parent : :class:`QWidget`
Parent widget.
columns : int
Number of columns in the grid layout.
buttonSize : :class:`QSize`, optional
Size of tool buttons in the grid.
iconSize : :class:`QSize`, optional
Size of icons in the buttons.
toolButtonStyle : :class:`Qt.ToolButtonStyle`
Tool button style.
"""
actionTriggered = Signal(QAction)
actionHovered = Signal(QAction)
def __init__(self, parent=None, columns=4, buttonSize=None,
iconSize=None, toolButtonStyle=Qt.ToolButtonTextUnderIcon):
QFrame.__init__(self, parent)
if buttonSize is not None:
buttonSize = QSize(buttonSize)
if iconSize is not None:
iconSize = QSize(iconSize)
self.__columns = columns
self.__buttonSize = buttonSize or QSize(50, 50)
self.__iconSize = iconSize or QSize(26, 26)
self.__toolButtonStyle = toolButtonStyle
self.__gridSlots = []
self.__buttonListener = ToolButtonEventListener(self)
self.__buttonListener.buttonRightClicked.connect(
self.__onButtonRightClick)
self.__buttonListener.buttonEnter.connect(
self.__onButtonEnter)
self.__mapper = QSignalMapper()
self.__mapper.mapped[QObject].connect(self.__onClicked)
self.__setupUi()
def __setupUi(self):
layout = QGridLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
layout.setSizeConstraint(QGridLayout.SetFixedSize)
self.setLayout(layout)
self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.MinimumExpanding)
def setButtonSize(self, size):
"""
Set the button size.
"""
if self.__buttonSize != size:
self.__buttonSize = size
for slot in self.__gridSlots:
slot.button.setFixedSize(size)
def buttonSize(self):
"""
Return the button size.
"""
return QSize(self.__buttonSize)
def setIconSize(self, size):
"""
Set the button icon size.
"""
if self.__iconSize != size:
self.__iconSize = size
for slot in self.__gridSlots:
slot.button.setIconSize(size)
def iconSize(self):
"""
Return the icon size
"""
return QSize(self.__iconSize)
def setToolButtonStyle(self, style):
"""
Set the tool button style.
"""
if self.__toolButtonStyle != style:
self.__toolButtonStyle = style
for slot in self.__gridSlots:
slot.button.setToolButtonStyle(style)
def toolButtonStyle(self):
"""
Return the tool button style.
"""
return self.__toolButtonStyle
def setColumnCount(self, columns):
"""
Set the number of button/action columns.
"""
if self.__columns != columns:
self.__columns = columns
self.__relayout()
def columns(self):
"""
Return the number of columns in the grid.
"""
return self.__columns
def clear(self):
"""
Clear all actions/buttons.
"""
for slot in reversed(list(self.__gridSlots)):
self.removeAction(slot.action)
self.__gridSlots = []
def insertAction(self, before, action):
"""
Insert a new action at the position currently occupied
by `before` (can also be an index).
Parameters
----------
before : :class:`QAction` or int
Position where the `action` should be inserted.
action : :class:`QAction`
Action to insert
"""
if isinstance(before, int):
actions = list(self.actions())
if len(actions) == 0 or before >= len(actions):
# Insert as the first action or the last action.
return self.addAction(action)
before = actions[before]
return QFrame.insertAction(self, before, action)
def setActions(self, actions):
"""
Clear the grid and add `actions`.
"""
self.clear()
for action in actions:
self.addAction(action)
def buttonForAction(self, action):
"""
Return the :class:`QToolButton` instance button for `action`.
"""
actions = [slot.action for slot in self.__gridSlots]
index = actions.index(action)
return self.__gridSlots[index].button
def createButtonForAction(self, action):
"""
Create and return a :class:`QToolButton` for action.
"""
button = _ToolGridButton(self)
button.setDefaultAction(action)
if self.__buttonSize.isValid():
button.setFixedSize(self.__buttonSize)
if self.__iconSize.isValid():
button.setIconSize(self.__iconSize)
button.setToolButtonStyle(self.__toolButtonStyle)
button.setProperty("tool-grid-button", QVariant(True))
return button
def count(self):
"""
Return the number of buttons/actions in the grid.
"""
return len(self.__gridSlots)
def actionEvent(self, event):
QFrame.actionEvent(self, event)
if event.type() == QEvent.ActionAdded:
# Note: the action is already in the self.actions() list.
actions = list(self.actions())
index = actions.index(event.action())
self.__insertActionButton(index, event.action())
elif event.type() == QEvent.ActionRemoved:
self.__removeActionButton(event.action())
def __insertActionButton(self, index, action):
"""Create a button for the action and add it to the layout
at index.
"""
self.__shiftGrid(index, 1)
button = self.createButtonForAction(action)
row = index / self.__columns
column = index % self.__columns
self.layout().addWidget(
button, row, column,
Qt.AlignLeft | Qt.AlignTop
)
self.__gridSlots.insert(
index, _ToolGridSlot(button, action, row, column)
)
self.__mapper.setMapping(button, action)
button.clicked.connect(self.__mapper.map)
button.installEventFilter(self.__buttonListener)
button.installEventFilter(self)
def __removeActionButton(self, action):
"""Remove the button for the action from the layout and delete it.
"""
actions = [slot.action for slot in self.__gridSlots]
index = actions.index(action)
slot = self.__gridSlots.pop(index)
slot.button.removeEventFilter(self.__buttonListener)
slot.button.removeEventFilter(self)
self.__mapper.removeMappings(slot.button)
self.layout().removeWidget(slot.button)
self.__shiftGrid(index + 1, -1)
slot.button.deleteLater()
def __shiftGrid(self, start, count=1):
"""Shift all buttons starting at index `start` by `count` cells.
"""
button_count = self.layout().count()
direction = 1 if count >= 0 else -1
if direction == 1:
start, end = button_count - 1, start - 1
else:
start, end = start, button_count
for index in range(start, end, -direction):
item = self.layout().itemAtPosition(index / self.__columns,
index % self.__columns)
if item:
button = item.widget()
new_index = index + count
self.layout().addWidget(button, new_index / self.__columns,
new_index % self.__columns,
Qt.AlignLeft | Qt.AlignTop)
def __relayout(self):
"""Relayout the buttons.
"""
for i in reversed(range(self.layout().count())):
self.layout().takeAt(i)
self.__gridSlots = [_ToolGridSlot(slot.button, slot.action,
i / self.__columns,
i % self.__columns)
for i, slot in enumerate(self.__gridSlots)]
for slot in self.__gridSlots:
self.layout().addWidget(slot.button, slot.row, slot.column,
Qt.AlignLeft | Qt.AlignTop)
def __indexOf(self, button):
"""Return the index of button widget.
"""
buttons = [slot.button for slot in self.__gridSlots]
return buttons.index(button)
def __onButtonRightClick(self, button):
pass
def __onButtonEnter(self, button):
action = button.defaultAction()
self.actionHovered.emit(action)
def __onClicked(self, action):
self.actionTriggered.emit(action)
def paintEvent(self, event):
return utils.StyledWidget_paintEvent(self, event)
def eventFilter(self, obj, event):
etype = event.type()
if etype == QEvent.KeyPress and obj.hasFocus():
key = event.key()
if key in [Qt.Key_Up, Qt.Key_Down, Qt.Key_Left, Qt.Key_Right]:
if self.__focusMove(obj, key):
event.accept()
return True
return QFrame.eventFilter(self, obj, event)
def __focusMove(self, focus, key):
assert(focus is self.focusWidget())
try:
index = self.__indexOf(focus)
        except ValueError:  # list.index() raises ValueError, not IndexError
return False
if key == Qt.Key_Down:
index += self.__columns
elif key == Qt.Key_Up:
index -= self.__columns
elif key == Qt.Key_Left:
index -= 1
elif key == Qt.Key_Right:
index += 1
if index >= 0 and index < self.count():
button = self.__gridSlots[index].button
button.setFocus(Qt.TabFocusReason)
return True
else:
return False
class ToolButtonEventListener(QObject):
"""
An event listener(filter) for :class:`QToolButtons`.
"""
buttonLeftClicked = Signal(QToolButton)
buttonRightClicked = Signal(QToolButton)
buttonEnter = Signal(QToolButton)
buttonLeave = Signal(QToolButton)
def __init__(self, parent=None):
QObject.__init__(self, parent)
self.button_down = None
self.button = None
self.button_down_pos = None
def eventFilter(self, obj, event):
if not isinstance(obj, QToolButton):
return False
if event.type() == QEvent.MouseButtonPress:
self.button = obj
self.button_down = event.button()
self.button_down_pos = event.pos()
elif event.type() == QEvent.MouseButtonRelease:
if self.button.underMouse():
if event.button() == Qt.RightButton:
self.buttonRightClicked.emit(self.button)
elif event.button() == Qt.LeftButton:
self.buttonLeftClicked.emit(self.button)
elif event.type() == QEvent.Enter:
self.buttonEnter.emit(obj)
elif event.type() == QEvent.Leave:
self.buttonLeave.emit(obj)
return False
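# --- Illustrative usage sketch, not part of the original module ---
# A minimal, hypothetical example of populating a ToolGrid; everything
# except ToolGrid itself (the application object, actions and handler)
# is invented for illustration.
#
#   from PyQt4.QtGui import QApplication, QAction
#
#   app = QApplication([])
#   grid = ToolGrid(columns=3)
#   for title in ("Open", "Save", "Close"):
#       grid.addAction(QAction(title, grid))
#
#   def on_triggered(action):
#       print(action.text())
#
#   grid.actionTriggered.connect(on_triggered)
#   grid.show()
#   app.exec_()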
| gpl-3.0 | 2,217,599,110,034,603,800 | 29.376569 | 79 | 0.556956 | false |
flok99/constatus | examples/lcdproc-overlay.py | 1 | 1079 | #! /usr/bin/python3
# this code uses the 'lcdproc-python3' library:
# https://github.com/jinglemansweep/lcdproc
# for more details about lcdproc, see:
# http://lcdproc.org/
from lcdproc.server import Server
import time
import urllib.request
def main():
# 'localhost' must be replaced by the network address on which the
# constatus server runs
lcd = Server('localhost', debug=True)
lcd.start_session()
# you can have multiple screens. constatus will switch between them
# every 4 seconds
screen1 = lcd.add_screen('Screen1')
# a screen can have multiple widgets
widget1 = screen1.add_string_widget('strwidget1', text='Bitcoin rate:', x=1, y=1)
widget2 = screen1.add_string_widget('strwidget2', text='', x=1, y=2)
while True:
# retrieve bitcoin rate (don't depend on this: this service is often behind
# for weeks)
h = urllib.request.urlopen('https://vps001.vanheusden.com/btc/latest.txt')
widget2.set_text(h.read().decode('utf-8'))
time.sleep(30)
if __name__ == '__main__':
main()
| agpl-3.0 | -6,633,762,631,587,860,000 | 27.394737 | 85 | 0.667285 | false |
ivmech/iviny-scope | lib/xlsxwriter/test/table/test_table07.py | 1 | 2031 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013, John McNamara, [email protected]
#
import unittest
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...table import Table
from ...worksheet import Worksheet
from ...workbook import WorksheetMeta
from ...sharedstrings import SharedStringTable
class TestAssembleTable(unittest.TestCase):
"""
Test assembling a complete Table file.
"""
def test_assemble_xml_file(self):
"""Test writing a table"""
self.maxDiff = None
worksheet = Worksheet()
worksheet.worksheet_meta = WorksheetMeta()
worksheet.str_table = SharedStringTable()
# Set the table properties.
worksheet.add_table('C3:F14', {'total_row': 1})
worksheet._prepare_tables(1)
fh = StringIO()
table = Table()
table._set_filehandle(fh)
table._set_properties(worksheet.tables[0])
table._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<table xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" id="1" name="Table1" displayName="Table1" ref="C3:F14" totalsRowCount="1">
<autoFilter ref="C3:F13"/>
<tableColumns count="4">
<tableColumn id="1" name="Column1"/>
<tableColumn id="2" name="Column2"/>
<tableColumn id="3" name="Column3"/>
<tableColumn id="4" name="Column4"/>
</tableColumns>
<tableStyleInfo name="TableStyleMedium9" showFirstColumn="0" showLastColumn="0" showRowStripes="1" showColumnStripes="0"/>
</table>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 4,153,449,970,051,362,300 | 31.758065 | 167 | 0.551452 | false |
kcompher/topik | topik/intermediaries/raw_data.py | 1 | 9075 | """
This file is concerned with providing a simple interface for data stored in
Elasticsearch. The class(es) defined here are fed into the preprocessing step.
"""
import logging
import time
from abc import ABCMeta, abstractmethod
from six import with_metaclass
from elasticsearch import Elasticsearch, helpers
def _get_hash_identifier(input_data, id_field):
return hash(input_data[id_field])
class CorpusInterface(with_metaclass(ABCMeta)):
@abstractmethod
def __iter__(self):
"""This is expected to iterate over your data, returning tuples of (doc_id, <selected field>)"""
raise NotImplementedError
@abstractmethod
def __len__(self):
raise NotImplementedError
@abstractmethod
def get_generator_without_id(self, field=None):
"""Returns a generator that yields field content without doc_id associate"""
raise NotImplementedError
@abstractmethod
def append_to_record(self, record_id, field_name, field_value):
"""Used to store preprocessed output alongside input data.
Field name is destination. Value is processed value."""
raise NotImplementedError
class ElasticSearchCorpus(CorpusInterface):
def __init__(self, host, index, content_field, port=9200, username=None,
password=None, doc_type=None, query=None, iterable=None):
super(ElasticSearchCorpus, self).__init__()
self.host = host
self.port = port
self.username = username
self.password = password
self.instance = Elasticsearch(hosts=[{"host": host, "port": port,
"http_auth": "{}:{}".format(username, password)}
])
self.index = index
self.content_field = content_field
self.doc_type = doc_type
self.query = query
if iterable:
self.import_from_iterable(iterable, content_field)
def __iter__(self):
results = helpers.scan(self.instance, index=self.index,
query=self.query, doc_type=self.doc_type)
for result in results:
yield result["_id"], result['_source'][self.content_field]
def __len__(self):
return self.instance.count(index=self.index, doc_type=self.doc_type)["count"]
def get_generator_without_id(self, field=None):
if not field:
field = self.content_field
results = helpers.scan(self.instance, index=self.index,
query=self.query, doc_type=self.doc_type)
for result in results:
yield result["_source"][field]
def append_to_record(self, record_id, field_name, field_value):
self.instance.update(index=self.index, id=record_id, doc_type="continuum",
body={"doc": {field_name: field_value}})
def get_field(self, field=None):
"""Get a different field to iterate over, keeping all other
connection details."""
if not field:
field = self.content_field
return ElasticSearchCorpus(self.host, self.index, field, self.port,
self.username, self.password, self.doc_type,
self.query)
def import_from_iterable(self, iterable, id_field="text", batch_size=500):
"""Load data into Elasticsearch from iterable.
iterable: generally a list of dicts, but possibly a list of strings
This is your data. Your dictionary structure defines the schema
of the elasticsearch index.
id_field: string identifier of field to hash for content ID. For
list of dicts, a valid key value in the dictionary is required. For
list of strings, a dictionary with one key, "text" is created and
used.
"""
batch = []
for item in iterable:
if isinstance(item, basestring):
item = {id_field: item}
id = _get_hash_identifier(item, id_field)
batch.append({"_id": id, "_source": item, "_type": "continuum"})
if len(batch) >= batch_size:
helpers.bulk(client=self.instance, actions=batch, index=self.index)
batch = []
if batch:
helpers.bulk(client=self.instance, actions=batch, index=self.index)
# TODO: generalize for datetimes
# TODO: validate input data to ensure that it has valid year data
def get_data_by_year(self, start_year, end_year, year_field="year"):
"""Queries elasticsearch for all documents within the specified year range
and returns a generator of the results"""
index = self.index
if self.instance.indices.get_field_mapping(field=year_field,
index=index,
doc_type="continuum") != 'date':
index = self.index+"_{}_date".format(year_field)
if not self.instance.indices.exists(index) or self.instance.indices.get_field_mapping(field=year_field,
index=index,
doc_type="continuum") != 'date':
mapping = self.instance.indices.get_mapping(index=self.index,
doc_type="continuum")
mapping[self.index]["mappings"]["continuum"]["properties"][year_field] = {"type": "date"}
self.instance.indices.put_alias(index=self.index,
name=index,
body=mapping)
while self.instance.count(index=self.index) != self.instance.count(index=index):
logging.info("Waiting for date indexed data to be indexed...")
time.sleep(1)
results = helpers.scan(self.instance, index=index, scroll='5m',
query={"query":
{"range":
{year_field:
{"gte": start_year,
"lte": end_year}}}})
for result in results:
yield result["_id"], result['_source'][self.content_field]
class DictionaryCorpus(CorpusInterface):
def __init__(self, content_field, iterable=None, generate_id=True):
super(DictionaryCorpus, self).__init__()
self.content_field = content_field
self._documents = []
self.idx = 0
if iterable:
self.import_from_iterable(iterable, content_field, generate_id)
def __iter__(self):
for doc in self._documents:
yield doc["_id"], doc["_source"][self.content_field]
def __len__(self):
return len(self._documents)
def append_to_record(self, record_id, field_name, field_value):
for doc in self._documents:
if doc["_id"] == record_id:
doc["_source"][field_name] = field_value
return
raise ValueError("No record with id '{}' was found.".format(record_id))
def get_field(self, field=None):
"""Get a different field to iterate over, keeping all other details."""
if not field:
field = self.content_field
return DictionaryCorpus(content_field=field, iterable=self._documents,
generate_id=False)
def get_generator_without_id(self, field=None):
if not field:
field = self.content_field
for doc in self._documents:
yield doc["_source"][field]
def import_from_iterable(self, iterable, content_field, generate_id=True):
"""
iterable: generally a list of dicts, but possibly a list of strings
This is your data. Your dictionary structure defines the schema
of the elasticsearch index.
"""
if generate_id:
self._documents = [{"_id": hash(doc[content_field]),
"_source": doc} for doc in iterable]
else:
self._documents = [item for item in iterable]
def get_number_of_items_stored(self):
return len(self._documents)
# TODO: generalize for datetimes
# TODO: validate input data to ensure that it has valid year data
def get_data_by_year(self, start_year, end_year, year_field="year"):
for result in self._documents:
if start_year <= int(result["_source"][year_field]) <= end_year:
yield result["_id"], result["_source"][self.content_field]
# Collection of output formats: people put files, folders, etc in, and they can choose from these to be the output
# These consume the iterable collection of dictionaries produced by the various iter_ functions.
output_formats = {"elasticsearch": ElasticSearchCorpus,
"dictionary": DictionaryCorpus,
}
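# --- Illustrative usage sketch, not part of the original module ---
# A minimal, hypothetical example of the DictionaryCorpus defined above;
# the sample documents are invented for illustration.
#
#   docs = [{"text": "first document", "year": "1999"},
#           {"text": "second document", "year": "2001"}]
#   corpus = output_formats["dictionary"](content_field="text", iterable=docs)
#   for doc_id, text in corpus:
#       corpus.append_to_record(doc_id, "tokens", text.split())
#   recent = list(corpus.get_data_by_year(2000, 2010))  # only the 2001 document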
| bsd-3-clause | 1,154,407,252,155,769,300 | 42.421053 | 115 | 0.572562 | false |
romain-dartigues/ansible | lib/ansible/modules/cloud/vmware/vsphere_copy.py | 35 | 6503 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Dag Wieers (@dagwieers) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vsphere_copy
short_description: Copy a file to a vCenter datastore
description:
- Upload files to a vCenter datastore
version_added: 2.0
author:
- Dag Wieers (@dagwieers)
options:
host:
description:
- The vCenter server on which the datastore is available.
required: true
aliases: ['hostname']
login:
description:
- The login name to authenticate on the vCenter server.
required: true
aliases: ['username']
password:
description:
- The password to authenticate on the vCenter server.
required: true
src:
description:
- The file to push to vCenter
required: true
datacenter:
description:
- The datacenter on the vCenter server that holds the datastore.
required: true
datastore:
description:
- The datastore on the vCenter server to push files to.
required: true
path:
description:
- The file to push to the datastore on the vCenter server.
required: true
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be
set to C(no) when no other option exists.
default: 'yes'
type: bool
timeout:
description:
- The timeout in seconds for the upload to the datastore.
default: 10
type: int
version_added: "2.8"
notes:
- "This module ought to be run from a system that can access vCenter directly and has the file to transfer.
It can be the normal remote target or you can change it either by using C(transport: local) or using C(delegate_to)."
- Tested on vSphere 5.5
'''
EXAMPLES = '''
- vsphere_copy:
host: '{{ vhost }}'
login: '{{ vuser }}'
password: '{{ vpass }}'
src: /some/local/file
datacenter: DC1 Someplace
datastore: datastore1
path: some/remote/file
delegate_to: localhost
- vsphere_copy:
host: '{{ vhost }}'
login: '{{ vuser }}'
password: '{{ vpass }}'
src: /other/local/file
datacenter: DC2 Someplace
datastore: datastore2
path: other/remote/file
delegate_to: other_system
'''
import atexit
import errno
import mmap
import socket
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import urlencode, quote
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
def vmware_path(datastore, datacenter, path):
''' Constructs a URL path that VSphere accepts reliably '''
path = "/folder/%s" % quote(path.lstrip("/"))
# Due to a software bug in vSphere, it fails to handle ampersand in datacenter names
# The solution is to do what vSphere does (when browsing) and double-encode ampersands, maybe others ?
datacenter = datacenter.replace('&', '%26')
if not path.startswith("/"):
path = "/" + path
params = dict(dsName=datastore)
if datacenter:
params["dcPath"] = datacenter
params = urlencode(params)
return "%s?%s" % (path, params)
def main():
module = AnsibleModule(
argument_spec=dict(
host=dict(required=True, aliases=['hostname']),
login=dict(required=True, aliases=['username']),
password=dict(required=True, no_log=True),
src=dict(required=True, aliases=['name']),
datacenter=dict(required=True),
datastore=dict(required=True),
dest=dict(required=True, aliases=['path']),
validate_certs=dict(default=True, type='bool'),
timeout=dict(default=10, type='int')
),
# Implementing check-mode using HEAD is impossible, since size/date is not 100% reliable
supports_check_mode=False,
)
host = module.params.get('host')
login = module.params.get('login')
password = module.params.get('password')
src = module.params.get('src')
datacenter = module.params.get('datacenter')
datastore = module.params.get('datastore')
dest = module.params.get('dest')
validate_certs = module.params.get('validate_certs')
timeout = module.params.get('timeout')
fd = open(src, "rb")
atexit.register(fd.close)
data = mmap.mmap(fd.fileno(), 0, access=mmap.ACCESS_READ)
atexit.register(data.close)
remote_path = vmware_path(datastore, datacenter, dest)
url = 'https://%s%s' % (host, remote_path)
headers = {
"Content-Type": "application/octet-stream",
"Content-Length": str(len(data)),
}
try:
r = open_url(url, data=data, headers=headers, method='PUT', timeout=timeout,
url_username=login, url_password=password, validate_certs=validate_certs,
force_basic_auth=True)
except socket.error as e:
if isinstance(e.args, tuple) and e[0] == errno.ECONNRESET:
# VSphere resets connection if the file is in use and cannot be replaced
module.fail_json(msg='Failed to upload, image probably in use', status=None, errno=e[0], reason=to_native(e), url=url)
else:
module.fail_json(msg=str(e), status=None, errno=e[0], reason=str(e),
url=url, exception=traceback.format_exc())
except Exception as e:
error_code = -1
try:
if isinstance(e[0], int):
error_code = e[0]
except KeyError:
pass
module.fail_json(msg=to_native(e), status=None, errno=error_code,
reason=to_native(e), url=url, exception=traceback.format_exc())
status = r.getcode()
if 200 <= status < 300:
module.exit_json(changed=True, status=status, reason=r.msg, url=url)
else:
length = r.headers.get('content-length', None)
if r.headers.get('transfer-encoding', '').lower() == 'chunked':
chunked = 1
else:
chunked = 0
module.fail_json(msg='Failed to upload', errno=None, status=status, reason=r.msg, length=length, headers=dict(r.headers), chunked=chunked, url=url)
if __name__ == '__main__':
main()
| gpl-3.0 | 591,458,113,666,732,400 | 31.515 | 155 | 0.633554 | false |
jtyr/ansible-modules-extras | windows/win_environment.py | 90 | 2870 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Jon Hawkesworth (@jhawkesworth) <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_environment
version_added: "2.0"
short_description: Modifies environment variables on windows hosts.
description:
- Uses .net Environment to set or remove environment variables and can set at User, Machine or Process level.
- User level environment variables will be set, but not available until the user has logged off and on again.
options:
state:
description:
- present to ensure environment variable is set, or absent to ensure it is removed
required: false
default: present
choices:
- present
- absent
name:
description:
- The name of the environment variable
required: true
default: no default
value:
description:
- The value to store in the environment variable. Can be omitted for state=absent
required: false
default: no default
level:
description:
- The level at which to set the environment variable.
- Use 'machine' to set for all users.
- Use 'user' to set for the current user that ansible is connected as.
- Use 'process' to set for the current process. Probably not that useful.
required: true
default: no default
choices:
- machine
- process
- user
author: "Jon Hawkesworth (@jhawkesworth)"
notes:
- This module does not broadcast change events.
This means that the minority of windows applications which can have
their environment changed without restarting will not be notified and
therefore will need restarting to pick up new environment settings.
User level environment variables will require the user to log out
and in again before they become available.
'''
EXAMPLES = '''
# Set an environment variable for all users
win_environment:
state: present
name: TestVariable
value: "Test value"
level: machine
# Remove an environment variable for the current users
win_environment:
state: absent
name: TestVariable
level: user
'''
| gpl-3.0 | -9,009,353,416,383,813,000 | 32.372093 | 115 | 0.710801 | false |
40123210/-2015cd_40123210 | static/Brython3.1.1-20150328-091302/Lib/errno.py | 624 | 4096 | """
This module makes available standard errno system symbols.
The value of each symbol is the corresponding integer value,
e.g., on most systems, errno.ENOENT equals the integer 2.
The dictionary errno.errorcode maps numeric codes to symbol names,
e.g., errno.errorcode[2] could be the string 'ENOENT'.
Symbols that are not relevant to the underlying system are not defined.
To map error codes to error messages, use the function os.strerror(),
e.g. os.strerror(2) could return 'No such file or directory'.
"""
errorcode= {1: 'EPERM', 2: 'ENOENT', 3: 'ESRCH', 4: 'EINTR', 5: 'EIO',
6: 'ENXIO', 7: 'E2BIG', 8: 'ENOEXEC', 9: 'EBADF', 10: 'ECHILD', 11: 'EAGAIN',
12: 'ENOMEM', 13: 'EACCES', 14: 'EFAULT', 15: 'ENOTBLK', 16: 'EBUSY',
17: 'EEXIST', 18: 'EXDEV', 19: 'ENODEV', 20: 'ENOTDIR', 21: 'EISDIR',
22: 'EINVAL', 23: 'ENFILE', 24: 'EMFILE', 25: 'ENOTTY', 26: 'ETXTBSY',
27: 'EFBIG', 28: 'ENOSPC', 29: 'ESPIPE', 30: 'EROFS', 31: 'EMLINK',
32: 'EPIPE', 33: 'EDOM', 34: 'ERANGE', 35: 'EDEADLOCK', 36: 'ENAMETOOLONG',
37: 'ENOLCK', 38: 'ENOSYS', 39: 'ENOTEMPTY', 40: 'ELOOP', 42: 'ENOMSG',
43: 'EIDRM', 44: 'ECHRNG', 45: 'EL2NSYNC', 46: 'EL3HLT', 47: 'EL3RST',
48: 'ELNRNG', 49: 'EUNATCH', 50: 'ENOCSI', 51: 'EL2HLT', 52: 'EBADE',
53: 'EBADR', 54: 'EXFULL', 55: 'ENOANO', 56: 'EBADRQC', 57: 'EBADSLT',
59: 'EBFONT', 60: 'ENOSTR', 61: 'ENODATA', 62: 'ETIME', 63: 'ENOSR',
64: 'ENONET', 65: 'ENOPKG', 66: 'EREMOTE', 67: 'ENOLINK', 68: 'EADV',
69: 'ESRMNT', 70: 'ECOMM', 71: 'EPROTO', 72: 'EMULTIHOP', 73: 'EDOTDOT',
74: 'EBADMSG', 75: 'EOVERFLOW', 76: 'ENOTUNIQ', 77: 'EBADFD', 78: 'EREMCHG',
79: 'ELIBACC', 80: 'ELIBBAD', 81: 'ELIBSCN', 82: 'ELIBMAX', 83: 'ELIBEXEC',
84: 'EILSEQ', 85: 'ERESTART', 86: 'ESTRPIPE', 87: 'EUSERS', 88: 'ENOTSOCK',
89: 'EDESTADDRREQ', 90: 'EMSGSIZE', 91: 'EPROTOTYPE', 92: 'ENOPROTOOPT',
93: 'EPROTONOSUPPORT', 94: 'ESOCKTNOSUPPORT', 95: 'ENOTSUP',
96: 'EPFNOSUPPORT', 97: 'EAFNOSUPPORT', 98: 'EADDRINUSE',
99: 'EADDRNOTAVAIL', 100: 'ENETDOWN', 101: 'ENETUNREACH', 102: 'ENETRESET',
103: 'ECONNABORTED', 104: 'ECONNRESET', 105: 'ENOBUFS', 106: 'EISCONN',
107: 'ENOTCONN', 108: 'ESHUTDOWN', 109: 'ETOOMANYREFS', 110: 'ETIMEDOUT',
111: 'ECONNREFUSED', 112: 'EHOSTDOWN', 113: 'EHOSTUNREACH', 114: 'EALREADY',
115: 'EINPROGRESS', 116: 'ESTALE', 117: 'EUCLEAN', 118: 'ENOTNAM',
119: 'ENAVAIL', 120: 'EISNAM', 121: 'EREMOTEIO', 122: 'EDQUOT',
123: 'ENOMEDIUM', 124: 'EMEDIUMTYPE', 125: 'ECANCELED', 126: 'ENOKEY',
127: 'EKEYEXPIRED', 128: 'EKEYREVOKED', 129: 'EKEYREJECTED',
130: 'EOWNERDEAD', 131: 'ENOTRECOVERABLE', 132: 'ERFKILL'}
EPERM=1
ENOENT=2
ESRCH=3
EINTR=4
EIO=5
ENXIO=6
E2BIG=7
ENOEXEC=8
EBADF=9
ECHILD=10
EAGAIN=11
ENOMEM=12
EACCES=13
EFAULT=14
ENOTBLK=15
EBUSY=16
EEXIST=17
EXDEV=18
ENODEV=19
ENOTDIR=20
EISDIR=21
EINVAL=22
ENFILE=23
EMFILE=24
ENOTTY=25
ETXTBSY=26
EFBIG=27
ENOSPC=28
ESPIPE=29
EROFS=30
EMLINK=31
EPIPE=32
EDOM=33
ERANGE=34
EDEADLOCK=35
ENAMETOOLONG=36
ENOLCK=37
ENOSYS=38
ENOTEMPTY=39
ELOOP=40
ENOMSG=42
EIDRM=43
ECHRNG=44
EL2NSYNC=45
EL3HLT=46
EL3RST=47
ELNRNG=48
EUNATCH=49
ENOCSI=50
EL2HLT=51
EBADE=52
EBADR=53
EXFULL=54
ENOANO=55
EBADRQC=56
EBADSLT=57
EBFONT=59
ENOSTR=60
ENODATA=61
ETIME=62
ENOSR=63
ENONET=64
ENOPKG=65
EREMOTE=66
ENOLINK=67
EADV=68
ESRMNT=69
ECOMM=70
EPROTO=71
EMULTIHOP=72
EDOTDOT=73
EBADMSG=74
EOVERFLOW=75
ENOTUNIQ=76
EBADFD=77
EREMCHG=78
ELIBACC=79
ELIBBAD=80
ELIBSCN=81
ELIBMAX=82
ELIBEXEC=83
EILSEQ=84
ERESTART=85
ESTRPIPE=86
EUSERS=87
ENOTSOCK=88
EDESTADDRREQ=89
EMSGSIZE=90
EPROTOTYPE=91
ENOPROTOOPT=92
EPROTONOSUPPORT=93
ESOCKTNOSUPPORT=94
ENOTSUP=95
EPFNOSUPPORT=96
EAFNOSUPPORT=97
EADDRINUSE=98
EADDRNOTAVAIL=99
ENETDOWN=100
ENETUNREACH=101
ENETRESET=102
ECONNABORTED=103
ECONNRESET=104
ENOBUFS=105
EISCONN=106
ENOTCONN=107
ESHUTDOWN=108
ETOOMANYREFS=109
ETIMEDOUT=110
ECONNREFUSED=111
EHOSTDOWN=112
EHOSTUNREACH=113
EALREADY=114
EINPROGRESS=115
ESTALE=116
EUCLEAN=117
ENOTNAM=118
ENAVAIL=119
EISNAM=120
EREMOTEIO=121
EDQUOT=122
ENOMEDIUM=123
EMEDIUMTYPE=124
ECANCELED=125
ENOKEY=126
EKEYEXPIRED=127
EKEYREVOKED=128
EKEYREJECTED=129
EOWNERDEAD=130
ENOTRECOVERABLE=131
ERFKILL=132
| gpl-3.0 | -8,667,274,900,113,290,000 | 22.676301 | 78 | 0.709229 | false |
lbdreyer/iris | lib/iris/tests/runner/_runner.py | 2 | 5308 | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Provides testing capabilities for installed copies of Iris.
"""
# Because this file is imported by setup.py, there may be additional runtime
# imports later in the file.
import multiprocessing
import os
import sys
# NOTE: Do not inherit from object as distutils does not like it.
class TestRunner:
"""Run the Iris tests under nose and multiprocessor for performance"""
description = (
"Run tests under nose and multiprocessor for performance. "
"Default behaviour is to run all non-gallery tests. "
"Specifying one or more test flags will run *only* those "
"tests."
)
user_options = [
(
"no-data",
"n",
"Override the paths to the data repositories so it "
"appears to the tests that it does not exist.",
),
("stop", "x", "Stop running tests after the first error or failure."),
("system-tests", "s", "Run the limited subset of system tests."),
("gallery-tests", "e", "Run the gallery code tests."),
("default-tests", "d", "Run the default tests."),
(
"coding-tests",
"c",
"Run the coding standards tests. (These are a "
"subset of the default tests.)",
),
(
"num-processors=",
"p",
"The number of processors used for running " "the tests.",
),
("create-missing", "m", "Create missing test result files."),
]
boolean_options = [
"no-data",
"system-tests",
"stop",
"gallery-tests",
"default-tests",
"coding-tests",
"create-missing",
]
def initialize_options(self):
self.no_data = False
self.stop = False
self.system_tests = False
self.gallery_tests = False
self.default_tests = False
self.coding_tests = False
self.num_processors = None
self.create_missing = False
def finalize_options(self):
        # These environment variables will be propagated to all the
# processes that nose.run creates.
if self.no_data:
print("Running tests in no-data mode...")
import iris.config
iris.config.TEST_DATA_DIR = None
if self.create_missing:
os.environ["IRIS_TEST_CREATE_MISSING"] = "true"
tests = []
if self.system_tests:
tests.append("system")
if self.default_tests:
tests.append("default")
if self.coding_tests:
tests.append("coding")
if self.gallery_tests:
tests.append("gallery")
if not tests:
tests.append("default")
print("Running test suite(s): {}".format(", ".join(tests)))
if self.stop:
print("Stopping tests after the first error or failure")
if self.num_processors is None:
# Choose a magic number that works reasonably well for the default
# number of processes.
self.num_processors = (multiprocessing.cpu_count() + 1) // 4 + 1
else:
self.num_processors = int(self.num_processors)
def run(self):
import nose
if hasattr(self, "distribution") and self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
tests = []
if self.system_tests:
tests.append("iris.tests.system_test")
if self.default_tests:
tests.append("iris.tests")
if self.coding_tests:
tests.append("iris.tests.test_coding_standards")
if self.gallery_tests:
import iris.config
default_doc_path = os.path.join(sys.path[0], "docs", "iris")
doc_path = iris.config.get_option(
"Resources", "doc_dir", default=default_doc_path
)
gallery_path = os.path.join(doc_path, "gallery_tests")
if os.path.exists(gallery_path):
tests.append(gallery_path)
else:
print(
"WARNING: Gallery path %s does not exist." % (gallery_path)
)
if not tests:
tests.append("iris.tests")
regexp_pat = r"--match=^([Tt]est(?![Mm]ixin)|[Ss]ystem)"
n_processors = max(self.num_processors, 1)
args = [
"",
None,
"--processes=%s" % n_processors,
"--verbosity=2",
regexp_pat,
"--process-timeout=180",
]
if self.stop:
args.append("--stop")
result = True
for test in tests:
args[1] = test
print()
print(
"Running test discovery on %s with %s processors."
% (test, n_processors)
)
# run the tests at module level i.e. my_module.tests
# - test must start with test/Test and must not contain the
# word Mixin.
result &= nose.run(argv=args)
if result is False:
exit(1)
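# --- Illustrative wiring sketch, not part of the original module ---
# TestRunner follows the distutils command protocol (description,
# user_options, initialize_options, finalize_options, run), so a setup.py
# would typically mix it with distutils' Command and register it via
# cmdclass; the exact class and command names used by the real setup.py
# may differ:
#
#   from distutils.core import Command, setup
#
#   class TestCommand(TestRunner, Command):
#       pass
#
#   setup(..., cmdclass={'test': TestCommand})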
| lgpl-3.0 | 5,360,826,476,672,065,000 | 31.564417 | 79 | 0.549359 | false |
yvess/cmsplugin-filer | cmsplugin_filer_file/migrations/0001_initial.py | 13 | 1221 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import filer.fields.file
class Migration(migrations.Migration):
dependencies = [
('cms', '0003_auto_20140926_2347'),
('filer', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='FilerFile',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='cms.CMSPlugin')),
('title', models.CharField(max_length=255, null=True, verbose_name='title', blank=True)),
('target_blank', models.BooleanField(default=False, verbose_name='Open link in new window')),
('style', models.CharField(default=settings.CMSPLUGIN_FILER_FILE_DEFAULT_STYLE, choices=settings.CMSPLUGIN_FILER_FILE_STYLE_CHOICES, verbose_name='Style', blank=True, max_length=255)),
('file', filer.fields.file.FilerFileField(verbose_name='file', to='filer.File')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
| bsd-3-clause | -2,011,703,993,929,395,700 | 38.387097 | 200 | 0.605242 | false |
AlanD88/website | web2py/gluon/contrib/login_methods/oneall_account.py | 8 | 4595 | #!/usr/bin/env python
# coding: utf8
"""
Oneall Authentication for web2py
Developed by Nathan Freeze (Copyright © 2013)
Email <[email protected]>
This file contains code to allow using onall.com
authentication services with web2py
"""
import os
import base64
from gluon import *
from gluon.storage import Storage
from gluon.contrib.simplejson import JSONDecodeError
from gluon.tools import fetch
import gluon.contrib.simplejson as json
class OneallAccount(object):
"""
from gluon.contrib.login_methods.oneall_account import OneallAccount
auth.settings.actions_disabled=['register','change_password',
'request_reset_password']
auth.settings.login_form = OneallAccount(request,
public_key="...",
private_key="...",
domain="...",
url = "http://localhost:8000/%s/default/user/login" % request.application)
"""
def __init__(self, request, public_key="", private_key="", domain="",
url=None, providers=None, on_login_failure=None):
self.request = request
self.public_key = public_key
self.private_key = private_key
self.url = url
self.domain = domain
self.profile = None
self.on_login_failure = on_login_failure
self.providers = providers or ["facebook", "google", "yahoo", "openid"]
self.mappings = Storage()
def defaultmapping(profile):
name = profile.get('name',{})
dname = name.get('formatted',profile.get('displayName'))
email=profile.get('emails', [{}])[0].get('value')
reg_id=profile.get('identity_token','')
username=profile.get('preferredUsername',email)
first_name=name.get('givenName', dname.split(' ')[0])
last_name=profile.get('familyName', dname.split(' ')[1] if(dname.count(' ') > 0) else None)
return dict(registration_id=reg_id,username=username,email=email,
first_name=first_name,last_name=last_name)
self.mappings.default = defaultmapping
def get_user(self):
request = self.request
user = None
if request.vars.connection_token:
auth_url = "https://%s.api.oneall.com/connections/%s.json" % \
(self.domain, request.vars.connection_token)
auth_pw = "%s:%s" % (self.public_key,self.private_key)
auth_pw = base64.b64encode(auth_pw)
headers = dict(Authorization="Basic %s" % auth_pw)
try:
auth_info_json = fetch(auth_url,headers=headers)
auth_info = json.loads(auth_info_json)
data = auth_info['response']['result']['data']
if data['plugin']['key'] == 'social_login':
if data['plugin']['data']['status'] == 'success':
userdata = data['user']
self.profile = userdata['identity']
source = self.profile['source']['key']
mapping = self.mappings.get(source,self.mappings['default'])
user = mapping(self.profile)
except (JSONDecodeError, KeyError):
pass
if user is None and self.on_login_failure:
redirect(self.on_login_failure)
return user
def login_form(self):
scheme = self.request.env.wsgi_url_scheme
oneall_url = scheme + "://%s.api.oneall.com/socialize/library.js" % self.domain
oneall_lib = SCRIPT(_src=oneall_url,_type='text/javascript')
container = DIV(_id="oa_social_login_container")
widget = SCRIPT('oneall.api.plugins.social_login.build("oa_social_login_container",',
'{providers : %s,' % self.providers,
'callback_uri: "%s"});' % self.url,
_type="text/javascript")
form = DIV(oneall_lib,container,widget)
return form
def use_oneall(auth, filename='private/oneall.key', **kwargs):
path = os.path.join(current.request.folder, filename)
if os.path.exists(path):
request = current.request
domain, public_key, private_key = open(path, 'r').read().strip().split(':')
url = URL('default', 'user', args='login', scheme=True)
auth.settings.actions_disabled =\
['register', 'change_password', 'request_reset_password']
auth.settings.login_form = OneallAccount(
request, public_key=public_key,private_key=private_key,
domain=domain, url=url, **kwargs)
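# Illustrative note, not part of the original module: use_oneall() expects the
# key file (by default private/oneall.key inside the application folder) to
# contain a single line of the form
#
#   <subdomain>:<public_key>:<private_key>
#
# matching the open(path, 'r').read().strip().split(':') call above.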
| mit | 8,773,400,201,012,808,000 | 41.934579 | 103 | 0.585329 | false |
gangadharkadam/saloon_erp_install | erpnext/selling/report/customer_credit_balance/customer_credit_balance.py | 96 | 1688 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt
from erpnext.selling.doctype.customer.customer import get_customer_outstanding, get_credit_limit
def execute(filters=None):
if not filters: filters = {}
#Check if customer id is according to naming series or customer name
customer_naming_type = frappe.db.get_value("Selling Settings", None, "cust_master_name")
columns = get_columns(customer_naming_type)
data = []
customer_list = get_details(filters)
for d in customer_list:
row = []
outstanding_amt = get_customer_outstanding(d.name, filters.get("company"))
credit_limit = get_credit_limit(d.name, filters.get("company"))
bal = flt(credit_limit) - flt(outstanding_amt)
if customer_naming_type == "Naming Series":
row = [d.name, d.customer_name, credit_limit, outstanding_amt, bal]
else:
row = [d.name, credit_limit, outstanding_amt, bal]
if credit_limit:
data.append(row)
return columns, data
def get_columns(customer_naming_type):
columns = [
_("Customer") + ":Link/Customer:120", _("Credit Limit") + ":Currency:120",
_("Outstanding Amt") + ":Currency:100", _("Credit Balance") + ":Currency:120"
]
if customer_naming_type == "Naming Series":
columns.insert(1, _("Customer Name") + ":Data:120")
return columns
def get_details(filters):
conditions = ""
if filters.get("customer"):
conditions += " where name = %(customer)s"
return frappe.db.sql("""select name, customer_name from `tabCustomer` %s"""
% conditions, filters, as_dict=1)
| agpl-3.0 | -6,820,646,756,613,552,000 | 30.259259 | 96 | 0.708531 | false |
mezz64/home-assistant | homeassistant/components/tellduslive/binary_sensor.py | 14 | 1068 | """Support for binary sensors using Tellstick Net."""
from homeassistant.components import binary_sensor, tellduslive
from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .entry import TelldusLiveEntity
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up tellduslive sensors dynamically."""
async def async_discover_binary_sensor(device_id):
"""Discover and add a discovered sensor."""
client = hass.data[tellduslive.DOMAIN]
async_add_entities([TelldusLiveSensor(client, device_id)])
async_dispatcher_connect(
hass,
tellduslive.TELLDUS_DISCOVERY_NEW.format(
binary_sensor.DOMAIN, tellduslive.DOMAIN
),
async_discover_binary_sensor,
)
class TelldusLiveSensor(TelldusLiveEntity, BinarySensorEntity):
"""Representation of a Tellstick sensor."""
@property
def is_on(self):
"""Return true if switch is on."""
return self.device.is_on
| apache-2.0 | 8,650,834,080,875,974,000 | 32.375 | 69 | 0.714419 | false |
publicRoman/spark | examples/src/main/python/ml/logistic_regression_summary_example.py | 71 | 2442 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.classification import LogisticRegression
# $example off$
from pyspark.sql import SparkSession
"""
An example demonstrating Logistic Regression Summary.
Run with:
bin/spark-submit examples/src/main/python/ml/logistic_regression_summary_example.py
"""
if __name__ == "__main__":
spark = SparkSession \
.builder \
.appName("LogisticRegressionSummary") \
.getOrCreate()
# Load training data
training = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
lr = LogisticRegression(maxIter=10, regParam=0.3, elasticNetParam=0.8)
# Fit the model
lrModel = lr.fit(training)
# $example on$
# Extract the summary from the returned LogisticRegressionModel instance trained
# in the earlier example
trainingSummary = lrModel.summary
# Obtain the objective per iteration
objectiveHistory = trainingSummary.objectiveHistory
print("objectiveHistory:")
for objective in objectiveHistory:
print(objective)
# Obtain the receiver-operating characteristic as a dataframe and areaUnderROC.
trainingSummary.roc.show()
print("areaUnderROC: " + str(trainingSummary.areaUnderROC))
# Set the model threshold to maximize F-Measure
fMeasure = trainingSummary.fMeasureByThreshold
maxFMeasure = fMeasure.groupBy().max('F-Measure').select('max(F-Measure)').head()
bestThreshold = fMeasure.where(fMeasure['F-Measure'] == maxFMeasure['max(F-Measure)']) \
.select('threshold').head()['threshold']
lr.setThreshold(bestThreshold)
# $example off$
spark.stop()
| apache-2.0 | -5,827,339,761,341,732,000 | 34.911765 | 92 | 0.730958 | false |
bdero/edx-platform | common/djangoapps/student/migrations/0002_text_to_varchar_and_indexes.py | 188 | 9581 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'UserProfile.name'
db.alter_column('auth_userprofile', 'name', self.gf('django.db.models.fields.CharField')(max_length=255))
# Adding index on 'UserProfile', fields ['name']
db.create_index('auth_userprofile', ['name'])
# Changing field 'UserProfile.language'
db.alter_column('auth_userprofile', 'language', self.gf('django.db.models.fields.CharField')(max_length=255))
# Adding index on 'UserProfile', fields ['language']
db.create_index('auth_userprofile', ['language'])
# Changing field 'UserProfile.courseware'
db.alter_column('auth_userprofile', 'courseware', self.gf('django.db.models.fields.CharField')(max_length=255))
# Changing field 'UserProfile.meta'
db.alter_column('auth_userprofile', 'meta', self.gf('django.db.models.fields.CharField')(max_length=255))
# Changing field 'UserProfile.location'
db.alter_column('auth_userprofile', 'location', self.gf('django.db.models.fields.CharField')(max_length=255))
# Adding index on 'UserProfile', fields ['location']
db.create_index('auth_userprofile', ['location'])
def backwards(self, orm):
# Removing index on 'UserProfile', fields ['location']
db.delete_index('auth_userprofile', ['location'])
# Removing index on 'UserProfile', fields ['language']
db.delete_index('auth_userprofile', ['language'])
# Removing index on 'UserProfile', fields ['name']
db.delete_index('auth_userprofile', ['name'])
# Changing field 'UserProfile.name'
db.alter_column('auth_userprofile', 'name', self.gf('django.db.models.fields.TextField')())
# Changing field 'UserProfile.language'
db.alter_column('auth_userprofile', 'language', self.gf('django.db.models.fields.TextField')())
# Changing field 'UserProfile.courseware'
db.alter_column('auth_userprofile', 'courseware', self.gf('django.db.models.fields.TextField')())
# Changing field 'UserProfile.meta'
db.alter_column('auth_userprofile', 'meta', self.gf('django.db.models.fields.TextField')())
# Changing field 'UserProfile.location'
db.alter_column('auth_userprofile', 'location', self.gf('django.db.models.fields.TextField')())
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'meta': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['student']
| agpl-3.0 | -2,019,794,324,044,532,000 | 66.471831 | 182 | 0.578854 | false |
friebsch/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers | Chapter2_MorePyMC/separation_plot.py | 86 | 1494 | # separation plot
# Author: Cameron Davidson-Pilon,2013
# see http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf
import matplotlib.pyplot as plt
import numpy as np
def separation_plot( p, y, **kwargs ):
"""
This function creates a separation plot for logistic and probit classification.
See http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf
p: The proportions/probabilities, can be a nxM matrix which represents M models.
y: the 0-1 response variables.
"""
assert p.shape[0] == y.shape[0], "p.shape[0] != y.shape[0]"
n = p.shape[0]
try:
M = p.shape[1]
except:
p = p.reshape( n, 1 )
M = p.shape[1]
#colors = np.array( ["#fdf2db", "#e44a32"] )
colors_bmh = np.array( ["#eeeeee", "#348ABD"] )
fig = plt.figure( )#figsize = (8, 1.3*M) )
for i in range(M):
ax = fig.add_subplot(M, 1, i+1)
ix = np.argsort( p[:,i] )
#plot the different bars
bars = ax.bar( np.arange(n), np.ones(n), width=1.,
color = colors_bmh[ y[ix].astype(int) ],
edgecolor = 'none')
ax.plot( np.arange(n+1), np.append(p[ix,i], p[ix,i][-1]), "k",
linewidth = 1.,drawstyle="steps-post" )
#create expected value bar.
ax.vlines( [(1-p[ix,i]).sum()], [0], [1] )
#ax.grid(False)
#ax.axis('off')
plt.xlim( 0, n)
plt.tight_layout()
return
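# Illustrative usage of separation_plot above (an assumption, not part of the
# original module): given some fitted binary classifier `model`, held-out
# features `X` and observed 0/1 outcomes `y`, one might call
#   p = model.predict_proba(X)[:, 1]   # predicted P(y == 1)
#   separation_plot(p, y)
#   plt.show()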
| mit | 3,354,848,887,991,838,000 | 26.163636 | 84 | 0.548862 | false |
kaltsimon/youtube-dl | youtube_dl/extractor/cbsnews.py | 96 | 3204 | # encoding: utf-8
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
class CBSNewsIE(InfoExtractor):
IE_DESC = 'CBS News'
_VALID_URL = r'http://(?:www\.)?cbsnews\.com/(?:[^/]+/)+(?P<id>[\da-z_-]+)'
_TESTS = [
{
'url': 'http://www.cbsnews.com/news/tesla-and-spacex-elon-musks-industrial-empire/',
'info_dict': {
'id': 'tesla-and-spacex-elon-musks-industrial-empire',
'ext': 'flv',
'title': 'Tesla and SpaceX: Elon Musk\'s industrial empire',
'thumbnail': 'http://beta.img.cbsnews.com/i/2014/03/30/60147937-2f53-4565-ad64-1bdd6eb64679/60-0330-pelley-640x360.jpg',
'duration': 791,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.cbsnews.com/videos/fort-hood-shooting-army-downplays-mental-illness-as-cause-of-attack/',
'info_dict': {
'id': 'fort-hood-shooting-army-downplays-mental-illness-as-cause-of-attack',
'ext': 'flv',
'title': 'Fort Hood shooting: Army downplays mental illness as cause of attack',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 205,
},
'params': {
# rtmp download
'skip_download': True,
},
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
video_info = json.loads(self._html_search_regex(
r'(?:<ul class="media-list items" id="media-related-items"><li data-video-info|<div id="cbsNewsVideoPlayer" data-video-player-options)=\'({.+?})\'',
webpage, 'video JSON info'))
item = video_info['item'] if 'item' in video_info else video_info
title = item.get('articleTitle') or item.get('hed')
duration = item.get('duration')
thumbnail = item.get('mediaImage') or item.get('thumbnail')
formats = []
for format_id in ['RtmpMobileLow', 'RtmpMobileHigh', 'Hls', 'RtmpDesktop']:
uri = item.get('media' + format_id + 'URI')
if not uri:
continue
fmt = {
'url': uri,
'format_id': format_id,
}
if uri.startswith('rtmp'):
fmt.update({
'app': 'ondemand?auth=cbs',
'play_path': 'mp4:' + uri.split('<break>')[-1],
'player_url': 'http://www.cbsnews.com/[[IMPORT]]/vidtech.cbsinteractive.com/player/3_3_0/CBSI_PLAYER_HD.swf',
'page_url': 'http://www.cbsnews.com',
'ext': 'flv',
})
elif uri.endswith('.m3u8'):
fmt['ext'] = 'mp4'
formats.append(fmt)
return {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
}
| unlicense | -4,473,809,882,856,195,600 | 35.827586 | 160 | 0.494382 | false |
Micronaet/micronaet-migration | __UNPORTED__/log_and_mail/__init__.py | 1 | 1076 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import log_and_mail
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -4,687,576,701,648,465,000 | 43.833333 | 78 | 0.608736 | false |
quietcoolwu/python-playground | pipeg/gravitate/Board.py | 4 | 10467 | #!/usr/bin/env python3
# Copyright © 2012-13 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version. It is provided for
# educational purposes and is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import collections
import heapq
import math
import random
import tkinter as tk
import tkinter.messagebox as messagebox
from Globals import *
# Need to allow for them to be darkened/lightened for 3D shadow.
COLORS = [
"#7F0000", # Red
"#007F00", # Green
"#00007F", # Blue
"#007F7F", # Cyan
"#7F007F", # Magenta
"#7F7F00", # Yellow
"#A0A0A4", # Gray
"#A52A2A", # Brown
]
DEF_COLUMNS = 9
MIN_COLUMNS = 5
MAX_COLUMNS = 30
DEF_ROWS = 9
MIN_ROWS = 5
MAX_ROWS = 30
DEF_MAX_COLORS = 4
MIN_MAX_COLORS = 2
MAX_MAX_COLORS = len(COLORS)
class Board(tk.Canvas):
def __init__(self, master, set_status_text, scoreText,
columns=DEF_COLUMNS, rows=DEF_ROWS, maxColors=DEF_MAX_COLORS,
delay=500, size=40, outline="#DFDFDF"):
self.columns = columns
self.rows = rows
self.maxColors = maxColors
self.delay = delay
self.outline = outline
self.size = size
self.set_status_text = set_status_text
self.scoreText = scoreText
self.score = 0
self.highScore = 0
super().__init__(master, width=self.columns * self.size,
height=self.rows * self.size)
self.pack(fill=tk.BOTH, expand=True)
self.bind("<ButtonRelease>", self._click)
self.new_game()
def new_game(self, event=None):
self.score = 0
random.shuffle(COLORS)
colors = COLORS[:self.maxColors]
self.tiles = []
for x in range(self.columns):
self.tiles.append([])
for y in range(self.rows):
self.tiles[x].append(random.choice(colors))
self._draw()
self.update_score()
def _draw(self, *args):
self.delete("all")
self.config(width=self.columns * self.size,
height=self.rows * self.size)
for x in range(self.columns):
x0 = x * self.size
x1 = x0 + self.size
for y in range(self.rows):
y0 = y * self.size
y1 = y0 + self.size
self._draw_square(self.size, x0, y0, x1, y1,
self.tiles[x][y], self.outline)
self.update()
# |\__t__/|
# |l| m |r|
# |/-----\|
# ----b----
#
def _draw_square(self, size, x0, y0, x1, y1, color, outline):
if color is None:
light, color, dark = (outline,) * 3
else:
light, color, dark = self._three_colors(color)
offset = 4
self.create_polygon( # top
x0, y0,
x0 + offset, y0 + offset,
x1 - offset, y0 + offset,
x1, y0,
fill=light, outline=light)
self.create_polygon( # left
x0, y0,
x0, y1,
x0 + offset, y1 - offset,
x0 + offset, y0 + offset,
fill=light, outline=light)
self.create_polygon( # right
x1 - offset, y0 + offset,
x1, y0,
x1, y1,
x1 - offset, y1 - offset,
fill=dark, outline=dark)
self.create_polygon( # bottom
x0, y1,
x0 + offset, y1 - offset,
x1 - offset, y1 - offset,
x1, y1,
fill=dark, outline=dark)
self.create_rectangle( # middle
x0 + offset, y0 + offset,
x1 - offset, y1 - offset,
fill=color, outline=color)
def _three_colors(self, color):
r, g, b = self.winfo_rgb(color)
color = "#{:04X}{:04X}{:04X}".format(r, g, b)
dark = "#{:04X}{:04X}{:04X}".format(max(0, int(r * 0.5)),
max(0, int(g * 0.5)), max(0, int(b * 0.5)))
light = "#{:04X}{:04X}{:04X}".format(min(0xFFFF, int(r * 1.5)),
min(0xFFFF, int(g * 1.5)), min(0xFFFF, int(b * 1.5)))
return light, color, dark
def _click(self, event):
x = event.x // self.size
y = event.y // self.size
color = self.tiles[x][y]
if color is None or not self._is_legal(x, y, color):
return
self._dim_adjoining(x, y, color)
def _is_legal(self, x, y, color):
"""A legal click is on a colored tile that is adjacent to
another tile of the same color."""
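        # Example (illustrative, not from the original source): if tiles[0][0]
        # and tiles[1][0] are both red, clicking either of them is legal; a tile
        # whose neighbours all have different colours is not a legal click.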
if x > 0 and self.tiles[x - 1][y] == color:
return True
if x + 1 < self.columns and self.tiles[x + 1][y] == color:
return True
if y > 0 and self.tiles[x][y - 1] == color:
return True
if y + 1 < self.rows and self.tiles[x][y + 1] == color:
return True
return False
def _dim_adjoining(self, x, y, color):
adjoining = set()
self._populate_adjoining(x, y, color, adjoining)
self.score += len(adjoining) ** (self.maxColors - 2)
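        # e.g. with the default maxColors == 4 (DEF_MAX_COLORS), removing a
        # group of 5 tiles adds 5 ** 2 == 25 points, so larger groups are
        # rewarded more than linearly.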
for x, y in adjoining:
self.tiles[x][y] = "#F0F0F0"
self._draw()
self.after(self.delay, lambda: self._delete_adjoining(adjoining))
def _populate_adjoining(self, x, y, color, adjoining):
if not ((0 <= x < self.columns) and (0 <= y < self.rows)):
return # Fallen off an edge
if (x, y) in adjoining or self.tiles[x][y] != color:
return # Color doesn't match or already done
adjoining.add((x, y))
self._populate_adjoining(x - 1, y, color, adjoining)
self._populate_adjoining(x + 1, y, color, adjoining)
self._populate_adjoining(x, y - 1, color, adjoining)
self._populate_adjoining(x, y + 1, color, adjoining)
def _delete_adjoining(self, adjoining):
for x, y in adjoining:
self.tiles[x][y] = None
self._draw()
self.after(self.delay, self._close_up)
def _close_up(self):
self._move()
self._draw()
self._check_game_over()
def _move(self):
moved = True
while moved:
moved = False
for x in range(self.columns):
for y in range(self.rows):
if self.tiles[x][y] is not None:
if self._move_if_possible(x, y):
moved = True
break
def _move_if_possible(self, x, y):
empty_neighbours = self._empty_neighbours(x, y)
if empty_neighbours:
move, nx, ny = self._nearest_to_middle(x, y, empty_neighbours)
if move:
self.tiles[nx][ny] = self.tiles[x][y]
self.tiles[x][y] = None
return True
return False
def _empty_neighbours(self, x, y):
neighbours = set()
for nx, ny in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
if (0 <= nx < self.columns and 0 <= ny < self.rows and
self.tiles[nx][ny] is None):
neighbours.add((nx, ny))
return neighbours
def _nearest_to_middle(self, x, y, empty_neighbours):
color = self.tiles[x][y]
midX = self.columns // 2
midY = self.rows // 2
Δold = math.hypot(midX - x, midY - y)
heap = []
for nx, ny in empty_neighbours:
if self._is_square(nx, ny):
Δnew = math.hypot(midX - nx, midY - ny)
if self._is_legal(nx, ny, color):
Δnew -= 0.1 # Make same colors slightly attractive
heapq.heappush(heap, (Δnew, nx, ny))
Δnew, nx, ny = heap[0]
return (True, nx, ny) if Δold > Δnew else (False, x, y)
def _is_square(self, x, y):
if x > 0 and self.tiles[x - 1][y] is not None:
return True
if x + 1 < self.columns and self.tiles[x + 1][y] is not None:
return True
if y > 0 and self.tiles[x][y - 1] is not None:
return True
if y + 1 < self.rows and self.tiles[x][y + 1] is not None:
return True
return False
def _check_game_over(self):
userWon, canMove = self._check_tiles()
title = message = None
if userWon:
title, message = self._user_won()
elif not canMove:
title = "Game Over"
message = "Game over with a score of {:,}.".format(
self.score)
if title is not None:
messagebox.showinfo("{} — {}".format(title, APPNAME), message,
parent=self)
self.new_game()
else:
self.update_score()
def _check_tiles(self):
countForColor = collections.defaultdict(int)
userWon = True
canMove = False
for x in range(self.columns):
for y in range(self.rows):
color = self.tiles[x][y]
if color is not None:
countForColor[color] += 1
userWon = False
if self._is_legal(x, y, color): # We _can_ move
canMove = True
if 1 in countForColor.values():
canMove = False
return userWon, canMove
def _user_won(self):
title = "Winner!"
message = "You won with a score of {:,}.".format(self.score)
if self.score > self.highScore:
self.highScore = self.score
message += "\nThat's a new high score!"
return title, message
def update_score(self):
self.scoreText.set("{:,} ({:,})".format(self.score,
self.highScore))
if __name__ == "__main__":
import sys
if sys.stdout.isatty():
application = tk.Tk()
application.title("Board")
scoreText = tk.StringVar()
board = Board(application, print, scoreText)
application.mainloop()
else:
print("Loaded OK")
| mit | 9,150,149,905,156,909,000 | 31.883648 | 74 | 0.515922 | false |
johannfaouzi/pyts | pyts/metrics/tests/test_boss.py | 1 | 1130 | """Testing for BOSS metric."""
# Author: Johann Faouzi <[email protected]>
# License: BSD-3-Clause
import numpy as np
import pytest
import re
from math import sqrt
from pyts.metrics import boss
x = np.arange(1, 6)
y = np.arange(1, 6)[::-1]
z = [0, 0, 0, 10, 0]
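# Note on the hard-coded expectations below: the BOSS distance is not symmetric.
# It sums squared differences only over positions where the first argument is
# non-zero, so boss(z, x) reduces to |10 - 4| = 6 and boss(z, y) to |10 - 2| = 8,
# while boss(x, z) and boss(y, z) use every position.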
@pytest.mark.parametrize(
'x, y, err_msg',
[(x.reshape(1, -1), y, "'x' must a one-dimensional array."),
(x, y.reshape(1, -1), "'y' must a one-dimensional array."),
(x[:2], y, "'x' and 'y' must have the same shape.")]
)
def test_parameter_check(x, y, err_msg):
"""Test parameter validation."""
with pytest.raises(ValueError, match=re.escape(err_msg)):
boss(x, y)
@pytest.mark.parametrize(
'x, y, arr_desired',
[(x, y, sqrt(np.sum((x - y) ** 2))),
(y, x, sqrt(np.sum((x - y) ** 2))),
(x, z, sqrt(np.sum((x - z) ** 2))),
(z, x, 6),
(y, z, sqrt(np.sum((y - z) ** 2))),
(z, y, 8)]
)
def test_actual_results(x, y, arr_desired):
"""Test that the actual results are the expected ones."""
arr_actual = boss(x, y)
np.testing.assert_allclose(arr_actual, arr_desired, atol=1e-5, rtol=0.)
| bsd-3-clause | 1,450,211,842,367,647,000 | 25.904762 | 75 | 0.577876 | false |
Nikoala/CouchPotatoServer | libs/guessit/slogging.py | 94 | 3388 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Smewt - A smart collection manager
# Copyright (c) 2011 Nicolas Wack <[email protected]>
#
# Smewt is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Smewt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
import logging
import sys
import os, os.path
GREEN_FONT = "\x1B[0;32m"
YELLOW_FONT = "\x1B[0;33m"
BLUE_FONT = "\x1B[0;34m"
RED_FONT = "\x1B[0;31m"
RESET_FONT = "\x1B[0m"
def setupLogging(colored=True, with_time=False, with_thread=False, filename=None, with_lineno=False):
"""Set up a nice colored logger as the main application logger."""
class SimpleFormatter(logging.Formatter):
def __init__(self, with_time, with_thread):
self.fmt = (('%(asctime)s ' if with_time else '') +
'%(levelname)-8s ' +
'[%(name)s:%(funcName)s' +
(':%(lineno)s' if with_lineno else '') + ']' +
('[%(threadName)s]' if with_thread else '') +
' -- %(message)s')
logging.Formatter.__init__(self, self.fmt)
class ColoredFormatter(logging.Formatter):
def __init__(self, with_time, with_thread):
self.fmt = (('%(asctime)s ' if with_time else '') +
'-CC-%(levelname)-8s ' +
BLUE_FONT + '[%(name)s:%(funcName)s' +
(':%(lineno)s' if with_lineno else '') + ']' +
RESET_FONT + ('[%(threadName)s]' if with_thread else '') +
' -- %(message)s')
logging.Formatter.__init__(self, self.fmt)
def format(self, record):
modpath = record.name.split('.')
record.mname = modpath[0]
record.mmodule = '.'.join(modpath[1:])
result = logging.Formatter.format(self, record)
if record.levelno == logging.DEBUG:
color = BLUE_FONT
elif record.levelno == logging.INFO:
color = GREEN_FONT
elif record.levelno == logging.WARNING:
color = YELLOW_FONT
else:
color = RED_FONT
result = result.replace('-CC-', color)
return result
if filename is not None:
# make sure we can write to our log file
logdir = os.path.dirname(filename)
if not os.path.exists(logdir):
os.makedirs(logdir)
ch = logging.FileHandler(filename, mode='w')
ch.setFormatter(SimpleFormatter(with_time, with_thread))
else:
ch = logging.StreamHandler()
if colored and sys.platform != 'win32':
ch.setFormatter(ColoredFormatter(with_time, with_thread))
else:
ch.setFormatter(SimpleFormatter(with_time, with_thread))
logging.getLogger().addHandler(ch)
| gpl-3.0 | 2,610,876,338,542,086,000 | 37.067416 | 101 | 0.577332 | false |
mikewiebe-ansible/ansible | lib/ansible/modules/cloud/google/gcp_compute_reservation_info.py | 13 | 10575 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_reservation_info
description:
- Gather info for GCP Reservation
short_description: Gather info for GCP Reservation
version_added: '2.10'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
filters:
description:
- A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
    - Each additional filter in the list will be added as an AND condition (filter1
      and filter2).
type: list
zone:
description:
- The zone where the reservation is made.
required: true
type: str
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- for authentication, you can set service_account_file using the C(gcp_service_account_file)
env variable.
- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variable values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: get info on a reservation
gcp_compute_reservation_info:
zone: us-central1-a
filters:
- name = test_object
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
resources:
description: List of resources
returned: always
type: complex
contains:
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
id:
description:
- The unique identifier for the resource.
returned: success
type: int
name:
description:
- Name of the resource. Provided by the client when the resource is created.
The name must be 1-63 characters long, and comply with RFC1035. Specifically,
the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
commitment:
description:
- Full or partial URL to a parent commitment. This field displays for reservations
that are tied to a commitment.
returned: success
type: str
specificReservationRequired:
description:
- When set to true, only VMs that target this reservation by name can consume
this reservation. Otherwise, it can be consumed by VMs with affinity for any
reservation. Defaults to false.
returned: success
type: bool
status:
description:
- The status of the reservation.
returned: success
type: str
specificReservation:
description:
- Reservation for instances with specific machine shapes.
returned: success
type: complex
contains:
count:
description:
- The number of resources that are allocated.
returned: success
type: int
inUseCount:
description:
- How many instances are in use.
returned: success
type: int
instanceProperties:
description:
- The instance properties for the reservation.
returned: success
type: complex
contains:
machineType:
description:
- The name of the machine type to reserve.
returned: success
type: str
minCpuPlatform:
description:
- The minimum CPU platform for the reservation. For example, `"Intel
Skylake"`. See U(https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform#availablezones)
for information on available CPU platforms.
returned: success
type: str
guestAccelerators:
description:
- Guest accelerator type and count.
returned: success
type: complex
contains:
acceleratorType:
description:
- 'The full or partial URL of the accelerator type to attach to
this instance. For example: `projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100`
If you are creating an instance template, specify only the accelerator
name.'
returned: success
type: str
acceleratorCount:
description:
- The number of the guest accelerator cards exposed to this instance.
returned: success
type: int
localSsds:
description:
- The amount of local ssd to reserve with each instance. This reserves
disks of type `local-ssd`.
returned: success
type: complex
contains:
interface:
description:
- The disk interface to use for attaching this disk, one of `SCSI`
or `NVME`. The default is `SCSI`.
returned: success
type: str
diskSizeGb:
description:
- The size of the disk in base-2 GB.
returned: success
type: int
zone:
description:
- The zone where the reservation is made.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str'), zone=dict(required=True, type='str')))
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
return_value = {'resources': fetch_list(module, collection(module), query_options(module.params['filters']))}
module.exit_json(**return_value)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/reservations".format(**module.params)
def fetch_list(module, link, query):
auth = GcpSession(module, 'compute')
return auth.list(link, return_if_object, array_name='items', params={'filter': query})
def query_options(filters):
if not filters:
return ''
if len(filters) == 1:
return filters[0]
else:
queries = []
for f in filters:
# For multiple queries, all queries should have ()
if f[0] != '(' and f[-1] != ')':
queries.append("(%s)" % ''.join(f))
else:
queries.append(f)
return ' '.join(queries)
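# Illustrative behaviour of query_options above (not part of the generated module):
#   query_options(['name = my-reservation'])
#       returns 'name = my-reservation'
#   query_options(['name = my-reservation', 'status = READY'])
#       returns '(name = my-reservation) (status = READY)'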
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
| gpl-3.0 | 6,478,356,719,612,284,000 | 32.894231 | 133 | 0.58487 | false |
mikewiebe-ansible/ansible | test/units/modules/network/netvisor/test_pn_vrouter_pim_config.py | 23 | 2463 | # Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.netvisor import pn_vrouter_pim_config
from units.modules.utils import set_module_args
from .nvos_module import TestNvosModule
class TestVrouterPimConfigModule(TestNvosModule):
module = pn_vrouter_pim_config
def setUp(self):
self.mock_run_nvos_commands = patch('ansible.modules.network.netvisor.pn_vrouter_pim_config.run_cli')
self.run_nvos_commands = self.mock_run_nvos_commands.start()
self.mock_run_check_cli = patch('ansible.modules.network.netvisor.pn_vrouter_pim_config.check_cli')
self.run_check_cli = self.mock_run_check_cli.start()
def tearDown(self):
self.mock_run_nvos_commands.stop()
self.mock_run_check_cli.stop()
def run_cli_patch(self, module, cli, state_map):
if state_map['update'] == 'vrouter-pim-config-modify':
results = dict(
changed=True,
cli_cmd=cli
)
module.exit_json(**results)
def load_fixtures(self, commands=None, state=None, transport='cli'):
self.run_nvos_commands.side_effect = self.run_cli_patch
if state == 'update':
self.run_check_cli.return_value = True
def test_vrouter_pim_config_t1(self):
set_module_args({'pn_cliswitch': 'sw01', 'pn_query_interval': '10',
'pn_querier_timeout': '30', 'pn_vrouter_name': 'foo-vrouter', 'state': 'update'})
result = self.execute_module(changed=True, state='update')
expected_cmd = ' switch sw01 vrouter-pim-config-modify vrouter-name foo-vrouter '
expected_cmd += 'querier-timeout 30 query-interval 10'
self.assertEqual(result['cli_cmd'], expected_cmd)
def test_vrouter_pim_config_t2(self):
set_module_args({'pn_cliswitch': 'sw01', 'pn_query_interval': '30',
'pn_hello_interval': '120', 'pn_vrouter_name': 'foo-vrouter', 'state': 'update'})
result = self.execute_module(changed=True, state='update')
expected_cmd = ' switch sw01 vrouter-pim-config-modify vrouter-name foo-vrouter '
expected_cmd += 'hello-interval 120 query-interval 30'
self.assertEqual(result['cli_cmd'], expected_cmd)
| gpl-3.0 | -5,268,861,543,392,243,000 | 43.781818 | 109 | 0.653268 | false |
shrkey/ardupilot | Tools/LogAnalyzer/tests/TestDualGyroDrift.py | 273 | 5396 | from LogAnalyzer import Test,TestResult
import DataflashLog
# import scipy
# import pylab #### TEMP!!! only for dev
# from scipy import signal
class TestDualGyroDrift(Test):
'''test for gyro drift between dual IMU data'''
def __init__(self):
Test.__init__(self)
self.name = "Gyro Drift"
self.enable = False
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD
# if "IMU" not in logdata.channels or "IMU2" not in logdata.channels:
# self.result.status = TestResult.StatusType.NA
# return
# imuX = logdata.channels["IMU"]["GyrX"].listData
# imu2X = logdata.channels["IMU2"]["GyrX"].listData
# # NOTE: weird thing about Holger's log is that the counts of IMU+IMU2 are different
# print "length 1: %.2f, length 2: %.2f" % (len(imuX),len(imu2X))
# #assert(len(imuX) == len(imu2X))
# # divide the curve into segments and get the average of each segment
# # we will get the diff between those averages, rather than a per-sample diff as the IMU+IMU2 arrays are often not the same length
# diffThresholdWARN = 0.03
# diffThresholdFAIL = 0.05
# nSamples = 10
# imu1XAverages, imu1YAverages, imu1ZAverages, imu2XAverages, imu2YAverages, imu2ZAverages = ([],[],[],[],[],[])
# imuXDiffAverages, imuYDiffAverages, imuZDiffAverages = ([],[],[])
# maxDiffX, maxDiffY, maxDiffZ = (0,0,0)
# sliceLength1 = len(logdata.channels["IMU"]["GyrX"].dictData.values()) / nSamples
# sliceLength2 = len(logdata.channels["IMU2"]["GyrX"].dictData.values()) / nSamples
# for i in range(0,nSamples):
# imu1XAverages.append(numpy.mean(logdata.channels["IMU"]["GyrX"].dictData.values()[i*sliceLength1:i*sliceLength1+sliceLength1]))
# imu1YAverages.append(numpy.mean(logdata.channels["IMU"]["GyrY"].dictData.values()[i*sliceLength1:i*sliceLength1+sliceLength1]))
# imu1ZAverages.append(numpy.mean(logdata.channels["IMU"]["GyrZ"].dictData.values()[i*sliceLength1:i*sliceLength1+sliceLength1]))
# imu2XAverages.append(numpy.mean(logdata.channels["IMU2"]["GyrX"].dictData.values()[i*sliceLength2:i*sliceLength2+sliceLength2]))
# imu2YAverages.append(numpy.mean(logdata.channels["IMU2"]["GyrY"].dictData.values()[i*sliceLength2:i*sliceLength2+sliceLength2]))
# imu2ZAverages.append(numpy.mean(logdata.channels["IMU2"]["GyrZ"].dictData.values()[i*sliceLength2:i*sliceLength2+sliceLength2]))
# imuXDiffAverages.append(imu2XAverages[-1]-imu1XAverages[-1])
# imuYDiffAverages.append(imu2YAverages[-1]-imu1YAverages[-1])
# imuZDiffAverages.append(imu2ZAverages[-1]-imu1ZAverages[-1])
# if abs(imuXDiffAverages[-1]) > maxDiffX:
# maxDiffX = imuXDiffAverages[-1]
# if abs(imuYDiffAverages[-1]) > maxDiffY:
# maxDiffY = imuYDiffAverages[-1]
# if abs(imuZDiffAverages[-1]) > maxDiffZ:
# maxDiffZ = imuZDiffAverages[-1]
# if max(maxDiffX,maxDiffY,maxDiffZ) > diffThresholdFAIL:
# self.result.status = TestResult.StatusType.FAIL
# self.result.statusMessage = "IMU/IMU2 gyro averages differ by more than %s radians" % diffThresholdFAIL
# elif max(maxDiffX,maxDiffY,maxDiffZ) > diffThresholdWARN:
# self.result.status = TestResult.StatusType.WARN
# self.result.statusMessage = "IMU/IMU2 gyro averages differ by more than %s radians" % diffThresholdWARN
# # pylab.plot(zip(*imuX)[0], zip(*imuX)[1], 'g')
# # pylab.plot(zip(*imu2X)[0], zip(*imu2X)[1], 'r')
# #pylab.plot(range(0,(nSamples*sliceLength1),sliceLength1), imu1ZAverages, 'b')
# print "Gyro averages1X: " + `imu1XAverages`
# print "Gyro averages1Y: " + `imu1YAverages`
# print "Gyro averages1Z: " + `imu1ZAverages` + "\n"
# print "Gyro averages2X: " + `imu2XAverages`
# print "Gyro averages2Y: " + `imu2YAverages`
# print "Gyro averages2Z: " + `imu2ZAverages` + "\n"
# print "Gyro averages diff X: " + `imuXDiffAverages`
# print "Gyro averages diff Y: " + `imuYDiffAverages`
# print "Gyro averages diff Z: " + `imuZDiffAverages`
# # lowpass filter using numpy
# # cutoff = 100
# # fs = 10000.0
# # b,a = scipy.signal.filter_design.butter(5,cutoff/(fs/2))
# # imuXFiltered = scipy.signal.filtfilt(b,a,zip(*imuX)[1])
# # imu2XFiltered = scipy.signal.filtfilt(b,a,zip(*imu2X)[1])
# #pylab.plot(imuXFiltered, 'r')
# # TMP: DISPLAY BEFORE+AFTER plots
# pylab.show()
# # print "imuX average before lowpass filter: %.8f" % logdata.channels["IMU"]["GyrX"].avg()
# # print "imuX average after lowpass filter: %.8f" % numpy.mean(imuXFiltered)
# # print "imu2X average before lowpass filter: %.8f" % logdata.channels["IMU2"]["GyrX"].avg()
# # print "imu2X average after lowpass filter: %.8f" % numpy.mean(imu2XFiltered)
# avg1X = logdata.channels["IMU"]["GyrX"].avg()
# avg1Y = logdata.channels["IMU"]["GyrY"].avg()
# avg1Z = logdata.channels["IMU"]["GyrZ"].avg()
# avg2X = logdata.channels["IMU2"]["GyrX"].avg()
# avg2Y = logdata.channels["IMU2"]["GyrY"].avg()
# avg2Z = logdata.channels["IMU2"]["GyrZ"].avg()
# avgRatioX = (max(avg1X,avg2X) - min(avg1X,avg2X)) / #abs(max(avg1X,avg2X) / min(avg1X,avg2X))
# avgRatioY = abs(max(avg1Y,avg2Y) / min(avg1Y,avg2Y))
# avgRatioZ = abs(max(avg1Z,avg2Z) / min(avg1Z,avg2Z))
# self.result.statusMessage = "IMU gyro avg: %.4f,%.4f,%.4f\nIMU2 gyro avg: %.4f,%.4f,%.4f\nAvg ratio: %.4f,%.4f,%.4f" % (avg1X,avg1Y,avg1Z, avg2X,avg2Y,avg2Z, avgRatioX,avgRatioY,avgRatioZ)
| gpl-3.0 | 4,474,231,960,514,473,500 | 44.344538 | 193 | 0.686805 | false |
siddhartharay007/buck | third-party/py/twitter-commons/src/python/twitter/common/python/translator.py | 18 | 6084 | from __future__ import absolute_import
from abc import abstractmethod
import os
import shutil
from .common import chmod_plus_w, safe_rmtree, safe_mkdir, safe_mkdtemp
from .compatibility import AbstractClass
from .installer import WheelInstaller
from .interpreter import PythonInterpreter
from .package import (
EggPackage,
Package,
SourcePackage,
WheelPackage,
)
from .platforms import Platform
from .tracer import TRACER
from .util import DistributionHelper
class TranslatorBase(AbstractClass):
"""
Translate a link into a distribution.
"""
@abstractmethod
def translate(self, link):
pass
class ChainedTranslator(TranslatorBase):
"""
Glue a sequence of Translators together in priority order. The first Translator to resolve a
requirement wins.
"""
def __init__(self, *translators):
self._translators = list(filter(None, translators))
for tx in self._translators:
if not isinstance(tx, TranslatorBase):
raise ValueError('Expected a sequence of translators, got %s instead.' % type(tx))
def translate(self, package):
for tx in self._translators:
dist = tx.translate(package)
if dist:
return dist
class SourceTranslator(TranslatorBase):
@classmethod
def run_2to3(cls, path):
from lib2to3.refactor import get_fixers_from_package, RefactoringTool
rt = RefactoringTool(get_fixers_from_package('lib2to3.fixes'))
with TRACER.timed('Translating %s' % path):
for root, dirs, files in os.walk(path):
for fn in files:
full_fn = os.path.join(root, fn)
if full_fn.endswith('.py'):
with TRACER.timed('%s' % fn, V=3):
try:
chmod_plus_w(full_fn)
rt.refactor_file(full_fn, write=True)
except IOError as e:
TRACER.log('Failed to translate %s: %s' % (fn, e))
def __init__(self,
install_cache=None,
interpreter=PythonInterpreter.get(),
platform=Platform.current(),
use_2to3=False,
conn_timeout=None,
installer_impl=WheelInstaller):
self._interpreter = interpreter
self._installer_impl = installer_impl
self._use_2to3 = use_2to3
self._install_cache = install_cache or safe_mkdtemp()
safe_mkdir(self._install_cache)
self._conn_timeout = conn_timeout
self._platform = platform
def translate(self, package):
"""From a SourcePackage, translate to a binary distribution."""
if not isinstance(package, SourcePackage):
return None
unpack_path, installer = None, None
version = self._interpreter.version
try:
unpack_path = package.fetch(conn_timeout=self._conn_timeout)
except package.UnreadableLink as e:
TRACER.log('Failed to fetch %s: %s' % (package, e))
return None
try:
if self._use_2to3 and version >= (3,):
with TRACER.timed('Translating 2->3 %s' % package.name):
self.run_2to3(unpack_path)
installer = self._installer_impl(
unpack_path,
interpreter=self._interpreter,
strict=(package.name not in ('distribute', 'setuptools')))
with TRACER.timed('Packaging %s' % package.name):
try:
dist_path = installer.bdist()
except self._installer_impl.InstallFailure:
return None
target_path = os.path.join(self._install_cache, os.path.basename(dist_path))
# TODO: Make this atomic.
shutil.move(dist_path, target_path)
target_package = Package.from_href(target_path)
if not target_package:
return None
if not target_package.compatible(self._interpreter.identity, platform=self._platform):
return None
return DistributionHelper.distribution_from_path(target_path)
finally:
if installer:
installer.cleanup()
if unpack_path:
safe_rmtree(unpack_path)
class BinaryTranslator(TranslatorBase):
def __init__(self,
package_type,
install_cache=None,
interpreter=PythonInterpreter.get(),
platform=Platform.current(),
conn_timeout=None):
self._package_type = package_type
self._install_cache = install_cache or safe_mkdtemp()
self._platform = platform
self._identity = interpreter.identity
self._conn_timeout = conn_timeout
def translate(self, package):
"""From a binary package, translate to a local binary distribution."""
if not isinstance(package, self._package_type):
return None
if not package.compatible(identity=self._identity, platform=self._platform):
return None
try:
bdist = package.fetch(location=self._install_cache, conn_timeout=self._conn_timeout)
except package.UnreadableLink as e:
TRACER.log('Failed to fetch %s: %s' % (package, e))
return None
return DistributionHelper.distribution_from_path(bdist)
class EggTranslator(BinaryTranslator):
def __init__(self, **kw):
super(EggTranslator, self).__init__(EggPackage, **kw)
class WheelTranslator(BinaryTranslator):
def __init__(self, **kw):
super(WheelTranslator, self).__init__(WheelPackage, **kw)
class Translator(object):
@staticmethod
def default(install_cache=None,
platform=Platform.current(),
interpreter=None,
conn_timeout=None):
# TODO(user) Consider interpreter=None to indicate "universal" packages
# since the .whl format can support this.
# Also consider platform=None to require platform-inspecific packages.
interpreter = interpreter or PythonInterpreter.get()
shared_options = dict(
install_cache=install_cache,
interpreter=interpreter,
conn_timeout=conn_timeout)
whl_translator = WheelTranslator(platform=platform, **shared_options)
egg_translator = EggTranslator(platform=platform, **shared_options)
source_translator = SourceTranslator(**shared_options)
return ChainedTranslator(whl_translator, egg_translator, source_translator)
| apache-2.0 | -1,293,689,036,037,734,000 | 32.61326 | 97 | 0.659599 | false |
foreni-packages/golismero | thirdparty_libs/django/core/serializers/pyyaml.py | 110 | 2353 | """
YAML serializer.
Requires PyYaml (http://pyyaml.org/), but that's checked for in __init__.
"""
import decimal
import yaml
from io import StringIO
from django.db import models
from django.core.serializers.base import DeserializationError
from django.core.serializers.python import Serializer as PythonSerializer
from django.core.serializers.python import Deserializer as PythonDeserializer
from django.utils import six
class DjangoSafeDumper(yaml.SafeDumper):
def represent_decimal(self, data):
return self.represent_scalar('tag:yaml.org,2002:str', str(data))
DjangoSafeDumper.add_representer(decimal.Decimal, DjangoSafeDumper.represent_decimal)
class Serializer(PythonSerializer):
"""
Convert a queryset to YAML.
"""
internal_use_only = False
def handle_field(self, obj, field):
# A nasty special case: base YAML doesn't support serialization of time
# types (as opposed to dates or datetimes, which it does support). Since
# we want to use the "safe" serializer for better interoperability, we
# need to do something with those pesky times. Converting 'em to strings
# isn't perfect, but it's better than a "!!python/time" type which would
# halt deserialization under any other language.
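        # For example, datetime.time(12, 30) is written out as the plain string
        # '12:30:00' rather than a Python-specific '!!python/time' node.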
if isinstance(field, models.TimeField) and getattr(obj, field.name) is not None:
self._current[field.name] = str(getattr(obj, field.name))
else:
super(Serializer, self).handle_field(obj, field)
def end_serialization(self):
yaml.dump(self.objects, self.stream, Dumper=DjangoSafeDumper, **self.options)
def getvalue(self):
# Grand-parent super
return super(PythonSerializer, self).getvalue()
def Deserializer(stream_or_string, **options):
"""
Deserialize a stream or string of YAML data.
"""
if isinstance(stream_or_string, bytes):
stream_or_string = stream_or_string.decode('utf-8')
if isinstance(stream_or_string, six.string_types):
stream = StringIO(stream_or_string)
else:
stream = stream_or_string
try:
for obj in PythonDeserializer(yaml.safe_load(stream), **options):
yield obj
except GeneratorExit:
raise
except Exception as e:
# Map to deserializer error
raise DeserializationError(e)
| gpl-2.0 | -3,207,120,520,895,233,000 | 34.119403 | 88 | 0.694433 | false |
eteq/ginga | ginga/gtkw/ImageViewCanvasGtk.py | 4 | 1290 | #
# ImageViewCanvasGtk.py -- A FITS image widget with canvas drawing in Gtk
#
# Eric Jeschke ([email protected])
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga.gtkw import ImageViewGtk
from ginga.canvas.mixins import DrawingMixin, CanvasMixin, CompoundMixin
from ginga.util.toolbox import ModeIndicator
class ImageViewCanvasError(ImageViewGtk.ImageViewGtkError):
pass
class ImageViewCanvas(ImageViewGtk.ImageViewZoom,
DrawingMixin, CanvasMixin, CompoundMixin):
def __init__(self, logger=None, rgbmap=None, settings=None,
bindmap=None, bindings=None):
ImageViewGtk.ImageViewZoom.__init__(self, logger=logger,
rgbmap=rgbmap,
settings=settings,
bindmap=bindmap,
bindings=bindings)
CompoundMixin.__init__(self)
CanvasMixin.__init__(self)
DrawingMixin.__init__(self)
# we are both a viewer and a canvas
self.set_canvas(self, private_canvas=self)
self._mi = ModeIndicator(self)
#END
| bsd-3-clause | 7,138,814,110,296,551,000 | 33.864865 | 73 | 0.612403 | false |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pygments/lexers/_tsql_builtins.py | 31 | 15484 | # -*- coding: utf-8 -*-
"""
pygments.lexers._tsql_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These are manually translated lists from https://msdn.microsoft.com.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# See https://msdn.microsoft.com/en-us/library/ms174986.aspx.
OPERATORS = (
'!<',
'!=',
'!>',
'<',
'<=',
'<>',
'=',
'>',
'>=',
'+',
'+=',
'-',
'-=',
'*',
'*=',
'/',
'/=',
'%',
'%=',
'&',
'&=',
'|',
'|=',
'^',
'^=',
'~',
'::',
)
OPERATOR_WORDS = (
'all',
'and',
'any',
'between',
'except',
'exists',
'in',
'intersect',
'like',
'not',
'or',
'some',
'union',
)
_KEYWORDS_SERVER = (
'add',
'all',
'alter',
'and',
'any',
'as',
'asc',
'authorization',
'backup',
'begin',
'between',
'break',
'browse',
'bulk',
'by',
'cascade',
'case',
'catch',
'check',
'checkpoint',
'close',
'clustered',
'coalesce',
'collate',
'column',
'commit',
'compute',
'constraint',
'contains',
'containstable',
'continue',
'convert',
'create',
'cross',
'current',
'current_date',
'current_time',
'current_timestamp',
'current_user',
'cursor',
'database',
'dbcc',
'deallocate',
'declare',
'default',
'delete',
'deny',
'desc',
'disk',
'distinct',
'distributed',
'double',
'drop',
'dump',
'else',
'end',
'errlvl',
'escape',
'except',
'exec',
'execute',
'exists',
'exit',
'external',
'fetch',
'file',
'fillfactor',
'for',
'foreign',
'freetext',
'freetexttable',
'from',
'full',
'function',
'goto',
'grant',
'group',
'having',
'holdlock',
'identity',
'identity_insert',
'identitycol',
'if',
'in',
'index',
'inner',
'insert',
'intersect',
'into',
'is',
'join',
'key',
'kill',
'left',
'like',
'lineno',
'load',
'merge',
'national',
'nocheck',
'nonclustered',
'not',
'null',
'nullif',
'of',
'off',
'offsets',
'on',
'open',
'opendatasource',
'openquery',
'openrowset',
'openxml',
'option',
'or',
'order',
'outer',
'over',
'percent',
'pivot',
'plan',
'precision',
'primary',
'print',
'proc',
'procedure',
'public',
'raiserror',
'read',
'readtext',
'reconfigure',
'references',
'replication',
'restore',
'restrict',
'return',
'revert',
'revoke',
'right',
'rollback',
'rowcount',
'rowguidcol',
'rule',
'save',
'schema',
'securityaudit',
'select',
'semantickeyphrasetable',
'semanticsimilaritydetailstable',
'semanticsimilaritytable',
'session_user',
'set',
'setuser',
'shutdown',
'some',
'statistics',
'system_user',
'table',
'tablesample',
'textsize',
'then',
'throw',
'to',
'top',
'tran',
'transaction',
'trigger',
'truncate',
'try',
'try_convert',
'tsequal',
'union',
'unique',
'unpivot',
'update',
'updatetext',
'use',
'user',
'values',
'varying',
'view',
'waitfor',
'when',
'where',
'while',
'with',
'within',
'writetext',
)
_KEYWORDS_FUTURE = (
'absolute',
'action',
'admin',
'after',
'aggregate',
'alias',
'allocate',
'are',
'array',
'asensitive',
'assertion',
'asymmetric',
'at',
'atomic',
'before',
'binary',
'bit',
'blob',
'boolean',
'both',
'breadth',
'call',
'called',
'cardinality',
'cascaded',
'cast',
'catalog',
'char',
'character',
'class',
'clob',
'collation',
'collect',
'completion',
'condition',
'connect',
'connection',
'constraints',
'constructor',
'corr',
'corresponding',
'covar_pop',
'covar_samp',
'cube',
'cume_dist',
'current_catalog',
'current_default_transform_group',
'current_path',
'current_role',
'current_schema',
'current_transform_group_for_type',
'cycle',
'data',
'date',
'day',
'dec',
'decimal',
'deferrable',
'deferred',
'depth',
'deref',
'describe',
'descriptor',
'destroy',
'destructor',
'deterministic',
'diagnostics',
'dictionary',
'disconnect',
'domain',
'dynamic',
'each',
'element',
'end-exec',
'equals',
'every',
'exception',
'false',
'filter',
'first',
'float',
'found',
'free',
'fulltexttable',
'fusion',
'general',
'get',
'global',
'go',
'grouping',
'hold',
'host',
'hour',
'ignore',
'immediate',
'indicator',
'initialize',
'initially',
'inout',
'input',
'int',
'integer',
'intersection',
'interval',
'isolation',
'iterate',
'language',
'large',
'last',
'lateral',
'leading',
'less',
'level',
'like_regex',
'limit',
'ln',
'local',
'localtime',
'localtimestamp',
'locator',
'map',
'match',
'member',
'method',
'minute',
'mod',
'modifies',
'modify',
'module',
'month',
'multiset',
'names',
'natural',
'nchar',
'nclob',
'new',
'next',
'no',
'none',
'normalize',
'numeric',
'object',
'occurrences_regex',
'old',
'only',
'operation',
'ordinality',
'out',
'output',
'overlay',
'pad',
'parameter',
'parameters',
'partial',
'partition',
'path',
'percent_rank',
'percentile_cont',
'percentile_disc',
'position_regex',
'postfix',
'prefix',
'preorder',
'prepare',
'preserve',
'prior',
'privileges',
'range',
'reads',
'real',
'recursive',
'ref',
'referencing',
'regr_avgx',
'regr_avgy',
'regr_count',
'regr_intercept',
'regr_r2',
'regr_slope',
'regr_sxx',
'regr_sxy',
'regr_syy',
'relative',
'release',
'result',
'returns',
'role',
'rollup',
'routine',
'row',
'rows',
'savepoint',
'scope',
'scroll',
'search',
'second',
'section',
'sensitive',
'sequence',
'session',
'sets',
'similar',
'size',
'smallint',
'space',
'specific',
'specifictype',
'sql',
'sqlexception',
'sqlstate',
'sqlwarning',
'start',
'state',
'statement',
'static',
'stddev_pop',
'stddev_samp',
'structure',
'submultiset',
'substring_regex',
'symmetric',
'system',
'temporary',
'terminate',
'than',
'time',
'timestamp',
'timezone_hour',
'timezone_minute',
'trailing',
'translate_regex',
'translation',
'treat',
'true',
'uescape',
'under',
'unknown',
'unnest',
'usage',
'using',
'value',
'var_pop',
'var_samp',
'varchar',
'variable',
'whenever',
'width_bucket',
'window',
'within',
'without',
'work',
'write',
'xmlagg',
'xmlattributes',
'xmlbinary',
'xmlcast',
'xmlcomment',
'xmlconcat',
'xmldocument',
'xmlelement',
'xmlexists',
'xmlforest',
'xmliterate',
'xmlnamespaces',
'xmlparse',
'xmlpi',
'xmlquery',
'xmlserialize',
'xmltable',
'xmltext',
'xmlvalidate',
'year',
'zone',
)
_KEYWORDS_ODBC = (
'absolute',
'action',
'ada',
'add',
'all',
'allocate',
'alter',
'and',
'any',
'are',
'as',
'asc',
'assertion',
'at',
'authorization',
'avg',
'begin',
'between',
'bit',
'bit_length',
'both',
'by',
'cascade',
'cascaded',
'case',
'cast',
'catalog',
'char',
'char_length',
'character',
'character_length',
'check',
'close',
'coalesce',
'collate',
'collation',
'column',
'commit',
'connect',
'connection',
'constraint',
'constraints',
'continue',
'convert',
'corresponding',
'count',
'create',
'cross',
'current',
'current_date',
'current_time',
'current_timestamp',
'current_user',
'cursor',
'date',
'day',
'deallocate',
'dec',
'decimal',
'declare',
'default',
'deferrable',
'deferred',
'delete',
'desc',
'describe',
'descriptor',
'diagnostics',
'disconnect',
'distinct',
'domain',
'double',
'drop',
'else',
'end',
'end-exec',
'escape',
'except',
'exception',
'exec',
'execute',
'exists',
'external',
'extract',
'false',
'fetch',
'first',
'float',
'for',
'foreign',
'fortran',
'found',
'from',
'full',
'get',
'global',
'go',
'goto',
'grant',
'group',
'having',
'hour',
'identity',
'immediate',
'in',
'include',
'index',
'indicator',
'initially',
'inner',
'input',
'insensitive',
'insert',
'int',
'integer',
'intersect',
'interval',
'into',
'is',
'isolation',
'join',
'key',
'language',
'last',
'leading',
'left',
'level',
'like',
'local',
'lower',
'match',
'max',
'min',
'minute',
'module',
'month',
'names',
'national',
'natural',
'nchar',
'next',
'no',
'none',
'not',
'null',
'nullif',
'numeric',
'octet_length',
'of',
'on',
'only',
'open',
'option',
'or',
'order',
'outer',
'output',
'overlaps',
'pad',
'partial',
'pascal',
'position',
'precision',
'prepare',
'preserve',
'primary',
'prior',
'privileges',
'procedure',
'public',
'read',
'real',
'references',
'relative',
'restrict',
'revoke',
'right',
'rollback',
'rows',
'schema',
'scroll',
'second',
'section',
'select',
'session',
'session_user',
'set',
'size',
'smallint',
'some',
'space',
'sql',
'sqlca',
'sqlcode',
'sqlerror',
'sqlstate',
'sqlwarning',
'substring',
'sum',
'system_user',
'table',
'temporary',
'then',
'time',
'timestamp',
'timezone_hour',
'timezone_minute',
'to',
'trailing',
'transaction',
'translate',
'translation',
'trim',
'true',
'union',
'unique',
'unknown',
'update',
'upper',
'usage',
'user',
'using',
'value',
'values',
'varchar',
'varying',
'view',
'when',
'whenever',
'where',
'with',
'work',
'write',
'year',
'zone',
)
# See https://msdn.microsoft.com/en-us/library/ms189822.aspx.
KEYWORDS = sorted(set(_KEYWORDS_FUTURE + _KEYWORDS_ODBC + _KEYWORDS_SERVER))
# See https://msdn.microsoft.com/en-us/library/ms187752.aspx.
TYPES = (
'bigint',
'binary',
'bit',
'char',
'cursor',
'date',
'datetime',
'datetime2',
'datetimeoffset',
'decimal',
'float',
'hierarchyid',
'image',
'int',
'money',
'nchar',
'ntext',
'numeric',
'nvarchar',
'real',
'smalldatetime',
'smallint',
'smallmoney',
'sql_variant',
'table',
'text',
'time',
'timestamp',
'tinyint',
'uniqueidentifier',
'varbinary',
'varchar',
'xml',
)
# See https://msdn.microsoft.com/en-us/library/ms174318.aspx.
FUNCTIONS = (
'$partition',
'abs',
'acos',
'app_name',
'applock_mode',
'applock_test',
'ascii',
'asin',
'assemblyproperty',
'atan',
'atn2',
'avg',
'binary_checksum',
'cast',
'ceiling',
'certencoded',
'certprivatekey',
'char',
'charindex',
'checksum',
'checksum_agg',
'choose',
'col_length',
'col_name',
'columnproperty',
'compress',
'concat',
'connectionproperty',
'context_info',
'convert',
'cos',
'cot',
'count',
'count_big',
'current_request_id',
'current_timestamp',
'current_transaction_id',
'current_user',
'cursor_status',
'database_principal_id',
'databasepropertyex',
'dateadd',
'datediff',
'datediff_big',
'datefromparts',
'datename',
'datepart',
'datetime2fromparts',
'datetimefromparts',
'datetimeoffsetfromparts',
'day',
'db_id',
'db_name',
'decompress',
'degrees',
'dense_rank',
'difference',
'eomonth',
'error_line',
'error_message',
'error_number',
'error_procedure',
'error_severity',
'error_state',
'exp',
'file_id',
'file_idex',
'file_name',
'filegroup_id',
'filegroup_name',
'filegroupproperty',
'fileproperty',
'floor',
'format',
'formatmessage',
'fulltextcatalogproperty',
'fulltextserviceproperty',
'get_filestream_transaction_context',
'getansinull',
'getdate',
'getutcdate',
'grouping',
'grouping_id',
'has_perms_by_name',
'host_id',
'host_name',
'iif',
'index_col',
'indexkey_property',
'indexproperty',
'is_member',
'is_rolemember',
'is_srvrolemember',
'isdate',
'isjson',
'isnull',
'isnumeric',
'json_modify',
'json_query',
'json_value',
'left',
'len',
'log',
'log10',
'lower',
'ltrim',
'max',
'min',
'min_active_rowversion',
'month',
'nchar',
'newid',
'newsequentialid',
'ntile',
'object_definition',
'object_id',
'object_name',
'object_schema_name',
'objectproperty',
'objectpropertyex',
'opendatasource',
'openjson',
'openquery',
'openrowset',
'openxml',
'original_db_name',
'original_login',
'parse',
'parsename',
'patindex',
'permissions',
'pi',
'power',
'pwdcompare',
'pwdencrypt',
'quotename',
'radians',
'rand',
'rank',
'replace',
'replicate',
'reverse',
'right',
'round',
'row_number',
'rowcount_big',
'rtrim',
'schema_id',
'schema_name',
'scope_identity',
'serverproperty',
'session_context',
'session_user',
'sign',
'sin',
'smalldatetimefromparts',
'soundex',
'sp_helplanguage',
'space',
'sqrt',
'square',
'stats_date',
'stdev',
'stdevp',
'str',
'string_escape',
'string_split',
'stuff',
'substring',
'sum',
'suser_id',
'suser_name',
'suser_sid',
'suser_sname',
'switchoffset',
'sysdatetime',
'sysdatetimeoffset',
'system_user',
'sysutcdatetime',
'tan',
'textptr',
'textvalid',
'timefromparts',
'todatetimeoffset',
'try_cast',
'try_convert',
'try_parse',
'type_id',
'type_name',
'typeproperty',
'unicode',
'upper',
'user_id',
'user_name',
'var',
'varp',
'xact_state',
'year',
)
| gpl-3.0 | 6,833,957,967,043,161,000 | 14.422311 | 76 | 0.47139 | false |
stevenbrichards/boto | boto/cognito/__init__.py | 473 | 1123 | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
| mit | 68,578,901,372,306,024 | 52.47619 | 77 | 0.770258 | false |
oy-vey/algorithms-and-data-structures | 6-GenomeAssemblyProgrammingChallenge/Week3/optimal_kmer_size.py | 1 | 2363 | # python3
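# Overview of this script (descriptive note): each read of length k contributes
# an edge from its (k-1)-prefix to its (k-1)-suffix, giving a De Bruijn-style
# overlap graph. The main loop below starts with k equal to the read length and
# keeps shrinking k (re-deriving all k-mers from the reads) until the graph is
# balanced -- every vertex has equal in- and out-degree, the condition checked
# by check_if_balanced() for an Eulerian cycle to exist -- then prints that k.
# read_data() expects exactly 400 reads on standard input.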
class Edge:
def __init__(self, u, v, value):
self.u = u
self.v = v
self.value = value
class Graph:
def __init__(self):
self.edges = []
self.incoming_edges = []
        self.graph = dict()           # vertex -> indices into self.edges (outgoing edges)
        self.incoming_graph = dict()  # vertex -> indices into self.incoming_edges (incoming edges)
def add_edge(self, from_, to, value):
edge = Edge(from_, to, value)
if self.graph.get(from_) is not None:
self.graph[from_].append(len(self.edges))
else:
self.graph[from_] = [len(self.edges)]
if self.graph.get(to) is None:
self.graph[to] = []
self.edges.append(edge)
if self.incoming_graph.get(to) is not None:
self.incoming_graph[to].append(len(self.incoming_edges))
else:
self.incoming_graph[to] = [len(self.incoming_edges)]
if self.incoming_graph.get(from_) is None:
self.incoming_graph[from_] = []
self.incoming_edges.append(edge)
def size(self):
return len(self.graph)
def get_ids(self, from_):
return self.graph[from_]
def get_incoming_ids(self, to):
return self.incoming_graph[to]
def get_edge(self, id):
return self.edges[id]
def get_incoming_edge(self, id):
return self.incoming_edges[id]
def read_data():
n = 400
reads = []
for i in range(n):
reads.append(input())
return reads
def get_graph(reads):
edge_count = len(reads)
graph = Graph()
for read in reads:
u, v, value = read[:-1], read[1:], read
graph.add_edge(u, v, value)
return graph, edge_count, graph.size()
def check_if_balanced(graph):
for k in graph.graph.keys():
out_ids = graph.get_ids(k)
in_ids = graph.get_incoming_ids(k)
if len(out_ids) != len(in_ids):
return False
return True
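# For example, generate_new_reads(["ATGCA"], 3) returns the distinct 3-mers
# "ATG", "TGC", "GCA" as a list (order is arbitrary because of the set()).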
def generate_new_reads(reads, k):
new_reads = []
for read in reads:
for s in range(len(read) - (k - 1)):
new_read = read[s:][:k]
new_reads.append(new_read)
return list(set(new_reads))
reads = read_data()
graph, edge_count, vertex_count = get_graph(reads)
k = len(reads[0])
while not check_if_balanced(graph):
k -= 1
reads = generate_new_reads(reads, k)
graph, edge_count, vertex_count = get_graph(reads)
print(k) | mit | -6,686,848,352,147,527,000 | 23.122449 | 68 | 0.561151 | false |
GiladE/birde | venv/lib/python2.7/site-packages/psycopg2/tests/test_cancel.py | 62 | 3705 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# test_cancel.py - unit test for query cancellation
#
# Copyright (C) 2010-2011 Jan Urbański <[email protected]>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import threading
import psycopg2
import psycopg2.extensions
from psycopg2 import extras
from testconfig import dsn
from testutils import unittest, ConnectingTestCase, skip_before_postgres
class CancelTests(ConnectingTestCase):
def setUp(self):
ConnectingTestCase.setUp(self)
cur = self.conn.cursor()
cur.execute('''
CREATE TEMPORARY TABLE table1 (
id int PRIMARY KEY
)''')
self.conn.commit()
def test_empty_cancel(self):
self.conn.cancel()
@skip_before_postgres(8, 2)
def test_cancel(self):
errors = []
def neverending(conn):
cur = conn.cursor()
try:
self.assertRaises(psycopg2.extensions.QueryCanceledError,
cur.execute, "select pg_sleep(60)")
# make sure the connection still works
conn.rollback()
cur.execute("select 1")
self.assertEqual(cur.fetchall(), [(1, )])
except Exception, e:
errors.append(e)
raise
def canceller(conn):
cur = conn.cursor()
try:
conn.cancel()
except Exception, e:
errors.append(e)
raise
thread1 = threading.Thread(target=neverending, args=(self.conn, ))
# wait a bit to make sure that the other thread is already in
# pg_sleep -- ugly and racy, but the chances are ridiculously low
thread2 = threading.Timer(0.3, canceller, args=(self.conn, ))
thread1.start()
thread2.start()
thread1.join()
thread2.join()
self.assertEqual(errors, [])
@skip_before_postgres(8, 2)
def test_async_cancel(self):
async_conn = psycopg2.connect(dsn, async=True)
self.assertRaises(psycopg2.OperationalError, async_conn.cancel)
extras.wait_select(async_conn)
cur = async_conn.cursor()
cur.execute("select pg_sleep(10000)")
self.assertTrue(async_conn.isexecuting())
async_conn.cancel()
self.assertRaises(psycopg2.extensions.QueryCanceledError,
extras.wait_select, async_conn)
cur.execute("select 1")
extras.wait_select(async_conn)
self.assertEqual(cur.fetchall(), [(1, )])
def test_async_connection_cancel(self):
async_conn = psycopg2.connect(dsn, async=True)
async_conn.close()
self.assertTrue(async_conn.closed)
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
| mit | 491,531,503,256,006,460 | 32.369369 | 74 | 0.634449 | false |
romankagan/DDBWorkbench | python/lib/Lib/site-packages/django/template/loaders/filesystem.py | 229 | 2358 | """
Wrapper for loading templates from the filesystem.
"""
from django.conf import settings
from django.template.base import TemplateDoesNotExist
from django.template.loader import BaseLoader
from django.utils._os import safe_join
class Loader(BaseLoader):
is_usable = True
def get_template_sources(self, template_name, template_dirs=None):
"""
Returns the absolute paths to "template_name", when appended to each
directory in "template_dirs". Any paths that don't lie inside one of the
template dirs are excluded from the result set, for security reasons.
"""
if not template_dirs:
template_dirs = settings.TEMPLATE_DIRS
for template_dir in template_dirs:
try:
yield safe_join(template_dir, template_name)
except UnicodeDecodeError:
# The template dir name was a bytestring that wasn't valid UTF-8.
raise
except ValueError:
# The joined path was located outside of this particular
# template_dir (it might be inside another one, so this isn't
# fatal).
pass
def load_template_source(self, template_name, template_dirs=None):
tried = []
for filepath in self.get_template_sources(template_name, template_dirs):
try:
file = open(filepath)
try:
return (file.read().decode(settings.FILE_CHARSET), filepath)
finally:
file.close()
except IOError:
tried.append(filepath)
if tried:
error_msg = "Tried %s" % tried
else:
error_msg = "Your TEMPLATE_DIRS setting is empty. Change it to point to at least one template directory."
raise TemplateDoesNotExist(error_msg)
load_template_source.is_usable = True
_loader = Loader()
def load_template_source(template_name, template_dirs=None):
# For backwards compatibility
import warnings
warnings.warn(
"'django.template.loaders.filesystem.load_template_source' is deprecated; use 'django.template.loaders.filesystem.Loader' instead.",
DeprecationWarning
)
return _loader.load_template_source(template_name, template_dirs)
load_template_source.is_usable = True
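# Minimal usage sketch (assumes TEMPLATE_DIRS is configured in settings; the
# template name below is hypothetical):
#
#   loader = Loader()
#   try:
#       source, origin = loader.load_template_source('index.html')
#   except TemplateDoesNotExist:
#       pass  # 'index.html' was not found under any directory in TEMPLATE_DIRS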
| apache-2.0 | 7,047,290,332,778,797,000 | 37.655738 | 140 | 0.630619 | false |
Justin-Yuan/Image2Music-Generator | library/jython2.5.3/Lib/test/test_os_jy.py | 5 | 1551 | """Misc os module tests
Made for Jython.
"""
import os
import unittest
from test import test_support
class OSTestCase(unittest.TestCase):
def setUp(self):
open(test_support.TESTFN, 'w').close()
def tearDown(self):
if os.path.exists(test_support.TESTFN):
os.remove(test_support.TESTFN)
def test_issue1727(self):
os.stat(*(test_support.TESTFN,))
def test_issue1755(self):
os.remove(test_support.TESTFN)
self.assertRaises(OSError, os.utime, test_support.TESTFN, None)
def test_issue1824(self):
os.remove(test_support.TESTFN)
self.assertRaises(OSError, os.link,
test_support.TESTFN, test_support.TESTFN)
def test_issue1825(self):
os.remove(test_support.TESTFN)
testfnu = unicode(test_support.TESTFN)
try:
os.open(testfnu, os.O_RDONLY)
except OSError, e:
self.assertTrue(isinstance(e.filename, unicode))
self.assertEqual(e.filename, testfnu)
else:
self.assertTrue(False)
# XXX: currently fail
#for fn in os.chdir, os.listdir, os.rmdir:
for fn in (os.rmdir,):
try:
fn(testfnu)
except OSError, e:
self.assertTrue(isinstance(e.filename, unicode))
self.assertEqual(e.filename, testfnu)
else:
self.assertTrue(False)
def test_main():
test_support.run_unittest(OSTestCase)
if __name__ == '__main__':
test_main()
| gpl-2.0 | -874,241,020,285,828,500 | 25.741379 | 71 | 0.589942 | false |
webOS-ports/qtwebkit | Tools/Scripts/webkitpy/common/system/executive_mock.py | 117 | 7106 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import StringIO
from webkitpy.common.system.executive import ScriptError
_log = logging.getLogger(__name__)
class MockProcess(object):
def __init__(self, stdout='MOCK STDOUT\n', stderr=''):
self.pid = 42
self.stdout = StringIO.StringIO(stdout)
self.stderr = StringIO.StringIO(stderr)
self.stdin = StringIO.StringIO()
self.returncode = 0
def wait(self):
return
# FIXME: This should be unified with MockExecutive2
class MockExecutive(object):
PIPE = "MOCK PIPE"
STDOUT = "MOCK STDOUT"
@staticmethod
def ignore_error(error):
pass
def __init__(self, should_log=False, should_throw=False, should_throw_when_run=None):
self._should_log = should_log
self._should_throw = should_throw
self._should_throw_when_run = should_throw_when_run or set()
# FIXME: Once executive wraps os.getpid() we can just use a static pid for "this" process.
self._running_pids = {'test-webkitpy': os.getpid()}
self._proc = None
self.calls = []
self.pid_to_system_pid = {}
def check_running_pid(self, pid):
return pid in self._running_pids.values()
def running_pids(self, process_name_filter):
running_pids = []
for process_name, process_pid in self._running_pids.iteritems():
if process_name_filter(process_name):
running_pids.append(process_pid)
_log.info("MOCK running_pids: %s" % running_pids)
return running_pids
def run_and_throw_if_fail(self, args, quiet=False, cwd=None, env=None):
if self._should_log:
env_string = ""
if env:
env_string = ", env=%s" % env
_log.info("MOCK run_and_throw_if_fail: %s, cwd=%s%s" % (args, cwd, env_string))
if self._should_throw_when_run.intersection(args):
raise ScriptError("Exception for %s" % args, output="MOCK command output")
return "MOCK output of child process"
def command_for_printing(self, args):
string_args = map(unicode, args)
return " ".join(string_args)
def run_command(self,
args,
cwd=None,
input=None,
error_handler=None,
return_exit_code=False,
return_stderr=True,
decode_output=False,
env=None):
self.calls.append(args)
assert(isinstance(args, list) or isinstance(args, tuple))
if self._should_log:
env_string = ""
if env:
env_string = ", env=%s" % env
input_string = ""
if input:
input_string = ", input=%s" % input
_log.info("MOCK run_command: %s, cwd=%s%s%s" % (args, cwd, env_string, input_string))
output = "MOCK output of child process"
if self._should_throw_when_run.intersection(args):
raise ScriptError("Exception for %s" % args, output="MOCK command output")
if self._should_throw:
raise ScriptError("MOCK ScriptError", output=output)
return output
def cpu_count(self):
return 2
def kill_all(self, process_name):
pass
def kill_process(self, pid):
pass
def popen(self, args, cwd=None, env=None, **kwargs):
self.calls.append(args)
if self._should_log:
cwd_string = ""
if cwd:
cwd_string = ", cwd=%s" % cwd
env_string = ""
if env:
env_string = ", env=%s" % env
_log.info("MOCK popen: %s%s%s" % (args, cwd_string, env_string))
if not self._proc:
self._proc = MockProcess()
return self._proc
def run_in_parallel(self, commands):
num_previous_calls = len(self.calls)
command_outputs = []
for cmd_line, cwd in commands:
command_outputs.append([0, self.run_command(cmd_line, cwd=cwd), ''])
new_calls = self.calls[num_previous_calls:]
self.calls = self.calls[:num_previous_calls]
self.calls.append(new_calls)
return command_outputs
class MockExecutive2(MockExecutive):
"""MockExecutive2 is like MockExecutive except it doesn't log anything."""
def __init__(self, output='', exit_code=0, exception=None, run_command_fn=None, stderr=''):
self._output = output
self._stderr = stderr
self._exit_code = exit_code
self._exception = exception
self._run_command_fn = run_command_fn
self.calls = []
def run_command(self,
args,
cwd=None,
input=None,
error_handler=None,
return_exit_code=False,
return_stderr=True,
decode_output=False,
env=None):
self.calls.append(args)
assert(isinstance(args, list) or isinstance(args, tuple))
if self._exception:
raise self._exception # pylint: disable=E0702
if self._run_command_fn:
return self._run_command_fn(args)
if return_exit_code:
return self._exit_code
if self._exit_code and error_handler:
script_error = ScriptError(script_args=args, exit_code=self._exit_code, output=self._output)
error_handler(script_error)
if return_stderr:
return self._output + self._stderr
return self._output
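# Minimal usage sketch (hypothetical test code):
#
#   executive = MockExecutive2(output='MOCK output', exit_code=0)
#   executive.run_command(['echo', 'hi'])   # returns 'MOCK output'
#   assert executive.calls == [['echo', 'hi']]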
| gpl-2.0 | 4,018,812,982,827,541,000 | 36.010417 | 104 | 0.609203 | false |
RaoUmer/django | django/contrib/localflavor/si/si_postalcodes.py | 110 | 13147 | # *-* coding: utf-8 *-*
from __future__ import unicode_literals
SI_POSTALCODES = [
(1000, 'Ljubljana'),
(1215, 'Medvode'),
(1216, 'Smlednik'),
(1217, 'Vodice'),
(1218, 'Komenda'),
(1219, 'Laze v Tuhinju'),
(1221, 'Motnik'),
(1222, 'Trojane'),
(1223, 'Blagovica'),
(1225, 'Lukovica'),
(1230, 'Dom\u017eale'),
(1233, 'Dob'),
(1234, 'Menge\u0161'),
(1235, 'Radomlje'),
(1236, 'Trzin'),
(1241, 'Kamnik'),
(1242, 'Stahovica'),
(1251, 'Morav\u010de'),
(1252, 'Va\u010de'),
(1262, 'Dol pri Ljubljani'),
(1270, 'Litija'),
(1272, 'Pol\u0161nik'),
(1273, 'Dole pri Litiji'),
(1274, 'Gabrovka'),
(1275, '\u0160martno pri Litiji'),
(1276, 'Primskovo'),
(1281, 'Kresnice'),
(1282, 'Sava'),
(1290, 'Grosuplje'),
(1291, '\u0160kofljica'),
(1292, 'Ig'),
(1293, '\u0160marje - Sap'),
(1294, 'Vi\u0161nja Gora'),
(1295, 'Ivan\u010dna Gorica'),
(1296, '\u0160entvid pri Sti\u010dni'),
(1301, 'Krka'),
(1303, 'Zagradec'),
(1310, 'Ribnica'),
(1311, 'Turjak'),
(1312, 'Videm - Dobrepolje'),
(1313, 'Struge'),
(1314, 'Rob'),
(1315, 'Velike La\u0161\u010de'),
(1316, 'Ortnek'),
(1317, 'Sodra\u017eica'),
(1318, 'Lo\u0161ki Potok'),
(1319, 'Draga'),
(1330, 'Ko\u010devje'),
(1331, 'Dolenja vas'),
(1332, 'Stara Cerkev'),
(1336, 'Kostel'),
(1337, 'Osilnica'),
(1338, 'Ko\u010devska Reka'),
(1351, 'Brezovica pri Ljubljani'),
(1352, 'Preserje'),
(1353, 'Borovnica'),
(1354, 'Horjul'),
(1355, 'Polhov Gradec'),
(1356, 'Dobrova'),
(1357, 'Notranje Gorice'),
(1358, 'Log pri Brezovici'),
(1360, 'Vrhnika'),
(1370, 'Logatec'),
(1372, 'Hotedr\u0161ica'),
(1373, 'Rovte'),
(1380, 'Cerknica'),
(1381, 'Rakek'),
(1382, 'Begunje pri Cerknici'),
(1384, 'Grahovo'),
(1385, 'Nova vas'),
(1386, 'Stari trg pri Lo\u017eu'),
(1410, 'Zagorje ob Savi'),
(1411, 'Izlake'),
(1412, 'Kisovec'),
(1413, '\u010cem\u0161enik'),
(1414, 'Podkum'),
(1420, 'Trbovlje'),
(1423, 'Dobovec'),
(1430, 'Hrastnik'),
(1431, 'Dol pri Hrastniku'),
(1432, 'Zidani Most'),
(1433, 'Rade\u010de'),
(1434, 'Loka pri Zidanem Mostu'),
(2000, 'Maribor'),
(2201, 'Zgornja Kungota'),
(2204, 'Miklav\u017e na Dravskem polju'),
(2205, 'Star\u0161e'),
(2206, 'Marjeta na Dravskem polju'),
(2208, 'Pohorje'),
(2211, 'Pesnica pri Mariboru'),
(2212, '\u0160entilj v Slovenskih goricah'),
(2213, 'Zgornja Velka'),
(2214, 'Sladki vrh'),
(2215, 'Cer\u0161ak'),
(2221, 'Jarenina'),
(2222, 'Jakobski Dol'),
(2223, 'Jurovski Dol'),
(2229, 'Male\u010dnik'),
(2230, 'Lenart v Slovenskih goricah'),
(2231, 'Pernica'),
(2232, 'Voli\u010dina'),
(2233, 'Sveta Ana v Slovenskih goricah'),
(2234, 'Benedikt'),
(2235, 'Sveta Trojica v Slovenskih goricah'),
(2236, 'Cerkvenjak'),
(2241, 'Spodnji Duplek'),
(2242, 'Zgornja Korena'),
(2250, 'Ptuj'),
(2252, 'Dornava'),
(2253, 'Destrnik'),
(2254, 'Trnovska vas'),
(2255, 'Vitomarci'),
(2256, 'Jur\u0161inci'),
(2257, 'Polen\u0161ak'),
(2258, 'Sveti Toma\u017e'),
(2259, 'Ivanjkovci'),
(2270, 'Ormo\u017e'),
(2272, 'Gori\u0161nica'),
(2273, 'Podgorci'),
(2274, 'Velika Nedelja'),
(2275, 'Miklav\u017e pri Ormo\u017eu'),
(2276, 'Kog'),
(2277, 'Sredi\u0161\u010de ob Dravi'),
(2281, 'Markovci'),
(2282, 'Cirkulane'),
(2283, 'Zavr\u010d'),
(2284, 'Videm pri Ptuju'),
(2285, 'Zgornji Leskovec'),
(2286, 'Podlehnik'),
(2287, '\u017detale'),
(2288, 'Hajdina'),
(2289, 'Stoperce'),
(2310, 'Slovenska Bistrica'),
(2311, 'Ho\u010de'),
(2312, 'Orehova vas'),
(2313, 'Fram'),
(2314, 'Zgornja Polskava'),
(2315, '\u0160martno na Pohorju'),
(2316, 'Zgornja Lo\u017enica'),
(2317, 'Oplotnica'),
(2318, 'Laporje'),
(2319, 'Polj\u010dane'),
(2321, 'Makole'),
(2322, 'Maj\u0161perk'),
(2323, 'Ptujska Gora'),
(2324, 'Lovrenc na Dravskem polju'),
(2325, 'Kidri\u010devo'),
(2326, 'Cirkovce'),
(2327, 'Ra\u010de'),
(2331, 'Pragersko'),
(2341, 'Limbu\u0161'),
(2342, 'Ru\u0161e'),
(2343, 'Fala'),
(2344, 'Lovrenc na Pohorju'),
(2345, 'Bistrica ob Dravi'),
(2351, 'Kamnica'),
(2352, 'Selnica ob Dravi'),
(2353, 'Sv. Duh na Ostrem Vrhu'),
(2354, 'Bresternica'),
(2360, 'Radlje ob Dravi'),
(2361, 'O\u017ebalt'),
(2362, 'Kapla'),
(2363, 'Podvelka'),
(2364, 'Ribnica na Pohorju'),
(2365, 'Vuhred'),
(2366, 'Muta'),
(2367, 'Vuzenica'),
(2370, 'Dravograd'),
(2371, 'Trbonje'),
(2372, 'Libeli\u010de'),
(2373, '\u0160entjan\u017e pri Dravogradu'),
(2380, 'Slovenj Gradec'),
(2381, 'Podgorje pri Slovenj Gradcu'),
(2382, 'Mislinja'),
(2383, '\u0160martno pri Slovenj Gradcu'),
(2390, 'Ravne na Koro\u0161kem'),
(2391, 'Prevalje'),
(2392, 'Me\u017eica'),
(2393, '\u010crna na Koro\u0161kem'),
(2394, 'Kotlje'),
(3000, 'Celje'),
(3201, '\u0160martno v Ro\u017eni dolini'),
(3202, 'Ljube\u010dna'),
(3203, 'Nova Cerkev'),
(3204, 'Dobrna'),
(3205, 'Vitanje'),
(3206, 'Stranice'),
(3210, 'Slovenske Konjice'),
(3211, '\u0160kofja vas'),
(3212, 'Vojnik'),
(3213, 'Frankolovo'),
(3214, 'Zre\u010de'),
(3215, 'Lo\u010de'),
(3220, '\u0160tore'),
(3221, 'Teharje'),
(3222, 'Dramlje'),
(3223, 'Loka pri \u017dusmu'),
(3224, 'Dobje pri Planini'),
(3225, 'Planina pri Sevnici'),
(3230, '\u0160entjur'),
(3231, 'Grobelno'),
(3232, 'Ponikva'),
(3233, 'Kalobje'),
(3240, '\u0160marje pri Jel\u0161ah'),
(3241, 'Podplat'),
(3250, 'Roga\u0161ka Slatina'),
(3252, 'Rogatec'),
(3253, 'Pristava pri Mestinju'),
(3254, 'Pod\u010detrtek'),
(3255, 'Bu\u010de'),
(3256, 'Bistrica ob Sotli'),
(3257, 'Podsreda'),
(3260, 'Kozje'),
(3261, 'Lesi\u010dno'),
(3262, 'Prevorje'),
(3263, 'Gorica pri Slivnici'),
(3264, 'Sveti \u0160tefan'),
(3270, 'La\u0161ko'),
(3271, '\u0160entrupert'),
(3272, 'Rimske Toplice'),
(3273, 'Jurklo\u0161ter'),
(3301, 'Petrov\u010de'),
(3302, 'Gri\u017ee'),
(3303, 'Gomilsko'),
(3304, 'Tabor'),
(3305, 'Vransko'),
(3310, '\u017dalec'),
(3311, '\u0160empeter v Savinjski dolini'),
(3312, 'Prebold'),
(3313, 'Polzela'),
(3314, 'Braslov\u010de'),
(3320, 'Velenje - dostava'),
(3322, 'Velenje - po\u0161tni predali'),
(3325, '\u0160o\u0161tanj'),
(3326, 'Topol\u0161ica'),
(3327, '\u0160martno ob Paki'),
(3330, 'Mozirje'),
(3331, 'Nazarje'),
(3332, 'Re\u010dica ob Savinji'),
(3333, 'Ljubno ob Savinji'),
(3334, 'Lu\u010de'),
(3335, 'Sol\u010dava'),
(3341, '\u0160martno ob Dreti'),
(3342, 'Gornji Grad'),
(4000, 'Kranj'),
(4201, 'Zgornja Besnica'),
(4202, 'Naklo'),
(4203, 'Duplje'),
(4204, 'Golnik'),
(4205, 'Preddvor'),
(4206, 'Zgornje Jezersko'),
(4207, 'Cerklje na Gorenjskem'),
(4208, '\u0160en\u010dur'),
(4209, '\u017dabnica'),
(4210, 'Brnik - aerodrom'),
(4211, 'Mav\u010di\u010de'),
(4212, 'Visoko'),
(4220, '\u0160kofja Loka'),
(4223, 'Poljane nad \u0160kofjo Loko'),
(4224, 'Gorenja vas'),
(4225, 'Sovodenj'),
(4226, '\u017diri'),
(4227, 'Selca'),
(4228, '\u017delezniki'),
(4229, 'Sorica'),
(4240, 'Radovljica'),
(4243, 'Brezje'),
(4244, 'Podnart'),
(4245, 'Kropa'),
(4246, 'Kamna Gorica'),
(4247, 'Zgornje Gorje'),
(4248, 'Lesce'),
(4260, 'Bled'),
(4263, 'Bohinjska Bela'),
(4264, 'Bohinjska Bistrica'),
(4265, 'Bohinjsko jezero'),
(4267, 'Srednja vas v Bohinju'),
(4270, 'Jesenice'),
(4273, 'Blejska Dobrava'),
(4274, '\u017dirovnica'),
(4275, 'Begunje na Gorenjskem'),
(4276, 'Hru\u0161ica'),
(4280, 'Kranjska Gora'),
(4281, 'Mojstrana'),
(4282, 'Gozd Martuljek'),
(4283, 'Rate\u010de - Planica'),
(4290, 'Tr\u017ei\u010d'),
(4294, 'Kri\u017ee'),
(5000, 'Nova Gorica'),
(5210, 'Deskle'),
(5211, 'Kojsko'),
(5212, 'Dobrovo v Brdih'),
(5213, 'Kanal'),
(5214, 'Kal nad Kanalom'),
(5215, 'Ro\u010dinj'),
(5216, 'Most na So\u010di'),
(5220, 'Tolmin'),
(5222, 'Kobarid'),
(5223, 'Breginj'),
(5224, 'Srpenica'),
(5230, 'Bovec'),
(5231, 'Log pod Mangartom'),
(5232, 'So\u010da'),
(5242, 'Grahovo ob Ba\u010di'),
(5243, 'Podbrdo'),
(5250, 'Solkan'),
(5251, 'Grgar'),
(5252, 'Trnovo pri Gorici'),
(5253, '\u010cepovan'),
(5261, '\u0160empas'),
(5262, '\u010crni\u010de'),
(5263, 'Dobravlje'),
(5270, 'Ajdov\u0161\u010dina'),
(5271, 'Vipava'),
(5272, 'Podnanos'),
(5273, 'Col'),
(5274, '\u010crni Vrh nad Idrijo'),
(5275, 'Godovi\u010d'),
(5280, 'Idrija'),
(5281, 'Spodnja Idrija'),
(5282, 'Cerkno'),
(5283, 'Slap ob Idrijci'),
(5290, '\u0160empeter pri Gorici'),
(5291, 'Miren'),
(5292, 'Ren\u010de'),
(5293, 'Vol\u010dja Draga'),
(5294, 'Dornberk'),
(5295, 'Branik'),
(5296, 'Kostanjevica na Krasu'),
(5297, 'Prva\u010dina'),
(6000, 'Koper'),
(6210, 'Se\u017eana'),
(6215, 'Diva\u010da'),
(6216, 'Podgorje'),
(6217, 'Vremski Britof'),
(6219, 'Lokev'),
(6221, 'Dutovlje'),
(6222, '\u0160tanjel'),
(6223, 'Komen'),
(6224, 'Seno\u017ee\u010de'),
(6225, 'Hru\u0161evje'),
(6230, 'Postojna'),
(6232, 'Planina'),
(6240, 'Kozina'),
(6242, 'Materija'),
(6243, 'Obrov'),
(6244, 'Podgrad'),
(6250, 'Ilirska Bistrica'),
(6251, 'Ilirska Bistrica - Trnovo'),
(6253, 'Kne\u017eak'),
(6254, 'Jel\u0161ane'),
(6255, 'Prem'),
(6256, 'Ko\u0161ana'),
(6257, 'Pivka'),
(6258, 'Prestranek'),
(6271, 'Dekani'),
(6272, 'Gra\u010di\u0161\u010de'),
(6273, 'Marezige'),
(6274, '\u0160marje'),
(6275, '\u010crni Kal'),
(6276, 'Pobegi'),
(6280, 'Ankaran - Ancarano'),
(6281, '\u0160kofije'),
(6310, 'Izola - Isola'),
(6320, 'Portoro\u017e - Portorose'),
(6330, 'Piran - Pirano'),
(6333, 'Se\u010dovlje - Sicciole'),
(8000, 'Novo mesto'),
(8210, 'Trebnje'),
(8211, 'Dobrni\u010d'),
(8212, 'Velika Loka'),
(8213, 'Veliki Gaber'),
(8216, 'Mirna Pe\u010d'),
(8220, '\u0160marje\u0161ke Toplice'),
(8222, 'Oto\u010dec'),
(8230, 'Mokronog'),
(8231, 'Trebelno'),
(8232, '\u0160entrupert'),
(8233, 'Mirna'),
(8250, 'Bre\u017eice'),
(8251, '\u010cate\u017e ob Savi'),
(8253, 'Arti\u010de'),
(8254, 'Globoko'),
(8255, 'Pi\u0161ece'),
(8256, 'Sromlje'),
(8257, 'Dobova'),
(8258, 'Kapele'),
(8259, 'Bizeljsko'),
(8261, 'Jesenice na Dolenjskem'),
(8262, 'Kr\u0161ka vas'),
(8263, 'Cerklje ob Krki'),
(8270, 'Kr\u0161ko'),
(8272, 'Zdole'),
(8273, 'Leskovec pri Kr\u0161kem'),
(8274, 'Raka'),
(8275, '\u0160kocjan'),
(8276, 'Bu\u010dka'),
(8280, 'Brestanica'),
(8281, 'Senovo'),
(8282, 'Koprivnica'),
(8283, 'Blanca'),
(8290, 'Sevnica'),
(8292, 'Zabukovje'),
(8293, 'Studenec'),
(8294, 'Bo\u0161tanj'),
(8295, 'Tr\u017ei\u0161\u010de'),
(8296, 'Krmelj'),
(8297, '\u0160entjan\u017e'),
(8310, '\u0160entjernej'),
(8311, 'Kostanjevica na Krki'),
(8312, 'Podbo\u010dje'),
(8321, 'Brusnice'),
(8322, 'Stopi\u010de'),
(8323, 'Ur\u0161na sela'),
(8330, 'Metlika'),
(8331, 'Suhor'),
(8332, 'Gradac'),
(8333, 'Semi\u010d'),
(8340, '\u010crnomelj'),
(8341, 'Adle\u0161i\u010di'),
(8342, 'Stari trg ob Kolpi'),
(8343, 'Dragatu\u0161'),
(8344, 'Vinica pri \u010crnomlju'),
(8350, 'Dolenjske Toplice'),
(8351, 'Stra\u017ea'),
(8360, '\u017du\u017eemberk'),
(8361, 'Dvor'),
(8362, 'Hinje'),
(9000, 'Murska Sobota'),
(9201, 'Puconci'),
(9202, 'Ma\u010dkovci'),
(9203, 'Petrovci'),
(9204, '\u0160alovci'),
(9205, 'Hodo\u0161 - Hodos'),
(9206, 'Kri\u017eevci'),
(9207, 'Prosenjakovci - Partosfalva'),
(9208, 'Fokovci'),
(9220, 'Lendava - Lendva'),
(9221, 'Martjanci'),
(9222, 'Bogojina'),
(9223, 'Dobrovnik - Dobronak'),
(9224, 'Turni\u0161\u010de'),
(9225, 'Velika Polana'),
(9226, 'Moravske Toplice'),
(9227, 'Kobilje'),
(9231, 'Beltinci'),
(9232, '\u010cren\u0161ovci'),
(9233, 'Odranci'),
(9240, 'Ljutomer'),
(9241, 'Ver\u017eej'),
(9242, 'Kri\u017eevci pri Ljutomeru'),
(9243, 'Mala Nedelja'),
(9244, 'Sveti Jurij ob \u0160\u010davnici'),
(9245, 'Spodnji Ivanjci'),
(9250, 'Gornja Radgona'),
(9251, 'Ti\u0161ina'),
(9252, 'Radenci'),
(9253, 'Apa\u010de'),
(9261, 'Cankova'),
(9262, 'Roga\u0161ovci'),
(9263, 'Kuzma'),
(9264, 'Grad'),
(9265, 'Bodonci'),
]
SI_POSTALCODES_CHOICES = sorted(SI_POSTALCODES, key=lambda k: k[1])
| bsd-3-clause | -4,111,504,154,962,944,500 | 26.97234 | 67 | 0.537765 | false |
victorbriz/rethinkdb | external/v8_3.30.33.16/tools/run-tests.py | 33 | 22539 | #!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from collections import OrderedDict
import itertools
import multiprocessing
import optparse
import os
from os.path import join
import platform
import random
import shlex
import subprocess
import sys
import time
from testrunner.local import execution
from testrunner.local import progress
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.local import verbose
from testrunner.network import network_execution
from testrunner.objects import context
ARCH_GUESS = utils.DefaultArch()
DEFAULT_TESTS = [
"mjsunit",
"unittests",
"cctest",
"message",
"preparser",
]
# Map of test name synonyms to lists of test suites. Should be ordered by
# expected runtimes (suites with slow test cases first). These groups are
# invoked in seperate steps on the bots.
TEST_MAP = {
"default": [
"mjsunit",
"cctest",
"message",
"preparser",
],
"optimize_for_size": [
"mjsunit",
"cctest",
"webkit",
],
"unittests": [
"unittests",
],
}
TIMEOUT_DEFAULT = 60
TIMEOUT_SCALEFACTOR = {"debug" : 4,
"release" : 1 }
# Use this to run several variants of the tests.
VARIANT_FLAGS = {
"default": [],
"stress": ["--stress-opt", "--always-opt"],
"turbofan": ["--turbo-asm", "--turbo-filter=*", "--always-opt"],
"nocrankshaft": ["--nocrankshaft"]}
VARIANTS = ["default", "stress", "turbofan", "nocrankshaft"]
MODE_FLAGS = {
"debug" : ["--nohard-abort", "--nodead-code-elimination",
"--nofold-constants", "--enable-slow-asserts",
"--debug-code", "--verify-heap"],
"release" : ["--nohard-abort", "--nodead-code-elimination",
"--nofold-constants"]}
GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
"--concurrent-recompilation-queue-length=64",
"--concurrent-recompilation-delay=500",
"--concurrent-recompilation"]
SUPPORTED_ARCHS = ["android_arm",
"android_arm64",
"android_ia32",
"arm",
"ia32",
"x87",
"mips",
"mipsel",
"mips64el",
"nacl_ia32",
"nacl_x64",
"x64",
"x32",
"arm64"]
# Double the timeout for these:
SLOW_ARCHS = ["android_arm",
"android_arm64",
"android_ia32",
"arm",
"mips",
"mipsel",
"mips64el",
"nacl_ia32",
"nacl_x64",
"x87",
"arm64"]
def BuildOptions():
result = optparse.OptionParser()
result.add_option("--arch",
help=("The architecture to run tests for, "
"'auto' or 'native' for auto-detect"),
default="ia32,x64,arm")
result.add_option("--arch-and-mode",
help="Architecture and mode in the format 'arch.mode'",
default=None)
result.add_option("--asan",
help="Regard test expectations for ASAN",
default=False, action="store_true")
result.add_option("--buildbot",
help="Adapt to path structure used on buildbots",
default=False, action="store_true")
result.add_option("--cat", help="Print the source of the tests",
default=False, action="store_true")
result.add_option("--flaky-tests",
help="Regard tests marked as flaky (run|skip|dontcare)",
default="dontcare")
result.add_option("--slow-tests",
help="Regard slow tests (run|skip|dontcare)",
default="dontcare")
result.add_option("--pass-fail-tests",
help="Regard pass|fail tests (run|skip|dontcare)",
default="dontcare")
result.add_option("--gc-stress",
help="Switch on GC stress mode",
default=False, action="store_true")
result.add_option("--command-prefix",
help="Prepended to each shell command used to run a test",
default="")
result.add_option("--download-data", help="Download missing test suite data",
default=False, action="store_true")
result.add_option("--extra-flags",
help="Additional flags to pass to each test command",
default="")
result.add_option("--isolates", help="Whether to test isolates",
default=False, action="store_true")
result.add_option("-j", help="The number of parallel tasks to run",
default=0, type="int")
result.add_option("-m", "--mode",
help="The test modes in which to run (comma-separated)",
default="release,debug")
result.add_option("--no-i18n", "--noi18n",
help="Skip internationalization tests",
default=False, action="store_true")
result.add_option("--no-network", "--nonetwork",
help="Don't distribute tests on the network",
default=(utils.GuessOS() != "linux"),
dest="no_network", action="store_true")
result.add_option("--no-presubmit", "--nopresubmit",
help='Skip presubmit checks',
default=False, dest="no_presubmit", action="store_true")
result.add_option("--no-snap", "--nosnap",
help='Test a build compiled without snapshot.',
default=False, dest="no_snap", action="store_true")
result.add_option("--no-sorting", "--nosorting",
help="Don't sort tests according to duration of last run.",
default=False, dest="no_sorting", action="store_true")
result.add_option("--no-stress", "--nostress",
help="Don't run crankshaft --always-opt --stress-op test",
default=False, dest="no_stress", action="store_true")
result.add_option("--no-variants", "--novariants",
help="Don't run any testing variants",
default=False, dest="no_variants", action="store_true")
result.add_option("--variants",
help="Comma-separated list of testing variants")
result.add_option("--outdir", help="Base directory with compile output",
default="out")
result.add_option("--predictable",
help="Compare output of several reruns of each test",
default=False, action="store_true")
result.add_option("-p", "--progress",
help=("The style of progress indicator"
" (verbose, dots, color, mono)"),
choices=progress.PROGRESS_INDICATORS.keys(), default="mono")
result.add_option("--quickcheck", default=False, action="store_true",
help=("Quick check mode (skip slow/flaky tests)"))
result.add_option("--report", help="Print a summary of the tests to be run",
default=False, action="store_true")
result.add_option("--json-test-results",
help="Path to a file for storing json results.")
result.add_option("--rerun-failures-count",
help=("Number of times to rerun each failing test case. "
"Very slow tests will be rerun only once."),
default=0, type="int")
result.add_option("--rerun-failures-max",
help="Maximum number of failing test cases to rerun.",
default=100, type="int")
result.add_option("--shard-count",
help="Split testsuites into this number of shards",
default=1, type="int")
result.add_option("--shard-run",
help="Run this shard from the split up tests.",
default=1, type="int")
result.add_option("--shell", help="DEPRECATED! use --shell-dir", default="")
result.add_option("--shell-dir", help="Directory containing executables",
default="")
result.add_option("--dont-skip-slow-simulator-tests",
help="Don't skip more slow tests when using a simulator.",
default=False, action="store_true",
dest="dont_skip_simulator_slow_tests")
result.add_option("--stress-only",
help="Only run tests with --always-opt --stress-opt",
default=False, action="store_true")
result.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
result.add_option("-t", "--timeout", help="Timeout in seconds",
default= -1, type="int")
result.add_option("--tsan",
help="Regard test expectations for TSAN",
default=False, action="store_true")
result.add_option("-v", "--verbose", help="Verbose output",
default=False, action="store_true")
result.add_option("--valgrind", help="Run tests through valgrind",
default=False, action="store_true")
result.add_option("--warn-unused", help="Report unused rules",
default=False, action="store_true")
result.add_option("--junitout", help="File name of the JUnit output")
result.add_option("--junittestsuite",
help="The testsuite name in the JUnit output file",
default="v8tests")
result.add_option("--random-seed", default=0, dest="random_seed",
help="Default seed for initializing random generator")
result.add_option("--msan",
help="Regard test expectations for MSAN",
default=False, action="store_true")
return result
def ProcessOptions(options):
global VARIANT_FLAGS
global VARIANTS
# Architecture and mode related stuff.
if options.arch_and_mode:
options.arch_and_mode = [arch_and_mode.split(".")
for arch_and_mode in options.arch_and_mode.split(",")]
options.arch = ",".join([tokens[0] for tokens in options.arch_and_mode])
options.mode = ",".join([tokens[1] for tokens in options.arch_and_mode])
options.mode = options.mode.split(",")
for mode in options.mode:
if not mode.lower() in ["debug", "release", "optdebug"]:
print "Unknown mode %s" % mode
return False
if options.arch in ["auto", "native"]:
options.arch = ARCH_GUESS
options.arch = options.arch.split(",")
for arch in options.arch:
if not arch in SUPPORTED_ARCHS:
print "Unknown architecture %s" % arch
return False
# Store the final configuration in arch_and_mode list. Don't overwrite
# predefined arch_and_mode since it is more expressive than arch and mode.
if not options.arch_and_mode:
options.arch_and_mode = itertools.product(options.arch, options.mode)
# Special processing of other options, sorted alphabetically.
if options.buildbot:
# Buildbots run presubmit tests as a separate step.
options.no_presubmit = True
options.no_network = True
if options.command_prefix:
print("Specifying --command-prefix disables network distribution, "
"running tests locally.")
options.no_network = True
options.command_prefix = shlex.split(options.command_prefix)
options.extra_flags = shlex.split(options.extra_flags)
if options.gc_stress:
options.extra_flags += GC_STRESS_FLAGS
if options.asan:
options.extra_flags.append("--invoke-weak-callbacks")
if options.tsan:
VARIANTS = ["default"]
suppressions_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'sanitizers', 'tsan_suppressions.txt')
tsan_options = '%s suppressions=%s' % (
os.environ.get('TSAN_OPTIONS', ''), suppressions_file)
os.environ['TSAN_OPTIONS'] = tsan_options
if options.j == 0:
options.j = multiprocessing.cpu_count()
while options.random_seed == 0:
options.random_seed = random.SystemRandom().randint(-2147483648, 2147483647)
def excl(*args):
"""Returns true if zero or one of multiple arguments are true."""
return reduce(lambda x, y: x + y, args) <= 1
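  # e.g. excl(True) and excl(False, False) are True; excl(True, True) is False.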
if not excl(options.no_stress, options.stress_only, options.no_variants,
bool(options.variants)):
print("Use only one of --no-stress, --stress-only, --no-variants, "
"or --variants.")
return False
if options.quickcheck:
VARIANTS = ["default", "stress"]
options.flaky_tests = "skip"
options.slow_tests = "skip"
options.pass_fail_tests = "skip"
if options.no_stress:
VARIANTS = ["default", "nocrankshaft"]
if options.no_variants:
VARIANTS = ["default"]
if options.stress_only:
VARIANTS = ["stress"]
if options.variants:
VARIANTS = options.variants.split(",")
if not set(VARIANTS).issubset(VARIANT_FLAGS.keys()):
print "All variants must be in %s" % str(VARIANT_FLAGS.keys())
return False
if options.predictable:
VARIANTS = ["default"]
options.extra_flags.append("--predictable")
options.extra_flags.append("--verify_predictable")
options.extra_flags.append("--no-inline-new")
if not options.shell_dir:
if options.shell:
print "Warning: --shell is deprecated, use --shell-dir instead."
options.shell_dir = os.path.dirname(options.shell)
if options.valgrind:
run_valgrind = os.path.join("tools", "run-valgrind.py")
# This is OK for distributed running, so we don't need to set no_network.
options.command_prefix = (["python", "-u", run_valgrind] +
options.command_prefix)
def CheckTestMode(name, option):
if not option in ["run", "skip", "dontcare"]:
print "Unknown %s mode %s" % (name, option)
return False
return True
if not CheckTestMode("flaky test", options.flaky_tests):
return False
if not CheckTestMode("slow test", options.slow_tests):
return False
if not CheckTestMode("pass|fail test", options.pass_fail_tests):
return False
if not options.no_i18n:
DEFAULT_TESTS.append("intl")
return True
def ShardTests(tests, shard_count, shard_run):
if shard_count < 2:
return tests
if shard_run < 1 or shard_run > shard_count:
print "shard-run not a valid number, should be in [1:shard-count]"
print "defaulting back to running all tests"
return tests
count = 0
shard = []
for test in tests:
if count % shard_count == shard_run - 1:
shard.append(test)
count += 1
return shard
def Main():
parser = BuildOptions()
(options, args) = parser.parse_args()
if not ProcessOptions(options):
parser.print_help()
return 1
exit_code = 0
workspace = os.path.abspath(join(os.path.dirname(sys.argv[0]), ".."))
if not options.no_presubmit:
print ">>> running presubmit tests"
exit_code = subprocess.call(
[sys.executable, join(workspace, "tools", "presubmit.py")])
suite_paths = utils.GetSuitePaths(join(workspace, "test"))
# Expand arguments with grouped tests. The args should reflect the list of
# suites as otherwise filters would break.
def ExpandTestGroups(name):
if name in TEST_MAP:
      return [suite for suite in TEST_MAP[name]]
else:
return [name]
args = reduce(lambda x, y: x + y,
[ExpandTestGroups(arg) for arg in args],
[])
if len(args) == 0:
suite_paths = [ s for s in DEFAULT_TESTS if s in suite_paths ]
else:
args_suites = OrderedDict() # Used as set
for arg in args:
args_suites[arg.split(os.path.sep)[0]] = True
suite_paths = [ s for s in args_suites if s in suite_paths ]
suites = []
for root in suite_paths:
suite = testsuite.TestSuite.LoadTestSuite(
os.path.join(workspace, "test", root))
if suite:
suites.append(suite)
if options.download_data:
for s in suites:
s.DownloadData()
for (arch, mode) in options.arch_and_mode:
try:
code = Execute(arch, mode, args, options, suites, workspace)
except KeyboardInterrupt:
return 2
exit_code = exit_code or code
return exit_code
def Execute(arch, mode, args, options, suites, workspace):
print(">>> Running tests for %s.%s" % (arch, mode))
shell_dir = options.shell_dir
if not shell_dir:
if options.buildbot:
shell_dir = os.path.join(workspace, options.outdir, mode)
mode = mode.lower()
else:
shell_dir = os.path.join(workspace, options.outdir,
"%s.%s" % (arch, mode))
shell_dir = os.path.relpath(shell_dir)
if mode == "optdebug":
mode = "debug" # "optdebug" is just an alias.
# Populate context object.
mode_flags = MODE_FLAGS[mode]
timeout = options.timeout
if timeout == -1:
# Simulators are slow, therefore allow a longer default timeout.
    if arch in SLOW_ARCHS:
      timeout = 2 * TIMEOUT_DEFAULT
    else:
      timeout = TIMEOUT_DEFAULT
timeout *= TIMEOUT_SCALEFACTOR[mode]
if options.predictable:
# Predictable mode is slower.
timeout *= 2
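  # Example: a debug build on arm (a SLOW_ARCH) gets 2 * 60 * 4 = 480 seconds,
  # doubled again to 960 seconds when --predictable is passed.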
ctx = context.Context(arch, mode, shell_dir,
mode_flags, options.verbose,
timeout, options.isolates,
options.command_prefix,
options.extra_flags,
options.no_i18n,
options.random_seed,
options.no_sorting,
options.rerun_failures_count,
options.rerun_failures_max,
options.predictable)
# TODO(all): Combine "simulator" and "simulator_run".
simulator_run = not options.dont_skip_simulator_slow_tests and \
arch in ['arm64', 'arm', 'mips'] and ARCH_GUESS and arch != ARCH_GUESS
# Find available test suites and read test cases from them.
variables = {
"arch": arch,
"asan": options.asan,
"deopt_fuzzer": False,
"gc_stress": options.gc_stress,
"isolates": options.isolates,
"mode": mode,
"no_i18n": options.no_i18n,
"no_snap": options.no_snap,
"simulator_run": simulator_run,
"simulator": utils.UseSimulator(arch),
"system": utils.GuessOS(),
"tsan": options.tsan,
"msan": options.msan,
}
all_tests = []
num_tests = 0
test_id = 0
for s in suites:
s.ReadStatusFile(variables)
s.ReadTestCases(ctx)
if len(args) > 0:
s.FilterTestCasesByArgs(args)
all_tests += s.tests
s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests,
options.slow_tests, options.pass_fail_tests)
if options.cat:
verbose.PrintTestSource(s.tests)
continue
variant_flags = [VARIANT_FLAGS[var] for var in VARIANTS]
s.tests = [ t.CopyAddingFlags(v)
for t in s.tests
for v in s.VariantFlags(t, variant_flags) ]
s.tests = ShardTests(s.tests, options.shard_count, options.shard_run)
num_tests += len(s.tests)
for t in s.tests:
t.id = test_id
test_id += 1
if options.cat:
return 0 # We're done here.
if options.report:
verbose.PrintReport(all_tests)
if num_tests == 0:
print "No tests to run."
return 0
# Run the tests, either locally or distributed on the network.
start_time = time.time()
progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
if options.junitout:
progress_indicator = progress.JUnitTestProgressIndicator(
progress_indicator, options.junitout, options.junittestsuite)
if options.json_test_results:
progress_indicator = progress.JsonTestProgressIndicator(
progress_indicator, options.json_test_results, arch, mode)
run_networked = not options.no_network
if not run_networked:
print("Network distribution disabled, running tests locally.")
elif utils.GuessOS() != "linux":
print("Network distribution is only supported on Linux, sorry!")
run_networked = False
peers = []
if run_networked:
peers = network_execution.GetPeers()
if not peers:
print("No connection to distribution server; running tests locally.")
run_networked = False
elif len(peers) == 1:
print("No other peers on the network; running tests locally.")
run_networked = False
elif num_tests <= 100:
print("Less than 100 tests, running them locally.")
run_networked = False
if run_networked:
runner = network_execution.NetworkedRunner(suites, progress_indicator,
ctx, peers, workspace)
else:
runner = execution.Runner(suites, progress_indicator, ctx)
exit_code = runner.Run(options.j)
overall_duration = time.time() - start_time
if options.time:
verbose.PrintTestDurations(suites, overall_duration)
return exit_code
if __name__ == "__main__":
sys.exit(Main())
| agpl-3.0 | -2,116,769,042,752,186 | 36.880672 | 80 | 0.613204 | false |
ahmadiga/min_edx | common/djangoapps/enrollment/views.py | 14 | 28709 | """
The Enrollment API Views should be simple, lean HTTP endpoints for API access. This should
consist primarily of authentication, request validation, and serialization.
"""
import logging
from django.core.exceptions import ObjectDoesNotExist
from django.utils.decorators import method_decorator
from opaque_keys import InvalidKeyError
from course_modes.models import CourseMode
from lms.djangoapps.commerce.utils import audit_log
from openedx.core.djangoapps.user_api.preferences.api import update_email_opt_in
from openedx.core.lib.api.permissions import ApiKeyHeaderPermission, ApiKeyHeaderPermissionIsAuthenticated
from rest_framework import status
from rest_framework.response import Response
from rest_framework.throttling import UserRateThrottle
from rest_framework.views import APIView
from opaque_keys.edx.keys import CourseKey
from embargo import api as embargo_api
from cors_csrf.authentication import SessionAuthenticationCrossDomainCsrf
from cors_csrf.decorators import ensure_csrf_cookie_cross_domain
from openedx.core.lib.api.authentication import (
SessionAuthenticationAllowInactiveUser,
OAuth2AuthenticationAllowInactiveUser,
)
from util.disable_rate_limit import can_disable_rate_limit
from enrollment import api
from enrollment.errors import (
CourseNotFoundError, CourseEnrollmentError,
CourseModeNotFoundError, CourseEnrollmentExistsError
)
from student.auth import user_has_role
from student.models import User
from student.roles import CourseStaffRole, GlobalStaff
log = logging.getLogger(__name__)
REQUIRED_ATTRIBUTES = {
"credit": ["credit:provider_id"],
}
class EnrollmentCrossDomainSessionAuth(SessionAuthenticationAllowInactiveUser, SessionAuthenticationCrossDomainCsrf):
"""Session authentication that allows inactive users and cross-domain requests. """
pass
class ApiKeyPermissionMixIn(object):
"""
This mixin is used to provide a convenience function for doing individual permission checks
for the presence of API keys.
"""
def has_api_key_permissions(self, request):
"""
Checks to see if the request was made by a server with an API key.
Args:
request (Request): the request being made into the view
Return:
True if the request has been made with a valid API key
False otherwise
"""
return ApiKeyHeaderPermission().has_permission(request, self)
class EnrollmentUserThrottle(UserRateThrottle, ApiKeyPermissionMixIn):
"""Limit the number of requests users can make to the enrollment API."""
rate = '40/minute'
def allow_request(self, request, view):
return self.has_api_key_permissions(request) or super(EnrollmentUserThrottle, self).allow_request(request, view)
@can_disable_rate_limit
class EnrollmentView(APIView, ApiKeyPermissionMixIn):
"""
**Use Case**
Get the user's enrollment status for a course.
**Example Request**
GET /api/enrollment/v1/enrollment/{username},{course_id}
**Response Values**
If the request for information about the user is successful, an HTTP 200 "OK" response
is returned.
The HTTP 200 response has the following values.
* course_details: A collection that includes the following
values.
* course_end: The date and time when the course closes. If
null, the course never ends.
* course_id: The unique identifier for the course.
* course_modes: An array of data about the enrollment modes
supported for the course. If the request uses the parameter
include_expired=1, the array also includes expired
enrollment modes.
Each enrollment mode collection includes the following
values.
* currency: The currency of the listed prices.
* description: A description of this mode.
* expiration_datetime: The date and time after which
users cannot enroll in the course in this mode.
* min_price: The minimum price for which a user can
enroll in this mode.
* name: The full name of the enrollment mode.
* slug: The short name for the enrollment mode.
* suggested_prices: A list of suggested prices for
this enrollment mode.
* course_end: The date and time at which the course closes. If
null, the course never ends.
* course_start: The date and time when the course opens. If
null, the course opens immediately when it is created.
* enrollment_end: The date and time after which users cannot
enroll for the course. If null, the enrollment period never
ends.
* enrollment_start: The date and time when users can begin
enrolling in the course. If null, enrollment opens
immediately when the course is created.
* invite_only: A value indicating whether students must be
invited to enroll in the course. Possible values are true or
false.
* created: The date the user account was created.
* is_active: Whether the enrollment is currently active.
* mode: The enrollment mode of the user in this course.
* user: The ID of the user.
"""
authentication_classes = OAuth2AuthenticationAllowInactiveUser, SessionAuthenticationAllowInactiveUser
permission_classes = ApiKeyHeaderPermissionIsAuthenticated,
throttle_classes = EnrollmentUserThrottle,
# Since the course about page on the marketing site uses this API to auto-enroll users,
# we need to support cross-domain CSRF.
@method_decorator(ensure_csrf_cookie_cross_domain)
def get(self, request, course_id=None, username=None):
"""Create, read, or update enrollment information for a user.
HTTP Endpoint for all CRUD operations for a user course enrollment. Allows creation, reading, and
updates of the current enrollment for a particular course.
Args:
request (Request): To get current course enrollment information, a GET request will return
information for the current user and the specified course.
course_id (str): URI element specifying the course location. Enrollment information will be
returned, created, or updated for this particular course.
username (str): The username associated with this enrollment request.
Return:
A JSON serialized representation of the course enrollment.
"""
username = username or request.user.username
# TODO Implement proper permissions
if request.user.username != username and not self.has_api_key_permissions(request) \
and not request.user.is_superuser:
# Return a 404 instead of a 403 (Unauthorized). If one user is looking up
# other users, do not let them deduce the existence of an enrollment.
return Response(status=status.HTTP_404_NOT_FOUND)
try:
return Response(api.get_enrollment(username, course_id))
except CourseEnrollmentError:
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={
"message": (
u"An error occurred while retrieving enrollments for user "
u"'{username}' in course '{course_id}'"
).format(username=username, course_id=course_id)
}
)
@can_disable_rate_limit
class EnrollmentCourseDetailView(APIView):
"""
**Use Case**
Get enrollment details for a course.
Response values include the course schedule and enrollment modes
supported by the course. Use the parameter include_expired=1 to
include expired enrollment modes in the response.
**Note:** Getting enrollment details for a course does not require
authentication.
**Example Requests**
GET /api/enrollment/v1/course/{course_id}
GET /api/enrollment/v1/course/{course_id}?include_expired=1
**Response Values**
If the request is successful, an HTTP 200 "OK" response is
returned along with a collection of course enrollments for the
user or for the newly created enrollment.
Each course enrollment contains the following values.
* course_end: The date and time when the course closes. If
null, the course never ends.
* course_id: The unique identifier for the course.
* course_modes: An array of data about the enrollment modes
supported for the course. If the request uses the parameter
include_expired=1, the array also includes expired
enrollment modes.
Each enrollment mode collection includes the following
values.
* currency: The currency of the listed prices.
* description: A description of this mode.
* expiration_datetime: The date and time after which
users cannot enroll in the course in this mode.
* min_price: The minimum price for which a user can
enroll in this mode.
* name: The full name of the enrollment mode.
* slug: The short name for the enrollment mode.
* suggested_prices: A list of suggested prices for
this enrollment mode.
* course_start: The date and time when the course opens. If
null, the course opens immediately when it is created.
* enrollment_end: The date and time after which users cannot
enroll for the course. If null, the enrollment period never
ends.
* enrollment_start: The date and time when users can begin
enrolling in the course. If null, enrollment opens
immediately when the course is created.
* invite_only: A value indicating whether students must be
invited to enroll in the course. Possible values are true or
false.
"""
authentication_classes = []
permission_classes = []
throttle_classes = EnrollmentUserThrottle,
def get(self, request, course_id=None):
"""Read enrollment information for a particular course.
HTTP Endpoint for retrieving course level enrollment information.
Args:
request (Request): To get current course enrollment information, a GET request will return
information for the specified course.
course_id (str): URI element specifying the course location. Enrollment information will be
returned.
Return:
A JSON serialized representation of the course enrollment details.
"""
try:
return Response(api.get_course_enrollment_details(course_id, bool(request.GET.get('include_expired', ''))))
except CourseNotFoundError:
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={
"message": (
u"No course found for course ID '{course_id}'"
).format(course_id=course_id)
}
)
@can_disable_rate_limit
class EnrollmentListView(APIView, ApiKeyPermissionMixIn):
"""
**Use Cases**
* Get a list of all course enrollments for the currently signed in user.
* Enroll the currently signed in user in a course.
Currently a user can use this command only to enroll the user in
honor mode. If honor mode is not supported for the course, the
request fails and returns the available modes.
This command can use a server-to-server call to enroll a user in
other modes, such as "verified", "professional", or "credit". If
the mode is not supported for the course, the request will fail
and return the available modes.
You can include other parameters as enrollment attributes for a
specific course mode. For example, for credit mode, you can
include the following parameters to specify the credit provider
attribute.
* namespace: credit
* name: provider_id
* value: institution_name
**Example Requests**
GET /api/enrollment/v1/enrollment
POST /api/enrollment/v1/enrollment {
"mode": "credit",
"course_details":{"course_id": "edX/DemoX/Demo_Course"},
"enrollment_attributes":[{"namespace": "credit","name": "provider_id","value": "hogwarts",},]
}
**POST Parameters**
A POST request can include the following parameters.
            * course_details: A collection that includes the following
              information.
                * course_id: The unique identifier for the course.
            * email_opt_in: Optional. A Boolean value that indicates whether
              the user wants to receive email from the organization that runs
              this course.
            * enrollment_attributes: A dictionary that contains the following
              values.
                * namespace: Namespace of the attribute
                * name: Name of the attribute
                * value: Value of the attribute
            * is_active: Optional. A Boolean value that indicates whether the
              enrollment is active. Only server-to-server requests can
              deactivate an enrollment.
            * mode: Optional. The course mode for the enrollment. Individual
              users cannot upgrade their enrollment mode from "honor". Only
              server-to-server requests can enroll with other modes.
            * user: Optional. The username of the currently logged in user.
              You cannot use the command to enroll a different user.
**GET Response Values**
If an unspecified error occurs when the user tries to obtain a
learner's enrollments, the request returns an HTTP 400 "Bad
Request" response.
If the user does not have permission to view enrollment data for
the requested learner, the request returns an HTTP 404 "Not Found"
response.
**POST Response Values**
If the user does not specify a course ID, the specified course
does not exist, or the is_active status is invalid, the request
returns an HTTP 400 "Bad Request" response.
If a user who is not an admin tries to upgrade a learner's course
mode, the request returns an HTTP 403 "Forbidden" response.
If the specified user does not exist, the request returns an HTTP
406 "Not Acceptable" response.
**GET and POST Response Values**
If the request is successful, an HTTP 200 "OK" response is
returned along with a collection of course enrollments for the
user or for the newly created enrollment.
Each course enrollment contains the following values.
* course_details: A collection that includes the following
values.
* course_end: The date and time when the course closes. If
null, the course never ends.
* course_id: The unique identifier for the course.
* course_modes: An array of data about the enrollment modes
supported for the course. If the request uses the parameter
include_expired=1, the array also includes expired
enrollment modes.
Each enrollment mode collection includes the following
values.
* currency: The currency of the listed prices.
* description: A description of this mode.
* expiration_datetime: The date and time after which users
cannot enroll in the course in this mode.
* min_price: The minimum price for which a user can enroll in
this mode.
* name: The full name of the enrollment mode.
* slug: The short name for the enrollment mode.
* suggested_prices: A list of suggested prices for this
enrollment mode.
* course_start: The date and time when the course opens. If
null, the course opens immediately when it is created.
* enrollment_end: The date and time after which users cannot
enroll for the course. If null, the enrollment period never
ends.
* enrollment_start: The date and time when users can begin
enrolling in the course. If null, enrollment opens
immediately when the course is created.
* invite_only: A value indicating whether students must be
invited to enroll in the course. Possible values are true or
false.
            * created: The date and time when the enrollment was created.
* is_active: Whether the enrollment is currently active.
* mode: The enrollment mode of the user in this course.
* user: The username of the user.
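        **Example GET Response** (illustrative only; all values below are
        hypothetical)
            [
                {
                    "created": "2014-01-18T00:22:26Z",
                    "mode": "honor",
                    "is_active": true,
                    "user": "some_learner",
                    "course_details": {
                        "course_id": "edX/DemoX/Demo_Course",
                        "course_start": "2013-02-05T05:00:00Z",
                        "course_end": "2013-06-05T04:30:00Z",
                        "enrollment_start": "2013-01-01T00:00:00Z",
                        "enrollment_end": "2013-03-05T00:00:00Z",
                        "invite_only": false,
                        "course_modes": [
                            {
                                "slug": "honor",
                                "name": "Honor Code Certificate",
                                "min_price": 0,
                                "suggested_prices": "",
                                "currency": "usd",
                                "expiration_datetime": null,
                                "description": null
                            }
                        ]
                    }
                }
            ]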
"""
authentication_classes = OAuth2AuthenticationAllowInactiveUser, EnrollmentCrossDomainSessionAuth
permission_classes = ApiKeyHeaderPermissionIsAuthenticated,
throttle_classes = EnrollmentUserThrottle,
# Since the course about page on the marketing site
# uses this API to auto-enroll users, we need to support
# cross-domain CSRF.
@method_decorator(ensure_csrf_cookie_cross_domain)
def get(self, request):
"""Gets a list of all course enrollments for a user.
Returns a list for the currently logged in user, or for the user named by the 'user' GET
parameter. If the username does not match that of the currently logged in user, only
courses for which the currently logged in user has the Staff or Admin role are listed.
As a result, a course team member can find out which of his or her own courses a particular
learner is enrolled in.
Only the Staff or Admin role (granted on the Django administrative console as the staff
or instructor permission) in individual courses gives the requesting user access to
enrollment data. Permissions granted at the organizational level do not give a user
access to enrollment data for all of that organization's courses.
Users who have the global staff permission can access all enrollment data for all
courses.
"""
username = request.GET.get('user', request.user.username)
try:
enrollment_data = api.get_enrollments(username)
except CourseEnrollmentError:
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={
"message": (
u"An error occurred while retrieving enrollments for user '{username}'"
).format(username=username)
}
)
if username == request.user.username or GlobalStaff().has_user(request.user) or \
self.has_api_key_permissions(request):
return Response(enrollment_data)
filtered_data = []
for enrollment in enrollment_data:
course_key = CourseKey.from_string(enrollment["course_details"]["course_id"])
if user_has_role(request.user, CourseStaffRole(course_key)):
filtered_data.append(enrollment)
return Response(filtered_data)
def post(self, request):
"""Enrolls the currently logged-in user in a course.
Server-to-server calls may deactivate or modify the mode of existing enrollments. All other requests
go through `add_enrollment()`, which allows creation of new and reactivation of old enrollments.
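        Example request body for a server-to-server mode change (illustrative
        only; the username, mode and course ID are hypothetical):
            {
                "user": "some_learner",
                "mode": "verified",
                "is_active": true,
                "course_details": {"course_id": "edX/DemoX/Demo_Course"}
            }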
"""
# Get the User, Course ID, and Mode from the request.
username = request.data.get('user', request.user.username)
course_id = request.data.get('course_details', {}).get('course_id')
if not course_id:
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={"message": u"Course ID must be specified to create a new enrollment."}
)
try:
course_id = CourseKey.from_string(course_id)
except InvalidKeyError:
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={
"message": u"No course '{course_id}' found for enrollment".format(course_id=course_id)
}
)
mode = request.data.get('mode', CourseMode.HONOR)
has_api_key_permissions = self.has_api_key_permissions(request)
# Check that the user specified is either the same user, or this is a server-to-server request.
if not username:
username = request.user.username
if username != request.user.username and not has_api_key_permissions:
            # Return a 404 instead of a 403 (Forbidden). If one user is looking up
# other users, do not let them deduce the existence of an enrollment.
return Response(status=status.HTTP_404_NOT_FOUND)
if mode != CourseMode.HONOR and not has_api_key_permissions:
return Response(
status=status.HTTP_403_FORBIDDEN,
data={
"message": u"User does not have permission to create enrollment with mode [{mode}].".format(
mode=mode
)
}
)
try:
# Lookup the user, instead of using request.user, since request.user may not match the username POSTed.
user = User.objects.get(username=username)
except ObjectDoesNotExist:
return Response(
status=status.HTTP_406_NOT_ACCEPTABLE,
data={
'message': u'The user {} does not exist.'.format(username)
}
)
embargo_response = embargo_api.get_embargo_response(request, course_id, user)
if embargo_response:
return embargo_response
try:
is_active = request.data.get('is_active')
# Check if the requested activation status is None or a Boolean
if is_active is not None and not isinstance(is_active, bool):
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={
'message': (u"'{value}' is an invalid enrollment activation status.").format(value=is_active)
}
)
enrollment_attributes = request.data.get('enrollment_attributes')
enrollment = api.get_enrollment(username, unicode(course_id))
mode_changed = enrollment and mode is not None and enrollment['mode'] != mode
active_changed = enrollment and is_active is not None and enrollment['is_active'] != is_active
missing_attrs = []
if enrollment_attributes:
actual_attrs = [
u"{namespace}:{name}".format(**attr)
for attr in enrollment_attributes
]
missing_attrs = set(REQUIRED_ATTRIBUTES.get(mode, [])) - set(actual_attrs)
if has_api_key_permissions and (mode_changed or active_changed):
if mode_changed and active_changed and not is_active:
# if the requester wanted to deactivate but specified the wrong mode, fail
# the request (on the assumption that the requester had outdated information
# about the currently active enrollment).
msg = u"Enrollment mode mismatch: active mode={}, requested mode={}. Won't deactivate.".format(
enrollment["mode"], mode
)
log.warning(msg)
return Response(status=status.HTTP_400_BAD_REQUEST, data={"message": msg})
if len(missing_attrs) > 0:
msg = u"Missing enrollment attributes: requested mode={} required attributes={}".format(
mode, REQUIRED_ATTRIBUTES.get(mode)
)
log.warning(msg)
return Response(status=status.HTTP_400_BAD_REQUEST, data={"message": msg})
response = api.update_enrollment(
username,
unicode(course_id),
mode=mode,
is_active=is_active,
enrollment_attributes=enrollment_attributes
)
else:
# Will reactivate inactive enrollments.
response = api.add_enrollment(username, unicode(course_id), mode=mode, is_active=is_active)
email_opt_in = request.data.get('email_opt_in', None)
if email_opt_in is not None:
org = course_id.org
update_email_opt_in(request.user, org, email_opt_in)
return Response(response)
except CourseModeNotFoundError as error:
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={
"message": (
u"The course mode '{mode}' is not available for course '{course_id}'."
).format(mode=mode, course_id=course_id),
"course_details": error.data
})
except CourseNotFoundError:
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={
"message": u"No course '{course_id}' found for enrollment".format(course_id=course_id)
}
)
except CourseEnrollmentExistsError as error:
return Response(data=error.enrollment)
except CourseEnrollmentError:
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={
"message": (
u"An error occurred while creating the new course enrollment for user "
u"'{username}' in course '{course_id}'"
).format(username=username, course_id=course_id)
}
)
finally:
# Assumes that the ecommerce service uses an API key to authenticate.
if has_api_key_permissions:
current_enrollment = api.get_enrollment(username, unicode(course_id))
audit_log(
'enrollment_change_requested',
course_id=unicode(course_id),
requested_mode=mode,
actual_mode=current_enrollment['mode'] if current_enrollment else None,
requested_activation=is_active,
actual_activation=current_enrollment['is_active'] if current_enrollment else None,
user_id=user.id
)
| agpl-3.0 | 5,582,908,258,552,373,000 | 42.697108 | 120 | 0.602947 | false |
jmcarp/osf.io | scripts/staff_public_regs.py | 25 | 1355 | # -*- coding: utf-8 -*-
"""Get public registrations for staff members.
python -m scripts.staff_public_regs
"""
from collections import defaultdict
import logging
from modularodm import Q
from website.models import Node, User
from website.app import init_app
logger = logging.getLogger('staff_public_regs')
STAFF_GUIDS = [
'jk5cv', # Jeff
'cdi38', # Brian
'edb8y', # Johanna
'hsey5', # Courtney
'5hdme', # Melissa
]
def main():
init_app(set_backends=True, routes=False)
staff_registrations = defaultdict(list)
users = [User.load(each) for each in STAFF_GUIDS]
for registration in Node.find(Q('is_registration', 'eq', True) & Q('is_public', 'eq', True)):
for user in users:
if registration in user.node__contributed:
staff_registrations[user._id].append(registration)
for uid in staff_registrations:
user = User.load(uid)
user_regs = staff_registrations[uid]
logger.info('{} ({}) on {} Public Registrations:'.format(
user.fullname,
user._id,
len(user_regs))
)
for registration in user_regs:
logger.info('\t{} ({}): {}'.format(registration.title,
registration._id,
registration.absolute_url)
)
if __name__ == '__main__':
main()
| apache-2.0 | 7,287,444,329,684,283,000 | 27.229167 | 97 | 0.597786 | false |
MarceloCorpucci/lettucetutorial | specs/glue/register_fields_steps.py | 1 | 2016 | __author__ = 'corpu'
from lettuce import step, world
import webtest
class RegisterFieldSteps():
#Background steps
@step(u'Dado que la aplicacion ACL esta en "([^"]*)"')
def dado_que_la_aplicacion_acl_esta_en_group1(step, url):
world.webApp = webtest.TestApp(url)
@step(u'Cuando accedo a la misma')
def cuando_accedo_a_la_misma(step):
world.response = world.webApp.get('/')
    # Registration option available
@step(u'Entonces debe aparecer en pantalla la opcion de registracion')
def entonces_debe_aparecer_en_pantalla_la_opcion_de_registracion(step):
dom = str(world.response.html)
assert dom.__contains__('<li><a href="/register/" id="register_btn">Register</a></li>') is True
    # Fields available for registration
@step(u'Entonces deben aparecer los campos de "([^"]*)", "([^"]*)", "([^"]*)"')
def entonces_deben_aparecer_los_campos_de_group1_group2_group3(step, email_label, pass1_label, pass2_label):
world.register_response = world.webApp.get('/register/')
world.dom = str(world.register_response.body)
assert world.dom.__contains__(email_label)
assert world.dom.__contains__('<input class="form-control" id="email" name="email" placeholder="Email address" type="text" value="">')
assert world.dom.__contains__(pass1_label)
assert world.dom.__contains__('<input class="form-control" id="password" name="password" placeholder="Password" type="password" value="">')
assert world.dom.__contains__(pass2_label)
assert world.dom.__contains__('<input class="form-control" id="password2" name="password2" placeholder="Repeat password" type="password" value="">')
@step(u'Y el boton "([^"]*)" para realizar la registracion')
def y_el_boton_group1_para_realizar_la_registracion(step, register_button):
assert world.dom.__contains__(register_button)
assert world.dom.__contains__('<button type="submit" class="btn btn-success">Register</button>') | gpl-2.0 | 771,311,736,563,944,200 | 48.195122 | 156 | 0.667163 | false |
jgoclawski/django | tests/generic_inline_admin/tests.py | 154 | 22749 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.contrib import admin
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.models import User
from django.contrib.contenttypes.admin import GenericTabularInline
from django.contrib.contenttypes.forms import generic_inlineformset_factory
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.forms.formsets import DEFAULT_MAX_NUM
from django.forms.models import ModelForm
from django.test import (
RequestFactory, SimpleTestCase, TestCase, override_settings,
)
from .admin import MediaInline, MediaPermanentInline, site as admin_site
from .models import Category, Episode, EpisodePermanent, Media, PhoneNumber
class TestDataMixin(object):
@classmethod
def setUpTestData(cls):
# password = "secret"
User.objects.create(
pk=100, username='super', first_name='Super', last_name='User', email='[email protected]',
password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=True,
is_staff=True, last_login=datetime.datetime(2007, 5, 30, 13, 20, 10),
date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
# Set DEBUG to True to ensure {% include %} will raise exceptions.
# That is how inlines are rendered and #9498 will bubble up if it is an issue.
@override_settings(
DEBUG=True,
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="generic_inline_admin.urls",
)
class GenericAdminViewTest(TestDataMixin, TestCase):
def setUp(self):
self.client.login(username='super', password='secret')
# Can't load content via a fixture (since the GenericForeignKey
# relies on content type IDs, which will vary depending on what
# other tests have been run), thus we do it here.
e = Episode.objects.create(name='This Week in Django')
self.episode_pk = e.pk
m = Media(content_object=e, url='http://example.com/podcast.mp3')
m.save()
self.mp3_media_pk = m.pk
m = Media(content_object=e, url='http://example.com/logo.png')
m.save()
self.png_media_pk = m.pk
def test_basic_add_GET(self):
"""
A smoke test to ensure GET on the add_view works.
"""
response = self.client.get(reverse('admin:generic_inline_admin_episode_add'))
self.assertEqual(response.status_code, 200)
def test_basic_edit_GET(self):
"""
A smoke test to ensure GET on the change_view works.
"""
response = self.client.get(
reverse('admin:generic_inline_admin_episode_change', args=(self.episode_pk,))
)
self.assertEqual(response.status_code, 200)
def test_basic_add_POST(self):
"""
A smoke test to ensure POST on add_view works.
"""
post_data = {
"name": "This Week in Django",
# inline data
"generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "1",
"generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "0",
"generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
}
response = self.client.post(reverse('admin:generic_inline_admin_episode_add'), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_basic_edit_POST(self):
"""
A smoke test to ensure POST on edit_view works.
"""
post_data = {
"name": "This Week in Django",
# inline data
"generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "3",
"generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "2",
"generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
"generic_inline_admin-media-content_type-object_id-0-id": "%d" % self.mp3_media_pk,
"generic_inline_admin-media-content_type-object_id-0-url": "http://example.com/podcast.mp3",
"generic_inline_admin-media-content_type-object_id-1-id": "%d" % self.png_media_pk,
"generic_inline_admin-media-content_type-object_id-1-url": "http://example.com/logo.png",
"generic_inline_admin-media-content_type-object_id-2-id": "",
"generic_inline_admin-media-content_type-object_id-2-url": "",
}
url = reverse('admin:generic_inline_admin_episode_change', args=(self.episode_pk,))
response = self.client.post(url, post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_generic_inline_formset(self):
EpisodeMediaFormSet = generic_inlineformset_factory(Media, can_delete=False, exclude=['description', 'keywords'], extra=3)
e = Episode.objects.get(name='This Week in Django')
# Works with no queryset
formset = EpisodeMediaFormSet(instance=e)
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/podcast.mp3" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.mp3_media_pk)
self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="url" name="generic_inline_admin-media-content_type-object_id-1-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>' % self.png_media_pk)
self.assertHTMLEqual(formset.forms[2].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="url" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>')
# A queryset can be used to alter display ordering
formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.order_by('url'))
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.png_media_pk)
self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="url" name="generic_inline_admin-media-content_type-object_id-1-url" value="http://example.com/podcast.mp3" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>' % self.mp3_media_pk)
self.assertHTMLEqual(formset.forms[2].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="url" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>')
# Works with a queryset that omits items
formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.filter(url__endswith=".png"))
self.assertEqual(len(formset.forms), 4)
self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.png_media_pk)
self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="url" name="generic_inline_admin-media-content_type-object_id-1-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>')
def test_generic_inline_formset_factory(self):
# Regression test for #10522.
inline_formset = generic_inlineformset_factory(Media,
exclude=('url',))
# Regression test for #12340.
e = Episode.objects.get(name='This Week in Django')
formset = inline_formset(instance=e)
self.assertTrue(formset.get_queryset().ordered)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="generic_inline_admin.urls")
class GenericInlineAdminParametersTest(TestDataMixin, TestCase):
def setUp(self):
self.client.login(username='super', password='secret')
self.factory = RequestFactory()
def _create_object(self, model):
"""
Create a model with an attached Media object via GFK. We can't
load content via a fixture (since the GenericForeignKey relies on
content type IDs, which will vary depending on what other tests
have been run), thus we do it here.
"""
e = model.objects.create(name='This Week in Django')
Media.objects.create(content_object=e, url='http://example.com/podcast.mp3')
return e
def test_no_param(self):
"""
With one initial form, extra (default) at 3, there should be 4 forms.
"""
e = self._create_object(Episode)
response = self.client.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
formset = response.context['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 4)
self.assertEqual(formset.initial_form_count(), 1)
def test_extra_param(self):
"""
With extra=0, there should be one form.
"""
class ExtraInline(GenericTabularInline):
model = Media
extra = 0
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [ExtraInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 1)
self.assertEqual(formset.initial_form_count(), 1)
    def test_max_num_param(self):
"""
With extra=5 and max_num=2, there should be only 2 forms.
"""
class MaxNumInline(GenericTabularInline):
model = Media
extra = 5
max_num = 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [MaxNumInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 2)
self.assertEqual(formset.initial_form_count(), 1)
def test_min_num_param(self):
"""
With extra=3 and min_num=2, there should be five forms.
"""
class MinNumInline(GenericTabularInline):
model = Media
extra = 3
min_num = 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [MinNumInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 5)
self.assertEqual(formset.initial_form_count(), 1)
def test_get_extra(self):
class GetExtraInline(GenericTabularInline):
model = Media
extra = 4
def get_extra(self, request, obj):
return 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [GetExtraInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.extra, 2)
def test_get_min_num(self):
class GetMinNumInline(GenericTabularInline):
model = Media
min_num = 5
def get_min_num(self, request, obj):
return 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [GetMinNumInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.min_num, 2)
def test_get_max_num(self):
class GetMaxNumInline(GenericTabularInline):
model = Media
extra = 5
def get_max_num(self, request, obj):
return 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [GetMaxNumInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.max_num, 2)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="generic_inline_admin.urls")
class GenericInlineAdminWithUniqueTogetherTest(TestDataMixin, TestCase):
def setUp(self):
self.client.login(username='super', password='secret')
def test_add(self):
category_id = Category.objects.create(name='male').pk
post_data = {
"name": "John Doe",
# inline data
"generic_inline_admin-phonenumber-content_type-object_id-TOTAL_FORMS": "1",
"generic_inline_admin-phonenumber-content_type-object_id-INITIAL_FORMS": "0",
"generic_inline_admin-phonenumber-content_type-object_id-MAX_NUM_FORMS": "0",
"generic_inline_admin-phonenumber-content_type-object_id-0-id": "",
"generic_inline_admin-phonenumber-content_type-object_id-0-phone_number": "555-555-5555",
"generic_inline_admin-phonenumber-content_type-object_id-0-category": "%s" % category_id,
}
response = self.client.get(reverse('admin:generic_inline_admin_contact_add'))
self.assertEqual(response.status_code, 200)
response = self.client.post(reverse('admin:generic_inline_admin_contact_add'), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_delete(self):
from .models import Contact
c = Contact.objects.create(name='foo')
PhoneNumber.objects.create(
object_id=c.id,
content_type=ContentType.objects.get_for_model(Contact),
phone_number="555-555-5555",
)
response = self.client.post(reverse('admin:generic_inline_admin_contact_delete', args=[c.pk]))
self.assertContains(response, 'Are you sure you want to delete')
@override_settings(ROOT_URLCONF="generic_inline_admin.urls")
class NoInlineDeletionTest(SimpleTestCase):
def test_no_deletion(self):
inline = MediaPermanentInline(EpisodePermanent, admin_site)
fake_request = object()
formset = inline.get_formset(fake_request)
self.assertFalse(formset.can_delete)
class MockRequest(object):
pass
class MockSuperUser(object):
def has_perm(self, perm):
return True
request = MockRequest()
request.user = MockSuperUser()
@override_settings(ROOT_URLCONF="generic_inline_admin.urls")
class GenericInlineModelAdminTest(SimpleTestCase):
def setUp(self):
self.site = AdminSite()
def test_get_formset_kwargs(self):
media_inline = MediaInline(Media, AdminSite())
# Create a formset with default arguments
formset = media_inline.get_formset(request)
self.assertEqual(formset.max_num, DEFAULT_MAX_NUM)
self.assertEqual(formset.can_order, False)
# Create a formset with custom keyword arguments
formset = media_inline.get_formset(request, max_num=100, can_order=True)
self.assertEqual(formset.max_num, 100)
self.assertEqual(formset.can_order, True)
def test_custom_form_meta_exclude_with_readonly(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is respected when
used in conjunction with `GenericInlineModelAdmin.readonly_fields`
and when no `ModelAdmin.exclude` is defined.
"""
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ['url']
class MediaInline(GenericTabularInline):
readonly_fields = ['description']
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['keywords', 'id', 'DELETE'])
def test_custom_form_meta_exclude(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is respected by
`GenericInlineModelAdmin.get_formset`, and overridden if
`ModelAdmin.exclude` or `GenericInlineModelAdmin.exclude` are defined.
Refs #15907.
"""
# First with `GenericInlineModelAdmin` -----------------
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ['url']
class MediaInline(GenericTabularInline):
exclude = ['description']
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['url', 'keywords', 'id', 'DELETE'])
# Then, only with `ModelForm` -----------------
class MediaInline(GenericTabularInline):
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['description', 'keywords', 'id', 'DELETE'])
def test_get_fieldsets(self):
# Test that get_fieldsets is called when figuring out form fields.
# Refs #18681.
class MediaForm(ModelForm):
class Meta:
model = Media
fields = '__all__'
class MediaInline(GenericTabularInline):
form = MediaForm
model = Media
can_delete = False
def get_fieldsets(self, request, obj=None):
return [(None, {'fields': ['url', 'description']})]
ma = MediaInline(Media, self.site)
form = ma.get_formset(None).form
self.assertEqual(form._meta.fields, ['url', 'description'])
def test_get_formsets_with_inlines_returns_tuples(self):
"""
Ensure that get_formsets_with_inlines() returns the correct tuples.
"""
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ['url']
class MediaInline(GenericTabularInline):
form = MediaForm
model = Media
class AlternateInline(GenericTabularInline):
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
AlternateInline, MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
inlines = ma.get_inline_instances(request)
for (formset, inline), other_inline in zip(ma.get_formsets_with_inlines(request), inlines):
self.assertIsInstance(formset, other_inline.get_formset(request).__class__)
| bsd-3-clause | 8,687,456,767,391,364,000 | 45.712526 | 530 | 0.6486 | false |
kyledewey/z3 | scripts/mk_win_dist.py | 1 | 8708 | ############################################
# Copyright (c) 2012 Microsoft Corporation
#
# Scripts for automatically generating
# Window distribution zip files.
#
# Author: Leonardo de Moura (leonardo)
############################################
import os
import glob
import re
import getopt
import sys
import shutil
import subprocess
import zipfile
from fnmatch import fnmatch  # fnmatch() is called in cp_vs_runtime_visitor() below
from mk_exception import *
from mk_project import *
import mk_util
BUILD_DIR='build-dist'
BUILD_X64_DIR=os.path.join('build-dist', 'x64')
BUILD_X86_DIR=os.path.join('build-dist', 'x86')
VERBOSE=True
DIST_DIR='dist'
FORCE_MK=False
DOTNET_ENABLED=True
JAVA_ENABLED=True
GIT_HASH=False
def set_verbose(flag):
global VERBOSE
VERBOSE = flag
def is_verbose():
return VERBOSE
def mk_dir(d):
if not os.path.exists(d):
os.makedirs(d)
def set_build_dir(path):
global BUILD_DIR, BUILD_X86_DIR, BUILD_X64_DIR
BUILD_DIR = path
BUILD_X86_DIR = os.path.join(path, 'x86')
BUILD_X64_DIR = os.path.join(path, 'x64')
mk_dir(BUILD_X86_DIR)
mk_dir(BUILD_X64_DIR)
def display_help():
print("mk_win_dist.py: Z3 Windows distribution generator\n")
print("This script generates the zip files containing executables, dlls, header files for Windows.")
print("It must be executed from the Z3 root directory.")
print("\nOptions:")
print(" -h, --help display this message.")
print(" -s, --silent do not print verbose messages.")
print(" -b <sudir>, --build=<subdir> subdirectory where x86 and x64 Z3 versions will be built (default: build-dist).")
print(" -f, --force force script to regenerate Makefiles.")
print(" --nodotnet do not include .NET bindings in the binary distribution files.")
print(" --nojava do not include Java bindings in the binary distribution files.")
print(" --githash include git hash in the Zip file.")
exit(0)
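# Illustrative invocations (the build directory name below is an example only):
#   python scripts\mk_win_dist.py
#   python scripts\mk_win_dist.py -b my-build-dir --nodotnet --nojava --githash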
# Parse configuration option for mk_make script
def parse_options():
global FORCE_MK, JAVA_ENABLED, GIT_HASH, DOTNET_ENABLED
path = BUILD_DIR
options, remainder = getopt.gnu_getopt(sys.argv[1:], 'b:hsf', ['build=',
'help',
'silent',
'force',
'nojava',
'nodotnet',
'githash'
])
for opt, arg in options:
if opt in ('-b', '--build'):
if arg == 'src':
raise MKException('The src directory should not be used to host the Makefile')
path = arg
elif opt in ('-s', '--silent'):
set_verbose(False)
elif opt in ('-h', '--help'):
display_help()
elif opt in ('-f', '--force'):
FORCE_MK = True
elif opt == '--nodotnet':
DOTNET_ENABLED = False
elif opt == '--nojava':
JAVA_ENABLED = False
elif opt == '--githash':
GIT_HASH = True
else:
raise MKException("Invalid command line option '%s'" % opt)
set_build_dir(path)
# Check whether build directory already exists or not
def check_build_dir(path):
return os.path.exists(path) and os.path.exists(os.path.join(path, 'Makefile'))
# Create a build directory using mk_make.py
def mk_build_dir(path, x64):
if not check_build_dir(path) or FORCE_MK:
opts = ["python", os.path.join('scripts', 'mk_make.py'), "--parallel=24", "-b", path]
if DOTNET_ENABLED:
opts.append('--dotnet')
if JAVA_ENABLED:
opts.append('--java')
if x64:
opts.append('-x')
if GIT_HASH:
opts.append('--githash=%s' % mk_util.git_hash())
if subprocess.call(opts) != 0:
raise MKException("Failed to generate build directory at '%s'" % path)
# Create build directories
def mk_build_dirs():
mk_build_dir(BUILD_X86_DIR, False)
mk_build_dir(BUILD_X64_DIR, True)
# Check if on Visual Studio command prompt
def check_vc_cmd_prompt():
try:
DEVNULL = open(os.devnull, 'wb')
subprocess.call(['cl'], stdout=DEVNULL, stderr=DEVNULL)
except:
raise MKException("You must execute the mk_win_dist.py script on a Visual Studio Command Prompt")
def exec_cmds(cmds):
cmd_file = 'z3_tmp.cmd'
f = open(cmd_file, 'w')
for cmd in cmds:
f.write(cmd)
f.write('\n')
f.close()
res = 0
try:
res = subprocess.call(cmd_file, shell=True)
except:
res = 1
try:
        os.remove(cmd_file)  # clean up the temporary command script
except:
pass
return res
# Compile Z3 (if x64 == True, then it builds it in x64 mode).
def mk_z3_core(x64):
cmds = []
if x64:
cmds.append('call "%VCINSTALLDIR%vcvarsall.bat" amd64')
cmds.append('cd %s' % BUILD_X64_DIR)
else:
cmds.append('call "%VCINSTALLDIR%vcvarsall.bat" x86')
cmds.append('cd %s' % BUILD_X86_DIR)
cmds.append('nmake')
if exec_cmds(cmds) != 0:
raise MKException("Failed to make z3, x64: %s" % x64)
def mk_z3():
mk_z3_core(False)
mk_z3_core(True)
def get_z3_name(x64):
major, minor, build, revision = get_version()
if x64:
platform = "x64"
else:
platform = "x86"
if GIT_HASH:
return 'z3-%s.%s.%s.%s-%s-win' % (major, minor, build, mk_util.git_hash(), platform)
else:
return 'z3-%s.%s.%s-%s-win' % (major, minor, build, platform)
def mk_dist_dir_core(x64):
if x64:
platform = "x64"
build_path = BUILD_X64_DIR
else:
platform = "x86"
build_path = BUILD_X86_DIR
dist_path = os.path.join(DIST_DIR, get_z3_name(x64))
mk_dir(dist_path)
mk_util.DOTNET_ENABLED = DOTNET_ENABLED
mk_util.JAVA_ENABLED = JAVA_ENABLED
mk_win_dist(build_path, dist_path)
if is_verbose():
print("Generated %s distribution folder at '%s'" % (platform, dist_path))
def mk_dist_dir():
mk_dist_dir_core(False)
mk_dist_dir_core(True)
def get_dist_path(x64):
return get_z3_name(x64)
def mk_zip_core(x64):
dist_path = get_dist_path(x64)
old = os.getcwd()
try:
os.chdir(DIST_DIR)
zfname = '%s.zip' % dist_path
zipout = zipfile.ZipFile(zfname, 'w', zipfile.ZIP_DEFLATED)
for root, dirs, files in os.walk(dist_path):
for f in files:
zipout.write(os.path.join(root, f))
if is_verbose():
print("Generated '%s'" % zfname)
except:
pass
os.chdir(old)
# Create a zip file for each platform
def mk_zip():
mk_zip_core(False)
mk_zip_core(True)
VS_RUNTIME_PATS = [re.compile(r'vcomp.*\.dll'),
                   re.compile(r'msvcp.*\.dll'),
                   re.compile(r'msvcr.*\.dll')]
VS_RUNTIME_FILES = []
def cp_vs_runtime_visitor(pattern, dir, files):
global VS_RUNTIME_FILES
for filename in files:
for pat in VS_RUNTIME_PATS:
if pat.match(filename):
if fnmatch(filename, pattern):
fname = os.path.join(dir, filename)
if not os.path.isdir(fname):
VS_RUNTIME_FILES.append(fname)
break
# Copy Visual Studio Runtime libraries
def cp_vs_runtime_core(x64):
global VS_RUNTIME_FILES
if x64:
platform = "x64"
else:
platform = "x86"
vcdir = os.environ['VCINSTALLDIR']
path = '%sredist\\%s' % (vcdir, platform)
VS_RUNTIME_FILES = []
    for root, _, files in os.walk(path):  # visit every directory under the VC redist path
        cp_vs_runtime_visitor('*.dll', root, files)
bin_dist_path = os.path.join(DIST_DIR, get_dist_path(x64), 'bin')
for f in VS_RUNTIME_FILES:
shutil.copy(f, bin_dist_path)
if is_verbose():
print("Copied '%s' to '%s'" % (f, bin_dist_path))
def cp_vs_runtime():
cp_vs_runtime_core(True)
cp_vs_runtime_core(False)
def cp_license():
shutil.copy("LICENSE.txt", os.path.join(DIST_DIR, get_dist_path(True)))
shutil.copy("LICENSE.txt", os.path.join(DIST_DIR, get_dist_path(False)))
# Entry point
def main():
if os.name != 'nt':
raise MKException("This script is for Windows only")
parse_options()
check_vc_cmd_prompt()
mk_build_dirs()
mk_z3()
init_project_def()
mk_dist_dir()
cp_license()
cp_vs_runtime()
mk_zip()
main()
| mit | -2,921,153,309,072,303,600 | 30.436823 | 124 | 0.550413 | false |
liucode/tempest-master | tempest/services/object_storage/container_client.py | 9 | 6222 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from xml.etree import ElementTree as etree
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
from tempest.common import service_client
class ContainerClient(service_client.ServiceClient):
def create_container(
self, container_name,
metadata=None,
remove_metadata=None,
metadata_prefix='X-Container-Meta-',
remove_metadata_prefix='X-Remove-Container-Meta-'):
"""
Creates a container, with optional metadata passed in as a
dictionary
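        Example (illustrative only; the container name and metadata key are
        hypothetical):
            resp, body = container_client.create_container(
                'my-container', metadata={'book': 'moby-dick'})
            # sends the request header X-Container-Meta-book: moby-dick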
"""
url = str(container_name)
headers = {}
if metadata is not None:
for key in metadata:
headers[metadata_prefix + key] = metadata[key]
if remove_metadata is not None:
for key in remove_metadata:
headers[remove_metadata_prefix + key] = remove_metadata[key]
resp, body = self.put(url, body=None, headers=headers)
self.expected_success([201, 202], resp.status)
return resp, body
def delete_container(self, container_name):
"""Deletes the container (if it's empty)."""
url = str(container_name)
resp, body = self.delete(url)
self.expected_success(204, resp.status)
return resp, body
def update_container_metadata(
self, container_name,
metadata=None,
remove_metadata=None,
metadata_prefix='X-Container-Meta-',
remove_metadata_prefix='X-Remove-Container-Meta-'):
"""Updates arbitrary metadata on container."""
url = str(container_name)
headers = {}
if metadata is not None:
for key in metadata:
headers[metadata_prefix + key] = metadata[key]
if remove_metadata is not None:
for key in remove_metadata:
headers[remove_metadata_prefix + key] = remove_metadata[key]
resp, body = self.post(url, body=None, headers=headers)
self.expected_success(204, resp.status)
return resp, body
def delete_container_metadata(self, container_name, metadata,
metadata_prefix='X-Remove-Container-Meta-'):
"""Deletes arbitrary metadata on container."""
url = str(container_name)
headers = {}
if metadata is not None:
for item in metadata:
headers[metadata_prefix + item] = metadata[item]
resp, body = self.post(url, body=None, headers=headers)
self.expected_success(204, resp.status)
return resp, body
def list_container_metadata(self, container_name):
"""
Retrieves container metadata headers
"""
url = str(container_name)
resp, body = self.head(url)
self.expected_success(204, resp.status)
return resp, body
def list_all_container_objects(self, container, params=None):
"""
        Returns a complete list of all objects in the container, even if
        the item count is beyond the 10,000 item listing limit.
        Does not require any parameters aside from the container name.
"""
# TODO(dwalleck): Rewrite using json format to avoid newlines at end of
# obj names. Set limit to API limit - 1 (max returned items = 9999)
limit = 9999
if params is not None:
if 'limit' in params:
limit = params['limit']
if 'marker' in params:
limit = params['marker']
resp, objlist = self.list_container_contents(
container,
params={'limit': limit, 'format': 'json'})
self.expected_success(200, resp.status)
return objlist
def list_container_contents(self, container, params=None):
"""
List the objects in a container, given the container name
Returns the container object listing as a plain text list, or as
xml or json if that option is specified via the 'format' argument.
Optional Arguments:
limit = integer
For an integer value n, limits the number of results to at most
n values.
marker = 'string'
Given a string value x, return object names greater in value
than the specified marker.
prefix = 'string'
For a string value x, causes the results to be limited to names
beginning with the substring x.
format = 'json' or 'xml'
Specify either json or xml to return the respective serialized
response.
If json, returns a list of json objects
if xml, returns a string of xml
path = 'string'
For a string value x, return the object names nested in the
pseudo path (assuming preconditions are met - see below).
delimiter = 'character'
For a character c, return all the object names nested in the
container (without the need for the directory marker objects).
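        Example (illustrative only; the container name and parameter values
        are hypothetical):
            resp, listing = container_client.list_container_contents(
                'my-container',
                params={'format': 'json', 'limit': 100, 'prefix': 'photos/'})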
"""
url = str(container)
if params:
url += '?'
url += '&%s' % urllib.urlencode(params)
resp, body = self.get(url, headers={})
if params and params.get('format') == 'json':
body = json.loads(body)
elif params and params.get('format') == 'xml':
body = etree.fromstring(body)
self.expected_success([200, 204], resp.status)
return resp, body
| apache-2.0 | -904,130,707,056,104,600 | 35.816568 | 79 | 0.598843 | false |
wildchildyn/autism-website | yanni_env/lib/python3.6/site-packages/pip/_vendor/requests/packages/chardet/langgreekmodel.py | 2763 | 12628 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin7_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
win1253_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.2851%
# first 1024 sequences:1.7001%
# rest sequences: 0.0359%
# negative sequences: 0.0148%
GreekLangModel = (
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,
0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,
0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,
0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,
0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,
0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,
0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,
0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,
0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,
0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,
0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,
0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,
0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,
0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,
0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,
0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,
0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
Latin7GreekModel = {
'charToOrderMap': Latin7_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-7"
}
Win1253GreekModel = {
'charToOrderMap': win1253_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': False,
'charsetName': "windows-1253"
}
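# Example (illustrative sketch, not part of the original module): these model
# dicts are consumed by a chardet-style single-byte charset prober, roughly:
#
#   from sbcharsetprober import SingleByteCharSetProber
#   prober = SingleByteCharSetProber(Latin7GreekModel)
#   prober.feed(byte_string)               # byte_string is a hypothetical input buffer
#   confidence = prober.get_confidence()
#
# 'charToOrderMap' maps raw byte values to frequency-order indices, and
# 'precedenceMatrix' (GreekLangModel) holds the bigram frequency classes 0-3
# listed above.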
# flake8: noqa
| gpl-3.0 | 1,258,221,678,260,830,200 | 55.124444 | 70 | 0.549176 | false |
louyihua/edx-platform | lms/djangoapps/certificates/tests/test_signals.py | 29 | 1324 | """ Unit tests for enabling self-generated certificates by default
for self-paced courses.
"""
from certificates import api as certs_api
from certificates.models import CertificateGenerationConfiguration
from certificates.signals import _listen_for_course_publish
from openedx.core.djangoapps.self_paced.models import SelfPacedConfiguration
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
class SelfGeneratedCertsSignalTest(ModuleStoreTestCase):
""" Tests for enabling self-generated certificates by default
for self-paced courses.
"""
def setUp(self):
super(SelfGeneratedCertsSignalTest, self).setUp()
SelfPacedConfiguration(enabled=True).save()
self.course = CourseFactory.create(self_paced=True)
# Enable the feature
CertificateGenerationConfiguration.objects.create(enabled=True)
def test_cert_generation_enabled_for_self_paced(self):
""" Verify the signal enable the self-generated certificates by default for
self-paced courses.
"""
self.assertFalse(certs_api.cert_generation_enabled(self.course.id))
_listen_for_course_publish('store', self.course.id)
self.assertTrue(certs_api.cert_generation_enabled(self.course.id))
| agpl-3.0 | 1,896,101,612,970,942,500 | 41.709677 | 83 | 0.759819 | false |
adelina-t/nova | nova/api/openstack/compute/contrib/hosts.py | 48 | 14023 | # Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The hosts admin extension."""
from oslo_log import log as logging
import six
import webob.exc
from nova.api.openstack import extensions
from nova import compute
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LI
from nova import objects
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'hosts')
class HostController(object):
"""The Hosts API controller for the OpenStack API."""
def __init__(self):
self.api = compute.HostAPI()
super(HostController, self).__init__()
def index(self, req):
"""Returns a dict in the format:
| {'hosts': [{'host_name': 'some.host.name',
| 'service': 'cells',
| 'zone': 'internal'},
| {'host_name': 'some.other.host.name',
| 'service': 'cells',
| 'zone': 'internal'},
| {'host_name': 'some.celly.host.name',
| 'service': 'cells',
| 'zone': 'internal'},
| {'host_name': 'console1.host.com',
| 'service': 'consoleauth',
| 'zone': 'internal'},
| {'host_name': 'network1.host.com',
| 'service': 'network',
| 'zone': 'internal'},
| {'host_name': 'netwwork2.host.com',
| 'service': 'network',
| 'zone': 'internal'},
| {'host_name': 'compute1.host.com',
| 'service': 'compute',
| 'zone': 'nova'},
| {'host_name': 'compute2.host.com',
| 'service': 'compute',
| 'zone': 'nova'},
| {'host_name': 'sched1.host.com',
| 'service': 'scheduler',
| 'zone': 'internal'},
| {'host_name': 'sched2.host.com',
| 'service': 'scheduler',
| 'zone': 'internal'},
| {'host_name': 'vol1.host.com',
| 'service': 'volume',
| 'zone': 'internal'}]}
"""
context = req.environ['nova.context']
authorize(context)
# NOTE(alex_xu): back-compatible with db layer hard-code admin
# permission checks
nova_context.require_admin_context(context)
filters = {'disabled': False}
zone = req.GET.get('zone', None)
if zone:
filters['availability_zone'] = zone
services = self.api.service_get_all(context, filters=filters,
set_zones=True)
hosts = []
for service in services:
hosts.append({'host_name': service['host'],
'service': service['topic'],
'zone': service['availability_zone']})
return {'hosts': hosts}
def update(self, req, id, body):
"""Updates a specified body.
:param body: example format {'status': 'enable',
'maintenance_mode': 'enable'}
"""
def read_enabled(orig_val, msg):
"""Checks a specified orig_val and returns True for 'enabled'
and False for 'disabled'.
:param orig_val: A string with either 'enable' or 'disable'. May
be surrounded by whitespace, and case doesn't
matter
:param msg: The message to be passed to HTTPBadRequest. A single
%s will be replaced with orig_val.
"""
val = orig_val.strip().lower()
if val == "enable":
return True
elif val == "disable":
return False
else:
raise webob.exc.HTTPBadRequest(explanation=msg % orig_val)
context = req.environ['nova.context']
authorize(context)
# NOTE(alex_xu): back-compatible with db layer hard-code admin
# permission checks. This has to be left only for API v2.0 because
# this version has to be stable even if it means that only admins
# can call this method while the policy could be changed.
nova_context.require_admin_context(context)
# See what the user wants to 'update'
params = {k.strip().lower(): v for k, v in six.iteritems(body)}
orig_status = status = params.pop('status', None)
orig_maint_mode = maint_mode = params.pop('maintenance_mode', None)
# Validate the request
if len(params) > 0:
# Some extra param was passed. Fail.
explanation = _("Invalid update setting: '%s'") % params.keys()[0]
raise webob.exc.HTTPBadRequest(explanation=explanation)
if orig_status is not None:
status = read_enabled(orig_status, _("Invalid status: '%s'"))
if orig_maint_mode is not None:
maint_mode = read_enabled(orig_maint_mode, _("Invalid mode: '%s'"))
if status is None and maint_mode is None:
explanation = _("'status' or 'maintenance_mode' needed for "
"host update")
raise webob.exc.HTTPBadRequest(explanation=explanation)
# Make the calls and merge the results
result = {'host': id}
if status is not None:
result['status'] = self._set_enabled_status(context, id, status)
if maint_mode is not None:
result['maintenance_mode'] = self._set_host_maintenance(context,
id, maint_mode)
return result
def _set_host_maintenance(self, context, host_name, mode=True):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
LOG.info(_LI("Putting host %(host_name)s in maintenance mode "
"%(mode)s."),
{'host_name': host_name, 'mode': mode})
try:
result = self.api.set_host_maintenance(context, host_name, mode)
except NotImplementedError:
msg = _("Virt driver does not implement host maintenance mode.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.ComputeServiceUnavailable as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
if result not in ("on_maintenance", "off_maintenance"):
raise webob.exc.HTTPBadRequest(explanation=result)
return result
def _set_enabled_status(self, context, host_name, enabled):
"""Sets the specified host's ability to accept new instances.
:param enabled: a boolean - if False no new VMs will be able to start
on the host
"""
if enabled:
LOG.info(_LI("Enabling host %s.") % host_name)
else:
LOG.info(_LI("Disabling host %s.") % host_name)
try:
result = self.api.set_host_enabled(context, host_name=host_name,
enabled=enabled)
except NotImplementedError:
msg = _("Virt driver does not implement host disabled status.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.ComputeServiceUnavailable as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
if result not in ("enabled", "disabled"):
raise webob.exc.HTTPBadRequest(explanation=result)
return result
def _host_power_action(self, req, host_name, action):
"""Reboots, shuts down or powers up the host."""
context = req.environ['nova.context']
authorize(context)
# NOTE(alex_xu): back-compatible with db layer hard-code admin
# permission checks. This has to be left only for API v2.0 because
# this version has to be stable even if it means that only admins
# can call this method while the policy could be changed.
nova_context.require_admin_context(context)
try:
result = self.api.host_power_action(context, host_name=host_name,
action=action)
except NotImplementedError:
msg = _("Virt driver does not implement host power management.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.ComputeServiceUnavailable as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
return {"host": host_name, "power_action": result}
def startup(self, req, id):
return self._host_power_action(req, host_name=id, action="startup")
def shutdown(self, req, id):
return self._host_power_action(req, host_name=id, action="shutdown")
def reboot(self, req, id):
return self._host_power_action(req, host_name=id, action="reboot")
@staticmethod
def _get_total_resources(host_name, compute_node):
return {'resource': {'host': host_name,
'project': '(total)',
'cpu': compute_node['vcpus'],
'memory_mb': compute_node['memory_mb'],
'disk_gb': compute_node['local_gb']}}
@staticmethod
def _get_used_now_resources(host_name, compute_node):
return {'resource': {'host': host_name,
'project': '(used_now)',
'cpu': compute_node['vcpus_used'],
'memory_mb': compute_node['memory_mb_used'],
'disk_gb': compute_node['local_gb_used']}}
@staticmethod
def _get_resource_totals_from_instances(host_name, instances):
cpu_sum = 0
mem_sum = 0
hdd_sum = 0
for instance in instances:
cpu_sum += instance['vcpus']
mem_sum += instance['memory_mb']
hdd_sum += instance['root_gb'] + instance['ephemeral_gb']
return {'resource': {'host': host_name,
'project': '(used_max)',
'cpu': cpu_sum,
'memory_mb': mem_sum,
'disk_gb': hdd_sum}}
@staticmethod
def _get_resources_by_project(host_name, instances):
# Getting usage resource per project
project_map = {}
for instance in instances:
resource = project_map.setdefault(instance['project_id'],
{'host': host_name,
'project': instance['project_id'],
'cpu': 0,
'memory_mb': 0,
'disk_gb': 0})
resource['cpu'] += instance['vcpus']
resource['memory_mb'] += instance['memory_mb']
resource['disk_gb'] += (instance['root_gb'] +
instance['ephemeral_gb'])
return project_map
def show(self, req, id):
"""Shows the physical/usage resource given by hosts.
:param id: hostname
:returns: expected to use HostShowTemplate.
ex.::
{'host': {'resource':D},..}
D: {'host': 'hostname','project': 'admin',
'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30}
"""
context = req.environ['nova.context']
# NOTE(eliqiao): back-compatible with db layer hard-code admin
# permission checks. This has to be left only for API v2.0 because
# this version has to be stable even if it means that only admins
# can call this method while the policy could be changed.
nova_context.require_admin_context(context)
host_name = id
try:
compute_node = (
objects.ComputeNode.get_first_node_by_host_for_old_compat(
context, host_name))
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
instances = self.api.instance_get_all_by_host(context, host_name)
resources = [self._get_total_resources(host_name, compute_node)]
resources.append(self._get_used_now_resources(host_name,
compute_node))
resources.append(self._get_resource_totals_from_instances(host_name,
instances))
by_proj_resources = self._get_resources_by_project(host_name,
instances)
for resource in six.itervalues(by_proj_resources):
resources.append({'resource': resource})
return {'host': resources}
class Hosts(extensions.ExtensionDescriptor):
"""Admin-only host administration."""
name = "Hosts"
alias = "os-hosts"
namespace = "http://docs.openstack.org/compute/ext/hosts/api/v1.1"
updated = "2011-06-29T00:00:00Z"
def get_resources(self):
resources = [extensions.ResourceExtension('os-hosts',
HostController(),
collection_actions={'update': 'PUT'},
member_actions={"startup": "GET", "shutdown": "GET",
"reboot": "GET"})]
return resources
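# Example requests routed to this extension (illustrative only, not part of the
# original module; the tenant id and host name are placeholders):
#
#   GET /v2/{tenant_id}/os-hosts                       -> HostController.index
#   PUT /v2/{tenant_id}/os-hosts/{host_name}           -> HostController.update
#       body: {"status": "enable", "maintenance_mode": "disable"}
#   GET /v2/{tenant_id}/os-hosts/{host_name}/reboot    -> HostController.reboot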
| apache-2.0 | 2,141,364,429,776,899,000 | 41.237952 | 79 | 0.559581 | false |
orbnauticus/Pique | pique/rawtty.py | 1 | 5205 | #!/usr/bin/python
#
# Copyright (c) 2010, Ryan Marquardt
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the project nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import collections
import Queue
import select
import signal
import sys
import termios
import threading
ESCAPE = '\x1b'
NAMES = {
'escape':'\x1b',
'eof':'\x04',
'left':'\x1b[D', 'shift+left':'\x1b[1;2D',
'right':'\x1b[C', 'shift+right':'\x1b[1;2C',
'down':'\x1b[B', 'shift+down':'\x1b[1;2B',
'up':'\x1b[A', 'shift+up':'\x1b[1;2A',
'home':'\x1bOH',
'end':'\x1bOF',
'insert':'\x1b[2~', 'shift+insert':'\x1b[2;2~',
'delete':'\x1b[3~', 'shift+delete':'\x1b[3;2~',
'page_up':'\x1b[5~',
'page_down':'\x1b[6~',
'f1':'\x1bOP', 'shift+f1':'\x1bO1;2P',
'f2':'\x1bOQ', 'shift+f2':'\x1bO1;2Q',
'f3':'\x1bOR', 'shift+f3':'\x1bO1;2R',
'f4':'\x1bOS', 'shift+f4':'\x1bO1;2S',
'f5':'\x1b[15~', 'shift+f5':'\x1b[15;2~',
'f6':'\x1b[17~', 'shift+f6':'\x1b[17;2~',
'f7':'\x1b[18~', 'shift+f7':'\x1b[18;2~',
'f8':'\x1b[19~', 'shift+f8':'\x1b[19;2~',
'f9':'\x1b[20~', 'shift+f9':'\x1b[20;2~',
'f10':'\x1b[21~', 'shift+f10':'\x1b[21;2~',
'f11':'\x1b[23~', 'shift+f11':'\x1b[23;2~',
'f12':'\x1b[24~', 'shift+f12':'\x1b[24;2~',
'bksp':'\x7f',
'tab':'\t', 'shift+tab':'\x1b[Z',
'enter':'\n',
'space':' ',
}
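# Register the reverse mapping as well, so NAMES can translate raw escape
# sequences back to their human-readable key names (used by the iterator below).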
for k,v in NAMES.items():
NAMES[v] = k
KNOWN_SEQUENCES = NAMES.values()
class EOF(Exception):
pass
class rawtty(object):
def __init__(self, fd=sys.stdin, echo=False, timeout=1, quit='eof'):
self.fd = fd
self.echo = echo
self.timeout = timeout
self.quit = quit
self.old = termios.tcgetattr(self.fd.fileno())
self.q = Queue.Queue()
def __enter__(self):
self.start()
def __exit__(self, type, value, traceback):
self.restore()
def start(self):
try:
signal.signal(signal.SIGINT, self._recv_interrupt)
except:
pass
new = termios.tcgetattr(self.fd.fileno())
new[3] &= ~termios.ICANON
if not self.echo:
new[3] &= ~termios.ECHO
termios.tcsetattr(self.fd.fileno(), termios.TCSANOW, new)
def restore(self):
termios.tcsetattr(self.fd.fileno(), termios.TCSADRAIN, self.old)
try:
signal.signal(signal.SIGINT, signal.SIG_DFL)
except:
pass
def _recv_interrupt(self, sig, frame):
self.q.put(KeyboardInterrupt)
def __iter__(self):
self.start()
def readthread():
try:
c = True
while c:
c = self.fd.read(1)
self.q.put(c)
finally:
self.q.put('')
self.readthread = threading.Thread(target=readthread)
self.readthread.daemon = True
self.readthread.start()
seq = self.q.get()
while True:
if not seq:
raise EOF
elif seq == KeyboardInterrupt:
raise KeyboardInterrupt
elif seq == ESCAPE:
try:
seq += self.q.get(timeout=self.timeout)
except Queue.Empty:
pass
#Assume that only escape was pressed
else:
if not any(s.startswith(seq) for s in KNOWN_SEQUENCES):
#Escape key, followed by another sequence
yield 'escape'
seq = seq[1:]
continue
else:
#Probably not the escape key by itself
#Continue reads until we have a full sequence or error
while any(s.startswith(seq) for s in KNOWN_SEQUENCES):
if seq not in KNOWN_SEQUENCES:
seq += self.q.get()
else:
break
if seq not in KNOWN_SEQUENCES:
#No match
raise IOError('Unrecognized Sequence %r' % seq)
#print repr(seq), repr(NAMES.get(quit,quit)), seq == NAMES.get(quit,quit)
if seq != NAMES.get(self.quit,self.quit):
yield NAMES.get(seq,seq)
else:
return
seq = self.q.get()
def getch(fd=sys.stdin, echo=False):
with rawtty(fd):
return fd.read(1)
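# Example usage (illustrative sketch, not part of the original module; 'handle'
# is a hypothetical callback):
#
#   for key in rawtty():        # yields named keys ('a', 'up', 'f1', ...) until EOF or the quit key
#       handle(key)
#
#   ch = getch()                # read a single raw character with echo disabled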
if __name__=='__main__':
print KNOWN_SEQUENCES
while True:
try:
			for key in rawtty():
print repr(key)
except IOError, e:
print e
break
else:
break
| bsd-3-clause | 440,016,784,700,069,760 | 28.573864 | 76 | 0.655331 | false |
dmlc/tvm | python/tvm/contrib/rocm.py | 3 | 5347 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utility for ROCm backend"""
import subprocess
from os.path import join, exists
import tvm._ffi
from tvm._ffi.base import py_str
import tvm.runtime
import tvm.target
from . import utils
def find_lld(required=True):
"""Find ld.lld in system.
Parameters
----------
required : bool
        Whether it is required; a RuntimeError is raised if the linker is
        required but cannot be found.
Returns
-------
valid_list : list of str
List of possible paths.
Note
----
This function will first search ld.lld that
matches the major llvm version that built with tvm
"""
lld_list = []
major = tvm.target.codegen.llvm_version_major(allow_none=True)
if major is not None:
lld_list += ["ld.lld-%d.0" % major]
lld_list += ["ld.lld-%d" % major]
lld_list += ["ld.lld"]
valid_list = [utils.which(x) for x in lld_list]
valid_list = [x for x in valid_list if x]
if not valid_list and required:
raise RuntimeError("cannot find ld.lld, candidates are: " + str(lld_list))
return valid_list
def rocm_link(in_file, out_file, lld=None):
"""Link relocatable ELF object to shared ELF object using lld
Parameters
----------
in_file : str
Input file name (relocatable ELF object file)
out_file : str
Output file name (shared ELF object file)
lld : str, optional
The lld linker, if not specified,
we will try to guess the matched clang version.
"""
# if our result has undefined symbols, it will fail to load
# (hipModuleLoad/hipModuleLoadData), but with a somewhat opaque message
# so we have ld.lld check this here.
# If you get a complaint about missing symbols you might want to check the
# list of bitcode files below.
args = [
lld if lld is not None else find_lld()[0],
"--no-undefined",
"-shared",
in_file,
"-o",
out_file,
]
proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = "Linking error using ld.lld:\n"
msg += py_str(out)
raise RuntimeError(msg)
@tvm._ffi.register_func("tvm_callback_rocm_link")
def callback_rocm_link(obj_bin):
"""Links object file generated from LLVM to HSA Code Object
Parameters
----------
obj_bin : bytearray
The object file
Return
------
cobj_bin : bytearray
The HSA Code Object
"""
tmp_dir = utils.tempdir()
tmp_obj = tmp_dir.relpath("rocm_kernel.o")
tmp_cobj = tmp_dir.relpath("rocm_kernel.co")
with open(tmp_obj, "wb") as out_file:
out_file.write(bytes(obj_bin))
rocm_link(tmp_obj, tmp_cobj)
cobj_bin = bytearray(open(tmp_cobj, "rb").read())
return cobj_bin
@tvm._ffi.register_func("tvm_callback_rocm_bitcode_path")
def callback_rocm_bitcode_path(rocdl_dir=None):
"""Utility function to find ROCm device library bitcodes
Parameters
----------
rocdl_dir : str
The path to rocm library directory
The default value is the standard location
"""
# seems link order matters.
if rocdl_dir is None:
if exists("/opt/rocm/amdgcn/bitcode/"):
rocdl_dir = "/opt/rocm/amdgcn/bitcode/" # starting with rocm 3.9
else:
rocdl_dir = "/opt/rocm/lib/" # until rocm 3.8
bitcode_names = [
"oclc_daz_opt_on",
"ocml",
"hc",
"irif", # this does not exist in rocm 3.9, drop eventually
"ockl",
"oclc_correctly_rounded_sqrt_off",
"oclc_correctly_rounded_sqrt_on",
"oclc_daz_opt_off",
"oclc_finite_only_off",
"oclc_finite_only_on",
"oclc_isa_version_803", # todo (t-vi): an alternative might be to scan for the
"oclc_isa_version_900", # isa version files (if the linker throws out
"oclc_isa_version_906", # the unneeded ones or we filter for the arch we need)
"oclc_unsafe_math_off",
"oclc_unsafe_math_on",
"oclc_wavefrontsize64_on",
]
bitcode_files = []
for n in bitcode_names:
p = join(rocdl_dir, n + ".bc") # rocm >= 3.9
if not exists(p): # rocm <= 3.8
p = join(rocdl_dir, n + ".amdgcn.bc")
if exists(p):
bitcode_files.append(p)
elif "isa_version" not in n and n not in {"irif"}:
raise RuntimeError("could not find bitcode " + n)
return tvm.runtime.convert(bitcode_files)
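# Example (illustrative sketch, not part of the original module):
#
#   from tvm.contrib import rocm
#   lld = rocm.find_lld()[0]                        # first ld.lld candidate found on PATH
#   rocm.rocm_link("kernel.o", "kernel.co", lld=lld)
#   bitcodes = rocm.callback_rocm_bitcode_path()    # device-library bitcode paths to link against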
| apache-2.0 | -2,652,193,324,622,047,000 | 30.452941 | 100 | 0.62334 | false |
Eddy0402/rt-thread | bsp/mb9bf506r/rtconfig.py | 41 | 3310 | import os
# toolchains options
ARCH='arm'
CPU='cortex-m3'
CROSS_TOOL='iar'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = 'C:/Program Files/CodeSourcery/Sourcery G++ Lite/bin'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = 'C:/Keil'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
IAR_PATH = 'C:/Program Files/IAR Systems/Embedded Workbench 6.0 Evaluation'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'axf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m3 -mthumb -ffunction-sections -fdata-sections'
CFLAGS = DEVICE
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread-fm3.map,-cref,-u,Reset_Handler -T rtthread-mb9bf506.ld'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --device DARMSTM'
CFLAGS = DEVICE + ' --apcs=interwork'
AFLAGS = DEVICE
LFLAGS = DEVICE + ' --info sizes --info totals --info unused --info veneers --list rtthread-fm3.map --scatter rtthread-mb9bf506.sct'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/RV31/INC'
LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/RV31/LIB'
EXEC_PATH += '/arm/bin40/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
CFLAGS = ''
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --debug'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M3'
CFLAGS += ' -e'
CFLAGS += ' --fpu=None'
CFLAGS += ' --dlib_config "' + IAR_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' -Ol'
CFLAGS += ' --use_c++_inline'
AFLAGS = ''
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M3'
AFLAGS += ' --fpu None'
AFLAGS += ' -I"' + IAR_PATH + '/arm/INC"'
LFLAGS = ' --config rtthread-mb9bf506.icf'
LFLAGS += ' --semihosting'
LFLAGS += ' --entry __iar_program_start'
EXEC_PATH = IAR_PATH + '/arm/bin/'
POST_ACTION = 'ielftool.exe --srec --verbose $TARGET rtthread.srec'
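# Example (illustrative only, not part of the original file): the BSP's
# SConstruct imports this module and builds its construction environment from
# these settings, roughly:
#
#   import rtconfig
#   env = Environment(tools=['mingw'],
#                     AS=rtconfig.AS, ASFLAGS=rtconfig.AFLAGS,
#                     CC=rtconfig.CC, CCFLAGS=rtconfig.CFLAGS,
#                     LINK=rtconfig.LINK, LINKFLAGS=rtconfig.LFLAGS)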
| gpl-2.0 | -7,187,407,059,665,664,000 | 26.355372 | 136 | 0.558912 | false |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/ipython-2.2.0-py2.7.egg/IPython/parallel/tests/test_mongodb.py | 12 | 1563 | """Tests for mongodb backend
Authors:
* Min RK
"""
#-------------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Imports
#-------------------------------------------------------------------------------
import os
from unittest import TestCase
from nose import SkipTest
from pymongo import Connection
from IPython.parallel.controller.mongodb import MongoDB
from . import test_db
conn_kwargs = {}
if 'DB_IP' in os.environ:
conn_kwargs['host'] = os.environ['DB_IP']
if 'DBA_MONGODB_ADMIN_URI' in os.environ:
# On ShiningPanda, we need a username and password to connect. They are
# passed in a mongodb:// URI.
conn_kwargs['host'] = os.environ['DBA_MONGODB_ADMIN_URI']
if 'DB_PORT' in os.environ:
conn_kwargs['port'] = int(os.environ['DB_PORT'])
try:
c = Connection(**conn_kwargs)
except Exception:
c=None
class TestMongoBackend(test_db.TaskDBTest, TestCase):
"""MongoDB backend tests"""
def create_db(self):
try:
return MongoDB(database='iptestdb', _connection=c)
except Exception:
raise SkipTest("Couldn't connect to mongodb")
def teardown(self):
if c is not None:
c.drop_database('iptestdb')
| apache-2.0 | -5,459,744,469,710,275,000 | 26.910714 | 80 | 0.541907 | false |
bob3000/thumbor_aws | vows/result_storage_vows.py | 3 | 3453 | #se!/usr/bin/python
# -*- coding: utf-8 -*-
from pyvows import Vows, expect
from thumbor.context import Context
from tc_aws import Config
from fixtures.storage_fixture import IMAGE_BYTES, get_server
from boto.s3.connection import S3Connection
from moto import mock_s3
from tc_aws.result_storages.s3_storage import Storage
import logging
logging.getLogger('botocore').setLevel(logging.CRITICAL)
s3_bucket = 'thumbor-images-test'
class Request(object):
url = None
@Vows.batch
class S3ResultStorageVows(Vows.Context):
class CanStoreImage(Vows.Context):
@mock_s3
def topic(self):
self.conn = S3Connection()
self.conn.create_bucket(s3_bucket)
config = Config(TC_AWS_RESULT_STORAGE_BUCKET=s3_bucket)
ctx = Context(config=config, server=get_server('ACME-SEC'))
ctx.request = Request
ctx.request.url = 'my-image.jpg'
storage = Storage(ctx)
path = storage.put(IMAGE_BYTES)
return path
def should_be_in_catalog(self, topic):
expect(topic).to_equal('my-image.jpg')
class CanGetImage(Vows.Context):
@Vows.async_topic
@mock_s3
def topic(self, callback):
self.conn = S3Connection()
self.conn.create_bucket(s3_bucket)
config = Config(TC_AWS_RESULT_STORAGE_BUCKET=s3_bucket)
ctx = Context(config=config, server=get_server('ACME-SEC'))
ctx.request = Request
ctx.request.url = 'my-image-2.jpg'
storage = Storage(ctx)
storage.put(IMAGE_BYTES)
storage.get(callback=callback)
def should_have_proper_bytes(self, topic):
expect(topic.args[0]).not_to_be_null()
expect(topic.args[0]).not_to_be_an_error()
expect(topic.args[0]).to_equal(IMAGE_BYTES)
class CanGetImageWithMetadata(Vows.Context):
@Vows.async_topic
@mock_s3
def topic(self, callback):
self.conn = S3Connection()
self.conn.create_bucket(s3_bucket)
config = Config(TC_AWS_RESULT_STORAGE_BUCKET=s3_bucket, TC_AWS_STORE_METADATA=True)
ctx = Context(config=config, server=get_server('ACME-SEC'))
ctx.headers = {'Content-Type': 'image/webp', 'Some-Other-Header': 'doge-header'}
ctx.request = Request
ctx.request.url = 'my-image-meta.jpg'
storage = Storage(ctx)
storage.put(IMAGE_BYTES)
file_abspath = storage._normalize_path(ctx.request.url)
storage.storage.get(file_abspath, callback=callback)
def should_have_proper_bytes(self, topic):
expect(topic.args[0].content_type).to_include('image/webp')
expect(topic.args[0].metadata).to_include('some-other-header')
expect(topic.args[0].content_type).to_equal(IMAGE_BYTES)
class HandlesStoragePrefix(Vows.Context):
@mock_s3
def topic(self):
self.conn = S3Connection()
self.conn.create_bucket(s3_bucket)
config = Config(TC_AWS_RESULT_STORAGE_BUCKET=s3_bucket, TC_AWS_RESULT_STORAGE_ROOT_PATH='tata')
ctx = Context(config=config, server=get_server('ACME-SEC'))
storage = Storage(ctx)
return storage._normalize_path('toto')
def should_return_the_same(self, topic):
expect(topic).to_equal("tata/toto")
| mit | 4,733,106,727,057,738,000 | 31.271028 | 107 | 0.615696 | false |
auntieNeo/asterisk-testsuite | lib/python/asterisk/channel_test_condition.py | 1 | 8676 | #!/usr/bin/env python
"""Test condition for channels
Copyright (C) 2011-2012, Digium, Inc.
Matt Jordan <[email protected]>
This program is free software, distributed under the terms of
the GNU General Public License Version 2.
"""
from twisted.internet import defer
from test_conditions import TestCondition
import logging
import unittest
import re
class ChannelTestCondition(TestCondition):
"""Test condition that checks for the existence of channels. If channels
are detected and the number of active channels is greater than the
configured amount, an error is raised.
By default, the number of allowed active channels is 0.
"""
def __init__(self, test_config):
"""Constructor
Keyword Arguments:
test_config The TestConfig object for the test
"""
super(ChannelTestCondition, self).__init__(test_config)
self.allowed_channels = 0
if ('allowedchannels' in test_config.config):
self.allowed_channels = test_config.config['allowedchannels']
def evaluate(self, related_test_condition=None):
"""Evaluate this test condition
Keyword Argument:
related_test_condition The test condition that this condition is
related to
Returns:
A deferred that will be called when evaluation is complete
"""
def __channel_callback(result):
"""Callback called from core show channels"""
channel_expression = re.compile('^[A-Za-z0-9]+/')
channel_tokens = result.output.strip().split('\n')
active_channels = 0
referenced_channels = 0
for token in channel_tokens:
if channel_expression.match(token):
referenced_channels += 1
if 'active channels' in token:
active_channel_tokens = token.partition(' ')
active_channels = int(active_channel_tokens[0].strip())
if active_channels > self.allowed_channels:
msg = ("Detected number of active channels %d is greater than "
"the allowed %d on Asterisk %s" %
(active_channels, self.allowed_channels, result.host))
super(ChannelTestCondition, self).fail_check(msg)
elif referenced_channels > self.allowed_channels:
msg = ("Channel leak detected - "
"number of referenced channels %d is greater than "
"the allowed %d on Asterisk %s" %
(referenced_channels, self.allowed_channels,
result.host))
super(ChannelTestCondition, self).fail_check(msg)
return result
def _raise_finished(result, finish_deferred):
"""Raise the deferred callback"""
finish_deferred.callback(self)
return result
finish_deferred = defer.Deferred()
# Set to pass and let a failure override
super(ChannelTestCondition, self).pass_check()
exec_list = [ast.cli_exec('core show channels').
addCallback(__channel_callback) for ast in self.ast]
defer.DeferredList(exec_list).addCallback(_raise_finished,
finish_deferred)
return finish_deferred
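# Example (illustrative sketch, not part of the original module): the condition
# is normally driven from a test's YAML configuration, but the equivalent flow
# in code is roughly:
#
#   cond = ChannelTestCondition(test_config)    # test_config.config may set 'allowedchannels'
#   cond.register_asterisk_instance(ast)        # one call per Asterisk instance under test
#   deferred = cond.evaluate()                  # fires once 'core show channels' output is checked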
class AstMockOutput(object):
"""mock cli output base class"""
def __init__(self):
"""Constructor"""
self.host = "127.0.0.1"
def MockDefer(self, output):
"""use real defer to mock deferred output"""
self.output = output
deferred = defer.Deferred()
deferred.callback(self)
return deferred
class AstMockObjectInactive(AstMockOutput):
"""mock cli output showing no active channels"""
def cli_exec(self, command):
"""presume command is core show channels and generate output"""
output = ""
output += "Channel Location State Application(Data)\n"
output += "0 active channels\n"
output += "0 active calls\n"
output += "2 calls processed\n"
output += "Asterisk ending (0).\n"
return self.MockDefer(output)
class AstMockObjectSingle(AstMockOutput):
"""mock cli output showing single active channel"""
def cli_exec(self, command):
"""presume command is core show channels and generate output"""
output = ""
output += "Channel Location State Application(Data)\n"
output += "Local/123@default-00 (None) Down ()\n"
output += "1 active channels\n"
output += "0 active calls\n"
output += "2 calls processed\n"
output += "Asterisk ending (0).\n"
return self.MockDefer(output)
class AstMockObjectMultiple(AstMockOutput):
"""mock cli output showing multiple active channels"""
def cli_exec(self, command):
"""presume command is core show channels and generate output"""
output = ""
output += "Channel Location State Application(Data)\n"
output += "PJSIP/123@default-00 (None) Down ()\n"
output += "Local/123@default-00 (None) Down ()\n"
output += "SIP/alice@default-00 (None) Down ()\n"
output += "3 active channels\n"
output += "0 active calls\n"
output += "2 calls processed\n"
output += "Asterisk ending (0).\n"
return self.MockDefer(output)
class AstMockObjectLeaked(AstMockOutput):
"""mock cli output showing leaked channel"""
def cli_exec(self, command):
"""presume command is core show channels and generate output"""
output = ""
output += "Channel Location State Application(Data)\n"
output += "Local/123@default-00 (None) Down ()\n"
output += "0 active channels\n"
output += "0 active calls\n"
output += "2 calls processed\n"
output += "Asterisk ending (0).\n"
return self.MockDefer(output)
class TestConfig(object):
"""Fake TestConfig object for unittest"""
def __init__(self):
self.class_type_name = "bogus"
self.config = {}
self.enabled = True
self.pass_expected = True
class ChannelTestConditionUnitTest(unittest.TestCase):
"""Unit Tests for ChannelTestCondition"""
def test_evaluate_inactive(self):
"""test inactive channel condition"""
obj = ChannelTestCondition(TestConfig())
obj.register_asterisk_instance(AstMockObjectInactive())
obj.evaluate()
self.assertEqual(obj.get_status(), 'Passed')
def test_evaluate_multiple_fail(self):
"""test multiple channel condition"""
obj = ChannelTestCondition(TestConfig())
obj.register_asterisk_instance(AstMockObjectMultiple())
obj.evaluate()
self.assertEqual(obj.get_status(), 'Failed')
def test_evaluate_multiple_fail2(self):
"""test multiple channel condition"""
obj = ChannelTestCondition(TestConfig())
obj.allowed_channels = 2
obj.register_asterisk_instance(AstMockObjectMultiple())
obj.evaluate()
self.assertEqual(obj.get_status(), 'Failed')
def test_evaluate_multiple_pass(self):
"""test multiple channel condition"""
obj = ChannelTestCondition(TestConfig())
obj.allowed_channels = 3
obj.register_asterisk_instance(AstMockObjectMultiple())
obj.evaluate()
self.assertEqual(obj.get_status(), 'Passed')
def test_evaluate_single_fail(self):
"""test single channel condition"""
obj = ChannelTestCondition(TestConfig())
obj.register_asterisk_instance(AstMockObjectSingle())
obj.evaluate()
self.assertEqual(obj.get_status(), 'Failed')
def test_evaluate_single_pass(self):
"""test single channel condition"""
obj = ChannelTestCondition(TestConfig())
obj.allowed_channels = 1
obj.register_asterisk_instance(AstMockObjectSingle())
obj.evaluate()
self.assertEqual(obj.get_status(), 'Passed')
def test_evaluate_leaked(self):
"""test leaked channel condition"""
obj = ChannelTestCondition(TestConfig())
obj.register_asterisk_instance(AstMockObjectLeaked())
obj.evaluate()
self.assertEqual(obj.get_status(), 'Failed')
def main():
"""Run the unit tests"""
logging.basicConfig(level=logging.DEBUG)
unittest.main()
if __name__ == "__main__":
main()
| gpl-2.0 | 6,422,128,831,627,382,000 | 35.15 | 89 | 0.602582 | false |
terbolous/SickRage | lib/hachoir_parser/audio/flac.py | 95 | 5626 | """
FLAC (audio) parser
Documentation:
* http://flac.sourceforge.net/format.html
Author: Esteban Loiseau <baal AT tuxfamily.org>
Creation date: 2008-04-09
"""
from hachoir_parser import Parser
from hachoir_core.field import FieldSet, String, Bit, Bits, UInt16, UInt24, RawBytes, Enum, NullBytes
from hachoir_core.stream import BIG_ENDIAN, LITTLE_ENDIAN
from hachoir_core.tools import createDict
from hachoir_parser.container.ogg import parseVorbisComment
class VorbisComment(FieldSet):
endian = LITTLE_ENDIAN
createFields = parseVorbisComment
class StreamInfo(FieldSet):
static_size = 34*8
def createFields(self):
yield UInt16(self, "min_block_size", "The minimum block size (in samples) used in the stream")
yield UInt16(self, "max_block_size", "The maximum block size (in samples) used in the stream")
yield UInt24(self, "min_frame_size", "The minimum frame size (in bytes) used in the stream")
yield UInt24(self, "max_frame_size", "The maximum frame size (in bytes) used in the stream")
yield Bits(self, "sample_hertz", 20, "Sample rate in Hertz")
yield Bits(self, "nb_channel", 3, "Number of channels minus one")
yield Bits(self, "bits_per_sample", 5, "Bits per sample minus one")
yield Bits(self, "total_samples", 36, "Total samples in stream")
yield RawBytes(self, "md5sum", 16, "MD5 signature of the unencoded audio data")
class SeekPoint(FieldSet):
def createFields(self):
yield Bits(self, "sample_number", 64, "Sample number")
yield Bits(self, "offset", 64, "Offset in bytes")
yield Bits(self, "nb_sample", 16)
class SeekTable(FieldSet):
def createFields(self):
while not self.eof:
yield SeekPoint(self, "point[]")
class MetadataBlock(FieldSet):
"Metadata block field: http://flac.sourceforge.net/format.html#metadata_block"
BLOCK_TYPES = {
0: ("stream_info", u"Stream info", StreamInfo),
1: ("padding[]", u"Padding", None),
2: ("application[]", u"Application", None),
3: ("seek_table", u"Seek table", SeekTable),
4: ("comment", u"Vorbis comment", VorbisComment),
5: ("cue_sheet[]", u"Cue sheet", None),
6: ("picture[]", u"Picture", None),
}
BLOCK_TYPE_DESC = createDict(BLOCK_TYPES, 1)
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
self._size = 32 + self["metadata_length"].value * 8
try:
key = self["block_type"].value
self._name, self._description, self.handler = self.BLOCK_TYPES[key]
except KeyError:
self.handler = None
def createFields(self):
yield Bit(self, "last_metadata_block", "True if this is the last metadata block")
yield Enum(Bits(self, "block_type", 7, "Metadata block header type"), self.BLOCK_TYPE_DESC)
yield UInt24(self, "metadata_length", "Length of following metadata in bytes (doesn't include this header)")
block_type = self["block_type"].value
size = self["metadata_length"].value
if not size:
return
try:
handler = self.BLOCK_TYPES[block_type][2]
except KeyError:
handler = None
if handler:
yield handler(self, "content", size=size*8)
elif self["block_type"].value == 1:
yield NullBytes(self, "padding", size)
else:
yield RawBytes(self, "rawdata", size)
class Metadata(FieldSet):
def createFields(self):
while not self.eof:
field = MetadataBlock(self,"metadata_block[]")
yield field
if field["last_metadata_block"].value:
break
class Frame(FieldSet):
SAMPLE_RATES = {
0: "get from STREAMINFO metadata block",
1: "88.2kHz",
2: "176.4kHz",
3: "192kHz",
4: "8kHz",
5: "16kHz",
6: "22.05kHz",
7: "24kHz",
8: "32kHz",
9: "44.1kHz",
10: "48kHz",
11: "96kHz",
12: "get 8 bit sample rate (in kHz) from end of header",
13: "get 16 bit sample rate (in Hz) from end of header",
14: "get 16 bit sample rate (in tens of Hz) from end of header",
}
def createFields(self):
yield Bits(self, "sync", 14, "Sync code: 11111111111110")
yield Bit(self, "reserved[]")
yield Bit(self, "blocking_strategy")
yield Bits(self, "block_size", 4)
yield Enum(Bits(self, "sample_rate", 4), self.SAMPLE_RATES)
yield Bits(self, "channel_assign", 4)
yield Bits(self, "sample_size", 3)
yield Bit(self, "reserved[]")
# FIXME: Finish frame header parser
class Frames(FieldSet):
def createFields(self):
while not self.eof:
yield Frame(self, "frame[]")
# FIXME: Parse all frames
return
class FlacParser(Parser):
"Parse FLAC audio files: FLAC is a lossless audio codec"
MAGIC = "fLaC\x00"
PARSER_TAGS = {
"id": "flac",
"category": "audio",
"file_ext": ("flac",),
"mime": (u"audio/x-flac",),
"magic": ((MAGIC, 0),),
"min_size": 4*8,
"description": "FLAC audio",
}
endian = BIG_ENDIAN
def validate(self):
if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC:
return u"Invalid magic string"
return True
def createFields(self):
yield String(self, "signature", 4,charset="ASCII", description="FLAC signature: fLaC string")
yield Metadata(self,"metadata")
yield Frames(self,"frames")
| gpl-3.0 | -1,582,438,814,903,649,500 | 34.834395 | 116 | 0.602026 | false |
toshywoshy/ansible | lib/ansible/modules/network/fortios/fortios_wireless_controller_ble_profile.py | 7 | 13328 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_wireless_controller_ble_profile
short_description: Configure Bluetooth Low Energy profile in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify wireless_controller feature and ble_profile category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
wireless_controller_ble_profile:
description:
- Configure Bluetooth Low Energy profile.
default: null
type: dict
suboptions:
advertising:
description:
- Advertising type.
type: str
choices:
- ibeacon
- eddystone-uid
- eddystone-url
beacon_interval:
description:
- Beacon interval .
type: int
ble_scanning:
description:
- Enable/disable Bluetooth Low Energy (BLE) scanning.
type: str
choices:
- enable
- disable
comment:
description:
- Comment.
type: str
eddystone_instance:
description:
- Eddystone instance ID.
type: str
eddystone_namespace:
description:
- Eddystone namespace ID.
type: str
eddystone_url:
description:
- Eddystone URL.
type: str
eddystone_url_encode_hex:
description:
- Eddystone encoded URL hexadecimal string
type: str
ibeacon_uuid:
description:
- Universally Unique Identifier (UUID; automatically assigned but can be manually reset).
type: str
major_id:
description:
- Major ID.
type: int
minor_id:
description:
- Minor ID.
type: int
name:
description:
- Bluetooth Low Energy profile name.
required: true
type: str
txpower:
description:
- Transmit power level .
type: str
choices:
- 0
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure Bluetooth Low Energy profile.
fortios_wireless_controller_ble_profile:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
wireless_controller_ble_profile:
advertising: "ibeacon"
beacon_interval: "4"
ble_scanning: "enable"
comment: "Comment."
eddystone_instance: "<your_own_value>"
eddystone_namespace: "<your_own_value>"
eddystone_url: "<your_own_value>"
eddystone_url_encode_hex: "<your_own_value>"
ibeacon_uuid: "<your_own_value>"
major_id: "12"
minor_id: "13"
name: "default_name_14"
txpower: "0"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_wireless_controller_ble_profile_data(json):
option_list = ['advertising', 'beacon_interval', 'ble_scanning',
'comment', 'eddystone_instance', 'eddystone_namespace',
'eddystone_url', 'eddystone_url_encode_hex', 'ibeacon_uuid',
'major_id', 'minor_id', 'name',
'txpower']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def wireless_controller_ble_profile(data, fos):
vdom = data['vdom']
state = data['state']
wireless_controller_ble_profile_data = data['wireless_controller_ble_profile']
filtered_data = underscore_to_hyphen(filter_wireless_controller_ble_profile_data(wireless_controller_ble_profile_data))
if state == "present":
return fos.set('wireless-controller',
'ble-profile',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('wireless-controller',
'ble-profile',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_wireless_controller(data, fos):
if data['wireless_controller_ble_profile']:
resp = wireless_controller_ble_profile(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"wireless_controller_ble_profile": {
"required": False, "type": "dict", "default": None,
"options": {
"advertising": {"required": False, "type": "str",
"choices": ["ibeacon", "eddystone-uid", "eddystone-url"]},
"beacon_interval": {"required": False, "type": "int"},
"ble_scanning": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"comment": {"required": False, "type": "str"},
"eddystone_instance": {"required": False, "type": "str"},
"eddystone_namespace": {"required": False, "type": "str"},
"eddystone_url": {"required": False, "type": "str"},
"eddystone_url_encode_hex": {"required": False, "type": "str"},
"ibeacon_uuid": {"required": False, "type": "str"},
"major_id": {"required": False, "type": "int"},
"minor_id": {"required": False, "type": "int"},
"name": {"required": True, "type": "str"},
"txpower": {"required": False, "type": "str",
"choices": ["0", "1", "2",
"3", "4", "5",
"6", "7", "8",
"9", "10", "11",
"12"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_wireless_controller(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_wireless_controller(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 | -6,563,749,026,531,248,000 | 31.271186 | 123 | 0.548394 | false |
home-assistant/home-assistant | homeassistant/components/guardian/switch.py | 2 | 7888 | """Switches for the Elexa Guardian integration."""
from __future__ import annotations
from aioguardian import Client
from aioguardian.errors import GuardianError
import voluptuous as vol
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_FILENAME, CONF_PORT, CONF_URL
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from . import ValveControllerEntity
from .const import (
API_VALVE_STATUS,
CONF_UID,
DATA_CLIENT,
DATA_COORDINATOR,
DATA_PAIRED_SENSOR_MANAGER,
DOMAIN,
LOGGER,
)
ATTR_AVG_CURRENT = "average_current"
ATTR_INST_CURRENT = "instantaneous_current"
ATTR_INST_CURRENT_DDT = "instantaneous_current_ddt"
ATTR_TRAVEL_COUNT = "travel_count"
SERVICE_DISABLE_AP = "disable_ap"
SERVICE_ENABLE_AP = "enable_ap"
SERVICE_PAIR_SENSOR = "pair_sensor"
SERVICE_REBOOT = "reboot"
SERVICE_RESET_VALVE_DIAGNOSTICS = "reset_valve_diagnostics"
SERVICE_UNPAIR_SENSOR = "unpair_sensor"
SERVICE_UPGRADE_FIRMWARE = "upgrade_firmware"
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up Guardian switches based on a config entry."""
platform = entity_platform.async_get_current_platform()
for service_name, schema, method in [
(SERVICE_DISABLE_AP, {}, "async_disable_ap"),
(SERVICE_ENABLE_AP, {}, "async_enable_ap"),
(SERVICE_PAIR_SENSOR, {vol.Required(CONF_UID): cv.string}, "async_pair_sensor"),
(SERVICE_REBOOT, {}, "async_reboot"),
(SERVICE_RESET_VALVE_DIAGNOSTICS, {}, "async_reset_valve_diagnostics"),
(
SERVICE_UPGRADE_FIRMWARE,
{
vol.Optional(CONF_URL): cv.url,
vol.Optional(CONF_PORT): cv.port,
vol.Optional(CONF_FILENAME): cv.string,
},
"async_upgrade_firmware",
),
(
SERVICE_UNPAIR_SENSOR,
{vol.Required(CONF_UID): cv.string},
"async_unpair_sensor",
),
]:
platform.async_register_entity_service(service_name, schema, method)
async_add_entities(
[
ValveControllerSwitch(
entry,
hass.data[DOMAIN][DATA_CLIENT][entry.entry_id],
hass.data[DOMAIN][DATA_COORDINATOR][entry.entry_id],
)
]
)
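# Example service calls for the services registered above (illustrative only,
# not part of the original module; the entity id and sensor uid are placeholders):
#
#   service: guardian.pair_sensor
#   target: {entity_id: switch.guardian_valve_controller}
#   data: {uid: "0123456789ab"}
#
#   service: guardian.upgrade_firmware
#   target: {entity_id: switch.guardian_valve_controller}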
class ValveControllerSwitch(ValveControllerEntity, SwitchEntity):
"""Define a switch to open/close the Guardian valve."""
def __init__(
self,
entry: ConfigEntry,
client: Client,
coordinators: dict[str, DataUpdateCoordinator],
) -> None:
"""Initialize."""
super().__init__(
entry, coordinators, "valve", "Valve Controller", None, "mdi:water"
)
self._client = client
self._is_on = True
@property
def available(self) -> bool:
"""Return whether the entity is available."""
return self.coordinators[API_VALVE_STATUS].last_update_success
@property
def is_on(self) -> bool:
"""Return True if the valve is open."""
return self._is_on
async def _async_continue_entity_setup(self):
"""Register API interest (and related tasks) when the entity is added."""
self.async_add_coordinator_update_listener(API_VALVE_STATUS)
@callback
def _async_update_from_latest_data(self) -> None:
"""Update the entity."""
self._is_on = self.coordinators[API_VALVE_STATUS].data["state"] in (
"start_opening",
"opening",
"finish_opening",
"opened",
)
self._attrs.update(
{
ATTR_AVG_CURRENT: self.coordinators[API_VALVE_STATUS].data[
"average_current"
],
ATTR_INST_CURRENT: self.coordinators[API_VALVE_STATUS].data[
"instantaneous_current"
],
ATTR_INST_CURRENT_DDT: self.coordinators[API_VALVE_STATUS].data[
"instantaneous_current_ddt"
],
ATTR_TRAVEL_COUNT: self.coordinators[API_VALVE_STATUS].data[
"travel_count"
],
}
)
async def async_disable_ap(self):
"""Disable the device's onboard access point."""
try:
async with self._client:
await self._client.wifi.disable_ap()
except GuardianError as err:
LOGGER.error("Error while disabling valve controller AP: %s", err)
async def async_enable_ap(self):
"""Enable the device's onboard access point."""
try:
async with self._client:
await self._client.wifi.enable_ap()
except GuardianError as err:
LOGGER.error("Error while enabling valve controller AP: %s", err)
async def async_pair_sensor(self, *, uid):
"""Add a new paired sensor."""
try:
async with self._client:
await self._client.sensor.pair_sensor(uid)
except GuardianError as err:
LOGGER.error("Error while adding paired sensor: %s", err)
return
await self.hass.data[DOMAIN][DATA_PAIRED_SENSOR_MANAGER][
self._entry.entry_id
].async_pair_sensor(uid)
async def async_reboot(self):
"""Reboot the device."""
try:
async with self._client:
await self._client.system.reboot()
except GuardianError as err:
LOGGER.error("Error while rebooting valve controller: %s", err)
async def async_reset_valve_diagnostics(self):
"""Fully reset system motor diagnostics."""
try:
async with self._client:
await self._client.valve.reset()
except GuardianError as err:
LOGGER.error("Error while resetting valve diagnostics: %s", err)
async def async_unpair_sensor(self, *, uid):
"""Add a new paired sensor."""
try:
async with self._client:
await self._client.sensor.unpair_sensor(uid)
except GuardianError as err:
LOGGER.error("Error while removing paired sensor: %s", err)
return
await self.hass.data[DOMAIN][DATA_PAIRED_SENSOR_MANAGER][
self._entry.entry_id
].async_unpair_sensor(uid)
async def async_upgrade_firmware(self, *, url, port, filename):
"""Upgrade the device firmware."""
try:
async with self._client:
await self._client.system.upgrade_firmware(
url=url,
port=port,
filename=filename,
)
except GuardianError as err:
LOGGER.error("Error while upgrading firmware: %s", err)
async def async_turn_off(self, **kwargs) -> None:
"""Turn the valve off (closed)."""
try:
async with self._client:
await self._client.valve.close()
except GuardianError as err:
LOGGER.error("Error while closing the valve: %s", err)
return
self._is_on = False
self.async_write_ha_state()
async def async_turn_on(self, **kwargs) -> None:
"""Turn the valve on (open)."""
try:
async with self._client:
await self._client.valve.open()
except GuardianError as err:
LOGGER.error("Error while opening the valve: %s", err)
return
self._is_on = True
self.async_write_ha_state()
| apache-2.0 | 848,871,815,524,247,400 | 33.147186 | 88 | 0.592672 | false |
nitzmahone/ansible-modules-extras | network/haproxy.py | 26 | 13390 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Ravi Bhure <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: haproxy
version_added: "1.9"
short_description: Enable, disable, and set weights for HAProxy backend servers using socket commands.
description:
- Enable, disable, and set weights for HAProxy backend servers using socket
commands.
notes:
- Enable and disable commands are restricted and can only be issued on
sockets configured for level 'admin'. For example, you can add the line
'stats socket /var/run/haproxy.sock level admin' to the general section of
haproxy.cfg. See http://haproxy.1wt.eu/download/1.5/doc/configuration.txt.
options:
backend:
description:
- Name of the HAProxy backend pool.
required: false
default: auto-detected
host:
description:
- Name of the backend host to change.
required: true
default: null
shutdown_sessions:
description:
- When disabling a server, immediately terminate all the sessions attached
to the specified server. This can be used to terminate long-running
sessions after a server is put into maintenance mode.
required: false
default: false
socket:
description:
- Path to the HAProxy socket file.
required: false
default: /var/run/haproxy.sock
state:
description:
- Desired state of the provided backend host.
required: true
default: null
choices: [ "enabled", "disabled" ]
fail_on_not_found:
description:
- Fail whenever trying to enable/disable a backend host that does not exist
required: false
default: false
version_added: "2.2"
wait:
description:
- Wait until the server reports a status of 'UP' when `state=enabled`, or
status of 'MAINT' when `state=disabled`.
required: false
default: false
version_added: "2.0"
wait_interval:
description:
- Number of seconds to wait between retries.
required: false
default: 5
version_added: "2.0"
wait_retries:
description:
- Number of times to check for status after changing the state.
required: false
default: 25
version_added: "2.0"
weight:
description:
- The value passed in argument. If the value ends with the `%` sign, then
the new weight will be relative to the initially configured weight.
Relative weights are only permitted between 0 and 100% and absolute
weights are permitted between 0 and 256.
required: false
default: null
'''
EXAMPLES = '''
# disable server in 'www' backend pool
- haproxy: state=disabled host={{ inventory_hostname }} backend=www
# disable server without backend pool name (apply to all available backend pool)
- haproxy: state=disabled host={{ inventory_hostname }}
# disable server, provide socket file
- haproxy: state=disabled host={{ inventory_hostname }} socket=/var/run/haproxy.sock backend=www
# disable server, provide socket file, wait until status reports in maintenance
- haproxy: state=disabled host={{ inventory_hostname }} socket=/var/run/haproxy.sock backend=www wait=yes
# disable backend server in 'www' backend pool and drop open sessions to it
- haproxy: state=disabled host={{ inventory_hostname }} backend=www socket=/var/run/haproxy.sock shutdown_sessions=true
# disable server without backend pool name (apply to all available backend pool) but fail when the backend host is not found
- haproxy: state=disabled host={{ inventory_hostname }} fail_on_not_found=yes
# enable server in 'www' backend pool
- haproxy: state=enabled host={{ inventory_hostname }} backend=www
# enable server in 'www' backend pool wait until healthy
- haproxy: state=enabled host={{ inventory_hostname }} backend=www wait=yes
# enable server in 'www' backend pool wait until healthy. Retry 10 times with intervals of 5 seconds to retrieve the health
- haproxy: state=enabled host={{ inventory_hostname }} backend=www wait=yes wait_retries=10 wait_interval=5
# enable server in 'www' backend pool with change server(s) weight
- haproxy: state=enabled host={{ inventory_hostname }} socket=/var/run/haproxy.sock weight=10 backend=www
author: "Ravi Bhure (@ravibhure)"
'''
import socket
import csv
import time
from string import Template
DEFAULT_SOCKET_LOCATION="/var/run/haproxy.sock"
RECV_SIZE = 1024
ACTION_CHOICES = ['enabled', 'disabled']
WAIT_RETRIES=25
WAIT_INTERVAL=5
######################################################################
class TimeoutException(Exception):
pass
class HAProxy(object):
"""
Used for communicating with HAProxy through its local UNIX socket interface.
Perform common tasks in Haproxy related to enable server and
disable server.
The complete set of external commands Haproxy handles is documented
on their website:
http://haproxy.1wt.eu/download/1.5/doc/configuration.txt#Unix Socket commands
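    Example of the raw protocol this class wraps (hypothetical backend/server
    names), as it could be issued by hand against an admin-level socket:
        $ echo "show stat" | socat stdio /var/run/haproxy.sock
        $ echo "disable server www/web01" | socat stdio /var/run/haproxy.sock
    execute() below sends exactly such command strings and reads the reply.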
"""
def __init__(self, module):
self.module = module
self.state = self.module.params['state']
self.host = self.module.params['host']
self.backend = self.module.params['backend']
self.weight = self.module.params['weight']
self.socket = self.module.params['socket']
self.shutdown_sessions = self.module.params['shutdown_sessions']
self.fail_on_not_found = self.module.params['fail_on_not_found']
self.wait = self.module.params['wait']
self.wait_retries = self.module.params['wait_retries']
self.wait_interval = self.module.params['wait_interval']
self.command_results = {}
def execute(self, cmd, timeout=200, capture_output=True):
"""
Executes a HAProxy command by sending a message to a HAProxy's local
UNIX socket and waiting up to 'timeout' milliseconds for the response.
"""
self.client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.client.connect(self.socket)
self.client.sendall('%s\n' % cmd)
result = ''
buf = ''
buf = self.client.recv(RECV_SIZE)
while buf:
result += buf
buf = self.client.recv(RECV_SIZE)
if capture_output:
self.capture_command_output(cmd, result.strip())
self.client.close()
return result
def capture_command_output(self, cmd, output):
"""
Capture the output for a command
"""
        if 'command' not in self.command_results:
self.command_results['command'] = []
self.command_results['command'].append(cmd)
        if 'output' not in self.command_results:
self.command_results['output'] = []
self.command_results['output'].append(output)
def discover_all_backends(self):
"""
Discover all entries with svname = 'BACKEND' and return a list of their corresponding
pxnames
"""
data = self.execute('show stat', 200, False).lstrip('# ')
r = csv.DictReader(data.splitlines())
return map(lambda d: d['pxname'], filter(lambda d: d['svname'] == 'BACKEND', r))
def execute_for_backends(self, cmd, pxname, svname, wait_for_status = None):
"""
Run some command on the specified backends. If no backends are provided they will
be discovered automatically (all backends)
"""
# Discover backends if none are given
if pxname is None:
backends = self.discover_all_backends()
else:
backends = [pxname]
# Run the command for each requested backend
for backend in backends:
# Fail when backends were not found
state = self.get_state_for(backend, svname)
if (self.fail_on_not_found or self.wait) and state is None:
self.module.fail_json(msg="The specified backend '%s/%s' was not found!" % (backend, svname))
self.execute(Template(cmd).substitute(pxname = backend, svname = svname))
if self.wait:
self.wait_until_status(backend, svname, wait_for_status)
def get_state_for(self, pxname, svname):
"""
Find the state of specific services. When pxname is not set, get all backends for a specific host.
Returns a list of dictionaries containing the status and weight for those services.
"""
data = self.execute('show stat', 200, False).lstrip('# ')
r = csv.DictReader(data.splitlines())
state = map(lambda d: { 'status': d['status'], 'weight': d['weight'] }, filter(lambda d: (pxname is None or d['pxname'] == pxname) and d['svname'] == svname, r))
return state or None
def wait_until_status(self, pxname, svname, status):
"""
Wait for a service to reach the specified status. Try RETRIES times
with INTERVAL seconds of sleep in between. If the service has not reached
the expected status in that time, the module will fail. If the service was
not found, the module will fail.
"""
for i in range(1, self.wait_retries):
state = self.get_state_for(pxname, svname)
# We can assume there will only be 1 element in state because both svname and pxname are always set when we get here
if state[0]['status'] == status:
return True
else:
time.sleep(self.wait_interval)
self.module.fail_json(msg="server %s/%s not status '%s' after %d retries. Aborting." % (pxname, svname, status, self.wait_retries))
def enabled(self, host, backend, weight):
"""
        Enabled action: mark the server UP so that health checks are re-enabled.
        Also reads the current weight for the server (default) and sets the
        weight of the haproxy backend server when one is provided.
"""
cmd = "get weight $pxname/$svname; enable server $pxname/$svname"
if weight:
cmd += "; set weight $pxname/$svname %s" % weight
self.execute_for_backends(cmd, backend, host, 'UP')
def disabled(self, host, backend, shutdown_sessions):
"""
        Disabled action: mark the server DOWN for maintenance. In this mode, no
        more checks will be performed on the server until it leaves maintenance;
        it can also shut down open sessions while disabling the backend host server.
"""
cmd = "get weight $pxname/$svname; disable server $pxname/$svname"
if shutdown_sessions:
cmd += "; shutdown sessions server $pxname/$svname"
self.execute_for_backends(cmd, backend, host, 'MAINT')
def act(self):
"""
Figure out what you want to do from ansible, and then do it.
"""
# Get the state before the run
state_before = self.get_state_for(self.backend, self.host)
self.command_results['state_before'] = state_before
        # toggle enable/disable server
if self.state == 'enabled':
self.enabled(self.host, self.backend, self.weight)
elif self.state == 'disabled':
self.disabled(self.host, self.backend, self.shutdown_sessions)
else:
self.module.fail_json(msg="unknown state specified: '%s'" % self.state)
# Get the state after the run
state_after = self.get_state_for(self.backend, self.host)
self.command_results['state_after'] = state_after
# Report change status
if state_before != state_after:
self.command_results['changed'] = True
self.module.exit_json(**self.command_results)
else:
self.command_results['changed'] = False
self.module.exit_json(**self.command_results)
def main():
# load ansible module object
module = AnsibleModule(
argument_spec = dict(
state = dict(required=True, default=None, choices=ACTION_CHOICES),
host=dict(required=True, default=None),
backend=dict(required=False, default=None),
weight=dict(required=False, default=None),
socket = dict(required=False, default=DEFAULT_SOCKET_LOCATION),
shutdown_sessions=dict(required=False, default=False, type='bool'),
fail_on_not_found=dict(required=False, default=False, type='bool'),
wait=dict(required=False, default=False, type='bool'),
wait_retries=dict(required=False, default=WAIT_RETRIES, type='int'),
wait_interval=dict(required=False, default=WAIT_INTERVAL, type='int'),
),
)
    if not module.params['socket']:
module.fail_json(msg="unable to locate haproxy socket")
ansible_haproxy = HAProxy(module)
ansible_haproxy.act()
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 | -8,091,297,574,006,713,000 | 37.148148 | 169 | 0.654593 | false |
robmcl4/Coinex | arbitrage.py | 1 | 16316 | """
arbitrage.py
Check for arbitrage opportunities.
USAGE: python arbitrage.py [--all]
--all Display all arbitrage opportunities, not just profitable ones
"""
from models import *
from decimal import *
import utils
import sys
# the coinex transaction fee
TRANSAC_FEE = 0.002
# the minimum amount of to_currency required for a transaction
MIN_TRANSAC = 0.01
class SmartExchange(Exchange):
"""
Defines a SmartExchange, which can tell the current trading price
via a weighted average
"""
def __init__(self, exc):
"""
Make a new SmartExchange around the given exchange
"""
self._loaded = exc._loaded
self.id = exc.id
self.from_currency = exc.from_currency
self.to_currency = exc.to_currency
def get_orders(self):
"""
Memoize getting the orders
"""
if hasattr(self, '_orders'):
return self._orders
self._orders = super().get_orders()
return self._orders
def get_best_offer(self, target_cur):
"""
Memoize getting the best offer for a currency
"""
if hasattr(self, '_best_offers'):
if target_cur.id in self._best_offers:
return self._best_offers[target_cur.id]
else:
self._best_offers = dict()
ret = super().get_best_offer(target_cur)
self._best_offers[target_cur.id] = ret
return ret
def convert_to_other(self, amt, target_cur):
"""
        Convert the given amount of coin to the target currency using the most
        fair trade; returns the amount of the new currency
"""
amt = Decimal(amt)
if target_cur == self.to_currency:
return amt / self.get_best_offer(target_cur).rate
elif target_cur == self.from_currency:
return amt * self.get_best_offer(target_cur).rate
else:
raise ValueError(
'Unsupported currency for this exchange ' +
target_cur.abbreviation
)
def is_enough(self, amt, cur):
"""
Returns True if the given amt is enough to be traded.
amt: a Decimal of the amount to check
cur: the currency of amt
Otherwise returns False
"""
if cur == self.to_currency:
return amt > MIN_TRANSAC
elif cur == self.from_currency:
new_amt = amt / self.get_best_offer(self.to_currency).rate
return new_amt > MIN_TRANSAC
else:
raise ValueError("Invalid currency")
def max_currency(self, target_cur):
"""
Returns a Decimal of the maximum amount of currency that can
be exchanged into target_cur in units of the currency
that is not target_cur
NOTE: this accounts for the transaction fee
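        Worked example (hypothetical order book): if the best asks total 100
        units of to_currency at a rate of 0.5 from/to, the result is roughly
        100 * 0.5 * (1 - 0.002) = 49.9, in units of from_currency.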
"""
tfee = Decimal(1 - TRANSAC_FEE)
if target_cur == self.to_currency:
# we need to end up with units of from_currency
best_order = self.get_lowest_ask()
# filter out non-asks
orders = filter(
lambda x: x.bid is False,
self.get_orders()
)
# filter out orders not of the same rate
# maybe multiple orders exist?
orders = filter(
lambda x: x.rate == best_order.rate,
orders
)
# we need to return in units of from_currency
# amount is in units of to_currency
# order.rate is in from_currency per to_currency
ret = Decimal(0)
for order in orders:
ret += (order.amount - order.filled) * order.rate
return Decimal(ret * tfee)
elif target_cur == self.from_currency:
best_order = self.get_highest_bid()
# filter out non-bids
orders = filter(
lambda x: x.bid is True,
self.get_orders()
)
# filter out orders not of the same rate
orders = filter(
lambda x: x.rate == best_order.rate,
orders
)
# we need to return in units of to_currency
# balance.amount is in units of to_currency
# order.rate is in from_currency per to_currency
ret = Decimal(0)
for order in orders:
ret += order.amount - order.filled
return Decimal(ret * tfee)
raise ValueError(
'Unsupported currency for this exchange ' +
target_cur.abbreviation
)
class ArbitrageChain:
"""
Defines the series of exchanges through which an arbitrage can be run
"""
def __init__(self, ex1, ex2, ex3):
self._roi = None
self.ex1 = ex1
self.ex2 = ex2
self.ex3 = ex3
self.cur1 = ex1.from_currency
self.cur2 = ex1.to_currency
if ex2.to_currency == ex1.to_currency:
self.cur3 = ex2.from_currency
elif ex2.from_currency == ex1.to_currency:
self.cur3 = ex2.to_currency
else:
raise ValueError("Unsupported 2nd exchange combination")
# verify the third exchange's validity
ex3_curs = [ex3.to_currency, ex3.from_currency]
if not self.cur1 in ex3_curs or not self.cur3 in ex3_curs:
raise ValueError("Unsupported 3rd exchange combination")
def get_roi(self):
"""
Get the return on investment.
Returns a Decimal of the ROI or None if this chain cannot be executed
NOTE: 100% is returned as Decimal(1.0)
NOTE: this is memoized
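        Worked example (hypothetical rates): with a 0.2% fee per hop, three hops
        keep (1 - 0.002) ** 3 ~= 0.994 of the notional, so the chained exchange
        rates must gain more than roughly 0.6% for the ROI to be positive.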
"""
if self._roi is not None:
return self._roi
tfee = Decimal(1 - TRANSAC_FEE)
# we are starting with 1 unit of ex1.from_currency
amt = Decimal(1)
# make sure it is enough to convert
if not self.ex1.is_enough(amt, self.cur1):
return None
# now convert to cur2
amt = (self.ex1.convert_to_other(amt, self.cur2)) * tfee
# again make sure it is enough to convert
if not self.ex2.is_enough(amt, self.cur2):
return None
# now convert to cur3
amt = (self.ex2.convert_to_other(amt, self.cur3)) * tfee
# again make sure it is enough to convert
if not self.ex3.is_enough(amt, self.cur3):
return None
# now convert back to cur1
amt = (self.ex3.convert_to_other(amt, self.cur1)) * tfee
# let's see what we got back! return the ROI
self._roi = Decimal(amt - Decimal(1))
return self._roi
def get_max_transfer(self):
"""
        Get the max that can be transferred through this chain,
        returned in units of currency 1
NOTE: this is memoized
"""
if hasattr(self, '_max_transfer'):
return self._max_transfer
tfee = Decimal(1 - TRANSAC_FEE)
# max3 is currently in units of cur3, convert to cur1 backward
max3 = self.ex3.max_currency(target_cur=self.cur1)
# max3 is now in units of cur2
max3 = self.ex2.convert_to_other(amt=max3, target_cur=self.cur2) / tfee
# max3 is now in units of cur1
max3 = self.ex1.convert_to_other(amt=max3, target_cur=self.cur1) / tfee
# max2 is currently in units of cur2, convert to cur1 backward
max2 = self.ex2.max_currency(target_cur=self.cur3)
# max2 is now in units of cur1
max2 = self.ex1.convert_to_other(amt=max2, target_cur=self.cur1) / tfee
# max1 is currently in units of cur1
max1 = self.ex1.max_currency(target_cur=self.cur2)
ret = min(max1, max2, max3)
self._max_transfer = ret
return ret
def get_min_transfer(self):
"""
Get the least amount of cur1 that can be put through
the system
NOTE: this is memoized
"""
if hasattr(self, '_min_transfer'):
return self._min_transfer
tfee = Decimal(1 - TRANSAC_FEE)
# get the minimum of cur1 we can trade
if self.cur1 == self.ex3.to_currency:
# min1 is in units of cur1, convert backward through the chain
min1 = Decimal(MIN_TRANSAC)
min1 = self.ex3.convert_to_other(amt=min1, target_cur=self.cur3)
min1 /= tfee
min1 = self.ex2.convert_to_other(amt=min1, target_cur=self.cur2)
min1 /= tfee
min1 = self.ex1.convert_to_other(amt=min1, target_cur=self.cur1)
min1 /= tfee
else:
min1 = Decimal(0)
if self.cur3 in [self.ex2.to_currency, self.ex3.to_currency]:
min3 = Decimal(MIN_TRANSAC)
min3 = self.ex2.convert_to_other(amt=min3, target_cur=self.cur2)
min3 /= tfee
min3 = self.ex1.convert_to_other(amt=min3, target_cur=self.cur1)
min3 /= tfee
else:
min3 = Decimal(0)
if self.cur2 in [self.ex1.to_currency, self.ex2.to_currency]:
min2 = Decimal(MIN_TRANSAC)
min2 = self.ex1.convert_to_other(amt=min2, target_cur=self.cur1)
min2 /= tfee
else:
min2 = Decimal(0)
ret = max(min1, min2, min3)
self._min_transfer = ret
return ret
def can_execute(self):
"""
Returns true if the user currently has some of the first currency and
this chain's max is greater than the min.
NOTE: this memoizes the wallet balances
"""
if self.get_min_transfer() >= self.get_max_transfer():
return False
if not hasattr(ArbitrageChain, '_bals') or not ArbitrageChain._bals:
ArbitrageChain._bals = Wallet.get_balances()
for bal in ArbitrageChain._bals:
if bal.currency == self.cur1 and bal.amount > 0:
print("{0} {1}".format(bal.currency.abbreviation, bal.amount))
return True
return False
def perform_chain_operation(self, amt, target_cur, exchange):
"""
        Trade the given amount (denominated in the currency that is not target_cur) over the exchange.
Returns the amount of target_cur that we now have
"""
tfee = Decimal(1 - Decimal(TRANSAC_FEE))
from_cur = exchange.from_currency
if exchange.from_currency == target_cur:
from_cur = exchange.to_currency
best = exchange.get_best_offer(target_cur)
print('Buying {0} of {1}'.format(
str(amt),
target_cur.abbreviation
))
# amount must always be in terms of the 'to_currency',
# convert if needed
if exchange.to_currency != from_cur:
            amt = exchange.convert_to_other(amt, target_cur)
ordr = best.get_compliment(max_amt=amt)
try:
ordr.submit()
except Exception as e:
if hasattr(e, 'read'):
print(e.read())
raise e
if (ordr.complete is not True):
print("waiting for order to complete")
ordr = utils.wait_for_order_to_complete(ordr.id)
amt *= tfee
print("now have {0} of {1}".format(
str(amt),
target_cur.abbreviation
))
return amt
def execute(self):
"""
Perform the trades necessary to complete this chain
"""
while True:
try:
amt = input("How much currency to use? ({0}) ".format(
self.cur1.abbreviation
))
amt = Decimal(amt)
break
except InvalidOperation:
print("Invalid amount. Enter again.")
amt = self.perform_chain_operation(
amt,
self.cur2,
self.ex1
)
amt = self.perform_chain_operation(
amt,
self.cur3,
self.ex2
)
amt = self.perform_chain_operation(
amt,
self.cur1,
self.ex3
)
# reset the record of balances
ArbitrageChain._bals = None
print("finished")
def __str__(self):
ret = ''
ret += self.cur1.abbreviation.rjust(4)
ret += ' -> '
ret += self.cur2.abbreviation.rjust(4)
ret += ' -> '
ret += self.cur3.abbreviation.rjust(4)
ret += ' -> '
ret += self.cur1.abbreviation.rjust(4)
roi = self.get_roi()
if roi:
ret += ' ({0})%'.format(str(roi * 100))
else:
ret += ' (Not Exchangeable)'
        ret += ' ({0} to {1} {2})'.format(
            str(self.get_min_transfer()),
            str(self.get_max_transfer()),
self.cur1.abbreviation
)
ret += '\n'
def describe_exchange(ex, to_currency):
return '-> {0} {1}/{2}'.format(
ex.get_best_offer(to_currency).rate,
ex.from_currency.abbreviation,
ex.to_currency.abbreviation
)
ret += describe_exchange(self.ex1, self.cur2) + '\n'
ret += describe_exchange(self.ex2, self.cur3) + '\n'
ret += describe_exchange(self.ex3, self.cur1)
return ret
def offer_execute_chain(chain):
"""
Ask the user if they would like to execute a given chain. If they
answer positively, the chain is executed
"""
answer = input("Would you like to execute this chain? (y/N) ")
if answer.lower() in ['y', 'yes']:
chain.execute()
else:
print("Not executing chain")
def valid(exc, cur1, cur2=None, exclude=None, exclude_cur=None):
"""
    Return True if the given exchange involves currency 1 (and currency 2, if
    given), is not the excluded exchange, and does not involve the excluded currency
"""
if exclude is not None and exc == exclude:
return False
curs = [exc.to_currency, exc.from_currency]
if exclude_cur is not None and exclude_cur in curs:
return False
if cur2 is not None:
return cur1 in curs and cur2 in curs
return cur1 in curs
def get_chains():
"""
Get a list of all arbitrage chains
"""
excs = Exchange.get_all()
ret = []
for ex1 in excs:
exld = ex1.from_currency
viable1 = filter(
lambda x: valid(x, ex1.to_currency, exclude=ex1, exclude_cur=exld),
excs
)
for ex2 in viable1:
if ex2.to_currency == ex1.to_currency:
cur = ex2.from_currency
else:
cur = ex2.to_currency
viable2 = filter(
lambda x: valid(x, cur, ex1.from_currency, ex2),
excs
)
for ex3 in viable2:
ex1 = SmartExchange(ex1)
ex2 = SmartExchange(ex2)
ex3 = SmartExchange(ex3)
ret.append(ArbitrageChain(ex1, ex2, ex3))
return ret
def get_profitable_chains(len_cb=None, iter_cb=None):
"""
    Get a list of all profitable arbitrage chains
"""
chains = get_chains()
if len_cb:
len_cb(len(chains))
for chain in chains:
if iter_cb:
iter_cb()
roi = chain.get_roi()
if roi and roi > 0:
yield chain
def show_all():
"""
Print out all possible arbitrages, regardless of profit
"""
print("-------Getting All Chains-------")
chains = get_chains()
for chain in chains:
print(str(chain))
if chain.can_execute():
offer_execute_chain(chain)
else:
print('This chain cannot be executed')
print('Found {0} arbitrage chains'.format(len(chains)))
def show_profitable():
"""
Print out only profitable arbitrages
"""
print("-------Getting Profitable Chains-------")
chains = get_profitable_chains()
n = 0
for chain in chains:
print(str(chain))
if chain.can_execute():
offer_execute_chain(chain)
else:
print('This chain cannot be executed')
n += 1
print('Found {0} arbitrage chains'.format(n))
def main():
try:
if '--all' in sys.argv:
show_all()
else:
show_profitable()
except KeyboardInterrupt:
print("Exiting")
if __name__ == '__main__':
main()
| apache-2.0 | -8,020,515,266,900,259,000 | 30.867188 | 79 | 0.550625 | false |
KingsCross/Quadrotor | Tools/autotest/dump_logs.py | 229 | 1762 | #!/usr/bin/env python
# dump flash logs from SITL
# Andrew Tridgell, April 2013
import pexpect, os, sys, shutil, atexit
import optparse, fnmatch, time, glob, traceback, signal
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pysim'))
import util
############## main program #############
parser = optparse.OptionParser(sys.argv[0])
parser.add_option("--cli", action='store_true', default=False, help='put us in the CLI menu in logs')
opts, args = parser.parse_args()
os.environ['PYTHONUNBUFFERED'] = '1'
def dump_logs(atype):
'''dump DataFlash logs'''
logfile = '%s.log' % atype
print("Dumping logs for %s to %s" % (atype, logfile))
sil = util.start_SIL(atype)
log = open(logfile, mode='w')
mavproxy = util.start_MAVProxy_SIL(atype, setup=True, logfile=log)
mavproxy.send('\n\n\n')
print("navigating menus")
mavproxy.expect(']')
mavproxy.send("logs\n")
if opts.cli:
mavproxy.interact()
return
mavproxy.expect("logs enabled:")
lognums = []
i = mavproxy.expect(["No logs", "(\d+) logs"])
if i == 0:
numlogs = 0
else:
numlogs = int(mavproxy.match.group(1))
for i in range(numlogs):
mavproxy.expect("Log (\d+)")
lognums.append(int(mavproxy.match.group(1)))
mavproxy.expect("Log]")
for i in range(numlogs):
print("Dumping log %u (i=%u)" % (lognums[i], i))
mavproxy.send("dump %u\n" % lognums[i])
mavproxy.expect("logs enabled:", timeout=120)
mavproxy.expect("Log]")
util.pexpect_close(mavproxy)
util.pexpect_close(sil)
log.close()
print("Saved log for %s to %s" % (atype, logfile))
return True
vehicle = os.path.basename(os.getcwd())
dump_logs(vehicle)
| gpl-3.0 | 597,518,460,353,266,800 | 29.37931 | 101 | 0.61975 | false |
seaotterman/tensorflow | tensorflow/contrib/linalg/python/ops/linear_operator_addition.py | 30 | 15248 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Add one or more `LinearOperators` efficiently."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib.linalg.python.ops import linear_operator
from tensorflow.contrib.linalg.python.ops import linear_operator_diag
from tensorflow.contrib.linalg.python.ops import linear_operator_full_matrix
from tensorflow.contrib.linalg.python.ops import linear_operator_identity
from tensorflow.contrib.linalg.python.ops import linear_operator_tril
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
__all__ = []
def add_operators(operators,
operator_name=None,
addition_tiers=None,
name=None):
"""Efficiently add one or more linear operators.
Given operators `[A1, A2,...]`, this `Op` returns a possibly shorter list of
operators `[B1, B2,...]` such that
```sum_k Ak.apply(x) = sum_k Bk.apply(x).```
The operators `Bk` result by adding some of the `Ak`, as allowed by
`addition_tiers`.
Example of efficient adding of diagonal operators.
```python
A1 = LinearOperatorDiag(diag=[1., 1.], name="A1")
A2 = LinearOperatorDiag(diag=[2., 2.], name="A2")
# Use two tiers, the first contains an Adder that returns Diag. Since both
# A1 and A2 are Diag, they can use this Adder. The second tier will not be
# used.
addition_tiers = [
[_AddAndReturnDiag()],
[_AddAndReturnMatrix()]]
B_list = add_operators([A1, A2], addition_tiers=addition_tiers)
len(B_list)
==> 1
B_list[0].__class__.__name__
==> 'LinearOperatorDiag'
B_list[0].to_dense()
==> [[3., 0.],
[0., 3.]]
B_list[0].name
==> 'Add/A1__A2/'
```
Args:
operators: Iterable of `LinearOperator` objects with same `dtype`, domain
and range dimensions, and broadcastable batch shapes.
operator_name: String name for returned `LinearOperator`. Defaults to
concatenation of "Add/A__B/" that indicates the order of addition steps.
addition_tiers: List tiers, like `[tier_0, tier_1, ...]`, where `tier_i`
is a list of `Adder` objects. This function attempts to do all additions
in tier `i` before trying tier `i + 1`.
name: A name for this `Op`. Defaults to `add_operators`.
Returns:
Subclass of `LinearOperator`. Class and order of addition may change as new
(and better) addition strategies emerge.
Raises:
ValueError: If `operators` argument is empty.
ValueError: If shapes are incompatible.
"""
# Default setting
if addition_tiers is None:
addition_tiers = _DEFAULT_ADDITION_TIERS
# Argument checking.
check_ops.assert_proper_iterable(operators)
operators = list(reversed(operators))
if len(operators) < 1:
raise ValueError(
"Argument 'operators' must contain at least one operator. "
"Found: %s" % operators)
if not all(
isinstance(op, linear_operator.LinearOperator) for op in operators):
raise TypeError(
"Argument 'operators' must contain only LinearOperator instances. "
"Found: %s" % operators)
_static_check_for_same_dimensions(operators)
_static_check_for_broadcastable_batch_shape(operators)
graph_parents = []
for operator in operators:
graph_parents.extend(operator.graph_parents)
with ops.name_scope(name or "add_operators", values=graph_parents):
# Additions done in one of the tiers. Try tier 0, 1,...
ops_to_try_at_next_tier = list(operators)
for tier in addition_tiers:
ops_to_try_at_this_tier = ops_to_try_at_next_tier
ops_to_try_at_next_tier = []
while ops_to_try_at_this_tier:
op1 = ops_to_try_at_this_tier.pop()
op2, adder = _pop_a_match_at_tier(op1, ops_to_try_at_this_tier, tier)
if op2 is not None:
# Will try to add the result of this again at this same tier.
new_operator = adder.add(op1, op2, operator_name)
ops_to_try_at_this_tier.append(new_operator)
else:
ops_to_try_at_next_tier.append(op1)
return ops_to_try_at_next_tier
def _pop_a_match_at_tier(op1, operator_list, tier):
# Search from the back of list to the front in order to create nice default
# order of operations.
for i in range(1, len(operator_list) + 1):
op2 = operator_list[-i]
for adder in tier:
if adder.can_add(op1, op2):
return operator_list.pop(-i), adder
return None, None
def _infer_hints_allowing_override(op1, op2, hints):
"""Infer hints from op1 and op2. hints argument is an override.
Args:
op1: LinearOperator
op2: LinearOperator
hints: _Hints object holding "is_X" boolean hints to use for returned
operator.
If some hint is None, try to set using op1 and op2. If the
hint is provided, ignore op1 and op2 hints. This allows an override
of previous hints, but does not allow forbidden hints (e.g. you still
cannot say a real diagonal operator is not self-adjoint.
Returns:
_Hints object.
"""
hints = hints or _Hints()
# If A, B are self-adjoint, then so is A + B.
if hints.is_self_adjoint is None:
is_self_adjoint = op1.is_self_adjoint and op2.is_self_adjoint
else:
is_self_adjoint = hints.is_self_adjoint
# If A, B are positive definite, then so is A + B.
if hints.is_positive_definite is None:
is_positive_definite = op1.is_positive_definite and op2.is_positive_definite
else:
is_positive_definite = hints.is_positive_definite
# A positive definite operator is always non-singular.
if is_positive_definite and hints.is_positive_definite is None:
is_non_singular = True
else:
is_non_singular = hints.is_non_singular
return _Hints(
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite)
def _static_check_for_same_dimensions(operators):
"""ValueError if operators determined to have different dimensions."""
if len(operators) < 2:
return
domain_dimensions = [(op.name, op.domain_dimension.value) for op in operators
if op.domain_dimension.value is not None]
if len(set(value for name, value in domain_dimensions)) > 1:
raise ValueError("Operators must have the same domain dimension. Found: %s"
% domain_dimensions)
range_dimensions = [(op.name, op.range_dimension.value) for op in operators
if op.range_dimension.value is not None]
if len(set(value for name, value in range_dimensions)) > 1:
raise ValueError("Operators must have the same range dimension. Found: %s" %
range_dimensions)
def _static_check_for_broadcastable_batch_shape(operators):
"""ValueError if operators determined to have non-broadcastable shapes."""
if len(operators) < 2:
return
# This will fail if they cannot be broadcast together.
batch_shape = operators[0].batch_shape
for op in operators[1:]:
batch_shape = array_ops.broadcast_static_shape(batch_shape, op.batch_shape)
class _Hints(object):
"""Holds 'is_X' flags that every LinearOperator is initialized with."""
def __init__(self,
is_non_singular=None,
is_positive_definite=None,
is_self_adjoint=None):
self.is_non_singular = is_non_singular
self.is_positive_definite = is_positive_definite
self.is_self_adjoint = is_self_adjoint
################################################################################
# Classes to add two linear operators.
################################################################################
@six.add_metaclass(abc.ABCMeta)
class _Adder(object):
"""Abstract base class to add two operators.
Each `Adder` acts independently, adding everything it can, paying no attention
  to whether another `Adder` could have done the addition more efficiently.
"""
@property
def name(self):
return self.__class__.__name__
@abc.abstractmethod
def can_add(self, op1, op2):
"""Returns `True` if this `Adder` can add `op1` and `op2`. Else `False`."""
pass
@abc.abstractmethod
def _add(self, op1, op2, operator_name, hints):
# Derived classes can assume op1 and op2 have been validated, e.g. they have
# the same dtype, and their domain/range dimensions match.
pass
def add(self, op1, op2, operator_name, hints=None):
"""Return new `LinearOperator` acting like `op1 + op2`.
Args:
op1: `LinearOperator`
op2: `LinearOperator`, with `shape` and `dtype` such that adding to
`op1` is allowed.
operator_name: `String` name to give to returned `LinearOperator`
hints: `_Hints` object. Returned `LinearOperator` will be created with
these hints.
Returns:
`LinearOperator`
"""
updated_hints = _infer_hints_allowing_override(op1, op2, hints)
if operator_name is None:
operator_name = "Add/" + op1.name + "__" + op2.name + "/"
values = op1.graph_parents + op2.graph_parents
scope_name = self.name
if scope_name.startswith("_"):
scope_name = scope_name[1:]
with ops.name_scope(scope_name, values=values):
return self._add(op1, op2, operator_name, updated_hints)
class _AddAndReturnScaledIdentity(_Adder):
"""Handles additions resulting in an Identity family member.
The Identity (`LinearOperatorScaledIdentity`, `LinearOperatorIdentity`) family
  is closed under addition. This `Adder` respects that, and returns an Identity
  family member.
"""
def can_add(self, op1, op2):
types = {_type(op1), _type(op2)}
return not types.difference(_IDENTITY_FAMILY)
def _add(self, op1, op2, operator_name, hints):
# Will build a LinearOperatorScaledIdentity.
if _type(op1) == _SCALED_IDENTITY:
multiplier_1 = op1.multiplier
else:
multiplier_1 = array_ops.ones(op1.batch_shape_tensor(), dtype=op1.dtype)
if _type(op2) == _SCALED_IDENTITY:
multiplier_2 = op2.multiplier
else:
multiplier_2 = array_ops.ones(op2.batch_shape_tensor(), dtype=op2.dtype)
return linear_operator_identity.LinearOperatorScaledIdentity(
num_rows=op1.range_dimension_tensor(),
multiplier=multiplier_1 + multiplier_2,
is_non_singular=hints.is_non_singular,
is_self_adjoint=hints.is_self_adjoint,
is_positive_definite=hints.is_positive_definite,
name=operator_name)
class _AddAndReturnDiag(_Adder):
"""Handles additions resulting in a Diag operator."""
def can_add(self, op1, op2):
types = {_type(op1), _type(op2)}
return not types.difference(_DIAG_LIKE)
def _add(self, op1, op2, operator_name, hints):
return linear_operator_diag.LinearOperatorDiag(
diag=op1.diag_part() + op2.diag_part(),
is_non_singular=hints.is_non_singular,
is_self_adjoint=hints.is_self_adjoint,
is_positive_definite=hints.is_positive_definite,
name=operator_name)
class _AddAndReturnTriL(_Adder):
"""Handles additions resulting in a TriL operator."""
def can_add(self, op1, op2):
types = {_type(op1), _type(op2)}
return not types.difference(_DIAG_LIKE.union({_TRIL}))
def _add(self, op1, op2, operator_name, hints):
if _type(op1) in _EFFICIENT_ADD_TO_TENSOR:
op_add_to_tensor, op_other = op1, op2
else:
op_add_to_tensor, op_other = op2, op1
return linear_operator_tril.LinearOperatorTriL(
tril=op_add_to_tensor.add_to_tensor(op_other.to_dense()),
is_non_singular=hints.is_non_singular,
is_self_adjoint=hints.is_self_adjoint,
is_positive_definite=hints.is_positive_definite,
name=operator_name)
class _AddAndReturnMatrix(_Adder):
""""Handles additions resulting in a `LinearOperatorFullMatrix`."""
def can_add(self, op1, op2): # pylint: disable=unused-argument
return isinstance(op1, linear_operator.LinearOperator) and isinstance(
op2, linear_operator.LinearOperator)
def _add(self, op1, op2, operator_name, hints):
if _type(op1) in _EFFICIENT_ADD_TO_TENSOR:
op_add_to_tensor, op_other = op1, op2
else:
op_add_to_tensor, op_other = op2, op1
return linear_operator_full_matrix.LinearOperatorFullMatrix(
matrix=op_add_to_tensor.add_to_tensor(op_other.to_dense()),
is_non_singular=hints.is_non_singular,
is_self_adjoint=hints.is_self_adjoint,
is_positive_definite=hints.is_positive_definite,
name=operator_name)
################################################################################
# Constants designating types of LinearOperators
################################################################################
# Type name constants for LinearOperator classes.
_IDENTITY = "identity"
_SCALED_IDENTITY = "scaled_identity"
_DIAG = "diag"
_TRIL = "tril"
_MATRIX = "matrix"
# Groups of operators.
_DIAG_LIKE = {_DIAG, _IDENTITY, _SCALED_IDENTITY}
_IDENTITY_FAMILY = {_IDENTITY, _SCALED_IDENTITY}
# operators with an efficient .add_to_tensor() method.
_EFFICIENT_ADD_TO_TENSOR = _DIAG_LIKE
def _type(operator):
"""Returns the type name constant (e.g. _TRIL) for operator."""
if isinstance(operator, linear_operator_diag.LinearOperatorDiag):
return _DIAG
if isinstance(operator, linear_operator_tril.LinearOperatorTriL):
return _TRIL
if isinstance(operator, linear_operator_full_matrix.LinearOperatorFullMatrix):
return _MATRIX
if isinstance(operator, linear_operator_identity.LinearOperatorIdentity):
return _IDENTITY
if isinstance(operator,
linear_operator_identity.LinearOperatorScaledIdentity):
return _SCALED_IDENTITY
raise TypeError("Operator type unknown: %s" % operator)
################################################################################
# Addition tiers:
# We attempt to use Adders in tier K before K+1.
#
# Organize tiers to
# (i) reduce O(..) complexity of forming final operator, and
# (ii) produce the "most efficient" final operator.
# Dev notes:
# * Results of addition at tier K will be added at tier K or higher.
# * Tiers may change, and we warn the user that it may change.
################################################################################
# Note that the final tier, _AddAndReturnMatrix, will convert everything to a
# dense matrix. So it is sometimes very inefficient.
_DEFAULT_ADDITION_TIERS = [
[_AddAndReturnScaledIdentity()],
[_AddAndReturnDiag()],
[_AddAndReturnTriL()],
[_AddAndReturnMatrix()],
]
| apache-2.0 | -4,139,310,233,372,339,000 | 34.37819 | 80 | 0.658709 | false |
vicnet/weboob | modules/meslieuxparis/module.py | 1 | 1444 | # -*- coding: utf-8 -*-
# Copyright(C) 2018 Vincent A
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from weboob.tools.backend import Module
from weboob.capabilities.contact import CapDirectory
from .browser import MeslieuxparisBrowser
__all__ = ['MeslieuxparisModule']
class MeslieuxparisModule(Module, CapDirectory):
NAME = 'meslieuxparis'
DESCRIPTION = 'MesLieux public Paris places'
MAINTAINER = 'Vincent A'
EMAIL = '[email protected]'
LICENSE = 'AGPLv3+'
VERSION = '1.6'
BROWSER = MeslieuxparisBrowser
def search_contacts(self, query, sortby):
if query.city and query.city.lower() != 'paris':
return []
return self.browser.search_contacts(query.name.lower())
| lgpl-3.0 | 4,421,024,983,590,817,300 | 31.088889 | 77 | 0.723684 | false |
PLyczkowski/Sticky-Keymap | 2.74/python/lib/encodings/mac_centeuro.py | 257 | 14102 | """ Python Character Mapping Codec mac_centeuro generated from 'MAPPINGS/VENDORS/APPLE/CENTEURO.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-centeuro',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> CONTROL CHARACTER
'\x01' # 0x01 -> CONTROL CHARACTER
'\x02' # 0x02 -> CONTROL CHARACTER
'\x03' # 0x03 -> CONTROL CHARACTER
'\x04' # 0x04 -> CONTROL CHARACTER
'\x05' # 0x05 -> CONTROL CHARACTER
'\x06' # 0x06 -> CONTROL CHARACTER
'\x07' # 0x07 -> CONTROL CHARACTER
'\x08' # 0x08 -> CONTROL CHARACTER
'\t' # 0x09 -> CONTROL CHARACTER
'\n' # 0x0A -> CONTROL CHARACTER
'\x0b' # 0x0B -> CONTROL CHARACTER
'\x0c' # 0x0C -> CONTROL CHARACTER
'\r' # 0x0D -> CONTROL CHARACTER
'\x0e' # 0x0E -> CONTROL CHARACTER
'\x0f' # 0x0F -> CONTROL CHARACTER
'\x10' # 0x10 -> CONTROL CHARACTER
'\x11' # 0x11 -> CONTROL CHARACTER
'\x12' # 0x12 -> CONTROL CHARACTER
'\x13' # 0x13 -> CONTROL CHARACTER
'\x14' # 0x14 -> CONTROL CHARACTER
'\x15' # 0x15 -> CONTROL CHARACTER
'\x16' # 0x16 -> CONTROL CHARACTER
'\x17' # 0x17 -> CONTROL CHARACTER
'\x18' # 0x18 -> CONTROL CHARACTER
'\x19' # 0x19 -> CONTROL CHARACTER
'\x1a' # 0x1A -> CONTROL CHARACTER
'\x1b' # 0x1B -> CONTROL CHARACTER
'\x1c' # 0x1C -> CONTROL CHARACTER
'\x1d' # 0x1D -> CONTROL CHARACTER
'\x1e' # 0x1E -> CONTROL CHARACTER
'\x1f' # 0x1F -> CONTROL CHARACTER
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> CONTROL CHARACTER
'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\u0100' # 0x81 -> LATIN CAPITAL LETTER A WITH MACRON
'\u0101' # 0x82 -> LATIN SMALL LETTER A WITH MACRON
'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0104' # 0x84 -> LATIN CAPITAL LETTER A WITH OGONEK
'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
'\u0105' # 0x88 -> LATIN SMALL LETTER A WITH OGONEK
'\u010c' # 0x89 -> LATIN CAPITAL LETTER C WITH CARON
'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
'\u010d' # 0x8B -> LATIN SMALL LETTER C WITH CARON
'\u0106' # 0x8C -> LATIN CAPITAL LETTER C WITH ACUTE
'\u0107' # 0x8D -> LATIN SMALL LETTER C WITH ACUTE
'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
'\u0179' # 0x8F -> LATIN CAPITAL LETTER Z WITH ACUTE
'\u017a' # 0x90 -> LATIN SMALL LETTER Z WITH ACUTE
'\u010e' # 0x91 -> LATIN CAPITAL LETTER D WITH CARON
'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
'\u010f' # 0x93 -> LATIN SMALL LETTER D WITH CARON
'\u0112' # 0x94 -> LATIN CAPITAL LETTER E WITH MACRON
'\u0113' # 0x95 -> LATIN SMALL LETTER E WITH MACRON
'\u0116' # 0x96 -> LATIN CAPITAL LETTER E WITH DOT ABOVE
'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
'\u0117' # 0x98 -> LATIN SMALL LETTER E WITH DOT ABOVE
'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
'\u011a' # 0x9D -> LATIN CAPITAL LETTER E WITH CARON
'\u011b' # 0x9E -> LATIN SMALL LETTER E WITH CARON
'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
'\u2020' # 0xA0 -> DAGGER
'\xb0' # 0xA1 -> DEGREE SIGN
'\u0118' # 0xA2 -> LATIN CAPITAL LETTER E WITH OGONEK
'\xa3' # 0xA3 -> POUND SIGN
'\xa7' # 0xA4 -> SECTION SIGN
'\u2022' # 0xA5 -> BULLET
'\xb6' # 0xA6 -> PILCROW SIGN
'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
'\xae' # 0xA8 -> REGISTERED SIGN
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\u2122' # 0xAA -> TRADE MARK SIGN
'\u0119' # 0xAB -> LATIN SMALL LETTER E WITH OGONEK
'\xa8' # 0xAC -> DIAERESIS
'\u2260' # 0xAD -> NOT EQUAL TO
'\u0123' # 0xAE -> LATIN SMALL LETTER G WITH CEDILLA
'\u012e' # 0xAF -> LATIN CAPITAL LETTER I WITH OGONEK
'\u012f' # 0xB0 -> LATIN SMALL LETTER I WITH OGONEK
'\u012a' # 0xB1 -> LATIN CAPITAL LETTER I WITH MACRON
'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
'\u0136' # 0xB5 -> LATIN CAPITAL LETTER K WITH CEDILLA
'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
'\u2211' # 0xB7 -> N-ARY SUMMATION
'\u0142' # 0xB8 -> LATIN SMALL LETTER L WITH STROKE
'\u013b' # 0xB9 -> LATIN CAPITAL LETTER L WITH CEDILLA
'\u013c' # 0xBA -> LATIN SMALL LETTER L WITH CEDILLA
'\u013d' # 0xBB -> LATIN CAPITAL LETTER L WITH CARON
'\u013e' # 0xBC -> LATIN SMALL LETTER L WITH CARON
'\u0139' # 0xBD -> LATIN CAPITAL LETTER L WITH ACUTE
'\u013a' # 0xBE -> LATIN SMALL LETTER L WITH ACUTE
'\u0145' # 0xBF -> LATIN CAPITAL LETTER N WITH CEDILLA
'\u0146' # 0xC0 -> LATIN SMALL LETTER N WITH CEDILLA
'\u0143' # 0xC1 -> LATIN CAPITAL LETTER N WITH ACUTE
'\xac' # 0xC2 -> NOT SIGN
'\u221a' # 0xC3 -> SQUARE ROOT
'\u0144' # 0xC4 -> LATIN SMALL LETTER N WITH ACUTE
'\u0147' # 0xC5 -> LATIN CAPITAL LETTER N WITH CARON
'\u2206' # 0xC6 -> INCREMENT
'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
'\xa0' # 0xCA -> NO-BREAK SPACE
'\u0148' # 0xCB -> LATIN SMALL LETTER N WITH CARON
'\u0150' # 0xCC -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
'\u0151' # 0xCE -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
'\u014c' # 0xCF -> LATIN CAPITAL LETTER O WITH MACRON
'\u2013' # 0xD0 -> EN DASH
'\u2014' # 0xD1 -> EM DASH
'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
'\xf7' # 0xD6 -> DIVISION SIGN
'\u25ca' # 0xD7 -> LOZENGE
'\u014d' # 0xD8 -> LATIN SMALL LETTER O WITH MACRON
'\u0154' # 0xD9 -> LATIN CAPITAL LETTER R WITH ACUTE
'\u0155' # 0xDA -> LATIN SMALL LETTER R WITH ACUTE
'\u0158' # 0xDB -> LATIN CAPITAL LETTER R WITH CARON
'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\u0159' # 0xDE -> LATIN SMALL LETTER R WITH CARON
'\u0156' # 0xDF -> LATIN CAPITAL LETTER R WITH CEDILLA
'\u0157' # 0xE0 -> LATIN SMALL LETTER R WITH CEDILLA
'\u0160' # 0xE1 -> LATIN CAPITAL LETTER S WITH CARON
'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
'\u0161' # 0xE4 -> LATIN SMALL LETTER S WITH CARON
'\u015a' # 0xE5 -> LATIN CAPITAL LETTER S WITH ACUTE
'\u015b' # 0xE6 -> LATIN SMALL LETTER S WITH ACUTE
'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
'\u0164' # 0xE8 -> LATIN CAPITAL LETTER T WITH CARON
'\u0165' # 0xE9 -> LATIN SMALL LETTER T WITH CARON
'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
'\u017d' # 0xEB -> LATIN CAPITAL LETTER Z WITH CARON
'\u017e' # 0xEC -> LATIN SMALL LETTER Z WITH CARON
'\u016a' # 0xED -> LATIN CAPITAL LETTER U WITH MACRON
'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\u016b' # 0xF0 -> LATIN SMALL LETTER U WITH MACRON
'\u016e' # 0xF1 -> LATIN CAPITAL LETTER U WITH RING ABOVE
'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
'\u016f' # 0xF3 -> LATIN SMALL LETTER U WITH RING ABOVE
'\u0170' # 0xF4 -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
'\u0171' # 0xF5 -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
'\u0172' # 0xF6 -> LATIN CAPITAL LETTER U WITH OGONEK
'\u0173' # 0xF7 -> LATIN SMALL LETTER U WITH OGONEK
'\xdd' # 0xF8 -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xfd' # 0xF9 -> LATIN SMALL LETTER Y WITH ACUTE
'\u0137' # 0xFA -> LATIN SMALL LETTER K WITH CEDILLA
'\u017b' # 0xFB -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\u0141' # 0xFC -> LATIN CAPITAL LETTER L WITH STROKE
'\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\u0122' # 0xFE -> LATIN CAPITAL LETTER G WITH CEDILLA
'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| gpl-2.0 | 2,331,040,439,172,502,500 | 44.934853 | 118 | 0.553255 | false |
lcostantino/healing-os | healing/openstack/common/log.py | 1 | 26305 | # Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""OpenStack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import re
import sys
import traceback
from oslo.config import cfg
import six
from six import moves
from healing.openstack.common.gettextutils import _
from healing.openstack.common import importutils
from healing.openstack.common import jsonutils
from healing.openstack.common import local
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
# to the list of _SANITIZE_KEYS and we can generate regular expressions
# for XML and JSON automatically.
_SANITIZE_PATTERNS = []
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
r'(<%(key)s>).*?(</%(key)s>)',
r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])',
r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?[\'"])'
'.*?([\'"])',
r'(%(key)s\s*--?[A-z]+\s*).*?([\s])']
for key in _SANITIZE_KEYS:
for pattern in _FORMAT_PATTERNS:
reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
_SANITIZE_PATTERNS.append(reg_ex)
common_cli_opts = [
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output (set logging level to '
'DEBUG instead of default WARNING level).'),
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output (set logging level to '
'INFO instead of default WARNING level).'),
]
logging_cli_opts = [
cfg.StrOpt('log-config-append',
metavar='PATH',
deprecated_name='log-config',
help='The name of a logging configuration file. This file '
'is appended to any existing logging configuration '
'files. For details about logging configuration files, '
'see the Python logging module documentation.'),
cfg.StrOpt('log-format',
default=None,
metavar='FORMAT',
help='DEPRECATED. '
'A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. '
'This option is deprecated. Please use '
'logging_context_format_string and '
'logging_default_format_string instead.'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s .'),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to output to. '
'If no default is set, logging will go to stdout.'),
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative '
'--log-file paths.'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging. '
'Existing syslog format is DEPRECATED during I, '
                     'and will change in J to honor RFC5424.'),
cfg.BoolOpt('use-syslog-rfc-format',
# TODO(bogdando) remove or use True after existing
# syslog format deprecation in J
default=False,
help='(Optional) Enables or disables syslog rfc5424 format '
'for logging. If enabled, prefixes the MSG part of the '
'syslog message with APP-NAME (RFC5424). The '
'format without the APP-NAME is deprecated in I, '
'and will be removed in J.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='Syslog facility to receive log lines.')
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error.')
]
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user_identity)s] '
'%(instance)s%(message)s',
help='Format string to use for log messages with context.'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='Format string to use for log messages without context.'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='Data to append to log format when level is DEBUG.'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='Prefix each line of exception output with this format.'),
cfg.ListOpt('default_log_levels',
default=[
'amqp=WARN',
'amqplib=WARN',
'boto=WARN',
'qpid=WARN',
'sqlalchemy=WARN',
'suds=INFO',
'oslo.messaging=INFO',
'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN'
],
help='List of logger=LEVEL pairs.'),
cfg.BoolOpt('publish_errors',
default=False,
help='Enables or disables publication of error events.'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='Enables or disables fatal status of deprecations.'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='The format for an instance that is passed with the log '
'message. '),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='The format for an instance UUID that is passed with the '
'log message. '),
]
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
try:
NullHandler = logging.NullHandler
except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
logfile = CONF.log_file
logdir = CONF.log_dir
if logfile and not logdir:
return logfile
if logfile and logdir:
return os.path.join(logdir, logfile)
if logdir:
binary = binary or _get_binary_name()
return '%s.log' % (os.path.join(logdir, binary),)
return None
def mask_password(message, secret="***"):
"""Replace password with 'secret' in message.
:param message: The string which includes security information.
:param secret: value with which to replace passwords.
:returns: The unicode value of message with the password fields masked.
For example:
>>> mask_password("'adminPass' : 'aaaaa'")
"'adminPass' : '***'"
>>> mask_password("'admin_pass' : 'aaaaa'")
"'admin_pass' : '***'"
>>> mask_password('"password" : "aaaaa"')
'"password" : "***"'
>>> mask_password("'original_password' : 'aaaaa'")
"'original_password' : '***'"
>>> mask_password("u'original_password' : u'aaaaa'")
"u'original_password' : u'***'"
"""
message = six.text_type(message)
# NOTE(ldbragst): Check to see if anything in message contains any key
# specified in _SANITIZE_KEYS, if not then just return the message since
# we don't have to mask any passwords.
if not any(key in message for key in _SANITIZE_KEYS):
return message
secret = r'\g<1>' + secret + r'\g<2>'
for pattern in _SANITIZE_PATTERNS:
message = re.sub(pattern, secret, message)
return message
class BaseLoggerAdapter(logging.LoggerAdapter):
def audit(self, msg, *args, **kwargs):
self.log(logging.AUDIT, msg, *args, **kwargs)
class LazyAdapter(BaseLoggerAdapter):
def __init__(self, name='unknown', version='unknown'):
self._logger = None
self.extra = {}
self.name = name
self.version = version
@property
def logger(self):
if not self._logger:
self._logger = getLogger(self.name, self.version)
return self._logger
class ContextAdapter(BaseLoggerAdapter):
warn = logging.LoggerAdapter.warning
def __init__(self, logger, project_name, version_string):
self.logger = logger
self.project = project_name
self.version = version_string
self._deprecated_messages_sent = dict()
@property
def handlers(self):
return self.logger.handlers
def deprecated(self, msg, *args, **kwargs):
"""Call this method when a deprecated feature is used.
If the system is configured for fatal deprecations then the message
is logged at the 'critical' level and :class:`DeprecatedConfig` will
be raised.
Otherwise, the message will be logged (once) at the 'warn' level.
:raises: :class:`DeprecatedConfig` if the system is configured for
fatal deprecations.
"""
stdmsg = _("Deprecated: %s") % msg
if CONF.fatal_deprecations:
self.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
# Using a list because a tuple with dict can't be stored in a set.
sent_args = self._deprecated_messages_sent.setdefault(msg, list())
if args in sent_args:
# Already logged this message, so don't log it again.
return
sent_args.append(args)
self.warn(stdmsg, *args, **kwargs)
def process(self, msg, kwargs):
# NOTE(mrodden): catch any Message/other object and
# coerce to unicode before they can get
# to the python logging and possibly
# cause string encoding trouble
if not isinstance(msg, six.string_types):
msg = six.text_type(msg)
if 'extra' not in kwargs:
kwargs['extra'] = {}
extra = kwargs['extra']
context = kwargs.pop('context', None)
if not context:
context = getattr(local.store, 'context', None)
if context:
extra.update(_dictify_context(context))
instance = kwargs.pop('instance', None)
instance_uuid = (extra.get('instance_uuid') or
kwargs.pop('instance_uuid', None))
instance_extra = ''
if instance:
instance_extra = CONF.instance_format % instance
elif instance_uuid:
instance_extra = (CONF.instance_uuid_format
% {'uuid': instance_uuid})
extra['instance'] = instance_extra
extra.setdefault('user_identity', kwargs.pop('user_identity', None))
extra['project'] = self.project
extra['version'] = self.version
extra['extra'] = extra.copy()
return msg, kwargs
class JSONFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but it's still there
# since logging.config.fileConfig passes it.
self.datefmt = datefmt
def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei)
if strip_newlines:
lines = [moves.filter(
lambda x: x,
line.rstrip().splitlines()) for line in lines]
lines = list(itertools.chain(*lines))
return lines
def format(self, record):
message = {'message': record.getMessage(),
'asctime': self.formatTime(record, self.datefmt),
'name': record.name,
'msg': record.msg,
'args': record.args,
'levelname': record.levelname,
'levelno': record.levelno,
'pathname': record.pathname,
'filename': record.filename,
'module': record.module,
'lineno': record.lineno,
'funcname': record.funcName,
'created': record.created,
'msecs': record.msecs,
'relative_created': record.relativeCreated,
'thread': record.thread,
'thread_name': record.threadName,
'process_name': record.processName,
'process': record.process,
'traceback': None}
if hasattr(record, 'extra'):
message['extra'] = record.extra
if record.exc_info:
message['traceback'] = self.formatException(record.exc_info)
return jsonutils.dumps(message)
def _create_logging_excepthook(product_name):
def logging_excepthook(exc_type, value, tb):
extra = {}
if CONF.verbose or CONF.debug:
extra['exc_info'] = (exc_type, value, tb)
getLogger(product_name).critical(
"".join(traceback.format_exception_only(exc_type, value)),
**extra)
return logging_excepthook
class LogConfigError(Exception):
message = _('Error loading logging config %(log_config)s: %(err_msg)s')
def __init__(self, log_config, err_msg):
self.log_config = log_config
self.err_msg = err_msg
def __str__(self):
return self.message % dict(log_config=self.log_config,
err_msg=self.err_msg)
def _load_log_config(log_config_append):
try:
logging.config.fileConfig(log_config_append,
disable_existing_loggers=False)
except moves.configparser.Error as exc:
raise LogConfigError(log_config_append, six.text_type(exc))
def setup(product_name, version='unknown'):
"""Setup logging."""
if CONF.log_config_append:
_load_log_config(CONF.log_config_append)
else:
_setup_logging_from_conf(product_name, version)
sys.excepthook = _create_logging_excepthook(product_name)
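# Typical bootstrap sketch (assumes the options registered above have
# already been parsed, e.g. via ``CONF(sys.argv[1:])``; 'healing' is an
# example product name):
#
#     from healing.openstack.common import log
#     log.setup('healing')            # configure handlers/levels from CONF
#     LOG = log.getLogger(__name__)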
def set_defaults(logging_context_format_string):
cfg.set_defaults(log_opts,
logging_context_format_string=
logging_context_format_string)
def _find_facility_from_conf():
facility_names = logging.handlers.SysLogHandler.facility_names
facility = getattr(logging.handlers.SysLogHandler,
CONF.syslog_log_facility,
None)
if facility is None and CONF.syslog_log_facility in facility_names:
facility = facility_names.get(CONF.syslog_log_facility)
if facility is None:
valid_facilities = facility_names.keys()
consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
valid_facilities.extend(consts)
raise TypeError(_('syslog facility must be one of: %s') %
', '.join("'%s'" % fac
for fac in valid_facilities))
return facility
class RFCSysLogHandler(logging.handlers.SysLogHandler):
def __init__(self, *args, **kwargs):
self.binary_name = _get_binary_name()
# Do not use super() unless type(logging.handlers.SysLogHandler)
# is 'type' (Python 2.7).
# Use old style calls, if the type is 'classobj' (Python 2.6)
logging.handlers.SysLogHandler.__init__(self, *args, **kwargs)
def format(self, record):
# Do not use super() unless type(logging.handlers.SysLogHandler)
# is 'type' (Python 2.7).
# Use old style calls, if the type is 'classobj' (Python 2.6)
msg = logging.handlers.SysLogHandler.format(self, record)
msg = self.binary_name + ' ' + msg
return msg
def _setup_logging_from_conf(project, version):
log_root = getLogger(None).logger
for handler in log_root.handlers:
log_root.removeHandler(handler)
if CONF.use_syslog:
facility = _find_facility_from_conf()
# TODO(bogdando) use the format provided by RFCSysLogHandler
# after existing syslog format deprecation in J
if CONF.use_syslog_rfc_format:
syslog = RFCSysLogHandler(address='/dev/log',
facility=facility)
else:
syslog = logging.handlers.SysLogHandler(address='/dev/log',
facility=facility)
log_root.addHandler(syslog)
logpath = _get_log_file_path()
if logpath:
filelog = logging.handlers.WatchedFileHandler(logpath)
log_root.addHandler(filelog)
if CONF.use_stderr:
streamlog = ColorHandler()
log_root.addHandler(streamlog)
elif not logpath:
# pass sys.stdout as a positional argument
# python2.6 calls the argument strm, in 2.7 it's stream
streamlog = logging.StreamHandler(sys.stdout)
log_root.addHandler(streamlog)
if CONF.publish_errors:
handler = importutils.import_object(
"healing.openstack.common.log_handler.PublishErrorsHandler",
logging.ERROR)
log_root.addHandler(handler)
datefmt = CONF.log_date_format
for handler in log_root.handlers:
# NOTE(alaski): CONF.log_format overrides everything currently. This
# should be deprecated in favor of context aware formatting.
if CONF.log_format:
handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
datefmt=datefmt))
log_root.info('Deprecated: log_format is now deprecated and will '
'be removed in the next release')
else:
handler.setFormatter(ContextFormatter(project=project,
version=version,
datefmt=datefmt))
if CONF.debug:
log_root.setLevel(logging.DEBUG)
elif CONF.verbose:
log_root.setLevel(logging.INFO)
else:
log_root.setLevel(logging.WARNING)
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
level = logging.getLevelName(level_name)
logger = logging.getLogger(mod)
logger.setLevel(level)
_loggers = {}
def getLogger(name='unknown', version='unknown'):
if name not in _loggers:
_loggers[name] = ContextAdapter(logging.getLogger(name),
name,
version)
return _loggers[name]
def getLazyLogger(name='unknown', version='unknown'):
"""Returns lazy logger.
Creates a pass-through logger that does not create the real logger
until it is really needed and delegates all calls to the real logger
once it is created.
"""
return LazyAdapter(name, version)
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.INFO):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg.rstrip())
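# WritableLogger is useful wherever a file-like object is expected, for
# example as the ``log`` argument of eventlet.wsgi.server (a sketch only;
# eventlet is not imported or required by this module):
#
#     eventlet.wsgi.server(sock, app, log=WritableLogger(LOG))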
class ContextFormatter(logging.Formatter):
"""A context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string
and logging_default_format_string. You can also specify
logging_debug_format_suffix to append extra formatting if the log level is
debug.
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
If available, uses the context value stored in TLS - local.store.context
"""
def __init__(self, *args, **kwargs):
"""Initialize ContextFormatter instance
Takes additional keyword arguments which can be used in the message
format string.
:keyword project: project name
:type project: string
:keyword version: project version
:type version: string
"""
self.project = kwargs.pop('project', 'unknown')
self.version = kwargs.pop('version', 'unknown')
logging.Formatter.__init__(self, *args, **kwargs)
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
# store project info
record.project = self.project
record.version = self.version
# store request info
context = getattr(local.store, 'context', None)
if context:
d = _dictify_context(context)
for k, v in d.items():
setattr(record, k, v)
# NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color', 'user_identity'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id'):
self._fmt = CONF.logging_context_format_string
else:
self._fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
self._fmt += " " + CONF.logging_debug_format_suffix
# Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None):
"""Format exception output with CONF.logging_exception_prefix."""
if not record:
return logging.Formatter.formatException(self, exc_info)
stringbuffer = moves.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer)
lines = stringbuffer.getvalue().split('\n')
stringbuffer.close()
if CONF.logging_exception_prefix.find('%(asctime)') != -1:
record.asctime = self.formatTime(record, self.datefmt)
formatted_lines = []
for line in lines:
pl = CONF.logging_exception_prefix % record.__dict__
fl = '%s%s' % (pl, line)
formatted_lines.append(fl)
return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
LEVEL_COLORS = {
logging.DEBUG: '\033[00;32m', # GREEN
logging.INFO: '\033[00;36m', # CYAN
logging.AUDIT: '\033[01;36m', # BOLD CYAN
logging.WARN: '\033[01;33m', # BOLD YELLOW
logging.ERROR: '\033[01;31m', # BOLD RED
logging.CRITICAL: '\033[01;31m', # BOLD RED
}
def format(self, record):
record.color = self.LEVEL_COLORS[record.levelno]
return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
message = _("Fatal call to deprecated config: %(msg)s")
def __init__(self, msg):
super(Exception, self).__init__(self.message % dict(msg=msg))
| apache-2.0 | 5,974,308,001,666,819,000 | 35.534722 | 79 | 0.588785 | false |