the-stack_0_2
# A sample recurrent neural network for text classification
# @Time: 8/13/2020
# @Author: lnblanke
# @Email: [email protected]
# @File: cnn.py
import numpy as np
import tensorflow as tf
from blocks import RNN, Dense
from model import Model
import os
path = os.path.join("glove.6B.100d.txt")
embedding_indices = {}
with open(path) as f:
    for line in f:
        word, coef = line.split(maxsplit = 1)
        coef = np.fromstring(coef, "f", sep = " ")
        embedding_indices[word] = coef
def embedding(x):
    word_idx = tf.keras.datasets.imdb.get_word_index()
    embedding_dim = 100
    l, w = x.shape
    embed = np.zeros((l, w, embedding_dim))
    vec_to_word = {vec + 3: ww for ww, vec in word_idx.items()}
    vec_to_word[0] = "<pad>"
    vec_to_word[1] = "<sos>"
    vec_to_word[2] = "<unk>"
    for i in range(l):
        for j in range(w):
            embedding_vec = embedding_indices.get(vec_to_word[x[i][j]])
            if embedding_vec is not None:
                embed[i][j] = embedding_vec
    return embed
word_size = 15000
(train_x, train_y), (test_x, test_y) = tf.keras.datasets.imdb.load_data(num_words = word_size)
max_len = 300
train_x = tf.keras.preprocessing.sequence.pad_sequences(train_x, max_len)[:1000]
train_y = train_y[:1000]
test_x = tf.keras.preprocessing.sequence.pad_sequences(test_x, max_len)[:200]
test_y = test_y[:200]
train_x_embed = embedding(train_x)
test_x_embed = embedding(test_x)
rate = 1e-2 # Learning rate
epoch = 100 # Learning epochs
patience = 10 # Early stop patience
model = Model("RNN")
model.add(RNN(input_size = 100, output_size = 64, units = 128))
model.add(Dense(64, 2, activation = "softmax"))
if __name__ == '__main__':
    model.fit(train_x_embed, train_y, loss_func = "cross entropy loss", epochs = epoch, learning_rate = rate,
              patience = patience)
    pred = model.predict(test_x_embed)
    print("Accuracy: %.2f" % (np.sum(pred == test_y) / len(test_y) * 100) + "%")
the-stack_0_3
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateDocument
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_v2_generated_Documents_CreateDocument_sync]
from google.cloud import dialogflow_v2
def sample_create_document():
    # Create a client
    client = dialogflow_v2.DocumentsClient()

    # Initialize request argument(s)
    document = dialogflow_v2.Document()
    document.content_uri = "content_uri_value"
    document.display_name = "display_name_value"
    document.mime_type = "mime_type_value"
    document.knowledge_types = "AGENT_FACING_SMART_REPLY"

    request = dialogflow_v2.CreateDocumentRequest(
        parent="parent_value",
        document=document,
    )

    # Make the request
    operation = client.create_document(request=request)

    print("Waiting for operation to complete...")

    response = operation.result()

    # Handle the response
    print(response)
# [END dialogflow_v2_generated_Documents_CreateDocument_sync]
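# A minimal way to exercise the generated sample above (illustrative sketch, not part of
# the generated file; it assumes Google Cloud credentials are configured and that the
# placeholder values such as "parent_value" and "content_uri_value" have been replaced
# with real resource names):
if __name__ == "__main__":
    sample_create_document()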
the-stack_0_6
import os
from setuptools import setup, find_packages
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
    README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
    name='django_admin_monitoring',
    version='0.1.3',
    packages=find_packages(),
    include_package_data=True,
    license='MIT License',
    description='A simple Django app that provides ability to monitor such things as user feedback in admin',
    long_description=README,
    url='https://github.com/eternalfame/django_admin_monitoring',
    author='Vyacheslav Sukhenko',
    author_email='[email protected]',
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
)
the-stack_0_7
# Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import warnings
from abc import ABCMeta, abstractmethod
from joblib import Parallel
from ..base import clone, is_classifier
from ._base import LinearClassifierMixin, SparseCoefMixin
from ._base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..utils import check_array, check_random_state, check_X_y
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted, _check_sample_weight
from ..utils.validation import _deprecate_positional_args
from ..utils.fixes import delayed
from ..exceptions import ConvergenceWarning
from ..model_selection import StratifiedShuffleSplit, ShuffleSplit
from ._sgd_fast import _plain_sgd
from ..utils import compute_class_weight
from ._sgd_fast import Hinge
from ._sgd_fast import SquaredHinge
from ._sgd_fast import Log
from ._sgd_fast import ModifiedHuber
from ._sgd_fast import SquaredLoss
from ._sgd_fast import Huber
from ._sgd_fast import EpsilonInsensitive
from ._sgd_fast import SquaredEpsilonInsensitive
from ..utils.fixes import _joblib_parallel_args
from ..utils import deprecated
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"adaptive": 4, "pa1": 5, "pa2": 6}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1  # Default value of ``epsilon`` parameter.
MAX_INT = np.iinfo(np.int32).max
class _ValidationScoreCallback:
"""Callback for early stopping based on validation score"""
def __init__(self, estimator, X_val, y_val, sample_weight_val,
classes=None):
self.estimator = clone(estimator)
self.estimator.t_ = 1 # to pass check_is_fitted
if classes is not None:
self.estimator.classes_ = classes
self.X_val = X_val
self.y_val = y_val
self.sample_weight_val = sample_weight_val
def __call__(self, coef, intercept):
est = self.estimator
est.coef_ = coef.reshape(1, -1)
est.intercept_ = np.atleast_1d(intercept)
return est.score(self.X_val, self.y_val, self.sample_weight_val)
class BaseSGD(SparseCoefMixin, BaseEstimator, metaclass=ABCMeta):
"""Base class for SGD classification and regression."""
@_deprecate_positional_args
def __init__(self, loss, *, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3,
shuffle=True, verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
early_stopping=False, validation_fraction=0.1,
n_iter_no_change=5, warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.early_stopping = early_stopping
self.validation_fraction = validation_fraction
self.n_iter_no_change = n_iter_no_change
self.warm_start = warm_start
self.average = average
self.max_iter = max_iter
self.tol = tol
# current tests expect init to do parameter validation
# but we are not allowed to set attributes
self._validate_params()
def set_params(self, **kwargs):
"""Set and validate the parameters of estimator.
Parameters
----------
**kwargs : dict
Estimator parameters.
Returns
-------
self : object
Estimator instance.
"""
super().set_params(**kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self, for_partial_fit=False):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if not isinstance(self.early_stopping, bool):
raise ValueError("early_stopping must be either True or False")
if self.early_stopping and for_partial_fit:
raise ValueError("early_stopping should be False with partial_fit")
if self.max_iter is not None and self.max_iter <= 0:
raise ValueError("max_iter must be > zero. Got %f" % self.max_iter)
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.n_iter_no_change < 1:
raise ValueError("n_iter_no_change must be >= 1")
if not (0.0 < self.validation_fraction < 1.0):
raise ValueError("validation_fraction must be in range (0, 1)")
if self.learning_rate in ("constant", "invscaling", "adaptive"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
if self.learning_rate == "optimal" and self.alpha == 0:
raise ValueError("alpha must be > 0 since "
"learning_rate is 'optimal'. alpha is used "
"to compute the optimal learning rate.")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError as e:
raise ValueError("The loss %s is not supported. " % loss) from e
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError as e:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate) from e
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError as e:
raise ValueError("Penalty %s is not supported. " % penalty) from e
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match "
"dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self._standard_coef = self.coef_
self._standard_intercept = self.intercept_
self._average_coef = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self._average_intercept = np.zeros(self._standard_intercept.shape,
dtype=np.float64,
order="C")
def _make_validation_split(self, y):
"""Split the dataset between training set and validation set.
Parameters
----------
y : ndarray of shape (n_samples, )
Target values.
Returns
-------
validation_mask : ndarray of shape (n_samples, )
Equal to 1 on the validation set, 0 on the training set.
"""
n_samples = y.shape[0]
validation_mask = np.zeros(n_samples, dtype=np.uint8)
if not self.early_stopping:
# use the full set for training, with an empty validation set
return validation_mask
if is_classifier(self):
splitter_type = StratifiedShuffleSplit
else:
splitter_type = ShuffleSplit
cv = splitter_type(test_size=self.validation_fraction,
random_state=self.random_state)
idx_train, idx_val = next(cv.split(np.zeros(shape=(y.shape[0], 1)), y))
if idx_train.shape[0] == 0 or idx_val.shape[0] == 0:
raise ValueError(
"Splitting %d samples into a train set and a validation set "
"with validation_fraction=%r led to an empty set (%d and %d "
"samples). Please either change validation_fraction, increase "
"number of samples, or disable early_stopping."
% (n_samples, self.validation_fraction, idx_train.shape[0],
idx_val.shape[0]))
validation_mask[idx_val] = 1
return validation_mask
def _make_validation_score_cb(self, validation_mask, X, y, sample_weight,
classes=None):
if not self.early_stopping:
return None
return _ValidationScoreCallback(
self, X[validation_mask], y[validation_mask],
sample_weight[validation_mask], classes=classes)
# mypy error: Decorated property not supported
@deprecated("Attribute standard_coef_ was deprecated " # type: ignore
"in version 0.23 and will be removed in 1.0 "
"(renaming of 0.25).")
@property
def standard_coef_(self):
return self._standard_coef
# mypy error: Decorated property not supported
@deprecated( # type: ignore
"Attribute standard_intercept_ was deprecated "
"in version 0.23 and will be removed in 1.0 (renaming of 0.25)."
)
@property
def standard_intercept_(self):
return self._standard_intercept
# mypy error: Decorated property not supported
@deprecated("Attribute average_coef_ was deprecated " # type: ignore
"in version 0.23 and will be removed in 1.0 "
"(renaming of 0.25).")
@property
def average_coef_(self):
return self._average_coef
# mypy error: Decorated property not supported
@deprecated("Attribute average_intercept_ was deprecated " # type: ignore
"in version 0.23 and will be removed in 1.0 "
"(renaming of 0.25).")
@property
def average_intercept_(self):
return self._average_intercept
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept, average_coef, average_intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est._standard_coef.ravel()
intercept = est._standard_intercept[0]
average_coef = est._average_coef.ravel()
average_intercept = est._average_intercept[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est._standard_coef[i]
intercept = est._standard_intercept[i]
average_coef = est._average_coef[i]
average_intercept = est._average_intercept[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, max_iter,
pos_weight, neg_weight, sample_weight, validation_mask=None,
random_state=None):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
Parameters
----------
est : Estimator object
The estimator to fit
i : int
Index of the positive class
X : numpy array or sparse matrix of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples, ]
Target values
alpha : float
The regularization parameter
C : float
Maximum step size for passive aggressive
learning_rate : string
The learning rate. Accepted values are 'constant', 'optimal',
'invscaling', 'pa1' and 'pa2'.
max_iter : int
The maximum number of iterations (epochs)
pos_weight : float
The weight of the positive class
neg_weight : float
The weight of the negative class
sample_weight : numpy array of shape [n_samples, ]
The weight of each sample
validation_mask : numpy array of shape [n_samples, ], default=None
Precomputed validation mask in case _fit_binary is called in the
context of a one-vs-rest reduction.
random_state : int, RandomState instance, default=None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
random_state = check_random_state(random_state)
dataset, intercept_decay = make_dataset(
X, y_i, sample_weight, random_state=random_state)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
if validation_mask is None:
validation_mask = est._make_validation_split(y_i)
classes = np.array([-1, 1], dtype=y_i.dtype)
validation_score_cb = est._make_validation_score_cb(
validation_mask, X, y_i, sample_weight, classes=classes)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(MAX_INT)
tol = est.tol if est.tol is not None else -np.inf
coef, intercept, average_coef, average_intercept, n_iter_ = _plain_sgd(
coef, intercept, average_coef, average_intercept, est.loss_function_,
penalty_type, alpha, C, est.l1_ratio, dataset, validation_mask,
est.early_stopping, validation_score_cb, int(est.n_iter_no_change),
max_iter, tol, int(est.fit_intercept), int(est.verbose),
int(est.shuffle), seed, pos_weight, neg_weight, learning_rate_type,
est.eta0, est.power_t, est.t_, intercept_decay, est.average)
if est.average:
if len(est.classes_) == 2:
est._average_intercept[0] = average_intercept
else:
est._average_intercept[i] = average_intercept
return coef, intercept, n_iter_
class BaseSGDClassifier(LinearClassifierMixin, BaseSGD, metaclass=ABCMeta):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
@_deprecate_positional_args
def __init__(self, loss="hinge", *, penalty='l2', alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3,
shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=None,
random_state=None, learning_rate="optimal", eta0=0.0,
power_t=0.5, early_stopping=False,
validation_fraction=0.1, n_iter_no_change=5,
class_weight=None, warm_start=False, average=False):
super().__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
shuffle=shuffle, verbose=verbose, epsilon=epsilon,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, early_stopping=early_stopping,
validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change, warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.n_jobs = n_jobs
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, max_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C", accept_large_sparse=False)
n_samples, n_features = X.shape
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(
self.class_weight, classes=self.classes_, y=y)
sample_weight = _check_sample_weight(sample_weight, X)
if getattr(self, "coef_", None) is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
self.loss_function_ = self._get_loss_function(loss)
if not hasattr(self, "t_"):
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight,
max_iter=max_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight,
max_iter=max_iter)
else:
raise ValueError(
"The number of classes has to be greater than one;"
" got %d class" % n_classes)
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
self._validate_params()
if hasattr(self, "classes_"):
self.classes_ = None
X, y = self._validate_data(X, y, accept_sparse='csr',
dtype=np.float64, order="C",
accept_large_sparse=False)
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and hasattr(self, "coef_"):
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self._standard_coef = self.coef_
self._standard_intercept = self.intercept_
self._average_coef = None
self._average_intercept = None
# Clear iteration count for multiple call to fit.
self.t_ = 1.0
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.max_iter,
classes, sample_weight, coef_init, intercept_init)
if (self.tol is not None and self.tol > -np.inf
and self.n_iter_ == self.max_iter):
warnings.warn("Maximum number of iteration reached before "
"convergence. Consider increasing max_iter to "
"improve the fit.",
ConvergenceWarning)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, max_iter):
"""Fit a binary classifier on X and y. """
coef, intercept, n_iter_ = fit_binary(self, 1, X, y, alpha, C,
learning_rate, max_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight,
random_state=self.random_state)
self.t_ += n_iter_ * X.shape[0]
self.n_iter_ = n_iter_
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self._average_coef.reshape(1, -1)
self.intercept_ = self._average_intercept
else:
self.coef_ = self._standard_coef.reshape(1, -1)
self._standard_intercept = np.atleast_1d(intercept)
self.intercept_ = self._standard_intercept
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, max_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OvA (One versus All) or OvR (One versus Rest).
"""
# Precompute the validation split using the multiclass labels
# to ensure proper balancing of the classes.
validation_mask = self._make_validation_split(y)
# Use joblib to fit OvA in parallel.
# Pick the random seed for each job outside of fit_binary to avoid
# sharing the estimator random state between threads which could lead
# to non-deterministic behavior
random_state = check_random_state(self.random_state)
seeds = random_state.randint(MAX_INT, size=len(self.classes_))
result = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
**_joblib_parallel_args(require="sharedmem"))(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
max_iter, self._expanded_class_weight[i],
1., sample_weight,
validation_mask=validation_mask,
random_state=seed)
for i, seed in enumerate(seeds))
# take the maximum of n_iter_ over every binary fit
n_iter_ = 0.
for i, (_, intercept, n_iter_i) in enumerate(result):
self.intercept_[i] = intercept
n_iter_ = max(n_iter_, n_iter_i)
self.t_ += n_iter_ * X.shape[0]
self.n_iter_ = n_iter_
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self._average_coef
self.intercept_ = self._average_intercept
else:
self.coef_ = self._standard_coef
self._standard_intercept = np.atleast_1d(self.intercept_)
self.intercept_ = self._standard_intercept
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Perform one epoch of stochastic gradient descent on given samples.
Internally, this method uses ``max_iter = 1``. Therefore, it is not
guaranteed that a minimum of the cost function is reached after calling
it once. Matters such as objective convergence and early stopping
should be handled by the user.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data.
y : ndarray of shape (n_samples,)
Subset of the target values.
classes : ndarray of shape (n_classes,), default=None
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), default=None
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self :
Returns an instance of self.
"""
self._validate_params(for_partial_fit=True)
if self.class_weight in ['balanced']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights,"
" use compute_class_weight('{0}', "
"classes=classes, y=y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, max_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
coef_init : ndarray of shape (n_classes, n_features), default=None
The initial coefficients to warm-start the optimization.
intercept_init : ndarray of shape (n_classes,), default=None
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), default=None
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified.
Returns
-------
self :
Returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier):
"""Linear classifiers (SVM, logistic regression, etc.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
each sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning via the `partial_fit` method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, default='hinge'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The possible options are 'hinge', 'log', 'modified_huber',
'squared_hinge', 'perceptron', or a regression loss: 'squared_loss',
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see
:class:`~sklearn.linear_model.SGDRegressor` for a description.
More details about the losses formulas can be found in the
:ref:`User Guide <sgd_mathematical_formulation>`.
penalty : {'l2', 'l1', 'elasticnet'}, default='l2'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float, default=0.0001
Constant that multiplies the regularization term. The higher the
value, the stronger the regularization.
Also used to compute the learning rate when `learning_rate` is
set to 'optimal'.
l1_ratio : float, default=0.15
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Only used if `penalty` is 'elasticnet'.
fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
max_iter : int, default=1000
The maximum number of passes over the training data (aka epochs).
It only impacts the behavior in the ``fit`` method, and not the
:meth:`partial_fit` method.
.. versionadded:: 0.19
tol : float, default=1e-3
The stopping criterion. If it is not None, training will stop
when (loss > best_loss - tol) for ``n_iter_no_change`` consecutive
epochs.
.. versionadded:: 0.19
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
verbose : int, default=0
The verbosity level.
epsilon : float, default=0.1
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : int, default=None
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance, default=None
Used for shuffling the data, when ``shuffle`` is set to ``True``.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
learning_rate : str, default='optimal'
The learning rate schedule:
- 'constant': `eta = eta0`
- 'optimal': `eta = 1.0 / (alpha * (t + t0))`
where t0 is chosen by a heuristic proposed by Leon Bottou.
- 'invscaling': `eta = eta0 / pow(t, power_t)`
- 'adaptive': eta = eta0, as long as the training loss keeps decreasing.
Each time n_iter_no_change consecutive epochs fail to decrease the
training loss by tol or fail to increase validation score by tol if
early_stopping is True, the current learning rate is divided by 5.
.. versionadded:: 0.20
Added 'adaptive' option
eta0 : double, default=0.0
The initial learning rate for the 'constant', 'invscaling' or
'adaptive' schedules. The default value is 0.0 as eta0 is not used by
the default schedule 'optimal'.
power_t : double, default=0.5
The exponent for inverse scaling learning rate [default 0.5].
early_stopping : bool, default=False
Whether to use early stopping to terminate training when validation
score is not improving. If set to True, it will automatically set aside
a stratified fraction of training data as validation and terminate
training when validation score returned by the `score` method is not
improving by at least tol for n_iter_no_change consecutive epochs.
.. versionadded:: 0.20
Added 'early_stopping' option
validation_fraction : float, default=0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if `early_stopping` is True.
.. versionadded:: 0.20
Added 'validation_fraction' option
n_iter_no_change : int, default=5
Number of iterations with no improvement to wait before early stopping.
.. versionadded:: 0.20
Added 'n_iter_no_change' option
class_weight : dict, {class_label: weight} or "balanced", default=None
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
See :term:`the Glossary <warm_start>`.
Repeatedly calling fit or partial_fit when warm_start is True can
result in a different solution than when calling fit a single time
because of the way the data is shuffled.
If a dynamic learning rate is used, the learning rate is adapted
depending on the number of samples already seen. Calling ``fit`` resets
this counter, while ``partial_fit`` will result in increasing the
existing counter.
average : bool or int, default=False
When set to True, computes the averaged SGD weights across all
updates and stores the result in the ``coef_`` attribute. If set to
an int greater than 1, averaging will begin once the total number of
samples seen reaches `average`. So ``average=10`` will begin
averaging after seeing 10 samples.
Attributes
----------
coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \
(n_classes, n_features)
Weights assigned to the features.
intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
n_iter_ : int
The actual number of iterations before reaching the stopping criterion.
For multiclass fits, it is the maximum over every binary fit.
loss_function_ : concrete ``LossFunction``
classes_ : array of shape (n_classes,)
t_ : int
Number of weight updates performed during training.
Same as ``(n_iter_ * n_samples)``.
See Also
--------
sklearn.svm.LinearSVC : Linear support vector classification.
LogisticRegression : Logistic regression.
Perceptron : Inherits from SGDClassifier. ``Perceptron()`` is equivalent to
``SGDClassifier(loss="perceptron", eta0=1, learning_rate="constant",
penalty=None)``.
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import SGDClassifier
>>> from sklearn.preprocessing import StandardScaler
>>> from sklearn.pipeline import make_pipeline
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> # Always scale the input. The most convenient way is to use a pipeline.
>>> clf = make_pipeline(StandardScaler(),
... SGDClassifier(max_iter=1000, tol=1e-3))
>>> clf.fit(X, Y)
Pipeline(steps=[('standardscaler', StandardScaler()),
('sgdclassifier', SGDClassifier())])
>>> print(clf.predict([[-0.8, -1]]))
[1]
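A minimal sketch of out-of-core learning with :meth:`partial_fit` (illustrative
only; ``classes`` must be supplied on the first call, and inputs should normally
be scaled as above):

>>> clf2 = SGDClassifier(random_state=0)
>>> clf2.partial_fit(X, Y, classes=np.unique(Y))
SGDClassifier(random_state=0)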
"""
@_deprecate_positional_args
def __init__(self, loss="hinge", *, penalty='l2', alpha=0.0001,
l1_ratio=0.15,
fit_intercept=True, max_iter=1000, tol=1e-3, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=None,
random_state=None, learning_rate="optimal", eta0=0.0,
power_t=0.5, early_stopping=False, validation_fraction=0.1,
n_iter_no_change=5, class_weight=None, warm_start=False,
average=False):
super().__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
shuffle=shuffle, verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, early_stopping=early_stopping,
validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change, class_weight=class_weight,
warm_start=warm_start, average=average)
def _check_proba(self):
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions
it is necessary to perform proper probability calibration by wrapping
the classifier with
:class:`~sklearn.calibration.CalibratedClassifierCV` instead.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data for prediction.
Returns
-------
ndarray of shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
check_is_fitted(self)
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data for prediction.
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
def _more_tags(self):
return {
'_xfail_checks': {
'check_sample_weights_invariance':
'zero sample_weight is not equivalent to removing samples',
}
}
class BaseSGDRegressor(RegressorMixin, BaseSGD):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
@_deprecate_positional_args
def __init__(self, loss="squared_loss", *, penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3,
shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON,
random_state=None, learning_rate="invscaling", eta0=0.01,
power_t=0.25, early_stopping=False, validation_fraction=0.1,
n_iter_no_change=5, warm_start=False, average=False):
super().__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
shuffle=shuffle, verbose=verbose, epsilon=epsilon,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, early_stopping=early_stopping,
validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change, warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
max_iter, sample_weight, coef_init, intercept_init):
X, y = self._validate_data(X, y, accept_sparse="csr", copy=False,
order='C', dtype=np.float64,
accept_large_sparse=False)
y = y.astype(np.float64, copy=False)
n_samples, n_features = X.shape
sample_weight = _check_sample_weight(sample_weight, X)
# Allocate datastructures from input arguments
if getattr(self, "coef_", None) is None:
self._allocate_parameter_mem(1, n_features, coef_init,
intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
if self.average > 0 and getattr(self, "_average_coef", None) is None:
self._average_coef = np.zeros(n_features,
dtype=np.float64,
order="C")
self._average_intercept = np.zeros(1, dtype=np.float64, order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, max_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Perform one epoch of stochastic gradient descent on given samples.
Internally, this method uses ``max_iter = 1``. Therefore, it is not
guaranteed that a minimum of the cost function is reached after calling
it once. Matters such as objective convergence and early stopping
should be handled by the user.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), default=None
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
self._validate_params(for_partial_fit=True)
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, max_iter=1,
sample_weight=sample_weight, coef_init=None,
intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
self._validate_params()
if self.warm_start and getattr(self, "coef_", None) is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = 1.0
self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.max_iter, sample_weight, coef_init,
intercept_init)
if (self.tol is not None and self.tol > -np.inf
and self.n_iter_ == self.max_iter):
warnings.warn("Maximum number of iteration reached before "
"convergence. Consider increasing max_iter to "
"improve the fit.",
ConvergenceWarning)
return self
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : ndarray of shape (n_samples,)
Target values
coef_init : ndarray of shape (n_features,), default=None
The initial coefficients to warm-start the optimization.
intercept_init : ndarray of shape (1,), default=None
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
ndarray of shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
ndarray of shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, max_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if not hasattr(self, "t_"):
self.t_ = 1.0
validation_mask = self._make_validation_split(y)
validation_score_cb = self._make_validation_score_cb(
validation_mask, X, y, sample_weight)
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
tol = self.tol if self.tol is not None else -np.inf
if self.average:
coef = self._standard_coef
intercept = self._standard_intercept
average_coef = self._average_coef
average_intercept = self._average_intercept
else:
coef = self.coef_
intercept = self.intercept_
average_coef = None # Not used
average_intercept = [0] # Not used
coef, intercept, average_coef, average_intercept, self.n_iter_ = \
_plain_sgd(coef,
intercept[0],
average_coef,
average_intercept[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
validation_mask, self.early_stopping,
validation_score_cb,
int(self.n_iter_no_change),
max_iter, tol,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.t_ += self.n_iter_ * X.shape[0]
if self.average > 0:
self._average_intercept = np.atleast_1d(average_intercept)
self._standard_intercept = np.atleast_1d(intercept)
if self.average <= self.t_ - 1.0:
# made enough updates for averaging to be taken into account
self.coef_ = average_coef
self.intercept_ = np.atleast_1d(average_intercept)
else:
self.coef_ = coef
self.intercept_ = np.atleast_1d(intercept)
else:
self.intercept_ = np.atleast_1d(intercept)
class SGDRegressor(BaseSGDRegressor):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated each sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, default='squared_loss'
The loss function to be used. The possible values are 'squared_loss',
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'
The 'squared_loss' refers to the ordinary least squares fit.
'huber' modifies 'squared_loss' to focus less on getting outliers
correct by switching from squared to linear loss past a distance of
epsilon. 'epsilon_insensitive' ignores errors less than epsilon and is
linear past that; this is the loss function used in SVR.
'squared_epsilon_insensitive' is the same but becomes squared loss past
a tolerance of epsilon.
More details about the losses formulas can be found in the
:ref:`User Guide <sgd_mathematical_formulation>`.
penalty : {'l2', 'l1', 'elasticnet'}, default='l2'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float, default=0.0001
Constant that multiplies the regularization term. The higher the
value, the stronger the regularization.
Also used to compute the learning rate when `learning_rate` is
set to 'optimal'.
l1_ratio : float, default=0.15
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Only used if `penalty` is 'elasticnet'.
fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
max_iter : int, default=1000
The maximum number of passes over the training data (aka epochs).
It only impacts the behavior in the ``fit`` method, and not the
:meth:`partial_fit` method.
.. versionadded:: 0.19
tol : float, default=1e-3
The stopping criterion. If it is not None, training will stop
when (loss > best_loss - tol) for ``n_iter_no_change`` consecutive
epochs.
.. versionadded:: 0.19
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
verbose : int, default=0
The verbosity level.
epsilon : float, default=0.1
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
random_state : int, RandomState instance, default=None
Used for shuffling the data, when ``shuffle`` is set to ``True``.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
learning_rate : string, default='invscaling'
The learning rate schedule:
- 'constant': `eta = eta0`
- 'optimal': `eta = 1.0 / (alpha * (t + t0))`
where t0 is chosen by a heuristic proposed by Leon Bottou.
- 'invscaling': `eta = eta0 / pow(t, power_t)`
- 'adaptive': eta = eta0, as long as the training loss keeps decreasing.
Each time n_iter_no_change consecutive epochs fail to decrease the
training loss by tol or fail to increase validation score by tol if
early_stopping is True, the current learning rate is divided by 5.
.. versionadded:: 0.20
Added 'adaptive' option
eta0 : double, default=0.01
The initial learning rate for the 'constant', 'invscaling' or
'adaptive' schedules. The default value is 0.01.
power_t : double, default=0.25
The exponent for inverse scaling learning rate.
early_stopping : bool, default=False
Whether to use early stopping to terminate training when validation
score is not improving. If set to True, it will automatically set aside
a fraction of training data as validation and terminate
training when validation score returned by the `score` method is not
improving by at least `tol` for `n_iter_no_change` consecutive
epochs.
.. versionadded:: 0.20
Added 'early_stopping' option
validation_fraction : float, default=0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if `early_stopping` is True.
.. versionadded:: 0.20
Added 'validation_fraction' option
n_iter_no_change : int, default=5
Number of iterations with no improvement to wait before early stopping.
.. versionadded:: 0.20
Added 'n_iter_no_change' option
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
See :term:`the Glossary <warm_start>`.
Repeatedly calling fit or partial_fit when warm_start is True can
result in a different solution than when calling fit a single time
because of the way the data is shuffled.
If a dynamic learning rate is used, the learning rate is adapted
depending on the number of samples already seen. Calling ``fit`` resets
this counter, while ``partial_fit`` will result in increasing the
existing counter.
average : bool or int, default=False
When set to True, computes the averaged SGD weights across all
updates and stores the result in the ``coef_`` attribute. If set to
an int greater than 1, averaging will begin once the total number of
samples seen reaches `average`. So ``average=10`` will begin
averaging after seeing 10 samples.
Attributes
----------
coef_ : ndarray of shape (n_features,)
Weights assigned to the features.
intercept_ : ndarray of shape (1,)
The intercept term.
average_coef_ : ndarray of shape (n_features,)
Averaged weights assigned to the features. Only available
if ``average=True``.
.. deprecated:: 0.23
Attribute ``average_coef_`` was deprecated
in version 0.23 and will be removed in 1.0 (renaming of 0.25).
average_intercept_ : ndarray of shape (1,)
The averaged intercept term. Only available if ``average=True``.
.. deprecated:: 0.23
Attribute ``average_intercept_`` was deprecated
in version 0.23 and will be removed in 1.0 (renaming of 0.25).
n_iter_ : int
The actual number of iterations before reaching the stopping criterion.
t_ : int
Number of weight updates performed during training.
Same as ``(n_iter_ * n_samples)``.
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import SGDRegressor
>>> from sklearn.pipeline import make_pipeline
>>> from sklearn.preprocessing import StandardScaler
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> # Always scale the input. The most convenient way is to use a pipeline.
>>> reg = make_pipeline(StandardScaler(),
... SGDRegressor(max_iter=1000, tol=1e-3))
>>> reg.fit(X, y)
Pipeline(steps=[('standardscaler', StandardScaler()),
('sgdregressor', SGDRegressor())])
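A minimal sketch of incremental fitting with :meth:`partial_fit` (illustrative
only; each call performs a single pass over the data it is given):

>>> reg2 = SGDRegressor(random_state=0)
>>> reg2.partial_fit(X, y)
SGDRegressor(random_state=0)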
See Also
--------
Ridge, ElasticNet, Lasso, sklearn.svm.SVR
"""
@_deprecate_positional_args
def __init__(self, loss="squared_loss", *, penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3,
shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON,
random_state=None, learning_rate="invscaling", eta0=0.01,
power_t=0.25, early_stopping=False, validation_fraction=0.1,
n_iter_no_change=5, warm_start=False, average=False):
super().__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
shuffle=shuffle, verbose=verbose, epsilon=epsilon,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, early_stopping=early_stopping,
validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change, warm_start=warm_start,
average=average)
def _more_tags(self):
return {
'_xfail_checks': {
'check_sample_weights_invariance':
'zero sample_weight is not equivalent to removing samples',
}
}
the-stack_0_9
from datetime import datetime, timedelta
import pytest
import pytz
from kaffepause.breaks.selectors import get_pending_break_invitations
from kaffepause.breaks.test.factories import BreakFactory, BreakInvitationFactory
pytestmark = pytest.mark.django_db
def test_get_break_invitations_awaiting_reply_returns_unanswered_invitations(user):
"""Should return all non-expired break invitations the user has not replied to."""
unanswered_break_invitation = BreakInvitationFactory()
unanswered_break_invitation.subject.connect(BreakFactory())
unanswered_break_invitation.addressees.connect(user)
    ten_hours_ago = datetime.now(pytz.utc) - timedelta(hours=10)
expired_break = BreakFactory()
    expired_break.starting_at = ten_hours_ago
expired_break.save()
expired_break_invitation = BreakInvitationFactory()
expired_break_invitation.subject.connect(expired_break)
expired_break_invitation.addressees.connect(user)
accepted_break_invitation = BreakInvitationFactory()
accepted_break_invitation.subject.connect(BreakFactory())
accepted_break_invitation.addressees.connect(user)
accepted_break_invitation.acceptees.connect(user)
declined_break_invitation = BreakInvitationFactory()
declined_break_invitation.subject.connect(BreakFactory())
declined_break_invitation.addressees.connect(user)
declined_break_invitation.declinees.connect(user)
actual_break_invitations = get_pending_break_invitations(actor=user)
assert unanswered_break_invitation in actual_break_invitations
assert expired_break_invitation not in actual_break_invitations
assert accepted_break_invitation not in actual_break_invitations
assert declined_break_invitation not in actual_break_invitations
def test_get_break_invitations_awaiting_reply_returns_unanswered_invitations_expired_five_minutes_ago(
user,
):
"""Should return unanswered invitations who's break has started within 5 minutes ago."""
two_minutes_ago = datetime.now(pytz.utc) - timedelta(minutes=2)
non_expired_break = BreakFactory()
non_expired_break.starting_at = two_minutes_ago
non_expired_break.save()
non_expired_break_invitation = BreakInvitationFactory()
non_expired_break_invitation.subject.connect(non_expired_break)
non_expired_break_invitation.addressees.connect(user)
ten_minutes_ago = datetime.now(pytz.utc) - timedelta(minutes=10)
expired_break = BreakFactory()
expired_break.starting_at = ten_minutes_ago
expired_break.save()
expired_break_invitation = BreakInvitationFactory()
expired_break_invitation.subject.connect(expired_break)
expired_break_invitation.addressees.connect(user)
actual_break_invitations = get_pending_break_invitations(actor=user)
assert non_expired_break_invitation in actual_break_invitations
assert expired_break_invitation not in actual_break_invitations
|
the-stack_0_10 | import numpy as np
from scipy.sparse import diags
from scipy.sparse import kron
from scipy.sparse import eye
from .two_particles import TwoParticles
from ..util.constants import *
from .. import Eigenstates
class TwoFermions(TwoParticles):
def get_eigenstates(self, H, max_states, eigenvalues, eigenvectors):
eigenvectors = eigenvectors.T.reshape(( max_states, *[H.N]*H.ndim) )
# Normalize the eigenvectors
eigenvectors = eigenvectors/np.sqrt(H.dx**H.ndim)
energies = []
eigenstates_array = []
        # Antisymmetrize eigenvectors: this is done by applying (𝜓(r1, s1, r2, s2) - 𝜓(r2, s2, r1, s1))/sqrt(2) to each state.
for i in range(max_states):
eigenstate_tmp = (eigenvectors[i] - eigenvectors[i].swapaxes(0,1))/np.sqrt(2)
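            # If the spatial part of eigenvectors[i] is symmetric under particle
            # exchange, the two terms above cancel and eigenstate_tmp has
            # (numerically) zero norm; the norm check below filters out those
            # non-fermionic combinations.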
norm = np.sum(eigenstate_tmp*eigenstate_tmp)*H.dx**H.ndim
TOL = 0.02
            # check if eigenstate_tmp is a normalizable eigenstate (the norm shouldn't be zero)
if norm > TOL :
                # degenerate eigenstates are not always returned normalized, so renormalize here
#print("norm",norm)
eigenstate_tmp = eigenstate_tmp/np.sqrt(norm)
if eigenstates_array != []: #check if it's the first eigenstate
inner_product = np.sum(eigenstates_array[-1]* eigenstate_tmp)*H.dx**H.ndim
#print("inner_product",inner_product)
else:
inner_product = 0
                if np.abs(inner_product) < TOL:  # check that eigenstate_tmp is not repeated (inner_product should be zero)
eigenstates_array += [eigenstate_tmp]
energies += [eigenvalues[i]]
if H.spatial_ndim == 1:
type = "TwoIdenticalParticles1D"
elif H.spatial_ndim == 2:
type = "TwoIdenticalParticles2D"
eigenstates = Eigenstates(energies, eigenstates_array, H.extent, H.N, type)
return eigenstates |
the-stack_0_13 | """
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
""" Activation generator helper classes for TCAV"""
'''
The following class was modified to enable numeric class labels
'''
from abc import ABCMeta
from abc import abstractmethod
from multiprocessing import dummy as multiprocessing
import os.path
import numpy as np
import PIL.Image
import tensorflow as tf
class ActivationGeneratorInterface(object):
"""Interface for an activation generator for a model"""
__metaclass__ = ABCMeta
@abstractmethod
def process_and_load_activations(self, bottleneck_names, concepts):
pass
@abstractmethod
def get_model(self):
pass
class ActivationGeneratorBase(ActivationGeneratorInterface):
"""Basic abstract activation generator for a model"""
def __init__(self, model, acts_dir, max_examples=500):
self.model = model
self.acts_dir = acts_dir
self.max_examples = max_examples
def get_model(self):
return self.model
@abstractmethod
def get_examples_for_concept(self, concept):
pass
def get_activations_for_concept(self, concept, bottleneck):
examples = self.get_examples_for_concept(concept)
return self.get_activations_for_examples(examples, bottleneck)
def get_activations_for_examples(self, examples, bottleneck):
acts = self.model.run_examples(examples, bottleneck)
return self.model.reshape_activations(acts).squeeze()
def process_and_load_activations(self, bottleneck_names, concepts):
acts = {}
if self.acts_dir and not tf.gfile.Exists(self.acts_dir):
tf.gfile.MakeDirs(self.acts_dir)
for concept in concepts:
if concept not in acts:
acts[concept] = {}
for bottleneck_name in bottleneck_names:
acts_path = os.path.join(self.acts_dir, 'acts_{}_{}'.format(
concept, bottleneck_name)) if self.acts_dir else None
if acts_path and tf.gfile.Exists(acts_path):
with tf.gfile.Open(acts_path, 'rb') as f:
acts[concept][bottleneck_name] = np.load(f).squeeze()
tf.logging.info('Loaded {} shape {}'.format(
acts_path, acts[concept][bottleneck_name].shape))
else:
acts[concept][bottleneck_name] = self.get_activations_for_concept(
concept, bottleneck_name)
if acts_path:
tf.logging.info('{} does not exist, Making one...'.format(
acts_path))
with tf.gfile.Open(acts_path, 'wb') as f:
np.save(f, acts[concept][bottleneck_name], allow_pickle=False)
return acts
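  # The returned dict is nested as acts[concept][bottleneck_name] -> ndarray of
  # activations for that concept at that bottleneck (loaded from the cache file
  # when one exists, otherwise computed and optionally saved).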
class ImageActivationGenerator(ActivationGeneratorBase):
"""Activation generator for a basic image model"""
def __init__(self, model, source_dir, acts_dir, max_examples=10):
self.source_dir = source_dir
super(ImageActivationGenerator, self).__init__(
model, acts_dir, max_examples)
def get_examples_for_concept(self, concept):
concept_dir = os.path.join(self.source_dir, concept)
print(concept_dir, concept)
img_paths = [os.path.join(concept_dir, d)
for d in tf.gfile.ListDirectory(concept_dir)]
imgs = self.load_images_from_files(img_paths, self.max_examples,
shape=self.model.get_image_shape()[:2])
return imgs
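  # Illustrative usage (the paths and concept name below are hypothetical):
  #   generator = ImageActivationGenerator(model, '/data/concepts', '/tmp/acts')
  #   imgs = generator.get_examples_for_concept('striped')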
def load_image_from_file(self, filename, shape):
"""Given a filename, try to open the file. If failed, return None.
Args:
filename: location of the image file
shape: the shape of the image file to be scaled
Returns:
      the image if successful, None otherwise.
    Raises:
exception if the image was not the right shape.
"""
if not tf.gfile.Exists(filename):
tf.logging.error('Cannot find file: {}'.format(filename))
return None
try:
img = np.array(PIL.Image.open(tf.gfile.Open(filename, 'rb')).resize(
shape, PIL.Image.BILINEAR))
# Normalize pixel values to between 0 and 1.
img = np.float32(img) / 255.0
if not (len(img.shape) == 3 and img.shape[2] == 3):
return None
else:
return img
except Exception as e:
tf.logging.info(e)
return None
return img
def load_images_from_files(self, filenames, max_imgs=500,
do_shuffle=True, run_parallel=True,
shape=(299, 299),
num_workers=100):
"""Return image arrays from filenames.
Args:
filenames: locations of image files.
max_imgs: maximum number of images from filenames.
do_shuffle: before getting max_imgs files, shuffle the names or not
run_parallel: get images in parallel or not
shape: desired shape of the image
num_workers: number of workers in parallelization.
Returns:
image arrays
"""
imgs = []
# First shuffle a copy of the filenames.
filenames = filenames[:]
if do_shuffle:
np.random.shuffle(filenames)
if run_parallel:
pool = multiprocessing.Pool(num_workers)
imgs = pool.map(
lambda filename: self.load_image_from_file(filename, shape),
filenames[:max_imgs])
imgs = [img for img in imgs if img is not None]
else:
for filename in filenames:
img = self.load_image_from_file(filename, shape)
if img is not None:
imgs.append(img)
if len(imgs) >= max_imgs:
break
return np.array(imgs)
|
the-stack_0_14 | import base64
import logging
from urllib import urlencode
from dateutil.tz import tzutc
import httplib2
from sharpy.exceptions import AccessDenied
from sharpy.exceptions import BadRequest
from sharpy.exceptions import CheddarError
from sharpy.exceptions import CheddarFailure
from sharpy.exceptions import NaughtyGateway
from sharpy.exceptions import NotFound
from sharpy.exceptions import PreconditionFailed
from sharpy.exceptions import UnprocessableEntity
client_log = logging.getLogger('SharpyClient')
class Client(object):
default_endpoint = 'https://cheddargetter.com/xml'
def __init__(self, username, password, product_code, cache=None,
timeout=None, endpoint=None):
'''
username - Your cheddargetter username (probably an email address)
password - Your cheddargetter password
product_code - The product code for the product you want to work with
cache - A file system path or an object which implements the httplib2
cache API (optional)
        timeout - Socket level timeout in seconds (optional)
endpoint - An alternate API endpoint (optional)
'''
self.username = username
self.password = password
self.product_code = product_code
self.endpoint = endpoint or self.default_endpoint
self.cache = cache
self.timeout = timeout
super(Client, self).__init__()
def build_url(self, path, params=None):
'''
Constructs the url for a cheddar API resource
'''
url = u'%s/%s/productCode/%s' % (
self.endpoint,
path,
self.product_code,
)
if params:
for key, value in params.items():
url = u'%s/%s/%s' % (url, key, value)
return url
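    # Illustrative example (product code and params are hypothetical): with
    # product_code 'MY_PRODUCT', build_url('customers/get', params={'code': 'abc'})
    # returns 'https://cheddargetter.com/xml/customers/get/productCode/MY_PRODUCT/code/abc'
    # when the default endpoint is used.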
def format_datetime(self, to_format):
if to_format == 'now':
str_dt = to_format
else:
if getattr(to_format, 'tzinfo', None) is not None:
utc_value = to_format.astimezone(tzutc())
else:
utc_value = to_format
str_dt = utc_value.strftime('%Y-%m-%dT%H:%M:%S+00:00')
return str_dt
def format_date(self, to_format):
if to_format == 'now':
str_dt = to_format
else:
if getattr(to_format, 'tzinfo', None) is not None:
utc_value = to_format.astimezone(tzutc())
else:
utc_value = to_format
str_dt = utc_value.strftime('%Y-%m-%d')
return str_dt
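    # For example (illustrative): a UTC-aware datetime(2020, 1, 2, 3, 4, 5) is
    # rendered as '2020-01-02T03:04:05+00:00' by format_datetime and as
    # '2020-01-02' by format_date; the literal string 'now' is passed through
    # unchanged by both methods.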
def make_request(self, path, params=None, data=None, method=None):
'''
Makes a request to the cheddar api using the authentication and
configuration settings available.
'''
# Setup values
url = self.build_url(path, params)
client_log.debug('Requesting: %s' % url)
method = method or 'GET'
body = None
headers = {}
cleaned_data = None
if data:
method = 'POST'
body = urlencode(data)
headers = {
'content-type':
'application/x-www-form-urlencoded; charset=UTF-8',
}
            # Clean credit card info before the request data gets logged
            # (remove the CVV and only show the last four digits of the card number)
cleaned_data = data.copy()
if 'subscription[ccCardCode]' in cleaned_data:
del cleaned_data['subscription[ccCardCode]']
if 'subscription[ccNumber]' in cleaned_data:
ccNum = cleaned_data['subscription[ccNumber]']
cleaned_data['subscription[ccNumber]'] = ccNum[-4:]
client_log.debug('Request Method: %s' % method)
client_log.debug('Request Body (Cleaned Data): %s' % cleaned_data)
# Setup http client
h = httplib2.Http(cache=self.cache, timeout=self.timeout)
# Skip the normal http client behavior and send auth headers
# immediately to save an http request.
headers['Authorization'] = "Basic %s" % base64.standard_b64encode(
self.username + ':' + self.password).strip()
# Make request
response, content = h.request(url, method, body=body, headers=headers)
status = response.status
client_log.debug('Response Status: %d' % status)
client_log.debug('Response Content: %s' % content)
if status != 200 and status != 302:
exception_class = CheddarError
if status == 401:
exception_class = AccessDenied
elif status == 400:
exception_class = BadRequest
elif status == 404:
exception_class = NotFound
elif status == 412:
exception_class = PreconditionFailed
elif status == 500:
exception_class = CheddarFailure
elif status == 502:
exception_class = NaughtyGateway
elif status == 422:
exception_class = UnprocessableEntity
raise exception_class(response, content)
response.content = content
return response
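# Illustrative usage (credentials, product code and path are hypothetical):
#   client = Client('user@example.com', 'secret', 'MY_PRODUCT')
#   response = client.make_request('customers/get', params={'code': 'abc'})
#   body = response.content  # raw XML returned by the API endpoint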
|
the-stack_0_15 | from fastbook import *
from fastai.vision.widgets import *
def create_dataloader(path):
print(" Creating dataloader.. ")
db = DataBlock(
blocks=(ImageBlock, CategoryBlock),
get_items=get_image_files,
splitter=RandomSplitter(valid_pct=0.2, seed=42),
get_y=parent_label,
item_tfms=Resize(128))
db = db.new(
item_tfms=RandomResizedCrop(224, min_scale=0.5),
batch_tfms=aug_transforms())
dls = db.dataloaders(path)
return dls
def train_model(dls , save_model_name = "animals_prediction.pkl"):
print(" Training Model .. ")
learn = cnn_learner(dls, resnet18, metrics=error_rate)
learn.fine_tune(4)
learn.export(save_model_name)
return learn
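# The exported learner can later be reloaded for inference, for example
# (illustrative; `some_image` is a placeholder for a path or PILImage):
#   learn_inf = load_learner("animals_prediction.pkl")
#   pred_class, pred_idx, probs = learn_inf.predict(some_image)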
if __name__ == "__main__":
path = Path("DATA")
animals_path = (path/"animals")
dls = create_dataloader(animals_path)
model = train_model(dls ,"animals_prediction.pkl")
|
the-stack_0_17 | # Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
import copy
import json
import logging
import re
from collections import OrderedDict
from datetime import timedelta
# OAuth2
from oauthlib import oauth2
from oauthlib.common import generate_token
# Django
from django.conf import settings
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist, ValidationError as DjangoValidationError
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import force_text
from django.utils.text import capfirst
from django.utils.timezone import now
from django.utils.functional import cached_property
# Django REST Framework
from rest_framework.exceptions import ValidationError, PermissionDenied
from rest_framework.relations import ManyRelatedField
from rest_framework import fields
from rest_framework import serializers
from rest_framework import validators
from rest_framework.utils.serializer_helpers import ReturnList
# Django-Polymorphic
from polymorphic.models import PolymorphicModel
# AWX
from awx.main.access import get_user_capabilities
from awx.main.constants import (
SCHEDULEABLE_PROVIDERS,
ANSI_SGR_PATTERN,
ACTIVE_STATES,
CENSOR_VALUE,
)
from awx.main.models import (
ActivityStream, AdHocCommand, AdHocCommandEvent, Credential, CredentialInputSource,
CredentialType, CustomInventoryScript, Group, Host, Instance,
InstanceGroup, Inventory, InventorySource, InventoryUpdate,
InventoryUpdateEvent, Job, JobEvent, JobHostSummary, JobLaunchConfig,
JobTemplate, Label, Notification, NotificationTemplate,
OAuth2AccessToken, OAuth2Application, Organization, Project,
ProjectUpdate, ProjectUpdateEvent, RefreshToken, Role, Schedule,
SystemJob, SystemJobEvent, SystemJobTemplate, Team, UnifiedJob,
UnifiedJobTemplate, WorkflowJob, WorkflowJobNode,
WorkflowJobTemplate, WorkflowJobTemplateNode, StdoutMaxBytesExceeded
)
from awx.main.models.base import VERBOSITY_CHOICES, NEW_JOB_TYPE_CHOICES
from awx.main.models.rbac import (
get_roles_on_resource, role_summary_fields_generator
)
from awx.main.fields import ImplicitRoleField, JSONBField
from awx.main.utils import (
get_type_for_model, get_model_for_type,
camelcase_to_underscore, getattrd, parse_yaml_or_json,
has_model_field_prefetched, extract_ansible_vars, encrypt_dict,
prefetch_page_capabilities, get_external_account)
from awx.main.utils.filters import SmartFilter
from awx.main.redact import UriCleaner, REPLACE_STR
from awx.main.validators import vars_validate_or_raise
from awx.api.versioning import reverse
from awx.api.fields import (BooleanNullField, CharNullField, ChoiceNullField,
VerbatimField, DeprecatedCredentialField)
logger = logging.getLogger('awx.api.serializers')
# Fields that should be summarized regardless of object type.
DEFAULT_SUMMARY_FIELDS = ('id', 'name', 'description')# , 'created_by', 'modified_by')#, 'type')
# Keys are fields (foreign keys) where, if found on an instance, summary info
# should be added to the serialized data. Values are a tuple of field names on
# the related object to include in the summary data (if the field is present on
# the related object).
SUMMARIZABLE_FK_FIELDS = {
'organization': DEFAULT_SUMMARY_FIELDS,
'user': ('id', 'username', 'first_name', 'last_name'),
'application': ('id', 'name'),
'team': DEFAULT_SUMMARY_FIELDS,
'inventory': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',
'total_hosts',
'hosts_with_active_failures',
'total_groups',
'groups_with_active_failures',
'has_inventory_sources',
'total_inventory_sources',
'inventory_sources_with_failures',
'organization_id',
'kind',
'insights_credential_id',),
'host': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',
'has_inventory_sources'),
'group': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',
'total_hosts',
'hosts_with_active_failures',
'total_groups',
'groups_with_active_failures',
'has_inventory_sources'),
'project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed',),
'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed', 'type'),
'job_template': DEFAULT_SUMMARY_FIELDS,
'workflow_job_template': DEFAULT_SUMMARY_FIELDS,
'workflow_job': DEFAULT_SUMMARY_FIELDS,
'schedule': DEFAULT_SUMMARY_FIELDS + ('next_run',),
'unified_job_template': DEFAULT_SUMMARY_FIELDS + ('unified_job_type',),
'last_job': DEFAULT_SUMMARY_FIELDS + ('finished', 'status', 'failed', 'license_error'),
'last_job_host_summary': DEFAULT_SUMMARY_FIELDS + ('failed',),
'last_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
'current_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
'current_job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
'inventory_source': ('source', 'last_updated', 'status'),
'custom_inventory_script': DEFAULT_SUMMARY_FIELDS,
'source_script': ('name', 'description'),
'role': ('id', 'role_field'),
'notification_template': DEFAULT_SUMMARY_FIELDS,
'instance_group': {'id', 'name', 'controller_id'},
'insights_credential': DEFAULT_SUMMARY_FIELDS,
'source_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
'target_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
}
def reverse_gfk(content_object, request):
'''
Computes a reverse for a GenericForeignKey field.
Returns a dictionary of the form
{ '<type>': reverse(<type detail>) }
for example
{ 'organization': '/api/v2/organizations/1/' }
'''
if content_object is None or not hasattr(content_object, 'get_absolute_url'):
return {}
return {
camelcase_to_underscore(content_object.__class__.__name__): content_object.get_absolute_url(request=request)
}
class CopySerializer(serializers.Serializer):
name = serializers.CharField()
def validate(self, attrs):
name = attrs.get('name')
view = self.context.get('view', None)
obj = view.get_object()
if name == obj.name:
raise serializers.ValidationError(_(
'The original object is already named {}, a copy from'
' it cannot have the same name.'.format(name)
))
return attrs
class BaseSerializerMetaclass(serializers.SerializerMetaclass):
'''
Custom metaclass to enable attribute inheritance from Meta objects on
serializer base classes.
Also allows for inheriting or updating field lists from base class(es):
class Meta:
# Inherit all fields from base class.
fields = ('*',)
# Inherit all fields from base class and add 'foo'.
fields = ('*', 'foo')
# Inherit all fields from base class except 'bar'.
fields = ('*', '-bar')
# Define fields as 'foo' and 'bar'; ignore base class fields.
fields = ('foo', 'bar')
# Extra field kwargs dicts are also merged from base classes.
extra_kwargs = {
'foo': {'required': True},
'bar': {'read_only': True},
}
# If a subclass were to define extra_kwargs as:
extra_kwargs = {
'foo': {'required': False, 'default': ''},
'bar': {'label': 'New Label for Bar'},
}
# The resulting value of extra_kwargs would be:
extra_kwargs = {
'foo': {'required': False, 'default': ''},
'bar': {'read_only': True, 'label': 'New Label for Bar'},
}
# Extra field kwargs cannot be removed in subclasses, only replaced.
'''
@staticmethod
def _is_list_of_strings(x):
return isinstance(x, (list, tuple)) and all([isinstance(y, str) for y in x])
@staticmethod
def _is_extra_kwargs(x):
return isinstance(x, dict) and all([isinstance(k, str) and isinstance(v, dict) for k,v in x.items()])
@classmethod
def _update_meta(cls, base, meta, other=None):
for attr in dir(other):
if attr.startswith('_'):
continue
val = getattr(other, attr)
meta_val = getattr(meta, attr, None)
# Special handling for lists/tuples of strings (field names).
if cls._is_list_of_strings(val) and cls._is_list_of_strings(meta_val or []):
meta_val = meta_val or []
new_vals = []
except_vals = []
if base: # Merge values from all bases.
new_vals.extend([x for x in meta_val])
for v in val:
if not base and v == '*': # Inherit all values from previous base(es).
new_vals.extend([x for x in meta_val])
elif not base and v.startswith('-'): # Except these values.
except_vals.append(v[1:])
else:
new_vals.append(v)
val = []
for v in new_vals:
if v not in except_vals and v not in val:
val.append(v)
val = tuple(val)
# Merge extra_kwargs dicts from base classes.
elif cls._is_extra_kwargs(val) and cls._is_extra_kwargs(meta_val or {}):
meta_val = meta_val or {}
new_val = {}
if base:
for k,v in meta_val.items():
new_val[k] = copy.deepcopy(v)
for k,v in val.items():
new_val.setdefault(k, {}).update(copy.deepcopy(v))
val = new_val
# Any other values are copied in case they are mutable objects.
else:
val = copy.deepcopy(val)
setattr(meta, attr, val)
def __new__(cls, name, bases, attrs):
meta = type('Meta', (object,), {})
for base in bases[::-1]:
cls._update_meta(base, meta, getattr(base, 'Meta', None))
cls._update_meta(None, meta, attrs.get('Meta', meta))
attrs['Meta'] = meta
return super(BaseSerializerMetaclass, cls).__new__(cls, name, bases, attrs)
class BaseSerializer(serializers.ModelSerializer, metaclass=BaseSerializerMetaclass):
class Meta:
fields = ('id', 'type', 'url', 'related', 'summary_fields', 'created',
'modified', 'name', 'description')
summary_fields = ()
summarizable_fields = ()
# add the URL and related resources
type = serializers.SerializerMethodField()
url = serializers.SerializerMethodField()
related = serializers.SerializerMethodField('_get_related')
summary_fields = serializers.SerializerMethodField('_get_summary_fields')
# make certain fields read only
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
def __init__(self, *args, **kwargs):
super(BaseSerializer, self).__init__(*args, **kwargs)
# The following lines fix the problem of being able to pass JSON dict into PrimaryKeyRelatedField.
data = kwargs.get('data', False)
if data:
for field_name, field_instance in self.fields.items():
if isinstance(field_instance, ManyRelatedField) and not field_instance.read_only:
if isinstance(data.get(field_name, False), dict):
raise serializers.ValidationError(_('Cannot use dictionary for %s' % field_name))
@property
def version(self):
return 2
def get_type(self, obj):
return get_type_for_model(self.Meta.model)
def get_types(self):
return [self.get_type(None)]
def get_type_choices(self):
type_name_map = {
'job': _('Playbook Run'),
'ad_hoc_command': _('Command'),
'project_update': _('SCM Update'),
'inventory_update': _('Inventory Sync'),
'system_job': _('Management Job'),
'workflow_job': _('Workflow Job'),
'workflow_job_template': _('Workflow Template'),
'job_template': _('Job Template')
}
choices = []
for t in self.get_types():
name = _(type_name_map.get(t, force_text(get_model_for_type(t)._meta.verbose_name).title()))
choices.append((t, name))
return choices
def get_url(self, obj):
if obj is None or not hasattr(obj, 'get_absolute_url'):
return ''
elif isinstance(obj, User):
return self.reverse('api:user_detail', kwargs={'pk': obj.pk})
else:
return obj.get_absolute_url(request=self.context.get('request'))
def filter_field_metadata(self, fields, method):
"""
Filter field metadata based on the request method.
        This is intended to be extended by subclasses.
"""
return fields
def _get_related(self, obj):
return {} if obj is None else self.get_related(obj)
def _generate_named_url(self, url_path, obj, node):
url_units = url_path.split('/')
named_url = node.generate_named_url(obj)
url_units[4] = named_url
return '/'.join(url_units)
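    # Example with hypothetical values: for an Organization, a URL such as
    # '/api/v2/organizations/1/' has url_units[4] == '1' (the pk segment), and it
    # is replaced by the generated named URL, giving something like
    # '/api/v2/organizations/Default/'.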
def get_related(self, obj):
res = OrderedDict()
view = self.context.get('view', None)
if view and (hasattr(view, 'retrieve') or view.request.method == 'POST') and \
type(obj) in settings.NAMED_URL_GRAPH:
original_url = self.get_url(obj)
res['named_url'] = self._generate_named_url(
original_url, obj, settings.NAMED_URL_GRAPH[type(obj)]
)
if getattr(obj, 'created_by', None):
res['created_by'] = self.reverse('api:user_detail', kwargs={'pk': obj.created_by.pk})
if getattr(obj, 'modified_by', None):
res['modified_by'] = self.reverse('api:user_detail', kwargs={'pk': obj.modified_by.pk})
return res
def _get_summary_fields(self, obj):
return {} if obj is None else self.get_summary_fields(obj)
def get_summary_fields(self, obj):
# Return values for certain fields on related objects, to simplify
# displaying lists of items without additional API requests.
summary_fields = OrderedDict()
for fk, related_fields in SUMMARIZABLE_FK_FIELDS.items():
try:
# A few special cases where we don't want to access the field
# because it results in additional queries.
if fk == 'job' and isinstance(obj, UnifiedJob):
continue
if fk == 'project' and (isinstance(obj, InventorySource) or
isinstance(obj, Project)):
continue
try:
fkval = getattr(obj, fk, None)
except ObjectDoesNotExist:
continue
if fkval is None:
continue
if fkval == obj:
continue
summary_fields[fk] = OrderedDict()
for field in related_fields:
fval = getattr(fkval, field, None)
if fval is None and field == 'type':
if isinstance(fkval, PolymorphicModel):
fkval = fkval.get_real_instance()
fval = get_type_for_model(fkval)
elif fval is None and field == 'unified_job_type' and isinstance(fkval, UnifiedJobTemplate):
fkval = fkval.get_real_instance()
fval = get_type_for_model(fkval._get_unified_job_class())
if fval is not None:
summary_fields[fk][field] = fval
# Can be raised by the reverse accessor for a OneToOneField.
except ObjectDoesNotExist:
pass
if getattr(obj, 'created_by', None):
summary_fields['created_by'] = OrderedDict()
for field in SUMMARIZABLE_FK_FIELDS['user']:
summary_fields['created_by'][field] = getattr(obj.created_by, field)
if getattr(obj, 'modified_by', None):
summary_fields['modified_by'] = OrderedDict()
for field in SUMMARIZABLE_FK_FIELDS['user']:
summary_fields['modified_by'][field] = getattr(obj.modified_by, field)
# RBAC summary fields
roles = {}
for field in obj._meta.get_fields():
if type(field) is ImplicitRoleField:
roles[field.name] = role_summary_fields_generator(obj, field.name)
if len(roles) > 0:
summary_fields['object_roles'] = roles
# Advance display of RBAC capabilities
if hasattr(self, 'show_capabilities'):
user_capabilities = self._obj_capability_dict(obj)
if user_capabilities:
summary_fields['user_capabilities'] = user_capabilities
return summary_fields
def _obj_capability_dict(self, obj):
"""
Returns the user_capabilities dictionary for a single item
If inside of a list view, it runs the prefetching algorithm for
the entire current page, saves it into context
"""
view = self.context.get('view', None)
parent_obj = None
if view and hasattr(view, 'parent_model') and hasattr(view, 'get_parent_object'):
parent_obj = view.get_parent_object()
if view and view.request and view.request.user:
capabilities_cache = {}
# if serializer has parent, it is ListView, apply page capabilities prefetch
if self.parent and hasattr(self, 'capabilities_prefetch') and self.capabilities_prefetch:
qs = self.parent.instance
if 'capability_map' not in self.context:
if hasattr(self, 'polymorphic_base'):
model = self.polymorphic_base.Meta.model
prefetch_list = self.polymorphic_base._capabilities_prefetch
else:
model = self.Meta.model
prefetch_list = self.capabilities_prefetch
self.context['capability_map'] = prefetch_page_capabilities(
model, qs, prefetch_list, view.request.user
)
if obj.id in self.context['capability_map']:
capabilities_cache = self.context['capability_map'][obj.id]
return get_user_capabilities(
view.request.user, obj, method_list=self.show_capabilities, parent_obj=parent_obj,
capabilities_cache=capabilities_cache
)
else:
# Contextual information to produce user_capabilities doesn't exist
return {}
def get_created(self, obj):
if obj is None:
return None
elif isinstance(obj, User):
return obj.date_joined
elif hasattr(obj, 'created'):
return obj.created
return None
def get_modified(self, obj):
if obj is None:
return None
elif isinstance(obj, User):
return obj.last_login # Not actually exposed for User.
elif hasattr(obj, 'modified'):
return obj.modified
return None
def get_extra_kwargs(self):
extra_kwargs = super(BaseSerializer, self).get_extra_kwargs()
if self.instance:
read_only_on_update_fields = getattr(self.Meta, 'read_only_on_update_fields', tuple())
for field_name in read_only_on_update_fields:
kwargs = extra_kwargs.get(field_name, {})
kwargs['read_only'] = True
extra_kwargs[field_name] = kwargs
return extra_kwargs
def build_standard_field(self, field_name, model_field):
# DRF 3.3 serializers.py::build_standard_field() -> utils/field_mapping.py::get_field_kwargs() short circuits
# when a Model's editable field is set to False. The short circuit skips choice rendering.
#
        # This logic forces rendering of choices on an uneditable field.
        # Note: Consider expanding this rendering for more than just choices fields.
        # Note: This logic works in conjunction with the read_only override applied below.
if hasattr(model_field, 'choices') and model_field.choices:
was_editable = model_field.editable
model_field.editable = True
field_class, field_kwargs = super(BaseSerializer, self).build_standard_field(field_name, model_field)
if hasattr(model_field, 'choices') and model_field.choices:
model_field.editable = was_editable
if was_editable is False:
field_kwargs['read_only'] = True
# Pass model field default onto the serializer field if field is not read-only.
if model_field.has_default() and not field_kwargs.get('read_only', False):
field_kwargs['default'] = field_kwargs['initial'] = model_field.get_default()
# Enforce minimum value of 0 for PositiveIntegerFields.
if isinstance(model_field, (models.PositiveIntegerField, models.PositiveSmallIntegerField)) and 'choices' not in field_kwargs:
field_kwargs['min_value'] = 0
# Use custom boolean field that allows null and empty string as False values.
if isinstance(model_field, models.BooleanField) and not field_kwargs.get('read_only', False):
field_class = BooleanNullField
# Use custom char or choice field that coerces null to an empty string.
if isinstance(model_field, (models.CharField, models.TextField)) and not field_kwargs.get('read_only', False):
if 'choices' in field_kwargs:
field_class = ChoiceNullField
else:
field_class = CharNullField
# Update the message used for the unique validator to use capitalized
# verbose name; keeps unique message the same as with DRF 2.x.
opts = self.Meta.model._meta.concrete_model._meta
for validator in field_kwargs.get('validators', []):
if isinstance(validator, validators.UniqueValidator):
unique_error_message = model_field.error_messages.get('unique', None)
if unique_error_message:
unique_error_message = unique_error_message % {
'model_name': capfirst(opts.verbose_name),
'field_label': capfirst(model_field.verbose_name),
}
validator.message = unique_error_message
return field_class, field_kwargs
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(BaseSerializer, self).build_relational_field(field_name, relation_info)
# Don't include choices for foreign key fields.
field_kwargs.pop('choices', None)
return field_class, field_kwargs
def get_unique_together_validators(self):
# Allow the model's full_clean method to handle the unique together validation.
return []
def run_validation(self, data=fields.empty):
try:
return super(BaseSerializer, self).run_validation(data)
except ValidationError as exc:
# Avoid bug? in DRF if exc.detail happens to be a list instead of a dict.
raise ValidationError(detail=serializers.as_serializer_error(exc))
def get_validation_exclusions(self, obj=None):
# Borrowed from DRF 2.x - return model fields that should be excluded
# from model validation.
cls = self.Meta.model
opts = cls._meta.concrete_model._meta
exclusions = [field.name for field in opts.fields]
for field_name, field in self.fields.items():
field_name = field.source or field_name
if field_name not in exclusions:
continue
if field.read_only:
continue
if isinstance(field, serializers.Serializer):
continue
exclusions.remove(field_name)
        # The clean_ methods cannot be run on many-to-many models
exclusions.extend([field.name for field in opts.many_to_many])
return exclusions
def validate(self, attrs):
attrs = super(BaseSerializer, self).validate(attrs)
try:
# Create/update a model instance and run it's full_clean() method to
# do any validation implemented on the model class.
exclusions = self.get_validation_exclusions(self.instance)
obj = self.instance or self.Meta.model()
for k,v in attrs.items():
if k not in exclusions:
setattr(obj, k, v)
obj.full_clean(exclude=exclusions)
# full_clean may modify values on the instance; copy those changes
# back to attrs so they are saved.
for k in attrs.keys():
if k not in exclusions:
attrs[k] = getattr(obj, k)
except DjangoValidationError as exc:
# DjangoValidationError may contain a list or dict; normalize into a
# dict where the keys are the field name and the values are a list
# of error messages, then raise as a DRF ValidationError. DRF would
# normally convert any DjangoValidationError to a non-field specific
# error message; here we preserve field-specific errors raised from
# the model's full_clean method.
d = exc.update_error_dict({})
for k,v in d.items():
v = v if isinstance(v, list) else [v]
v2 = []
for e in v:
if isinstance(e, DjangoValidationError):
v2.extend(list(e))
elif isinstance(e, list):
v2.extend(e)
else:
v2.append(e)
d[k] = list(map(force_text, v2))
raise ValidationError(d)
return attrs
def reverse(self, *args, **kwargs):
kwargs['request'] = self.context.get('request')
return reverse(*args, **kwargs)
@property
def is_detail_view(self):
if 'view' in self.context:
if 'pk' in self.context['view'].kwargs:
return True
return False
class EmptySerializer(serializers.Serializer):
pass
class UnifiedJobTemplateSerializer(BaseSerializer):
# As a base serializer, the capabilities prefetch is not used directly
_capabilities_prefetch = [
'admin', 'execute',
{'copy': ['jobtemplate.project.use', 'jobtemplate.inventory.use',
'workflowjobtemplate.organization.workflow_admin']}
]
class Meta:
model = UnifiedJobTemplate
fields = ('*', 'last_job_run', 'last_job_failed',
'next_job_run', 'status')
def get_related(self, obj):
res = super(UnifiedJobTemplateSerializer, self).get_related(obj)
if obj.current_job:
res['current_job'] = obj.current_job.get_absolute_url(request=self.context.get('request'))
if obj.last_job:
res['last_job'] = obj.last_job.get_absolute_url(request=self.context.get('request'))
if obj.next_schedule:
res['next_schedule'] = obj.next_schedule.get_absolute_url(request=self.context.get('request'))
return res
def get_types(self):
if type(self) is UnifiedJobTemplateSerializer:
return ['project', 'inventory_source', 'job_template', 'system_job_template', 'workflow_job_template',]
else:
return super(UnifiedJobTemplateSerializer, self).get_types()
def get_sub_serializer(self, obj):
serializer_class = None
if type(self) is UnifiedJobTemplateSerializer:
if isinstance(obj, Project):
serializer_class = ProjectSerializer
elif isinstance(obj, InventorySource):
serializer_class = InventorySourceSerializer
elif isinstance(obj, JobTemplate):
serializer_class = JobTemplateSerializer
elif isinstance(obj, SystemJobTemplate):
serializer_class = SystemJobTemplateSerializer
elif isinstance(obj, WorkflowJobTemplate):
serializer_class = WorkflowJobTemplateSerializer
return serializer_class
def to_representation(self, obj):
serializer_class = self.get_sub_serializer(obj)
if serializer_class:
serializer = serializer_class(instance=obj, context=self.context)
# preserve links for list view
if self.parent:
serializer.parent = self.parent
serializer.polymorphic_base = self
# capabilities prefetch is only valid for these models
if isinstance(obj, (JobTemplate, WorkflowJobTemplate)):
serializer.capabilities_prefetch = self._capabilities_prefetch
else:
serializer.capabilities_prefetch = None
return serializer.to_representation(obj)
else:
return super(UnifiedJobTemplateSerializer, self).to_representation(obj)
class UnifiedJobSerializer(BaseSerializer):
show_capabilities = ['start', 'delete']
event_processing_finished = serializers.BooleanField(
help_text=_('Indicates whether all of the events generated by this '
'unified job have been saved to the database.'),
read_only=True
)
class Meta:
model = UnifiedJob
fields = ('*', 'unified_job_template', 'launch_type', 'status',
'failed', 'started', 'finished', 'elapsed', 'job_args',
'job_cwd', 'job_env', 'job_explanation',
'execution_node', 'controller_node',
'result_traceback', 'event_processing_finished')
extra_kwargs = {
'unified_job_template': {
'source': 'unified_job_template_id',
'label': 'unified job template',
},
'job_env': {
'read_only': True,
'label': 'job_env',
}
}
def get_types(self):
if type(self) is UnifiedJobSerializer:
return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job', 'workflow_job',]
else:
return super(UnifiedJobSerializer, self).get_types()
def get_related(self, obj):
res = super(UnifiedJobSerializer, self).get_related(obj)
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(request=self.context.get('request'))
if obj.schedule:
res['schedule'] = obj.schedule.get_absolute_url(request=self.context.get('request'))
if isinstance(obj, ProjectUpdate):
res['stdout'] = self.reverse('api:project_update_stdout', kwargs={'pk': obj.pk})
elif isinstance(obj, InventoryUpdate):
res['stdout'] = self.reverse('api:inventory_update_stdout', kwargs={'pk': obj.pk})
elif isinstance(obj, Job):
res['stdout'] = self.reverse('api:job_stdout', kwargs={'pk': obj.pk})
elif isinstance(obj, AdHocCommand):
res['stdout'] = self.reverse('api:ad_hoc_command_stdout', kwargs={'pk': obj.pk})
if obj.workflow_job_id:
res['source_workflow_job'] = self.reverse('api:workflow_job_detail', kwargs={'pk': obj.workflow_job_id})
return res
def get_summary_fields(self, obj):
summary_fields = super(UnifiedJobSerializer, self).get_summary_fields(obj)
if obj.spawned_by_workflow:
summary_fields['source_workflow_job'] = {}
try:
summary_obj = obj.unified_job_node.workflow_job
except UnifiedJob.unified_job_node.RelatedObjectDoesNotExist:
return summary_fields
for field in SUMMARIZABLE_FK_FIELDS['job']:
val = getattr(summary_obj, field, None)
if val is not None:
summary_fields['source_workflow_job'][field] = val
return summary_fields
def get_sub_serializer(self, obj):
serializer_class = None
if type(self) is UnifiedJobSerializer:
if isinstance(obj, ProjectUpdate):
serializer_class = ProjectUpdateSerializer
elif isinstance(obj, InventoryUpdate):
serializer_class = InventoryUpdateSerializer
elif isinstance(obj, Job):
serializer_class = JobSerializer
elif isinstance(obj, AdHocCommand):
serializer_class = AdHocCommandSerializer
elif isinstance(obj, SystemJob):
serializer_class = SystemJobSerializer
elif isinstance(obj, WorkflowJob):
serializer_class = WorkflowJobSerializer
return serializer_class
def to_representation(self, obj):
serializer_class = self.get_sub_serializer(obj)
if serializer_class:
serializer = serializer_class(instance=obj, context=self.context)
# preserve links for list view
if self.parent:
serializer.parent = self.parent
serializer.polymorphic_base = self
# TODO: restrict models for capabilities prefetch, when it is added
ret = serializer.to_representation(obj)
else:
ret = super(UnifiedJobSerializer, self).to_representation(obj)
if 'elapsed' in ret:
if obj and obj.pk and obj.started and not obj.finished:
td = now() - obj.started
ret['elapsed'] = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / (10 ** 6 * 1.0)
ret['elapsed'] = float(ret['elapsed'])
return ret
class UnifiedJobListSerializer(UnifiedJobSerializer):
class Meta:
fields = ('*', '-job_args', '-job_cwd', '-job_env', '-result_traceback', '-event_processing_finished')
def get_field_names(self, declared_fields, info):
field_names = super(UnifiedJobListSerializer, self).get_field_names(declared_fields, info)
# Meta multiple inheritance and -field_name options don't seem to be
# taking effect above, so remove the undesired fields here.
return tuple(x for x in field_names if x not in ('job_args', 'job_cwd', 'job_env', 'result_traceback', 'event_processing_finished'))
def get_types(self):
if type(self) is UnifiedJobListSerializer:
return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job', 'workflow_job']
else:
return super(UnifiedJobListSerializer, self).get_types()
def get_sub_serializer(self, obj):
serializer_class = None
if type(self) is UnifiedJobListSerializer:
if isinstance(obj, ProjectUpdate):
serializer_class = ProjectUpdateListSerializer
elif isinstance(obj, InventoryUpdate):
serializer_class = InventoryUpdateListSerializer
elif isinstance(obj, Job):
serializer_class = JobListSerializer
elif isinstance(obj, AdHocCommand):
serializer_class = AdHocCommandListSerializer
elif isinstance(obj, SystemJob):
serializer_class = SystemJobListSerializer
elif isinstance(obj, WorkflowJob):
serializer_class = WorkflowJobListSerializer
return serializer_class
def to_representation(self, obj):
serializer_class = self.get_sub_serializer(obj)
if serializer_class:
serializer = serializer_class(instance=obj, context=self.context)
ret = serializer.to_representation(obj)
else:
ret = super(UnifiedJobListSerializer, self).to_representation(obj)
if 'elapsed' in ret:
ret['elapsed'] = float(ret['elapsed'])
return ret
class UnifiedJobStdoutSerializer(UnifiedJobSerializer):
result_stdout = serializers.SerializerMethodField()
class Meta:
fields = ('result_stdout',)
def get_types(self):
if type(self) is UnifiedJobStdoutSerializer:
return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job']
else:
return super(UnifiedJobStdoutSerializer, self).get_types()
class UserSerializer(BaseSerializer):
password = serializers.CharField(required=False, default='', write_only=True,
help_text=_('Write-only field used to change the password.'))
ldap_dn = serializers.CharField(source='profile.ldap_dn', read_only=True)
external_account = serializers.SerializerMethodField(help_text=_('Set if the account is managed by an external service'))
is_system_auditor = serializers.BooleanField(default=False)
show_capabilities = ['edit', 'delete']
class Meta:
model = User
fields = ('*', '-name', '-description', '-modified',
'username', 'first_name', 'last_name',
'email', 'is_superuser', 'is_system_auditor', 'password', 'ldap_dn', 'last_login', 'external_account')
def to_representation(self, obj):
ret = super(UserSerializer, self).to_representation(obj)
ret.pop('password', None)
if obj and type(self) is UserSerializer:
ret['auth'] = obj.social_auth.values('provider', 'uid')
return ret
def get_validation_exclusions(self, obj=None):
ret = super(UserSerializer, self).get_validation_exclusions(obj)
ret.extend(['password', 'is_system_auditor'])
return ret
def validate_password(self, value):
if not self.instance and value in (None, ''):
raise serializers.ValidationError(_('Password required for new User.'))
return value
def _update_password(self, obj, new_password):
# For now we're not raising an error, just not saving password for
# users managed by LDAP who already have an unusable password set.
if getattr(settings, 'AUTH_LDAP_SERVER_URI', None):
try:
if obj.pk and obj.profile.ldap_dn and not obj.has_usable_password():
new_password = None
except AttributeError:
pass
if (getattr(settings, 'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_ORG_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_TEAM_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_SAML_ENABLED_IDPS', None)) and obj.social_auth.all():
new_password = None
if (getattr(settings, 'RADIUS_SERVER', None) or
getattr(settings, 'TACACSPLUS_HOST', None)) and obj.enterprise_auth.all():
new_password = None
if new_password:
obj.set_password(new_password)
obj.save(update_fields=['password'])
# Cycle the session key, but if the requesting user is the same
# as the modified user then inject a session key derived from
# the updated user to prevent logout. This is the logic used by
# the Django admin's own user_change_password view.
update_session_auth_hash(self.context['request'], obj)
elif not obj.password:
obj.set_unusable_password()
obj.save(update_fields=['password'])
def get_external_account(self, obj):
return get_external_account(obj)
def create(self, validated_data):
new_password = validated_data.pop('password', None)
is_system_auditor = validated_data.pop('is_system_auditor', None)
obj = super(UserSerializer, self).create(validated_data)
self._update_password(obj, new_password)
if is_system_auditor is not None:
obj.is_system_auditor = is_system_auditor
return obj
def update(self, obj, validated_data):
new_password = validated_data.pop('password', None)
is_system_auditor = validated_data.pop('is_system_auditor', None)
obj = super(UserSerializer, self).update(obj, validated_data)
self._update_password(obj, new_password)
if is_system_auditor is not None:
obj.is_system_auditor = is_system_auditor
return obj
def get_related(self, obj):
res = super(UserSerializer, self).get_related(obj)
res.update(dict(
teams = self.reverse('api:user_teams_list', kwargs={'pk': obj.pk}),
organizations = self.reverse('api:user_organizations_list', kwargs={'pk': obj.pk}),
admin_of_organizations = self.reverse('api:user_admin_of_organizations_list', kwargs={'pk': obj.pk}),
projects = self.reverse('api:user_projects_list', kwargs={'pk': obj.pk}),
credentials = self.reverse('api:user_credentials_list', kwargs={'pk': obj.pk}),
roles = self.reverse('api:user_roles_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:user_activity_stream_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:user_access_list', kwargs={'pk': obj.pk}),
tokens = self.reverse('api:o_auth2_token_list', kwargs={'pk': obj.pk}),
authorized_tokens = self.reverse('api:user_authorized_token_list', kwargs={'pk': obj.pk}),
personal_tokens = self.reverse('api:user_personal_token_list', kwargs={'pk': obj.pk}),
))
return res
def _validate_ldap_managed_field(self, value, field_name):
if not getattr(settings, 'AUTH_LDAP_SERVER_URI', None):
return value
try:
is_ldap_user = bool(self.instance and self.instance.profile.ldap_dn)
except AttributeError:
is_ldap_user = False
if is_ldap_user:
ldap_managed_fields = ['username']
ldap_managed_fields.extend(getattr(settings, 'AUTH_LDAP_USER_ATTR_MAP', {}).keys())
ldap_managed_fields.extend(getattr(settings, 'AUTH_LDAP_USER_FLAGS_BY_GROUP', {}).keys())
if field_name in ldap_managed_fields:
if value != getattr(self.instance, field_name):
raise serializers.ValidationError(_('Unable to change %s on user managed by LDAP.') % field_name)
return value
def validate_username(self, value):
return self._validate_ldap_managed_field(value, 'username')
def validate_first_name(self, value):
return self._validate_ldap_managed_field(value, 'first_name')
def validate_last_name(self, value):
return self._validate_ldap_managed_field(value, 'last_name')
def validate_email(self, value):
return self._validate_ldap_managed_field(value, 'email')
def validate_is_superuser(self, value):
return self._validate_ldap_managed_field(value, 'is_superuser')
class UserActivityStreamSerializer(UserSerializer):
"""Changes to system auditor status are shown as separate entries,
so by excluding it from fields here we avoid duplication, which
would carry some unintended consequences.
"""
class Meta:
model = User
fields = ('*', '-is_system_auditor')
class BaseOAuth2TokenSerializer(BaseSerializer):
refresh_token = serializers.SerializerMethodField()
token = serializers.SerializerMethodField()
ALLOWED_SCOPES = ['read', 'write']
class Meta:
model = OAuth2AccessToken
fields = (
'*', '-name', 'description', 'user', 'token', 'refresh_token',
'application', 'expires', 'scope',
)
read_only_fields = ('user', 'token', 'expires', 'refresh_token')
extra_kwargs = {
'scope': {'allow_null': False, 'required': False},
'user': {'allow_null': False, 'required': True}
}
def get_token(self, obj):
request = self.context.get('request', None)
try:
if request.method == 'POST':
return obj.token
else:
return CENSOR_VALUE
except ObjectDoesNotExist:
return ''
def get_refresh_token(self, obj):
request = self.context.get('request', None)
try:
if not obj.refresh_token:
return None
elif request.method == 'POST':
return getattr(obj.refresh_token, 'token', '')
else:
return CENSOR_VALUE
except ObjectDoesNotExist:
return None
def get_related(self, obj):
ret = super(BaseOAuth2TokenSerializer, self).get_related(obj)
if obj.user:
ret['user'] = self.reverse('api:user_detail', kwargs={'pk': obj.user.pk})
if obj.application:
ret['application'] = self.reverse(
'api:o_auth2_application_detail', kwargs={'pk': obj.application.pk}
)
ret['activity_stream'] = self.reverse(
'api:o_auth2_token_activity_stream_list', kwargs={'pk': obj.pk}
)
return ret
def _is_valid_scope(self, value):
if not value or (not isinstance(value, str)):
return False
words = value.split()
for word in words:
if words.count(word) > 1:
return False # do not allow duplicates
if word not in self.ALLOWED_SCOPES:
return False
return True
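    # For example: 'read', 'write' and 'read write' pass this check, while
    # 'read read' (a duplicate) or 'admin' (not in ALLOWED_SCOPES) do not.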
def validate_scope(self, value):
if not self._is_valid_scope(value):
raise serializers.ValidationError(_(
'Must be a simple space-separated string with allowed scopes {}.'
).format(self.ALLOWED_SCOPES))
return value
def create(self, validated_data):
validated_data['user'] = self.context['request'].user
try:
return super(BaseOAuth2TokenSerializer, self).create(validated_data)
except oauth2.AccessDeniedError as e:
raise PermissionDenied(str(e))
class UserAuthorizedTokenSerializer(BaseOAuth2TokenSerializer):
class Meta:
extra_kwargs = {
'scope': {'allow_null': False, 'required': False},
'user': {'allow_null': False, 'required': True},
'application': {'allow_null': False, 'required': True}
}
def create(self, validated_data):
current_user = self.context['request'].user
validated_data['token'] = generate_token()
validated_data['expires'] = now() + timedelta(
seconds=settings.OAUTH2_PROVIDER['ACCESS_TOKEN_EXPIRE_SECONDS']
)
obj = super(UserAuthorizedTokenSerializer, self).create(validated_data)
obj.save()
if obj.application:
RefreshToken.objects.create(
user=current_user,
token=generate_token(),
application=obj.application,
access_token=obj
)
return obj
class OAuth2TokenSerializer(BaseOAuth2TokenSerializer):
def create(self, validated_data):
current_user = self.context['request'].user
validated_data['token'] = generate_token()
validated_data['expires'] = now() + timedelta(
seconds=settings.OAUTH2_PROVIDER['ACCESS_TOKEN_EXPIRE_SECONDS']
)
obj = super(OAuth2TokenSerializer, self).create(validated_data)
if obj.application and obj.application.user:
obj.user = obj.application.user
obj.save()
if obj.application:
RefreshToken.objects.create(
user=current_user,
token=generate_token(),
application=obj.application,
access_token=obj
)
return obj
class OAuth2TokenDetailSerializer(OAuth2TokenSerializer):
class Meta:
read_only_fields = ('*', 'user', 'application')
class UserPersonalTokenSerializer(BaseOAuth2TokenSerializer):
class Meta:
read_only_fields = ('user', 'token', 'expires', 'application')
def create(self, validated_data):
validated_data['token'] = generate_token()
validated_data['expires'] = now() + timedelta(
seconds=settings.OAUTH2_PROVIDER['ACCESS_TOKEN_EXPIRE_SECONDS']
)
validated_data['application'] = None
obj = super(UserPersonalTokenSerializer, self).create(validated_data)
obj.save()
return obj
class OAuth2ApplicationSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
class Meta:
model = OAuth2Application
fields = (
'*', 'description', '-user', 'client_id', 'client_secret', 'client_type',
'redirect_uris', 'authorization_grant_type', 'skip_authorization', 'organization'
)
read_only_fields = ('client_id', 'client_secret')
read_only_on_update_fields = ('user', 'authorization_grant_type')
extra_kwargs = {
'user': {'allow_null': True, 'required': False},
'organization': {'allow_null': False},
'authorization_grant_type': {'allow_null': False, 'label': _('Authorization Grant Type')},
'client_secret': {
'label': _('Client Secret')
},
'client_type': {
'label': _('Client Type')
},
'redirect_uris': {
'label': _('Redirect URIs')
},
'skip_authorization': {
'label': _('Skip Authorization')
},
}
def to_representation(self, obj):
ret = super(OAuth2ApplicationSerializer, self).to_representation(obj)
request = self.context.get('request', None)
if request.method != 'POST' and obj.client_type == 'confidential':
ret['client_secret'] = CENSOR_VALUE
if obj.client_type == 'public':
ret.pop('client_secret', None)
return ret
def get_related(self, obj):
res = super(OAuth2ApplicationSerializer, self).get_related(obj)
res.update(dict(
tokens = self.reverse('api:o_auth2_application_token_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse(
'api:o_auth2_application_activity_stream_list', kwargs={'pk': obj.pk}
)
))
return res
def get_modified(self, obj):
if obj is None:
return None
return obj.updated
def _summary_field_tokens(self, obj):
token_list = [{'id': x.pk, 'token': CENSOR_VALUE, 'scope': x.scope} for x in obj.oauth2accesstoken_set.all()[:10]]
if has_model_field_prefetched(obj, 'oauth2accesstoken_set'):
token_count = len(obj.oauth2accesstoken_set.all())
else:
if len(token_list) < 10:
token_count = len(token_list)
else:
token_count = obj.oauth2accesstoken_set.count()
return {'count': token_count, 'results': token_list}
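    # Illustrative return value (ids are hypothetical):
    #   {'count': 2, 'results': [{'id': 7, 'token': CENSOR_VALUE, 'scope': 'read'}, ...]}
    # At most 10 tokens are included in 'results', while 'count' reflects the total.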
def get_summary_fields(self, obj):
ret = super(OAuth2ApplicationSerializer, self).get_summary_fields(obj)
ret['tokens'] = self._summary_field_tokens(obj)
return ret
class OrganizationSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
class Meta:
model = Organization
fields = ('*', 'max_hosts', 'custom_virtualenv',)
def get_related(self, obj):
res = super(OrganizationSerializer, self).get_related(obj)
res.update(dict(
projects = self.reverse('api:organization_projects_list', kwargs={'pk': obj.pk}),
inventories = self.reverse('api:organization_inventories_list', kwargs={'pk': obj.pk}),
workflow_job_templates = self.reverse('api:organization_workflow_job_templates_list', kwargs={'pk': obj.pk}),
users = self.reverse('api:organization_users_list', kwargs={'pk': obj.pk}),
admins = self.reverse('api:organization_admins_list', kwargs={'pk': obj.pk}),
teams = self.reverse('api:organization_teams_list', kwargs={'pk': obj.pk}),
credentials = self.reverse('api:organization_credential_list', kwargs={'pk': obj.pk}),
applications = self.reverse('api:organization_applications_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:organization_activity_stream_list', kwargs={'pk': obj.pk}),
notification_templates = self.reverse('api:organization_notification_templates_list', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:organization_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:organization_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:organization_notification_templates_error_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:organization_object_roles_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:organization_access_list', kwargs={'pk': obj.pk}),
instance_groups = self.reverse('api:organization_instance_groups_list', kwargs={'pk': obj.pk}),
))
return res
def get_summary_fields(self, obj):
summary_dict = super(OrganizationSerializer, self).get_summary_fields(obj)
counts_dict = self.context.get('related_field_counts', None)
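# related_field_counts is computed in bulk (e.g. by the organization list view) and passed in through serializer context; organizations missing from it simply have no related objects.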
if counts_dict is not None and summary_dict is not None:
if obj.id not in counts_dict:
summary_dict['related_field_counts'] = {
'inventories': 0, 'teams': 0, 'users': 0,
'job_templates': 0, 'admins': 0, 'projects': 0}
else:
summary_dict['related_field_counts'] = counts_dict[obj.id]
return summary_dict
def validate(self, attrs):
obj = self.instance
view = self.context['view']
obj_limit = getattr(obj, 'max_hosts', None)
api_limit = attrs.get('max_hosts')
if not view.request.user.is_superuser:
if api_limit is not None and api_limit != obj_limit:
# Only allow superusers to edit the max_hosts field
raise serializers.ValidationError(_('Cannot change max_hosts.'))
return super(OrganizationSerializer, self).validate(attrs)
class ProjectOptionsSerializer(BaseSerializer):
class Meta:
fields = ('*', 'local_path', 'scm_type', 'scm_url', 'scm_branch',
'scm_clean', 'scm_delete_on_update', 'credential', 'timeout',)
def get_related(self, obj):
res = super(ProjectOptionsSerializer, self).get_related(obj)
if obj.credential:
res['credential'] = self.reverse('api:credential_detail',
kwargs={'pk': obj.credential.pk})
return res
def validate(self, attrs):
errors = {}
# Don't allow assigning a local_path used by another project.
# Don't allow assigning a local_path when scm_type is set.
valid_local_paths = Project.get_local_path_choices()
if self.instance:
scm_type = attrs.get('scm_type', self.instance.scm_type) or u''
else:
scm_type = attrs.get('scm_type', u'') or u''
if self.instance and not scm_type:
valid_local_paths.append(self.instance.local_path)
if scm_type:
attrs.pop('local_path', None)
if 'local_path' in attrs and attrs['local_path'] not in valid_local_paths:
errors['local_path'] = _('This path is already being used by another manual project.')
if errors:
raise serializers.ValidationError(errors)
return super(ProjectOptionsSerializer, self).validate(attrs)
def to_representation(self, obj):
ret = super(ProjectOptionsSerializer, self).to_representation(obj)
if obj is not None and 'credential' in ret and not obj.credential:
ret['credential'] = None
return ret
class ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer):
status = serializers.ChoiceField(choices=Project.PROJECT_STATUS_CHOICES, read_only=True)
last_update_failed = serializers.BooleanField(read_only=True)
last_updated = serializers.DateTimeField(read_only=True)
show_capabilities = ['start', 'schedule', 'edit', 'delete', 'copy']
capabilities_prefetch = [
'admin', 'update',
{'copy': 'organization.project_admin'}
]
class Meta:
model = Project
fields = ('*', 'organization', 'scm_update_on_launch',
'scm_update_cache_timeout', 'scm_revision', 'custom_virtualenv',) + \
('last_update_failed', 'last_updated') # Backwards compatibility
def get_related(self, obj):
res = super(ProjectSerializer, self).get_related(obj)
res.update(dict(
teams = self.reverse('api:project_teams_list', kwargs={'pk': obj.pk}),
playbooks = self.reverse('api:project_playbooks', kwargs={'pk': obj.pk}),
inventory_files = self.reverse('api:project_inventories', kwargs={'pk': obj.pk}),
update = self.reverse('api:project_update_view', kwargs={'pk': obj.pk}),
project_updates = self.reverse('api:project_updates_list', kwargs={'pk': obj.pk}),
scm_inventory_sources = self.reverse('api:project_scm_inventory_sources', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:project_schedules_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:project_activity_stream_list', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:project_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:project_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:project_notification_templates_error_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:project_access_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:project_object_roles_list', kwargs={'pk': obj.pk}),
copy = self.reverse('api:project_copy', kwargs={'pk': obj.pk})
))
if obj.organization:
res['organization'] = self.reverse('api:organization_detail',
kwargs={'pk': obj.organization.pk})
# Backwards compatibility.
if obj.current_update:
res['current_update'] = self.reverse('api:project_update_detail',
kwargs={'pk': obj.current_update.pk})
if obj.last_update:
res['last_update'] = self.reverse('api:project_update_detail',
kwargs={'pk': obj.last_update.pk})
return res
def to_representation(self, obj):
ret = super(ProjectSerializer, self).to_representation(obj)
if 'scm_revision' in ret and obj.scm_type == '':
ret['scm_revision'] = ''
return ret
def validate(self, attrs):
def get_field_from_model_or_attrs(fd):
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
organization = None
if 'organization' in attrs:
organization = attrs['organization']
elif self.instance:
organization = self.instance.organization
view = self.context.get('view', None)
if not organization and not view.request.user.is_superuser:
# Only allow super users to create orgless projects
raise serializers.ValidationError(_('Organization is missing'))
elif get_field_from_model_or_attrs('scm_type') == '':
for fd in ('scm_update_on_launch', 'scm_delete_on_update', 'scm_clean'):
if get_field_from_model_or_attrs(fd):
raise serializers.ValidationError({fd: _('Update options must be set to false for manual projects.')})
return super(ProjectSerializer, self).validate(attrs)
class ProjectPlaybooksSerializer(ProjectSerializer):
playbooks = serializers.SerializerMethodField(help_text=_('Array of playbooks available within this project.'))
class Meta:
model = Project
fields = ('playbooks',)
def get_playbooks(self, obj):
return obj.playbook_files if obj.scm_type else obj.playbooks
@property
def data(self):
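# Collapse the serialized object into a bare list so the playbooks endpoint returns just the playbook names.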
ret = super(ProjectPlaybooksSerializer, self).data
ret = ret.get('playbooks', [])
return ReturnList(ret, serializer=self)
class ProjectInventoriesSerializer(ProjectSerializer):
inventory_files = serializers.ReadOnlyField(help_text=_(
'Array of inventory files and directories available within this project, '
'not comprehensive.'))
class Meta:
model = Project
fields = ('inventory_files',)
@property
def data(self):
ret = super(ProjectInventoriesSerializer, self).data
ret = ret.get('inventory_files', [])
return ReturnList(ret, serializer=self)
class ProjectUpdateViewSerializer(ProjectSerializer):
can_update = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_update',)
class ProjectUpdateSerializer(UnifiedJobSerializer, ProjectOptionsSerializer):
class Meta:
model = ProjectUpdate
fields = ('*', 'project', 'job_type', '-controller_node')
def get_related(self, obj):
res = super(ProjectUpdateSerializer, self).get_related(obj)
try:
res.update(dict(
project = self.reverse('api:project_detail', kwargs={'pk': obj.project.pk}),
))
except ObjectDoesNotExist:
pass
res.update(dict(
cancel = self.reverse('api:project_update_cancel', kwargs={'pk': obj.pk}),
scm_inventory_updates = self.reverse('api:project_update_scm_inventory_updates', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:project_update_notifications_list', kwargs={'pk': obj.pk}),
events = self.reverse('api:project_update_events_list', kwargs={'pk': obj.pk}),
))
return res
class ProjectUpdateDetailSerializer(ProjectUpdateSerializer):
host_status_counts = serializers.SerializerMethodField(
help_text=_('A count of hosts uniquely assigned to each status.'),
)
playbook_counts = serializers.SerializerMethodField(
help_text=_('A count of all plays and tasks for the job run.'),
)
class Meta:
model = ProjectUpdate
fields = ('*', 'host_status_counts', 'playbook_counts',)
def get_playbook_counts(self, obj):
task_count = obj.project_update_events.filter(event='playbook_on_task_start').count()
play_count = obj.project_update_events.filter(event='playbook_on_play_start').count()
data = {'play_count': play_count, 'task_count': task_count}
return data
def get_host_status_counts(self, obj):
try:
counts = obj.project_update_events.only('event_data').get(event='playbook_on_stats').get_host_status_counts()
except ProjectUpdateEvent.DoesNotExist:
counts = {}
return counts
class ProjectUpdateListSerializer(ProjectUpdateSerializer, UnifiedJobListSerializer):
class Meta:
model = ProjectUpdate
fields = ('*', '-controller_node') # field removal undone by UJ serializer
class ProjectUpdateCancelSerializer(ProjectUpdateSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class BaseSerializerWithVariables(BaseSerializer):
def validate_variables(self, value):
return vars_validate_or_raise(value)
class InventorySerializer(BaseSerializerWithVariables):
show_capabilities = ['edit', 'delete', 'adhoc', 'copy']
capabilities_prefetch = [
'admin', 'adhoc',
{'copy': 'organization.inventory_admin'}
]
groups_with_active_failures = serializers.IntegerField(
read_only=True,
min_value=0,
help_text=_('This field has been deprecated and will be removed in a future release')
)
class Meta:
model = Inventory
fields = ('*', 'organization', 'kind', 'host_filter', 'variables', 'has_active_failures',
'total_hosts', 'hosts_with_active_failures', 'total_groups',
'groups_with_active_failures', 'has_inventory_sources',
'total_inventory_sources', 'inventory_sources_with_failures',
'insights_credential', 'pending_deletion',)
def get_related(self, obj):
res = super(InventorySerializer, self).get_related(obj)
res.update(dict(
hosts = self.reverse('api:inventory_hosts_list', kwargs={'pk': obj.pk}),
groups = self.reverse('api:inventory_groups_list', kwargs={'pk': obj.pk}),
root_groups = self.reverse('api:inventory_root_groups_list', kwargs={'pk': obj.pk}),
variable_data = self.reverse('api:inventory_variable_data', kwargs={'pk': obj.pk}),
script = self.reverse('api:inventory_script_view', kwargs={'pk': obj.pk}),
tree = self.reverse('api:inventory_tree_view', kwargs={'pk': obj.pk}),
inventory_sources = self.reverse('api:inventory_inventory_sources_list', kwargs={'pk': obj.pk}),
update_inventory_sources = self.reverse('api:inventory_inventory_sources_update', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:inventory_activity_stream_list', kwargs={'pk': obj.pk}),
job_templates = self.reverse('api:inventory_job_template_list', kwargs={'pk': obj.pk}),
ad_hoc_commands = self.reverse('api:inventory_ad_hoc_commands_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:inventory_access_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:inventory_object_roles_list', kwargs={'pk': obj.pk}),
instance_groups = self.reverse('api:inventory_instance_groups_list', kwargs={'pk': obj.pk}),
copy = self.reverse('api:inventory_copy', kwargs={'pk': obj.pk})
))
if obj.insights_credential:
res['insights_credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.insights_credential.pk})
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
def to_representation(self, obj):
ret = super(InventorySerializer, self).to_representation(obj)
if obj is not None and 'organization' in ret and not obj.organization:
ret['organization'] = None
return ret
def validate_host_filter(self, host_filter):
if host_filter:
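# Only exact matches are allowed against ansible_facts; reject any other field lookup (e.g. __contains) before parsing the filter.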
try:
for match in JSONBField.get_lookups().keys():
if match == 'exact':
# __exact is allowed
continue
match = '__{}'.format(match)
if re.match(
'ansible_facts[^=]+{}='.format(match),
host_filter
):
raise models.base.ValidationError({
'host_filter': 'ansible_facts does not support searching with {}'.format(match)
})
SmartFilter().query_from_string(host_filter)
except RuntimeError as e:
raise models.base.ValidationError(e)
return host_filter
def validate(self, attrs):
kind = None
if 'kind' in attrs:
kind = attrs['kind']
elif self.instance:
kind = self.instance.kind
host_filter = None
if 'host_filter' in attrs:
host_filter = attrs['host_filter']
elif self.instance:
host_filter = self.instance.host_filter
if kind == 'smart' and not host_filter:
raise serializers.ValidationError({'host_filter': _(
'Smart inventories must specify host_filter')})
return super(InventorySerializer, self).validate(attrs)
class InventoryScriptSerializer(InventorySerializer):
class Meta:
fields = ()
class HostSerializer(BaseSerializerWithVariables):
show_capabilities = ['edit', 'delete']
capabilities_prefetch = ['inventory.admin']
class Meta:
model = Host
fields = ('*', 'inventory', 'enabled', 'instance_id', 'variables',
'has_active_failures', 'has_inventory_sources', 'last_job',
'last_job_host_summary', 'insights_system_id', 'ansible_facts_modified',)
read_only_fields = ('last_job', 'last_job_host_summary', 'insights_system_id',
'ansible_facts_modified',)
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(HostSerializer, self).build_relational_field(field_name, relation_info)
# Inventory is read-only unless creating a new host.
if self.instance and field_name == 'inventory':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
def get_related(self, obj):
res = super(HostSerializer, self).get_related(obj)
res.update(dict(
variable_data = self.reverse('api:host_variable_data', kwargs={'pk': obj.pk}),
groups = self.reverse('api:host_groups_list', kwargs={'pk': obj.pk}),
all_groups = self.reverse('api:host_all_groups_list', kwargs={'pk': obj.pk}),
job_events = self.reverse('api:host_job_events_list', kwargs={'pk': obj.pk}),
job_host_summaries = self.reverse('api:host_job_host_summaries_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:host_activity_stream_list', kwargs={'pk': obj.pk}),
inventory_sources = self.reverse('api:host_inventory_sources_list', kwargs={'pk': obj.pk}),
smart_inventories = self.reverse('api:host_smart_inventories_list', kwargs={'pk': obj.pk}),
ad_hoc_commands = self.reverse('api:host_ad_hoc_commands_list', kwargs={'pk': obj.pk}),
ad_hoc_command_events = self.reverse('api:host_ad_hoc_command_events_list', kwargs={'pk': obj.pk}),
insights = self.reverse('api:host_insights', kwargs={'pk': obj.pk}),
ansible_facts = self.reverse('api:host_ansible_facts_detail', kwargs={'pk': obj.pk}),
))
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
if obj.last_job:
res['last_job'] = self.reverse('api:job_detail', kwargs={'pk': obj.last_job.pk})
if obj.last_job_host_summary:
res['last_job_host_summary'] = self.reverse('api:job_host_summary_detail', kwargs={'pk': obj.last_job_host_summary.pk})
return res
def get_summary_fields(self, obj):
d = super(HostSerializer, self).get_summary_fields(obj)
try:
d['last_job']['job_template_id'] = obj.last_job.job_template.id
d['last_job']['job_template_name'] = obj.last_job.job_template.name
except (KeyError, AttributeError):
pass
if has_model_field_prefetched(obj, 'groups'):
group_list = sorted([{'id': g.id, 'name': g.name} for g in obj.groups.all()], key=lambda x: x['id'])[:5]
else:
group_list = [{'id': g.id, 'name': g.name} for g in obj.groups.all().order_by('id')[:5]]
group_cnt = obj.groups.count()
d.setdefault('groups', {'count': group_cnt, 'results': group_list})
d.setdefault('recent_jobs', [{
'id': j.job.id,
'name': j.job.job_template.name if j.job.job_template is not None else "",
'status': j.job.status,
'finished': j.job.finished,
} for j in obj.job_host_summaries.select_related('job__job_template').order_by('-created')[:5]])
return d
def _get_host_port_from_name(self, name):
# Allow the hostname (except IPv6 for now) to specify the port number inline, e.g. 'host:22'.
port = None
if name.count(':') == 1:
name, port = name.split(':')
try:
port = int(port)
if port < 1 or port > 65535:
raise ValueError
except ValueError:
raise serializers.ValidationError(_(u'Invalid port specification: %s') % force_text(port))
return name, port
def validate_name(self, value):
name = force_text(value or '')
# Validate here only, update in main validate method.
host, port = self._get_host_port_from_name(name)
return value
def validate_inventory(self, value):
if value.kind == 'smart':
raise serializers.ValidationError({"detail": _("Cannot create Host for Smart Inventory")})
return value
def validate_variables(self, value):
return vars_validate_or_raise(value)
def validate(self, attrs):
name = force_text(attrs.get('name', self.instance and self.instance.name or ''))
host, port = self._get_host_port_from_name(name)
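# If a port was supplied inline (e.g. 'host:2222'), strip it from the name and persist it as ansible_ssh_port in the host variables.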
if port:
attrs['name'] = host
variables = force_text(attrs.get('variables', self.instance and self.instance.variables or ''))
vars_dict = parse_yaml_or_json(variables)
vars_dict['ansible_ssh_port'] = port
attrs['variables'] = json.dumps(vars_dict)
return super(HostSerializer, self).validate(attrs)
def to_representation(self, obj):
ret = super(HostSerializer, self).to_representation(obj)
if not obj:
return ret
if 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
if 'last_job' in ret and not obj.last_job:
ret['last_job'] = None
if 'last_job_host_summary' in ret and not obj.last_job_host_summary:
ret['last_job_host_summary'] = None
return ret
class AnsibleFactsSerializer(BaseSerializer):
class Meta:
model = Host
def to_representation(self, obj):
return obj.ansible_facts
class GroupSerializer(BaseSerializerWithVariables):
show_capabilities = ['copy', 'edit', 'delete']
capabilities_prefetch = ['inventory.admin', 'inventory.adhoc']
groups_with_active_failures = serializers.IntegerField(
read_only=True,
min_value=0,
help_text=_('This field has been deprecated and will be removed in a future release')
)
class Meta:
model = Group
fields = ('*', 'inventory', 'variables', 'has_active_failures',
'total_hosts', 'hosts_with_active_failures', 'total_groups',
'groups_with_active_failures', 'has_inventory_sources')
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(GroupSerializer, self).build_relational_field(field_name, relation_info)
# Inventory is read-only unless creating a new group.
if self.instance and field_name == 'inventory':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
def get_related(self, obj):
res = super(GroupSerializer, self).get_related(obj)
res.update(dict(
variable_data = self.reverse('api:group_variable_data', kwargs={'pk': obj.pk}),
hosts = self.reverse('api:group_hosts_list', kwargs={'pk': obj.pk}),
potential_children = self.reverse('api:group_potential_children_list', kwargs={'pk': obj.pk}),
children = self.reverse('api:group_children_list', kwargs={'pk': obj.pk}),
all_hosts = self.reverse('api:group_all_hosts_list', kwargs={'pk': obj.pk}),
job_events = self.reverse('api:group_job_events_list', kwargs={'pk': obj.pk}),
job_host_summaries = self.reverse('api:group_job_host_summaries_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:group_activity_stream_list', kwargs={'pk': obj.pk}),
inventory_sources = self.reverse('api:group_inventory_sources_list', kwargs={'pk': obj.pk}),
ad_hoc_commands = self.reverse('api:group_ad_hoc_commands_list', kwargs={'pk': obj.pk}),
))
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
return res
def validate_name(self, value):
if value in ('all', '_meta'):
raise serializers.ValidationError(_('Invalid group name.'))
return value
def validate_inventory(self, value):
if value.kind == 'smart':
raise serializers.ValidationError({"detail": _("Cannot create Group for Smart Inventory")})
return value
def to_representation(self, obj):
ret = super(GroupSerializer, self).to_representation(obj)
if obj is not None and 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
return ret
class GroupTreeSerializer(GroupSerializer):
children = serializers.SerializerMethodField()
class Meta:
model = Group
fields = ('*', 'children')
def get_children(self, obj):
if obj is None:
return {}
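# Recursively serialize child groups, pulling in inventory and inventory_source up front to limit per-child queries.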
children_qs = obj.children
children_qs = children_qs.select_related('inventory')
children_qs = children_qs.prefetch_related('inventory_source')
return GroupTreeSerializer(children_qs, many=True).data
class BaseVariableDataSerializer(BaseSerializer):
class Meta:
fields = ('variables',)
def to_representation(self, obj):
if obj is None:
return {}
ret = super(BaseVariableDataSerializer, self).to_representation(obj)
return parse_yaml_or_json(ret.get('variables', '') or '{}')
def to_internal_value(self, data):
data = {'variables': json.dumps(data)}
return super(BaseVariableDataSerializer, self).to_internal_value(data)
class InventoryVariableDataSerializer(BaseVariableDataSerializer):
class Meta:
model = Inventory
class HostVariableDataSerializer(BaseVariableDataSerializer):
class Meta:
model = Host
class GroupVariableDataSerializer(BaseVariableDataSerializer):
class Meta:
model = Group
class CustomInventoryScriptSerializer(BaseSerializer):
script = serializers.CharField(trim_whitespace=False)
show_capabilities = ['edit', 'delete', 'copy']
capabilities_prefetch = [
{'edit': 'admin'}
]
class Meta:
model = CustomInventoryScript
fields = ('*', "script", "organization")
def validate_script(self, value):
if not value.startswith("#!"):
raise serializers.ValidationError(_('Script must begin with a hashbang sequence, e.g. #!/usr/bin/env python'))
return value
def to_representation(self, obj):
ret = super(CustomInventoryScriptSerializer, self).to_representation(obj)
if obj is None:
return ret
request = self.context.get('request', None)
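# Only expose the script body to users with admin access, superusers/system auditors, or auditors of the owning organization.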
if request.user not in obj.admin_role and \
not request.user.is_superuser and \
not request.user.is_system_auditor and \
not (obj.organization is not None and request.user in obj.organization.auditor_role):
ret['script'] = None
return ret
def get_related(self, obj):
res = super(CustomInventoryScriptSerializer, self).get_related(obj)
res.update(dict(
object_roles = self.reverse('api:inventory_script_object_roles_list', kwargs={'pk': obj.pk}),
copy = self.reverse('api:inventory_script_copy', kwargs={'pk': obj.pk}),
))
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
class InventorySourceOptionsSerializer(BaseSerializer):
credential = DeprecatedCredentialField(
help_text=_('Cloud credential to use for inventory updates.')
)
class Meta:
fields = ('*', 'source', 'source_path', 'source_script', 'source_vars', 'credential',
'source_regions', 'instance_filters', 'group_by', 'overwrite', 'overwrite_vars',
'custom_virtualenv', 'timeout', 'verbosity')
def get_related(self, obj):
res = super(InventorySourceOptionsSerializer, self).get_related(obj)
if obj.credential: # TODO: remove when 'credential' field is removed
res['credential'] = self.reverse('api:credential_detail',
kwargs={'pk': obj.credential})
if obj.source_script:
res['source_script'] = self.reverse('api:inventory_script_detail', kwargs={'pk': obj.source_script.pk})
return res
def validate_source_vars(self, value):
ret = vars_validate_or_raise(value)
for env_k in parse_yaml_or_json(value):
if env_k in settings.INV_ENV_VARIABLE_BLACKLIST:
raise serializers.ValidationError(_("`{}` is a prohibited environment variable".format(env_k)))
return ret
def validate(self, attrs):
# TODO: Validate source, validate source_regions
errors = {}
source = attrs.get('source', self.instance and self.instance.source or '')
source_script = attrs.get('source_script', self.instance and self.instance.source_script or '')
if source == 'custom':
if source_script is None or source_script == '':
errors['source_script'] = _("If 'source' is 'custom', 'source_script' must be provided.")
else:
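# A custom source script must exist and belong to the same organization as the inventory it will populate.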
try:
if not self.instance:
dest_inventory = attrs.get('inventory', None)
if not dest_inventory:
errors['inventory'] = _("Must provide an inventory.")
else:
dest_inventory = self.instance.inventory
if dest_inventory and source_script.organization != dest_inventory.organization:
errors['source_script'] = _("The 'source_script' does not belong to the same organization as the inventory.")
except Exception:
errors['source_script'] = _("'source_script' doesn't exist.")
logger.exception('Problem processing source_script validation.')
if errors:
raise serializers.ValidationError(errors)
return super(InventorySourceOptionsSerializer, self).validate(attrs)
class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOptionsSerializer):
status = serializers.ChoiceField(choices=InventorySource.INVENTORY_SOURCE_STATUS_CHOICES, read_only=True)
last_update_failed = serializers.BooleanField(read_only=True)
last_updated = serializers.DateTimeField(read_only=True)
show_capabilities = ['start', 'schedule', 'edit', 'delete']
capabilities_prefetch = [
{'admin': 'inventory.admin'},
{'start': 'inventory.update'}
]
class Meta:
model = InventorySource
fields = ('*', 'name', 'inventory', 'update_on_launch', 'update_cache_timeout',
'source_project', 'update_on_project_update') + \
('last_update_failed', 'last_updated') # Backwards compatibility.
def get_related(self, obj):
res = super(InventorySourceSerializer, self).get_related(obj)
res.update(dict(
update = self.reverse('api:inventory_source_update_view', kwargs={'pk': obj.pk}),
inventory_updates = self.reverse('api:inventory_source_updates_list', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:inventory_source_schedules_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:inventory_source_activity_stream_list', kwargs={'pk': obj.pk}),
hosts = self.reverse('api:inventory_source_hosts_list', kwargs={'pk': obj.pk}),
groups = self.reverse('api:inventory_source_groups_list', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:inventory_source_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:inventory_source_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:inventory_source_notification_templates_error_list', kwargs={'pk': obj.pk}),
))
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
if obj.source_project_id is not None:
res['source_project'] = self.reverse('api:project_detail', kwargs={'pk': obj.source_project.pk})
# Backwards compatibility.
if obj.current_update:
res['current_update'] = self.reverse('api:inventory_update_detail',
kwargs={'pk': obj.current_update.pk})
if obj.last_update:
res['last_update'] = self.reverse('api:inventory_update_detail',
kwargs={'pk': obj.last_update.pk})
res['credentials'] = self.reverse('api:inventory_source_credentials_list', kwargs={'pk': obj.pk})
return res
def get_group(self, obj): # TODO: remove in 3.3
if obj.deprecated_group:
return obj.deprecated_group.id
return None
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(InventorySourceSerializer, self).build_relational_field(field_name, relation_info)
# Inventory is read-only unless creating a new inventory source.
if self.instance and field_name == 'inventory':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
# TODO: remove when old 'credential' fields are removed
def build_field(self, field_name, info, model_class, nested_depth):
# have to special-case the field so that DRF will not automagically make it
# read-only because it's a property on the model.
if field_name == 'credential':
return self.build_standard_field(field_name, self.credential)
return super(InventorySourceOptionsSerializer, self).build_field(field_name, info, model_class, nested_depth)
def to_representation(self, obj):
ret = super(InventorySourceSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
return ret
def validate_source_project(self, value):
if value and value.scm_type == '':
raise serializers.ValidationError(_("Cannot use manual project for SCM-based inventory."))
return value
def validate_update_on_project_update(self, value):
if value and self.instance and self.instance.schedules.exists():
raise serializers.ValidationError(_("Setting not compatible with existing schedules."))
return value
def validate_inventory(self, value):
if value and value.kind == 'smart':
raise serializers.ValidationError({"detail": _("Cannot create Inventory Source for Smart Inventory")})
return value
# TODO: remove when old 'credential' fields are removed
def create(self, validated_data):
deprecated_fields = {}
if 'credential' in validated_data:
deprecated_fields['credential'] = validated_data.pop('credential')
obj = super(InventorySourceSerializer, self).create(validated_data)
if deprecated_fields:
self._update_deprecated_fields(deprecated_fields, obj)
return obj
# TODO: remove when old 'credential' fields are removed
def update(self, obj, validated_data):
deprecated_fields = {}
if 'credential' in validated_data:
deprecated_fields['credential'] = validated_data.pop('credential')
obj = super(InventorySourceSerializer, self).update(obj, validated_data)
if deprecated_fields:
self._update_deprecated_fields(deprecated_fields, obj)
return obj
# TODO: remove when old 'credential' fields are removed
def _update_deprecated_fields(self, fields, obj):
if 'credential' in fields:
new_cred = fields['credential']
existing = obj.credentials.all()
if new_cred not in existing:
for cred in existing:
# Remove all other cloud credentials
obj.credentials.remove(cred)
if new_cred:
# Add new credential
obj.credentials.add(new_cred)
def validate(self, attrs):
deprecated_fields = {}
if 'credential' in attrs: # TODO: remove when 'credential' field removed
deprecated_fields['credential'] = attrs.pop('credential')
def get_field_from_model_or_attrs(fd):
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
if get_field_from_model_or_attrs('source') != 'scm':
redundant_scm_fields = list(filter(
lambda x: attrs.get(x, None),
['source_project', 'source_path', 'update_on_project_update']
))
if redundant_scm_fields:
raise serializers.ValidationError(
{"detail": _("Cannot set %s if not SCM type." % ' '.join(redundant_scm_fields))}
)
attrs = super(InventorySourceSerializer, self).validate(attrs)
# Check type consistency of source and cloud credential, if provided
if 'credential' in deprecated_fields: # TODO: remove when v2 API is deprecated
cred = deprecated_fields['credential']
attrs['credential'] = cred
if cred is not None:
cred = Credential.objects.get(pk=cred)
view = self.context.get('view', None)
if (not view) or (not view.request) or (view.request.user not in cred.use_role):
raise PermissionDenied()
cred_error = InventorySource.cloud_credential_validation(
get_field_from_model_or_attrs('source'),
cred
)
if cred_error:
raise serializers.ValidationError({"credential": cred_error})
return attrs
class InventorySourceUpdateSerializer(InventorySourceSerializer):
can_update = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_update',)
class InventoryUpdateSerializer(UnifiedJobSerializer, InventorySourceOptionsSerializer):
custom_virtualenv = serializers.ReadOnlyField()
class Meta:
model = InventoryUpdate
fields = ('*', 'inventory', 'inventory_source', 'license_error', 'org_host_limit_error',
'source_project_update', 'custom_virtualenv', '-controller_node',)
def get_related(self, obj):
res = super(InventoryUpdateSerializer, self).get_related(obj)
try:
res.update(dict(
inventory_source = self.reverse(
'api:inventory_source_detail', kwargs={'pk': obj.inventory_source.pk}
),
))
except ObjectDoesNotExist:
pass
res.update(dict(
cancel = self.reverse('api:inventory_update_cancel', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:inventory_update_notifications_list', kwargs={'pk': obj.pk}),
events = self.reverse('api:inventory_update_events_list', kwargs={'pk': obj.pk}),
))
if obj.source_project_update_id:
res['source_project_update'] = self.reverse('api:project_update_detail',
kwargs={'pk': obj.source_project_update.pk})
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
res['credentials'] = self.reverse('api:inventory_update_credentials_list', kwargs={'pk': obj.pk})
return res
class InventoryUpdateDetailSerializer(InventoryUpdateSerializer):
source_project = serializers.SerializerMethodField(
help_text=_('The project used for this job.'),
method_name='get_source_project_id'
)
class Meta:
model = InventoryUpdate
fields = ('*', 'source_project',)
def get_source_project(self, obj):
return getattrd(obj, 'source_project_update.unified_job_template', None)
def get_source_project_id(self, obj):
return getattrd(obj, 'source_project_update.unified_job_template.id', None)
def get_related(self, obj):
res = super(InventoryUpdateDetailSerializer, self).get_related(obj)
source_project_id = self.get_source_project_id(obj)
if source_project_id:
res['source_project'] = self.reverse('api:project_detail', kwargs={'pk': source_project_id})
return res
def get_summary_fields(self, obj):
summary_fields = super(InventoryUpdateDetailSerializer, self).get_summary_fields(obj)
source_project = self.get_source_project(obj)
if source_project:
summary_fields['source_project'] = {}
for field in SUMMARIZABLE_FK_FIELDS['project']:
value = getattr(source_project, field, None)
if value is not None:
summary_fields['source_project'][field] = value
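# Surface the first attached credential, mirroring the legacy single-credential summary field.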
cred = obj.credentials.first()
if cred:
summary_fields['credential'] = {
'id': cred.pk,
'name': cred.name,
'description': cred.description,
'kind': cred.kind,
'cloud': cred.credential_type.kind == 'cloud'
}
return summary_fields
class InventoryUpdateListSerializer(InventoryUpdateSerializer, UnifiedJobListSerializer):
class Meta:
model = InventoryUpdate
fields = ('*', '-controller_node') # field removal undone by UJ serializer
class InventoryUpdateCancelSerializer(InventoryUpdateSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class TeamSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
class Meta:
model = Team
fields = ('*', 'organization')
def get_related(self, obj):
res = super(TeamSerializer, self).get_related(obj)
res.update(dict(
projects = self.reverse('api:team_projects_list', kwargs={'pk': obj.pk}),
users = self.reverse('api:team_users_list', kwargs={'pk': obj.pk}),
credentials = self.reverse('api:team_credentials_list', kwargs={'pk': obj.pk}),
roles = self.reverse('api:team_roles_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:team_object_roles_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:team_activity_stream_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:team_access_list', kwargs={'pk': obj.pk}),
))
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
def to_representation(self, obj):
ret = super(TeamSerializer, self).to_representation(obj)
if obj is not None and 'organization' in ret and not obj.organization:
ret['organization'] = None
return ret
class RoleSerializer(BaseSerializer):
class Meta:
model = Role
fields = ('*', '-created', '-modified')
read_only_fields = ('id', 'role_field', 'description', 'name')
def to_representation(self, obj):
ret = super(RoleSerializer, self).to_representation(obj)
if obj.object_id:
content_object = obj.content_object
if hasattr(content_object, 'username'):
ret['summary_fields']['resource_name'] = obj.content_object.username
if hasattr(content_object, 'name'):
ret['summary_fields']['resource_name'] = obj.content_object.name
content_model = obj.content_type.model_class()
ret['summary_fields']['resource_type'] = get_type_for_model(content_model)
ret['summary_fields']['resource_type_display_name'] = content_model._meta.verbose_name.title()
return ret
def get_related(self, obj):
ret = super(RoleSerializer, self).get_related(obj)
ret['users'] = self.reverse('api:role_users_list', kwargs={'pk': obj.pk})
ret['teams'] = self.reverse('api:role_teams_list', kwargs={'pk': obj.pk})
try:
if obj.content_object:
ret.update(reverse_gfk(obj.content_object, self.context.get('request')))
except AttributeError:
# AttributeErrors happen if our content_object is pointing at
# a model that no longer exists. This is dirty data and ideally
# doesn't exist, but in case it does, let's not puke.
pass
return ret
class RoleSerializerWithParentAccess(RoleSerializer):
show_capabilities = ['unattach']
class ResourceAccessListElementSerializer(UserSerializer):
show_capabilities = [] # Clear fields from UserSerializer parent class
def to_representation(self, user):
'''
With this method we derive "direct" and "indirect" access lists. Contained
in the direct access list are all the roles the user is a member of, and
all of the roles that are directly granted to any teams that the user is a
member of.
The indirect access list is a list of all of the roles that the user is
a member of that are ancestors of any roles that grant permissions to
the resource.
'''
ret = super(ResourceAccessListElementSerializer, self).to_representation(user)
obj = self.context['view'].get_parent_object()
if self.context['view'].request is not None:
requesting_user = self.context['view'].request.user
else:
requesting_user = None
if 'summary_fields' not in ret:
ret['summary_fields'] = {}
def format_role_perm(role):
role_dict = { 'id': role.id, 'name': role.name, 'description': role.description}
try:
role_dict['resource_name'] = role.content_object.name
role_dict['resource_type'] = get_type_for_model(role.content_type.model_class())
role_dict['related'] = reverse_gfk(role.content_object, self.context.get('request'))
except AttributeError:
pass
if role.content_type is not None:
role_dict['user_capabilities'] = {'unattach': requesting_user.can_access(
Role, 'unattach', role, user, 'members', data={}, skip_sub_obj_read_check=False)}
else:
# Singleton roles should not be managed from this view, as per copy/edit rework spec
role_dict['user_capabilities'] = {'unattach': False}
return { 'role': role_dict, 'descendant_roles': get_roles_on_resource(obj, role)}
def format_team_role_perm(naive_team_role, permissive_role_ids):
ret = []
team_role = naive_team_role
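# A team admin implicitly holds the team's member role, so resolve the grants through member_role.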
if naive_team_role.role_field == 'admin_role':
team_role = naive_team_role.content_object.member_role
for role in team_role.children.filter(id__in=permissive_role_ids).all():
role_dict = {
'id': role.id,
'name': role.name,
'description': role.description,
'team_id': team_role.object_id,
'team_name': team_role.content_object.name,
'team_organization_name': team_role.content_object.organization.name,
}
if role.content_type is not None:
role_dict['resource_name'] = role.content_object.name
role_dict['resource_type'] = get_type_for_model(role.content_type.model_class())
role_dict['related'] = reverse_gfk(role.content_object, self.context.get('request'))
role_dict['user_capabilities'] = {'unattach': requesting_user.can_access(
Role, 'unattach', role, team_role, 'parents', data={}, skip_sub_obj_read_check=False)}
else:
# Singleton roles should not be managed from this view, as per copy/edit rework spec
role_dict['user_capabilities'] = {'unattach': False}
ret.append({ 'role': role_dict, 'descendant_roles': get_roles_on_resource(obj, team_role)})
return ret
team_content_type = ContentType.objects.get_for_model(Team)
content_type = ContentType.objects.get_for_model(obj)
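# Direct roles are granted on this object itself; the ancestor set also captures roles that imply access indirectly.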
direct_permissive_role_ids = Role.objects.filter(content_type=content_type, object_id=obj.id).values_list('id', flat=True)
all_permissive_role_ids = Role.objects.filter(content_type=content_type, object_id=obj.id).values_list('ancestors__id', flat=True)
direct_access_roles = user.roles \
.filter(id__in=direct_permissive_role_ids).all()
direct_team_roles = Role.objects \
.filter(content_type=team_content_type,
members=user,
children__in=direct_permissive_role_ids)
if content_type == team_content_type:
# When looking at the access list for a team, exclude the entries
# for that team. This exists primarily so we don't list the read role
# as a direct role when a user is a member or admin of a team
direct_team_roles = direct_team_roles.exclude(
children__content_type=team_content_type,
children__object_id=obj.id
)
indirect_team_roles = Role.objects \
.filter(content_type=team_content_type,
members=user,
children__in=all_permissive_role_ids) \
.exclude(id__in=direct_team_roles)
indirect_access_roles = user.roles \
.filter(id__in=all_permissive_role_ids) \
.exclude(id__in=direct_permissive_role_ids) \
.exclude(id__in=direct_team_roles) \
.exclude(id__in=indirect_team_roles)
ret['summary_fields']['direct_access'] \
= [format_role_perm(r) for r in direct_access_roles.distinct()] \
+ [y for x in (format_team_role_perm(r, direct_permissive_role_ids) for r in direct_team_roles.distinct()) for y in x] \
+ [y for x in (format_team_role_perm(r, all_permissive_role_ids) for r in indirect_team_roles.distinct()) for y in x]
ret['summary_fields']['indirect_access'] \
= [format_role_perm(r) for r in indirect_access_roles.distinct()]
return ret
class CredentialTypeSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
managed_by_tower = serializers.ReadOnlyField()
class Meta:
model = CredentialType
fields = ('*', 'kind', 'namespace', 'name', 'managed_by_tower', 'inputs',
'injectors')
def validate(self, attrs):
if self.instance and self.instance.managed_by_tower:
raise PermissionDenied(
detail=_("Modifications not allowed for managed credential types")
)
if self.instance and self.instance.credentials.exists():
if 'inputs' in attrs and attrs['inputs'] != self.instance.inputs:
raise PermissionDenied(
detail= _("Modifications to inputs are not allowed for credential types that are in use")
)
ret = super(CredentialTypeSerializer, self).validate(attrs)
if 'kind' in attrs and attrs['kind'] not in ('cloud', 'net'):
raise serializers.ValidationError({
"kind": _("Must be 'cloud' or 'net', not %s") % attrs['kind']
})
fields = attrs.get('inputs', {}).get('fields', [])
for field in fields:
if field.get('ask_at_runtime', False):
raise serializers.ValidationError({"inputs": _("'ask_at_runtime' is not supported for custom credentials.")})
return ret
def get_related(self, obj):
res = super(CredentialTypeSerializer, self).get_related(obj)
res['credentials'] = self.reverse(
'api:credential_type_credential_list',
kwargs={'pk': obj.pk}
)
res['activity_stream'] = self.reverse(
'api:credential_type_activity_stream_list',
kwargs={'pk': obj.pk}
)
return res
def to_representation(self, data):
value = super(CredentialTypeSerializer, self).to_representation(data)
# translate labels and help_text for credential fields "managed by Tower"
if value.get('managed_by_tower'):
value['name'] = _(value['name'])
for field in value.get('inputs', {}).get('fields', []):
field['label'] = _(field['label'])
if 'help_text' in field:
field['help_text'] = _(field['help_text'])
return value
def filter_field_metadata(self, fields, method):
# API-created/modified CredentialType kinds are limited to
# `cloud` and `net`
if method in ('PUT', 'POST'):
fields['kind']['choices'] = list(filter(
lambda choice: choice[0] in ('cloud', 'net'),
fields['kind']['choices']
))
return fields
class CredentialSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete', 'copy', 'use']
capabilities_prefetch = ['admin', 'use']
class Meta:
model = Credential
fields = ('*', 'organization', 'credential_type', 'inputs', 'kind', 'cloud')
extra_kwargs = {
'credential_type': {
'label': _('Credential Type'),
},
}
def to_representation(self, data):
value = super(CredentialSerializer, self).to_representation(data)
if 'inputs' in value:
value['inputs'] = data.display_inputs()
return value
def get_related(self, obj):
res = super(CredentialSerializer, self).get_related(obj)
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
res.update(dict(
activity_stream = self.reverse('api:credential_activity_stream_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:credential_access_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:credential_object_roles_list', kwargs={'pk': obj.pk}),
owner_users = self.reverse('api:credential_owner_users_list', kwargs={'pk': obj.pk}),
owner_teams = self.reverse('api:credential_owner_teams_list', kwargs={'pk': obj.pk}),
copy = self.reverse('api:credential_copy', kwargs={'pk': obj.pk}),
input_sources = self.reverse('api:credential_input_source_sublist', kwargs={'pk': obj.pk}),
credential_type = self.reverse('api:credential_type_detail', kwargs={'pk': obj.credential_type.pk}),
))
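# Link back to the owner: the parent resource of admin_role (typically an organization) if present, otherwise the first user in admin_role.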
parents = [role for role in obj.admin_role.parents.all() if role.object_id is not None]
if parents:
res.update({parents[0].content_type.name:parents[0].content_object.get_absolute_url(self.context.get('request'))})
elif len(obj.admin_role.members.all()) > 0:
user = obj.admin_role.members.all()[0]
res.update({'user': self.reverse('api:user_detail', kwargs={'pk': user.pk})})
return res
def get_summary_fields(self, obj):
summary_dict = super(CredentialSerializer, self).get_summary_fields(obj)
summary_dict['owners'] = []
for user in obj.admin_role.members.all():
summary_dict['owners'].append({
'id': user.pk,
'type': 'user',
'name': user.username,
'description': ' '.join([user.first_name, user.last_name]),
'url': self.reverse('api:user_detail', kwargs={'pk': user.pk}),
})
for parent in [role for role in obj.admin_role.parents.all() if role.object_id is not None]:
summary_dict['owners'].append({
'id': parent.content_object.pk,
'type': camelcase_to_underscore(parent.content_object.__class__.__name__),
'name': parent.content_object.name,
'description': parent.content_object.description,
'url': parent.content_object.get_absolute_url(self.context.get('request')),
})
return summary_dict
def get_validation_exclusions(self, obj=None):
ret = super(CredentialSerializer, self).get_validation_exclusions(obj)
for field in ('credential_type', 'inputs'):
if field in ret:
ret.remove(field)
return ret
def validate_credential_type(self, credential_type):
if self.instance and credential_type.pk != self.instance.credential_type.pk:
for rel in (
'ad_hoc_commands',
'insights_inventories',
'unifiedjobs',
'unifiedjobtemplates',
'projects',
'projectupdates',
'workflowjobnodes'
):
if getattr(self.instance, rel).count() > 0:
raise ValidationError(
_('You cannot change the credential type of the credential, as it may break the functionality'
' of the resources using it.'),
)
return credential_type
class CredentialSerializerCreate(CredentialSerializer):
user = serializers.PrimaryKeyRelatedField(
queryset=User.objects.all(),
required=False, default=None, write_only=True, allow_null=True,
help_text=_('Write-only field used to add user to owner role. If provided, '
'do not give either team or organization. Only valid for creation.'))
team = serializers.PrimaryKeyRelatedField(
queryset=Team.objects.all(),
required=False, default=None, write_only=True, allow_null=True,
help_text=_('Write-only field used to add team to owner role. If provided, '
'do not give either user or organization. Only valid for creation.'))
organization = serializers.PrimaryKeyRelatedField(
queryset=Organization.objects.all(),
required=False, default=None, allow_null=True,
help_text=_('Inherit permissions from organization roles. If provided on creation, '
'do not give either user or team.'))
class Meta:
model = Credential
fields = ('*', 'user', 'team')
def validate(self, attrs):
owner_fields = set()
for field in ('user', 'team', 'organization'):
if field in attrs:
if attrs[field]:
owner_fields.add(field)
else:
attrs.pop(field)
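# At least one owner ('user', 'team', or 'organization') is required; a team owner implies its organization.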
if not owner_fields:
raise serializers.ValidationError({"detail": _("Missing 'user', 'team', or 'organization'.")})
if attrs.get('team'):
attrs['organization'] = attrs['team'].organization
return super(CredentialSerializerCreate, self).validate(attrs)
def create(self, validated_data):
user = validated_data.pop('user', None)
team = validated_data.pop('team', None)
credential = super(CredentialSerializerCreate, self).create(validated_data)
if user:
credential.admin_role.members.add(user)
if team:
if not credential.organization or team.organization.id != credential.organization.id:
raise serializers.ValidationError({"detail": _("Credential organization must be set and match before assigning to a team")})
credential.admin_role.parents.add(team.admin_role)
credential.use_role.parents.add(team.member_role)
return credential
class CredentialInputSourceSerializer(BaseSerializer):
show_capabilities = ['delete']
class Meta:
model = CredentialInputSource
fields = (
'*',
'input_field_name',
'metadata',
'target_credential',
'source_credential',
'-name',
)
extra_kwargs = {
'input_field_name': {'required': True},
'target_credential': {'required': True},
'source_credential': {'required': True},
}
def get_related(self, obj):
res = super(CredentialInputSourceSerializer, self).get_related(obj)
res['source_credential'] = obj.source_credential.get_absolute_url(request=self.context.get('request'))
res['target_credential'] = obj.target_credential.get_absolute_url(request=self.context.get('request'))
return res
class UserCredentialSerializerCreate(CredentialSerializerCreate):
class Meta:
model = Credential
fields = ('*', '-team', '-organization')
class TeamCredentialSerializerCreate(CredentialSerializerCreate):
class Meta:
model = Credential
fields = ('*', '-user', '-organization')
class OrganizationCredentialSerializerCreate(CredentialSerializerCreate):
class Meta:
model = Credential
fields = ('*', '-user', '-team')
class LabelsListMixin(object):
def _summary_field_labels(self, obj):
label_list = [{'id': x.id, 'name': x.name} for x in obj.labels.all()[:10]]
if has_model_field_prefetched(obj, 'labels'):
label_ct = len(obj.labels.all())
else:
if len(label_list) < 10:
label_ct = len(label_list)
else:
label_ct = obj.labels.count()
return {'count': label_ct, 'results': label_list}
def get_summary_fields(self, obj):
res = super(LabelsListMixin, self).get_summary_fields(obj)
res['labels'] = self._summary_field_labels(obj)
return res
class JobOptionsSerializer(LabelsListMixin, BaseSerializer):
class Meta:
fields = ('*', 'job_type', 'inventory', 'project', 'playbook',
'forks', 'limit', 'verbosity', 'extra_vars', 'job_tags',
'force_handlers', 'skip_tags', 'start_at_task', 'timeout',
'use_fact_cache',)
def get_related(self, obj):
res = super(JobOptionsSerializer, self).get_related(obj)
res['labels'] = self.reverse('api:job_template_label_list', kwargs={'pk': obj.pk})
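# The related inventory or project may have been deleted out from under us; treat a missing object as None instead of erroring.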
try:
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
except ObjectDoesNotExist:
setattr(obj, 'inventory', None)
try:
if obj.project:
res['project'] = self.reverse('api:project_detail', kwargs={'pk': obj.project.pk})
except ObjectDoesNotExist:
setattr(obj, 'project', None)
if isinstance(obj, UnifiedJobTemplate):
res['extra_credentials'] = self.reverse(
'api:job_template_extra_credentials_list',
kwargs={'pk': obj.pk}
)
res['credentials'] = self.reverse(
'api:job_template_credentials_list',
kwargs={'pk': obj.pk}
)
elif isinstance(obj, UnifiedJob):
res['extra_credentials'] = self.reverse('api:job_extra_credentials_list', kwargs={'pk': obj.pk})
res['credentials'] = self.reverse('api:job_credentials_list', kwargs={'pk': obj.pk})
return res
def to_representation(self, obj):
ret = super(JobOptionsSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
if 'project' in ret and not obj.project:
ret['project'] = None
if 'playbook' in ret and not obj.playbook:
ret['playbook'] = ''
return ret
def validate(self, attrs):
if 'project' in self.fields and 'playbook' in self.fields:
project = attrs.get('project', self.instance and self.instance.project or None)
playbook = attrs.get('playbook', self.instance and self.instance.playbook or '')
if not project:
raise serializers.ValidationError({'project': _('This field is required.')})
if project and project.scm_type and playbook and force_text(playbook) not in project.playbook_files:
raise serializers.ValidationError({'playbook': _('Playbook not found for project.')})
if project and not project.scm_type and playbook and force_text(playbook) not in project.playbooks:
raise serializers.ValidationError({'playbook': _('Playbook not found for project.')})
if project and not playbook:
raise serializers.ValidationError({'playbook': _('Must select playbook for project.')})
ret = super(JobOptionsSerializer, self).validate(attrs)
return ret
class JobTemplateMixin(object):
'''
Provide recent jobs and survey details in summary_fields
'''
def _recent_jobs(self, obj):
# Exclude "joblets", jobs that ran as part of a sliced workflow job
uj_qs = obj.unifiedjob_unified_jobs.exclude(job__job_slice_count__gt=1).order_by('-created')
# Would like to apply an .only, but does not play well with non_polymorphic
# .only('id', 'status', 'finished', 'polymorphic_ctype_id')
optimized_qs = uj_qs.non_polymorphic()
return [{
'id': x.id, 'status': x.status, 'finished': x.finished,
# Make type consistent with API top-level key, for instance workflow_job
'type': x.get_real_instance_class()._meta.verbose_name.replace(' ', '_')
} for x in optimized_qs[:10]]
def get_summary_fields(self, obj):
d = super(JobTemplateMixin, self).get_summary_fields(obj)
if obj.survey_spec is not None and ('name' in obj.survey_spec and 'description' in obj.survey_spec):
d['survey'] = dict(title=obj.survey_spec['name'], description=obj.survey_spec['description'])
d['recent_jobs'] = self._recent_jobs(obj)
return d
class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobOptionsSerializer):
show_capabilities = ['start', 'schedule', 'copy', 'edit', 'delete']
capabilities_prefetch = [
'admin', 'execute',
{'copy': ['project.use', 'inventory.use']}
]
status = serializers.ChoiceField(choices=JobTemplate.JOB_TEMPLATE_STATUS_CHOICES, read_only=True, required=False)
class Meta:
model = JobTemplate
fields = ('*', 'host_config_key', 'ask_diff_mode_on_launch', 'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch',
'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_verbosity_on_launch', 'ask_inventory_on_launch',
'ask_credential_on_launch', 'survey_enabled', 'become_enabled', 'diff_mode',
'allow_simultaneous', 'custom_virtualenv', 'job_slice_count')
def get_related(self, obj):
res = super(JobTemplateSerializer, self).get_related(obj)
res.update(dict(
jobs = self.reverse('api:job_template_jobs_list', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:job_template_schedules_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:job_template_activity_stream_list', kwargs={'pk': obj.pk}),
launch = self.reverse('api:job_template_launch', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:job_template_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:job_template_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:job_template_notification_templates_error_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:job_template_access_list', kwargs={'pk': obj.pk}),
survey_spec = self.reverse('api:job_template_survey_spec', kwargs={'pk': obj.pk}),
labels = self.reverse('api:job_template_label_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:job_template_object_roles_list', kwargs={'pk': obj.pk}),
instance_groups = self.reverse('api:job_template_instance_groups_list', kwargs={'pk': obj.pk}),
slice_workflow_jobs = self.reverse('api:job_template_slice_workflow_jobs_list', kwargs={'pk': obj.pk}),
copy = self.reverse('api:job_template_copy', kwargs={'pk': obj.pk}),
))
if obj.host_config_key:
res['callback'] = self.reverse('api:job_template_callback', kwargs={'pk': obj.pk})
return res
def validate(self, attrs):
def get_field_from_model_or_attrs(fd):
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
inventory = get_field_from_model_or_attrs('inventory')
project = get_field_from_model_or_attrs('project')
if get_field_from_model_or_attrs('host_config_key') and not inventory:
raise serializers.ValidationError({'host_config_key': _(
"Cannot enable provisioning callback without an inventory set."
)})
prompting_error_message = _("Must either set a default value or ask to prompt on launch.")
if project is None:
raise serializers.ValidationError({'project': _("Job Templates must have a project assigned.")})
elif inventory is None and not get_field_from_model_or_attrs('ask_inventory_on_launch'):
raise serializers.ValidationError({'inventory': prompting_error_message})
return super(JobTemplateSerializer, self).validate(attrs)
def validate_extra_vars(self, value):
return vars_validate_or_raise(value)
def get_summary_fields(self, obj):
summary_fields = super(JobTemplateSerializer, self).get_summary_fields(obj)
all_creds = []
# Organize credential data into multitude of deprecated fields
extra_creds = []
if obj.pk:
for cred in obj.credentials.all():
summarized_cred = {
'id': cred.pk,
'name': cred.name,
'description': cred.description,
'kind': cred.kind,
'cloud': cred.credential_type.kind == 'cloud'
}
all_creds.append(summarized_cred)
if cred.credential_type.kind in ('cloud', 'net'):
extra_creds.append(summarized_cred)
if self.is_detail_view:
summary_fields['extra_credentials'] = extra_creds
summary_fields['credentials'] = all_creds
return summary_fields
class JobTemplateWithSpecSerializer(JobTemplateSerializer):
'''
Used for activity stream entries.
'''
class Meta:
model = JobTemplate
fields = ('*', 'survey_spec')
class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
passwords_needed_to_start = serializers.ReadOnlyField()
artifacts = serializers.SerializerMethodField()
class Meta:
model = Job
fields = ('*', 'job_template', 'passwords_needed_to_start',
'allow_simultaneous', 'artifacts', 'scm_revision',
'instance_group', 'diff_mode', 'job_slice_number', 'job_slice_count')
def get_related(self, obj):
res = super(JobSerializer, self).get_related(obj)
res.update(dict(
job_events = self.reverse('api:job_job_events_list', kwargs={'pk': obj.pk}),
job_host_summaries = self.reverse('api:job_job_host_summaries_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:job_activity_stream_list', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:job_notifications_list', kwargs={'pk': obj.pk}),
labels = self.reverse('api:job_label_list', kwargs={'pk': obj.pk}),
create_schedule = self.reverse('api:job_create_schedule', kwargs={'pk': obj.pk}),
))
try:
if obj.job_template:
res['job_template'] = self.reverse('api:job_template_detail',
kwargs={'pk': obj.job_template.pk})
except ObjectDoesNotExist:
setattr(obj, 'job_template', None)
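        # "or True" makes this check unconditional, so the cancel link is always exposed here.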
if obj.can_cancel or True:
res['cancel'] = self.reverse('api:job_cancel', kwargs={'pk': obj.pk})
try:
if obj.project_update:
res['project_update'] = self.reverse(
'api:project_update_detail', kwargs={'pk': obj.project_update.pk}
)
except ObjectDoesNotExist:
pass
res['relaunch'] = self.reverse('api:job_relaunch', kwargs={'pk': obj.pk})
return res
def get_artifacts(self, obj):
if obj:
return obj.display_artifacts()
return {}
def to_internal_value(self, data):
# When creating a new job and a job template is specified, populate any
# fields not provided in data from the job template.
if not self.instance and isinstance(data, dict) and data.get('job_template', False):
try:
job_template = JobTemplate.objects.get(pk=data['job_template'])
except JobTemplate.DoesNotExist:
raise serializers.ValidationError({'job_template': _('Invalid job template.')})
data.setdefault('name', job_template.name)
data.setdefault('description', job_template.description)
data.setdefault('job_type', job_template.job_type)
if job_template.inventory:
data.setdefault('inventory', job_template.inventory.pk)
if job_template.project:
data.setdefault('project', job_template.project.pk)
data.setdefault('playbook', job_template.playbook)
if job_template.credential:
data.setdefault('credential', job_template.credential)
data.setdefault('forks', job_template.forks)
data.setdefault('limit', job_template.limit)
data.setdefault('verbosity', job_template.verbosity)
data.setdefault('extra_vars', job_template.extra_vars)
data.setdefault('job_tags', job_template.job_tags)
data.setdefault('force_handlers', job_template.force_handlers)
data.setdefault('skip_tags', job_template.skip_tags)
data.setdefault('start_at_task', job_template.start_at_task)
return super(JobSerializer, self).to_internal_value(data)
def to_representation(self, obj):
ret = super(JobSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'job_template' in ret and not obj.job_template:
ret['job_template'] = None
if 'extra_vars' in ret:
ret['extra_vars'] = obj.display_extra_vars()
return ret
def get_summary_fields(self, obj):
summary_fields = super(JobSerializer, self).get_summary_fields(obj)
all_creds = []
# Organize credential data into multitude of deprecated fields
extra_creds = []
if obj.pk:
for cred in obj.credentials.all():
summarized_cred = {
'id': cred.pk,
'name': cred.name,
'description': cred.description,
'kind': cred.kind,
'cloud': cred.credential_type.kind == 'cloud'
}
all_creds.append(summarized_cred)
if cred.credential_type.kind in ('cloud', 'net'):
extra_creds.append(summarized_cred)
if self.is_detail_view:
summary_fields['extra_credentials'] = extra_creds
summary_fields['credentials'] = all_creds
return summary_fields
class JobDetailSerializer(JobSerializer):
host_status_counts = serializers.SerializerMethodField(
help_text=_('A count of hosts uniquely assigned to each status.'),
)
playbook_counts = serializers.SerializerMethodField(
help_text=_('A count of all plays and tasks for the job run.'),
)
custom_virtualenv = serializers.ReadOnlyField()
class Meta:
model = Job
fields = ('*', 'host_status_counts', 'playbook_counts', 'custom_virtualenv')
def get_playbook_counts(self, obj):
task_count = obj.job_events.filter(event='playbook_on_task_start').count()
play_count = obj.job_events.filter(event='playbook_on_play_start').count()
data = {'play_count': play_count, 'task_count': task_count}
return data
def get_host_status_counts(self, obj):
try:
counts = obj.job_events.only('event_data').get(event='playbook_on_stats').get_host_status_counts()
except JobEvent.DoesNotExist:
counts = {}
return counts
class JobCancelSerializer(BaseSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
model = Job
fields = ('can_cancel',)
class JobRelaunchSerializer(BaseSerializer):
passwords_needed_to_start = serializers.SerializerMethodField()
retry_counts = serializers.SerializerMethodField()
hosts = serializers.ChoiceField(
required=False, allow_null=True, default='all',
choices=[
('all', _('No change to job limit')),
('failed', _('All failed and unreachable hosts'))
],
write_only=True
)
credential_passwords = VerbatimField(required=True, write_only=True)
class Meta:
model = Job
fields = ('passwords_needed_to_start', 'retry_counts', 'hosts', 'credential_passwords',)
def validate_credential_passwords(self, value):
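        # Every password the original job needed must be re-supplied (and non-empty)
        # for the relaunch to proceed.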
pnts = self.instance.passwords_needed_to_start
missing = set(pnts) - set(key for key in value if value[key])
if missing:
raise serializers.ValidationError(_(
'Missing passwords needed to start: {}'.format(', '.join(missing))
))
return value
def to_representation(self, obj):
res = super(JobRelaunchSerializer, self).to_representation(obj)
view = self.context.get('view', None)
if hasattr(view, '_raw_data_form_marker'):
password_keys = dict([(p, u'') for p in self.get_passwords_needed_to_start(obj)])
res.update(password_keys)
return res
def get_passwords_needed_to_start(self, obj):
if obj:
return obj.passwords_needed_to_start
return ''
def get_retry_counts(self, obj):
if obj.status in ACTIVE_STATES:
return _('Relaunch by host status not available until job finishes running.')
data = OrderedDict([])
for status in self.fields['hosts'].choices.keys():
data[status] = obj.retry_qs(status).count()
return data
def get_validation_exclusions(self, *args, **kwargs):
r = super(JobRelaunchSerializer, self).get_validation_exclusions(*args, **kwargs)
r.append('credential_passwords')
return r
def validate(self, attrs):
obj = self.instance
if obj.project is None:
raise serializers.ValidationError(dict(errors=[_("Job Template Project is missing or undefined.")]))
if obj.inventory is None or obj.inventory.pending_deletion:
raise serializers.ValidationError(dict(errors=[_("Job Template Inventory is missing or undefined.")]))
attrs = super(JobRelaunchSerializer, self).validate(attrs)
return attrs
class JobCreateScheduleSerializer(BaseSerializer):
can_schedule = serializers.SerializerMethodField()
prompts = serializers.SerializerMethodField()
class Meta:
model = Job
fields = ('can_schedule', 'prompts',)
def get_can_schedule(self, obj):
'''
Need both a job template and job prompts to schedule
'''
return obj.can_schedule
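    # Reduce a related object to the fields listed in SUMMARIZABLE_FK_FIELDS so
    # saved prompts can be summarized without serializing the full object.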
@staticmethod
def _summarize(res_name, obj):
summary = {}
for field in SUMMARIZABLE_FK_FIELDS[res_name]:
summary[field] = getattr(obj, field, None)
return summary
def get_prompts(self, obj):
try:
config = obj.launch_config
ret = config.prompts_dict(display=True)
if 'inventory' in ret:
ret['inventory'] = self._summarize('inventory', ret['inventory'])
if 'credentials' in ret:
all_creds = [self._summarize('credential', cred) for cred in ret['credentials']]
ret['credentials'] = all_creds
return ret
except JobLaunchConfig.DoesNotExist:
            return {'all': _('Unknown, job may have been run before launch configurations were saved.')}
class AdHocCommandSerializer(UnifiedJobSerializer):
class Meta:
model = AdHocCommand
fields = ('*', 'job_type', 'inventory', 'limit', 'credential',
'module_name', 'module_args', 'forks', 'verbosity', 'extra_vars',
'become_enabled', 'diff_mode', '-unified_job_template', '-description')
extra_kwargs = {
'name': {
'read_only': True,
},
}
def get_field_names(self, declared_fields, info):
field_names = super(AdHocCommandSerializer, self).get_field_names(declared_fields, info)
# Meta multiple inheritance and -field_name options don't seem to be
# taking effect above, so remove the undesired fields here.
return tuple(x for x in field_names if x not in ('unified_job_template', 'description'))
def build_standard_field(self, field_name, model_field):
field_class, field_kwargs = super(AdHocCommandSerializer, self).build_standard_field(field_name, model_field)
# Load module name choices dynamically from DB settings.
if field_name == 'module_name':
field_class = serializers.ChoiceField
module_name_choices = [(x, x) for x in settings.AD_HOC_COMMANDS]
module_name_default = 'command' if 'command' in [x[0] for x in module_name_choices] else ''
field_kwargs['choices'] = module_name_choices
field_kwargs['required'] = bool(not module_name_default)
field_kwargs['default'] = module_name_default or serializers.empty
field_kwargs['allow_blank'] = bool(module_name_default)
field_kwargs.pop('max_length', None)
return field_class, field_kwargs
def get_related(self, obj):
res = super(AdHocCommandSerializer, self).get_related(obj)
if obj.inventory_id:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory_id})
if obj.credential_id:
res['credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.credential_id})
res.update(dict(
events = self.reverse('api:ad_hoc_command_ad_hoc_command_events_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:ad_hoc_command_activity_stream_list', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:ad_hoc_command_notifications_list', kwargs={'pk': obj.pk}),
))
res['cancel'] = self.reverse('api:ad_hoc_command_cancel', kwargs={'pk': obj.pk})
res['relaunch'] = self.reverse('api:ad_hoc_command_relaunch', kwargs={'pk': obj.pk})
return res
def to_representation(self, obj):
ret = super(AdHocCommandSerializer, self).to_representation(obj)
if 'inventory' in ret and not obj.inventory_id:
ret['inventory'] = None
if 'credential' in ret and not obj.credential_id:
ret['credential'] = None
# For the UI, only module_name is returned for name, instead of the
# longer module name + module_args format.
if 'name' in ret:
ret['name'] = obj.module_name
return ret
def validate(self, attrs):
ret = super(AdHocCommandSerializer, self).validate(attrs)
return ret
def validate_extra_vars(self, value):
redacted_extra_vars, removed_vars = extract_ansible_vars(value)
if removed_vars:
raise serializers.ValidationError(_(
"{} are prohibited from use in ad hoc commands."
).format(", ".join(sorted(removed_vars, reverse=True))))
return vars_validate_or_raise(value)
class AdHocCommandDetailSerializer(AdHocCommandSerializer):
host_status_counts = serializers.SerializerMethodField(
help_text=_('A count of hosts uniquely assigned to each status.'),
)
class Meta:
model = AdHocCommand
fields = ('*', 'host_status_counts',)
def get_host_status_counts(self, obj):
try:
counts = obj.ad_hoc_command_events.only('event_data').get(event='playbook_on_stats').get_host_status_counts()
except AdHocCommandEvent.DoesNotExist:
counts = {}
return counts
class AdHocCommandCancelSerializer(AdHocCommandSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class AdHocCommandRelaunchSerializer(AdHocCommandSerializer):
class Meta:
fields = ()
def to_representation(self, obj):
if obj:
return dict([(p, u'') for p in obj.passwords_needed_to_start])
else:
return {}
class SystemJobTemplateSerializer(UnifiedJobTemplateSerializer):
class Meta:
model = SystemJobTemplate
fields = ('*', 'job_type',)
def get_related(self, obj):
res = super(SystemJobTemplateSerializer, self).get_related(obj)
res.update(dict(
jobs = self.reverse('api:system_job_template_jobs_list', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:system_job_template_schedules_list', kwargs={'pk': obj.pk}),
launch = self.reverse('api:system_job_template_launch', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:system_job_template_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:system_job_template_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:system_job_template_notification_templates_error_list', kwargs={'pk': obj.pk}),
))
return res
class SystemJobSerializer(UnifiedJobSerializer):
result_stdout = serializers.SerializerMethodField()
class Meta:
model = SystemJob
fields = ('*', 'system_job_template', 'job_type', 'extra_vars', 'result_stdout', '-controller_node',)
def get_related(self, obj):
res = super(SystemJobSerializer, self).get_related(obj)
if obj.system_job_template:
res['system_job_template'] = self.reverse('api:system_job_template_detail',
kwargs={'pk': obj.system_job_template.pk})
res['notifications'] = self.reverse('api:system_job_notifications_list', kwargs={'pk': obj.pk})
if obj.can_cancel or True:
res['cancel'] = self.reverse('api:system_job_cancel', kwargs={'pk': obj.pk})
res['events'] = self.reverse('api:system_job_events_list', kwargs={'pk': obj.pk})
return res
def get_result_stdout(self, obj):
try:
return obj.result_stdout
except StdoutMaxBytesExceeded as e:
return _(
"Standard Output too large to display ({text_size} bytes), "
"only download supported for sizes over {supported_size} bytes.").format(
text_size=e.total, supported_size=e.supported
)
class SystemJobCancelSerializer(SystemJobSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJobTemplateSerializer):
show_capabilities = ['start', 'schedule', 'edit', 'copy', 'delete']
capabilities_prefetch = [
'admin', 'execute',
{'copy': 'organization.workflow_admin'}
]
class Meta:
model = WorkflowJobTemplate
fields = ('*', 'extra_vars', 'organization', 'survey_enabled', 'allow_simultaneous',
'ask_variables_on_launch', 'inventory', 'ask_inventory_on_launch',)
def get_related(self, obj):
res = super(WorkflowJobTemplateSerializer, self).get_related(obj)
res.update(dict(
workflow_jobs = self.reverse('api:workflow_job_template_jobs_list', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:workflow_job_template_schedules_list', kwargs={'pk': obj.pk}),
launch = self.reverse('api:workflow_job_template_launch', kwargs={'pk': obj.pk}),
workflow_nodes = self.reverse('api:workflow_job_template_workflow_nodes_list', kwargs={'pk': obj.pk}),
labels = self.reverse('api:workflow_job_template_label_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:workflow_job_template_activity_stream_list', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:workflow_job_template_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:workflow_job_template_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:workflow_job_template_notification_templates_error_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:workflow_job_template_access_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:workflow_job_template_object_roles_list', kwargs={'pk': obj.pk}),
survey_spec = self.reverse('api:workflow_job_template_survey_spec', kwargs={'pk': obj.pk}),
copy = self.reverse('api:workflow_job_template_copy', kwargs={'pk': obj.pk}),
))
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
def validate_extra_vars(self, value):
return vars_validate_or_raise(value)
class WorkflowJobTemplateWithSpecSerializer(WorkflowJobTemplateSerializer):
'''
Used for activity stream entries.
'''
class Meta:
model = WorkflowJobTemplate
fields = ('*', 'survey_spec')
class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
class Meta:
model = WorkflowJob
fields = ('*', 'workflow_job_template', 'extra_vars', 'allow_simultaneous',
'job_template', 'is_sliced_job',
'-execution_node', '-event_processing_finished', '-controller_node',
'inventory',)
def get_related(self, obj):
res = super(WorkflowJobSerializer, self).get_related(obj)
if obj.workflow_job_template:
res['workflow_job_template'] = self.reverse('api:workflow_job_template_detail',
kwargs={'pk': obj.workflow_job_template.pk})
res['notifications'] = self.reverse('api:workflow_job_notifications_list', kwargs={'pk': obj.pk})
if obj.job_template_id:
res['job_template'] = self.reverse('api:job_template_detail', kwargs={'pk': obj.job_template_id})
res['workflow_nodes'] = self.reverse('api:workflow_job_workflow_nodes_list', kwargs={'pk': obj.pk})
res['labels'] = self.reverse('api:workflow_job_label_list', kwargs={'pk': obj.pk})
res['activity_stream'] = self.reverse('api:workflow_job_activity_stream_list', kwargs={'pk': obj.pk})
res['relaunch'] = self.reverse('api:workflow_job_relaunch', kwargs={'pk': obj.pk})
if obj.can_cancel or True:
res['cancel'] = self.reverse('api:workflow_job_cancel', kwargs={'pk': obj.pk})
return res
def to_representation(self, obj):
ret = super(WorkflowJobSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'extra_vars' in ret:
ret['extra_vars'] = obj.display_extra_vars()
return ret
class WorkflowJobListSerializer(WorkflowJobSerializer, UnifiedJobListSerializer):
class Meta:
fields = ('*', '-execution_node', '-controller_node',)
class WorkflowJobCancelSerializer(WorkflowJobSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class LaunchConfigurationBaseSerializer(BaseSerializer):
job_type = serializers.ChoiceField(allow_blank=True, allow_null=True, required=False, default=None,
choices=NEW_JOB_TYPE_CHOICES)
job_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
diff_mode = serializers.NullBooleanField(required=False, default=None)
verbosity = serializers.ChoiceField(allow_null=True, required=False, default=None,
choices=VERBOSITY_CHOICES)
exclude_errors = ()
class Meta:
fields = ('*', 'extra_data', 'inventory', # Saved launch-time config fields
                  'job_type', 'job_tags', 'skip_tags', 'limit', 'diff_mode', 'verbosity')
def get_related(self, obj):
res = super(LaunchConfigurationBaseSerializer, self).get_related(obj)
if obj.inventory_id:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory_id})
res['credentials'] = self.reverse(
'api:{}_credentials_list'.format(get_type_for_model(self.Meta.model)),
kwargs={'pk': obj.pk}
)
return res
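    # Build an unsaved model instance that merges existing DB values with the
    # incoming attrs so prompt acceptance can be checked against the final state;
    # attrs that are not concrete model fields are stripped as a side effect.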
def _build_mock_obj(self, attrs):
mock_obj = self.Meta.model()
if self.instance:
for field in self.instance._meta.fields:
setattr(mock_obj, field.name, getattr(self.instance, field.name))
field_names = set(field.name for field in self.Meta.model._meta.fields)
for field_name, value in list(attrs.items()):
setattr(mock_obj, field_name, value)
if field_name not in field_names:
attrs.pop(field_name)
return mock_obj
def to_representation(self, obj):
ret = super(LaunchConfigurationBaseSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'extra_data' in ret and obj.survey_passwords:
ret['extra_data'] = obj.display_extra_vars()
return ret
def get_summary_fields(self, obj):
summary_fields = super(LaunchConfigurationBaseSerializer, self).get_summary_fields(obj)
# Credential would be an empty dictionary in this case
summary_fields.pop('credential', None)
return summary_fields
def validate(self, attrs):
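        # Rough flow: merge submitted extra_data with stored survey password answers,
        # re-encrypt password values, then validate the prompts against a mock object
        # reflecting the configuration that would be saved.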
db_extra_data = {}
if self.instance:
db_extra_data = parse_yaml_or_json(self.instance.extra_data)
attrs = super(LaunchConfigurationBaseSerializer, self).validate(attrs)
ujt = None
if 'unified_job_template' in attrs:
ujt = attrs['unified_job_template']
elif self.instance:
ujt = self.instance.unified_job_template
# build additional field survey_passwords to track redacted variables
password_dict = {}
extra_data = parse_yaml_or_json(attrs.get('extra_data', {}))
if hasattr(ujt, 'survey_password_variables'):
# Prepare additional field survey_passwords for save
for key in ujt.survey_password_variables():
if key in extra_data:
password_dict[key] = REPLACE_STR
# Replace $encrypted$ submissions with db value if exists
if 'extra_data' in attrs:
if password_dict:
if not self.instance or password_dict != self.instance.survey_passwords:
attrs['survey_passwords'] = password_dict.copy()
# Force dict type (cannot preserve YAML formatting if passwords are involved)
# Encrypt the extra_data for save, only current password vars in JT survey
# but first, make a copy or else this is referenced by request.data, and
# user could get encrypted string in form data in API browser
attrs['extra_data'] = extra_data.copy()
encrypt_dict(attrs['extra_data'], password_dict.keys())
# For any raw $encrypted$ string, either
# - replace with existing DB value
# - raise a validation error
# - ignore, if default present
for key in password_dict.keys():
if attrs['extra_data'].get(key, None) == REPLACE_STR:
if key not in db_extra_data:
element = ujt.pivot_spec(ujt.survey_spec)[key]
# NOTE: validation _of_ the default values of password type
# questions not done here or on launch, but doing so could
# leak info about values, so it should not be added
if not ('default' in element and element['default']):
raise serializers.ValidationError(
{"extra_data": _('Provided variable {} has no database value to replace with.').format(key)})
else:
attrs['extra_data'][key] = db_extra_data[key]
# Build unsaved version of this config, use it to detect prompts errors
mock_obj = self._build_mock_obj(attrs)
accepted, rejected, errors = ujt._accept_or_ignore_job_kwargs(
_exclude_errors=self.exclude_errors, **mock_obj.prompts_dict())
# Remove all unprocessed $encrypted$ strings, indicating default usage
if 'extra_data' in attrs and password_dict:
for key, value in attrs['extra_data'].copy().items():
if value == REPLACE_STR:
if key in password_dict:
attrs['extra_data'].pop(key)
attrs.get('survey_passwords', {}).pop(key, None)
else:
errors.setdefault('extra_vars', []).append(
_('"$encrypted$ is a reserved keyword, may not be used for {var_name}."'.format(key))
)
# Launch configs call extra_vars extra_data for historical reasons
if 'extra_vars' in errors:
errors['extra_data'] = errors.pop('extra_vars')
if errors:
raise serializers.ValidationError(errors)
        # Model `.save` needs the container dict, not the pseudo fields
if mock_obj.char_prompts:
attrs['char_prompts'] = mock_obj.char_prompts
return attrs
class WorkflowJobTemplateNodeSerializer(LaunchConfigurationBaseSerializer):
credential = DeprecatedCredentialField()
success_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
failure_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
always_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
exclude_errors = ('required',) # required variables may be provided by WFJT or on launch
class Meta:
model = WorkflowJobTemplateNode
fields = ('*', 'credential', 'workflow_job_template', '-name', '-description', 'id', 'url', 'related',
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',)
def get_related(self, obj):
res = super(WorkflowJobTemplateNodeSerializer, self).get_related(obj)
res['success_nodes'] = self.reverse('api:workflow_job_template_node_success_nodes_list', kwargs={'pk': obj.pk})
res['failure_nodes'] = self.reverse('api:workflow_job_template_node_failure_nodes_list', kwargs={'pk': obj.pk})
res['always_nodes'] = self.reverse('api:workflow_job_template_node_always_nodes_list', kwargs={'pk': obj.pk})
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))
try:
res['workflow_job_template'] = self.reverse('api:workflow_job_template_detail', kwargs={'pk': obj.workflow_job_template.pk})
except WorkflowJobTemplate.DoesNotExist:
pass
return res
def build_field(self, field_name, info, model_class, nested_depth):
# have to special-case the field so that DRF will not automagically make it
# read-only because it's a property on the model.
if field_name == 'credential':
return self.build_standard_field(field_name,
self.credential)
return super(WorkflowJobTemplateNodeSerializer, self).build_field(field_name, info, model_class, nested_depth)
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(WorkflowJobTemplateNodeSerializer, self).build_relational_field(field_name, relation_info)
# workflow_job_template is read-only unless creating a new node.
if self.instance and field_name == 'workflow_job_template':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
def validate(self, attrs):
deprecated_fields = {}
if 'credential' in attrs: # TODO: remove when v2 API is deprecated
deprecated_fields['credential'] = attrs.pop('credential')
view = self.context.get('view')
attrs = super(WorkflowJobTemplateNodeSerializer, self).validate(attrs)
ujt_obj = None
if 'unified_job_template' in attrs:
ujt_obj = attrs['unified_job_template']
elif self.instance:
ujt_obj = self.instance.unified_job_template
if 'credential' in deprecated_fields: # TODO: remove when v2 API is deprecated
cred = deprecated_fields['credential']
attrs['credential'] = cred
if cred is not None:
if not ujt_obj.ask_credential_on_launch:
raise serializers.ValidationError({"credential": _(
"Related template is not configured to accept credentials on launch.")})
cred = Credential.objects.get(pk=cred)
view = self.context.get('view', None)
if (not view) or (not view.request) or (view.request.user not in cred.use_role):
raise PermissionDenied()
return attrs
def create(self, validated_data): # TODO: remove when v2 API is deprecated
deprecated_fields = {}
if 'credential' in validated_data:
deprecated_fields['credential'] = validated_data.pop('credential')
obj = super(WorkflowJobTemplateNodeSerializer, self).create(validated_data)
if 'credential' in deprecated_fields:
if deprecated_fields['credential']:
obj.credentials.add(deprecated_fields['credential'])
return obj
def update(self, obj, validated_data): # TODO: remove when v2 API is deprecated
deprecated_fields = {}
if 'credential' in validated_data:
deprecated_fields['credential'] = validated_data.pop('credential')
obj = super(WorkflowJobTemplateNodeSerializer, self).update(obj, validated_data)
if 'credential' in deprecated_fields:
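            # The deprecated singular credential maps to the ssh-kind slot of the
            # credentials relation: swap out any existing ssh credential for it.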
existing = obj.credentials.filter(credential_type__kind='ssh')
new_cred = deprecated_fields['credential']
if new_cred not in existing:
for cred in existing:
obj.credentials.remove(cred)
if new_cred:
obj.credentials.add(new_cred)
return obj
class WorkflowJobNodeSerializer(LaunchConfigurationBaseSerializer):
credential = DeprecatedCredentialField()
success_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
failure_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
always_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
class Meta:
model = WorkflowJobNode
fields = ('*', 'credential', 'job', 'workflow_job', '-name', '-description', 'id', 'url', 'related',
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',
'do_not_run',)
def get_related(self, obj):
res = super(WorkflowJobNodeSerializer, self).get_related(obj)
res['success_nodes'] = self.reverse('api:workflow_job_node_success_nodes_list', kwargs={'pk': obj.pk})
res['failure_nodes'] = self.reverse('api:workflow_job_node_failure_nodes_list', kwargs={'pk': obj.pk})
res['always_nodes'] = self.reverse('api:workflow_job_node_always_nodes_list', kwargs={'pk': obj.pk})
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))
if obj.job:
res['job'] = obj.job.get_absolute_url(self.context.get('request'))
if obj.workflow_job:
res['workflow_job'] = self.reverse('api:workflow_job_detail', kwargs={'pk': obj.workflow_job.pk})
return res
class WorkflowJobNodeListSerializer(WorkflowJobNodeSerializer):
pass
class WorkflowJobNodeDetailSerializer(WorkflowJobNodeSerializer):
pass
class WorkflowJobTemplateNodeDetailSerializer(WorkflowJobTemplateNodeSerializer):
'''
Influence the api browser sample data to not include workflow_job_template
when editing a WorkflowNode.
Note: I was not able to accomplish this through the use of extra_kwargs.
Maybe something to do with workflow_job_template being a relational field?
'''
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(WorkflowJobTemplateNodeDetailSerializer, self).build_relational_field(field_name, relation_info)
if self.instance and field_name == 'workflow_job_template':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
class JobListSerializer(JobSerializer, UnifiedJobListSerializer):
pass
class AdHocCommandListSerializer(AdHocCommandSerializer, UnifiedJobListSerializer):
pass
class SystemJobListSerializer(SystemJobSerializer, UnifiedJobListSerializer):
class Meta:
model = SystemJob
fields = ('*', '-controller_node') # field removal undone by UJ serializer
class JobHostSummarySerializer(BaseSerializer):
class Meta:
model = JobHostSummary
fields = ('*', '-name', '-description', 'job', 'host', 'host_name', 'changed',
'dark', 'failures', 'ok', 'processed', 'skipped', 'failed',
'ignored', 'rescued')
def get_related(self, obj):
res = super(JobHostSummarySerializer, self).get_related(obj)
res.update(dict(
job=self.reverse('api:job_detail', kwargs={'pk': obj.job.pk})))
if obj.host is not None:
res.update(dict(
host=self.reverse('api:host_detail', kwargs={'pk': obj.host.pk})
))
return res
def get_summary_fields(self, obj):
d = super(JobHostSummarySerializer, self).get_summary_fields(obj)
try:
d['job']['job_template_id'] = obj.job.job_template.id
d['job']['job_template_name'] = obj.job.job_template.name
except (KeyError, AttributeError):
pass
return d
class JobEventSerializer(BaseSerializer):
event_display = serializers.CharField(source='get_event_display2', read_only=True)
event_level = serializers.IntegerField(read_only=True)
class Meta:
model = JobEvent
fields = ('*', '-name', '-description', 'job', 'event', 'counter',
'event_display', 'event_data', 'event_level', 'failed',
'changed', 'uuid', 'parent_uuid', 'host', 'host_name', 'parent',
'playbook', 'play', 'task', 'role', 'stdout', 'start_line', 'end_line',
'verbosity')
def get_related(self, obj):
res = super(JobEventSerializer, self).get_related(obj)
res.update(dict(
job = self.reverse('api:job_detail', kwargs={'pk': obj.job_id}),
))
if obj.parent_id:
res['parent'] = self.reverse('api:job_event_detail', kwargs={'pk': obj.parent_id})
res['children'] = self.reverse('api:job_event_children_list', kwargs={'pk': obj.pk})
if obj.host_id:
res['host'] = self.reverse('api:host_detail', kwargs={'pk': obj.host_id})
if obj.hosts.exists():
res['hosts'] = self.reverse('api:job_event_hosts_list', kwargs={'pk': obj.pk})
return res
def get_summary_fields(self, obj):
d = super(JobEventSerializer, self).get_summary_fields(obj)
try:
d['job']['job_template_id'] = obj.job.job_template.id
d['job']['job_template_name'] = obj.job.job_template.name
except (KeyError, AttributeError):
pass
return d
def to_representation(self, obj):
ret = super(JobEventSerializer, self).to_representation(obj)
# Show full stdout for event detail view, truncate only for list view.
if hasattr(self.context.get('view', None), 'retrieve'):
return ret
# Show full stdout for playbook_on_* events.
if obj and obj.event.startswith('playbook_on'):
return ret
max_bytes = settings.EVENT_STDOUT_MAX_BYTES_DISPLAY
if max_bytes > 0 and 'stdout' in ret and len(ret['stdout']) >= max_bytes:
ret['stdout'] = ret['stdout'][:(max_bytes - 1)] + u'\u2026'
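            # Truncation may have dropped ANSI reset codes; append one reset per
            # unmatched SGR sequence so colors do not bleed past this event.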
set_count = 0
reset_count = 0
for m in ANSI_SGR_PATTERN.finditer(ret['stdout']):
if m.string[m.start():m.end()] == u'\u001b[0m':
reset_count += 1
else:
set_count += 1
ret['stdout'] += u'\u001b[0m' * (set_count - reset_count)
return ret
class JobEventWebSocketSerializer(JobEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = JobEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'job_events'
class ProjectUpdateEventSerializer(JobEventSerializer):
stdout = serializers.SerializerMethodField()
event_data = serializers.SerializerMethodField()
class Meta:
model = ProjectUpdateEvent
fields = ('*', '-name', '-description', '-job', '-job_id',
'-parent_uuid', '-parent', '-host', 'project_update')
def get_related(self, obj):
res = super(JobEventSerializer, self).get_related(obj)
res['project_update'] = self.reverse(
'api:project_update_detail', kwargs={'pk': obj.project_update_id}
)
return res
def get_stdout(self, obj):
return UriCleaner.remove_sensitive(obj.stdout)
def get_event_data(self, obj):
try:
return json.loads(
UriCleaner.remove_sensitive(
json.dumps(obj.event_data)
)
)
except Exception:
logger.exception("Failed to sanitize event_data")
return {}
class ProjectUpdateEventWebSocketSerializer(ProjectUpdateEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = ProjectUpdateEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'project_update_events'
class AdHocCommandEventSerializer(BaseSerializer):
event_display = serializers.CharField(source='get_event_display', read_only=True)
class Meta:
model = AdHocCommandEvent
fields = ('*', '-name', '-description', 'ad_hoc_command', 'event',
'counter', 'event_display', 'event_data', 'failed',
'changed', 'uuid', 'host', 'host_name', 'stdout',
'start_line', 'end_line', 'verbosity')
def get_related(self, obj):
res = super(AdHocCommandEventSerializer, self).get_related(obj)
res.update(dict(
ad_hoc_command = self.reverse('api:ad_hoc_command_detail', kwargs={'pk': obj.ad_hoc_command_id}),
))
if obj.host:
res['host'] = self.reverse('api:host_detail', kwargs={'pk': obj.host.pk})
return res
def to_representation(self, obj):
ret = super(AdHocCommandEventSerializer, self).to_representation(obj)
# Show full stdout for event detail view, truncate only for list view.
if hasattr(self.context.get('view', None), 'retrieve'):
return ret
max_bytes = settings.EVENT_STDOUT_MAX_BYTES_DISPLAY
if max_bytes > 0 and 'stdout' in ret and len(ret['stdout']) >= max_bytes:
ret['stdout'] = ret['stdout'][:(max_bytes - 1)] + u'\u2026'
set_count = 0
reset_count = 0
for m in ANSI_SGR_PATTERN.finditer(ret['stdout']):
if m.string[m.start():m.end()] == u'\u001b[0m':
reset_count += 1
else:
set_count += 1
ret['stdout'] += u'\u001b[0m' * (set_count - reset_count)
return ret
class AdHocCommandEventWebSocketSerializer(AdHocCommandEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = AdHocCommandEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'ad_hoc_command_events'
class InventoryUpdateEventSerializer(AdHocCommandEventSerializer):
class Meta:
model = InventoryUpdateEvent
fields = ('*', '-name', '-description', '-ad_hoc_command', '-host',
'-host_name', 'inventory_update')
def get_related(self, obj):
res = super(AdHocCommandEventSerializer, self).get_related(obj)
res['inventory_update'] = self.reverse(
'api:inventory_update_detail', kwargs={'pk': obj.inventory_update_id}
)
return res
class InventoryUpdateEventWebSocketSerializer(InventoryUpdateEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = InventoryUpdateEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'inventory_update_events'
class SystemJobEventSerializer(AdHocCommandEventSerializer):
class Meta:
model = SystemJobEvent
fields = ('*', '-name', '-description', '-ad_hoc_command', '-host',
'-host_name', 'system_job')
def get_related(self, obj):
res = super(AdHocCommandEventSerializer, self).get_related(obj)
res['system_job'] = self.reverse(
'api:system_job_detail', kwargs={'pk': obj.system_job_id}
)
return res
class SystemJobEventWebSocketSerializer(SystemJobEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = SystemJobEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'system_job_events'
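# Serializer backing the job template launch endpoint: read-only fields describe
# what the template still needs at launch time, write-only fields accept the
# prompted values. A hypothetical POST body, for illustration only:
#   {"extra_vars": {"foo": "bar"}, "limit": "webservers",
#    "credential_passwords": {"ssh_password": "secret"}}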
class JobLaunchSerializer(BaseSerializer):
# Representational fields
passwords_needed_to_start = serializers.ReadOnlyField()
can_start_without_user_input = serializers.BooleanField(read_only=True)
variables_needed_to_start = serializers.ReadOnlyField()
credential_needed_to_start = serializers.SerializerMethodField()
inventory_needed_to_start = serializers.SerializerMethodField()
survey_enabled = serializers.SerializerMethodField()
job_template_data = serializers.SerializerMethodField()
defaults = serializers.SerializerMethodField()
# Accepted on launch fields
extra_vars = serializers.JSONField(required=False, write_only=True)
inventory = serializers.PrimaryKeyRelatedField(
queryset=Inventory.objects.all(),
required=False, write_only=True
)
credentials = serializers.PrimaryKeyRelatedField(
many=True, queryset=Credential.objects.all(),
required=False, write_only=True
)
credential_passwords = VerbatimField(required=False, write_only=True)
diff_mode = serializers.BooleanField(required=False, write_only=True)
job_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
job_type = serializers.ChoiceField(required=False, choices=NEW_JOB_TYPE_CHOICES, write_only=True)
skip_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
limit = serializers.CharField(required=False, write_only=True, allow_blank=True)
verbosity = serializers.ChoiceField(required=False, choices=VERBOSITY_CHOICES, write_only=True)
class Meta:
model = JobTemplate
fields = ('can_start_without_user_input', 'passwords_needed_to_start',
'extra_vars', 'inventory', 'limit', 'job_tags', 'skip_tags', 'job_type', 'verbosity', 'diff_mode',
'credentials', 'credential_passwords', 'ask_variables_on_launch', 'ask_tags_on_launch',
'ask_diff_mode_on_launch', 'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_limit_on_launch',
'ask_verbosity_on_launch', 'ask_inventory_on_launch', 'ask_credential_on_launch',
'survey_enabled', 'variables_needed_to_start', 'credential_needed_to_start',
'inventory_needed_to_start', 'job_template_data', 'defaults', 'verbosity')
read_only_fields = (
'ask_diff_mode_on_launch', 'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch',
'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_verbosity_on_launch',
'ask_inventory_on_launch', 'ask_credential_on_launch',)
def get_credential_needed_to_start(self, obj):
return False
def get_inventory_needed_to_start(self, obj):
return not (obj and obj.inventory)
def get_survey_enabled(self, obj):
if obj:
return obj.survey_enabled and 'spec' in obj.survey_spec
return False
def get_defaults(self, obj):
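        # Report the template's current default for every promptable field;
        # inventory and credentials are summarized instead of returned as bare PKs.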
defaults_dict = {}
for field_name in JobTemplate.get_ask_mapping().keys():
if field_name == 'inventory':
defaults_dict[field_name] = dict(
name=getattrd(obj, '%s.name' % field_name, None),
id=getattrd(obj, '%s.pk' % field_name, None))
elif field_name == 'credentials':
for cred in obj.credentials.all():
cred_dict = dict(
id=cred.id,
name=cred.name,
credential_type=cred.credential_type.pk,
passwords_needed=cred.passwords_needed
)
if cred.credential_type.managed_by_tower and 'vault_id' in cred.credential_type.defined_fields:
cred_dict['vault_id'] = cred.get_input('vault_id', default=None)
defaults_dict.setdefault(field_name, []).append(cred_dict)
else:
defaults_dict[field_name] = getattr(obj, field_name)
return defaults_dict
def get_job_template_data(self, obj):
return dict(name=obj.name, id=obj.id, description=obj.description)
def validate_extra_vars(self, value):
return vars_validate_or_raise(value)
def validate(self, attrs):
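        # The job template is expected in the serializer context (presumably set by
        # the launch view); rejected prompts are kept on self._ignored_fields so the
        # caller can report which inputs were ignored.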
template = self.context.get('template')
accepted, rejected, errors = template._accept_or_ignore_job_kwargs(
_exclude_errors=['prompts'], # make several error types non-blocking
**attrs)
self._ignored_fields = rejected
if template.inventory and template.inventory.pending_deletion is True:
errors['inventory'] = _("The inventory associated with this Job Template is being deleted.")
elif 'inventory' in accepted and accepted['inventory'].pending_deletion:
errors['inventory'] = _("The provided inventory is being deleted.")
# Prohibit providing multiple credentials of the same CredentialType.kind
# or multiples of same vault id
distinct_cred_kinds = []
for cred in accepted.get('credentials', []):
if cred.unique_hash() in distinct_cred_kinds:
errors.setdefault('credentials', []).append(_(
'Cannot assign multiple {} credentials.'
).format(cred.unique_hash(display=True)))
if cred.credential_type.kind not in ('ssh', 'vault', 'cloud', 'net'):
errors.setdefault('credentials', []).append(_(
'Cannot assign a Credential of kind `{}`'
).format(cred.credential_type.kind))
distinct_cred_kinds.append(cred.unique_hash())
# Prohibit removing credentials from the JT list (unsupported for now)
template_credentials = template.credentials.all()
if 'credentials' in attrs:
removed_creds = set(template_credentials) - set(attrs['credentials'])
provided_mapping = Credential.unique_dict(attrs['credentials'])
for cred in removed_creds:
if cred.unique_hash() in provided_mapping.keys():
continue # User replaced credential with new of same type
errors.setdefault('credentials', []).append(_(
'Removing {} credential at launch time without replacement is not supported. '
'Provided list lacked credential(s): {}.'
).format(cred.unique_hash(display=True), ', '.join([str(c) for c in removed_creds])))
# verify that credentials (either provided or existing) don't
# require launch-time passwords that have not been provided
if 'credentials' in accepted:
launch_credentials = accepted['credentials']
else:
launch_credentials = template_credentials
passwords = attrs.get('credential_passwords', {}) # get from original attrs
passwords_lacking = []
for cred in launch_credentials:
for p in cred.passwords_needed:
if p not in passwords:
passwords_lacking.append(p)
else:
accepted.setdefault('credential_passwords', {})
accepted['credential_passwords'][p] = passwords[p]
if len(passwords_lacking):
errors['passwords_needed_to_start'] = passwords_lacking
if errors:
raise serializers.ValidationError(errors)
if 'extra_vars' in accepted:
extra_vars_save = accepted['extra_vars']
else:
extra_vars_save = None
# Validate job against JobTemplate clean_ methods
accepted = super(JobLaunchSerializer, self).validate(accepted)
# Preserve extra_vars as dictionary internally
if extra_vars_save:
accepted['extra_vars'] = extra_vars_save
return accepted
class WorkflowJobLaunchSerializer(BaseSerializer):
can_start_without_user_input = serializers.BooleanField(read_only=True)
defaults = serializers.SerializerMethodField()
variables_needed_to_start = serializers.ReadOnlyField()
survey_enabled = serializers.SerializerMethodField()
extra_vars = VerbatimField(required=False, write_only=True)
inventory = serializers.PrimaryKeyRelatedField(
queryset=Inventory.objects.all(),
required=False, write_only=True
)
workflow_job_template_data = serializers.SerializerMethodField()
class Meta:
model = WorkflowJobTemplate
fields = ('ask_inventory_on_launch', 'can_start_without_user_input', 'defaults', 'extra_vars',
'inventory', 'survey_enabled', 'variables_needed_to_start',
'node_templates_missing', 'node_prompts_rejected',
'workflow_job_template_data', 'survey_enabled', 'ask_variables_on_launch')
read_only_fields = ('ask_inventory_on_launch', 'ask_variables_on_launch')
def get_survey_enabled(self, obj):
if obj:
return obj.survey_enabled and 'spec' in obj.survey_spec
return False
def get_defaults(self, obj):
defaults_dict = {}
for field_name in WorkflowJobTemplate.get_ask_mapping().keys():
if field_name == 'inventory':
defaults_dict[field_name] = dict(
name=getattrd(obj, '%s.name' % field_name, None),
id=getattrd(obj, '%s.pk' % field_name, None))
else:
defaults_dict[field_name] = getattr(obj, field_name)
return defaults_dict
def get_workflow_job_template_data(self, obj):
return dict(name=obj.name, id=obj.id, description=obj.description)
def validate(self, attrs):
template = self.instance
accepted, rejected, errors = template._accept_or_ignore_job_kwargs(**attrs)
self._ignored_fields = rejected
if template.inventory and template.inventory.pending_deletion is True:
errors['inventory'] = _("The inventory associated with this Workflow is being deleted.")
elif 'inventory' in accepted and accepted['inventory'].pending_deletion:
errors['inventory'] = _("The provided inventory is being deleted.")
if errors:
raise serializers.ValidationError(errors)
WFJT_extra_vars = template.extra_vars
WFJT_inventory = template.inventory
super(WorkflowJobLaunchSerializer, self).validate(attrs)
template.extra_vars = WFJT_extra_vars
template.inventory = WFJT_inventory
return accepted
class NotificationTemplateSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete', 'copy']
capabilities_prefetch = [{'copy': 'organization.admin'}]
class Meta:
model = NotificationTemplate
fields = ('*', 'organization', 'notification_type', 'notification_configuration')
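    # Maps the "type" declared in a notification class's init_parameters to the
    # Python types accepted for that field in notification_configuration.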
type_map = {"string": (str,),
"int": (int,),
"bool": (bool,),
"list": (list,),
"password": (str,),
"object": (dict, OrderedDict)}
def to_representation(self, obj):
ret = super(NotificationTemplateSerializer, self).to_representation(obj)
if 'notification_configuration' in ret:
ret['notification_configuration'] = obj.display_notification_configuration()
return ret
def get_related(self, obj):
res = super(NotificationTemplateSerializer, self).get_related(obj)
res.update(dict(
test = self.reverse('api:notification_template_test', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:notification_template_notification_list', kwargs={'pk': obj.pk}),
copy = self.reverse('api:notification_template_copy', kwargs={'pk': obj.pk}),
))
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
def _recent_notifications(self, obj):
return [{'id': x.id, 'status': x.status, 'created': x.created} for x in obj.notifications.all().order_by('-created')[:5]]
def get_summary_fields(self, obj):
d = super(NotificationTemplateSerializer, self).get_summary_fields(obj)
d['recent_notifications'] = self._recent_notifications(obj)
return d
def validate(self, attrs):
from awx.api.views import NotificationTemplateDetail
notification_type = None
if 'notification_type' in attrs:
notification_type = attrs['notification_type']
elif self.instance:
notification_type = self.instance.notification_type
else:
notification_type = None
if not notification_type:
raise serializers.ValidationError(_('Missing required fields for Notification Configuration: notification_type'))
notification_class = NotificationTemplate.CLASS_FOR_NOTIFICATION_TYPE[notification_type]
missing_fields = []
incorrect_type_fields = []
error_list = []
if 'notification_configuration' not in attrs:
return attrs
if self.context['view'].kwargs and isinstance(self.context['view'], NotificationTemplateDetail):
object_actual = self.context['view'].get_object()
else:
object_actual = None
for field, params in notification_class.init_parameters.items():
if field not in attrs['notification_configuration']:
if 'default' in params:
attrs['notification_configuration'][field] = params['default']
else:
missing_fields.append(field)
continue
field_val = attrs['notification_configuration'][field]
field_type = params['type']
expected_types = self.type_map[field_type]
if not type(field_val) in expected_types:
incorrect_type_fields.append((field, field_type))
continue
if field_type == "list" and len(field_val) < 1:
error_list.append(_("No values specified for field '{}'").format(field))
continue
if field_type == "password" and field_val == "$encrypted$" and object_actual is not None:
attrs['notification_configuration'][field] = object_actual.notification_configuration[field]
if missing_fields:
error_list.append(_("Missing required fields for Notification Configuration: {}.").format(missing_fields))
if incorrect_type_fields:
for type_field_error in incorrect_type_fields:
error_list.append(_("Configuration field '{}' incorrect type, expected {}.").format(type_field_error[0],
type_field_error[1]))
if error_list:
raise serializers.ValidationError(error_list)
return super(NotificationTemplateSerializer, self).validate(attrs)
class NotificationSerializer(BaseSerializer):
class Meta:
model = Notification
fields = ('*', '-name', '-description', 'notification_template', 'error', 'status', 'notifications_sent',
'notification_type', 'recipients', 'subject')
def get_related(self, obj):
res = super(NotificationSerializer, self).get_related(obj)
res.update(dict(
notification_template = self.reverse('api:notification_template_detail', kwargs={'pk': obj.notification_template.pk}),
))
return res
class LabelSerializer(BaseSerializer):
class Meta:
model = Label
fields = ('*', '-description', 'organization')
def get_related(self, obj):
res = super(LabelSerializer, self).get_related(obj)
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
class SchedulePreviewSerializer(BaseSerializer):
class Meta:
model = Schedule
fields = ('rrule',)
# We reject rrules if:
    # - DTSTART is not included
# - INTERVAL is not included
# - SECONDLY is used
# - TZID is used
# - BYDAY prefixed with a number (MO is good but not 20MO)
# - BYYEARDAY
# - BYWEEKNO
# - Multiple DTSTART or RRULE elements
# - Can't contain both COUNT and UNTIL
# - COUNT > 999
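    # Example of a value that should pass these checks (hypothetical schedule,
    # for illustration only):
    #   DTSTART;TZID=America/New_York:20300308T050000 RRULE:FREQ=DAILY;INTERVAL=1;COUNT=5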
def validate_rrule(self, value):
rrule_value = value
multi_by_month_day = r".*?BYMONTHDAY[\:\=][0-9]+,-*[0-9]+"
multi_by_month = r".*?BYMONTH[\:\=][0-9]+,[0-9]+"
by_day_with_numeric_prefix = r".*?BYDAY[\:\=][0-9]+[a-zA-Z]{2}"
match_count = re.match(r".*?(COUNT\=[0-9]+)", rrule_value)
match_multiple_dtstart = re.findall(r".*?(DTSTART(;[^:]+)?\:[0-9]+T[0-9]+Z?)", rrule_value)
match_native_dtstart = re.findall(r".*?(DTSTART:[0-9]+T[0-9]+) ", rrule_value)
match_multiple_rrule = re.findall(r".*?(RRULE\:)", rrule_value)
if not len(match_multiple_dtstart):
raise serializers.ValidationError(_('Valid DTSTART required in rrule. Value should start with: DTSTART:YYYYMMDDTHHMMSSZ'))
if len(match_native_dtstart):
raise serializers.ValidationError(_('DTSTART cannot be a naive datetime. Specify ;TZINFO= or YYYYMMDDTHHMMSSZZ.'))
if len(match_multiple_dtstart) > 1:
raise serializers.ValidationError(_('Multiple DTSTART is not supported.'))
if not len(match_multiple_rrule):
raise serializers.ValidationError(_('RRULE required in rrule.'))
if len(match_multiple_rrule) > 1:
raise serializers.ValidationError(_('Multiple RRULE is not supported.'))
if 'interval' not in rrule_value.lower():
raise serializers.ValidationError(_('INTERVAL required in rrule.'))
if 'secondly' in rrule_value.lower():
raise serializers.ValidationError(_('SECONDLY is not supported.'))
if re.match(multi_by_month_day, rrule_value):
raise serializers.ValidationError(_('Multiple BYMONTHDAYs not supported.'))
if re.match(multi_by_month, rrule_value):
raise serializers.ValidationError(_('Multiple BYMONTHs not supported.'))
if re.match(by_day_with_numeric_prefix, rrule_value):
raise serializers.ValidationError(_("BYDAY with numeric prefix not supported."))
if 'byyearday' in rrule_value.lower():
raise serializers.ValidationError(_("BYYEARDAY not supported."))
if 'byweekno' in rrule_value.lower():
raise serializers.ValidationError(_("BYWEEKNO not supported."))
if 'COUNT' in rrule_value and 'UNTIL' in rrule_value:
raise serializers.ValidationError(_("RRULE may not contain both COUNT and UNTIL"))
if match_count:
count_val = match_count.groups()[0].strip().split("=")
if int(count_val[1]) > 999:
raise serializers.ValidationError(_("COUNT > 999 is unsupported."))
try:
Schedule.rrulestr(rrule_value)
except Exception as e:
raise serializers.ValidationError(_("rrule parsing failed validation: {}").format(e))
return value
class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSerializer):
show_capabilities = ['edit', 'delete']
timezone = serializers.SerializerMethodField()
until = serializers.SerializerMethodField()
class Meta:
model = Schedule
fields = ('*', 'unified_job_template', 'enabled', 'dtstart', 'dtend', 'rrule', 'next_run', 'timezone',
'until')
def get_timezone(self, obj):
return obj.timezone
def get_until(self, obj):
return obj.until
def get_related(self, obj):
res = super(ScheduleSerializer, self).get_related(obj)
res.update(dict(
unified_jobs = self.reverse('api:schedule_unified_jobs_list', kwargs={'pk': obj.pk}),
))
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))
try:
if obj.unified_job_template.project:
res['project'] = obj.unified_job_template.project.get_absolute_url(self.context.get('request'))
except ObjectDoesNotExist:
pass
if obj.inventory:
res['inventory'] = obj.inventory.get_absolute_url(self.context.get('request'))
elif obj.unified_job_template and getattr(obj.unified_job_template, 'inventory', None):
res['inventory'] = obj.unified_job_template.inventory.get_absolute_url(self.context.get('request'))
return res
def get_summary_fields(self, obj):
summary_fields = super(ScheduleSerializer, self).get_summary_fields(obj)
if 'inventory' in summary_fields:
return summary_fields
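        # No inventory prompt stored on the schedule itself; fall back to
        # summarizing the unified job template's inventory when it has one.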
inventory = None
if obj.unified_job_template and getattr(obj.unified_job_template, 'inventory', None):
inventory = obj.unified_job_template.inventory
else:
return summary_fields
summary_fields['inventory'] = dict()
for field in SUMMARIZABLE_FK_FIELDS['inventory']:
summary_fields['inventory'][field] = getattr(inventory, field, None)
return summary_fields
def validate_unified_job_template(self, value):
if type(value) == InventorySource and value.source not in SCHEDULEABLE_PROVIDERS:
raise serializers.ValidationError(_('Inventory Source must be a cloud resource.'))
elif type(value) == Project and value.scm_type == '':
raise serializers.ValidationError(_('Manual Project cannot have a schedule set.'))
elif type(value) == InventorySource and value.source == 'scm' and value.update_on_project_update:
raise serializers.ValidationError(_(
'Inventory sources with `update_on_project_update` cannot be scheduled. '
'Schedule its source project `{}` instead.'.format(value.source_project.name)))
return value
class InstanceSerializer(BaseSerializer):
consumed_capacity = serializers.SerializerMethodField()
percent_capacity_remaining = serializers.SerializerMethodField()
jobs_running = serializers.IntegerField(
help_text=_('Count of jobs in the running or waiting state that '
'are targeted for this instance'),
read_only=True
)
jobs_total = serializers.IntegerField(
help_text=_('Count of all jobs that target this instance'),
read_only=True
)
class Meta:
model = Instance
read_only_fields = ('uuid', 'hostname', 'version')
fields = ("id", "type", "url", "related", "uuid", "hostname", "created", "modified", 'capacity_adjustment',
"version", "capacity", "consumed_capacity", "percent_capacity_remaining", "jobs_running", "jobs_total",
"cpu", "memory", "cpu_capacity", "mem_capacity", "enabled", "managed_by_policy")
def get_related(self, obj):
res = super(InstanceSerializer, self).get_related(obj)
res['jobs'] = self.reverse('api:instance_unified_jobs_list', kwargs={'pk': obj.pk})
res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk})
return res
def get_consumed_capacity(self, obj):
return obj.consumed_capacity
def get_percent_capacity_remaining(self, obj):
if not obj.capacity or obj.consumed_capacity >= obj.capacity:
return 0.0
else:
return float("{0:.2f}".format(((float(obj.capacity) - float(obj.consumed_capacity)) / (float(obj.capacity))) * 100))
class InstanceGroupSerializer(BaseSerializer):
committed_capacity = serializers.SerializerMethodField()
consumed_capacity = serializers.SerializerMethodField()
percent_capacity_remaining = serializers.SerializerMethodField()
jobs_running = serializers.IntegerField(
help_text=_('Count of jobs in the running or waiting state that '
'are targeted for this instance group'),
read_only=True
)
jobs_total = serializers.IntegerField(
help_text=_('Count of all jobs that target this instance group'),
read_only=True
)
instances = serializers.SerializerMethodField()
is_controller = serializers.BooleanField(
help_text=_('Indicates whether instance group controls any other group'),
read_only=True
)
is_isolated = serializers.BooleanField(
        help_text=_('Indicates whether instances in this group are isolated. '
'Isolated groups have a designated controller group.'),
read_only=True
)
# NOTE: help_text is duplicated from field definitions, no obvious way of
# both defining field details here and also getting the field's help_text
policy_instance_percentage = serializers.IntegerField(
default=0, min_value=0, max_value=100, required=False, initial=0,
label=_('Policy Instance Percentage'),
help_text=_("Minimum percentage of all instances that will be automatically assigned to "
"this group when new instances come online.")
)
policy_instance_minimum = serializers.IntegerField(
default=0, min_value=0, required=False, initial=0,
label=_('Policy Instance Minimum'),
help_text=_("Static minimum number of Instances that will be automatically assign to "
"this group when new instances come online.")
)
policy_instance_list = serializers.ListField(
child=serializers.CharField(), required=False,
label=_('Policy Instance List'),
help_text=_("List of exact-match Instances that will be assigned to this group")
)
class Meta:
model = InstanceGroup
fields = ("id", "type", "url", "related", "name", "created", "modified",
"capacity", "committed_capacity", "consumed_capacity",
"percent_capacity_remaining", "jobs_running", "jobs_total",
"instances", "controller", "is_controller", "is_isolated",
"policy_instance_percentage", "policy_instance_minimum", "policy_instance_list")
def get_related(self, obj):
res = super(InstanceGroupSerializer, self).get_related(obj)
res['jobs'] = self.reverse('api:instance_group_unified_jobs_list', kwargs={'pk': obj.pk})
res['instances'] = self.reverse('api:instance_group_instance_list', kwargs={'pk': obj.pk})
if obj.controller_id:
res['controller'] = self.reverse('api:instance_group_detail', kwargs={'pk': obj.controller_id})
return res
def validate_policy_instance_list(self, value):
for instance_name in value:
if value.count(instance_name) > 1:
raise serializers.ValidationError(_('Duplicate entry {}.').format(instance_name))
if not Instance.objects.filter(hostname=instance_name).exists():
raise serializers.ValidationError(_('{} is not a valid hostname of an existing instance.').format(instance_name))
if Instance.objects.get(hostname=instance_name).is_isolated():
                raise serializers.ValidationError(_('Isolated instances may not be added or removed from instance groups via the API.'))
if self.instance and self.instance.controller_id is not None:
raise serializers.ValidationError(_('Isolated instance group membership may not be managed via the API.'))
return value
def validate_name(self, value):
if self.instance and self.instance.name == 'tower' and value != 'tower':
raise serializers.ValidationError(_('tower instance group name may not be changed.'))
return value
def get_capacity_dict(self):
# Store capacity values (globally computed) in the context
if 'capacity_map' not in self.context:
ig_qs = None
jobs_qs = UnifiedJob.objects.filter(status__in=('running', 'waiting'))
if self.parent: # Is ListView:
ig_qs = self.parent.instance
self.context['capacity_map'] = InstanceGroup.objects.capacity_values(
qs=ig_qs, tasks=jobs_qs, breakdown=True)
return self.context['capacity_map']
def get_consumed_capacity(self, obj):
return self.get_capacity_dict()[obj.name]['running_capacity']
def get_committed_capacity(self, obj):
return self.get_capacity_dict()[obj.name]['committed_capacity']
def get_percent_capacity_remaining(self, obj):
if not obj.capacity:
return 0.0
consumed = self.get_consumed_capacity(obj)
if consumed >= obj.capacity:
return 0.0
else:
return float("{0:.2f}".format(
((float(obj.capacity) - float(consumed)) / (float(obj.capacity))) * 100)
)
def get_instances(self, obj):
return obj.instances.count()
class ActivityStreamSerializer(BaseSerializer):
changes = serializers.SerializerMethodField()
object_association = serializers.SerializerMethodField(
help_text=_("When present, shows the field name of the role or relationship that changed."))
object_type = serializers.SerializerMethodField(
help_text=_("When present, shows the model on which the role or relationship was defined."))
@cached_property
def _local_summarizable_fk_fields(self):
summary_dict = copy.copy(SUMMARIZABLE_FK_FIELDS)
# Special requests
summary_dict['group'] = summary_dict['group'] + ('inventory_id',)
for key in summary_dict.keys():
if 'id' not in summary_dict[key]:
summary_dict[key] = summary_dict[key] + ('id',)
field_list = list(summary_dict.items())
# Needed related fields that are not in the default summary fields
field_list += [
('workflow_job_template_node', ('id', 'unified_job_template_id')),
('label', ('id', 'name', 'organization_id')),
('notification', ('id', 'status', 'notification_type', 'notification_template_id')),
('o_auth2_access_token', ('id', 'user_id', 'description', 'application_id', 'scope')),
('o_auth2_application', ('id', 'name', 'description')),
('credential_type', ('id', 'name', 'description', 'kind', 'managed_by_tower')),
('ad_hoc_command', ('id', 'name', 'status', 'limit'))
]
return field_list
class Meta:
model = ActivityStream
fields = ('*', '-name', '-description', '-created', '-modified', 'timestamp', 'operation',
'changes', 'object1', 'object2', 'object_association', 'action_node', 'object_type')
def get_fields(self):
ret = super(ActivityStreamSerializer, self).get_fields()
for key, field in list(ret.items()):
if key == 'changes':
field.help_text = _('A summary of the new and changed values when an object is created, updated, or deleted')
if key == 'object1':
field.help_text = _('For create, update, and delete events this is the object type that was affected. '
'For associate and disassociate events this is the object type associated or disassociated with object2.')
if key == 'object2':
field.help_text = _('Unpopulated for create, update, and delete events. For associate and disassociate '
'events this is the object type that object1 is being associated with.')
if key == 'operation':
field.help_text = _('The action taken with respect to the given object(s).')
return ret
def get_changes(self, obj):
if obj is None:
return {}
try:
return json.loads(obj.changes)
except Exception:
logger.warn("Error deserializing activity stream json changes")
return {}
def get_object_association(self, obj):
if not obj.object_relationship_type:
return ""
elif obj.object_relationship_type.endswith('_role'):
# roles: these values look like
# "awx.main.models.inventory.Inventory.admin_role"
# due to historical reasons the UI expects just "role" here
return "role"
# default case: these values look like
# "awx.main.models.organization.Organization_notification_templates_success"
# so instead of splitting on period we have to take after the first underscore
try:
return obj.object_relationship_type.split(".")[-1].split("_", 1)[1]
except Exception:
logger.debug('Failed to parse activity stream relationship type {}'.format(obj.object_relationship_type))
return ""
def get_object_type(self, obj):
if not obj.object_relationship_type:
return ""
elif obj.object_relationship_type.endswith('_role'):
return camelcase_to_underscore(obj.object_relationship_type.rsplit('.', 2)[-2])
# default case: these values look like
# "awx.main.models.organization.Organization_notification_templates_success"
# so we have to take after the last period but before the first underscore.
try:
            cls = obj.object_relationship_type.rsplit('.', 1)[-1]
            return camelcase_to_underscore(cls.split('_', 1)[0])
except Exception:
logger.debug('Failed to parse activity stream relationship type {}'.format(obj.object_relationship_type))
return ""
def get_related(self, obj):
rel = {}
if obj.actor is not None:
rel['actor'] = self.reverse('api:user_detail', kwargs={'pk': obj.actor.pk})
for fk, __ in self._local_summarizable_fk_fields:
if not hasattr(obj, fk):
continue
m2m_list = self._get_rel(obj, fk)
if m2m_list:
rel[fk] = []
id_list = []
for thisItem in m2m_list:
if getattr(thisItem, 'id', None) in id_list:
continue
id_list.append(getattr(thisItem, 'id', None))
if hasattr(thisItem, 'get_absolute_url'):
rel_url = thisItem.get_absolute_url(self.context.get('request'))
else:
view_name = fk + '_detail'
rel_url = self.reverse('api:' + view_name, kwargs={'pk': thisItem.id})
rel[fk].append(rel_url)
if fk == 'schedule':
rel['unified_job_template'] = thisItem.unified_job_template.get_absolute_url(self.context.get('request'))
if obj.setting and obj.setting.get('category', None):
rel['setting'] = self.reverse(
'api:setting_singleton_detail',
kwargs={'category_slug': obj.setting['category']}
)
return rel
def _get_rel(self, obj, fk):
related_model = ActivityStream._meta.get_field(fk).related_model
related_manager = getattr(obj, fk)
if issubclass(related_model, PolymorphicModel) and hasattr(obj, '_prefetched_objects_cache'):
# HACK: manually fill PolymorphicModel caches to prevent running query multiple times
# unnecessary if django-polymorphic issue #68 is solved
if related_manager.prefetch_cache_name not in obj._prefetched_objects_cache:
obj._prefetched_objects_cache[related_manager.prefetch_cache_name] = list(related_manager.all())
return related_manager.all()
def get_summary_fields(self, obj):
summary_fields = OrderedDict()
for fk, related_fields in self._local_summarizable_fk_fields:
try:
if not hasattr(obj, fk):
continue
m2m_list = self._get_rel(obj, fk)
if m2m_list:
summary_fields[fk] = []
for thisItem in m2m_list:
if fk == 'job':
summary_fields['job_template'] = []
job_template_item = {}
job_template_fields = SUMMARIZABLE_FK_FIELDS['job_template']
job_template = getattr(thisItem, 'job_template', None)
if job_template is not None:
for field in job_template_fields:
fval = getattr(job_template, field, None)
if fval is not None:
job_template_item[field] = fval
summary_fields['job_template'].append(job_template_item)
if fk == 'workflow_job_template_node':
summary_fields['workflow_job_template'] = []
workflow_job_template_item = {}
workflow_job_template_fields = SUMMARIZABLE_FK_FIELDS['workflow_job_template']
workflow_job_template = getattr(thisItem, 'workflow_job_template', None)
if workflow_job_template is not None:
for field in workflow_job_template_fields:
fval = getattr(workflow_job_template, field, None)
if fval is not None:
workflow_job_template_item[field] = fval
summary_fields['workflow_job_template'].append(workflow_job_template_item)
if fk == 'schedule':
unified_job_template = getattr(thisItem, 'unified_job_template', None)
if unified_job_template is not None:
summary_fields[get_type_for_model(unified_job_template)] = {'id': unified_job_template.id,
'name': unified_job_template.name}
thisItemDict = {}
for field in related_fields:
fval = getattr(thisItem, field, None)
if fval is not None:
thisItemDict[field] = fval
summary_fields[fk].append(thisItemDict)
except ObjectDoesNotExist:
pass
if obj.actor is not None:
summary_fields['actor'] = dict(id = obj.actor.id,
username = obj.actor.username,
first_name = obj.actor.first_name,
last_name = obj.actor.last_name)
elif obj.deleted_actor:
summary_fields['actor'] = obj.deleted_actor.copy()
summary_fields['actor']['id'] = None
if obj.setting:
summary_fields['setting'] = [obj.setting]
return summary_fields
|
the-stack_0_18 | # copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import math
import tqdm
import numpy as np
from multiprocessing.pool import ThreadPool
import paddle.fluid as fluid
import paddlex.utils.logging as logging
import paddlex
import copy
import os.path as osp
from paddlex.cv.transforms import arrange_transforms
from collections import OrderedDict
from .faster_rcnn import FasterRCNN
from .utils.detection_eval import eval_results, bbox2out, mask2out
class MaskRCNN(FasterRCNN):
"""构建MaskRCNN,并实现其训练、评估、预测和模型导出。
Args:
num_classes (int): 包含了背景类的类别数。默认为81。
backbone (str): MaskRCNN的backbone网络,取值范围为['ResNet18', 'ResNet50',
'ResNet50_vd', 'ResNet101', 'ResNet101_vd', 'HRNet_W18']。默认为'ResNet50'。
with_fpn (bool): 是否使用FPN结构。默认为True。
aspect_ratios (list): 生成anchor高宽比的可选值。默认为[0.5, 1.0, 2.0]。
anchor_sizes (list): 生成anchor大小的可选值。默认为[32, 64, 128, 256, 512]。
input_channel (int): 输入图像的通道数量。默认为3。
"""
def __init__(self,
num_classes=81,
backbone='ResNet50',
with_fpn=True,
aspect_ratios=[0.5, 1.0, 2.0],
anchor_sizes=[32, 64, 128, 256, 512],
input_channel=3):
self.init_params = locals()
backbones = [
'ResNet18', 'ResNet50', 'ResNet50_vd', 'ResNet101', 'ResNet101_vd',
'HRNet_W18'
]
assert backbone in backbones, "backbone should be one of {}".format(
backbones)
super(FasterRCNN, self).__init__('detector')
self.backbone = backbone
self.num_classes = num_classes
self.with_fpn = with_fpn
self.anchor_sizes = anchor_sizes
self.labels = None
if with_fpn:
self.mask_head_resolution = 28
else:
self.mask_head_resolution = 14
self.fixed_input_shape = None
self.input_channel = input_channel
self.with_dcn = False
def build_net(self, mode='train'):
train_pre_nms_top_n = 2000 if self.with_fpn else 12000
test_pre_nms_top_n = 1000 if self.with_fpn else 6000
num_convs = 4 if self.with_fpn else 0
model = paddlex.cv.nets.detection.MaskRCNN(
backbone=self._get_backbone(self.backbone),
num_classes=self.num_classes,
mode=mode,
with_fpn=self.with_fpn,
train_pre_nms_top_n=train_pre_nms_top_n,
test_pre_nms_top_n=test_pre_nms_top_n,
num_convs=num_convs,
mask_head_resolution=self.mask_head_resolution,
fixed_input_shape=self.fixed_input_shape,
input_channel=self.input_channel)
inputs = model.generate_inputs()
if mode == 'train':
model_out = model.build_net(inputs)
loss = model_out['loss']
self.optimizer.minimize(loss)
outputs = OrderedDict(
[('loss', model_out['loss']),
('loss_cls', model_out['loss_cls']),
('loss_bbox', model_out['loss_bbox']),
('loss_mask', model_out['loss_mask']),
('loss_rpn_cls', model_out['loss_rpn_cls']), (
'loss_rpn_bbox', model_out['loss_rpn_bbox'])])
else:
outputs = model.build_net(inputs)
return inputs, outputs
def default_optimizer(self, learning_rate, warmup_steps, warmup_start_lr,
lr_decay_epochs, lr_decay_gamma,
num_steps_each_epoch):
if warmup_steps > lr_decay_epochs[0] * num_steps_each_epoch:
logging.error(
"In function train(), parameters should satisfy: warmup_steps <= lr_decay_epochs[0]*num_samples_in_train_dataset",
exit=False)
logging.error(
"See this doc for more information: https://github.com/PaddlePaddle/PaddleX/blob/develop/docs/appendix/parameters.md#notice",
exit=False)
logging.error(
"warmup_steps should less than {} or lr_decay_epochs[0] greater than {}, please modify 'lr_decay_epochs' or 'warmup_steps' in train function".
format(lr_decay_epochs[0] * num_steps_each_epoch, warmup_steps
// num_steps_each_epoch))
boundaries = [b * num_steps_each_epoch for b in lr_decay_epochs]
values = [(lr_decay_gamma**i) * learning_rate
for i in range(len(lr_decay_epochs) + 1)]
lr_decay = fluid.layers.piecewise_decay(
boundaries=boundaries, values=values)
lr_warmup = fluid.layers.linear_lr_warmup(
learning_rate=lr_decay,
warmup_steps=warmup_steps,
start_lr=warmup_start_lr,
end_lr=learning_rate)
optimizer = fluid.optimizer.Momentum(
learning_rate=lr_warmup,
momentum=0.9,
regularization=fluid.regularizer.L2Decay(1e-04))
return optimizer
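    # Editor's note (worked example, not part of the original code): with the
    # defaults learning_rate=1/800, lr_decay_epochs=[8, 11] and lr_decay_gamma=0.1,
    # the piecewise values computed above are [0.00125, 0.000125, 0.0000125], the
    # boundaries fall at 8 * num_steps_each_epoch and 11 * num_steps_each_epoch,
    # and the learning rate warms up linearly from warmup_start_lr to
    # learning_rate over the first warmup_steps iterations.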
def train(self,
num_epochs,
train_dataset,
train_batch_size=1,
eval_dataset=None,
save_interval_epochs=1,
log_interval_steps=2,
save_dir='output',
pretrain_weights='IMAGENET',
optimizer=None,
learning_rate=1.0 / 800,
warmup_steps=500,
warmup_start_lr=1.0 / 2400,
lr_decay_epochs=[8, 11],
lr_decay_gamma=0.1,
metric=None,
use_vdl=False,
early_stop=False,
early_stop_patience=5,
resume_checkpoint=None):
"""训练。
Args:
num_epochs (int): 训练迭代轮数。
train_dataset (paddlex.datasets): 训练数据读取器。
train_batch_size (int): 训练或验证数据batch大小。目前检测仅支持单卡评估,训练数据batch大小与
显卡数量之商为验证数据batch大小。默认值为1。
eval_dataset (paddlex.datasets): 验证数据读取器。
save_interval_epochs (int): 模型保存间隔(单位:迭代轮数)。默认为1。
log_interval_steps (int): 训练日志输出间隔(单位:迭代次数)。默认为20。
save_dir (str): 模型保存路径。默认值为'output'。
pretrain_weights (str): 若指定为路径时,则加载路径下预训练模型;若为字符串'IMAGENET',
则自动下载在ImageNet图片数据上预训练的模型权重;若为字符串'COCO',
则自动下载在COCO数据集上预训练的模型权重;若为None,则不使用预训练模型。默认为None。
optimizer (paddle.fluid.optimizer): 优化器。当该参数为None时,使用默认优化器:
fluid.layers.piecewise_decay衰减策略,fluid.optimizer.Momentum优化方法。
learning_rate (float): 默认优化器的学习率。默认为1.0/800。
warmup_steps (int): 默认优化器进行warmup过程的步数。默认为500。
warmup_start_lr (int): 默认优化器warmup的起始学习率。默认为1.0/2400。
lr_decay_epochs (list): 默认优化器的学习率衰减轮数。默认为[8, 11]。
lr_decay_gamma (float): 默认优化器的学习率衰减率。默认为0.1。
metric (bool): 训练过程中评估的方式,取值范围为['COCO', 'VOC']。
use_vdl (bool): 是否使用VisualDL进行可视化。默认值为False。
early_stop (bool): 是否使用提前终止训练策略。默认值为False。
early_stop_patience (int): 当使用提前终止训练策略时,如果验证集精度在`early_stop_patience`个epoch内
连续下降或持平,则终止训练。默认值为5。
resume_checkpoint (str): 恢复训练时指定上次训练保存的模型路径。若为None,则不会恢复训练。默认值为None。
Raises:
ValueError: 评估类型不在指定列表中。
ValueError: 模型从inference model进行加载。
"""
if metric is None:
if isinstance(train_dataset, paddlex.datasets.CocoDetection) or \
isinstance(train_dataset, paddlex.datasets.EasyDataDet):
metric = 'COCO'
else:
raise Exception(
"train_dataset should be datasets.COCODetection or datasets.EasyDataDet."
)
assert metric in ['COCO', 'VOC'], "Metric only support 'VOC' or 'COCO'"
self.metric = metric
if not self.trainable:
raise Exception("Model is not trainable from load_model method.")
self.labels = copy.deepcopy(train_dataset.labels)
self.labels.insert(0, 'background')
# 构建训练网络
if optimizer is None:
# 构建默认的优化策略
num_steps_each_epoch = train_dataset.num_samples // train_batch_size
optimizer = self.default_optimizer(
learning_rate=learning_rate,
warmup_steps=warmup_steps,
warmup_start_lr=warmup_start_lr,
lr_decay_epochs=lr_decay_epochs,
lr_decay_gamma=lr_decay_gamma,
num_steps_each_epoch=num_steps_each_epoch)
self.optimizer = optimizer
# 构建训练、验证、测试网络
self.build_program()
fuse_bn = True
if self.with_fpn and self.backbone in [
'ResNet18', 'ResNet50', 'HRNet_W18'
]:
fuse_bn = False
self.net_initialize(
startup_prog=fluid.default_startup_program(),
pretrain_weights=pretrain_weights,
fuse_bn=fuse_bn,
save_dir=save_dir,
resume_checkpoint=resume_checkpoint)
# 训练
self.train_loop(
num_epochs=num_epochs,
train_dataset=train_dataset,
train_batch_size=train_batch_size,
eval_dataset=eval_dataset,
save_interval_epochs=save_interval_epochs,
log_interval_steps=log_interval_steps,
save_dir=save_dir,
use_vdl=use_vdl,
early_stop=early_stop,
early_stop_patience=early_stop_patience)
def evaluate(self,
eval_dataset,
batch_size=1,
epoch_id=None,
metric=None,
return_details=False):
"""评估。
Args:
eval_dataset (paddlex.datasets): 验证数据读取器。
batch_size (int): 验证数据批大小。默认为1。当前只支持设置为1。
epoch_id (int): 当前评估模型所在的训练轮数。
metric (bool): 训练过程中评估的方式,取值范围为['COCO', 'VOC']。默认为None,
根据用户传入的Dataset自动选择,如为VOCDetection,则metric为'VOC';
如为COCODetection,则metric为'COCO'。
return_details (bool): 是否返回详细信息。默认值为False。
Returns:
tuple (metrics, eval_details) /dict (metrics): 当return_details为True时,返回(metrics, eval_details),
当return_details为False时,返回metrics。metrics为dict,包含关键字:'bbox_mmap'和'segm_mmap'
或者’bbox_map‘和'segm_map',分别表示预测框和分割区域平均准确率平均值在
各个IoU阈值下的结果取平均值的结果(mmAP)、平均准确率平均值(mAP)。eval_details为dict,
包含bbox、mask和gt三个关键字。其中关键字bbox的键值是一个列表,列表中每个元素代表一个预测结果,
一个预测结果是一个由图像id,预测框类别id, 预测框坐标,预测框得分组成的列表。
关键字mask的键值是一个列表,列表中每个元素代表各预测框内物体的分割结果,分割结果由图像id、
预测框类别id、表示预测框内各像素点是否属于物体的二值图、预测框得分。
而关键字gt的键值是真实标注框的相关信息。
"""
input_channel = getattr(self, 'input_channel', 3)
arrange_transforms(
model_type=self.model_type,
class_name=self.__class__.__name__,
transforms=eval_dataset.transforms,
mode='eval',
input_channel=input_channel)
if metric is None:
if hasattr(self, 'metric') and self.metric is not None:
metric = self.metric
else:
if isinstance(eval_dataset, paddlex.datasets.CocoDetection):
metric = 'COCO'
else:
raise Exception(
"eval_dataset should be datasets.COCODetection.")
assert metric in ['COCO', 'VOC'], "Metric only support 'VOC' or 'COCO'"
if batch_size > 1:
batch_size = 1
logging.warning(
"Mask RCNN supports batch_size=1 only during evaluating, so batch_size is forced to be set to 1."
)
data_generator = eval_dataset.generator(
batch_size=batch_size, drop_last=False)
total_steps = math.ceil(eval_dataset.num_samples * 1.0 / batch_size)
results = list()
logging.info(
"Start to evaluating(total_samples={}, total_steps={})...".format(
eval_dataset.num_samples, total_steps))
for step, data in tqdm.tqdm(
enumerate(data_generator()), total=total_steps):
images = np.array([d[0] for d in data]).astype('float32')
im_infos = np.array([d[1] for d in data]).astype('float32')
im_shapes = np.array([d[3] for d in data]).astype('float32')
feed_data = {
'image': images,
'im_info': im_infos,
'im_shape': im_shapes,
}
with fluid.scope_guard(self.scope):
outputs = self.exe.run(
self.test_prog,
feed=[feed_data],
fetch_list=list(self.test_outputs.values()),
return_numpy=False)
res = {
'bbox': (np.array(outputs[0]),
outputs[0].recursive_sequence_lengths()),
'mask': (np.array(outputs[1]),
outputs[1].recursive_sequence_lengths())
}
res_im_id = [d[2] for d in data]
res['im_info'] = (im_infos, [])
res['im_shape'] = (im_shapes, [])
res['im_id'] = (np.array(res_im_id), [])
results.append(res)
logging.debug("[EVAL] Epoch={}, Step={}/{}".format(epoch_id, step +
1, total_steps))
ap_stats, eval_details = eval_results(
results,
'COCO',
eval_dataset.coco_gt,
with_background=True,
resolution=self.mask_head_resolution)
if metric == 'VOC':
if isinstance(ap_stats[0], np.ndarray) and isinstance(ap_stats[1],
np.ndarray):
metrics = OrderedDict(
zip(['bbox_map', 'segm_map'],
[ap_stats[0][1], ap_stats[1][1]]))
else:
metrics = OrderedDict(
zip(['bbox_map', 'segm_map'], [0.0, 0.0]))
elif metric == 'COCO':
if isinstance(ap_stats[0], np.ndarray) and isinstance(ap_stats[1],
np.ndarray):
metrics = OrderedDict(
zip(['bbox_mmap', 'segm_mmap'],
[ap_stats[0][0], ap_stats[1][0]]))
else:
metrics = OrderedDict(
zip(['bbox_mmap', 'segm_mmap'], [0.0, 0.0]))
if return_details:
return metrics, eval_details
return metrics
@staticmethod
def _postprocess(res, batch_size, num_classes, mask_head_resolution,
labels):
clsid2catid = dict({i: i for i in range(num_classes)})
xywh_results = bbox2out([res], clsid2catid)
segm_results = mask2out([res], clsid2catid, mask_head_resolution)
preds = [[] for i in range(batch_size)]
import pycocotools.mask as mask_util
for index, xywh_res in enumerate(xywh_results):
image_id = xywh_res['image_id']
del xywh_res['image_id']
xywh_res['mask'] = mask_util.decode(segm_results[index][
'segmentation'])
xywh_res['category'] = labels[xywh_res['category_id']]
preds[image_id].append(xywh_res)
return preds
def predict(self, img_file, transforms=None):
"""预测。
Args:
img_file(str|np.ndarray): 预测图像路径,或者是解码后的排列格式为(H, W, C)且类型为float32且为BGR格式的数组。
transforms (paddlex.det.transforms): 数据预处理操作。
Returns:
lict: 预测结果列表,每个预测结果由预测框类别标签、预测框类别名称、
预测框坐标(坐标格式为[xmin, ymin, w, h])、
原图大小的预测二值图(1表示预测框类别,0表示背景类)、
预测框得分组成。
"""
if transforms is None and not hasattr(self, 'test_transforms'):
raise Exception("transforms need to be defined, now is None.")
if isinstance(img_file, (str, np.ndarray)):
images = [img_file]
else:
raise Exception("img_file must be str/np.ndarray")
if transforms is None:
transforms = self.test_transforms
input_channel = getattr(self, 'input_channel', 3)
im, im_resize_info, im_shape = FasterRCNN._preprocess(
images,
transforms,
self.model_type,
self.__class__.__name__,
input_channel=input_channel)
with fluid.scope_guard(self.scope):
result = self.exe.run(self.test_prog,
feed={
'image': im,
'im_info': im_resize_info,
'im_shape': im_shape
},
fetch_list=list(self.test_outputs.values()),
return_numpy=False,
use_program_cache=True)
res = {
k: (np.array(v), v.recursive_sequence_lengths())
for k, v in zip(list(self.test_outputs.keys()), result)
}
res['im_id'] = (np.array(
[[i] for i in range(len(images))]).astype('int32'), [])
res['im_shape'] = (np.array(im_shape), [])
preds = MaskRCNN._postprocess(res,
len(images), self.num_classes,
self.mask_head_resolution, self.labels)
return preds[0]
def batch_predict(self, img_file_list, transforms=None):
"""预测。
Args:
img_file_list(list|tuple): 对列表(或元组)中的图像同时进行预测,列表中的元素可以是图像路径
也可以是解码后的排列格式为(H,W,C)且类型为float32且为BGR格式的数组。
transforms (paddlex.det.transforms): 数据预处理操作。
Returns:
dict: 每个元素都为列表,表示各图像的预测结果。在各图像的预测结果列表中,每个预测结果由预测框类别标签、预测框类别名称、
预测框坐标(坐标格式为[xmin, ymin, w, h])、
原图大小的预测二值图(1表示预测框类别,0表示背景类)、
预测框得分组成。
"""
if transforms is None and not hasattr(self, 'test_transforms'):
raise Exception("transforms need to be defined, now is None.")
if not isinstance(img_file_list, (list, tuple)):
raise Exception("im_file must be list/tuple")
if transforms is None:
transforms = self.test_transforms
input_channel = getattr(self, 'input_channel', 3)
im, im_resize_info, im_shape = FasterRCNN._preprocess(
img_file_list,
transforms,
self.model_type,
self.__class__.__name__,
self.thread_pool,
input_channel=input_channel)
with fluid.scope_guard(self.scope):
result = self.exe.run(self.test_prog,
feed={
'image': im,
'im_info': im_resize_info,
'im_shape': im_shape
},
fetch_list=list(self.test_outputs.values()),
return_numpy=False,
use_program_cache=True)
res = {
k: (np.array(v), v.recursive_sequence_lengths())
for k, v in zip(list(self.test_outputs.keys()), result)
}
res['im_id'] = (np.array(
[[i] for i in range(len(img_file_list))]).astype('int32'), [])
res['im_shape'] = (np.array(im_shape), [])
preds = MaskRCNN._postprocess(res,
len(img_file_list), self.num_classes,
self.mask_head_resolution, self.labels)
return preds
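# ---------------------------------------------------------------------------
# Editor's usage sketch (assumption, not part of the original module). The
# dataset paths below are hypothetical placeholders; the calls follow the
# PaddleX 1.x style this class is written for.
#
#   import paddlex as pdx
#   from paddlex.det import transforms
#
#   train_transforms = transforms.Compose([transforms.Normalize()])
#   train_dataset = pdx.datasets.CocoDetection(
#       data_dir='data/images',
#       ann_file='data/annotations.json',
#       transforms=train_transforms)
#   model = pdx.det.MaskRCNN(num_classes=81, backbone='ResNet50', with_fpn=True)
#   model.train(num_epochs=12, train_dataset=train_dataset, train_batch_size=1,
#               save_dir='output/mask_rcnn')
#   result = model.predict('data/images/example.jpg')
# ---------------------------------------------------------------------------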
|
the-stack_0_20 | import sqlite3
import json
import math
from sqlite3.dbapi2 import Error
from flask import Flask, request, Response, render_template
app = Flask(__name__)
def open_db():
db = sqlite3.connect('./transactions.db')
db.row_factory = sqlite3.Row
return db
@app.route('/', methods=['GET'])
def transactions():
return render_template('transactions.html')
@app.route('/categories', methods=['GET'])
def categories():
return render_template('categories.html')
@app.route('/api/transactions', methods=['GET'])
def get_transactions():
with open_db() as db:
results = db.execute('SELECT * FROM transactions WHERE date >= "2021-05-01" ORDER BY date ASC')
return Response(json.dumps([dict(idx) for idx in results.fetchall()]), mimetype='application/json')
@app.route('/api/transactions/<int:id>', methods=['PUT', 'PATCH'])
def update_transaction(id):
transaction = request.get_json(force=True)
with open_db() as db:
db.execute('UPDATE transactions SET category_id = ? WHERE id = ?', (transaction['category_id'], id))
db.commit()
return {'success': True}
@app.route('/api/categories', methods=['GET'])
def get_categories():
with open_db() as db:
results = db.execute('SELECT * FROM categories')
return Response(json.dumps([dict(idx) for idx in results.fetchall()]), mimetype='application/json')
@app.route('/api/categories', methods=['POST'])
def create_category():
category = request.get_json(force=True)
with open_db() as db:
db.execute('INSERT INTO categories (name) VALUES (?)', (category.get('name'),))
db.commit()
return {'success': True}
@app.route('/api/breakdown', methods=['GET'])
def get_breakdown():
group_by_first = request.args.get('group_by', 'month').lower()
with open_db() as db:
results = db.execute('''
SELECT
j.*
FROM (
SELECT
t.id,
t.date,
SUBSTR(t.date, 0, 8) as month,
t.amount,
REPLACE(t.description, ' ', ' ') as description,
t.category_id,
c.name as category_name,
t.source
FROM transactions t
INNER JOIN categories c on t.category_id = c.id
WHERE c.name NOT IN ('Income', 'Payments', 'Savings') AND t.date >= '2021-05'
) j
ORDER BY j.month ASC, j.category_name ASC
''')
# return Response(json.dumps([dict(idx) for idx in results.fetchall()], indent=2), mimetype='application/json')
transactions = [dict(idx) for idx in results.fetchall()]
if group_by_first == 'month':
first_group = 'month'
second_group = 'category_name'
elif group_by_first == 'category':
first_group = 'category_name'
second_group = 'month'
else:
            return Response('Invalid group by', status=400)
aggregated_transactions = {}
for item in transactions:
item['description'] = item['description'].replace(' ', ' ', 10).replace('\t', ' ')
top_group_value = item.get(first_group)
second_group_value = item.get(second_group)
if top_group_value in aggregated_transactions.keys():
if second_group_value in aggregated_transactions[top_group_value].keys():
sub_group = aggregated_transactions[top_group_value][second_group_value]
sub_group['transactions'].append(item)
sub_group['summary']['amount'] += item['amount']
sub_group['summary']['total_transactions'] += 1
sub_group['summary']['min'] = min(sub_group['summary']['min'], item['amount'])
sub_group['summary']['max'] = max(sub_group['summary']['max'], item['amount'])
sub_group['summary']['avg'] = round(sub_group['summary']['amount'] / sub_group['summary']['total_transactions'], 2)
else:
aggregated_transactions[top_group_value][second_group_value] = {
'summary': {
'amount': item['amount'],
'total_transactions': 1,
'min': item['amount'],
'max': item['amount'],
'avg': item['amount']
},
'transactions': [item]
}
else:
aggregated_transactions[top_group_value] = {}
aggregated_transactions[top_group_value][second_group_value] = {
'summary': {
'amount': item['amount'],
'total_transactions': 1,
'min': item['amount'],
'max': item['amount'],
'avg': item['amount']
},
'transactions': [item]
}
return Response(json.dumps(aggregated_transactions, indent=2), mimetype='application/json')
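# Editor's note: the module defines no entry point of its own; a minimal way to
# run it for local development (an assumption, e.g. instead of `flask run`)
# would be:
if __name__ == '__main__':
    app.run(debug=True)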
|
the-stack_0_22 | """A collection of tasks."""
import logging
from ..const import AddonState
from ..coresys import CoreSysAttributes
from ..exceptions import (
AddonsError,
AudioError,
CliError,
CoreDNSError,
HomeAssistantError,
MulticastError,
ObserverError,
)
from ..host.const import HostFeature
from ..jobs.decorator import Job, JobCondition
_LOGGER: logging.Logger = logging.getLogger(__name__)
HASS_WATCHDOG_API = "HASS_WATCHDOG_API"
RUN_UPDATE_SUPERVISOR = 29100
RUN_UPDATE_ADDONS = 57600
RUN_UPDATE_CLI = 28100
RUN_UPDATE_DNS = 30100
RUN_UPDATE_AUDIO = 30200
RUN_UPDATE_MULTICAST = 30300
RUN_UPDATE_OBSERVER = 30400
RUN_RELOAD_ADDONS = 10800
RUN_RELOAD_BACKUPS = 72000
RUN_RELOAD_HOST = 7600
RUN_RELOAD_UPDATER = 7200
RUN_RELOAD_INGRESS = 930
RUN_WATCHDOG_HOMEASSISTANT_DOCKER = 15
RUN_WATCHDOG_HOMEASSISTANT_API = 120
RUN_WATCHDOG_DNS_DOCKER = 30
RUN_WATCHDOG_AUDIO_DOCKER = 60
RUN_WATCHDOG_CLI_DOCKER = 60
RUN_WATCHDOG_OBSERVER_DOCKER = 60
RUN_WATCHDOG_MULTICAST_DOCKER = 60
RUN_WATCHDOG_ADDON_DOCKER = 30
RUN_WATCHDOG_ADDON_APPLICATON = 120
RUN_WATCHDOG_OBSERVER_APPLICATION = 180
RUN_REFRESH_ADDON = 15
RUN_CHECK_CONNECTIVITY = 30
class Tasks(CoreSysAttributes):
"""Handle Tasks inside Supervisor."""
def __init__(self, coresys):
"""Initialize Tasks."""
self.coresys = coresys
self._cache = {}
async def load(self):
"""Add Tasks to scheduler."""
# Update
self.sys_scheduler.register_task(self._update_addons, RUN_UPDATE_ADDONS)
self.sys_scheduler.register_task(self._update_supervisor, RUN_UPDATE_SUPERVISOR)
self.sys_scheduler.register_task(self._update_cli, RUN_UPDATE_CLI)
self.sys_scheduler.register_task(self._update_dns, RUN_UPDATE_DNS)
self.sys_scheduler.register_task(self._update_audio, RUN_UPDATE_AUDIO)
self.sys_scheduler.register_task(self._update_multicast, RUN_UPDATE_MULTICAST)
self.sys_scheduler.register_task(self._update_observer, RUN_UPDATE_OBSERVER)
# Reload
self.sys_scheduler.register_task(self.sys_store.reload, RUN_RELOAD_ADDONS)
self.sys_scheduler.register_task(self.sys_updater.reload, RUN_RELOAD_UPDATER)
self.sys_scheduler.register_task(self.sys_backups.reload, RUN_RELOAD_BACKUPS)
self.sys_scheduler.register_task(self.sys_host.reload, RUN_RELOAD_HOST)
self.sys_scheduler.register_task(self.sys_ingress.reload, RUN_RELOAD_INGRESS)
# Watchdog
self.sys_scheduler.register_task(
self._watchdog_homeassistant_docker, RUN_WATCHDOG_HOMEASSISTANT_DOCKER
)
self.sys_scheduler.register_task(
self._watchdog_homeassistant_api, RUN_WATCHDOG_HOMEASSISTANT_API
)
self.sys_scheduler.register_task(
self._watchdog_dns_docker, RUN_WATCHDOG_DNS_DOCKER
)
self.sys_scheduler.register_task(
self._watchdog_audio_docker, RUN_WATCHDOG_AUDIO_DOCKER
)
self.sys_scheduler.register_task(
self._watchdog_cli_docker, RUN_WATCHDOG_CLI_DOCKER
)
self.sys_scheduler.register_task(
self._watchdog_observer_docker, RUN_WATCHDOG_OBSERVER_DOCKER
)
self.sys_scheduler.register_task(
self._watchdog_observer_application, RUN_WATCHDOG_OBSERVER_APPLICATION
)
self.sys_scheduler.register_task(
self._watchdog_multicast_docker, RUN_WATCHDOG_MULTICAST_DOCKER
)
self.sys_scheduler.register_task(
self._watchdog_addon_docker, RUN_WATCHDOG_ADDON_DOCKER
)
self.sys_scheduler.register_task(
self._watchdog_addon_application, RUN_WATCHDOG_ADDON_APPLICATON
)
# Refresh
self.sys_scheduler.register_task(self._refresh_addon, RUN_REFRESH_ADDON)
# Connectivity
self.sys_scheduler.register_task(
self._check_connectivity, RUN_CHECK_CONNECTIVITY
)
_LOGGER.info("All core tasks are scheduled")
@Job(
conditions=[
JobCondition.HEALTHY,
JobCondition.FREE_SPACE,
JobCondition.INTERNET_HOST,
JobCondition.RUNNING,
]
)
async def _update_addons(self):
"""Check if an update is available for an Add-on and update it."""
for addon in self.sys_addons.all:
if not addon.is_installed or not addon.auto_update:
continue
# Evaluate available updates
if not addon.need_update:
continue
if not addon.test_update_schema():
_LOGGER.warning(
"Add-on %s will be ignored, schema tests failed", addon.slug
)
continue
# Run Add-on update sequential
# avoid issue on slow IO
_LOGGER.info("Add-on auto update process %s", addon.slug)
try:
await addon.update(backup=True)
except AddonsError:
_LOGGER.error("Can't auto update Add-on %s", addon.slug)
@Job(
conditions=[
JobCondition.FREE_SPACE,
JobCondition.INTERNET_HOST,
JobCondition.RUNNING,
]
)
async def _update_supervisor(self):
"""Check and run update of Supervisor Supervisor."""
if not self.sys_supervisor.need_update:
return
_LOGGER.info(
"Found new Supervisor version %s, updating",
self.sys_supervisor.latest_version,
)
await self.sys_supervisor.update()
async def _watchdog_homeassistant_docker(self):
"""Check running state of Docker and start if they is close."""
if not self.sys_homeassistant.watchdog:
# Watchdog is not enabled for Home Assistant
return
if self.sys_homeassistant.error_state:
# Home Assistant is in an error state, this is handled by the rollback feature
return
if not await self.sys_homeassistant.core.is_failed():
# The home assistant container is not in a failed state
return
if self.sys_homeassistant.core.in_progress:
# Home Assistant has a task in progress
return
if await self.sys_homeassistant.core.is_running():
# Home Assistant is running
return
_LOGGER.warning("Watchdog found a problem with Home Assistant Docker!")
try:
await self.sys_homeassistant.core.start()
except HomeAssistantError as err:
_LOGGER.error("Home Assistant watchdog reanimation failed!")
self.sys_capture_exception(err)
else:
return
_LOGGER.info("Rebuilding the Home Assistant Container")
await self.sys_homeassistant.core.rebuild()
async def _watchdog_homeassistant_api(self):
"""Create scheduler task for monitoring running state of API.
Try 2 times to call API before we restart Home-Assistant. Maybe we had
a delay in our system.
"""
if not self.sys_homeassistant.watchdog:
# Watchdog is not enabled for Home Assistant
return
if self.sys_homeassistant.error_state:
# Home Assistant is in an error state, this is handled by the rollback feature
return
if not await self.sys_homeassistant.core.is_running():
# The home assistant container is not running
return
if self.sys_homeassistant.core.in_progress:
# Home Assistant has a task in progress
return
if await self.sys_homeassistant.api.check_api_state():
# Home Assistant is running properly
return
# Init cache data
retry_scan = self._cache.get(HASS_WATCHDOG_API, 0)
# Look like we run into a problem
retry_scan += 1
if retry_scan == 1:
self._cache[HASS_WATCHDOG_API] = retry_scan
_LOGGER.warning("Watchdog miss API response from Home Assistant")
return
_LOGGER.error("Watchdog found a problem with Home Assistant API!")
try:
await self.sys_homeassistant.core.restart()
except HomeAssistantError as err:
_LOGGER.error("Home Assistant watchdog reanimation failed!")
self.sys_capture_exception(err)
finally:
self._cache[HASS_WATCHDOG_API] = 0
@Job(conditions=JobCondition.RUNNING)
async def _update_cli(self):
"""Check and run update of cli."""
if not self.sys_plugins.cli.need_update:
return
_LOGGER.info(
"Found new cli version %s, updating", self.sys_plugins.cli.latest_version
)
await self.sys_plugins.cli.update()
@Job(conditions=JobCondition.RUNNING)
async def _update_dns(self):
"""Check and run update of CoreDNS plugin."""
if not self.sys_plugins.dns.need_update:
return
_LOGGER.info(
"Found new CoreDNS plugin version %s, updating",
self.sys_plugins.dns.latest_version,
)
await self.sys_plugins.dns.update()
@Job(conditions=JobCondition.RUNNING)
async def _update_audio(self):
"""Check and run update of PulseAudio plugin."""
if not self.sys_plugins.audio.need_update:
return
_LOGGER.info(
"Found new PulseAudio plugin version %s, updating",
self.sys_plugins.audio.latest_version,
)
await self.sys_plugins.audio.update()
@Job(conditions=JobCondition.RUNNING)
async def _update_observer(self):
"""Check and run update of Observer plugin."""
if not self.sys_plugins.observer.need_update:
return
_LOGGER.info(
"Found new Observer plugin version %s, updating",
self.sys_plugins.observer.latest_version,
)
await self.sys_plugins.observer.update()
@Job(conditions=JobCondition.RUNNING)
async def _update_multicast(self):
"""Check and run update of multicast."""
if not self.sys_plugins.multicast.need_update:
return
_LOGGER.info(
"Found new Multicast version %s, updating",
self.sys_plugins.multicast.latest_version,
)
await self.sys_plugins.multicast.update()
async def _watchdog_dns_docker(self):
"""Check running state of Docker and start if they is close."""
# if CoreDNS is active
if await self.sys_plugins.dns.is_running() or self.sys_plugins.dns.in_progress:
return
_LOGGER.warning("Watchdog found a problem with CoreDNS plugin!")
# Detect loop
await self.sys_plugins.dns.loop_detection()
try:
await self.sys_plugins.dns.start()
except CoreDNSError:
_LOGGER.error("CoreDNS watchdog reanimation failed!")
async def _watchdog_audio_docker(self):
"""Check running state of Docker and start if they is close."""
# if PulseAudio plugin is active
if (
await self.sys_plugins.audio.is_running()
or self.sys_plugins.audio.in_progress
):
return
_LOGGER.warning("Watchdog found a problem with PulseAudio plugin!")
try:
await self.sys_plugins.audio.start()
except AudioError:
_LOGGER.error("PulseAudio watchdog reanimation failed!")
async def _watchdog_cli_docker(self):
"""Check running state of Docker and start if they is close."""
# if cli plugin is active
if await self.sys_plugins.cli.is_running() or self.sys_plugins.cli.in_progress:
return
_LOGGER.warning("Watchdog found a problem with cli plugin!")
try:
await self.sys_plugins.cli.start()
except CliError:
_LOGGER.error("CLI watchdog reanimation failed!")
async def _watchdog_observer_docker(self):
"""Check running state of Docker and start if they is close."""
# if observer plugin is active
if (
await self.sys_plugins.observer.is_running()
or self.sys_plugins.observer.in_progress
):
return
_LOGGER.warning("Watchdog/Docker found a problem with observer plugin!")
try:
await self.sys_plugins.observer.start()
except ObserverError:
_LOGGER.error("Observer watchdog reanimation failed!")
async def _watchdog_observer_application(self):
"""Check running state of application and rebuild if they is not response."""
# if observer plugin is active
if (
self.sys_plugins.observer.in_progress
or await self.sys_plugins.observer.check_system_runtime()
):
return
_LOGGER.warning("Watchdog/Application found a problem with observer plugin!")
try:
await self.sys_plugins.observer.rebuild()
except ObserverError:
_LOGGER.error("Observer watchdog reanimation failed!")
async def _watchdog_multicast_docker(self):
"""Check running state of Docker and start if they is close."""
# if multicast plugin is active
if (
await self.sys_plugins.multicast.is_running()
or self.sys_plugins.multicast.in_progress
):
return
_LOGGER.warning("Watchdog found a problem with Multicast plugin!")
try:
await self.sys_plugins.multicast.start()
except MulticastError:
_LOGGER.error("Multicast watchdog reanimation failed!")
async def _watchdog_addon_docker(self):
"""Check running state of Docker and start if they is close."""
for addon in self.sys_addons.installed:
# if watchdog need looking for
if not addon.watchdog or await addon.is_running():
continue
# if Addon have running actions
if addon.in_progress or addon.state != AddonState.STARTED:
continue
_LOGGER.warning("Watchdog found a problem with %s!", addon.slug)
try:
await addon.start()
except AddonsError as err:
_LOGGER.error("%s watchdog reanimation failed with %s", addon.slug, err)
self.sys_capture_exception(err)
async def _watchdog_addon_application(self):
"""Check running state of the application and start if they is hangs."""
for addon in self.sys_addons.installed:
# if watchdog need looking for
if not addon.watchdog or addon.state != AddonState.STARTED:
continue
# Init cache data
retry_scan = self._cache.get(addon.slug, 0)
# if Addon have running actions / Application work
if addon.in_progress or await addon.watchdog_application():
continue
# Look like we run into a problem
retry_scan += 1
if retry_scan == 1:
self._cache[addon.slug] = retry_scan
_LOGGER.warning(
"Watchdog missing application response from %s", addon.slug
)
return
_LOGGER.warning("Watchdog found a problem with %s application!", addon.slug)
try:
await addon.restart()
except AddonsError as err:
_LOGGER.error("%s watchdog reanimation failed with %s", addon.slug, err)
self.sys_capture_exception(err)
finally:
self._cache[addon.slug] = 0
async def _refresh_addon(self) -> None:
"""Refresh addon state."""
for addon in self.sys_addons.installed:
# if watchdog need looking for
if addon.watchdog or addon.state != AddonState.STARTED:
continue
# if Addon have running actions
if addon.in_progress or await addon.is_running():
continue
# Adjust state
addon.state = AddonState.STOPPED
async def _check_connectivity(self) -> None:
"""Check system connectivity."""
value = self._cache.get("connectivity", 0)
        # A full check is only needed when not connected, or once every 10 minutes
if value >= 600:
pass
elif (
self.sys_supervisor.connectivity
and self.sys_host.network.connectivity is None
) or (
self.sys_supervisor.connectivity
and self.sys_host.network.connectivity is not None
and self.sys_host.network.connectivity
):
self._cache["connectivity"] = value + RUN_CHECK_CONNECTIVITY
return
# Check connectivity
try:
await self.sys_supervisor.check_connectivity()
if HostFeature.NETWORK in self.sys_host.features:
await self.sys_host.network.check_connectivity()
finally:
self._cache["connectivity"] = 0
|
the-stack_0_23 | """Patch to fix MNIST download issue as described here:
- https://github.com/pytorch/ignite/issues/1737
- https://github.com/pytorch/vision/issues/3500
"""
import os
import subprocess as sp
import torch
from torchvision.datasets.mnist import MNIST, read_image_file, read_label_file
from torchvision.datasets.utils import extract_archive
def patched_download(self):
"""wget patched download method.
"""
if self._check_exists():
return
os.makedirs(self.raw_folder, exist_ok=True)
os.makedirs(self.processed_folder, exist_ok=True)
# download files
for url, md5 in self.resources:
filename = url.rpartition("/")[2]
download_root = os.path.expanduser(self.raw_folder)
extract_root = None
remove_finished = False
if extract_root is None:
extract_root = download_root
if not filename:
filename = os.path.basename(url)
# Use wget to download archives
sp.run(["wget", url, "-P", download_root])
archive = os.path.join(download_root, filename)
print("Extracting {} to {}".format(archive, extract_root))
extract_archive(archive, extract_root, remove_finished)
# process and save as torch files
print("Processing...")
training_set = (
read_image_file(os.path.join(self.raw_folder, "train-images-idx3-ubyte")),
read_label_file(os.path.join(self.raw_folder, "train-labels-idx1-ubyte")),
)
test_set = (
read_image_file(os.path.join(self.raw_folder, "t10k-images-idx3-ubyte")),
read_label_file(os.path.join(self.raw_folder, "t10k-labels-idx1-ubyte")),
)
with open(os.path.join(self.processed_folder, self.training_file), "wb") as f:
torch.save(training_set, f)
with open(os.path.join(self.processed_folder, self.test_file), "wb") as f:
torch.save(test_set, f)
print("Done!")
def main():
# Patch download method
MNIST.download = patched_download
# Download MNIST
MNIST(".", download=True)
if __name__ == "__main__":
main()
|
the-stack_0_25 | # -*- coding: utf-8 -*-
# :Project: metapensiero.pj -- compatibility
# :Created: lun 30 mar 2020, 01:48:33
# :Author: Alberto Berti <[email protected]>
# :License: GNU General Public License version 3 or later
# :Copyright: © 2020 Alberto Berti
#
import ast
import sys
is_py36 = sys.version_info >= (3, 6)
if is_py36:
assign_types = (ast.Assign, ast.AnnAssign)
else:
assign_types = (ast.Assign,)
|
the-stack_0_27 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
from _bpy import types as bpy_types
import _bpy
StructRNA = bpy_types.bpy_struct
StructMetaPropGroup = bpy_types.bpy_struct_meta_idprop
# StructRNA = bpy_types.Struct
bpy_types.BlendDataLibraries.load = _bpy._library_load
bpy_types.BlendDataLibraries.write = _bpy._library_write
bpy_types.BlendData.user_map = _bpy._rna_id_collection_user_map
bpy_types.BlendData.batch_remove = _bpy._rna_id_collection_batch_remove
class Context(StructRNA):
__slots__ = ()
def copy(self):
from types import BuiltinMethodType
new_context = {}
generic_attrs = (
*StructRNA.__dict__.keys(),
"bl_rna", "rna_type", "copy",
)
for attr in dir(self):
if not (attr.startswith("_") or attr in generic_attrs):
value = getattr(self, attr)
if type(value) != BuiltinMethodType:
new_context[attr] = value
return new_context
class Library(bpy_types.ID):
__slots__ = ()
@property
def users_id(self):
"""ID data blocks which use this library"""
import bpy
# See: readblenentry.c, IDTYPE_FLAGS_ISLINKABLE,
# we could make this an attribute in rna.
attr_links = (
"actions", "armatures", "brushes", "cameras",
"curves", "grease_pencils", "collections", "images",
"lights", "lattices", "materials", "metaballs",
"meshes", "node_groups", "objects", "scenes",
"sounds", "speakers", "textures", "texts",
"fonts", "worlds",
)
return tuple(id_block
for attr in attr_links
for id_block in getattr(bpy.data, attr)
if id_block.library == self)
class Texture(bpy_types.ID):
__slots__ = ()
@property
def users_material(self):
"""Materials that use this texture"""
import bpy
return tuple(mat for mat in bpy.data.materials
if self in [slot.texture
for slot in mat.texture_slots
if slot]
)
@property
def users_object_modifier(self):
"""Object modifiers that use this texture"""
import bpy
return tuple(
obj for obj in bpy.data.objects if
self in [
mod.texture
for mod in obj.modifiers
if mod.type == 'DISPLACE']
)
class Collection(bpy_types.ID):
__slots__ = ()
@property
def users_dupli_group(self):
"""The collection instance objects this collection is used in"""
import bpy
return tuple(obj for obj in bpy.data.objects
if self == obj.instance_collection)
class Object(bpy_types.ID):
__slots__ = ()
@property
def children(self):
"""All the children of this object. Warning: takes O(len(bpy.data.objects)) time."""
import bpy
return tuple(child for child in bpy.data.objects
if child.parent == self)
@property
def users_collection(self):
"""The collections this object is in. Warning: takes O(len(bpy.data.collections) + len(bpy.data.scenes)) time."""
import bpy
return (
tuple(
collection for collection in bpy.data.collections
if self in collection.objects[:]
) + tuple(
scene.collection for scene in bpy.data.scenes
if self in scene.collection.objects[:]
)
)
@property
def users_scene(self):
"""The scenes this object is in. Warning: takes O(len(bpy.data.scenes) * len(bpy.data.objects)) time."""
import bpy
return tuple(scene for scene in bpy.data.scenes
if self in scene.objects[:])
class WindowManager(bpy_types.ID):
__slots__ = ()
def popup_menu(self, draw_func, title="", icon='NONE'):
import bpy
popup = self.popmenu_begin__internal(title, icon=icon)
try:
draw_func(popup, bpy.context)
finally:
self.popmenu_end__internal(popup)
def popover(
self, draw_func, *,
ui_units_x=0,
keymap=None,
from_active_button=False,
):
import bpy
popup = self.popover_begin__internal(
ui_units_x=ui_units_x,
from_active_button=from_active_button,
)
try:
draw_func(popup, bpy.context)
finally:
self.popover_end__internal(popup, keymap=keymap)
def popup_menu_pie(self, event, draw_func, title="", icon='NONE'):
import bpy
pie = self.piemenu_begin__internal(title, icon=icon, event=event)
if pie:
try:
draw_func(pie, bpy.context)
finally:
self.piemenu_end__internal(pie)
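# Editor's usage sketch (assumption, not part of the original module): the draw
# callback passed to WindowManager.popup_menu receives the popup and the current
# context, so a typical call looks like
#
#   def draw(self, context):
#       self.layout.label(text="Hello from a popup")
#
#   bpy.context.window_manager.popup_menu(draw, title="Example", icon='INFO')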
class WorkSpace(bpy_types.ID):
__slots__ = ()
def status_text_set(self, text):
"""
        Set the status text, or None to clear it.
        When text is a function, it will be called with the (header, context) arguments.
"""
from bl_ui.space_statusbar import STATUSBAR_HT_header
draw_fn = getattr(STATUSBAR_HT_header, "_draw_orig", None)
if draw_fn is None:
draw_fn = STATUSBAR_HT_header._draw_orig = STATUSBAR_HT_header.draw
if not (text is None or isinstance(text, str)):
draw_fn = text
text = None
self.status_text_set_internal(text)
STATUSBAR_HT_header.draw = draw_fn
class _GenericBone:
"""
functions for bones, common between Armature/Pose/Edit bones.
internal subclassing use only.
"""
__slots__ = ()
def translate(self, vec):
"""Utility function to add *vec* to the head and tail of this bone"""
self.head += vec
self.tail += vec
def parent_index(self, parent_test):
"""
The same as 'bone in other_bone.parent_recursive'
        but saves generating a list.
"""
# use the name so different types can be tested.
name = parent_test.name
parent = self.parent
i = 1
while parent:
if parent.name == name:
return i
parent = parent.parent
i += 1
return 0
@property
def x_axis(self):
""" Vector pointing down the x-axis of the bone.
"""
from mathutils import Vector
return self.matrix.to_3x3() @ Vector((1.0, 0.0, 0.0))
@property
def y_axis(self):
""" Vector pointing down the y-axis of the bone.
"""
from mathutils import Vector
return self.matrix.to_3x3() @ Vector((0.0, 1.0, 0.0))
@property
def z_axis(self):
""" Vector pointing down the z-axis of the bone.
"""
from mathutils import Vector
return self.matrix.to_3x3() @ Vector((0.0, 0.0, 1.0))
@property
def basename(self):
"""The name of this bone before any '.' character"""
# return self.name.rsplit(".", 1)[0]
return self.name.split(".")[0]
@property
def parent_recursive(self):
"""A list of parents, starting with the immediate parent"""
parent_list = []
parent = self.parent
while parent:
if parent:
parent_list.append(parent)
parent = parent.parent
return parent_list
@property
def center(self):
"""The midpoint between the head and the tail."""
return (self.head + self.tail) * 0.5
@property
def vector(self):
"""
The direction this bone is pointing.
Utility function for (tail - head)
"""
return (self.tail - self.head)
@property
def children(self):
"""A list of all the bones children. Warning: takes O(len(bones)) time."""
return [child for child in self._other_bones if child.parent == self]
@property
def children_recursive(self):
"""A list of all children from this bone. Warning: takes O(len(bones)**2) time."""
bones_children = []
for bone in self._other_bones:
index = bone.parent_index(self)
if index:
bones_children.append((index, bone))
# sort by distance to parent
bones_children.sort(key=lambda bone_pair: bone_pair[0])
return [bone for index, bone in bones_children]
@property
def children_recursive_basename(self):
"""
Returns a chain of children with the same base name as this bone.
        Only direct chains are supported; forks caused by multiple children
with matching base names will terminate the function
and not be returned. Warning: takes O(len(bones)**2) time.
"""
basename = self.basename
chain = []
child = self
while True:
children = child.children
children_basename = []
for child in children:
if basename == child.basename:
children_basename.append(child)
if len(children_basename) == 1:
child = children_basename[0]
chain.append(child)
else:
if children_basename:
print("multiple basenames found, "
"this is probably not what you want!",
self.name, children_basename)
break
return chain
@property
def _other_bones(self):
id_data = self.id_data
id_data_type = type(id_data)
if id_data_type == bpy_types.Object:
bones = id_data.pose.bones
elif id_data_type == bpy_types.Armature:
bones = id_data.edit_bones
if not bones: # not in edit mode
bones = id_data.bones
return bones
class PoseBone(StructRNA, _GenericBone, metaclass=StructMetaPropGroup):
__slots__ = ()
@property
def children(self):
obj = self.id_data
pbones = obj.pose.bones
self_bone = self.bone
return tuple(pbones[bone.name] for bone in obj.data.bones
if bone.parent == self_bone)
class Bone(StructRNA, _GenericBone, metaclass=StructMetaPropGroup):
__slots__ = ()
class EditBone(StructRNA, _GenericBone, metaclass=StructMetaPropGroup):
__slots__ = ()
def align_orientation(self, other):
"""
        Align this bone to another by moving its tail and setting its roll;
        the length of the other bone is not used.
"""
vec = other.vector.normalized() * self.length
self.tail = self.head + vec
self.roll = other.roll
def transform(self, matrix, scale=True, roll=True):
"""
Transform the bone's head, tail, roll and envelope
(when the matrix has a scale component).
:arg matrix: 3x3 or 4x4 transformation matrix.
:type matrix: :class:`mathutils.Matrix`
:arg scale: Scale the bone envelope by the matrix.
:type scale: bool
:arg roll:
Correct the roll to point in the same relative
direction to the head and tail.
:type roll: bool
"""
from mathutils import Vector
z_vec = self.matrix.to_3x3() @ Vector((0.0, 0.0, 1.0))
self.tail = matrix @ self.tail
self.head = matrix @ self.head
if scale:
scalar = matrix.median_scale
self.head_radius *= scalar
self.tail_radius *= scalar
if roll:
self.align_roll(matrix @ z_vec)
def ord_ind(i1, i2):
if i1 < i2:
return i1, i2
return i2, i1
class Mesh(bpy_types.ID):
__slots__ = ()
def from_pydata(self, vertices, edges, faces):
"""
Make a mesh from a list of vertices/edges/faces
Until we have a nicer way to make geometry, use this.
:arg vertices:
float triplets each representing (X, Y, Z)
eg: [(0.0, 1.0, 0.5), ...].
:type vertices: iterable object
:arg edges:
int pairs, each pair contains two indices to the
*vertices* argument. eg: [(1, 2), ...]
:type edges: iterable object
:arg faces:
iterator of faces, each face contains three or more indices to
the *vertices* argument. eg: [(5, 6, 8, 9), (1, 2, 3), ...]
:type faces: iterable object
.. warning::
Invalid mesh data
*(out of range indices, edges with matching indices,
2 sided faces... etc)* are **not** prevented.
If the data used for mesh creation isn't known to be valid,
run :class:`Mesh.validate` after this function.
"""
from itertools import chain, islice, accumulate
face_lengths = tuple(map(len, faces))
self.vertices.add(len(vertices))
self.edges.add(len(edges))
self.loops.add(sum(face_lengths))
self.polygons.add(len(faces))
self.vertices.foreach_set("co", tuple(chain.from_iterable(vertices)))
self.edges.foreach_set("vertices", tuple(chain.from_iterable(edges)))
vertex_indices = tuple(chain.from_iterable(faces))
loop_starts = tuple(islice(chain([0], accumulate(face_lengths)), len(faces)))
self.polygons.foreach_set("loop_total", face_lengths)
self.polygons.foreach_set("loop_start", loop_starts)
self.polygons.foreach_set("vertices", vertex_indices)
# if no edges - calculate them
if faces and (not edges):
self.update(calc_edges=True)
elif edges:
self.update(calc_edges_loose=True)
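# Example (hedged): a minimal from_pydata call creating a single triangle; the mesh
# name and coordinates below are illustrative, and `bpy` is assumed to be importable.
# mesh = bpy.data.meshes.new("example_mesh")
# mesh.from_pydata([(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (1.0, 1.0, 0.0)], [], [(0, 1, 2)])
# mesh.validate()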
@property
def edge_keys(self):
return [ed.key for ed in self.edges]
class MeshEdge(StructRNA):
__slots__ = ()
@property
def key(self):
return ord_ind(*tuple(self.vertices))
class MeshLoopTriangle(StructRNA):
__slots__ = ()
@property
def center(self):
"""The midpoint of the face."""
face_verts = self.vertices[:]
mesh_verts = self.id_data.vertices
return (
mesh_verts[face_verts[0]].co +
mesh_verts[face_verts[1]].co +
mesh_verts[face_verts[2]].co
) / 3.0
@property
def edge_keys(self):
verts = self.vertices[:]
return (
ord_ind(verts[0], verts[1]),
ord_ind(verts[1], verts[2]),
ord_ind(verts[2], verts[0]),
)
class MeshPolygon(StructRNA):
__slots__ = ()
@property
def edge_keys(self):
verts = self.vertices[:]
vlen = len(self.vertices)
return [ord_ind(verts[i], verts[(i + 1) % vlen]) for i in range(vlen)]
@property
def loop_indices(self):
start = self.loop_start
end = start + self.loop_total
return range(start, end)
class Text(bpy_types.ID):
__slots__ = ()
def as_string(self):
"""Return the text as a string."""
return "\n".join(line.body for line in self.lines)
def from_string(self, string):
"""Replace text with this string."""
self.clear()
self.write(string)
def as_module(self):
from os.path import splitext
from types import ModuleType
mod = ModuleType(splitext(self.name)[0])
# TODO: We could use Text.compiled (C struct member)
# if this is called often it will be much faster.
exec(self.as_string(), mod.__dict__)
return mod
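# Example (hedged): importing a text datablock as a module; the text name "utils.py"
# and the attribute accessed on it are assumptions.
# utils = bpy.data.texts["utils.py"].as_module()
# utils.some_function()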
class Sound(bpy_types.ID):
__slots__ = ()
@property
def factory(self):
"""The aud.Factory object of the sound."""
import aud
return aud._sound_from_pointer(self.as_pointer())
class RNAMeta(type):
# TODO(campbell): move to C-API
@property
def is_registered(cls):
return "bl_rna" in cls.__dict__
class RNAMetaPropGroup(StructMetaPropGroup, RNAMeta):
pass
# Same as 'Operator'
# only without 'as_keywords'
class Gizmo(StructRNA):
__slots__ = ()
def __getattribute__(self, attr):
properties = StructRNA.path_resolve(self, "properties")
bl_rna = getattr(properties, "bl_rna", None)
if (bl_rna is not None) and (attr in bl_rna.properties):
return getattr(properties, attr)
return super().__getattribute__(attr)
def __setattr__(self, attr, value):
properties = StructRNA.path_resolve(self, "properties")
bl_rna = getattr(properties, "bl_rna", None)
if (bl_rna is not None) and (attr in bl_rna.properties):
return setattr(properties, attr, value)
return super().__setattr__(attr, value)
def __delattr__(self, attr):
properties = StructRNA.path_resolve(self, "properties")
bl_rna = getattr(properties, "bl_rna", None)
if (bl_rna is not None) and (attr in bl_rna.properties):
return delattr(properties, attr)
return super().__delattr__(attr)
from _bpy import (
_rna_gizmo_target_set_handler as target_set_handler,
_rna_gizmo_target_get_value as target_get_value,
_rna_gizmo_target_set_value as target_set_value,
_rna_gizmo_target_get_range as target_get_range,
)
# Convenience wrappers around private `_gpu` module.
def draw_custom_shape(self, shape, *, matrix=None, select_id=None):
"""
Draw a shape created from :class:`bpy.types.Gizmo.new_custom_shape`.
:arg shape: The cached shape to draw.
:type shape: Undefined.
:arg matrix: 4x4 matrix, when not given
:class:`bpy.types.Gizmo.matrix_world` is used.
:type matrix: :class:`mathutils.Matrix`
:arg select_id: The selection id.
Only use when drawing within :class:`bpy.types.Gizmo.draw_select`.
:type select_id: int
"""
import gpu
if matrix is None:
matrix = self.matrix_world
batch, shader = shape
shader.bind()
if select_id is not None:
gpu.select.load_id(select_id)
else:
if self.is_highlight:
color = (*self.color_highlight, self.alpha_highlight)
else:
color = (*self.color, self.alpha)
shader.uniform_float("color", color)
with gpu.matrix.push_pop():
gpu.matrix.multiply_matrix(matrix)
batch.draw()
@staticmethod
def new_custom_shape(type, verts):
"""
Create a new shape that can be passed to :class:`bpy.types.Gizmo.draw_custom_shape`.
:arg type: The type of shape to create: POINTS, LINES, TRIS or LINE_STRIP.
:type type: string
:arg verts: Coordinates.
:type verts: sequence of 2D or 3D coordinates.
:return: The newly created shape.
:rtype: Undefined (it may change).
"""
import gpu
from gpu.types import (
GPUBatch,
GPUVertBuf,
GPUVertFormat,
)
dims = len(verts[0])
if dims not in {2, 3}:
raise ValueError("Expected 2D or 3D vertex")
fmt = GPUVertFormat()
pos_id = fmt.attr_add(id="pos", comp_type='F32', len=dims, fetch_mode='FLOAT')
vbo = GPUVertBuf(len=len(verts), format=fmt)
vbo.attr_fill(id=pos_id, data=verts)
batch = GPUBatch(type=type, buf=vbo)
shader = gpu.shader.from_builtin('3D_UNIFORM_COLOR' if dims == 3 else '2D_UNIFORM_COLOR')
batch.program_set(shader)
return (batch, shader)
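# Example (hedged): a sketch of the custom-shape workflow in a Gizmo subclass; the
# class name, cache attribute and vertex list are illustrative only.
# class MYADDON_GT_triangle(Gizmo):
#     def setup(self):
#         if not hasattr(self, "custom_shape"):
#             self.custom_shape = self.new_custom_shape('TRIS', ((0, 0), (1, 0), (0, 1)))
#     def draw(self, context):
#         self.draw_custom_shape(self.custom_shape)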
# Dummy class to keep the reference in `bpy_types_dict` and avoid
# errors like: "TypeError: expected GizmoGroup subclass of class ..."
class GizmoGroup(StructRNA):
__slots__ = ()
# Only defined so operators members can be used by accessing self.order
# with doc generation 'self.properties.bl_rna.properties' can fail
class Operator(StructRNA, metaclass=RNAMeta):
__slots__ = ()
def __getattribute__(self, attr):
properties = StructRNA.path_resolve(self, "properties")
bl_rna = getattr(properties, "bl_rna", None)
if (bl_rna is not None) and (attr in bl_rna.properties):
return getattr(properties, attr)
return super().__getattribute__(attr)
def __setattr__(self, attr, value):
properties = StructRNA.path_resolve(self, "properties")
bl_rna = getattr(properties, "bl_rna", None)
if (bl_rna is not None) and (attr in bl_rna.properties):
return setattr(properties, attr, value)
return super().__setattr__(attr, value)
def __delattr__(self, attr):
properties = StructRNA.path_resolve(self, "properties")
bl_rna = getattr(properties, "bl_rna", None)
if (bl_rna is not None) and (attr in bl_rna.properties):
return delattr(properties, attr)
return super().__delattr__(attr)
def as_keywords(self, ignore=()):
"""Return a copy of the properties as a dictionary"""
ignore = ignore + ("rna_type",)
return {attr: getattr(self, attr)
for attr in self.properties.rna_type.properties.keys()
if attr not in ignore}
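# Example (hedged): forwarding this operator's property values to another operator;
# the target operator name below is hypothetical.
# keywords = self.as_keywords(ignore=("check_existing",))
# bpy.ops.export_scene.some_exporter(**keywords)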
class Macro(StructRNA):
# bpy_types is imported before ops is defined
# so we have to do a local import on each run
__slots__ = ()
@classmethod
def define(self, opname):
from _bpy import ops
return ops.macro_define(self, opname)
class PropertyGroup(StructRNA, metaclass=RNAMetaPropGroup):
__slots__ = ()
class RenderEngine(StructRNA, metaclass=RNAMeta):
__slots__ = ()
class KeyingSetInfo(StructRNA, metaclass=RNAMeta):
__slots__ = ()
class AddonPreferences(StructRNA, metaclass=RNAMeta):
__slots__ = ()
class _GenericUI:
__slots__ = ()
@classmethod
def _dyn_ui_initialize(cls):
draw_funcs = getattr(cls.draw, "_draw_funcs", None)
if draw_funcs is None:
def draw_ls(self, context):
# ensure menus always get default context
operator_context_default = self.layout.operator_context
# Support filtering out by owner
workspace = context.workspace
if workspace.use_filter_by_owner:
owner_names = {owner_id.name for owner_id in workspace.owner_ids}
else:
owner_names = None
for func in draw_ls._draw_funcs:
# Begin 'owner_id' filter.
if owner_names is not None:
owner_id = getattr(func, "_owner", None)
if owner_id is not None:
if func._owner not in owner_names:
continue
# End 'owner_id' filter.
# so bad menu functions don't stop
# the entire menu from drawing
try:
func(self, context)
except:
import traceback
traceback.print_exc()
self.layout.operator_context = operator_context_default
draw_funcs = draw_ls._draw_funcs = [cls.draw]
cls.draw = draw_ls
return draw_funcs
@staticmethod
def _dyn_owner_apply(draw_func):
from _bpy import _bl_owner_id_get
owner_id = _bl_owner_id_get()
if owner_id is not None:
draw_func._owner = owner_id
@classmethod
def is_extended(cls):
return bool(getattr(cls.draw, "_draw_funcs", None))
@classmethod
def append(cls, draw_func):
"""
Append a draw function to this menu,
takes the same arguments as the menu's draw function.
"""
draw_funcs = cls._dyn_ui_initialize()
cls._dyn_owner_apply(draw_func)
draw_funcs.append(draw_func)
@classmethod
def prepend(cls, draw_func):
"""
Prepend a draw function to this menu, takes the same arguments as
the menu's draw function.
"""
draw_funcs = cls._dyn_ui_initialize()
cls._dyn_owner_apply(draw_func)
draw_funcs.insert(0, draw_func)
@classmethod
def remove(cls, draw_func):
"""Remove a draw function that has been added to this menu"""
draw_funcs = cls._dyn_ui_initialize()
try:
draw_funcs.remove(draw_func)
except ValueError:
pass
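# Example (hedged): extending an existing menu from an add-on; the menu class
# (TOPBAR_MT_file) is an assumption about the running Blender version.
# def menu_draw(self, context):
#     self.layout.operator("wm.save_mainfile")
# bpy.types.TOPBAR_MT_file.append(menu_draw)
# # ...and later, typically in the add-on's unregister():
# bpy.types.TOPBAR_MT_file.remove(menu_draw)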
class Panel(StructRNA, _GenericUI, metaclass=RNAMeta):
__slots__ = ()
class UIList(StructRNA, _GenericUI, metaclass=RNAMeta):
__slots__ = ()
class Header(StructRNA, _GenericUI, metaclass=RNAMeta):
__slots__ = ()
class Menu(StructRNA, _GenericUI, metaclass=RNAMeta):
__slots__ = ()
def path_menu(self, searchpaths, operator, *,
props_default=None, prop_filepath="filepath",
filter_ext=None, filter_path=None, display_name=None,
add_operator=None):
"""
Populate a menu from a list of paths.
:arg searchpaths: Paths to scan.
:type searchpaths: sequence of strings.
:arg operator: The operator id to use with each file.
:type operator: string
:arg prop_filepath: Optional operator filepath property (defaults to "filepath").
:type prop_filepath: string
:arg props_default: Properties to assign to each operator.
:type props_default: dict
:arg filter_ext: Optional callback that takes the file extension.
Returning false excludes the file from the list.
:type filter_ext: Callable that takes a string and returns a bool.
:arg display_name: Optional callback that takes the full path, returns the name to display.
:type display_name: Callable that takes a string and returns a string.
"""
layout = self.layout
import os
import bpy.utils
layout = self.layout
if not searchpaths:
layout.label(text="* Missing Paths *")
# collect paths
files = []
for directory in searchpaths:
files.extend([
(f, os.path.join(directory, f))
for f in os.listdir(directory)
if (not f.startswith("."))
if ((filter_ext is None) or
(filter_ext(os.path.splitext(f)[1])))
if ((filter_path is None) or
(filter_path(f)))
])
files.sort()
col = layout.column(align=True)
for f, filepath in files:
# Intentionally pass the full path to 'display_name' callback,
# since the callback may want to use part of the directory in the name.
row = col.row(align=True)
name = display_name(filepath) if display_name else bpy.path.display_name(f)
props = row.operator(
operator,
text=name,
translate=False,
)
if props_default is not None:
for attr, value in props_default.items():
setattr(props, attr, value)
setattr(props, prop_filepath, filepath)
if operator == "script.execute_preset":
props.menu_idname = self.bl_idname
if add_operator:
props = row.operator(add_operator, text="", icon='REMOVE')
props.name = name
props.remove_name = True
if add_operator:
wm = bpy.data.window_managers[0]
layout.separator()
row = layout.row()
sub = row.row()
sub.emboss = 'NORMAL'
sub.prop(wm, "preset_name", text="")
props = row.operator(add_operator, text="", icon='ADD')
props.name = wm.preset_name
def draw_preset(self, _context):
"""
Define these on the subclass:
- preset_operator (string)
- preset_subdir (string)
Optionally:
- preset_add_operator (string)
- preset_extensions (set of strings)
- preset_operator_defaults (dict of keyword args)
"""
import bpy
ext_valid = getattr(self, "preset_extensions", {".py", ".xml"})
props_default = getattr(self, "preset_operator_defaults", None)
add_operator = getattr(self, "preset_add_operator", None)
self.path_menu(
bpy.utils.preset_paths(self.preset_subdir),
self.preset_operator,
props_default=props_default,
filter_ext=lambda ext: ext.lower() in ext_valid,
add_operator=add_operator,
)
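# Example (hedged): a minimal preset menu relying on draw_preset; the class name,
# label and preset_subdir are illustrative.
# class MYADDON_MT_presets(Menu):
#     bl_label = "My Presets"
#     preset_subdir = "my_addon/presets"
#     preset_operator = "script.execute_preset"
#     draw = Menu.draw_preset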
@classmethod
def draw_collapsible(cls, context, layout):
# helper function for (optionally) collapsed header menus
# only usable within headers
if context.area.show_menus:
# Align menus to space them closely.
layout.row(align=True).menu_contents(cls.__name__)
else:
layout.menu(cls.__name__, icon='COLLAPSEMENU')
class NodeTree(bpy_types.ID, metaclass=RNAMetaPropGroup):
__slots__ = ()
class Node(StructRNA, metaclass=RNAMetaPropGroup):
__slots__ = ()
@classmethod
def poll(cls, _ntree):
return True
class NodeInternal(Node):
__slots__ = ()
class NodeSocket(StructRNA, metaclass=RNAMetaPropGroup):
__slots__ = ()
@property
def links(self):
"""List of node links from or to this socket. Warning: takes O(len(nodetree.links)) time."""
return tuple(
link for link in self.id_data.links
if (link.from_socket == self or
link.to_socket == self))
class NodeSocketInterface(StructRNA, metaclass=RNAMetaPropGroup):
__slots__ = ()
# These are intermediate subclasses, need a bpy type too
class CompositorNode(NodeInternal):
__slots__ = ()
@classmethod
def poll(cls, ntree):
return ntree.bl_idname == 'CompositorNodeTree'
def update(self):
self.tag_need_exec()
class ShaderNode(NodeInternal):
__slots__ = ()
@classmethod
def poll(cls, ntree):
return ntree.bl_idname == 'ShaderNodeTree'
class TextureNode(NodeInternal):
__slots__ = ()
@classmethod
def poll(cls, ntree):
return ntree.bl_idname == 'TextureNodeTree'
|
the-stack_0_30 | from proteus import Context
from proteus import Comm
comm = Comm.get()
ctx = Context.get()
# simulation flags for error analysis
#
# simFlagsList is initialized in proteus.iproteus
#
simFlagsList[0]['errorQuantities']=['u']
simFlagsList[0]['errorTypes']= ['numericalSolution'] #compute error in soln and glob. mass bal
simFlagsList[0]['errorNorms']= ['L2','H1'] #compute L2 norm in space or H0 or ...
simFlagsList[0]['errorTimes']= ['Last'] #'All', 'Last'
simFlagsList[0]['echo']=True
#
start
quit
|
the-stack_0_32 | from keras_applications import get_submodules_from_kwargs
def Conv2dBn(
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation=None,
activation_dtype=None,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_batchnorm=False,
**kwargs
):
"""Extension of Conv2D layer with batchnorm"""
conv_name, act_name, bn_name = None, None, None
block_name = kwargs.pop('name', None)
backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
if block_name is not None:
conv_name = block_name + '_conv'
if block_name is not None and activation is not None:
act_str = activation.__name__ if callable(activation) else str(activation)
act_name = block_name + '_' + act_str
if block_name is not None and use_batchnorm:
bn_name = block_name + '_bn'
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
def wrapper(input_tensor):
x = layers.Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=None,
use_bias=not (use_batchnorm),
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
name=conv_name,
)(input_tensor)
if use_batchnorm:
x = layers.BatchNormalization(axis=bn_axis, name=bn_name)(x)
if activation:
if activation_dtype is None:
x = layers.Activation(activation, name=act_name)(x)
else:
x = layers.Activation(activation, name=act_name, dtype=activation_dtype)(x)
return x
return wrapper
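# Example (hedged): a sketch of calling Conv2dBn with the standalone keras package
# supplying the required submodules; the shapes and block name are illustrative.
# import keras
# inputs = keras.layers.Input(shape=(224, 224, 3))
# x = Conv2dBn(64, (3, 3), padding='same', activation='relu', use_batchnorm=True, name='block1',
#              backend=keras.backend, layers=keras.layers, models=keras.models, utils=keras.utils)(inputs)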
|
the-stack_0_33 | #!/usr/bin/env python3
import os
import random
import unittest
from math import exp, pi
import gpytorch
import torch
from gpytorch.distributions import MultivariateNormal
from gpytorch.kernels import RBFKernel, ScaleKernel
from gpytorch.likelihoods import GaussianLikelihood, FixedNoiseGaussianLikelihood
from gpytorch.means import ConstantMean
from gpytorch.priors import SmoothedBoxPrior
from gpytorch.test.utils import least_used_cuda_device
from torch import optim
class ExactGPModel(gpytorch.models.ExactGP):
def __init__(self, train_inputs, train_targets, likelihood):
super(ExactGPModel, self).__init__(train_inputs, train_targets, likelihood)
self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1, 1))
self.rbf_covar_module = RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-3), exp(3), sigma=0.1))
self.covar_module = ScaleKernel(self.rbf_covar_module)
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return MultivariateNormal(mean_x, covar_x)
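# Example (hedged): prediction-only use of the model defined above, with illustrative data.
# likelihood = GaussianLikelihood()
# model = ExactGPModel(torch.linspace(0, 1, 10), torch.sin(torch.linspace(0, 1, 10)), likelihood)
# model.eval(); likelihood.eval()
# with torch.no_grad():
#     preds = likelihood(model(torch.linspace(0, 1, 5)))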
class TestWhiteNoiseGPRegression(unittest.TestCase):
def setUp(self):
if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false":
self.rng_state = torch.get_rng_state()
torch.manual_seed(1)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(1)
random.seed(1)
def tearDown(self):
if hasattr(self, "rng_state"):
torch.set_rng_state(self.rng_state)
def _get_data(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
# Simple training data: let's try to learn a sine function
train_x = torch.linspace(0, 1, 11, device=device)
train_y = torch.sin(train_x * (2 * pi))
test_x = torch.linspace(0, 1, 51, device=device)
test_y = torch.sin(test_x * (2 * pi))
return train_x, test_x, train_y, test_y
def test_posterior_latent_gp_and_likelihood_without_optimization(self, cuda=False):
train_x, test_x, train_y, test_y = self._get_data(cuda=cuda)
with gpytorch.settings.debug(False):
# We're manually going to set the hyperparameters to be ridiculous
likelihood = FixedNoiseGaussianLikelihood(torch.ones(11) * 1e-8)
gp_model = ExactGPModel(train_x, train_y, likelihood)
# Update lengthscale prior to accommodate extreme parameters
gp_model.rbf_covar_module.initialize(lengthscale=exp(-6))
gp_model.mean_module.initialize(constant=0)
if cuda:
gp_model.cuda()
likelihood.cuda()
# Compute posterior distribution
gp_model.eval()
likelihood.eval()
# Let's see how our model does, conditioned with weird hyperparams
# The posterior should fit all the data
function_predictions = likelihood(gp_model(train_x))
self.assertLess(torch.norm(function_predictions.mean - train_y), 1e-3)
self.assertLess(torch.norm(function_predictions.variance), 5e-3)
# It shouldn't fit much else though
test_function_predictions = gp_model(torch.tensor([1.1]).type_as(test_x))
self.assertLess(torch.norm(test_function_predictions.mean - 0), 1e-4)
self.assertLess(torch.norm(test_function_predictions.variance - gp_model.covar_module.outputscale), 1e-4)
def test_posterior_latent_gp_and_likelihood_without_optimization_cuda(self):
if torch.cuda.is_available():
with least_used_cuda_device():
self.test_posterior_latent_gp_and_likelihood_without_optimization(cuda=True)
def test_posterior_latent_gp_and_likelihood_with_optimization(self, cuda=False):
train_x, test_x, train_y, test_y = self._get_data(cuda=cuda)
# We're manually going to set the hyperparameters to something they shouldn't be
likelihood = FixedNoiseGaussianLikelihood(torch.ones(11) * 0.001)
gp_model = ExactGPModel(train_x, train_y, likelihood)
mll = gpytorch.ExactMarginalLogLikelihood(likelihood, gp_model)
gp_model.rbf_covar_module.initialize(lengthscale=exp(1))
gp_model.mean_module.initialize(constant=0)
if cuda:
gp_model.cuda()
likelihood.cuda()
# Find optimal model hyperparameters
gp_model.train()
likelihood.train()
optimizer = optim.Adam(list(gp_model.parameters()) + list(likelihood.parameters()), lr=0.1)
optimizer.n_iter = 0
with gpytorch.settings.debug(False):
for _ in range(75):
optimizer.zero_grad()
output = gp_model(train_x)
loss = -mll(output, train_y)
loss.backward()
optimizer.n_iter += 1
optimizer.step()
for param in gp_model.parameters():
self.assertTrue(param.grad is not None)
self.assertGreater(param.grad.norm().item(), 0)
for param in likelihood.parameters():
self.assertTrue(param.grad is not None)
self.assertGreater(param.grad.norm().item(), 0)
optimizer.step()
# Test the model
gp_model.eval()
likelihood.eval()
test_function_predictions = likelihood(gp_model(test_x))
mean_abs_error = torch.mean(torch.abs(test_y - test_function_predictions.mean))
self.assertLess(mean_abs_error.squeeze().item(), 0.05)
def test_posterior_latent_gp_and_likelihood_with_optimization_cuda(self):
if torch.cuda.is_available():
with least_used_cuda_device():
self.test_posterior_latent_gp_and_likelihood_with_optimization(cuda=True)
def test_posterior_latent_gp_and_likelihood_fast_pred_var(self, cuda=False):
train_x, test_x, train_y, test_y = self._get_data(cuda=cuda)
with gpytorch.settings.fast_pred_var(), gpytorch.settings.debug(False):
# We're manually going to set the hyperparameters to something they shouldn't be
likelihood = GaussianLikelihood(noise_prior=SmoothedBoxPrior(exp(-3), exp(3), sigma=0.1))
gp_model = ExactGPModel(train_x, train_y, likelihood)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)
gp_model.rbf_covar_module.initialize(lengthscale=exp(1))
gp_model.mean_module.initialize(constant=0)
likelihood.initialize(noise=exp(1))
if cuda:
gp_model.cuda()
likelihood.cuda()
# Find optimal model hyperparameters
gp_model.train()
likelihood.train()
optimizer = optim.Adam(list(gp_model.parameters()) + list(likelihood.parameters()), lr=0.1)
optimizer.n_iter = 0
for _ in range(50):
optimizer.zero_grad()
output = gp_model(train_x)
loss = -mll(output, train_y)
loss.backward()
optimizer.n_iter += 1
optimizer.step()
for param in gp_model.parameters():
self.assertTrue(param.grad is not None)
self.assertGreater(param.grad.norm().item(), 0)
for param in likelihood.parameters():
self.assertTrue(param.grad is not None)
self.assertGreater(param.grad.norm().item(), 0)
optimizer.step()
# Test the model
gp_model.eval()
likelihood.eval()
# Set the cache
test_function_predictions = likelihood(gp_model(train_x))
# Now bump up the likelihood to something huge
# This will make it easy to calculate the variance
likelihood.raw_noise.data.fill_(3)
test_function_predictions = likelihood(gp_model(train_x))
noise = likelihood.noise
var_diff = (test_function_predictions.variance - noise).abs()
self.assertLess(torch.max(var_diff / noise), 0.05)
def test_posterior_latent_gp_and_likelihood_fast_pred_var_cuda(self):
if torch.cuda.is_available():
with least_used_cuda_device():
self.test_posterior_latent_gp_and_likelihood_fast_pred_var(cuda=True)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_34 | """
Interfaces for serializing Django objects.
Usage::
from django.core import serializers
json = serializers.serialize("json", some_query_set)
objects = list(serializers.deserialize("json", json))
To add your own serializers, use the SERIALIZATION_MODULES setting::
SERIALIZATION_MODULES = {
"csv" : "path.to.csv.serializer",
"txt" : "path.to.txt.serializer",
}
"""
from django.conf import settings
from django.utils import importlib
# Built-in serializers
BUILTIN_SERIALIZERS = {
"xml" : "django.core.serializers.xml_serializer",
"python" : "django.core.serializers.python",
"json" : "django.core.serializers.json",
}
# Check for PyYaml and register the serializer if it's available.
try:
import yaml
BUILTIN_SERIALIZERS["yaml"] = "django.core.serializers.pyyaml"
except ImportError:
pass
_serializers = {}
def register_serializer(format, serializer_module, serializers=None):
""""Register a new serializer.
``serializer_module`` should be the fully qualified module name
for the serializer.
If ``serializers`` is provided, the registration will be added
to the provided dictionary.
If ``serializers`` is not provided, the registration will be made
directly into the global register of serializers. Adding serializers
directly is not a thread-safe operation.
"""
module = importlib.import_module(serializer_module)
if serializers is None:
_serializers[format] = module
else:
serializers[format] = module
def unregister_serializer(format):
"Unregister a given serializer. This is not a thread-safe operation."
del _serializers[format]
def get_serializer(format):
if not _serializers:
_load_serializers()
return _serializers[format].Serializer
def get_serializer_formats():
if not _serializers:
_load_serializers()
return _serializers.keys()
def get_public_serializer_formats():
if not _serializers:
_load_serializers()
return [k for k, v in _serializers.iteritems() if not v.Serializer.internal_use_only]
def get_deserializer(format):
if not _serializers:
_load_serializers()
return _serializers[format].Deserializer
def serialize(format, queryset, **options):
"""
Serialize a queryset (or any iterator that returns database objects) using
a certain serializer.
"""
s = get_serializer(format)()
s.serialize(queryset, **options)
return s.getvalue()
def deserialize(format, stream_or_string):
"""
Deserialize a stream or a string. Returns an iterator that yields ``(obj,
m2m_relation_dict)``, where ``obj`` is an instantiated -- but *unsaved* --
object, and ``m2m_relation_dict`` is a dictionary of ``{m2m_field_name :
list_of_related_objects}``.
"""
d = get_deserializer(format)
return d(stream_or_string)
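# Example (hedged): persisting deserialized objects, assuming `data` holds the output
# of a previous serialize("json", ...) call.
# for deserialized_object in deserialize("json", data):
#     deserialized_object.save()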
def _load_serializers():
"""
Register built-in and settings-defined serializers. This is done lazily so
that user code has a chance to (e.g.) set up custom settings without
needing to be careful of import order.
"""
global _serializers
serializers = {}
for format in BUILTIN_SERIALIZERS:
register_serializer(format, BUILTIN_SERIALIZERS[format], serializers)
if hasattr(settings, "SERIALIZATION_MODULES"):
for format in settings.SERIALIZATION_MODULES:
register_serializer(format, settings.SERIALIZATION_MODULES[format], serializers)
_serializers = serializers
|
the-stack_0_35 | __copyright__ = "Copyright 2017, Georgia Institute of Technology"
__license__ = "MIT"
__version_info__ = ('0', '0', '1')
__version__ = '.'.join(__version_info__)
__maintainer__ = "Marat Dukhan"
__email__ = "[email protected]"
import logging
logger = logging.getLogger("confu")
logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
class ConsoleFormatter(logging.Formatter):
def __init__(self):
super(ConsoleFormatter, self).__init__("%(message)s")
def format(self, record):
message = super(ConsoleFormatter, self).format(record)
if record.levelname in ["DEBUG", "INFO"]:
return message[0].upper() + message[1:]
else:
return {
"WARNING": "Warning", "ERROR": "Error", "CRITICAL": "Fatal error"
}[record.levelname] + ": " + message[0].lower() + message[1:]
console_formatter = ConsoleFormatter()
console_handler.setFormatter(console_formatter)
logger.addHandler(console_handler)
from confu.builds import Build
from confu.platform import Platform
def standard_parser(description="Confu configuration script"):
import argparse
from os import linesep
from confu.platform import host, possible_targets
parser = argparse.ArgumentParser(description=description,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--target", dest="target", metavar="PLATFORM", type=Platform,
default=host.name,
help="platform where the code will run. Potential options:" + linesep +
" " + host.name + " (default)" + linesep +
linesep.join(" " + target for target in possible_targets[1:]))
parser.add_argument("--toolchain", dest="toolchain", metavar="TOOLCHAIN",
choices=["auto", "gnu", "clang"], default="auto",
help="toolchain to use for compilation. Potential options:" + linesep +
linesep.join(" " + name for name in ["auto (default)", "gnu", "clang"]))
return parser
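# Example (hedged): a minimal configure.py sketch; Build.from_options is assumed to be
# the entry point exposed by confu.builds.
# parser = standard_parser("example configuration script")
# options = parser.parse_args()
# build = Build.from_options(options)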
|
the-stack_0_36 | import pytest
from commitizen import BaseCommitizen, defaults, factory
from commitizen.config import BaseConfig
from commitizen.exceptions import NoCommitizenFoundException
def test_factory():
config = BaseConfig()
config.settings.update({"name": defaults.name})
r = factory.commiter_factory(config)
assert isinstance(r, BaseCommitizen)
def test_factory_fails():
config = BaseConfig()
config.settings.update({"name": "Nothing"})
with pytest.raises(NoCommitizenFoundException) as excinfo:
factory.commiter_factory(config)
assert "The committer has not been found in the system." in str(excinfo)
|
the-stack_0_38 | import collections
import enum
from itertools import starmap, product
import six
from ibis.compat import suppress
import ibis.util as util
import ibis.common as com
import ibis.expr.types as ir
import ibis.expr.schema as sch
import ibis.expr.datatypes as dt
try:
from cytoolz import curry, compose, identity
except ImportError:
from toolz import curry, compose, identity
def highest_precedence_dtype(exprs):
"""Return the highest precedence type from the passed expressions
Also verifies that there are valid implicit casts between any of the types
and the selected highest precedence type.
This is a thin wrapper around datatypes highest precedence check.
Parameters
----------
exprs : Iterable[ir.ValueExpr]
A sequence of Expressions
Returns
-------
dtype: DataType
The highest precedence datatype
"""
if not exprs:
raise ValueError('Must pass at least one expression')
return dt.highest_precedence(expr.type() for expr in exprs)
def castable(source, target):
"""Return whether source ir type is implicitly castable to target
Based on the underlying datatypes and the value in case of Literals
"""
op = source.op()
value = getattr(op, 'value', None)
return dt.castable(source.type(), target.type(), value=value)
def comparable(left, right):
return castable(left, right) or castable(right, left)
def cast(source, target):
"""Currently Literal to *Scalar implicit casts are allowed"""
import ibis.expr.operations as ops # TODO: don't use ops here
if not castable(source, target):
raise com.IbisTypeError('Source is not castable to target type!')
# currently it prevents column -> scalar implicit castings
# however the datatypes are matching
op = source.op()
if not isinstance(op, ops.Literal):
raise com.IbisTypeError('Only able to implicitly cast literals!')
out_type = target.type().scalar_type()
return out_type(op)
# ---------------------------------------------------------------------
# Input type validators / coercion functions
class validator(curry):
def __repr__(self):
return '{}({}{})'.format(
self.func.__name__,
repr(self.args)[1:-1],
', '.join('{}={!r}'.format(k, v) for k, v in self.keywords.items())
)
noop = validator(identity)
@validator
def one_of(inners, arg):
"""At least one of the inner validators must pass"""
for inner in inners:
with suppress(com.IbisTypeError, ValueError):
return inner(arg)
rules_formatted = ', '.join(map(repr, inners))
raise com.IbisTypeError(
'Arg passes neither of the following rules: {}'.format(rules_formatted)
)
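# Example (hedged): validators are curried, so they can be partially applied and then
# called on a value; `integer` and `string` are the rules defined further below.
# num_or_str = one_of([integer, string])
# expr = num_or_str(42)  # coerces the Python literal to an ibis value expression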
@validator
def all_of(inners, arg):
"""All of the inner valudators must pass.
The order of inner validators matters.
Parameters
----------
inners : List[validator]
Functions are applied from right to left, so all_of([rule1, rule2], arg) is
the same as rule1(rule2(arg)).
arg : Any
Value to be validated.
Returns
-------
arg : Any
Value, possibly coerced by inner validators to the appropriate types
"""
return compose(*inners)(arg)
@validator
def isin(values, arg):
if arg not in values:
raise ValueError(
'Value with type {} is not in {!r}'.format(type(arg), values)
)
if isinstance(values, dict): # TODO check for mapping instead
return values[arg]
else:
return arg
@validator
def member_of(obj, arg):
if isinstance(arg, enum.Enum):
enum.unique(obj) # check that enum has unique values
arg = arg.name
if not hasattr(obj, arg):
raise com.IbisTypeError(
'Value with type {} is not a member of {}'.format(type(arg), obj)
)
return getattr(obj, arg)
@validator
def list_of(inner, arg, min_length=0):
if isinstance(arg, six.string_types) or not isinstance(
arg, (collections.Sequence, ir.ListExpr)
):
raise com.IbisTypeError('Argument must be a sequence')
if len(arg) < min_length:
raise com.IbisTypeError(
'Arg must have at least {} number of elements'.format(min_length)
)
return ir.sequence(list(map(inner, arg)))
@validator
def datatype(arg):
return dt.dtype(arg)
@validator
def instance_of(klass, arg):
"""Require that a value has a particular Python type."""
if not isinstance(arg, klass):
raise com.IbisTypeError(
'Given argument with type {} is not an instance of {}'.format(
type(arg), klass
)
)
return arg
@validator
def value(dtype, arg):
"""Validates that the given argument is a Value with a particular datatype
Parameters
----------
dtype : DataType subclass or DataType instance
arg : python literal or an ibis expression
If a python literal is given the validator tries to coerce it to an ibis
literal.
Returns
-------
arg : AnyValue
An ibis value expression with the specified datatype
"""
if not isinstance(arg, ir.Expr):
# coerce python literal to ibis literal
arg = ir.literal(arg)
if not isinstance(arg, ir.AnyValue):
raise com.IbisTypeError('Given argument with type {} is not a value '
'expression'.format(type(arg)))
# retrieve literal values for implicit cast check
value = getattr(arg.op(), 'value', None)
if isinstance(dtype, type) and isinstance(arg.type(), dtype):
# dtype class has been specified like dt.Interval or dt.Decimal
return arg
elif dt.castable(arg.type(), dt.dtype(dtype), value=value):
# dtype instance or string has been specified and arg's dtype is
# implicitly castable to it, like dt.int8 is castable to dt.int64
return arg
else:
raise com.IbisTypeError('Given argument with datatype {} is not '
'subtype of {} nor implicitly castable to '
'it'.format(arg.type(), dtype))
@validator
def scalar(inner, arg):
return instance_of(ir.ScalarExpr, inner(arg))
@validator
def column(inner, arg):
return instance_of(ir.ColumnExpr, inner(arg))
@validator
def array_of(inner, arg):
val = arg if isinstance(arg, ir.Expr) else ir.literal(arg)
argtype = val.type()
if not isinstance(argtype, dt.Array):
raise com.IbisTypeError(
'Argument must be an array, got expression {} which is of type '
'{}'.format(val, val.type()))
return value(dt.Array(inner(val[0]).type()), val)
any = value(dt.any)
double = value(dt.double)
string = value(dt.string)
boolean = value(dt.boolean)
integer = value(dt.int64)
decimal = value(dt.Decimal)
floating = value(dt.float64)
date = value(dt.date)
time = value(dt.time)
timestamp = value(dt.Timestamp)
category = value(dt.category)
temporal = one_of([timestamp, date, time])
strict_numeric = one_of([integer, floating, decimal])
soft_numeric = one_of([integer, floating, decimal, boolean])
numeric = soft_numeric
set_ = value(dt.Set)
array = value(dt.Array)
struct = value(dt.Struct)
mapping = value(dt.Map(dt.any, dt.any))
@validator
def interval(arg, units=None):
arg = value(dt.Interval, arg)
unit = arg.type().unit
if units is not None and unit not in units:
msg = 'Interval unit `{}` is not among the allowed ones {}'
raise com.IbisTypeError(msg.format(unit, units))
return arg
@validator
def client(arg):
from ibis.client import Client
return instance_of(Client, arg)
# ---------------------------------------------------------------------
# Output type promoter functions
def promoter(fn):
def wrapper(name_or_value, *args, **kwargs):
if isinstance(name_or_value, str):
return lambda self: fn(getattr(self, name_or_value),
*args, **kwargs)
else:
return fn(name_or_value, *args, **kwargs)
return wrapper
@promoter
def shape_like(arg, dtype=None):
if isinstance(arg, (tuple, list, ir.ListExpr)):
datatype = dtype or highest_precedence_dtype(arg)
columnar = util.any_of(arg, ir.AnyColumn)
else:
datatype = dtype or arg.type()
columnar = isinstance(arg, ir.AnyColumn)
dtype = dt.dtype(datatype)
if columnar:
return dtype.array_type()
else:
return dtype.scalar_type()
@promoter
def scalar_like(arg):
output_dtype = arg.type()
return output_dtype.scalar_type()
@promoter
def array_like(arg):
output_dtype = arg.type()
return output_dtype.array_type()
column_like = array_like
@promoter
def typeof(arg):
return arg._factory
@validator
def table(schema, arg):
"""A table argument.
Parameters
----------
schema : Union[sch.Schema, List[Tuple[str, dt.DataType]]
A validator for the table's columns. Only column subset validators are
currently supported. Accepts any arguments that `sch.schema` accepts.
See the example for usage.
arg : The validatable argument.
Examples
--------
The following op will accept an argument named ``'table'``. Note that the
``schema`` argument specifies rules for columns that are required to be in
the table: ``time``, ``group`` and ``value1``. These must match the types
specified in the column rules. Column ``value2`` is optional, but if
present it must be of the specified type. The table may have extra columns
not specified in the schema.
"""
assert isinstance(arg, ir.TableExpr)
if arg.schema() >= sch.schema(schema):
return arg
raise com.IbisTypeError(
'Argument is not a table with column subset of {}'.format(schema)
)
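# Example (hedged): partially applying the table rule with a required column subset;
# the column names and the table expression are illustrative.
# needs_columns = table(schema=[('time', 'timestamp'), ('group', 'string'), ('value1', 'double')])
# validated = needs_columns(some_table_expr)  # raises IbisTypeError unless these columns are present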
# TODO: might just use bounds instead of actual literal values
# that could simplify interval binop output_type methods
def _promote_numeric_binop(exprs, op):
bounds, dtypes = [], []
for arg in exprs:
dtypes.append(arg.type())
if hasattr(arg.op(), 'value'):
# arg.op() is a literal
bounds.append([arg.op().value])
else:
bounds.append(arg.type().bounds)
# In some cases, the bounding type might be int8, even though neither
# of the types are that small. We want to ensure the containing type is
# _at least_ as large as the smallest type in the expression.
values = starmap(op, product(*bounds))
dtypes += [dt.infer(value, allow_overflow=True) for value in values]
return dt.highest_precedence(dtypes)
@promoter
def numeric_like(args, op):
if util.all_of(args, ir.IntegerValue):
dtype = _promote_numeric_binop(args, op)
return shape_like(args, dtype=dtype)
else:
return shape_like(args)
# TODO: create varargs marker for impala udfs
|
the-stack_0_39 | """
Test Convex Breaking
"""
import pytest
import secrets
from convex_api.account import Account
from convex_api.api import API
from convex_api.exceptions import ConvexAPIError
from convex_api.utils import (
add_0x_prefix,
to_address
)
def test_convex_recursion(convex, test_account):
chain_length = 4
address_list = []
for index in range(0, chain_length):
contract = f"""
(def chain-{index}
(deploy
'(do
(def stored-data
^{{:private? true}}
nil
)
(def chain-address
^{{:private? true}}
nil
)
(defn get
^{{:callable? true}}
[]
(call chain-address (get))
)
(defn set
^{{:callable? true}}
[x]
( if chain-address (call chain-address(set x)) (def stored-data x))
)
(defn set-chain-address
^{{:callable? true}}
[x]
(def chain-address x)
)
)
)
)
"""
convex.topup_account(test_account)
result = convex.send(contract, test_account)
address_list.append(to_address(result['value']))
for index in range(0, chain_length):
next_index = index + 1
if next_index == chain_length:
next_index = 0
call_address = address_list[next_index]
result = convex.send(f'(call chain-{index} (set-chain-address #{call_address}))', test_account)
test_number = secrets.randbelow(1000)
if index == chain_length - 1:
with pytest.raises(ConvexAPIError, match='DEPTH'):
result = convex.send(f'(call chain-{index} (set {test_number}))', test_account)
else:
result = convex.send(f'(call chain-0 (set {test_number}))', test_account)
assert(result)
assert(result['value'] == test_number)
with pytest.raises(ConvexAPIError, match='DEPTH'):
convex.query('(call chain-0 (get))', test_account)
def test_schedule_transfer(convex, test_account, other_account):
# You can send coins to an actor if it exports the receive-coin function
contract = """
(def transfer-for-ever
(deploy
'(do
(defn tx-delay
^{:callable? true}
[to-address amount]
(transfer to-address amount)
(def call-address *address*)
(schedule (+ *timestamp* 1000) (call call-address (tx-delay to-address amount)))
)
(defn tx-now
^{:callable? true}
[to-address amount]
(transfer to-address amount)
)
(defn show-schedule
^{:callable? true}
[]
[(get *state* :schedule) *address*]
)
(defn receive-coin
^{:callable? true}
[sender amount data]
(accept amount)
)
)
)
)
"""
# (call contract-address (tx-to to-address amount))
convex.topup_account(test_account)
convex.topup_account(other_account, 8000000)
result = convex.send(contract, test_account)
contract_address = to_address(result['value'])
convex.transfer(contract_address, 800000, other_account)
convex.topup_account(test_account)
result = convex.send(f'(call #{contract_address} (tx-delay #{other_account.address} 1000))', test_account)
print(result)
result = convex.send(f'(call #{contract_address} (show-schedule))', test_account)
print(result)
|
the-stack_0_40 | # Autores:
# Darlan de Castro Silva Filho
# Marcos Henrique Fernandes Marcone
from pandas import Series, DataFrame
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import plotly.graph_objs as go
import plotly.express as px
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table
# Styling functions and options of the libraries used
plt.style.use('classic')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# Function that builds the path to a database file
# Input: nome = string
# Output: path = string
def path(nome):
return './'+nome+'.csv'
# Imports the files (databases) that are used
unidades = pd.read_csv(path('unidades'), sep=';')
docentes = pd.read_csv(path('docentes'), sep=';')
avaliacao = pd.read_csv(path('avaliacaoDocencia'), sep=';')
# Filters the professors who work in Natal and hold the category "Professor do Magistério Superior"
unidadesFiltradas = unidades.loc[:, [
'id_unidade', 'municipio', 'unidade_responsavel']]
docentesComUnidadeAcademica = pd.merge(
docentes, unidadesFiltradas, left_on="id_unidade_lotacao", right_on="id_unidade").drop('id_unidade', axis=1)
docentesNatalUnidadeAcademica = docentesComUnidadeAcademica[
docentesComUnidadeAcademica['municipio'] == 'NATAL']
docentesNatalMSUnidadeAcademica = docentesNatalUnidadeAcademica[
docentesNatalUnidadeAcademica['categoria'] == 'PROFESSOR DO MAGISTERIO SUPERIOR']
# Filters out the unidade_dirigente values not accepted by the application
docentesNatalMSUnidadeAcademica['unidade_dirigente'] = np.where(docentesNatalMSUnidadeAcademica['unidade_responsavel'] == 'UNIVERSIDADE FEDERAL DO RIO GRANDE DO NORTE', (
docentesNatalMSUnidadeAcademica['lotacao']), (docentesNatalMSUnidadeAcademica['unidade_responsavel']))
unidadesNaoAceitas = ['PRÓ-REITORIA DE EXTENSÃO UNIVERSITÁRIA', 'MUSEU CÂMARA CASCUDO', 'UNIVERSIDADE FEDERAL DO RIO GRANDE DO NORTE', 'EDITORA UNIVERSITÁRIA', 'EMPRESA BRASILEIRA DE SERVICOS HOSPITALARES',
'REITORIA', 'INSTITUTO DE MEDICINA TROPICAL - IMT-RN', 'SECRETARIA DE EDUCAÇÃO A DISTÂNCIA', 'GABINETE DO REITOR', 'SUPERINTENDENCIA DE COMUNICACAO', 'PRÓ-REITORIA DE ADMINISTRAÇÃO (PROAD)']
docentesNatalMSUnidadeAcademica = docentesNatalMSUnidadeAcademica[~docentesNatalMSUnidadeAcademica['unidade_dirigente'].isin(
unidadesNaoAceitas)]
# Bar chart of the distribution of UFRN professors by academic unit
quantidadeDocentesUnidadeDirigente = docentesNatalMSUnidadeAcademica['unidade_dirigente'].value_counts(
)
barraDocentesUnidadeDirigente = go.Bar(x=quantidadeDocentesUnidadeDirigente.index,
y=quantidadeDocentesUnidadeDirigente.values, text=quantidadeDocentesUnidadeDirigente.values, textposition='auto')
layoutDocentesUnidadeDirigente = go.Layout(title='Gráfico de docentes por unidade responsável (UFRN 2021 - Unidades de Natal - Magistério Superior)', xaxis={
'title': 'Unidade responsável'}, yaxis={'title': 'Número de docentes'})
figuraDocentesUnidadeDirigente = go.Figure(
data=[barraDocentesUnidadeDirigente], layout=layoutDocentesUnidadeDirigente)
# Pie chart of the distribution of UFRN professors by sex
quantidadeDocentesSexo = docentesNatalMSUnidadeAcademica['sexo'].value_counts()
piechartSexo = go.Pie(labels=['Masculino', 'Feminino'], values=quantidadeDocentesSexo.values, text=quantidadeDocentesSexo.values, marker={
'colors': ['#665FD1', '#FFFF7E'], 'line': dict(color='#000000', width=2)})
layoutDocentesSexo = go.Layout(title='Gráfico de docentes por sexo (UFRN 2021 - Unidades de Natal - Magistério Superior)',
xaxis={'title': 'Docentes'}, yaxis={'title': 'Número de docentes'}, barmode='stack')
figuraDocentesSexo = go.Figure(data=piechartSexo, layout=layoutDocentesSexo)
# Pie chart of the distribution of UFRN professors by academic degree
quantidadeDocentesFormacao = docentesNatalMSUnidadeAcademica['formacao'].value_counts(
)
piechartFormacao = go.Pie(labels=quantidadeDocentesFormacao.index, values=quantidadeDocentesFormacao.values, text=quantidadeDocentesFormacao.values, marker={
'colors': ['#665FD1', '#FFFF7E', '#F5054F', '#3F012C'], 'line': dict(color='#000000', width=2)})
layoutDocentesFormacao = go.Layout(title='Gráfico de docentes por formação (UFRN 2021 - Unidades de Natal - Magistério Superior)',
xaxis={'title': 'Formação'}, yaxis={'title': 'Número de docentes'})
figuraDocentesFormacao = go.Figure(
data=[piechartFormacao], layout=layoutDocentesFormacao)
# Pie chart of the distribution of UFRN professors by functional class
quantidadeDocentesClasseFuncional = docentesNatalMSUnidadeAcademica['classe_funcional'].value_counts(
).sort_index()
piechartClasseFuncional = go.Pie(labels=quantidadeDocentesClasseFuncional.index, values=quantidadeDocentesClasseFuncional.values,
text=quantidadeDocentesClasseFuncional.values, marker={'colors': px.colors.qualitative.Dark24, 'line': dict(color='#000000', width=2)})
barraDocentesClasseFuncional = go.Bar(x=quantidadeDocentesClasseFuncional.index, y=quantidadeDocentesClasseFuncional.values,
text=quantidadeDocentesClasseFuncional.values, textposition='auto', marker={'color': '#5D21D0'})
layoutDocentesClasseFuncional = go.Layout(title='Gráfico de docentes por classe funcional (UFRN 2021 - Unidades de Natal - Magistério Superior)', xaxis={
'title': 'Classe funcional'}, yaxis={'title': 'Número de docentes'}, height=450)
figuraDocentesClasseFuncional = go.Figure(
data=[piechartClasseFuncional], layout=layoutDocentesClasseFuncional)
# Creates a chart highlighting the professors' functional-class data grouped by unidade_dirigente
filtroClasseFuncional = ['unidade_dirigente', 'classe_funcional']
docentesClasseGroupBy = docentesNatalMSUnidadeAcademica.groupby(
filtroClasseFuncional).count().reset_index().loc[:, filtroClasseFuncional + ['nome']]
docentesClasseGroupBy['quantidade'] = docentesClasseGroupBy['nome']
del docentesClasseGroupBy['nome']
figClasseDetalhe = px.bar(docentesClasseGroupBy, x="unidade_dirigente", y="quantidade", color="classe_funcional",
text='quantidade', color_discrete_sequence=px.colors.qualitative.Bold, height=800)
# Creates a chart highlighting the professors' sex data grouped by unidade_dirigente
filtroSexo = ['unidade_dirigente', 'sexo']
docentesSexoGroupBy = docentesNatalMSUnidadeAcademica.groupby(
filtroSexo).count().reset_index().loc[:, filtroSexo + ['nome']]
docentesSexoGroupBy['quantidade'] = docentesSexoGroupBy['nome']
del docentesSexoGroupBy['nome']
figSexoDetalhe = px.bar(docentesSexoGroupBy, x="unidade_dirigente", y="quantidade",
color="sexo", text='quantidade', color_discrete_sequence=px.colors.qualitative.Bold)
# Creates a chart highlighting the professors' academic-degree data grouped by unidade_dirigente
filtroFormacao = ['unidade_dirigente', 'formacao']
docentesFormacaoGroupBy = docentesNatalMSUnidadeAcademica.groupby(
filtroFormacao).count().reset_index().loc[:, filtroFormacao + ['nome']]
docentesFormacaoGroupBy['quantidade'] = docentesFormacaoGroupBy['nome']
del docentesFormacaoGroupBy['nome']
figFormacaoDetalhe = px.bar(docentesFormacaoGroupBy, x="unidade_dirigente",
y="quantidade", color="formacao", text='quantidade', range_y=[0, 400])
# Creates a dictionary with the data indexed by unidade_dirigente
unidadesDirigentes = docentesNatalMSUnidadeAcademica['unidade_dirigente'].unique(
)
unidadesDirigentes
dfUnidadesDirigentes = {}
for unidadeDirigente in unidadesDirigentes:
df = docentesNatalMSUnidadeAcademica[docentesNatalMSUnidadeAcademica['unidade_dirigente'] == unidadeDirigente]
dfUnidadesDirigentes[unidadeDirigente] = df
# Function used to filter a DataFrame, grouping the data by one property and filtering it by two others
# Inputs: df = DataFrame, title = string, x = string, y = string, cor = ['rgb(a,b,c)','rgb(d,e,f)'...]
# Outputs: figAdmissao = bar chart
def filtrarDFPorUnidadeDirigente(df, title, x, y, cor=px.colors.qualitative.Bold):
dfFinal = df[title]
filtro = [x, y]
docentesFiltroGroupBy = dfFinal.groupby(
filtro).count().reset_index().loc[:, filtro + ['nome']]
docentesFiltroGroupBy['quantidade'] = docentesFiltroGroupBy['nome']
del docentesFiltroGroupBy['nome']
figAdmissao = px.bar(docentesFiltroGroupBy, x=x, y="quantidade", color=y,
text='quantidade', color_discrete_sequence=cor, title=title)
return figAdmissao
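# Example (hedged): building a detailed chart for a single unidade_dirigente; the unit
# name below is illustrative and must exist as a key of dfUnidadesDirigentes.
# figDetalhe = filtrarDFPorUnidadeDirigente(dfUnidadesDirigentes, 'ESCOLA DE CIÊNCIAS E TECNOLOGIA', 'lotacao', 'sexo')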
# Creates and formats a general DataFrame with every professor and the attributes needed to build the charts and the averages table
avaliacaoDocentesFiltro = avaliacao[avaliacao['nome_docente'].isin(
docentesNatalMSUnidadeAcademica['nome'])]
avaliacaoDocentesFiltro['total_postura'] = avaliacaoDocentesFiltro['postura_profissional_media'] * \
avaliacaoDocentesFiltro['qtd_discentes']
avaliacaoDocentesFiltro['total_atuacao'] = avaliacaoDocentesFiltro['atuacao_profissional_media'] * \
avaliacaoDocentesFiltro['qtd_discentes']
docentesMedias = avaliacaoDocentesFiltro.loc[:, [
'nome_docente', 'qtd_discentes', 'total_postura', 'total_atuacao']]
docentesMediasGroupBy = docentesMedias.groupby(['nome_docente']).sum()
docentesMediasGroupBy['media_postura'] = docentesMediasGroupBy['total_postura'] / \
docentesMediasGroupBy['qtd_discentes']
docentesMediasGroupBy['media_atuacao'] = docentesMediasGroupBy['total_atuacao'] / \
docentesMediasGroupBy['qtd_discentes']
docentesMediasGroupBy['media_alunos'] = avaliacaoDocentesFiltro.groupby(
['nome_docente']).mean().loc[:, 'autoavaliacao_aluno_media']
docentesMediasNatalMSUnidadeAcademica = pd.merge(
docentesNatalMSUnidadeAcademica, docentesMediasGroupBy, left_on="nome", right_on="nome_docente").round(3)
# Drops the fields not needed for the grades table and assigns the remaining fields to a new DataFrame
docenteParaTabelaNotas = docentesMediasNatalMSUnidadeAcademica.loc[:, [
'nome', 'media_postura', 'media_atuacao', 'media_alunos', 'unidade_dirigente', 'lotacao', 'qtd_discentes']]
# Filters and formats a DataFrame to group the media_postura, media_atuacao and media_alunos data by unidade_dirigente
docentesMediaUnidadeDirigente = docentesMediasNatalMSUnidadeAcademica.groupby(
'unidade_dirigente').mean().loc[:, ['media_postura', 'media_atuacao', 'media_alunos']]
docentesMediaUnidadeDirigente['unidade_dirigente'] = docentesMediaUnidadeDirigente.index
# Filters and formats a DataFrame with the media_postura, media_atuacao and media_alunos information shown in the time-evolution line chart
docentesMediasAno = avaliacaoDocentesFiltro.loc[:, [
'nome_docente', 'qtd_discentes', 'total_postura', 'total_atuacao', 'ano']]
docentesMediasAnoGroupBy = docentesMediasAno.groupby(['ano']).sum()
docentesMediasAnoGroupBy['media_postura'] = docentesMediasAnoGroupBy['total_postura'] / \
docentesMediasAnoGroupBy['qtd_discentes']
docentesMediasAnoGroupBy['media_atuacao'] = docentesMediasAnoGroupBy['total_atuacao'] / \
docentesMediasAnoGroupBy['qtd_discentes']
docentesMediasAnoGroupBy['media_alunos'] = avaliacaoDocentesFiltro.groupby(
['ano']).mean().loc[:, 'autoavaliacao_aluno_media']
docentesMediasAnoGroupBy['ano'] = docentesMediasAnoGroupBy.index
# Creates the line chart of the time evolution of media_postura, media_atuacao and media_alunos
figuraMediasAnoGroupBy = go.Figure()
figuraMediasAnoGroupBy.add_trace(go.Scatter(x=docentesMediasAnoGroupBy['ano'], y=docentesMediasAnoGroupBy['media_postura'],
mode='lines',
name='media_postura'))
figuraMediasAnoGroupBy.add_trace(go.Scatter(x=docentesMediasAnoGroupBy['ano'], y=docentesMediasAnoGroupBy['media_atuacao'],
mode='lines',
name='media_atuacao'))
figuraMediasAnoGroupBy.add_trace(go.Scatter(x=docentesMediasAnoGroupBy['ano'], y=docentesMediasAnoGroupBy['media_alunos'],
mode='lines',
name='media_alunos'))
figuraMediasAnoGroupBy.update_layout(
title='Evolução da avaliação dos discentes e docentes do magistério superior da UFRN nos anos de 2013 à 2019')
# Defines the unidade_dirigente options shown in 'dropdown-1'
indicadoresDropdown1 = [
'GERAL'] + list(docentesNatalMSUnidadeAcademica['unidade_dirigente'].unique())
# Styles for the divs of the initial charts
estilosDivGraficosIniciais = {'width': '95%',
'display': 'inline-block', 'padding': '0 20'}
# Creates the app variable and picks the application stylesheets
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# Defines the layout presented on the web page
app.layout = html.Div([
html.H1(children='Análise dos dados dos docentes do magistério superior da UFRN das unidades de Natal no ano de 2021'),
html.Div([
dcc.Markdown('''
Trabalho referente à disciplina DCA-0131, Ciência de Dados, ministrada pelo professor Luiz Affonso Hederson Guedes de Oliveira.
Plataforma desenvolvida pelos discentes Darlan de Castro e Marcos Henrique, alunos do curso de Engenharia Computação da UFRN.
A aplicação web desenvolvida consiste em uma análise exploratória dos dados sobre os docentes do magistério superior das unidades de Natal da Universidade Federal do Rio Grande do Norte (UFRN) no ano de 2021.
Os dados utilizados aqui podem ser acessados pelo seguinte site: [http://dados.ufrn.br](http://dados.ufrn.br)
As principais tecnologias usadas para o desenvolvimento da plataforma foram:
* Linguagem Python;
* Pacotes Pandas, Plotly e Dash;
* Heroku (deploy da aplicação).
''')
]),
html.H2(
children='Divisão dos docentes do magistério superior da UFRN no ano de 2021'),
html.Div([
html.Div([
dcc.Markdown('''
Nesta seção da aplicação pode-se acompanhar a divisão dos docentes através de difentes categorias, como sexo, formação e classe funcional, assim como ver como eles estão distribuídos por cada unidade responsável na UFRN.
Na primeira caixa de seleção pode-se escolher qual unidade responsável deseja-se analisar. Assim, são atualizados os três primeiros gráficos com informações das divisões dos decentes referentes a cada lotação que compõe aquela unidade responsável.
Se a opção for escolhida for "GERAL", então pode-se mostar gráficos gerais sobre toda as unidades de Natal da UFRN, ou gráficos detalhados mostrando a divisão por unidades responsáveis.
'''),
dcc.Dropdown(
id='dropdown-1',
options=[{'label': i, 'value': i}
for i in indicadoresDropdown1],
value='GERAL'
),
dcc.RadioItems(
id='radioitems-1',
options=[{'label': i, 'value': i}
for i in ['GERAL', 'DETALHADA']],
value='GERAL',
labelStyle={'display': 'inline-block'}
)
],
style={'width': '80%', 'display': 'inline-block'}),
html.Div([
dcc.Graph(
id='grafico-sexo')
], style=estilosDivGraficosIniciais),
html.Div([
dcc.Graph(
id='grafico-formacao')
], style=estilosDivGraficosIniciais),
html.Div([
dcc.Graph(
id='grafico-classe')
], style=estilosDivGraficosIniciais),
html.Div([
dcc.Graph(
id='grafico-sobra',
figure=figuraDocentesUnidadeDirigente)
], style=estilosDivGraficosIniciais, id='div-grafico-sobra'),
]),
    html.H2(children='Estatísticas das avaliações dos docentes do magistério superior da UFRN (campus Natal) nos anos de 2013 a 2019'),
dcc.Markdown('''
    Nesta seção da aplicação pode-se acompanhar dados sobre as avaliações dos docentes da UFRN e da autoavaliação dos alunos feita a cada fim de semestre. Os dados disponibilizados constam do período de 2013 a 2019.
    Ao todo, são três dados importantes a serem considerados: a média de postura dos docentes, a média de atuação dos docentes e a autoavaliação dos alunos.
No primeiro gráfico pode-se acompanhar a média desses três quesitos por cada unidade responsável.
'''),
html.Div([
dcc.Graph(
id='grafico-nota-1')
], style={'width': '95%', 'display': 'inline-block', 'padding': '0 20'}),
html.Div([
dcc.Slider(
id='slider-grafico-nota-1',
min=1,
max=3,
value=1,
marks={str(i): str(i) for i in [1, 2, 3]},
step=None)],
style={'width': '80%', 'padding': '0px 15px 15px 15px'}),
dcc.Markdown('''
* Opção 1 - Média de atuação dos docentes;
* Opção 2 - Média de postura dos docentes;
* Opção 3 - Média da autoavaliação dos discentes.
'''),
dcc.Markdown('''
No segundo gráfico há dados sobre a evolução das médias de postura e atuação dos docentes e autoavaliação dos discentes ao longo dos anos.
'''),
html.Div([
dcc.Graph(
id='grafico-nota-2',
figure=figuraMediasAnoGroupBy)
], style={'width': '95%', 'display': 'inline-block', 'padding': '0 20'}),
dcc.Markdown('''
    No terceiro gráfico pode-se ver um histograma com a frequência das médias de postura e atuação dos docentes dividida por sexo.
'''),
html.Div([
dcc.Graph(
id='grafico-histograma')
], style={'width': '95%', 'display': 'inline-block', 'padding': '0 20'}),
html.Div([
dcc.Slider(
id='slider-grafico-histograma',
min=1,
max=2,
value=1,
marks={str(i): str(i) for i in [1, 2]},
step=None)],
style={'width': '80%', 'padding': '0px 15px 15px 15px'}),
dcc.Markdown('''
* Opção 1 - Média de atuação dos docentes;
* Opção 2 - Média de postura dos docentes.
'''),
dcc.Markdown('''
    Nesta parte, pode-se selecionar uma unidade responsável (primeira caixa de seleção) e a partir dela escolher uma lotação (segunda caixa de seleção) para verificar a média de atuação e postura de cada professor, assim como da autoavaliação dos discentes das turmas desses docentes e a quantidade de discentes que passaram por eles, para cada departamento da UFRN.
'''),
html.Div([
dcc.Dropdown(
id='dropdown-2',
options=[{'label': i, 'value': i}
for i in docenteParaTabelaNotas['unidade_dirigente'].unique()],
value=docenteParaTabelaNotas['unidade_dirigente'].iloc[0]
)],
style={'width': '80%', 'display': 'inline-block'}),
html.Div([
dcc.Dropdown(
id='dropdown-3',
)],
style={'width': '80%', 'display': 'inline-block'}),
html.Div([
dash_table.DataTable(
id='table-nota',
columns=[{"name": i, "id": i} for i in [
'nome', 'media_postura', 'media_atuacao', 'media_alunos', 'qtd_discentes']],
style_cell={'textAlign': 'left'},
)
], style={'width': '95%', 'display': 'inline-block', 'padding': '0 20'}),
])
# Callback that toggles the visibility (style) of the bar-chart div (docente count per unidade_dirigente)
# Inputs: 'value' - 'dropdown-1', 'value' - 'radioitems-1'
# Output: 'style' - 'div-grafico-sobra'
@app.callback(
dash.dependencies.Output('div-grafico-sobra', 'style'),
[dash.dependencies.Input('dropdown-1', 'value'),
dash.dependencies.Input('radioitems-1', 'value')])
def visibility_graficoSobra(dropValue, radioValue):
if(radioValue == 'GERAL' and dropValue == 'GERAL'):
estilosDivGraficosIniciais['display'] = 'inline-block'
return estilosDivGraficosIniciais
estilosDivGraficosIniciais['display'] = 'none'
return estilosDivGraficosIniciais
# Callback that updates the 'figure' of the chart by sex
# Inputs: 'value' - 'dropdown-1', 'value' - 'radioitems-1'
# Output: 'figure' - 'grafico-sexo'
@app.callback(
dash.dependencies.Output('grafico-sexo', 'figure'),
[dash.dependencies.Input('dropdown-1', 'value'),
dash.dependencies.Input('radioitems-1', 'value')])
def att_sexo(dropValue, radioValue):
if(radioValue == 'GERAL' and dropValue == 'GERAL'):
return figuraDocentesSexo
elif(radioValue == 'DETALHADA' and dropValue == 'GERAL'):
return figSexoDetalhe
return filtrarDFPorUnidadeDirigente(dfUnidadesDirigentes, dropValue, 'lotacao', 'sexo')
# Callback that updates the 'figure' of the chart by academic background (formação)
# Inputs: 'value' - 'dropdown-1', 'value' - 'radioitems-1'
# Output: 'figure' - 'grafico-formacao'
@app.callback(
dash.dependencies.Output('grafico-formacao', 'figure'),
[dash.dependencies.Input('dropdown-1', 'value'),
dash.dependencies.Input('radioitems-1', 'value')])
def att_formacao(dropValue, radioValue):
if(radioValue == 'GERAL' and dropValue == 'GERAL'):
return figuraDocentesFormacao
elif(radioValue == 'DETALHADA' and dropValue == 'GERAL'):
return figFormacaoDetalhe
return filtrarDFPorUnidadeDirigente(dfUnidadesDirigentes, dropValue, 'lotacao', 'formacao')
# Callback that updates the 'figure' of the chart by functional class
# Inputs: 'value' - 'dropdown-1', 'value' - 'radioitems-1'
# Output: 'figure' - 'grafico-classe'
@app.callback(
dash.dependencies.Output('grafico-classe', 'figure'),
[dash.dependencies.Input('dropdown-1', 'value'),
dash.dependencies.Input('radioitems-1', 'value')])
def att_classe(dropValue, radioValue):
if(radioValue == 'GERAL' and dropValue == 'GERAL'):
return figuraDocentesClasseFuncional
elif(radioValue == 'DETALHADA' and dropValue == 'GERAL'):
return figClasseDetalhe
return filtrarDFPorUnidadeDirigente(dfUnidadesDirigentes, dropValue, 'lotacao', 'classe_funcional')
# Callback that updates the 'figure' of the evaluation chart
# Input: 'value' - 'slider-grafico-nota-1'
# Output: 'figure' - 'grafico-nota-1'
@app.callback(
dash.dependencies.Output('grafico-nota-1', 'figure'),
[dash.dependencies.Input('slider-grafico-nota-1', 'value')])
def att_nota1(sliderValue):
var = 'media_atuacao'
if sliderValue == 2:
var = 'media_postura'
elif sliderValue == 3:
var = 'media_alunos'
return px.scatter(docentesMediaUnidadeDirigente, x="unidade_dirigente", y=var,
size=var, hover_name="unidade_dirigente", color="unidade_dirigente")
# Callback that updates the 'figure' of the histogram
# Input: 'value' - 'slider-grafico-histograma'
# Output: 'figure' - 'grafico-histograma'
@app.callback(
dash.dependencies.Output('grafico-histograma', 'figure'),
[dash.dependencies.Input('slider-grafico-histograma', 'value')])
def att_histograma(sliderValue):
var = 'media_atuacao'
if sliderValue == 2:
var = 'media_postura'
    return px.histogram(docentesMediasNatalMSUnidadeAcademica, x=var, color="sexo", title='Histograma da avaliação dos docentes do magistério superior da UFRN nos anos de 2013 a 2019')
# Callback that updates the 'options' of the lotação dropdown used by the table
# Input: 'value' - 'dropdown-2'
# Output: 'options' - 'dropdown-3'
@app.callback(
dash.dependencies.Output('dropdown-3', 'options'),
[dash.dependencies.Input('dropdown-2', 'value')])
def att_dropdown3Options(dropValue):
df = docenteParaTabelaNotas[docenteParaTabelaNotas['unidade_dirigente'] == dropValue]
del df['unidade_dirigente']
return [{'label': 'GERAL', 'value': 'GERAL'}] + [{'label': i, 'value': i} for i in df['lotacao'].unique()]
# Callback that resets the 'value' of the lotação dropdown used by the table
# Input: 'value' - 'dropdown-2'
# Output: 'value' - 'dropdown-3'
@app.callback(
dash.dependencies.Output('dropdown-3', 'value'),
[dash.dependencies.Input('dropdown-2', 'value')])
def att_dropdown3Value(dropValue):
return 'GERAL'
# Callback that updates the 'data' of the table showing the professors' scores per unidade_dirigente and lotação
# Inputs: 'value' - 'dropdown-2', 'value' - 'dropdown-3'
# Output: 'data' - 'table-nota'
@app.callback(
dash.dependencies.Output('table-nota', 'data'),
[dash.dependencies.Input('dropdown-2', 'value'),
dash.dependencies.Input('dropdown-3', 'value')])
def att_table(dropValue2, dropValue3):
df = docenteParaTabelaNotas[docenteParaTabelaNotas['unidade_dirigente'] == dropValue2]
del df['unidade_dirigente']
if dropValue3 == 'GERAL':
del df['lotacao']
return df.to_dict("records")
df = docenteParaTabelaNotas[docenteParaTabelaNotas['lotacao'] == dropValue3]
del df['lotacao']
return df.to_dict("records")
# Assign the application's server to the 'server' variable (used by the deployment platform)
server = app.server
if __name__ == '__main__':
app.run_server(debug=True)
|
the-stack_0_43 | # -*- coding: utf-8 -*-
"""
werkzeug.testapp
~~~~~~~~~~~~~~~~
Provide a small test application that can be used to test a WSGI server
and check it for WSGI compliance.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import werkzeug
from textwrap import wrap
from werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response
from werkzeug.utils import escape
import base64
logo = Response(base64.b64decode(
'''R0lGODlhoACgAOMIAAEDACwpAEpCAGdgAJaKAM28AOnVAP3rAP/////////
//////////////////////yH5BAEKAAgALAAAAACgAKAAAAT+EMlJq704680R+F0ojmRpnuj0rWnrv
nB8rbRs33gu0bzu/0AObxgsGn3D5HHJbCUFyqZ0ukkSDlAidctNFg7gbI9LZlrBaHGtzAae0eloe25
7w9EDOX2fst/xenyCIn5/gFqDiVVDV4aGeYiKkhSFjnCQY5OTlZaXgZp8nJ2ekaB0SQOjqphrpnOiq
ncEn65UsLGytLVmQ6m4sQazpbtLqL/HwpnER8bHyLrLOc3Oz8PRONPU1crXN9na263dMt/g4SzjMeX
m5yDpLqgG7OzJ4u8lT/P69ej3JPn69kHzN2OIAHkB9RUYSFCFQYQJFTIkCDBiwoXWGnowaLEjRm7+G
p9A7Hhx4rUkAUaSLJlxHMqVMD/aSycSZkyTplCqtGnRAM5NQ1Ly5OmzZc6gO4d6DGAUKA+hSocWYAo
SlM6oUWX2O/o0KdaVU5vuSQLAa0ADwQgMEMB2AIECZhVSnTno6spgbtXmHcBUrQACcc2FrTrWS8wAf
78cMFBgwIBgbN+qvTt3ayikRBk7BoyGAGABAdYyfdzRQGV3l4coxrqQ84GpUBmrdR3xNIDUPAKDBSA
ADIGDhhqTZIWaDcrVX8EsbNzbkvCOxG8bN5w8ly9H8jyTJHC6DFndQydbguh2e/ctZJFXRxMAqqPVA
tQH5E64SPr1f0zz7sQYjAHg0In+JQ11+N2B0XXBeeYZgBZFx4tqBToiTCPv0YBgQv8JqA6BEf6RhXx
w1ENhRBnWV8ctEX4Ul2zc3aVGcQNC2KElyTDYyYUWvShdjDyMOGMuFjqnII45aogPhz/CodUHFwaDx
lTgsaOjNyhGWJQd+lFoAGk8ObghI0kawg+EV5blH3dr+digkYuAGSaQZFHFz2P/cTaLmhF52QeSb45
Jwxd+uSVGHlqOZpOeJpCFZ5J+rkAkFjQ0N1tah7JJSZUFNsrkeJUJMIBi8jyaEKIhKPomnC91Uo+NB
yyaJ5umnnpInIFh4t6ZSpGaAVmizqjpByDegYl8tPE0phCYrhcMWSv+uAqHfgH88ak5UXZmlKLVJhd
dj78s1Fxnzo6yUCrV6rrDOkluG+QzCAUTbCwf9SrmMLzK6p+OPHx7DF+bsfMRq7Ec61Av9i6GLw23r
idnZ+/OO0a99pbIrJkproCQMA17OPG6suq3cca5ruDfXCCDoS7BEdvmJn5otdqscn+uogRHHXs8cbh
EIfYaDY1AkrC0cqwcZpnM6ludx72x0p7Fo/hZAcpJDjax0UdHavMKAbiKltMWCF3xxh9k25N/Viud8
ba78iCvUkt+V6BpwMlErmcgc502x+u1nSxJSJP9Mi52awD1V4yB/QHONsnU3L+A/zR4VL/indx/y64
gqcj+qgTeweM86f0Qy1QVbvmWH1D9h+alqg254QD8HJXHvjQaGOqEqC22M54PcftZVKVSQG9jhkv7C
JyTyDoAJfPdu8v7DRZAxsP/ky9MJ3OL36DJfCFPASC3/aXlfLOOON9vGZZHydGf8LnxYJuuVIbl83y
Az5n/RPz07E+9+zw2A2ahz4HxHo9Kt79HTMx1Q7ma7zAzHgHqYH0SoZWyTuOLMiHwSfZDAQTn0ajk9
YQqodnUYjByQZhZak9Wu4gYQsMyEpIOAOQKze8CmEF45KuAHTvIDOfHJNipwoHMuGHBnJElUoDmAyX
c2Qm/R8Ah/iILCCJOEokGowdhDYc/yoL+vpRGwyVSCWFYZNljkhEirGXsalWcAgOdeAdoXcktF2udb
qbUhjWyMQxYO01o6KYKOr6iK3fE4MaS+DsvBsGOBaMb0Y6IxADaJhFICaOLmiWTlDAnY1KzDG4ambL
cWBA8mUzjJsN2KjSaSXGqMCVXYpYkj33mcIApyhQf6YqgeNAmNvuC0t4CsDbSshZJkCS1eNisKqlyG
cF8G2JeiDX6tO6Mv0SmjCa3MFb0bJaGPMU0X7c8XcpvMaOQmCajwSeY9G0WqbBmKv34DsMIEztU6Y2
KiDlFdt6jnCSqx7Dmt6XnqSKaFFHNO5+FmODxMCWBEaco77lNDGXBM0ECYB/+s7nKFdwSF5hgXumQe
EZ7amRg39RHy3zIjyRCykQh8Zo2iviRKyTDn/zx6EefptJj2Cw+Ep2FSc01U5ry4KLPYsTyWnVGnvb
UpyGlhjBUljyjHhWpf8OFaXwhp9O4T1gU9UeyPPa8A2l0p1kNqPXEVRm1AOs1oAGZU596t6SOR2mcB
Oco1srWtkaVrMUzIErrKri85keKqRQYX9VX0/eAUK1hrSu6HMEX3Qh2sCh0q0D2CtnUqS4hj62sE/z
aDs2Sg7MBS6xnQeooc2R2tC9YrKpEi9pLXfYXp20tDCpSP8rKlrD4axprb9u1Df5hSbz9QU0cRpfgn
kiIzwKucd0wsEHlLpe5yHXuc6FrNelOl7pY2+11kTWx7VpRu97dXA3DO1vbkhcb4zyvERYajQgAADs
='''), mimetype='image/png')
TEMPLATE = u'''\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<title>WSGI Information</title>
<style type="text/css">
@import url(http://fonts.googleapis.com/css?family=Ubuntu);
body { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
'Verdana', sans-serif; background-color: white; color: #000;
font-size: 15px; text-align: center; }
#logo { float: right; padding: 0 0 10px 10px; }
div.box { text-align: left; width: 45em; margin: auto; padding: 50px 0;
background-color: white; }
h1, h2 { font-family: 'Ubuntu', 'Lucida Grande', 'Lucida Sans Unicode',
'Geneva', 'Verdana', sans-serif; font-weight: normal; }
h1 { margin: 0 0 30px 0; }
h2 { font-size: 1.4em; margin: 1em 0 0.5em 0; }
table { width: 100%%; border-collapse: collapse; border: 1px solid #AFC5C9 }
table th { background-color: #AFC1C4; color: white; font-size: 0.72em;
font-weight: normal; width: 18em; vertical-align: top;
padding: 0.5em 0 0.1em 0.5em; }
table td { border: 1px solid #AFC5C9; padding: 0.1em 0 0.1em 0.5em; }
code { font-family: 'Consolas', 'Monaco', 'Bitstream Vera Sans Mono',
monospace; font-size: 0.7em; }
ul li { line-height: 1.5em; }
ul.path { font-size: 0.7em; margin: 0 -30px; padding: 8px 30px;
list-style: none; background: #E8EFF0; }
ul.path li { line-height: 1.6em; }
li.virtual { color: #999; text-decoration: underline; }
li.exp { background: white; }
</style>
<div class="box">
<img src="?resource=logo" id="logo" alt="[The Werkzeug Logo]" />
<h1>WSGI Information</h1>
<p>
This page displays all available information about the WSGI server and
the underlying Python interpreter.
<h2 id="python-interpreter">Python Interpreter</h2>
<table>
<tr>
<th>Python Version
<td>%(python_version)s
<tr>
<th>Platform
<td>%(platform)s [%(os)s]
<tr>
<th>API Version
<td>%(api_version)s
<tr>
<th>Byteorder
<td>%(byteorder)s
<tr>
<th>Werkzeug Version
<td>%(werkzeug_version)s
</table>
<h2 id="wsgi-environment">WSGI Environment</h2>
<table>%(wsgi_env)s</table>
<h2 id="installed-eggs">Installed Eggs</h2>
<p>
The following python packages were installed on the system as
Python eggs:
<ul>%(python_eggs)s</ul>
<h2 id="sys-path">System Path</h2>
<p>
The following paths are the current contents of the load path. The
following entries are looked up for Python packages. Note that not
all items in this path are folders. Gray and underlined items are
entries pointing to invalid resources or used by custom import hooks
such as the zip importer.
<p>
Items with a bright background were expanded for display from a relative
path. If you encounter such paths in the output you might want to check
your setup as relative paths are usually problematic in multithreaded
environments.
<ul class="path">%(sys_path)s</ul>
</div>
'''
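# Yields one tuple per sys.path entry: (display path, with the home directory collapsed to '~'
# on POSIX; whether the entry is not an existing directory; whether it was expanded for
# display from a relative path).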
def iter_sys_path():
if os.name == 'posix':
def strip(x):
prefix = os.path.expanduser('~')
if x.startswith(prefix):
x = '~' + x[len(prefix):]
return x
else:
strip = lambda x: x
cwd = os.path.abspath(os.getcwd())
for item in sys.path:
path = os.path.join(cwd, item or os.path.curdir)
yield strip(os.path.normpath(path)), \
not os.path.isdir(path), path != item
def render_testapp(req):
try:
import pkg_resources
except ImportError:
eggs = ()
else:
eggs = sorted(pkg_resources.working_set,
key=lambda x: x.project_name.lower())
python_eggs = []
for egg in eggs:
try:
version = egg.version
except (ValueError, AttributeError):
version = 'unknown'
python_eggs.append('<li>%s <small>[%s]</small>' % (
escape(egg.project_name),
escape(version)
))
wsgi_env = []
sorted_environ = sorted(req.environ.items(),
key=lambda x: repr(x[0]).lower())
for key, value in sorted_environ:
wsgi_env.append('<tr><th>%s<td><code>%s</code>' % (
escape(str(key)),
' '.join(wrap(escape(repr(value))))
))
sys_path = []
for item, virtual, expanded in iter_sys_path():
class_ = []
if virtual:
class_.append('virtual')
if expanded:
class_.append('exp')
sys_path.append('<li%s>%s' % (
class_ and ' class="%s"' % ' '.join(class_) or '',
escape(item)
))
return (TEMPLATE % {
'python_version': '<br>'.join(escape(sys.version).splitlines()),
'platform': escape(sys.platform),
'os': escape(os.name),
'api_version': sys.api_version,
'byteorder': sys.byteorder,
'werkzeug_version': werkzeug.__version__,
'python_eggs': '\n'.join(python_eggs),
'wsgi_env': '\n'.join(wsgi_env),
'sys_path': '\n'.join(sys_path)
}).encode('utf-8')
def test_app(environ, start_response):
"""Simple test application that dumps the environment. You can use
it to check if Werkzeug is working properly:
.. sourcecode:: pycon
>>> from werkzeug.serving import run_simple
>>> from werkzeug.testapp import test_app
>>> run_simple('localhost', 3000, test_app)
* Running on http://localhost:3000/
The application displays important information from the WSGI environment,
the Python interpreter and the installed libraries.
"""
req = Request(environ, populate_request=False)
if req.args.get('resource') == 'logo':
response = logo
else:
response = Response(render_testapp(req), mimetype='text/html')
return response(environ, start_response)
if __name__ == '__main__':
from werkzeug.serving import run_simple
run_simple('localhost', 5000, test_app, use_reloader=True)
|
the-stack_0_44 | # -*- coding: utf-8 -*-
import pandas as pd
from .ecg_eventrelated import ecg_eventrelated
from .ecg_intervalrelated import ecg_intervalrelated
def ecg_analyze(data, sampling_rate=1000, method="auto"):
"""Performs ECG analysis on either epochs (event-related
analysis) or on longer periods of data such as resting-state data.
Parameters
----------
data : dict, DataFrame
A dictionary of epochs, containing one DataFrame per epoch,
usually obtained via `epochs_create()`, or a DataFrame
containing all epochs, usually obtained via `epochs_to_df()`.
Can also take a DataFrame of processed signals from
a longer period of data, typically generated by `ecg_process()`
or `bio_process()`. Can also take a dict containing sets of
separate periods of data.
sampling_rate : int
The sampling frequency of the signal (in Hz, i.e., samples/second).
Defaults to 1000Hz.
method : str
Can be one of 'event-related' for event-related analysis on epochs,
or 'interval-related' for analysis on longer periods of data. Defaults
to 'auto' where the right method will be chosen based on the
mean duration of the data ('event-related' for duration under 10s).
Returns
-------
DataFrame
A dataframe containing the analyzed ECG features. If
event-related analysis is conducted, each epoch is indicated
by the `Label` column. See `ecg_eventrelated()` and
`ecg_intervalrelated()` docstrings for details.
See Also
--------
bio_process, ecg_process, epochs_create, ecg_eventrelated, ecg_intervalrelated
Examples
----------
>>> import neurokit2 as nk
>>>
>>> # Example 1: Download the data for event-related analysis
>>> data = nk.data("bio_eventrelated_100hz")
>>>
>>> # Process the data for event-related analysis
>>> df, info = nk.bio_process(ecg=data["ECG"], sampling_rate=100)
>>> events = nk.events_find(data["Photosensor"],
threshold_keep='below',
event_conditions=["Negative",
"Neutral",
"Neutral",
"Negative"])
>>> epochs = nk.epochs_create(df, events,
sampling_rate=100,
epochs_start=-0.1, epochs_end=1.9)
>>> nk.ecg_analyze(epochs, sampling_rate=100)
>>>
>>> # Example 2: Download the resting-state data
>>> data = nk.data("bio_resting_5min_100hz")
>>>
>>> # Process the data
>>> df, info = nk.ecg_process(data["ECG"], sampling_rate=100)
>>>
>>> # Analyze
>>> nk.ecg_analyze(df, sampling_rate=100)
"""
method = method.lower()
# Event-related analysis
if method in ["event-related", "event", "epoch"]:
# Sanity checks
if isinstance(data, dict):
for i in data:
colnames = data[i].columns.values
elif isinstance(data, pd.DataFrame):
colnames = data.columns.values
if len([i for i in colnames if "Label" in i]) == 0:
            raise ValueError("NeuroKit error: ecg_analyze(): Wrong input "
                             "or method, we couldn't extract "
                             "epochs features.")
else:
features = ecg_eventrelated(data)
# Interval-related analysis
elif method in ["interval-related", "interval", "resting-state"]:
features = ecg_intervalrelated(data)
# Auto
elif method in ["auto"]:
if isinstance(data, dict):
for i in data:
duration = len(data[i]) / sampling_rate
if duration >= 10:
features = ecg_intervalrelated(data)
else:
features = ecg_eventrelated(data)
if isinstance(data, pd.DataFrame):
if 'Label' in data.columns:
epoch_len = data['Label'].value_counts()[0]
duration = epoch_len / sampling_rate
else:
duration = len(data) / sampling_rate
if duration >= 10:
features = ecg_intervalrelated(data)
else:
features = ecg_eventrelated(data)
return features
|
the-stack_0_46 | # 47. Permutations II
from typing import List
class Solution:
def permuteUnique(self, nums: List[int]) -> List[List[int]]:
"""
Given a collection of numbers that might contain duplicates, return all possible unique permutations.
"""
        permutations = set()
        self.helper(nums, [], permutations)
        # Convert the set of tuples back to the List[List[int]] promised by the signature
        return [list(permutation) for permutation in permutations]
def helper(self, array, currentPermutation, permutations):
if not len(array) and len(currentPermutation):
permutations.add(tuple(currentPermutation))
else:
for index in range(len(array)):
newArray = array[: index] + array[index + 1:]
newPermutation = currentPermutation + [array[index]]
self.helper(newArray, newPermutation, permutations)
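# Example driver for running this file locally (a hypothetical harness; LeetCode supplies its own):
if __name__ == "__main__":
    result = Solution().permuteUnique([1, 1, 2])
    # The three distinct permutations of [1, 1, 2]
    print(sorted(result))  # [[1, 1, 2], [1, 2, 1], [2, 1, 1]]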
|
the-stack_0_47 | import torch
import numpy as np
def train_perm_orth(train_loader, model, optimizer, scheduler, criterion, regularizer=None, rho=1E-4, delta=0.5,
nu=1E-2, eps=1E-3, tau=1E-2, lagrange_pen=1E-2, perm_flag=True, t_step=40):
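    # Overview (inferred from the code below): when perm_flag is set, the orthogonal
    # parameters are optimized with a curvilinear search along the Cayley curve returned by
    # compute_ytau (in the spirit of Wen & Yin's feasible method for orthogonality
    # constraints), combining a nonmonotone acceptance test (rho, delta, nu, c, q_opt),
    # Barzilai-Borwein step sizes clipped to [tau_min, tau_max], and an augmented-Lagrangian
    # penalty (lam_lm, lagrange_pen) that pushes the entries toward nonnegative,
    # permutation-like matrices.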
if perm_flag:
tau_min = 1E-24
tau_max = 1E-1
c = None
lam_lm = []
for p in optimizer.param_groups[0]['params']:
lam_lm.append(torch.zeros_like(p))
k_iter = 0
ts = torch.empty(len(train_loader), device=model.device).uniform_(0.0, 1.0)
with torch.no_grad():
for p in optimizer.param_groups[0]['params']:
p.data = torch.rand_like(p.data)
p.data, _, _ = torch.svd(p.data)
input_cml = []
target_cml = []
t_cml = []
inner_iter = 0
loss = 0.0
loss_obj = 0.0
for iter, (input, target) in enumerate(train_loader):
t = ts[iter]
input = input.to(model.device, non_blocking=False)
target = target.to(model.device, non_blocking=False)
output = model(input, perm_train=True, t=t)
input_all = input
target_all = target
new_loss = criterion(output, target_all)
loss_obj += new_loss
# This part is for the augmented Lagrangian method
int_pen = integer_penalty(optimizer.param_groups[0]['params'], lam_lm, lagrange_pen)
loss += new_loss + int_pen
inner_iter += 1
input_cml.append(input.clone())
target_cml.append(target.clone())
t_cml.append(t.clone())
if inner_iter % t_step == 0:
optimizer.zero_grad()
loss.backward()
grad_norm = 0.0
violator = 0.0
for p in optimizer.param_groups[0]['params']:
param_norm = p.grad.data.norm(2)
grad_norm += param_norm.item() ** 2
violator += torch.sum((torch.matmul(p.data.t(), p.data) - torch.eye(p.data.shape[0],
device=p.device)) ** 2)
grad_norm = grad_norm ** (1. / 2)
if c is None:
c = loss.clone().item()
q_opt = 1
loss_inner = loss.clone()
print('Iteration: %03d, Loss %.2E, Objective %.2E, Negative Penalty: %.2E,'
'Grad Norm: %.2E, Ortho Violation: %.2E, tau: %.2E' %
(k_iter, loss_inner.item(), loss_obj.item(), int_pen.item(), grad_norm, violator.item(), tau))
# Compute F for defining Y function
F_list = []
with torch.no_grad():
for p in optimizer.param_groups[0]['params']:
f = torch.matmul(p.grad.data, p.t().data) - torch.matmul(p.data, p.grad.t().data)
F_list.append(f)
# Store old parameters
params_old = [None] * len(optimizer.param_groups[0]['params'])
for idx, param in enumerate(optimizer.param_groups[0]['params']):
params_old[idx] = param.clone()
grads_old = [p.grad.data.clone() for p in optimizer.param_groups[0]['params']]
# Compute the values of Y(tau) and Y'(tau), store them into the model
Y_t, Y_ft_prime = compute_ytau(tau, F_list, optimizer.param_groups[0]['params'])
for p, y_t in zip(optimizer.param_groups[0]['params'], Y_t):
p.data = y_t.clone()
loss_inner = 0.0
for t_2, input_2, target_2 in zip(t_cml, input_cml, target_cml):
output = model(input_2, perm_train=True, t=t_2)
loss_inner += criterion(output, target_2)
int_pen = integer_penalty(optimizer.param_groups[0]['params'], lam_lm, lagrange_pen)
loss_inner += int_pen
optimizer.zero_grad()
loss_inner.backward()
grads_new = [p.grad.data.clone() for p in optimizer.param_groups[0]['params']]
with torch.no_grad():
dF_dt = 0.0
for g_new, y_ft_p in zip(grads_new, Y_ft_prime):
df = g_new * (y_ft_p / torch.norm(y_ft_p.data))
df = torch.sum(df)
dF_dt += df.item()
threshold_flag = True
k_inner = 0
while threshold_flag:
with torch.no_grad():
threshold = c + rho * tau * dF_dt
if loss_inner.item() >= threshold:
# Compute Y for smaller value of tau
with torch.no_grad():
tau *= delta
Y_t, Y_ft_prime = compute_ytau(tau, F_list, optimizer.param_groups[0]['params'])
for p, y_t in zip(optimizer.param_groups[0]['params'], Y_t):
p.data = y_t.clone()
loss_old = loss_inner.clone()
loss_inner = 0.0
for t_2, input_2, target_2 in zip(t_cml, input_cml, target_cml):
output = model(input_2, perm_train=True, t=t_2)
loss_inner += criterion(output, target_2)
int_pen = integer_penalty(optimizer.param_groups[0]['params'], lam_lm, lagrange_pen)
loss_inner += int_pen
optimizer.zero_grad()
loss_inner.backward()
grads_new = [p.grad.data.clone() for p in optimizer.param_groups[0]['params']]
k_inner += 1
if (loss_inner.item() - loss_old.item()) / (1 + loss_old.item()) < 1E-5:
threshold_flag = False
else:
threshold_flag = False
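            # Nonmonotone reference value update (Zhang-Hager style): c <- (nu*q*c + f(Y)) / (nu*q + 1)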
with torch.no_grad():
c = (nu * q_opt * c + loss_inner.item())
q_opt = nu * q_opt + 1
c = c / q_opt
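                # Barzilai-Borwein step size: min(<s,s>/|<s,y>|, |<s,y>|/<y,y>), clipped to [tau_min, tau_max]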
bb_num = 0.0
bb_denom = 0.0
yy_sum = 0.0
for p_old, g_old, p_new, g_new in zip(params_old, grads_old, optimizer.param_groups[0]['params'],
grads_new):
s_bb = p_new - p_old
y_bb = g_new - g_old
bb_num += torch.sum(s_bb ** 2)
bb_denom += torch.sum(s_bb * y_bb)
yy_sum += torch.sum(y_bb ** 2)
tau_bb = bb_num / torch.abs(bb_denom)
tau_bb = tau_bb.item()
tau_bb2 = torch.abs(bb_denom) / yy_sum
tau_bb2 = tau_bb2.item()
tau_bb = np.minimum(tau_bb, tau_bb2)
tau = np.minimum(tau_bb, tau_max)
tau = np.maximum(tau, tau_min)
lam_lm, lagrange_pen = integer_penalty_update(optimizer.param_groups[0]['params'], lam_lm,
lagrange_pen)
loss_inner = 0.0
for t_2, input_2, target_2 in zip(t_cml, input_cml, target_cml):
output = model(input_2, perm_train=True, t=t_2)
loss_obj = criterion(output, target_2)
int_pen = integer_penalty(optimizer.param_groups[0]['params'], lam_lm, lagrange_pen)
loss_inner += loss_obj + int_pen
optimizer.zero_grad()
loss_inner.backward()
grads_new = [p.grad.data.clone() for p in optimizer.param_groups[0]['params']]
grad_norm = 0.0
for g_new in grads_new:
gn = g_new.norm(2)
grad_norm += gn.item() ** 2
grad_norm = grad_norm ** (1. / 2)
k_iter += 1
input_cml = []
target_cml = []
t_cml = []
loss = 0.0
loss_obj = 0.0
model.train()
loss_sum = 0.0
correct = 0.0
change_P = np.nan
params_before = [None] * len(optimizer.param_groups[0]['params'])
if nu is not None:
for idx, param in enumerate(optimizer.param_groups[0]['params']):
params_before[idx] = param.clone().detach()
optimizer.step()
lr = scheduler.get_lr()[0]
with torch.no_grad():
for param, param_o in zip(optimizer.param_groups[0]['params'], params_old):
param.data = 1 / (1 + lr / nu) * (param + lr / nu * param_o)
output = model(input_all, perm_train=True)
loss = criterion(output, target_all)
if regularizer is not None:
loss += regularizer(model)
loss_sum += loss.item() * input.size(0)
pred = output.data.argmax(1, keepdim=True)
correct += pred.eq(target_all.data.view_as(pred)).sum().item()
return {
'loss': loss_sum / len(train_loader.dataset),
'accuracy': correct * 100.0 / len(train_loader.dataset),
'change_perm': change_P
}
def hard_int_penalty(p_list, pen=1E1):
pen_loss = 0.0
for p in p_list:
p_mask = p.data * (p.data <= 0)
pen_loss += pen * torch.sum(p_mask ** 2)
return pen_loss
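# Augmented-Lagrangian term for the elementwise constraint p >= 0: entries with
# p - lam/mu <= 0 contribute -lam*p + (mu/2)*p**2, the remaining entries contribute the
# constant -lam**2/(2*mu) (the usual clamped-multiplier form for inequality constraints).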
def integer_penalty(p_list, lam_list, mu):
pen_loss = 0.0
for p, lam in zip(p_list, lam_list):
mask = (p - lam / mu) <= 0
mask_alt = (p - lam / mu) > 0
p_l = torch.sum((- lam * p + 0.5 * mu * (p ** 2)) * mask)
p_l += torch.sum((-1/(2 * mu) * lam ** 2) * mask_alt)
pen_loss += p_l
return pen_loss
def integer_penalty_update(p_list, lam_list, mu):
new_lam_list = []
with torch.no_grad():
for p, lam in zip(p_list, lam_list):
upd = lam - mu * p
new_lam_list.append(upd * (upd > 0))
new_mu = mu * 1.01
return new_lam_list, new_mu
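# Cayley-transform curve on the orthogonal group: Y(tau) = (I + tau/2*F)^(-1) (I - tau/2*F) P
# stays orthogonal whenever P is orthogonal and F is skew-symmetric (here F = G P^T - P G^T);
# the second return value is dY/dtau, consumed by the curvilinear search in train_perm_orth.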
def compute_ytau(tau, f_list, p_list):
y_tau = []
y_tau_prime = []
for p, f in zip(p_list, f_list):
eye = torch.eye(f.shape[0], device=f.device)
qmat_inv = torch.inverse(eye + tau / 2 * f)
y_ft = torch.matmul(qmat_inv, eye - tau / 2 * f)
y_ft = torch.matmul(y_ft, p)
y_ft_prime = - torch.matmul(qmat_inv, f)
y_ft_prime = torch.matmul(y_ft_prime, (p + y_ft) / 2)
y_tau.append(y_ft.clone())
y_tau_prime.append(y_ft_prime.clone())
    return y_tau, y_tau_prime
|
the-stack_0_48 | """The test for the History Statistics sensor platform."""
# pylint: disable=protected-access
from datetime import timedelta
import unittest
from unittest.mock import patch
from homeassistant.const import STATE_UNKNOWN
from homeassistant.setup import setup_component
from homeassistant.components.sensor.history_stats import HistoryStatsSensor
import homeassistant.core as ha
from homeassistant.helpers.template import Template
import homeassistant.util.dt as dt_util
from tests.common import init_recorder_component, get_test_home_assistant
class TestHistoryStatsSensor(unittest.TestCase):
"""Test the History Statistics sensor."""
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_setup(self):
"""Test the history statistics sensor setup."""
self.init_recorder()
config = {
'history': {
},
'sensor': {
'platform': 'history_stats',
'entity_id': 'binary_sensor.test_id',
'state': 'on',
'start': '{{ now().replace(hour=0)'
'.replace(minute=0).replace(second=0) }}',
'duration': '02:00',
'name': 'Test',
}
}
self.assertTrue(setup_component(self.hass, 'sensor', config))
state = self.hass.states.get('sensor.test')
self.assertEqual(state.state, STATE_UNKNOWN)
def test_period_parsing(self):
"""Test the conversion from templates to period."""
today = Template('{{ now().replace(hour=0).replace(minute=0)'
'.replace(second=0) }}', self.hass)
duration = timedelta(hours=2, minutes=1)
sensor1 = HistoryStatsSensor(
self.hass, 'test', 'on', today, None, duration, 'time', 'test')
sensor2 = HistoryStatsSensor(
self.hass, 'test', 'on', None, today, duration, 'time', 'test')
sensor1.update_period()
sensor1_start, sensor1_end = sensor1._period
sensor2.update_period()
sensor2_start, sensor2_end = sensor2._period
# Start = 00:00:00
self.assertEqual(sensor1_start.hour, 0)
self.assertEqual(sensor1_start.minute, 0)
self.assertEqual(sensor1_start.second, 0)
# End = 02:01:00
self.assertEqual(sensor1_end.hour, 2)
self.assertEqual(sensor1_end.minute, 1)
self.assertEqual(sensor1_end.second, 0)
# Start = 21:59:00
self.assertEqual(sensor2_start.hour, 21)
self.assertEqual(sensor2_start.minute, 59)
self.assertEqual(sensor2_start.second, 0)
# End = 00:00:00
self.assertEqual(sensor2_end.hour, 0)
self.assertEqual(sensor2_end.minute, 0)
self.assertEqual(sensor2_end.second, 0)
def test_measure(self):
"""Test the history statistics sensor measure."""
t0 = dt_util.utcnow() - timedelta(minutes=40)
t1 = t0 + timedelta(minutes=20)
t2 = dt_util.utcnow() - timedelta(minutes=10)
# Start t0 t1 t2 End
# |--20min--|--20min--|--10min--|--10min--|
# |---off---|---on----|---off---|---on----|
fake_states = {
'binary_sensor.test_id': [
ha.State('binary_sensor.test_id', 'on', last_changed=t0),
ha.State('binary_sensor.test_id', 'off', last_changed=t1),
ha.State('binary_sensor.test_id', 'on', last_changed=t2),
]
}
start = Template('{{ as_timestamp(now()) - 3600 }}', self.hass)
end = Template('{{ now() }}', self.hass)
sensor1 = HistoryStatsSensor(
self.hass, 'binary_sensor.test_id', 'on', start, end, None,
'time', 'Test')
sensor2 = HistoryStatsSensor(
self.hass, 'unknown.id', 'on', start, end, None, 'time', 'Test')
sensor3 = HistoryStatsSensor(
self.hass, 'binary_sensor.test_id', 'on', start, end, None,
'count', 'test')
sensor4 = HistoryStatsSensor(
self.hass, 'binary_sensor.test_id', 'on', start, end, None,
'ratio', 'test')
self.assertEqual(sensor1._type, 'time')
self.assertEqual(sensor3._type, 'count')
self.assertEqual(sensor4._type, 'ratio')
with patch('homeassistant.components.history.'
'state_changes_during_period', return_value=fake_states):
with patch('homeassistant.components.history.get_state',
return_value=None):
sensor1.update()
sensor2.update()
sensor3.update()
sensor4.update()
self.assertEqual(sensor1.state, 0.5)
self.assertEqual(sensor2.state, None)
self.assertEqual(sensor3.state, 2)
self.assertEqual(sensor4.state, 50)
def test_wrong_date(self):
"""Test when start or end value is not a timestamp or a date."""
good = Template('{{ now() }}', self.hass)
bad = Template('{{ TEST }}', self.hass)
sensor1 = HistoryStatsSensor(
self.hass, 'test', 'on', good, bad, None, 'time', 'Test')
sensor2 = HistoryStatsSensor(
self.hass, 'test', 'on', bad, good, None, 'time', 'Test')
before_update1 = sensor1._period
before_update2 = sensor2._period
sensor1.update_period()
sensor2.update_period()
self.assertEqual(before_update1, sensor1._period)
self.assertEqual(before_update2, sensor2._period)
def test_wrong_duration(self):
"""Test when duration value is not a timedelta."""
self.init_recorder()
config = {
'history': {
},
'sensor': {
'platform': 'history_stats',
'entity_id': 'binary_sensor.test_id',
'name': 'Test',
'state': 'on',
'start': '{{ now() }}',
'duration': 'TEST',
}
}
setup_component(self.hass, 'sensor', config)
self.assertEqual(self.hass.states.get('sensor.test'), None)
self.assertRaises(TypeError,
setup_component(self.hass, 'sensor', config))
def test_bad_template(self):
"""Test Exception when the template cannot be parsed."""
bad = Template('{{ x - 12 }}', self.hass) # x is undefined
duration = '01:00'
sensor1 = HistoryStatsSensor(
self.hass, 'test', 'on', bad, None, duration, 'time', 'Test')
sensor2 = HistoryStatsSensor(
self.hass, 'test', 'on', None, bad, duration, 'time', 'Test')
before_update1 = sensor1._period
before_update2 = sensor2._period
sensor1.update_period()
sensor2.update_period()
self.assertEqual(before_update1, sensor1._period)
self.assertEqual(before_update2, sensor2._period)
def test_not_enough_arguments(self):
"""Test config when not enough arguments provided."""
self.init_recorder()
config = {
'history': {
},
'sensor': {
'platform': 'history_stats',
'entity_id': 'binary_sensor.test_id',
'name': 'Test',
'state': 'on',
'start': '{{ now() }}',
}
}
setup_component(self.hass, 'sensor', config)
self.assertEqual(self.hass.states.get('sensor.test'), None)
self.assertRaises(TypeError,
setup_component(self.hass, 'sensor', config))
def test_too_many_arguments(self):
"""Test config when too many arguments provided."""
self.init_recorder()
config = {
'history': {
},
'sensor': {
'platform': 'history_stats',
'entity_id': 'binary_sensor.test_id',
'name': 'Test',
'state': 'on',
'start': '{{ as_timestamp(now()) - 3600 }}',
'end': '{{ now() }}',
'duration': '01:00',
}
}
setup_component(self.hass, 'sensor', config)
self.assertEqual(self.hass.states.get('sensor.test'), None)
self.assertRaises(TypeError,
setup_component(self.hass, 'sensor', config))
def init_recorder(self):
"""Initialize the recorder."""
init_recorder_component(self.hass)
self.hass.start()
|
the-stack_0_49 | """
This module implements some special functions that commonly appear in
combinatorial contexts (e.g. in power series); in particular,
sequences of rational numbers such as Bernoulli and Fibonacci numbers.
Factorials, binomial coefficients and related functions are located in
the separate 'factorials' module.
"""
from __future__ import print_function, division
from sympy.core import S, Symbol, Rational, Integer, Add, Dummy
from sympy.core.compatibility import as_int, SYMPY_INTS, range
from sympy.core.cache import cacheit
from sympy.core.logic import fuzzy_not
from sympy.core.function import Function, expand_mul
from sympy.core.numbers import E, pi
from sympy.core.relational import LessThan, StrictGreaterThan
from sympy.functions.combinatorial.factorials import binomial, factorial
from sympy.functions.elementary.exponential import log
from sympy.functions.elementary.integers import floor
from sympy.functions.elementary.trigonometric import sin, cos, cot
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.utilities.memoization import recurrence_memo
from mpmath import bernfrac, workprec
from mpmath.libmp import ifib as _ifib
def _product(a, b):
p = 1
for k in range(a, b + 1):
p *= k
return p
# Dummy symbol used for computing polynomial sequences
_sym = Symbol('x')
_symbols = Function('x')
#----------------------------------------------------------------------------#
# #
# Fibonacci numbers #
# #
#----------------------------------------------------------------------------#
class fibonacci(Function):
r"""
Fibonacci numbers / Fibonacci polynomials
The Fibonacci numbers are the integer sequence defined by the
initial terms F_0 = 0, F_1 = 1 and the two-term recurrence
relation F_n = F_{n-1} + F_{n-2}. This definition
extended to arbitrary real and complex arguments using
the formula
.. math :: F_z = \frac{\phi^z - \cos(\pi z) \phi^{-z}}{\sqrt 5}
The Fibonacci polynomials are defined by F_1(x) = 1,
F_2(x) = x, and F_n(x) = x*F_{n-1}(x) + F_{n-2}(x) for n > 2.
For all positive integers n, F_n(1) = F_n.
* fibonacci(n) gives the nth Fibonacci number, F_n
* fibonacci(n, x) gives the nth Fibonacci polynomial in x, F_n(x)
Examples
========
>>> from sympy import fibonacci, Symbol
>>> [fibonacci(x) for x in range(11)]
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
>>> fibonacci(5, Symbol('t'))
t**4 + 3*t**2 + 1
References
==========
.. [1] http://en.wikipedia.org/wiki/Fibonacci_number
.. [2] http://mathworld.wolfram.com/FibonacciNumber.html
See Also
========
bell, bernoulli, catalan, euler, harmonic, lucas
"""
@staticmethod
def _fib(n):
return _ifib(n)
@staticmethod
@recurrence_memo([None, S.One, _sym])
def _fibpoly(n, prev):
return (prev[-2] + _sym*prev[-1]).expand()
@classmethod
def eval(cls, n, sym=None):
if n is S.Infinity:
return S.Infinity
if n.is_Integer:
n = int(n)
if n < 0:
return S.NegativeOne**(n + 1) * fibonacci(-n)
if sym is None:
return Integer(cls._fib(n))
else:
if n < 1:
raise ValueError("Fibonacci polynomials are defined "
"only for positive integer indices.")
return cls._fibpoly(n).subs(_sym, sym)
def _eval_rewrite_as_sqrt(self, n):
return 2**(-n)*sqrt(5)*((1 + sqrt(5))**n - (-sqrt(5) + 1)**n) / 5
def _eval_rewrite_as_GoldenRatio(self,n):
return (S.GoldenRatio**n - 1/(-S.GoldenRatio)**n)/(2*S.GoldenRatio-1)
class lucas(Function):
"""
Lucas numbers
Lucas numbers satisfy a recurrence relation similar to that of
the Fibonacci sequence, in which each term is the sum of the
preceding two. They are generated by choosing the initial
values L_0 = 2 and L_1 = 1.
* lucas(n) gives the nth Lucas number
Examples
========
>>> from sympy import lucas
>>> [lucas(x) for x in range(11)]
[2, 1, 3, 4, 7, 11, 18, 29, 47, 76, 123]
References
==========
.. [1] http://en.wikipedia.org/wiki/Lucas_number
.. [2] http://mathworld.wolfram.com/LucasNumber.html
See Also
========
bell, bernoulli, catalan, euler, fibonacci, harmonic
"""
@classmethod
def eval(cls, n):
if n is S.Infinity:
return S.Infinity
if n.is_Integer:
return fibonacci(n + 1) + fibonacci(n - 1)
def _eval_rewrite_as_sqrt(self, n):
return 2**(-n)*((1 + sqrt(5))**n + (-sqrt(5) + 1)**n)
#----------------------------------------------------------------------------#
# #
# Bernoulli numbers #
# #
#----------------------------------------------------------------------------#
class bernoulli(Function):
r"""
Bernoulli numbers / Bernoulli polynomials
The Bernoulli numbers are a sequence of rational numbers
defined by B_0 = 1 and the recursive relation (n > 0)::
n
___
\ / n + 1 \
0 = ) | | * B .
/___ \ k / k
k = 0
They are also commonly defined by their exponential generating
function, which is x/(exp(x) - 1). For odd indices > 1, the
Bernoulli numbers are zero.
The Bernoulli polynomials satisfy the analogous formula::
n
___
\ / n \ n-k
B (x) = ) | | * B * x .
n /___ \ k / k
k = 0
Bernoulli numbers and Bernoulli polynomials are related as
B_n(0) = B_n.
We compute Bernoulli numbers using Ramanujan's formula::
/ n + 3 \
B = (A(n) - S(n)) / | |
n \ n /
where A(n) = (n+3)/3 when n = 0 or 2 (mod 6), A(n) = -(n+3)/6
when n = 4 (mod 6), and::
[n/6]
___
\ / n + 3 \
S(n) = ) | | * B
/___ \ n - 6*k / n-6*k
k = 1
This formula is similar to the sum given in the definition, but
cuts 2/3 of the terms. For Bernoulli polynomials, we use the
formula in the definition.
* bernoulli(n) gives the nth Bernoulli number, B_n
* bernoulli(n, x) gives the nth Bernoulli polynomial in x, B_n(x)
Examples
========
>>> from sympy import bernoulli
>>> [bernoulli(n) for n in range(11)]
[1, -1/2, 1/6, 0, -1/30, 0, 1/42, 0, -1/30, 0, 5/66]
>>> bernoulli(1000001)
0
References
==========
.. [1] http://en.wikipedia.org/wiki/Bernoulli_number
.. [2] http://en.wikipedia.org/wiki/Bernoulli_polynomial
.. [3] http://mathworld.wolfram.com/BernoulliNumber.html
.. [4] http://mathworld.wolfram.com/BernoulliPolynomial.html
See Also
========
bell, catalan, euler, fibonacci, harmonic, lucas
"""
# Calculates B_n for positive even n
@staticmethod
def _calc_bernoulli(n):
s = 0
a = int(binomial(n + 3, n - 6))
for j in range(1, n//6 + 1):
s += a * bernoulli(n - 6*j)
# Avoid computing each binomial coefficient from scratch
a *= _product(n - 6 - 6*j + 1, n - 6*j)
a //= _product(6*j + 4, 6*j + 9)
if n % 6 == 4:
s = -Rational(n + 3, 6) - s
else:
s = Rational(n + 3, 3) - s
return s / binomial(n + 3, n)
# We implement a specialized memoization scheme to handle each
# case modulo 6 separately
_cache = {0: S.One, 2: Rational(1, 6), 4: Rational(-1, 30)}
_highest = {0: 0, 2: 2, 4: 4}
@classmethod
def eval(cls, n, sym=None):
if n.is_Number:
if n.is_Integer and n.is_nonnegative:
if n is S.Zero:
return S.One
elif n is S.One:
if sym is None:
return -S.Half
else:
return sym - S.Half
# Bernoulli numbers
elif sym is None:
if n.is_odd:
return S.Zero
n = int(n)
# Use mpmath for enormous Bernoulli numbers
if n > 500:
p, q = bernfrac(n)
return Rational(int(p), int(q))
case = n % 6
highest_cached = cls._highest[case]
if n <= highest_cached:
return cls._cache[n]
# To avoid excessive recursion when, say, bernoulli(1000) is
# requested, calculate and cache the entire sequence ... B_988,
# B_994, B_1000 in increasing order
for i in range(highest_cached + 6, n + 6, 6):
b = cls._calc_bernoulli(i)
cls._cache[i] = b
cls._highest[case] = i
return b
# Bernoulli polynomials
else:
n, result = int(n), []
for k in range(n + 1):
result.append(binomial(n, k)*cls(k)*sym**(n - k))
return Add(*result)
else:
raise ValueError("Bernoulli numbers are defined only"
" for nonnegative integer indices.")
if sym is None:
if n.is_odd and (n - 1).is_positive:
return S.Zero
#----------------------------------------------------------------------------#
# #
# Bell numbers #
# #
#----------------------------------------------------------------------------#
class bell(Function):
r"""
Bell numbers / Bell polynomials
The Bell numbers satisfy `B_0 = 1` and
.. math:: B_n = \sum_{k=0}^{n-1} \binom{n-1}{k} B_k.
They are also given by:
.. math:: B_n = \frac{1}{e} \sum_{k=0}^{\infty} \frac{k^n}{k!}.
The Bell polynomials are given by `B_0(x) = 1` and
    .. math:: B_n(x) = x \sum_{k=1}^{n} \binom{n-1}{k-1} B_{k-1}(x).
The second kind of Bell polynomials (are sometimes called "partial" Bell
polynomials or incomplete Bell polynomials) are defined as
.. math:: B_{n,k}(x_1, x_2,\dotsc x_{n-k+1}) =
            \sum_{j_1+j_2+j_3+\dotsb=k \atop j_1+2j_2+3j_3+\dotsb=n}
\frac{n!}{j_1!j_2!\dotsb j_{n-k+1}!}
\left(\frac{x_1}{1!} \right)^{j_1}
\left(\frac{x_2}{2!} \right)^{j_2} \dotsb
\left(\frac{x_{n-k+1}}{(n-k+1)!} \right) ^{j_{n-k+1}}.
* bell(n) gives the `n^{th}` Bell number, `B_n`.
* bell(n, x) gives the `n^{th}` Bell polynomial, `B_n(x)`.
* bell(n, k, (x1, x2, ...)) gives Bell polynomials of the second kind,
`B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1})`.
Notes
=====
Not to be confused with Bernoulli numbers and Bernoulli polynomials,
which use the same notation.
Examples
========
>>> from sympy import bell, Symbol, symbols
>>> [bell(n) for n in range(11)]
[1, 1, 2, 5, 15, 52, 203, 877, 4140, 21147, 115975]
>>> bell(30)
846749014511809332450147
>>> bell(4, Symbol('t'))
t**4 + 6*t**3 + 7*t**2 + t
>>> bell(6, 2, symbols('x:6')[1:])
6*x1*x5 + 15*x2*x4 + 10*x3**2
References
==========
.. [1] http://en.wikipedia.org/wiki/Bell_number
.. [2] http://mathworld.wolfram.com/BellNumber.html
.. [3] http://mathworld.wolfram.com/BellPolynomial.html
See Also
========
bernoulli, catalan, euler, fibonacci, harmonic, lucas
"""
@staticmethod
@recurrence_memo([1, 1])
def _bell(n, prev):
s = 1
a = 1
for k in range(1, n):
a = a * (n - k) // k
s += a * prev[k]
return s
@staticmethod
@recurrence_memo([S.One, _sym])
def _bell_poly(n, prev):
s = 1
a = 1
for k in range(2, n + 1):
a = a * (n - k + 1) // (k - 1)
s += a * prev[k - 1]
return expand_mul(_sym * s)
@staticmethod
def _bell_incomplete_poly(n, k, symbols):
r"""
The second kind of Bell polynomials (incomplete Bell polynomials).
Calculated by recurrence formula:
.. math:: B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1}) =
\sum_{m=1}^{n-k+1}
                x_m \binom{n-1}{m-1} B_{n-m,k-1}(x_1, x_2, \dotsc, x_{n-m-k+2})
where
B_{0,0} = 1;
B_{n,0} = 0; for n>=1
B_{0,k} = 0; for k>=1
"""
if (n == 0) and (k == 0):
return S.One
elif (n == 0) or (k == 0):
return S.Zero
s = S.Zero
a = S.One
for m in range(1, n - k + 2):
s += a * bell._bell_incomplete_poly(
n - m, k - 1, symbols) * symbols[m - 1]
a = a * (n - m) / m
return expand_mul(s)
@classmethod
def eval(cls, n, k_sym=None, symbols=None):
if n.is_Integer and n.is_nonnegative:
if k_sym is None:
return Integer(cls._bell(int(n)))
elif symbols is None:
return cls._bell_poly(int(n)).subs(_sym, k_sym)
else:
r = cls._bell_incomplete_poly(int(n), int(k_sym), symbols)
return r
def _eval_rewrite_as_Sum(self, n, k_sym=None, symbols=None):
from sympy import Sum
if (k_sym is not None) or (symbols is not None):
return self
# Dobinski's formula
if not n.is_nonnegative:
return self
k = Dummy('k', integer=True, nonnegative=True)
return 1 / E * Sum(k**n / factorial(k), (k, 0, S.Infinity))
#----------------------------------------------------------------------------#
# #
# Harmonic numbers #
# #
#----------------------------------------------------------------------------#
class harmonic(Function):
r"""
Harmonic numbers
The nth harmonic number is given by `\operatorname{H}_{n} =
1 + \frac{1}{2} + \frac{1}{3} + \ldots + \frac{1}{n}`.
More generally:
.. math:: \operatorname{H}_{n,m} = \sum_{k=1}^{n} \frac{1}{k^m}
As `n \rightarrow \infty`, `\operatorname{H}_{n,m} \rightarrow \zeta(m)`,
the Riemann zeta function.
* ``harmonic(n)`` gives the nth harmonic number, `\operatorname{H}_n`
* ``harmonic(n, m)`` gives the nth generalized harmonic number
of order `m`, `\operatorname{H}_{n,m}`, where
``harmonic(n) == harmonic(n, 1)``
Examples
========
>>> from sympy import harmonic, oo
>>> [harmonic(n) for n in range(6)]
[0, 1, 3/2, 11/6, 25/12, 137/60]
>>> [harmonic(n, 2) for n in range(6)]
[0, 1, 5/4, 49/36, 205/144, 5269/3600]
>>> harmonic(oo, 2)
pi**2/6
>>> from sympy import Symbol, Sum
>>> n = Symbol("n")
>>> harmonic(n).rewrite(Sum)
Sum(1/_k, (_k, 1, n))
We can evaluate harmonic numbers for all integral and positive
rational arguments:
>>> from sympy import S, expand_func, simplify
>>> harmonic(8)
761/280
>>> harmonic(11)
83711/27720
>>> H = harmonic(1/S(3))
>>> H
harmonic(1/3)
>>> He = expand_func(H)
>>> He
-log(6) - sqrt(3)*pi/6 + 2*Sum(log(sin(_k*pi/3))*cos(2*_k*pi/3), (_k, 1, 1))
+ 3*Sum(1/(3*_k + 1), (_k, 0, 0))
>>> He.doit()
-log(6) - sqrt(3)*pi/6 - log(sqrt(3)/2) + 3
>>> H = harmonic(25/S(7))
>>> He = simplify(expand_func(H).doit())
>>> He
log(sin(pi/7)**(-2*cos(pi/7))*sin(2*pi/7)**(2*cos(16*pi/7))*cos(pi/14)**(-2*sin(pi/14))/14)
+ pi*tan(pi/14)/2 + 30247/9900
>>> He.n(40)
1.983697455232980674869851942390639915940
>>> harmonic(25/S(7)).n(40)
1.983697455232980674869851942390639915940
We can rewrite harmonic numbers in terms of polygamma functions:
>>> from sympy import digamma, polygamma
>>> m = Symbol("m")
>>> harmonic(n).rewrite(digamma)
polygamma(0, n + 1) + EulerGamma
>>> harmonic(n).rewrite(polygamma)
polygamma(0, n + 1) + EulerGamma
>>> harmonic(n,3).rewrite(polygamma)
polygamma(2, n + 1)/2 - polygamma(2, 1)/2
>>> harmonic(n,m).rewrite(polygamma)
(-1)**m*(polygamma(m - 1, 1) - polygamma(m - 1, n + 1))/factorial(m - 1)
Integer offsets in the argument can be pulled out:
>>> from sympy import expand_func
>>> expand_func(harmonic(n+4))
harmonic(n) + 1/(n + 4) + 1/(n + 3) + 1/(n + 2) + 1/(n + 1)
>>> expand_func(harmonic(n-4))
harmonic(n) - 1/(n - 1) - 1/(n - 2) - 1/(n - 3) - 1/n
Some limits can be computed as well:
>>> from sympy import limit, oo
>>> limit(harmonic(n), n, oo)
oo
>>> limit(harmonic(n, 2), n, oo)
pi**2/6
>>> limit(harmonic(n, 3), n, oo)
-polygamma(2, 1)/2
However we can not compute the general relation yet:
>>> limit(harmonic(n, m), n, oo)
harmonic(oo, m)
which equals ``zeta(m)`` for ``m > 1``.
References
==========
.. [1] http://en.wikipedia.org/wiki/Harmonic_number
.. [2] http://functions.wolfram.com/GammaBetaErf/HarmonicNumber/
.. [3] http://functions.wolfram.com/GammaBetaErf/HarmonicNumber2/
See Also
========
bell, bernoulli, catalan, euler, fibonacci, lucas
"""
# Generate one memoized Harmonic number-generating function for each
# order and store it in a dictionary
_functions = {}
@classmethod
def eval(cls, n, m=None):
from sympy import zeta
if m is S.One:
return cls(n)
if m is None:
m = S.One
if m.is_zero:
return n
if n is S.Infinity and m.is_Number:
# TODO: Fix for symbolic values of m
if m.is_negative:
return S.NaN
elif LessThan(m, S.One):
return S.Infinity
elif StrictGreaterThan(m, S.One):
return zeta(m)
else:
return cls
if n.is_Integer and n.is_nonnegative and m.is_Integer:
if n == 0:
return S.Zero
if not m in cls._functions:
@recurrence_memo([0])
def f(n, prev):
return prev[-1] + S.One / n**m
cls._functions[m] = f
return cls._functions[m](int(n))
def _eval_rewrite_as_polygamma(self, n, m=1):
from sympy.functions.special.gamma_functions import polygamma
return S.NegativeOne**m/factorial(m - 1) * (polygamma(m - 1, 1) - polygamma(m - 1, n + 1))
def _eval_rewrite_as_digamma(self, n, m=1):
from sympy.functions.special.gamma_functions import polygamma
return self.rewrite(polygamma)
def _eval_rewrite_as_trigamma(self, n, m=1):
from sympy.functions.special.gamma_functions import polygamma
return self.rewrite(polygamma)
def _eval_rewrite_as_Sum(self, n, m=None):
from sympy import Sum
k = Dummy("k", integer=True)
if m is None:
m = S.One
return Sum(k**(-m), (k, 1, n))
def _eval_expand_func(self, **hints):
from sympy import Sum
n = self.args[0]
m = self.args[1] if len(self.args) == 2 else 1
if m == S.One:
if n.is_Add:
off = n.args[0]
nnew = n - off
if off.is_Integer and off.is_positive:
result = [S.One/(nnew + i) for i in range(off, 0, -1)] + [harmonic(nnew)]
return Add(*result)
elif off.is_Integer and off.is_negative:
result = [-S.One/(nnew + i) for i in range(0, off, -1)] + [harmonic(nnew)]
return Add(*result)
if n.is_Rational:
# Expansions for harmonic numbers at general rational arguments (u + p/q)
# Split n as u + p/q with p < q
p, q = n.as_numer_denom()
u = p // q
p = p - u * q
if u.is_nonnegative and p.is_positive and q.is_positive and p < q:
k = Dummy("k")
t1 = q * Sum(1 / (q * k + p), (k, 0, u))
t2 = 2 * Sum(cos((2 * pi * p * k) / S(q)) *
log(sin((pi * k) / S(q))),
(k, 1, floor((q - 1) / S(2))))
t3 = (pi / 2) * cot((pi * p) / q) + log(2 * q)
return t1 + t2 - t3
return self
def _eval_rewrite_as_tractable(self, n, m=1):
from sympy import polygamma
return self.rewrite(polygamma).rewrite("tractable", deep=True)
def _eval_evalf(self, prec):
from sympy import polygamma
if all(i.is_number for i in self.args):
return self.rewrite(polygamma)._eval_evalf(prec)
#----------------------------------------------------------------------------#
# #
# Euler numbers #
# #
#----------------------------------------------------------------------------#
class euler(Function):
r"""
Euler numbers
The euler numbers are given by::
2*n+1 k
___ ___ j 2*n+1
\ \ / k \ (-1) * (k-2*j)
E = I ) ) | | --------------------
2n /___ /___ \ j / k k
k = 1 j = 0 2 * I * k
E = 0
2n+1
* euler(n) gives the n-th Euler number, E_n
Examples
========
>>> from sympy import Symbol
>>> from sympy.functions import euler
>>> [euler(n) for n in range(10)]
[1, 0, -1, 0, 5, 0, -61, 0, 1385, 0]
>>> n = Symbol("n")
>>> euler(n+2*n)
euler(3*n)
References
==========
.. [1] http://en.wikipedia.org/wiki/Euler_numbers
.. [2] http://mathworld.wolfram.com/EulerNumber.html
.. [3] http://en.wikipedia.org/wiki/Alternating_permutation
.. [4] http://mathworld.wolfram.com/AlternatingPermutation.html
See Also
========
bell, bernoulli, catalan, fibonacci, harmonic, lucas
"""
@classmethod
def eval(cls, m):
if m.is_odd:
return S.Zero
if m.is_Integer and m.is_nonnegative:
from mpmath import mp
m = m._to_mpmath(mp.prec)
res = mp.eulernum(m, exact=True)
return Integer(res)
def _eval_rewrite_as_Sum(self, arg):
from sympy import Sum
if arg.is_even:
k = Dummy("k", integer=True)
j = Dummy("j", integer=True)
n = self.args[0] / 2
Em = (S.ImaginaryUnit * Sum(Sum(binomial(k, j) * ((-1)**j * (k - 2*j)**(2*n + 1)) /
(2**k*S.ImaginaryUnit**k * k), (j, 0, k)), (k, 1, 2*n + 1)))
return Em
def _eval_evalf(self, prec):
m = self.args[0]
if m.is_Integer and m.is_nonnegative:
from mpmath import mp
from sympy import Expr
m = m._to_mpmath(prec)
with workprec(prec):
res = mp.eulernum(m)
return Expr._from_mpmath(res, prec)
#----------------------------------------------------------------------------#
# #
# Catalan numbers #
# #
#----------------------------------------------------------------------------#
class catalan(Function):
r"""
Catalan numbers
The n-th catalan number is given by::
1 / 2*n \
C = ----- | |
n n + 1 \ n /
* catalan(n) gives the n-th Catalan number, C_n
Examples
========
>>> from sympy import (Symbol, binomial, gamma, hyper, polygamma,
... catalan, diff, combsimp, Rational, I)
>>> [ catalan(i) for i in range(1,10) ]
[1, 2, 5, 14, 42, 132, 429, 1430, 4862]
>>> n = Symbol("n", integer=True)
>>> catalan(n)
catalan(n)
Catalan numbers can be transformed into several other, identical
expressions involving other mathematical functions
>>> catalan(n).rewrite(binomial)
binomial(2*n, n)/(n + 1)
>>> catalan(n).rewrite(gamma)
4**n*gamma(n + 1/2)/(sqrt(pi)*gamma(n + 2))
>>> catalan(n).rewrite(hyper)
hyper((-n + 1, -n), (2,), 1)
For some non-integer values of n we can get closed form
expressions by rewriting in terms of gamma functions:
>>> catalan(Rational(1,2)).rewrite(gamma)
8/(3*pi)
We can differentiate the Catalan numbers C(n) interpreted as a
continuous real funtion in n:
>>> diff(catalan(n), n)
(polygamma(0, n + 1/2) - polygamma(0, n + 2) + log(4))*catalan(n)
As a more advanced example consider the following ratio
between consecutive numbers:
>>> combsimp((catalan(n + 1)/catalan(n)).rewrite(binomial))
2*(2*n + 1)/(n + 2)
The Catalan numbers can be generalized to complex numbers:
>>> catalan(I).rewrite(gamma)
4**I*gamma(1/2 + I)/(sqrt(pi)*gamma(2 + I))
and evaluated with arbitrary precision:
>>> catalan(I).evalf(20)
0.39764993382373624267 - 0.020884341620842555705*I
References
==========
.. [1] http://en.wikipedia.org/wiki/Catalan_number
.. [2] http://mathworld.wolfram.com/CatalanNumber.html
.. [3] http://functions.wolfram.com/GammaBetaErf/CatalanNumber/
.. [4] http://geometer.org/mathcircles/catalan.pdf
See Also
========
bell, bernoulli, euler, fibonacci, harmonic, lucas
sympy.functions.combinatorial.factorials.binomial
"""
@classmethod
def eval(cls, n):
from sympy import gamma
if (n.is_Integer and n.is_nonnegative) or \
(n.is_noninteger and n.is_negative):
return 4**n*gamma(n + S.Half)/(gamma(S.Half)*gamma(n + 2))
if (n.is_integer and n.is_negative):
if (n + 1).is_negative:
return S.Zero
if (n + 1).is_zero:
return -S.Half
def fdiff(self, argindex=1):
from sympy import polygamma, log
n = self.args[0]
return catalan(n)*(polygamma(0, n + Rational(1, 2)) - polygamma(0, n + 2) + log(4))
def _eval_rewrite_as_binomial(self, n):
return binomial(2*n, n)/(n + 1)
def _eval_rewrite_as_factorial(self, n):
return factorial(2*n) / (factorial(n+1) * factorial(n))
def _eval_rewrite_as_gamma(self, n):
from sympy import gamma
# The gamma function allows to generalize Catalan numbers to complex n
return 4**n*gamma(n + S.Half)/(gamma(S.Half)*gamma(n + 2))
def _eval_rewrite_as_hyper(self, n):
from sympy import hyper
return hyper([1 - n, -n], [2], 1)
def _eval_rewrite_as_Product(self, n):
from sympy import Product
if not (n.is_integer and n.is_nonnegative):
return self
k = Dummy('k', integer=True, positive=True)
return Product((n + k) / k, (k, 2, n))
def _eval_evalf(self, prec):
from sympy import gamma
if self.args[0].is_number:
return self.rewrite(gamma)._eval_evalf(prec)
#----------------------------------------------------------------------------#
# #
# Genocchi numbers #
# #
#----------------------------------------------------------------------------#
class genocchi(Function):
r"""
Genocchi numbers
The Genocchi numbers are a sequence of integers G_n that satisfy the
relation::
2*t/(exp(t) + 1) = Sum(G_n * t**n / n!, (n, 1, oo))
Examples
========
>>> from sympy import Symbol
>>> from sympy.functions import genocchi
>>> [genocchi(n) for n in range(1, 9)]
[1, -1, 0, 1, 0, -3, 0, 17]
>>> n = Symbol('n', integer=True, positive=True)
>>> genocchi(2 * n + 1)
0
References
==========
.. [1] https://en.wikipedia.org/wiki/Genocchi_number
.. [2] http://mathworld.wolfram.com/GenocchiNumber.html
See Also
========
bell, bernoulli, catalan, euler, fibonacci, harmonic, lucas
"""
@classmethod
def eval(cls, n):
if n.is_Number:
if (not n.is_Integer) or n.is_nonpositive:
raise ValueError("Genocchi numbers are defined only for " +
"positive integers")
return 2 * (1 - S(2) ** n) * bernoulli(n)
if n.is_odd and (n - 1).is_positive:
return S.Zero
if (n - 1).is_zero:
return S.One
def _eval_rewrite_as_bernoulli(self, n):
if n.is_integer and n.is_nonnegative:
return (1 - S(2) ** n) * bernoulli(n) * 2
def _eval_is_integer(self):
if self.args[0].is_integer and self.args[0].is_positive:
return True
def _eval_is_negative(self):
n = self.args[0]
if n.is_integer and n.is_positive:
if n.is_odd:
return False
return (n / 2).is_odd
def _eval_is_positive(self):
n = self.args[0]
if n.is_integer and n.is_positive:
if n.is_odd:
return fuzzy_not((n - 1).is_positive)
return (n / 2).is_even
def _eval_is_even(self):
n = self.args[0]
if n.is_integer and n.is_positive:
if n.is_even:
return False
return (n - 1).is_positive
def _eval_is_odd(self):
n = self.args[0]
if n.is_integer and n.is_positive:
if n.is_even:
return True
return fuzzy_not((n - 1).is_positive)
def _eval_is_prime(self):
n = self.args[0]
# only G_6 = -3 and G_8 = 17 are prime,
# but SymPy does not consider negatives as prime
# so only n=8 is tested
return (n - 8).is_zero
#######################################################################
###
### Functions for enumerating partitions, permutations and combinations
###
#######################################################################
class _MultisetHistogram(tuple):
pass
_N = -1
_ITEMS = -2
_M = slice(None, _ITEMS)
def _multiset_histogram(n):
"""Return tuple used in permutation and combination counting. Input
is a dictionary giving items with counts as values or a sequence of
items (which need not be sorted).
The data is stored in a class deriving from tuple so it is easily
recognized and so it can be converted easily to a list.
"""
if type(n) is dict: # item: count
if not all(isinstance(v, int) and v >= 0 for v in n.values()):
raise ValueError
tot = sum(n.values())
items = sum(1 for k in n if n[k] > 0)
return _MultisetHistogram([n[k] for k in n if n[k] > 0] + [items, tot])
else:
n = list(n)
s = set(n)
if len(s) == len(n):
n = [1]*len(n)
n.extend([len(n), len(n)])
return _MultisetHistogram(n)
m = dict(zip(s, range(len(s))))
d = dict(zip(range(len(s)), [0]*len(s)))
for i in n:
d[m[i]] += 1
return _multiset_histogram(d)
def nP(n, k=None, replacement=False):
"""Return the number of permutations of ``n`` items taken ``k`` at a time.
Possible values for ``n``::
integer - set of length ``n``
sequence - converted to a multiset internally
multiset - {element: multiplicity}
If ``k`` is None then the total of all permutations of length 0
through the number of items represented by ``n`` will be returned.
If ``replacement`` is True then a given item can appear more than once
in the ``k`` items. (For example, for 'ab' permutations of 2 would
include 'aa', 'ab', 'ba' and 'bb'.) The multiplicity of elements in
``n`` is ignored when ``replacement`` is True but the total number
of elements is considered since no element can appear more times than
the number of elements in ``n``.
Examples
========
>>> from sympy.functions.combinatorial.numbers import nP
>>> from sympy.utilities.iterables import multiset_permutations, multiset
>>> nP(3, 2)
6
>>> nP('abc', 2) == nP(multiset('abc'), 2) == 6
True
>>> nP('aab', 2)
3
>>> nP([1, 2, 2], 2)
3
>>> [nP(3, i) for i in range(4)]
[1, 3, 6, 6]
>>> nP(3) == sum(_)
True
When ``replacement`` is True, each item can have multiplicity
equal to the length represented by ``n``:
>>> nP('aabc', replacement=True)
121
>>> [len(list(multiset_permutations('aaaabbbbcccc', i))) for i in range(5)]
[1, 3, 9, 27, 81]
>>> sum(_)
121
References
==========
.. [1] http://en.wikipedia.org/wiki/Permutation
See Also
========
sympy.utilities.iterables.multiset_permutations
"""
try:
n = as_int(n)
except ValueError:
return Integer(_nP(_multiset_histogram(n), k, replacement))
return Integer(_nP(n, k, replacement))
@cacheit
def _nP(n, k=None, replacement=False):
from sympy.functions.combinatorial.factorials import factorial
from sympy.core.mul import prod
if k == 0:
return 1
if isinstance(n, SYMPY_INTS): # n different items
# assert n >= 0
if k is None:
return sum(_nP(n, i, replacement) for i in range(n + 1))
elif replacement:
return n**k
elif k > n:
return 0
elif k == n:
return factorial(k)
elif k == 1:
return n
else:
# assert k >= 0
return _product(n - k + 1, n)
elif isinstance(n, _MultisetHistogram):
if k is None:
return sum(_nP(n, i, replacement) for i in range(n[_N] + 1))
elif replacement:
return n[_ITEMS]**k
elif k == n[_N]:
return factorial(k)/prod([factorial(i) for i in n[_M] if i > 1])
elif k > n[_N]:
return 0
elif k == 1:
return n[_ITEMS]
else:
# assert k >= 0
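# count permutations by choosing which distinct item occupies the first
# position: temporarily remove one copy of it from the histogram, recurse
# on the remaining k - 1 positions, then restore the counts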
tot = 0
n = list(n)
for i in range(len(n[_M])):
if not n[i]:
continue
n[_N] -= 1
if n[i] == 1:
n[i] = 0
n[_ITEMS] -= 1
tot += _nP(_MultisetHistogram(n), k - 1)
n[_ITEMS] += 1
n[i] = 1
else:
n[i] -= 1
tot += _nP(_MultisetHistogram(n), k - 1)
n[i] += 1
n[_N] += 1
return tot
@cacheit
def _AOP_product(n):
"""for n = (m1, m2, .., mk) return the coefficients of the polynomial,
prod(sum(x**i for i in range(nj + 1)) for nj in n); i.e. the coefficients
of the product of AOPs (all-one polynomials) or order given in n. The
resulting coefficient corresponding to x**r is the number of r-length
combinations of sum(n) elements with multiplicities given in n.
The coefficients are given as a default dictionary (so if a query is made
for a key that is not present, 0 will be returned).
Examples
========
>>> from sympy.functions.combinatorial.numbers import _AOP_product
>>> from sympy.abc import x
>>> n = (2, 2, 3) # e.g. aabbccc
>>> prod = ((x**2 + x + 1)*(x**2 + x + 1)*(x**3 + x**2 + x + 1)).expand()
>>> c = _AOP_product(n); dict(c)
{0: 1, 1: 3, 2: 6, 3: 8, 4: 8, 5: 6, 6: 3, 7: 1}
>>> [c[i] for i in range(8)] == [prod.coeff(x, i) for i in range(8)]
True
The generating poly used here is the same as that listed in
http://tinyurl.com/cep849r, but in a refactored form.
"""
from collections import defaultdict
n = list(n)
ord = sum(n)
need = (ord + 2)//2
rv = [1]*(n.pop() + 1)
rv.extend([0]*(need - len(rv)))
rv = rv[:need]
while n:
ni = n.pop()
N = ni + 1
was = rv[:]
for i in range(1, min(N, len(rv))):
rv[i] += rv[i - 1]
for i in range(N, need):
rv[i] += rv[i - 1] - was[i - N]
rev = list(reversed(rv))
if ord % 2:
rv = rv + rev
else:
rv[-1:] = rev
d = defaultdict(int)
for i in range(len(rv)):
d[i] = rv[i]
return d
def nC(n, k=None, replacement=False):
"""Return the number of combinations of ``n`` items taken ``k`` at a time.
Possible values for ``n``::
integer - set of length ``n``
sequence - converted to a multiset internally
multiset - {element: multiplicity}
If ``k`` is None then the total of all combinations of length 0
through the number of items represented in ``n`` will be returned.
If ``replacement`` is True then a given item can appear more than once
in the ``k`` items. (For example, for 'ab' sets of 2 would include 'aa',
'ab', and 'bb'.) The multiplicity of elements in ``n`` is ignored when
``replacement`` is True but the total number of elements is considered
since no element can appear more times than the number of elements in
``n``.
Examples
========
>>> from sympy.functions.combinatorial.numbers import nC
>>> from sympy.utilities.iterables import multiset_combinations
>>> nC(3, 2)
3
>>> nC('abc', 2)
3
>>> nC('aab', 2)
2
When ``replacement`` is True, each item can have multiplicity
equal to the length represented by ``n``:
>>> nC('aabc', replacement=True)
35
>>> [len(list(multiset_combinations('aaaabbbbcccc', i))) for i in range(5)]
[1, 3, 6, 10, 15]
>>> sum(_)
35
If there are ``k`` items with multiplicities ``m_1, m_2, ..., m_k``
then the total of all combinations of length 0 through ``k`` is the
product, ``(m_1 + 1)*(m_2 + 1)*...*(m_k + 1)``. When the multiplicity
of each item is 1 (i.e., k unique items) then there are 2**k
combinations. For example, if there are 4 unique items, the total number
of combinations is 16:
>>> sum(nC(4, i) for i in range(5))
16
References
==========
.. [1] http://en.wikipedia.org/wiki/Combination
.. [2] http://tinyurl.com/cep849r
See Also
========
sympy.utilities.iterables.multiset_combinations
"""
from sympy.functions.combinatorial.factorials import binomial
from sympy.core.mul import prod
if isinstance(n, SYMPY_INTS):
if k is None:
if not replacement:
return 2**n
return sum(nC(n, i, replacement) for i in range(n + 1))
if k < 0:
raise ValueError("k cannot be negative")
if replacement:
return binomial(n + k - 1, k)
return binomial(n, k)
if isinstance(n, _MultisetHistogram):
N = n[_N]
if k is None:
if not replacement:
return prod(m + 1 for m in n[_M])
return sum(nC(n, i, replacement) for i in range(N + 1))
elif replacement:
return nC(n[_ITEMS], k, replacement)
# assert k >= 0
elif k in (1, N - 1):
return n[_ITEMS]
elif k in (0, N):
return 1
return _AOP_product(tuple(n[_M]))[k]
else:
return nC(_multiset_histogram(n), k, replacement)
@cacheit
def _stirling1(n, k):
if n == k == 0:
return S.One
if 0 in (n, k):
return S.Zero
n1 = n - 1
# some special values
if n == k:
return S.One
elif k == 1:
return factorial(n1)
elif k == n1:
return binomial(n, 2)
elif k == n - 2:
return (3*n - 1)*binomial(n, 3)/4
elif k == n - 3:
return binomial(n, 2)*binomial(n, 4)
# general recurrence
return n1*_stirling1(n1, k) + _stirling1(n1, k - 1)
@cacheit
def _stirling2(n, k):
if n == k == 0:
return S.One
if 0 in (n, k):
return S.Zero
n1 = n - 1
# some special values
if k == n1:
return binomial(n, 2)
elif k == 2:
return 2**n1 - 1
# general recurrence
return k*_stirling2(n1, k) + _stirling2(n1, k - 1)
def stirling(n, k, d=None, kind=2, signed=False):
"""Return Stirling number S(n, k) of the first or second (default) kind.
The sum of all Stirling numbers of the second kind for k = 1
through n is bell(n). The recurrence relationship for these numbers
is::
S(0, 0) = 1;  S(n, 0) = S(0, k) = 0 for n, k > 0;
S(n + 1, k) = j*S(n, k) + S(n, k - 1)
where ``j`` is::
``n`` for Stirling numbers of the first kind
``-n`` for signed Stirling numbers of the first kind
``k`` for Stirling numbers of the second kind
The first kind of Stirling number counts the number of permutations of
``n`` distinct items that have ``k`` cycles; the second kind counts the
ways in which ``n`` distinct items can be partitioned into ``k`` parts.
If ``d`` is given, the "reduced Stirling number of the second kind" is
returned: ``S^{d}(n, k) = S(n - d + 1, k - d + 1)`` with ``n >= k >= d``.
(This counts the ways to partition ``n`` consecutive integers into
``k`` groups with no pairwise difference less than ``d``. See example
below.)
To obtain the signed Stirling numbers of the first kind, use keyword
``signed=True``. Using this keyword automatically sets ``kind`` to 1.
Examples
========
>>> from sympy.functions.combinatorial.numbers import stirling, bell
>>> from sympy.combinatorics import Permutation
>>> from sympy.utilities.iterables import multiset_partitions, permutations
First kind (unsigned by default):
>>> [stirling(6, i, kind=1) for i in range(7)]
[0, 120, 274, 225, 85, 15, 1]
>>> perms = list(permutations(range(4)))
>>> [sum(Permutation(p).cycles == i for p in perms) for i in range(5)]
[0, 6, 11, 6, 1]
>>> [stirling(4, i, kind=1) for i in range(5)]
[0, 6, 11, 6, 1]
First kind (signed):
>>> [stirling(4, i, signed=True) for i in range(5)]
[0, -6, 11, -6, 1]
Second kind:
>>> [stirling(10, i) for i in range(12)]
[0, 1, 511, 9330, 34105, 42525, 22827, 5880, 750, 45, 1, 0]
>>> sum(_) == bell(10)
True
>>> len(list(multiset_partitions(range(4), 2))) == stirling(4, 2)
True
Reduced second kind:
>>> from sympy import subsets, oo
>>> def delta(p):
... if len(p) == 1:
... return oo
... return min(abs(i[0] - i[1]) for i in subsets(p, 2))
>>> parts = multiset_partitions(range(5), 3)
>>> d = 2
>>> sum(1 for p in parts if all(delta(i) >= d for i in p))
7
>>> stirling(5, 3, 2)
7
References
==========
.. [1] http://en.wikipedia.org/wiki/Stirling_numbers_of_the_first_kind
.. [2] http://en.wikipedia.org/wiki/Stirling_numbers_of_the_second_kind
See Also
========
sympy.utilities.iterables.multiset_partitions
"""
# TODO: make this a class like bell()
n = as_int(n)
k = as_int(k)
if n < 0:
raise ValueError('n must be nonnegative')
if k > n:
return S.Zero
if d:
# assert k >= d
# kind is ignored -- only kind=2 is supported
return _stirling2(n - d + 1, k - d + 1)
elif signed:
# kind is ignored -- only kind=1 is supported
return (-1)**(n - k)*_stirling1(n, k)
if kind == 1:
return _stirling1(n, k)
elif kind == 2:
return _stirling2(n, k)
else:
raise ValueError('kind must be 1 or 2, not %s' % kind)
@cacheit
def _nT(n, k):
"""Return the partitions of ``n`` items into ``k`` parts. This
is used by ``nT`` for the case when ``n`` is an integer."""
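# recurrence: a partition of n into exactly k parts corresponds (by removing
# one item from every part) to a partition of n - k into at most k parts,
# hence the sum over j = 0..min(k, n - k)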
if k == 0:
return 1 if k == n else 0
return sum(_nT(n - k, j) for j in range(min(k, n - k) + 1))
def nT(n, k=None):
"""Return the number of ``k``-sized partitions of ``n`` items.
Possible values for ``n``::
integer - ``n`` identical items
sequence - converted to a multiset internally
multiset - {element: multiplicity}
Note: the convention for ``nT`` is different than that of ``nC`` and
``nP`` in that
here an integer indicates ``n`` *identical* items instead of a set of
length ``n``; this is in keeping with the ``partitions`` function which
treats its integer-``n`` input like a list of ``n`` 1s. One can use
``range(n)`` for ``n`` to indicate ``n`` distinct items.
If ``k`` is None then the total number of ways to partition the elements
represented in ``n`` will be returned.
Examples
========
>>> from sympy.functions.combinatorial.numbers import nT
Partitions of the given multiset:
>>> [nT('aabbc', i) for i in range(1, 7)]
[1, 8, 11, 5, 1, 0]
>>> nT('aabbc') == sum(_)
True
>>> [nT("mississippi", i) for i in range(1, 12)]
[1, 74, 609, 1521, 1768, 1224, 579, 197, 50, 9, 1]
Partitions when all items are identical:
>>> [nT(5, i) for i in range(1, 6)]
[1, 2, 2, 1, 1]
>>> nT('1'*5) == sum(_)
True
When all items are different:
>>> [nT(range(5), i) for i in range(1, 6)]
[1, 15, 25, 10, 1]
>>> nT(range(5)) == sum(_)
True
References
==========
.. [1] http://undergraduate.csse.uwa.edu.au/units/CITS7209/partition.pdf
See Also
========
sympy.utilities.iterables.partitions
sympy.utilities.iterables.multiset_partitions
"""
from sympy.utilities.enumerative import MultisetPartitionTraverser
if isinstance(n, SYMPY_INTS):
# assert n >= 0
# all the same
if k is None:
return sum(_nT(n, k) for k in range(1, n + 1))
return _nT(n, k)
if not isinstance(n, _MultisetHistogram):
try:
# if n contains hashable items there is some
# quick handling that can be done
u = len(set(n))
if u == 1:
return nT(len(n), k)
elif u == len(n):
n = range(u)
raise TypeError
except TypeError:
n = _multiset_histogram(n)
N = n[_N]
if k is None and N == 1:
return 1
if k in (1, N):
return 1
if k == 2 or N == 2 and k is None:
m, r = divmod(N, 2)
rv = sum(nC(n, i) for i in range(1, m + 1))
if not r:
rv -= nC(n, m)//2
if k is None:
rv += 1 # for k == 1
return rv
if N == n[_ITEMS]:
# all distinct
if k is None:
return bell(N)
return stirling(N, k)
m = MultisetPartitionTraverser()
if k is None:
return m.count_partitions(n[_M])
# MultisetPartitionTraverser does not have a range-limited count
# method, so need to enumerate and count
tot = 0
for discard in m.enum_range(n[_M], k-1, k):
tot += 1
return tot
|
the-stack_0_52 | from __future__ import annotations
import ast
import functools
import sys
from typing import Iterable
from tokenize_rt import NON_CODING_TOKENS
from tokenize_rt import Offset
from tokenize_rt import Token
from pyupgrade._ast_helpers import ast_to_offset
from pyupgrade._ast_helpers import is_name_attr
from pyupgrade._data import register
from pyupgrade._data import State
from pyupgrade._data import TokenFunc
from pyupgrade._token_helpers import CLOSING
from pyupgrade._token_helpers import find_closing_bracket
from pyupgrade._token_helpers import find_token
from pyupgrade._token_helpers import OPENING
def _fix_optional(i: int, tokens: list[Token]) -> None:
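# Rewrites `Optional[X]` to `X | None` when the subscript fits on one line,
# or to `None | (X)` when it spans several lines (keeping the brackets as
# parentheses so the expression stays valid).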
j = find_token(tokens, i, '[')
k = find_closing_bracket(tokens, j)
if tokens[j].line == tokens[k].line:
tokens[k] = Token('CODE', ' | None')
del tokens[i:j + 1]
else:
tokens[j] = tokens[j]._replace(src='(')
tokens[k] = tokens[k]._replace(src=')')
tokens[i:j] = [Token('CODE', 'None | ')]
def _fix_union(
i: int,
tokens: list[Token],
*,
arg_count: int,
) -> None:
depth = 1
parens_done = []
open_parens = []
commas = []
coding_depth = None
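# Roughly: walk the tokens of the subscript while tracking bracket depth.
# Commas at the shallowest coding depth separate the Union arguments (and
# are later rewritten to `|`), while parentheses shallower than that depth
# are redundant wrappers that can be deleted.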
j = find_token(tokens, i, '[')
k = j + 1
while depth:
# it's possible our first coding token is a close paren
# so make sure this is separate from the if chain below
if (
tokens[k].name not in NON_CODING_TOKENS and
tokens[k].src != '(' and
coding_depth is None
):
if tokens[k].src == ')': # the coding token was an empty tuple
coding_depth = depth - 1
else:
coding_depth = depth
if tokens[k].src in OPENING:
if tokens[k].src == '(':
open_parens.append((depth, k))
depth += 1
elif tokens[k].src in CLOSING:
if tokens[k].src == ')':
paren_depth, open_paren = open_parens.pop()
parens_done.append((paren_depth, (open_paren, k)))
depth -= 1
elif tokens[k].src == ',':
commas.append((depth, k))
k += 1
k -= 1
assert coding_depth is not None
assert not open_parens, open_parens
comma_depth = min((depth for depth, _ in commas), default=sys.maxsize)
min_depth = min(comma_depth, coding_depth)
to_delete = [
paren
for depth, positions in parens_done
if depth < min_depth
for paren in positions
]
if comma_depth <= coding_depth:
comma_positions = [k for depth, k in commas if depth == comma_depth]
if len(comma_positions) == arg_count:
to_delete.append(comma_positions.pop())
else:
comma_positions = []
to_delete.sort()
if tokens[j].line == tokens[k].line:
del tokens[k]
for comma in comma_positions:
tokens[comma] = Token('CODE', ' |')
for paren in reversed(to_delete):
del tokens[paren]
del tokens[i:j + 1]
else:
tokens[j] = tokens[j]._replace(src='(')
tokens[k] = tokens[k]._replace(src=')')
for comma in comma_positions:
tokens[comma] = Token('CODE', ' |')
for paren in reversed(to_delete):
del tokens[paren]
del tokens[i:j]
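# The `X | Y` spelling is only emitted inside annotations, and only when it
# cannot fail at runtime: either the target version is 3.10+, or postponed
# evaluation via `from __future__ import annotations` is in effect (and the
# user has not asked to preserve runtime typing behaviour).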
def _supported_version(state: State) -> bool:
return (
state.in_annotation and (
state.settings.min_version >= (3, 10) or (
not state.settings.keep_runtime_typing and
'annotations' in state.from_imports['__future__']
)
)
)
def _any_arg_is_str(node_slice: ast.expr) -> bool:
return (
isinstance(node_slice, ast.Str) or (
isinstance(node_slice, ast.Tuple) and
any(isinstance(elt, ast.Str) for elt in node_slice.elts)
)
)
@register(ast.Subscript)
def visit_Subscript(
state: State,
node: ast.Subscript,
parent: ast.AST,
) -> Iterable[tuple[Offset, TokenFunc]]:
if not _supported_version(state):
return
# prevent rewriting forward annotations
if (
(sys.version_info >= (3, 9) and _any_arg_is_str(node.slice)) or
(
sys.version_info < (3, 9) and
isinstance(node.slice, ast.Index) and
_any_arg_is_str(node.slice.value)
)
):
return
if is_name_attr(
node.value,
state.from_imports,
('typing',),
('Optional',),
):
yield ast_to_offset(node), _fix_optional
elif is_name_attr(node.value, state.from_imports, ('typing',), ('Union',)):
if sys.version_info >= (3, 9): # pragma: >=3.9 cover
node_slice = node.slice
elif isinstance(node.slice, ast.Index): # pragma: <3.9 cover
node_slice: ast.AST = node.slice.value
else: # pragma: <3.9 cover
node_slice = node.slice # unexpected slice type
if isinstance(node_slice, ast.Slice): # not a valid annotation
return
if isinstance(node_slice, ast.Tuple):
if node_slice.elts:
arg_count = len(node_slice.elts)
else:
return # empty Union
else:
arg_count = 1
func = functools.partial(_fix_union, arg_count=arg_count)
yield ast_to_offset(node), func
|
the-stack_0_53 | import argparse
import logging
import time
import sys
from twilio.rest import Client
import settings
import RPi.GPIO as GPIO
twilio = Client(settings.TWILIO_PUBLIC_KEY, settings.TWILIO_SECRET_KEY)
log = logging.getLogger(__name__)
class SaltLevelMonitor(object):
def __init__(self, force_report=False, unit=settings.METRIC, threshold=0,
tank_depth=settings.DEFAULT_TANK_DEPTH):
self.force_report = force_report
self.unit = unit if unit in settings.VALID_UNITS else settings.METRIC
self.notation = 'inches' if unit == settings.IMPERIAL else 'centimeters'
self.threshold = float(threshold)
self.tank_depth = float(tank_depth)
self.distance = None
self.remaining_salt = None
def check_salt_level(self):
self.distance = self.get_average_distance()
self._convert_units()
self.remaining_salt = self.tank_depth - self.distance
message = self._get_report_message()
log.info('Salt level is: {0:.2f} {1}'.format(self.remaining_salt, self.notation))
if self.remaining_salt < self.threshold or self.force_report:
log.info(message['body'])
self.report_salt_level(message)
def get_average_distance(self):
""" used to get an average read since the sensor isn't 100% accurate """
reads = [self.get_distance() for _ in range(settings.READS_PER_CHECK)]
return sum(reads) / settings.READS_PER_CHECK
@staticmethod
def get_distance():
""" returns distance in centimeters """
# set Trigger to HIGH
GPIO.output(settings.GPIO_TRIGGER, True)
# set Trigger after 0.01ms to LOW
time.sleep(0.00001)
GPIO.output(settings.GPIO_TRIGGER, False)
start_time = time.time()
stop_time = time.time()
# save StartTime
while GPIO.input(settings.GPIO_ECHO) == 0:
start_time = time.time()
# save time of arrival
while GPIO.input(settings.GPIO_ECHO) == 1:
stop_time = time.time()
# time difference between start and arrival
time_elapsed = stop_time - start_time
return (time_elapsed * settings.SPEED_OF_SOUND) / 2
def _convert_units(self):
"""
convert distance to inches if IMPERIAL or convert tank_depth and threshold to centimeters
"""
if self.unit == settings.IMPERIAL:
self.distance = self.distance / settings.CM_TO_INCHES
else:
self.tank_depth = self.tank_depth * settings.CM_TO_INCHES
self.threshold = self.threshold * settings.CM_TO_INCHES
def _get_report_message(self):
message = settings.MESSAGE_TEMPLATE.copy()
message['body'] = settings.SALT_LEVEL_ALERT_MESSAGE.format(
self.remaining_salt, self.notation)
if self.force_report:
message['body'] = '{} (forced report)'.format(message['body'])
return message
@staticmethod
def report_salt_level(message):
twilio.messages.create(**message)
def __enter__(self):
GPIO.setmode(GPIO.BCM)
# set GPIO direction (IN / OUT)
GPIO.setup(settings.GPIO_TRIGGER, GPIO.OUT)
GPIO.setup(settings.GPIO_ECHO, GPIO.IN)
return self
def __exit__(self, *args):
GPIO.cleanup()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Salty Dog')
parser.add_argument('-u',
'--unit',
action='store',
dest='unit',
default='metric',
help='Unit of measure used in reporting')
parser.add_argument('-t',
'--threshold',
action='store',
dest='threshold',
help='Threshold for reporting in inches or cm (must match --unit)')
parser.add_argument('-d',
'--tank-depth',
action='store',
dest='tank_depth',
help='Total depth of your salt tank in inches or cm (must match --unit)')
parser.add_argument('-f',
'--force-report',
action='store_true',
dest='force_report',
default=False,
help='Force Salty Dog to send SMS regardless of salt level measured')
args = parser.parse_args(sys.argv[1:])
parsed_kwargs = {
'force_report': args.force_report,
'unit': args.unit,
'threshold': args.threshold,
'tank_depth': args.tank_depth,
}
with SaltLevelMonitor(**parsed_kwargs) as monitor:
monitor.check_salt_level()
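# Example invocation (illustrative only; the script name and values are
# assumptions, not part of this project):
# python salt_monitor.py --unit imperial --threshold 8 --tank-depth 36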
|
the-stack_0_54 | from __future__ import absolute_import
from __future__ import unicode_literals
import time
import socket
import logging
from ._compat import bytes_types, string_types
from ._compat import struct_l
from .version import __version__
try:
import ssl
except ImportError:
ssl = None # pyflakes.ignore
try:
from .snappy_socket import SnappySocket
except ImportError:
SnappySocket = None # pyflakes.ignore
try:
import simplejson as json
except ImportError:
import json # pyflakes.ignore
import tornado.iostream
import tornado.ioloop
try:
from tornado.simple_httpclient import _default_ca_certs as default_ca_certs
except ImportError:
# Tornado < 4
from tornado.simple_httpclient import _DEFAULT_CA_CERTS
def default_ca_certs():
return _DEFAULT_CA_CERTS
from nsq import event, protocol
from .deflate_socket import DeflateSocket
logger = logging.getLogger(__name__)
# states
INIT = 'INIT'
DISCONNECTED = 'DISCONNECTED'
CONNECTING = 'CONNECTING'
CONNECTED = 'CONNECTED'
DEFAULT_USER_AGENT = 'pynsq/%s' % __version__
class AsyncConn(event.EventedMixin):
"""
Low level object representing a TCP connection to nsqd.
When a message on this connection is requeued and the requeue delay
has not been specified, it calculates the delay automatically by an
increasing multiple of ``requeue_delay``.
Generates the following events that can be listened to with
:meth:`nsq.AsyncConn.on`:
* ``connect``
* ``close``
* ``error``
* ``identify``
* ``identify_response``
* ``auth``
* ``auth_response``
* ``heartbeat``
* ``ready``
* ``message``
* ``response``
* ``backoff``
* ``resume``
:param host: the host to connect to
:param port: the post to connect to
:param timeout: the timeout for read/write operations (in seconds)
:param heartbeat_interval: the amount of time (in seconds) to negotiate
with the connected producers to send heartbeats (requires nsqd 0.2.19+)
:param requeue_delay: the base multiple used when calculating requeue delay
(multiplied by # of attempts)
:param tls_v1: enable TLS v1 encryption (requires nsqd 0.2.22+)
:param tls_options: dictionary of options to pass to `ssl.wrap_socket()
<http://docs.python.org/2/library/ssl.html#ssl.wrap_socket>`_ as
``**kwargs``
:param snappy: enable Snappy stream compression (requires nsqd 0.2.23+)
:param deflate: enable deflate stream compression (requires nsqd 0.2.23+)
:param deflate_level: configure the deflate compression level for this
connection (requires nsqd 0.2.23+)
:param output_buffer_size: size of the buffer (in bytes) used by nsqd
for buffering writes to this connection
:param output_buffer_timeout: timeout (in ms) used by nsqd before
flushing buffered writes (set to 0 to disable). **Warning**:
configuring clients with an extremely low (``< 25ms``)
``output_buffer_timeout`` has a significant effect on ``nsqd``
CPU usage (particularly with ``> 50`` clients connected).
:param sample_rate: take only a sample of the messages being sent
to the client. Not setting this or setting it to 0 will ensure
you get all the messages destined for the client.
Sample rate can be greater than 0 or less than 100 and the client
will receive that percentage of the message traffic.
(requires nsqd 0.2.25+)
:param user_agent: a string identifying the agent for this client
in the spirit of HTTP (default: ``<client_library_name>/<version>``)
(requires nsqd 0.2.25+)
:param auth_secret: a byte string passed when using nsq auth
(requires nsqd 1.0+)
:param msg_timeout: the amount of time (in seconds) that nsqd will wait
before considering messages that have been delivered to this
consumer timed out (requires nsqd 0.2.28+)
:param hostname: a string identifying the host where this client runs
(default: ``<hostname>``)
"""
def __init__(
self,
host,
port,
timeout=1.0,
heartbeat_interval=30,
requeue_delay=90,
tls_v1=False,
tls_options=None,
snappy=False,
deflate=False,
deflate_level=6,
user_agent=DEFAULT_USER_AGENT,
output_buffer_size=16 * 1024,
output_buffer_timeout=250,
sample_rate=0,
io_loop=None,
auth_secret=None,
msg_timeout=None,
hostname=None):
assert isinstance(host, string_types)
assert isinstance(port, int)
assert isinstance(timeout, float)
assert isinstance(tls_options, (dict, None.__class__))
assert isinstance(deflate_level, int)
assert isinstance(heartbeat_interval, int) and heartbeat_interval >= 1
assert isinstance(requeue_delay, int) and requeue_delay >= 0
assert isinstance(output_buffer_size, int) and output_buffer_size >= 0
assert isinstance(output_buffer_timeout, int) and output_buffer_timeout >= 0
assert isinstance(sample_rate, int) and sample_rate >= 0 and sample_rate < 100
assert isinstance(auth_secret, bytes_types + (None.__class__,))
assert tls_v1 and ssl or not tls_v1, \
'tls_v1 requires Python 2.6+ or Python 2.5 w/ pip install ssl'
assert msg_timeout is None or (isinstance(msg_timeout, (float, int)) and msg_timeout > 0)
self.state = INIT
self.host = host
self.port = port
self.timeout = timeout
self.last_recv_timestamp = time.time()
self.last_msg_timestamp = time.time()
self.in_flight = 0
self.rdy = 0
self.rdy_timeout = None
# for backwards compatibility when interacting with older nsqd
# (pre 0.2.20), default this to their hard-coded max
self.max_rdy_count = 2500
self.tls_v1 = tls_v1
self.tls_options = tls_options
self.snappy = snappy
self.deflate = deflate
self.deflate_level = deflate_level
self.hostname = hostname
if self.hostname is None:
self.hostname = socket.gethostname()
self.short_hostname = self.hostname.split('.')[0]
self.heartbeat_interval = heartbeat_interval * 1000
self.msg_timeout = int(msg_timeout * 1000) if msg_timeout else None
self.requeue_delay = requeue_delay
self.io_loop = io_loop
if not self.io_loop:
self.io_loop = tornado.ioloop.IOLoop.instance()
self.output_buffer_size = output_buffer_size
self.output_buffer_timeout = output_buffer_timeout
self.sample_rate = sample_rate
self.user_agent = user_agent
self._authentication_required = False # tracking server auth state
self.auth_secret = auth_secret
self.socket = None
self.stream = None
self._features_to_enable = []
self.last_rdy = 0
self.rdy = 0
self.callback_queue = []
super(AsyncConn, self).__init__()
@property
def id(self):
return str(self)
def __str__(self):
return self.host + ':' + str(self.port)
def connected(self):
return self.state == CONNECTED
def connecting(self):
return self.state == CONNECTING
def closed(self):
return self.state in (INIT, DISCONNECTED)
def connect(self):
if not self.closed():
return
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.settimeout(self.timeout)
self.socket.setblocking(0)
self.stream = tornado.iostream.IOStream(self.socket, io_loop=self.io_loop)
self.stream.set_close_callback(self._socket_close)
self.stream.set_nodelay(True)
self.state = CONNECTING
self.on(event.CONNECT, self._on_connect)
self.on(event.DATA, self._on_data)
self.stream.connect((self.host, self.port), self._connect_callback)
def _connect_callback(self):
self.state = CONNECTED
self.stream.write(protocol.MAGIC_V2)
self._start_read()
self.trigger(event.CONNECT, conn=self)
def _read_bytes(self, size, callback):
try:
self.stream.read_bytes(size, callback)
except IOError:
self.close()
self.trigger(
event.ERROR,
conn=self,
error=protocol.ConnectionClosedError('Stream is closed'),
)
def _start_read(self):
self._read_bytes(4, self._read_size)
def _socket_close(self):
self.state = DISCONNECTED
self.trigger(event.CLOSE, conn=self)
def close(self):
self.stream.close()
def _read_size(self, data):
try:
size = struct_l.unpack(data)[0]
except Exception:
self.close()
self.trigger(
event.ERROR,
conn=self,
error=protocol.IntegrityError('failed to unpack size'),
)
return
self._read_bytes(size, self._read_body)
def _read_body(self, data):
try:
self.trigger(event.DATA, conn=self, data=data)
except Exception:
logger.exception('uncaught exception in data event')
self._start_read()
def send(self, data):
self.stream.write(data)
def upgrade_to_tls(self, options=None):
assert ssl, 'tls_v1 requires Python 2.6+ or Python 2.5 w/ pip install ssl'
# in order to upgrade to TLS we need to *replace* the IOStream...
#
# first remove the event handler for the currently open socket
# so that when we add the socket to the new SSLIOStream below,
# it can re-add the appropriate event handlers.
self.io_loop.remove_handler(self.socket.fileno())
opts = {
'cert_reqs': ssl.CERT_REQUIRED,
'ca_certs': default_ca_certs()
}
opts.update(options or {})
self.socket = ssl.wrap_socket(self.socket, ssl_version=ssl.PROTOCOL_TLSv1,
do_handshake_on_connect=False, **opts)
self.stream = tornado.iostream.SSLIOStream(self.socket, io_loop=self.io_loop)
self.stream.set_close_callback(self._socket_close)
# now that the IOStream has been swapped we can kickstart
# the SSL handshake
self.stream._do_ssl_handshake()
def upgrade_to_snappy(self):
assert SnappySocket, 'snappy requires the python-snappy package'
# in order to upgrade to Snappy we need to use whatever IOStream
# is currently in place (normal or SSL)...
#
# first read any compressed bytes the existing IOStream might have
# already buffered and use that to bootstrap the SnappySocket, then
# monkey patch the existing IOStream by replacing its socket
# with a wrapper that will automagically handle compression.
existing_data = self.stream._consume(self.stream._read_buffer_size)
self.socket = SnappySocket(self.socket)
self.socket.bootstrap(existing_data)
self.stream.socket = self.socket
def upgrade_to_deflate(self):
# in order to upgrade to DEFLATE we need to use whatever IOStream
# is currently in place (normal or SSL)...
#
# first read any compressed bytes the existing IOStream might have
# already buffered and use that to bootstrap the DeflateSocket, then
# monkey patch the existing IOStream by replacing its socket
# with a wrapper that will automagically handle compression.
existing_data = self.stream._consume(self.stream._read_buffer_size)
self.socket = DeflateSocket(self.socket, self.deflate_level)
self.socket.bootstrap(existing_data)
self.stream.socket = self.socket
def send_rdy(self, value):
try:
self.send(protocol.ready(value))
except Exception as e:
self.close()
self.trigger(
event.ERROR,
conn=self,
error=protocol.SendError('failed to send RDY %d' % value, e),
)
return False
self.last_rdy = value
self.rdy = value
return True
def _on_connect(self, **kwargs):
identify_data = {
'short_id': self.short_hostname, # TODO remove when deprecating pre 1.0 support
'long_id': self.hostname, # TODO remove when deprecating pre 1.0 support
'client_id': self.short_hostname,
'hostname': self.hostname,
'heartbeat_interval': self.heartbeat_interval,
'feature_negotiation': True,
'tls_v1': self.tls_v1,
'snappy': self.snappy,
'deflate': self.deflate,
'deflate_level': self.deflate_level,
'output_buffer_timeout': self.output_buffer_timeout,
'output_buffer_size': self.output_buffer_size,
'sample_rate': self.sample_rate,
'user_agent': self.user_agent
}
if self.msg_timeout:
identify_data['msg_timeout'] = self.msg_timeout
self.trigger(event.IDENTIFY, conn=self, data=identify_data)
self.on(event.RESPONSE, self._on_identify_response)
try:
self.send(protocol.identify(identify_data))
except Exception as e:
self.close()
self.trigger(
event.ERROR,
conn=self,
error=protocol.SendError('failed to bootstrap connection', e),
)
def _on_identify_response(self, data, **kwargs):
self.off(event.RESPONSE, self._on_identify_response)
if data == b'OK':
logger.warning('nsqd version does not support feature negotiation')
return self.trigger(event.READY, conn=self)
try:
data = json.loads(data.decode('utf-8'))
except ValueError:
self.close()
self.trigger(
event.ERROR,
conn=self,
error=protocol.IntegrityError(
'failed to parse IDENTIFY response JSON from nsqd - %r' %
data
),
)
return
self.trigger(event.IDENTIFY_RESPONSE, conn=self, data=data)
if self.tls_v1 and data.get('tls_v1'):
self._features_to_enable.append('tls_v1')
if self.snappy and data.get('snappy'):
self._features_to_enable.append('snappy')
if self.deflate and data.get('deflate'):
self._features_to_enable.append('deflate')
if data.get('auth_required'):
self._authentication_required = True
if data.get('max_rdy_count'):
self.max_rdy_count = data.get('max_rdy_count')
else:
# for backwards compatibility when interacting with older nsqd
# (pre 0.2.20), default this to their hard-coded max
logger.warn('setting max_rdy_count to default value of 2500')
self.max_rdy_count = 2500
self.on(event.RESPONSE, self._on_response_continue)
self._on_response_continue(conn=self, data=None)
def _on_response_continue(self, data, **kwargs):
if self._features_to_enable:
feature = self._features_to_enable.pop(0)
if feature == 'tls_v1':
self.upgrade_to_tls(self.tls_options)
elif feature == 'snappy':
self.upgrade_to_snappy()
elif feature == 'deflate':
self.upgrade_to_deflate()
# the server will 'OK' after these connection upgrades, triggering another response
return
self.off(event.RESPONSE, self._on_response_continue)
if self.auth_secret and self._authentication_required:
self.on(event.RESPONSE, self._on_auth_response)
self.trigger(event.AUTH, conn=self, data=self.auth_secret)
try:
self.send(protocol.auth(self.auth_secret))
except Exception as e:
self.close()
self.trigger(
event.ERROR,
conn=self,
error=protocol.SendError('Error sending AUTH', e),
)
return
self.trigger(event.READY, conn=self)
def _on_auth_response(self, data, **kwargs):
try:
data = json.loads(data.decode('utf-8'))
except ValueError:
self.close()
self.trigger(
event.ERROR,
conn=self,
error=protocol.IntegrityError(
'failed to parse AUTH response JSON from nsqd - %r' % data
),
)
return
self.off(event.RESPONSE, self._on_auth_response)
self.trigger(event.AUTH_RESPONSE, conn=self, data=data)
return self.trigger(event.READY, conn=self)
def _on_data(self, data, **kwargs):
self.last_recv_timestamp = time.time()
frame, data = protocol.unpack_response(data)
if frame == protocol.FRAME_TYPE_MESSAGE:
self.last_msg_timestamp = time.time()
self.in_flight += 1
message = protocol.decode_message(data)
message.on(event.FINISH, self._on_message_finish)
message.on(event.REQUEUE, self._on_message_requeue)
message.on(event.TOUCH, self._on_message_touch)
self.trigger(event.MESSAGE, conn=self, message=message)
elif frame == protocol.FRAME_TYPE_RESPONSE and data == b'_heartbeat_':
self.send(protocol.nop())
self.trigger(event.HEARTBEAT, conn=self)
elif frame == protocol.FRAME_TYPE_RESPONSE:
self.trigger(event.RESPONSE, conn=self, data=data)
elif frame == protocol.FRAME_TYPE_ERROR:
self.trigger(event.ERROR, conn=self, error=protocol.Error(data))
def _on_message_requeue(self, message, backoff=True, time_ms=-1, **kwargs):
if backoff:
self.trigger(event.BACKOFF, conn=self)
else:
self.trigger(event.CONTINUE, conn=self)
self.in_flight -= 1
try:
time_ms = self.requeue_delay * message.attempts * 1000 if time_ms < 0 else time_ms
self.send(protocol.requeue(message.id, time_ms))
except Exception as e:
self.close()
self.trigger(event.ERROR, conn=self, error=protocol.SendError(
'failed to send REQ %s @ %d' % (message.id, time_ms), e))
def _on_message_finish(self, message, **kwargs):
self.trigger(event.RESUME, conn=self)
self.in_flight -= 1
try:
self.send(protocol.finish(message.id))
except Exception as e:
self.close()
self.trigger(
event.ERROR,
conn=self,
error=protocol.SendError('failed to send FIN %s' % message.id, e),
)
def _on_message_touch(self, message, **kwargs):
try:
self.send(protocol.touch(message.id))
except Exception as e:
self.close()
self.trigger(
event.ERROR,
conn=self,
error=protocol.SendError('failed to send TOUCH %s' % message.id, e),
)
|
the-stack_0_56 | import numpy as np
from openmdao.api import ExplicitComponent
from pycycle.constants import P_REF, R_UNIVERSAL_ENG, R_UNIVERSAL_SI, MIN_VALID_CONCENTRATION
class PropsCalcs(ExplicitComponent):
"""computes, S, H, Cp, Cv, gamma, given a converged equilibirum mixture"""
def initialize(self):
self.options.declare('thermo', desc='thermodynamic data object', recordable=False)
def setup(self):
thermo = self.options['thermo']
self.add_input('T', val=284., units="degK", desc="Temperature")
self.add_input('P', val=1., units='bar', desc="Pressure")
self.add_input('n', val=np.ones(thermo.num_prod),
desc="molar concentration of the mixtures, last element is the total molar concentration")
self.add_input('n_moles', val=1., desc="1/molar_mass for gaseous mixture")
ne1 = thermo.num_element + 1
self.add_input('result_T', val=np.ones(ne1),
desc="result of the linear solve for T", shape=ne1)
self.add_input('result_P', val=np.ones(ne1),
desc="result of the linear solve for T", shape=ne1)
self.add_output('h', val=1., units="cal/g", desc="enthalpy")
self.add_output('S', val=1., units="cal/(g*degK)", desc="entropy")
self.add_output('gamma', val=1.4, lower=1.0, upper=2.0, desc="ratio of specific heats")
self.add_output('Cp', val=1., units="cal/(g*degK)", desc="Specific heat at constant pressure")
self.add_output('Cv', val=1., units="cal/(g*degK)", desc="Specific heat at constant volume")
self.add_output('rho', val=0.0004, units="g/cm**3", desc="density")
self.add_output('R', val=1., units='(N*m)/(kg*degK)', desc='Specific gas constant')
# self.deriv_options['check_type'] = "cs"
# partial derivs setup
self.declare_partials('h', ['n', 'T'])
self.declare_partials('S', ['n', 'T', 'P'])
self.declare_partials('S', 'n_moles')
self.declare_partials('Cp', ['n', 'T', 'result_T'])
self.declare_partials('rho', ['T', 'P', 'n_moles'])
self.declare_partials('gamma', ['n', 'n_moles', 'T', 'result_T', 'result_P'])
self.declare_partials('Cv', ['n', 'n_moles', 'T', 'result_T', 'result_P'])
self.declare_partials('R', 'n_moles', val=R_UNIVERSAL_SI)
def compute(self, inputs, outputs):
thermo = self.options['thermo']
num_prod = thermo.num_prod
num_element = thermo.num_element
T = inputs['T']
P = inputs['P']
result_T = inputs['result_T']
nj = inputs['n'][:num_prod]
# nj[nj<0] = 1e-10 # ensure all concentrations stay non-zero
n_moles = inputs['n_moles']
self.dlnVqdlnP = dlnVqdlnP = -1 + inputs['result_P'][num_element]
self.dlnVqdlnT = dlnVqdlnT = 1 - result_T[num_element]
self.Cp0_T = Cp0_T = thermo.Cp0(T)
Cpf = np.sum(nj*Cp0_T)
self.H0_T = H0_T = thermo.H0(T)
self.S0_T = S0_T = thermo.S0(T)
self.nj_H0 = nj_H0 = nj*H0_T
# Cpe = 0
# for i in range(0, num_element):
# for j in range(0, num_prod):
# Cpe -= thermo.aij[i][j]*nj[j]*H0_T[j]*self.result_T[i]
# vectorization of this for loop for speed
Cpe = -np.sum(np.sum(thermo.aij*nj_H0, axis=1)*result_T[:num_element])
Cpe += np.sum(nj_H0*H0_T) # nj*H0_T**2
Cpe -= np.sum(nj_H0)*result_T[num_element]
outputs['h'] = np.sum(nj_H0)*R_UNIVERSAL_ENG*T
try:
val = (S0_T+np.log(n_moles/nj/(P/P_REF)))
except FloatingPointError:
P = 1e-5
val = (S0_T+np.log(n_moles/nj/(P/P_REF)))
outputs['S'] = R_UNIVERSAL_ENG * np.sum(nj*val)
outputs['Cp'] = Cp = (Cpe+Cpf)*R_UNIVERSAL_ENG
outputs['Cv'] = Cv = Cp + n_moles*R_UNIVERSAL_ENG*dlnVqdlnT**2/dlnVqdlnP
outputs['gamma'] = -1*Cp/Cv/dlnVqdlnP
outputs['rho'] = P/(n_moles*R_UNIVERSAL_SI*T)*100 # 1 bar is 100 kPa
outputs['R'] = R_UNIVERSAL_SI*n_moles #(m**3 * Pa)/(mol*degK)
def compute_partials(self, inputs, J):
thermo = self.options['thermo']
num_prod = thermo.num_prod
num_element = thermo.num_element
T = inputs['T']
P = inputs['P']
nj = inputs['n']
n_moles = inputs['n_moles']
result_T = inputs['result_T']
result_T_last = result_T[num_element]
result_T_rest = result_T[:num_element]
dlnVqdlnP = -1 + inputs['result_P'][num_element]
dlnVqdlnT = 1 - result_T_last
Cp0_T = thermo.Cp0(T)
Cpf = np.sum(nj * Cp0_T)
H0_T = thermo.H0(T)
S0_T = thermo.S0(T)
nj_H0 = nj * H0_T
# Cpe = 0
# for i in range(0, num_element):
# for j in range(0, num_prod):
# Cpe -= thermo.aij[i][j]*nj[j]*H0_T[j]*self.result_T[i]
# vectorization of this for loop for speed
Cpe = -np.sum(np.sum(thermo.aij * nj_H0, axis=1) * result_T_rest)
Cpe += np.sum(nj_H0 * H0_T) # nj*H0_T**2
Cpe -= np.sum(nj_H0) * result_T_last
Cp = (Cpe + Cpf) * R_UNIVERSAL_ENG
Cv = Cp + n_moles * R_UNIVERSAL_ENG * dlnVqdlnT ** 2 / dlnVqdlnP
dH0_dT = thermo.H0_applyJ(T, 1.)
dS0_dT = thermo.S0_applyJ(T, 1.)
dCp0_dT = thermo.Cp0_applyJ(T, 1.)
sum_nj_R = n_moles*R_UNIVERSAL_SI
drho_dT = P/(sum_nj_R*T**2)*100
drho_dnmoles = -P/(n_moles**2*R_UNIVERSAL_SI*T)*100
dCpe_dT = 2*np.sum(nj*H0_T*dH0_dT)
# for i in range(num_element):
# self.dCpe_dT -= np.sum(aij[i]*nj*self.dH0_dT)*self.result_T[i]
dCpe_dT -= np.sum(np.sum(thermo.aij*nj*dH0_dT, axis=1)*result_T_rest)
dCpe_dT -= np.sum(nj*dH0_dT)*result_T_last
dCpf_dT = np.sum(nj*dCp0_dT)
J['h', 'T'] = R_UNIVERSAL_ENG*(np.sum(nj*dH0_dT)*T + np.sum(nj*H0_T))
J['h', 'n'] = R_UNIVERSAL_ENG*T*H0_T
J['S', 'n'] = R_UNIVERSAL_ENG*(S0_T + np.log(n_moles) - np.log(P/P_REF) - np.log(nj) - 1)
# zero out any derivs w.r.t trace species
_trace = np.where(nj <= MIN_VALID_CONCENTRATION+1e-20)
J['S', 'n'][0, _trace] = 0
J['S', 'T'] = R_UNIVERSAL_ENG*np.sum(nj*dS0_dT)
J['S', 'P'] = -R_UNIVERSAL_ENG*np.sum(nj/P)
J['S', 'n_moles'] = R_UNIVERSAL_ENG*np.sum(nj)/n_moles
J['rho', 'T'] = -P/(sum_nj_R*T**2)*100
J['rho', 'n_moles'] = -P/(n_moles**2*R_UNIVERSAL_SI*T)*100
J['rho', 'P'] = 1/(sum_nj_R*T)*100
dCp_dnj = R_UNIVERSAL_ENG*(Cp0_T + H0_T**2)
for j in range(num_prod):
for i in range(num_element):
dCp_dnj[j] -= R_UNIVERSAL_ENG*thermo.aij[i][j]*H0_T[j]*result_T[i]
dCp_dnj -= R_UNIVERSAL_ENG * H0_T * result_T_last
J['Cp', 'n'] = dCp_dnj
dCp_dresultT = np.zeros(num_element+1, dtype=inputs._data.dtype)
# for i in range(num_element):
# self.dCp_dresultT[i] = -R_UNIVERSAL_ENG*np.sum(aij[i]*nj_H0)
dCp_dresultT[:num_element] = -R_UNIVERSAL_ENG*np.sum(thermo.aij*nj_H0, axis=1)
dCp_dresultT[num_element] = - R_UNIVERSAL_ENG*np.sum(nj_H0)
J['Cp', 'result_T'] = dCp_dresultT
dCp_dT = (dCpe_dT + dCpf_dT)*R_UNIVERSAL_ENG
J['Cp', 'T'] = dCp_dT
J['Cv', 'n'] = dCp_dnj
dCv_dnmoles = R_UNIVERSAL_ENG*dlnVqdlnT**2/dlnVqdlnP
J['Cv', 'n_moles'] = dCv_dnmoles
J['Cv', 'T'] = dCp_dT
dCv_dresultP = np.zeros((1, num_element+1), dtype=inputs._data.dtype)
dCv_dresultP[0, -1] = -R_UNIVERSAL_ENG*n_moles*(dlnVqdlnT/dlnVqdlnP)**2
J['Cv', 'result_P'] = dCv_dresultP
dCv_dresultT = dCp_dresultT.copy()
dCv_dresultT[-1] -= n_moles*R_UNIVERSAL_ENG/dlnVqdlnP*(2*dlnVqdlnT)
dCv_dresultT_last = dCv_dresultT[-1]
J['Cv', 'result_T'] = dCv_dresultT
J['gamma', 'n'] = dCp_dnj*(Cp/Cv-1)/(dlnVqdlnP*Cv)
J['gamma', 'n_moles'] = Cp/dlnVqdlnP/Cv**2*dCv_dnmoles
J['gamma', 'T'] = dCp_dT/dlnVqdlnP/Cv*(Cp/Cv-1)
dgamma_dresultT = np.zeros((1, num_element+1), dtype=inputs._data.dtype)
dgamma_dresultT[0, :num_element] = 1/Cv/dlnVqdlnP*dCp_dresultT[:num_element]*(Cp/Cv-1)
dgamma_dresultT[0, -1] = (-dCp_dresultT[-1]/Cv+Cp/Cv**2*dCv_dresultT_last)/dlnVqdlnP
J['gamma', 'result_T'] = dgamma_dresultT
gamma_dresultP = np.zeros((1, num_element+1), dtype=inputs._data.dtype)
gamma_dresultP[0, num_element] = Cp/Cv/dlnVqdlnP*(dCv_dresultP[0, -1]/Cv + 1/dlnVqdlnP)
J['gamma', 'result_P'] = gamma_dresultP
if __name__ == "__main__":
from openmdao.api import Problem, Group, IndepVarComp
from pycycle.cea import species_data
thermo = species_data.Properties(species_data.co2_co_o2)
p = Problem()
model = p.model = Group()
indeps = model.add_subsystem('indeps', IndepVarComp(), promotes=['*'])
indeps.add_output('T', 2761.56784655, units='degK')
indeps.add_output('P', 1.034210, units='bar')
indeps.add_output('n', val=np.array([2.272e-02, 1.000e-10, 1.136e-02]))
indeps.add_output('n_moles', val=0.0340831628675)
indeps.add_output('result_T', val=np.array([-3.02990116, 1.95459777, -0.05024694]))
indeps.add_output('result_P', val=np.array([0.53047724, 0.48627081, -0.00437025]))
model.add_subsystem('calcs', PropsCalcs(thermo=thermo), promotes=['*'])
p.setup()
p.run_model()
print("outputs")
print('h', p['h'])
print('S', p['S'])
print('gamma', p['gamma'])
print('Cp', p['Cp'])
print('Cv', p['Cv'])
print('rho', p['rho'])
print()
print()
print('############################################')
p.model.run_linearize()
jac = p.model.get_subsystem('calcs').jacobian._subjacs
for pair in jac:
print(pair)
print(jac[pair])
print()
|
the-stack_0_57 | import numpy as np
from PIL import Image
from tqdm import tqdm
import torch
from torch import nn, optim
from torch.autograd import Variable, grad
from torchvision import utils
from model import Generator, Discriminator
from datetime import datetime
import random
import copy
import os
import config
import utils
import data
import evaluate
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
from torch.nn import functional as F
args = config.get_config()
writer = None
def batch_size(reso):
if args.gpu_count == 1:
save_memory = False
if not save_memory:
batch_table = {4:128, 8:128, 16:128, 32:64, 64:32, 128:16, 256:8, 512:4, 1024:1}
else:
batch_table = {4:128, 8:128, 16:128, 32:32, 64:16, 128:4, 256:2, 512:2, 1024:1}
elif args.gpu_count == 2:
batch_table = {4:256, 8:256, 16:256, 32:128, 64:64, 128:32, 256:16, 512:8, 1024:2}
elif args.gpu_count == 4:
batch_table = {4:512, 8:256, 16:128, 32:64, 64:32, 128:32, 256:32, 512:16, 1024:4}
elif args.gpu_count == 8:
batch_table = {4:512, 8:512, 16:512, 32:256, 64:256, 128:128, 256:64, 512:32, 1024:8}
else:
assert(False)
return batch_table[reso]
def batch_size_by_phase(phase):
return batch_size(4 * 2 ** phase)
class Session:
def __init__(self):
# Note: 4 requirements for sampling from pre-existing models:
# 1) Ensure you save and load both multi-gpu versions (DataParallel) or both not.
# 2) Ensure you set the same phase value as the pre-existing model and that your local and global alpha=1.0 are set
# 3) Sample from the g_running, not from the latest generator
# 4) You may need to warm up the g_running by running evaluate.reconstruction_dryrun() first
self.alpha = -1
self.sample_i = min(args.start_iteration, 0)
self.phase = args.start_phase
self.generator = nn.DataParallel( Generator(args.nz+1, args.n_label).cuda() )
self.g_running = nn.DataParallel( Generator(args.nz+1, args.n_label).cuda() )
self.encoder = nn.DataParallel( Discriminator(nz = args.nz+1, n_label = args.n_label, binary_predictor = args.train_mode == config.MODE_GAN).cuda() )
print("Using ", torch.cuda.device_count(), " GPUs!")
self.reset_opt()
print('Session created.')
def reset_opt(self):
self.optimizerG = optim.Adam(self.generator.parameters(), args.lr, betas=(0.0, 0.99))
self.optimizerD = optim.Adam(self.encoder.parameters(), args.lr, betas=(0.0, 0.99)) # includes all the encoder parameters...
def save_all(self, path):
torch.save({'G_state_dict': self.generator.state_dict(),
'D_state_dict': self.encoder.state_dict(),
'G_running_state_dict': self.g_running.state_dict(),
'optimizerD': self.optimizerD.state_dict(),
'optimizerG': self.optimizerG.state_dict(),
'iteration': self.sample_i,
'phase': self.phase,
'alpha': self.alpha},
path)
def load(self, path):
checkpoint = torch.load(path)
self.sample_i = int(checkpoint['iteration'])
self.generator.load_state_dict(checkpoint['G_state_dict'])
self.g_running.load_state_dict(checkpoint['G_running_state_dict'])
self.encoder.load_state_dict(checkpoint['D_state_dict'])
if args.reset_optimizers <= 0:
self.optimizerD.load_state_dict(checkpoint['optimizerD'])
self.optimizerG.load_state_dict(checkpoint['optimizerG'])
print("Reloaded old optimizers")
else:
print("Despite loading the state, we reset the optimizers.")
self.alpha = checkpoint['alpha']
self.phase = int(checkpoint['phase'])
if args.start_phase > 0: #If the start phase has been manually set, try to actually use it (e.g. when have trained 64x64 for extra rounds and then turning the model over to 128x128)
self.phase = min(args.start_phase, self.phase)
print("Use start phase: {}".format(self.phase))
if self.phase > args.max_phase:
print('Warning! Loaded model claimed phase {} but max_phase={}'.format(self.phase, args.max_phase))
self.phase = args.max_phase
def create(self):
if args.start_iteration <= 0:
args.start_iteration = 1
if args.no_progression:
self.sample_i = args.start_iteration = int( (args.max_phase + 0.5) * args.images_per_stage ) # Start after the fade-in stage of the last iteration
args.force_alpha = 1.0
print("Progressive growth disabled. Setting start step = {} and alpha = {}".format(args.start_iteration, args.force_alpha))
else:
reload_from = '{}/checkpoint/{}_state'.format(args.save_dir, str(args.start_iteration).zfill(6)) #e.g. '604000' #'600000' #latest'
print(reload_from)
if os.path.exists(reload_from):
self.load(reload_from)
print("Loaded {}".format(reload_from))
print("Iteration asked {} and got {}".format(args.start_iteration, self.sample_i))
if args.testonly:
self.generator = copy.deepcopy(self.g_running)
else:
assert(not args.testonly)
self.sample_i = args.start_iteration
print('Start from iteration {}'.format(self.sample_i))
self.g_running.train(False)
if args.force_alpha >= 0.0:
self.alpha = args.force_alpha
accumulate(self.g_running, self.generator, 0)
def setup():
utils.make_dirs()
if not args.testonly:
config.log_args(args)
if args.use_TB:
from dateutil import tz
from tensorboardX import SummaryWriter
dt = datetime.now(tz.gettz('Europe/Helsinki')).strftime(r"%y%m%d_%H%M")
global writer
writer = SummaryWriter("{}/{}/{}".format(args.summary_dir, args.save_dir, dt))
random.seed(args.manual_seed)
torch.manual_seed(args.manual_seed)
torch.cuda.manual_seed_all(args.manual_seed)
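# accumulate() keeps an exponential moving average of model2's parameters in
# model1 (decay 0.999 by default); g_running is this EMA copy of the
# generator and is the model actually used for sampling and evaluation.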
def accumulate(model1, model2, decay=0.999):
par1 = dict(model1.named_parameters())
par2 = dict(model2.named_parameters())
for k in par1.keys():
par1[k].data.mul_(decay).add_(1 - decay, par2[k].data)
def get_grad_penalty(discriminator, real_image, fake_image, step, alpha):
""" Used in WGAN-GP version only. """
eps = torch.rand(batch_size_by_phase(step), 1, 1, 1).cuda()
if eps.size(0) != real_image.size(0) or eps.size(0) != fake_image.size(0):
# If end-of-batch situation, we restrict other vectors to matcht the number of training images available.
eps = eps[:real_image.size(0)]
fake_image = fake_image[:real_image.size(0)]
x_hat = eps * real_image.data + (1 - eps) * fake_image.data
x_hat = Variable(x_hat, requires_grad=True)
if args.train_mode == config.MODE_GAN: # Regular GAN mode
hat_predict, _ = discriminator(x_hat, step, alpha, args.use_ALQ)
grad_x_hat = grad(
outputs=hat_predict.sum(), inputs=x_hat, create_graph=True)[0]
else:
hat_z = discriminator(x_hat, step, alpha, args.use_ALQ)
# KL_fake: \Delta( e(g(Z)) , Z ) -> max_e
KL_maximizer = KLN01Loss(direction=args.KL, minimize=False)
KL_fake = KL_maximizer(hat_z) * args.fake_D_KL_scale
grad_x_hat = grad(
outputs=KL_fake.sum(), inputs=x_hat, create_graph=True)[0]
# Push the gradients of the interpolated samples towards 1
grad_penalty = ((grad_x_hat.view(grad_x_hat.size(0), -1)
.norm(2, dim=1) - 1)**2).mean()
grad_penalty = 10 * grad_penalty
return grad_penalty
def D_prediction_of_G_output(generator, encoder, step, alpha):
# To use labels, enable here and elsewhere:
#label = Variable(torch.ones(batch_size_by_phase(step), args.n_label)).cuda()
# label = Variable(
# torch.multinomial(
# torch.ones(args.n_label), args.batch_size, replacement=True)).cuda()
myz = Variable(torch.randn(batch_size_by_phase(step), args.nz)).cuda(non_blocking=(args.gpu_count>1))
myz = utils.normalize(myz)
myz, label = utils.split_labels_out_of_latent(myz)
fake_image = generator(myz, label, step, alpha)
fake_predict, _ = encoder(fake_image, step, alpha, args.use_ALQ)
loss = fake_predict.mean()
return loss, fake_image
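# KLN01Loss fits a diagonal Gaussian to the batch of latent codes
# (per-dimension mean and variance) and returns its KL divergence against
# N(0, 1); 'qp' measures KL(fitted || N(0, 1)) and 'pq' the reverse
# direction (with the deviations from the AGE reference noted in the inline
# comments). With minimize=False the sign is flipped so the same term can be
# maximized instead.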
class KLN01Loss(torch.nn.Module): #Adapted from https://github.com/DmitryUlyanov/AGE
def __init__(self, direction, minimize):
super(KLN01Loss, self).__init__()
self.minimize = minimize
assert direction in ['pq', 'qp'], 'direction?'
self.direction = direction
def forward(self, samples):
assert samples.nelement() == samples.size(1) * samples.size(0), '?'
samples = samples.view(samples.size(0), -1)
self.samples_var = utils.var(samples)
self.samples_mean = samples.mean(0)
samples_mean = self.samples_mean
samples_var = self.samples_var
if self.direction == 'pq':
t1 = (1 + samples_mean.pow(2)) / (2 * samples_var.pow(2))
t2 = samples_var.log()
KL = (t1 + t2 - 0.5).mean()
else:
# In the AGE implementation, there is samples_var^2 instead of samples_var^1
t1 = (samples_var + samples_mean.pow(2)) / 2
# In the AGE implementation, this did not have the 0.5 scaling factor:
t2 = -0.5*samples_var.log()
KL = (t1 + t2 - 0.5).mean()
if not self.minimize:
KL *= -1
return KL
def train(generator, encoder, g_running, train_data_loader, test_data_loader, session, total_steps, train_mode):
pbar = tqdm(initial=session.sample_i, total = total_steps)
benchmarking = False
match_x = args.match_x
generatedImagePool = None
refresh_dataset = True
refresh_imagePool = True
# After the Loading stage, we cycle through successive Fade-in and Stabilization stages
batch_count = 0
reset_optimizers_on_phase_start = False
# TODO Unhack this (only affects the episode count statistics anyway):
if args.data != 'celebaHQ':
epoch_len = len(train_data_loader(1,4).dataset)
else:
epoch_len = train_data_loader._len['data4x4']
if args.step_offset != 0:
if args.step_offset == -1:
args.step_offset = session.sample_i
print("Step offset is {}".format(args.step_offset))
session.phase += args.phase_offset
session.alpha = 0.0
while session.sample_i < total_steps:
####################### Phase Maintenance #######################
steps_in_previous_phases = max(session.phase * args.images_per_stage, args.step_offset)
sample_i_current_stage = session.sample_i - steps_in_previous_phases
# If we can move to the next phase
if sample_i_current_stage >= args.images_per_stage:
if session.phase < args.max_phase: # If any phases left
iteration_levels = int(sample_i_current_stage / args.images_per_stage)
session.phase += iteration_levels
sample_i_current_stage -= iteration_levels * args.images_per_stage
match_x = args.match_x # Reset to non-matching phase
print("iteration B alpha={} phase {} will be reduced to 1 and [max]".format(sample_i_current_stage, session.phase))
refresh_dataset = True
refresh_imagePool = True # Reset the pool to avoid images of 2 different resolutions in the pool
if reset_optimizers_on_phase_start:
utils.requires_grad(generator)
utils.requires_grad(encoder)
generator.zero_grad()
encoder.zero_grad()
session.reset_opt()
print("Optimizers have been reset.")
reso = 4 * 2 ** session.phase
# If we can switch from fade-training to stable-training
if sample_i_current_stage >= args.images_per_stage/2:
if session.alpha < 1.0:
                refresh_dataset = True  # Refresh the dataset generator, since we no longer have to fade
match_x = args.match_x * args.matching_phase_x
else:
match_x = args.match_x
session.alpha = min(1, sample_i_current_stage * 2.0 / args.images_per_stage) # For 100k, it was 0.00002 = 2.0 / args.images_per_stage
if refresh_dataset:
train_dataset = data.Utils.sample_data2(train_data_loader, batch_size(reso), reso, session)
refresh_dataset = False
print("Refreshed dataset. Alpha={} and iteration={}".format(session.alpha, sample_i_current_stage))
if refresh_imagePool:
imagePoolSize = 200 if reso < 256 else 100
generatedImagePool = utils.ImagePool(imagePoolSize) #Reset the pool to avoid images of 2 different resolutions in the pool
refresh_imagePool = False
print('Image pool created with size {} because reso is {}'.format(imagePoolSize, reso))
####################### Training init #######################
z = Variable( torch.FloatTensor(batch_size(reso), args.nz, 1, 1) ).cuda(non_blocking=(args.gpu_count>1))
KL_minimizer = KLN01Loss(direction=args.KL, minimize=True)
KL_maximizer = KLN01Loss(direction=args.KL, minimize=False)
stats = {}
one = torch.FloatTensor([1]).cuda(non_blocking=(args.gpu_count>1))
try:
real_image, _ = next(train_dataset)
except (OSError, StopIteration):
train_dataset = data.Utils.sample_data2(train_data_loader, batch_size(reso), reso, session)
real_image, _ = next(train_dataset)
####################### DISCRIMINATOR / ENCODER ###########################
utils.switch_grad_updates_to_first_of(encoder, generator)
encoder.zero_grad()
x = Variable(real_image).cuda(non_blocking=(args.gpu_count>1))
kls = ""
if train_mode == config.MODE_GAN:
# Discriminator for real samples
real_predict, _ = encoder(x, session.phase, session.alpha, args.use_ALQ)
real_predict = real_predict.mean() \
- 0.001 * (real_predict ** 2).mean()
real_predict.backward(-one) # Towards 1
# (1) Generator => D. Identical to (2) see below
fake_predict, fake_image = D_prediction_of_G_output(generator, encoder, session.phase, session.alpha)
fake_predict.backward(one)
# Grad penalty
grad_penalty = get_grad_penalty(encoder, x, fake_image, session.phase, session.alpha)
grad_penalty.backward()
elif train_mode == config.MODE_CYCLIC:
e_losses = []
# e(X)
real_z = encoder(x, session.phase, session.alpha, args.use_ALQ)
if args.use_real_x_KL:
# KL_real: - \Delta( e(X) , Z ) -> max_e
KL_real = KL_minimizer(real_z) * args.real_x_KL_scale
e_losses.append(KL_real)
stats['real_mean'] = KL_minimizer.samples_mean.data.mean()
stats['real_var'] = KL_minimizer.samples_var.data.mean()
stats['KL_real'] = KL_real.data.item()
kls = "{0:.3f}".format(stats['KL_real'])
# The final entries are the label. Normal case, just 1. Extract it/them, and make it [b x 1]:
real_z, label = utils.split_labels_out_of_latent(real_z)
recon_x = generator(real_z, label, session.phase, session.alpha)
if args.use_loss_x_reco:
# match_x: E_x||g(e(x)) - x|| -> min_e
err = utils.mismatch(recon_x, x, args.match_x_metric) * match_x
e_losses.append(err)
stats['x_reconstruction_error'] = err.item()
args.use_wpgan_grad_penalty = False
grad_penalty = 0.0
if args.use_loss_fake_D_KL:
# TODO: The following codeblock is essentially the same as the KL_minimizer part on G side. Unify
utils.populate_z(z, args.nz+args.n_label, args.noise, batch_size(reso))
z = torch.squeeze(z)
z, label = utils.split_labels_out_of_latent(z)
fake = generator(z, label, session.phase, session.alpha).detach()
if session.alpha >= 1.0:
fake = generatedImagePool.query(fake.data)
# e(g(Z))
egz = encoder(fake, session.phase, session.alpha, args.use_ALQ)
# KL_fake: \Delta( e(g(Z)) , Z ) -> max_e
KL_fake = KL_maximizer(egz) * args.fake_D_KL_scale
e_losses.append(KL_fake)
stats['fake_mean'] = KL_maximizer.samples_mean.data.mean()
stats['fake_var'] = KL_maximizer.samples_var.data.mean()
stats['KL_fake'] = -KL_fake.item()
kls = "{0}/{1:.3f}".format(kls, stats['KL_fake'])
if args.use_wpgan_grad_penalty:
grad_penalty = get_grad_penalty(encoder, x, fake, session.phase, session.alpha)
# Update e
if len(e_losses) > 0:
e_loss = sum(e_losses)
stats['E_loss'] = np.float32(e_loss.cpu().detach().numpy())
e_loss.backward()
if args.use_wpgan_grad_penalty:
grad_penalty.backward()
stats['Grad_penalty'] = grad_penalty.data
#book-keeping
disc_loss_val = e_loss.item()
session.optimizerD.step()
torch.cuda.empty_cache()
######################## GENERATOR / DECODER #############################
if (batch_count + 1) % args.n_critic == 0:
utils.switch_grad_updates_to_first_of(generator, encoder)
for _ in range(args.n_generator):
generator.zero_grad()
g_losses = []
if train_mode == config.MODE_GAN:
fake_predict, _ = D_prediction_of_G_output(generator, encoder, session.phase, session.alpha)
loss = -fake_predict
g_losses.append(loss)
elif train_mode == config.MODE_CYCLIC: #TODO We push the z variable around here like idiots
def KL_of_encoded_G_output(generator, z):
utils.populate_z(z, args.nz+args.n_label, args.noise, batch_size(reso))
z, label = utils.split_labels_out_of_latent(z)
fake = generator(z, label, session.phase, session.alpha)
egz = encoder(fake, session.phase, session.alpha, args.use_ALQ)
# KL_fake: \Delta( e(g(Z)) , Z ) -> min_g
return egz, label, KL_minimizer(egz) * args.fake_G_KL_scale, z
egz, label, kl, z = KL_of_encoded_G_output(generator, z)
if args.use_loss_KL_z:
g_losses.append(kl) # G minimizes this KL
stats['KL(Phi(G))'] = kl.item()
kls = "{0}/{1:.3f}".format(kls, stats['KL(Phi(G))'])
if args.use_loss_z_reco:
z = torch.cat((z, label), 1)
z_diff = utils.mismatch(egz, z, args.match_z_metric) * args.match_z # G tries to make the original z and encoded z match
g_losses.append(z_diff)
if len(g_losses) > 0:
loss = sum(g_losses)
stats['G_loss'] = np.float32(loss.cpu().detach().numpy())
loss.backward()
# Book-keeping only:
gen_loss_val = loss.item()
session.optimizerG.step()
torch.cuda.empty_cache()
if train_mode == config.MODE_CYCLIC:
if args.use_loss_z_reco:
stats['z_reconstruction_error'] = z_diff.item()
accumulate(g_running, generator)
del z, x, one, real_image, real_z, KL_real, label, recon_x, fake, egz, KL_fake, kl, z_diff
if train_mode == config.MODE_CYCLIC:
if args.use_TB:
for key,val in stats.items():
writer.add_scalar(key, val, session.sample_i)
elif batch_count % 100 == 0:
print(stats)
if args.use_TB:
writer.add_scalar('LOD', session.phase + session.alpha, session.sample_i)
######################## Statistics ########################
b = batch_size_by_phase(session.phase)
zr, xr = (stats['z_reconstruction_error'], stats['x_reconstruction_error']) if train_mode == config.MODE_CYCLIC else (0.0, 0.0)
e = (session.sample_i / float(epoch_len))
pbar.set_description(
('{0}; it: {1}; phase: {2}; b: {3:.1f}; Alpha: {4:.3f}; Reso: {5}; E: {6:.2f}; KL(real/fake/fakeG): {7}; z-reco: {8:.2f}; x-reco {9:.3f}; real_var {10:.4f}').format(batch_count+1, session.sample_i+1, session.phase, b, session.alpha, reso, e, kls, zr, xr, stats['real_var'])
)
#(f'{i + 1}; it: {iteration+1}; b: {b:.1f}; G: {gen_loss_val:.5f}; D: {disc_loss_val:.5f};'
# f' Grad: {grad_loss_val:.5f}; Alpha: {alpha:.3f}; Reso: {reso}; S-mean: {real_mean:.3f}; KL(real/fake/fakeG): {kls}; z-reco: {zr:.2f}'))
pbar.update(batch_size(reso))
session.sample_i += batch_size(reso) # if not benchmarking else 100
batch_count += 1
######################## Saving ########################
if batch_count % args.checkpoint_cycle == 0:
for postfix in {'latest', str(session.sample_i).zfill(6)}:
session.save_all('{}/{}_state'.format(args.checkpoint_dir, postfix))
print("Checkpointed to {}".format(session.sample_i))
######################## Tests ########################
try:
evaluate.tests_run(g_running, encoder, test_data_loader, session, writer,
reconstruction = (batch_count % 800 == 0),
interpolation = (batch_count % 800 == 0),
collated_sampling = (batch_count % 800 == 0),
individual_sampling = (batch_count % (args.images_per_stage/batch_size(reso)/4) == 0)
)
except (OSError, StopIteration):
print("Skipped periodic tests due to an exception.")
pbar.close()
def main():
setup()
session = Session()
session.create()
print('PyTorch {}'.format(torch.__version__))
if args.train_path:
train_data_loader = data.get_loader(args.data, args.train_path)
else:
train_data_loader = None
if args.test_path:
test_data_loader = data.get_loader(args.data, args.test_path)
elif args.aux_inpath:
test_data_loader = data.get_loader(args.data, args.aux_inpath)
else:
test_data_loader = None
# 4 modes: Train (with data/train), test (with data/test), aux-test (with custom aux_inpath), dump-training-set
if args.run_mode == config.RUN_TRAIN:
train(session.generator, session.encoder, session.g_running, train_data_loader, test_data_loader,
session = session,
total_steps = args.total_kimg * 1000,
train_mode = args.train_mode)
elif args.run_mode == config.RUN_TEST:
if args.reconstructions_N > 0 or args.interpolate_N > 0:
evaluate.Utils.reconstruction_dryrun(session.generator, session.encoder, test_data_loader, session=session)
evaluate.tests_run(session.generator, session.encoder, test_data_loader, session=session, writer=writer)
elif args.run_mode == config.RUN_DUMP:
session.phase = args.start_phase
data.dump_training_set(train_data_loader, args.dump_trainingset_N, args.dump_trainingset_dir, session)
if __name__ == '__main__':
main()
|
the-stack_0_58 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import copy
import itertools
import json
import logging
import math
import os
import pkgutil
import socket
import traceback
from collections import defaultdict
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Tuple
from urllib.parse import quote, unquote
import lazy_object_proxy
import markdown
import sqlalchemy as sqla
from flask import (
Markup, Response, escape, flash, jsonify, make_response, redirect, render_template, request,
session as flask_session, url_for,
)
from flask_appbuilder import BaseView, ModelView, expose, has_access, permission_name
from flask_appbuilder.actions import action
from flask_appbuilder.models.sqla.filters import BaseFilter
from flask_babel import lazy_gettext
from jinja2.utils import htmlsafe_json_dumps # type: ignore
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
from sqlalchemy import and_, desc, func, or_, union_all
from sqlalchemy.orm import joinedload
from wtforms import SelectField, validators
import airflow
from airflow import models, settings
from airflow._vendor import nvd3
from airflow.api.common.experimental.mark_tasks import (
set_dag_run_state_to_failed, set_dag_run_state_to_success,
)
from airflow.configuration import AIRFLOW_CONFIG, conf
from airflow.exceptions import AirflowException
from airflow.executors.executor_loader import ExecutorLoader
from airflow.jobs.base_job import BaseJob
from airflow.jobs.scheduler_job import SchedulerJob
from airflow.models import Connection, DagModel, DagTag, Log, SlaMiss, TaskFail, XCom, errors
from airflow.models.dagcode import DagCode
from airflow.models.dagrun import DagRun, DagRunType
from airflow.settings import STORE_SERIALIZED_DAGS
from airflow.ti_deps.dep_context import DepContext
from airflow.ti_deps.dependencies_deps import RUNNING_DEPS, SCHEDULER_QUEUED_DEPS
from airflow.utils import timezone
from airflow.utils.dates import infer_time_unit, scale_time_units
from airflow.utils.helpers import alchemy_to_dict, render_log_filename
from airflow.utils.session import create_session, provide_session
from airflow.utils.state import State
from airflow.www import utils as wwwutils
from airflow.www.app import appbuilder
from airflow.www.decorators import action_logging, gzipped, has_dag_access
from airflow.www.forms import (
ConnectionForm, DagRunForm, DateTimeForm, DateTimeWithNumRunsForm, DateTimeWithNumRunsWithDagRunsForm,
)
from airflow.www.widgets import AirflowModelListWidget
PAGE_SIZE = conf.getint('webserver', 'page_size')
FILTER_TAGS_COOKIE = 'tags_filter'
FILTER_STATUS_COOKIE = 'dag_status_filter'
if os.environ.get('SKIP_DAGS_PARSING') != 'True':
dagbag = models.DagBag(settings.DAGS_FOLDER, store_serialized_dags=STORE_SERIALIZED_DAGS)
else:
dagbag = models.DagBag(os.devnull, include_examples=False)
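# The DagBag above is created once at import time and shared by all views in this module;
# setting SKIP_DAGS_PARSING=True substitutes an (effectively empty) bag so the webserver can
# start without parsing DAG files (useful, e.g., in tests).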
def get_date_time_num_runs_dag_runs_form_data(request, session, dag):
dttm = request.args.get('execution_date')
if dttm:
dttm = timezone.parse(dttm)
else:
dttm = dag.get_latest_execution_date(session=session) or timezone.utcnow()
base_date = request.args.get('base_date')
if base_date:
base_date = timezone.parse(base_date)
else:
        # The DateTimeField widget truncates milliseconds and would lose
        # the first dag run. Round to the next second.
base_date = (dttm + timedelta(seconds=1)).replace(microsecond=0)
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
DR = models.DagRun
drs = (
session.query(DR)
.filter(
DR.dag_id == dag.dag_id,
DR.execution_date <= base_date)
.order_by(desc(DR.execution_date))
.limit(num_runs)
.all()
)
dr_choices = []
dr_state = None
for dr in drs:
dr_choices.append((dr.execution_date.isoformat(), dr.run_id))
if dttm == dr.execution_date:
dr_state = dr.state
# Happens if base_date was changed and the selected dag run is not in result
if not dr_state and drs:
dr = drs[0]
dttm = dr.execution_date
dr_state = dr.state
return {
'dttm': dttm,
'base_date': base_date,
'num_runs': num_runs,
'execution_date': dttm.isoformat(),
'dr_choices': dr_choices,
'dr_state': dr_state,
}
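# The dict returned above backs the execution-date / num-runs / dag-run selector widgets;
# graph() further below feeds it directly into a DateTimeWithNumRunsWithDagRunsForm subclass.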
######################################################################################
# Error handlers
######################################################################################
def circles(error):
return render_template(
'airflow/circles.html', hostname=socket.getfqdn() if conf.getboolean(
'webserver',
'EXPOSE_HOSTNAME',
fallback=True) else 'redact'), 404
def show_traceback(error):
from airflow.utils import asciiart as ascii_
return render_template(
'airflow/traceback.html',
hostname=socket.getfqdn() if conf.getboolean(
'webserver',
'EXPOSE_HOSTNAME',
fallback=True) else 'redact',
nukular=ascii_.nukular,
info=traceback.format_exc() if conf.getboolean(
'webserver',
'EXPOSE_STACKTRACE',
fallback=True) else 'Error! Please contact server admin'), 500
######################################################################################
# BaseViews
######################################################################################
class AirflowBaseView(BaseView):
from airflow import macros
route_base = ''
# Make our macros available to our UI templates too.
extra_args = {
'macros': macros,
}
def render_template(self, *args, **kwargs):
return super().render_template(
*args,
# Cache this at most once per request, not for the lifetime of the view instance
scheduler_job=lazy_object_proxy.Proxy(SchedulerJob.most_recent_job),
**kwargs
)
class Airflow(AirflowBaseView):
@expose('/health')
def health(self):
"""
An endpoint helping check the health status of the Airflow instance,
including metadatabase and scheduler.
"""
payload = {
'metadatabase': {'status': 'unhealthy'}
}
latest_scheduler_heartbeat = None
scheduler_status = 'unhealthy'
payload['metadatabase'] = {'status': 'healthy'}
try:
scheduler_job = SchedulerJob.most_recent_job()
if scheduler_job:
latest_scheduler_heartbeat = scheduler_job.latest_heartbeat.isoformat()
if scheduler_job.is_alive():
scheduler_status = 'healthy'
except Exception:
payload['metadatabase']['status'] = 'unhealthy'
payload['scheduler'] = {'status': scheduler_status,
'latest_scheduler_heartbeat': latest_scheduler_heartbeat}
return wwwutils.json_response(payload)
@expose('/home')
@has_access
def index(self):
hide_paused_dags_by_default = conf.getboolean('webserver',
'hide_paused_dags_by_default')
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
def get_int_arg(value, default=0):
try:
return int(value)
except ValueError:
return default
arg_current_page = request.args.get('page', '0')
arg_search_query = request.args.get('search', None)
arg_tags_filter = request.args.getlist('tags', None)
arg_status_filter = request.args.get('status', None)
if request.args.get('reset_tags') is not None:
flask_session[FILTER_TAGS_COOKIE] = None
arg_tags_filter = None
else:
cookie_val = flask_session.get(FILTER_TAGS_COOKIE)
if arg_tags_filter:
flask_session[FILTER_TAGS_COOKIE] = ','.join(arg_tags_filter)
elif cookie_val:
arg_tags_filter = cookie_val.split(',')
if arg_status_filter is None:
cookie_val = flask_session.get(FILTER_STATUS_COOKIE)
if cookie_val:
arg_status_filter = cookie_val
else:
arg_status_filter = 'active' if hide_paused_dags_by_default else 'all'
flask_session[FILTER_STATUS_COOKIE] = arg_status_filter
else:
status = arg_status_filter.strip().lower()
flask_session[FILTER_STATUS_COOKIE] = status
arg_status_filter = status
dags_per_page = PAGE_SIZE
current_page = get_int_arg(arg_current_page, default=0)
start = current_page * dags_per_page
end = start + dags_per_page
# Get all the dag id the user could access
filter_dag_ids = appbuilder.sm.get_accessible_dag_ids()
with create_session() as session:
# read orm_dags from the db
dags_query = session.query(DagModel).filter(
~DagModel.is_subdag, DagModel.is_active
)
if arg_search_query:
dags_query = dags_query.filter(
DagModel.dag_id.ilike('%' + arg_search_query + '%') |
DagModel.owners.ilike('%' + arg_search_query + '%')
)
if arg_tags_filter:
dags_query = dags_query.filter(DagModel.tags.any(DagTag.name.in_(arg_tags_filter)))
if 'all_dags' not in filter_dag_ids:
dags_query = dags_query.filter(DagModel.dag_id.in_(filter_dag_ids))
all_dags = dags_query
active_dags = dags_query.filter(~DagModel.is_paused)
paused_dags = dags_query.filter(DagModel.is_paused)
is_paused_count = dict(
all_dags.with_entities(DagModel.is_paused, func.count(DagModel.dag_id))
.group_by(DagModel.is_paused).all()
)
status_count_active = is_paused_count.get(False, 0)
status_count_paused = is_paused_count.get(True, 0)
all_dags_count = status_count_active + status_count_paused
if arg_status_filter == 'active':
current_dags = active_dags
num_of_all_dags = status_count_active
elif arg_status_filter == 'paused':
current_dags = paused_dags
num_of_all_dags = status_count_paused
else:
current_dags = all_dags
num_of_all_dags = all_dags_count
dags = current_dags.order_by(DagModel.dag_id).options(
joinedload(DagModel.tags)).offset(start).limit(dags_per_page).all()
dagtags = session.query(DagTag.name).distinct(DagTag.name).all()
tags = [
{"name": name, "selected": bool(arg_tags_filter and name in arg_tags_filter)}
for name, in dagtags
]
import_errors = session.query(errors.ImportError).all()
for ie in import_errors:
flash(
"Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
"dag_import_error")
from airflow.plugins_manager import import_errors as plugin_import_errors
for filename, stacktrace in plugin_import_errors.items():
flash(
"Broken plugin: [{filename}] {stacktrace}".format(
stacktrace=stacktrace,
filename=filename),
"error")
num_of_pages = int(math.ceil(num_of_all_dags / float(dags_per_page)))
state_color_mapping = State.state_color.copy()
state_color_mapping["null"] = state_color_mapping.pop(None)
return self.render_template(
'airflow/dags.html',
dags=dags,
current_page=current_page,
search_query=arg_search_query if arg_search_query else '',
page_size=dags_per_page,
num_of_pages=num_of_pages,
num_dag_from=min(start + 1, num_of_all_dags),
num_dag_to=min(end, num_of_all_dags),
num_of_all_dags=num_of_all_dags,
paging=wwwutils.generate_pages(current_page,
num_of_pages,
search=escape(arg_search_query) if arg_search_query else None,
status=arg_status_filter if arg_status_filter else None),
num_runs=num_runs,
tags=tags,
state_color=state_color_mapping,
status_filter=arg_status_filter,
status_count_all=all_dags_count,
status_count_active=status_count_active,
status_count_paused=status_count_paused)
@expose('/dag_stats', methods=['POST'])
@has_access
@provide_session
def dag_stats(self, session=None):
dr = models.DagRun
allowed_dag_ids = appbuilder.sm.get_accessible_dag_ids()
if 'all_dags' in allowed_dag_ids:
allowed_dag_ids = [dag_id for dag_id, in session.query(models.DagModel.dag_id)]
dag_state_stats = session.query(dr.dag_id, dr.state, sqla.func.count(dr.state))\
.group_by(dr.dag_id, dr.state)
# Filter by post parameters
selected_dag_ids = {
unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id
}
if selected_dag_ids:
filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
else:
filter_dag_ids = allowed_dag_ids
if not filter_dag_ids:
return wwwutils.json_response({})
payload = {}
dag_state_stats = dag_state_stats.filter(dr.dag_id.in_(filter_dag_ids))
data = {}
for dag_id, state, count in dag_state_stats:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
for dag_id in filter_dag_ids:
payload[dag_id] = []
for state in State.dag_states:
count = data.get(dag_id, {}).get(state, 0)
payload[dag_id].append({
'state': state,
'count': count
})
return wwwutils.json_response(payload)
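    # Both dag_stats and task_stats return, per dag_id, one {'state': ..., 'count': ...} entry
    # for every known state (zero-filled), e.g. (illustrative):
    #   {"example_dag": [{"state": "running", "count": 1}, {"state": "success", "count": 12}, ...]}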
@expose('/task_stats', methods=['POST'])
@has_access
@provide_session
def task_stats(self, session=None):
TI = models.TaskInstance
DagRun = models.DagRun
Dag = models.DagModel
allowed_dag_ids = set(appbuilder.sm.get_accessible_dag_ids())
if not allowed_dag_ids:
return wwwutils.json_response({})
if 'all_dags' in allowed_dag_ids:
allowed_dag_ids = {dag_id for dag_id, in session.query(models.DagModel.dag_id)}
# Filter by post parameters
selected_dag_ids = {
unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id
}
if selected_dag_ids:
filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
else:
filter_dag_ids = allowed_dag_ids
RunningDagRun = (
session.query(DagRun.dag_id, DagRun.execution_date)
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state == State.RUNNING, Dag.is_active)
)
if selected_dag_ids:
RunningDagRun = RunningDagRun.filter(DagRun.dag_id.in_(filter_dag_ids))
RunningDagRun = RunningDagRun.subquery('running_dag_run')
# Select all task_instances from active dag_runs.
RunningTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(RunningDagRun,
and_(RunningDagRun.c.dag_id == TI.dag_id,
RunningDagRun.c.execution_date == TI.execution_date))
)
if selected_dag_ids:
RunningTI = RunningTI.filter(TI.dag_id.in_(filter_dag_ids))
if conf.getboolean('webserver', 'SHOW_RECENT_STATS_FOR_COMPLETED_RUNS', fallback=True):
LastDagRun = (
session.query(
DagRun.dag_id,
sqla.func.max(DagRun.execution_date).label('execution_date')
)
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state != State.RUNNING, Dag.is_active)
.group_by(DagRun.dag_id)
)
if selected_dag_ids:
LastDagRun = LastDagRun.filter(DagRun.dag_id.in_(filter_dag_ids))
LastDagRun = LastDagRun.subquery('last_dag_run')
# Select all task_instances from active dag_runs.
# If no dag_run is active, return task instances from most recent dag_run.
LastTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(LastDagRun,
and_(LastDagRun.c.dag_id == TI.dag_id,
LastDagRun.c.execution_date == TI.execution_date))
)
if selected_dag_ids:
LastTI = LastTI.filter(TI.dag_id.in_(filter_dag_ids))
FinalTI = union_all(LastTI, RunningTI).alias('final_ti')
else:
FinalTI = RunningTI.subquery('final_ti')
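        # FinalTI now covers task instances of currently-running dag runs, plus (when
        # SHOW_RECENT_STATS_FOR_COMPLETED_RUNS is enabled) those of each DAG's most recent
        # completed run, so the stats reflect the latest activity per DAG.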
qry = (
session.query(FinalTI.c.dag_id, FinalTI.c.state, sqla.func.count())
.group_by(FinalTI.c.dag_id, FinalTI.c.state)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
payload = {}
for dag_id in filter_dag_ids:
payload[dag_id] = []
for state in State.task_states:
count = data.get(dag_id, {}).get(state, 0)
payload[dag_id].append({
'state': state,
'count': count
})
return wwwutils.json_response(payload)
@expose('/last_dagruns', methods=['POST'])
@has_access
@provide_session
def last_dagruns(self, session=None):
DagRun = models.DagRun
allowed_dag_ids = appbuilder.sm.get_accessible_dag_ids()
if 'all_dags' in allowed_dag_ids:
allowed_dag_ids = [dag_id for dag_id, in session.query(models.DagModel.dag_id)]
# Filter by post parameters
selected_dag_ids = {
unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id
}
if selected_dag_ids:
filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
else:
filter_dag_ids = allowed_dag_ids
if not filter_dag_ids:
return wwwutils.json_response({})
query = session.query(
DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('last_run')
).group_by(DagRun.dag_id)
# Filter to only ask for accessible and selected dags
query = query.filter(DagRun.dag_id.in_(filter_dag_ids))
resp = {
r.dag_id.replace('.', '__dot__'): {
'dag_id': r.dag_id,
'last_run': r.last_run.isoformat(),
} for r in query
}
return wwwutils.json_response(resp)
@expose('/code')
@has_dag_access(can_dag_read=True)
@has_access
@provide_session
def code(self, session=None):
all_errors = ""
try:
dag_id = request.args.get('dag_id')
dag_orm = DagModel.get_dagmodel(dag_id, session=session)
code = DagCode.get_code_by_fileloc(dag_orm.fileloc)
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
except Exception as e:
all_errors += (
"Exception encountered during " +
"dag_id retrieval/dag retrieval fallback/code highlighting:\n\n{}\n".format(e)
)
html_code = '<p>Failed to load file.</p><p>Details: {}</p>'.format(
escape(all_errors))
return self.render_template(
'airflow/dag_code.html', html_code=html_code, dag=dag_orm, title=dag_id,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'),
wrapped=conf.getboolean('webserver', 'default_wrap'))
@expose('/dag_details')
@has_dag_access(can_dag_read=True)
@has_access
@provide_session
def dag_details(self, session=None):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = "DAG details"
root = request.args.get('root', '')
TI = models.TaskInstance
states = (
session.query(TI.state, sqla.func.count(TI.dag_id))
.filter(TI.dag_id == dag_id)
.group_by(TI.state)
.all()
)
active_runs = models.DagRun.find(
dag_id=dag_id,
state=State.RUNNING,
external_trigger=False
)
return self.render_template(
'airflow/dag_details.html',
dag=dag, title=title, root=root, states=states, State=State, active_runs=active_runs)
@expose('/rendered')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
def rendered(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = timezone.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
root = request.args.get('root', '')
logging.info("Retrieving rendered templates.")
dag = dagbag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.get_rendered_template_fields()
except AirflowException as e:
msg = "Error rendering template: " + escape(e)
if e.__cause__:
msg += Markup("<br/><br/>OriginalError: ") + escape(e.__cause__)
flash(msg, "error")
except Exception as e:
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
for template_field in task.template_fields:
content = getattr(task, template_field)
if template_field in wwwutils.get_attr_renderer():
html_dict[template_field] = \
wwwutils.get_attr_renderer()[template_field](content)
else:
html_dict[template_field] = (
"<pre><code>" + str(content) + "</pre></code>")
return self.render_template(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
root=root,
title=title)
@expose('/get_logs_with_metadata')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def get_logs_with_metadata(self, session=None):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = timezone.parse(execution_date)
if request.args.get('try_number') is not None:
try_number = int(request.args.get('try_number'))
else:
try_number = None
metadata = request.args.get('metadata')
metadata = json.loads(metadata)
response_format = request.args.get('format', 'json')
# metadata may be null
if not metadata:
metadata = {}
# Convert string datetime into actual datetime
try:
execution_date = timezone.parse(execution_date)
except ValueError:
error_message = (
'Given execution date, {}, could not be identified '
'as a date. Example date format: 2015-11-16T14:34:15+00:00'.format(
execution_date))
response = jsonify({'error': error_message})
response.status_code = 400
return response
logger = logging.getLogger('airflow.task')
task_log_reader = conf.get('logging', 'task_log_reader')
handler = next((handler for handler in logger.handlers
if handler.name == task_log_reader), None)
ti = session.query(models.TaskInstance).filter(
models.TaskInstance.dag_id == dag_id,
models.TaskInstance.task_id == task_id,
models.TaskInstance.execution_date == dttm).first()
def _get_logs_with_metadata(try_number, metadata):
if ti is None:
logs = ["*** Task instance did not exist in the DB\n"]
metadata['end_of_log'] = True
else:
logs, metadatas = handler.read(ti, try_number, metadata=metadata)
metadata = metadatas[0]
return logs, metadata
try:
if ti is not None:
dag = dagbag.get_dag(dag_id)
if dag:
ti.task = dag.get_task(ti.task_id)
if response_format == 'json':
logs, metadata = _get_logs_with_metadata(try_number, metadata)
message = logs[0] if try_number is not None else logs
return jsonify(message=message, metadata=metadata)
filename_template = conf.get('logging', 'LOG_FILENAME_TEMPLATE')
attachment_filename = render_log_filename(
ti=ti,
try_number="all" if try_number is None else try_number,
filename_template=filename_template)
metadata['download_logs'] = True
def _generate_log_stream(try_number, metadata):
if try_number is None and ti is not None:
next_try = ti.next_try_number
try_numbers = list(range(1, next_try))
else:
try_numbers = [try_number]
for try_number in try_numbers:
metadata.pop('end_of_log', None)
metadata.pop('max_offset', None)
metadata.pop('offset', None)
while 'end_of_log' not in metadata or not metadata['end_of_log']:
logs, metadata = _get_logs_with_metadata(try_number, metadata)
yield "\n".join(logs) + "\n"
return Response(_generate_log_stream(try_number, metadata),
mimetype="text/plain",
headers={"Content-Disposition": "attachment; filename={}".format(
attachment_filename)})
except AttributeError as e:
error_message = ["Task log handler {} does not support read logs.\n{}\n"
.format(task_log_reader, str(e))]
metadata['end_of_log'] = True
return jsonify(message=error_message, error=True, metadata=metadata)
@expose('/log')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def log(self, session=None):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = timezone.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag_model = DagModel.get_dagmodel(dag_id)
ti = session.query(models.TaskInstance).filter(
models.TaskInstance.dag_id == dag_id,
models.TaskInstance.task_id == task_id,
models.TaskInstance.execution_date == dttm).first()
num_logs = 0
if ti is not None:
num_logs = ti.next_try_number - 1
if ti.state == State.UP_FOR_RESCHEDULE:
# Tasks in reschedule state decremented the try number
num_logs += 1
logs = [''] * num_logs
root = request.args.get('root', '')
return self.render_template(
'airflow/ti_log.html',
logs=logs, dag=dag_model, title="Log by attempts",
dag_id=dag_id, task_id=task_id,
execution_date=execution_date, form=form,
root=root, wrapped=conf.getboolean('webserver', 'default_wrap'))
@expose('/elasticsearch')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
def elasticsearch(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
try_number = request.args.get('try_number', 1)
elasticsearch_frontend = conf.get('elasticsearch', 'frontend')
log_id_template = conf.get('elasticsearch', 'log_id_template')
log_id = log_id_template.format(
dag_id=dag_id, task_id=task_id,
execution_date=execution_date, try_number=try_number)
url = 'https://' + elasticsearch_frontend.format(log_id=quote(log_id))
return redirect(url)
@expose('/task')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
def task(self):
TI = models.TaskInstance
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = timezone.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
root = request.args.get('root', '')
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect(url_for('Airflow.index'))
task = copy.copy(dag.get_task(task_id))
task.resolve_template_files()
ti = TI(task=task, execution_date=dttm)
ti.refresh_from_db()
ti_attrs = []
for attr_name in dir(ti):
if not attr_name.startswith('_'):
attr = getattr(ti, attr_name)
if type(attr) != type(self.task): # noqa
ti_attrs.append((attr_name, str(attr)))
task_attrs = []
for attr_name in dir(task):
if not attr_name.startswith('_'):
attr = getattr(task, attr_name)
if type(attr) != type(self.task) and \
attr_name not in wwwutils.get_attr_renderer(): # noqa
task_attrs.append((attr_name, str(attr)))
# Color coding the special attributes that are code
special_attrs_rendered = {}
for attr_name in wwwutils.get_attr_renderer():
if hasattr(task, attr_name):
source = getattr(task, attr_name)
special_attrs_rendered[attr_name] = \
wwwutils.get_attr_renderer()[attr_name](source)
no_failed_deps_result = [(
"Unknown",
"All dependencies are met but the task instance is not running. In most "
"cases this just means that the task will probably be scheduled soon "
"unless:<br/>\n- The scheduler is down or under heavy load<br/>\n{}\n"
"<br/>\nIf this task instance does not start soon please contact your "
"Airflow administrator for assistance.".format(
"- This task instance already ran and had it's state changed manually "
"(e.g. cleared in the UI)<br/>" if ti.state == State.NONE else ""))]
# Use the scheduler's context to figure out which dependencies are not met
dep_context = DepContext(SCHEDULER_QUEUED_DEPS)
failed_dep_reasons = [(dep.dep_name, dep.reason) for dep in
ti.get_failed_dep_statuses(
dep_context=dep_context)]
title = "Task Instance Details"
return self.render_template(
'airflow/task.html',
task_attrs=task_attrs,
ti_attrs=ti_attrs,
failed_dep_reasons=failed_dep_reasons or no_failed_deps_result,
task_id=task_id,
execution_date=execution_date,
special_attrs_rendered=special_attrs_rendered,
form=form,
root=root,
dag=dag, title=title)
@expose('/xcom')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def xcom(self, session=None):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = timezone.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
root = request.args.get('root', '')
dm_db = models.DagModel
ti_db = models.TaskInstance
dag = session.query(dm_db).filter(dm_db.dag_id == dag_id).first()
        ti = session.query(ti_db).filter(ti_db.dag_id == dag_id, ti_db.task_id == task_id).first()
if not ti:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect(url_for('Airflow.index'))
xcomlist = session.query(XCom).filter(
XCom.dag_id == dag_id, XCom.task_id == task_id,
XCom.execution_date == dttm).all()
attributes = []
for xcom in xcomlist:
if not xcom.key.startswith('_'):
attributes.append((xcom.key, xcom.value))
title = "XCom"
return self.render_template(
'airflow/xcom.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
form=form,
root=root,
dag=dag, title=title)
@expose('/run', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def run(self):
dag_id = request.form.get('dag_id')
task_id = request.form.get('task_id')
origin = request.form.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.form.get('execution_date')
execution_date = timezone.parse(execution_date)
ignore_all_deps = request.form.get('ignore_all_deps') == "true"
ignore_task_deps = request.form.get('ignore_task_deps') == "true"
ignore_ti_state = request.form.get('ignore_ti_state') == "true"
executor = ExecutorLoader.get_default_executor()
valid_celery_config = False
valid_kubernetes_config = False
try:
from airflow.executors.celery_executor import CeleryExecutor
valid_celery_config = isinstance(executor, CeleryExecutor)
except ImportError:
pass
try:
from airflow.executors.kubernetes_executor import KubernetesExecutor
valid_kubernetes_config = isinstance(executor, KubernetesExecutor)
except ImportError:
pass
if not valid_celery_config and not valid_kubernetes_config:
flash("Only works with the Celery or Kubernetes executors, sorry", "error")
return redirect(origin)
ti = models.TaskInstance(task=task, execution_date=execution_date)
ti.refresh_from_db()
# Make sure the task instance can be run
dep_context = DepContext(
deps=RUNNING_DEPS,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
if failed_deps:
failed_deps_str = ", ".join(
["{}: {}".format(dep.dep_name, dep.reason) for dep in failed_deps])
flash("Could not queue task instance for execution, dependencies not met: "
"{}".format(failed_deps_str),
"error")
return redirect(origin)
executor.start()
executor.queue_task_instance(
ti,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
executor.heartbeat()
flash(
"Sent {} to the message queue, "
"it should start any moment now.".format(ti))
return redirect(origin)
@expose('/delete', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def delete(self):
from airflow.api.common.experimental import delete_dag
from airflow.exceptions import DagNotFound, DagFileExists
dag_id = request.values.get('dag_id')
origin = request.values.get('origin') or url_for('Airflow.index')
try:
delete_dag.delete_dag(dag_id)
except DagNotFound:
flash("DAG with id {} not found. Cannot delete".format(dag_id), 'error')
return redirect(request.referrer)
except DagFileExists:
flash("Dag id {} is still in DagBag. "
"Remove the DAG file first.".format(dag_id),
'error')
return redirect(request.referrer)
flash("Deleting DAG with id {}. May take a couple minutes to fully"
" disappear.".format(dag_id))
# Upon success return to origin.
return redirect(origin)
@expose('/trigger', methods=['POST', 'GET'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
@provide_session
def trigger(self, session=None):
dag_id = request.values.get('dag_id')
origin = request.values.get('origin') or url_for('Airflow.index')
if request.method == 'GET':
return self.render_template(
'airflow/trigger.html',
dag_id=dag_id,
origin=origin,
conf=''
)
dag_orm = session.query(models.DagModel).filter(models.DagModel.dag_id == dag_id).first()
if not dag_orm:
flash("Cannot find dag {}".format(dag_id))
return redirect(origin)
execution_date = timezone.utcnow()
run_id = f"{DagRunType.MANUAL.value}__{execution_date.isoformat()}"
dr = DagRun.find(dag_id=dag_id, run_id=run_id)
if dr:
flash("This run_id {} already exists".format(run_id))
return redirect(origin)
run_conf = {}
conf = request.values.get('conf')
if conf:
try:
run_conf = json.loads(conf)
except json.decoder.JSONDecodeError:
flash("Invalid JSON configuration", "error")
return self.render_template(
'airflow/trigger.html',
dag_id=dag_id,
origin=origin,
conf=conf
)
dag = dagbag.get_dag(dag_id)
dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=State.RUNNING,
conf=run_conf,
external_trigger=True
)
flash(
"Triggered {}, "
"it should start any moment now.".format(dag_id))
return redirect(origin)
def _clear_dag_tis(self, dag, start_date, end_date, origin,
recursive=False, confirmed=False, only_failed=False):
from airflow.exceptions import AirflowException
if confirmed:
count = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive,
include_parentdag=recursive,
only_failed=only_failed,
)
flash("{0} task instances have been cleared".format(count))
return redirect(origin)
try:
tis = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive,
include_parentdag=recursive,
only_failed=only_failed,
dry_run=True,
)
except AirflowException as ex:
flash(str(ex), 'error')
return redirect(origin)
if not tis:
flash("No task instances to clear", 'error')
response = redirect(origin)
else:
details = "\n".join([str(t) for t in tis])
response = self.render_template(
'airflow/confirm.html',
message=("Here's the list of task instances you are about "
"to clear:"),
details=details)
return response
@expose('/clear', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def clear(self):
dag_id = request.form.get('dag_id')
task_id = request.form.get('task_id')
origin = request.form.get('origin')
dag = dagbag.get_dag(dag_id)
execution_date = request.form.get('execution_date')
execution_date = timezone.parse(execution_date)
confirmed = request.form.get('confirmed') == "true"
upstream = request.form.get('upstream') == "true"
downstream = request.form.get('downstream') == "true"
future = request.form.get('future') == "true"
past = request.form.get('past') == "true"
recursive = request.form.get('recursive') == "true"
only_failed = request.form.get('only_failed') == "true"
dag = dag.sub_dag(
task_regex=r"^{0}$".format(task_id),
include_downstream=downstream,
include_upstream=upstream)
end_date = execution_date if not future else None
start_date = execution_date if not past else None
return self._clear_dag_tis(dag, start_date, end_date, origin,
recursive=recursive, confirmed=confirmed, only_failed=only_failed)
@expose('/dagrun_clear', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def dagrun_clear(self):
dag_id = request.form.get('dag_id')
origin = request.form.get('origin')
execution_date = request.form.get('execution_date')
confirmed = request.form.get('confirmed') == "true"
dag = dagbag.get_dag(dag_id)
execution_date = timezone.parse(execution_date)
start_date = execution_date
end_date = execution_date
return self._clear_dag_tis(dag, start_date, end_date, origin,
recursive=True, confirmed=confirmed)
@expose('/blocked', methods=['POST'])
@has_access
@provide_session
def blocked(self, session=None):
allowed_dag_ids = appbuilder.sm.get_accessible_dag_ids()
if 'all_dags' in allowed_dag_ids:
allowed_dag_ids = [dag_id for dag_id, in session.query(models.DagModel.dag_id)]
# Filter by post parameters
selected_dag_ids = {
unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id
}
if selected_dag_ids:
filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
else:
filter_dag_ids = allowed_dag_ids
if not filter_dag_ids:
return wwwutils.json_response([])
DR = models.DagRun
dags = (
session.query(DR.dag_id, sqla.func.count(DR.id))
.filter(DR.state == State.RUNNING)
.filter(DR.dag_id.in_(filter_dag_ids))
.group_by(DR.dag_id)
)
payload = []
for dag_id, active_dag_runs in dags:
max_active_runs = 0
dag = dagbag.get_dag(dag_id)
if dag:
# TODO: Make max_active_runs a column so we can query for it directly
max_active_runs = dag.max_active_runs
payload.append({
'dag_id': dag_id,
'active_dag_run': active_dag_runs,
'max_active_runs': max_active_runs,
})
return wwwutils.json_response(payload)
def _mark_dagrun_state_as_failed(self, dag_id, execution_date, confirmed, origin):
if not execution_date:
flash('Invalid execution date', 'error')
return redirect(origin)
execution_date = timezone.parse(execution_date)
dag = dagbag.get_dag(dag_id)
if not dag:
flash('Cannot find DAG: {}'.format(dag_id), 'error')
return redirect(origin)
new_dag_state = set_dag_run_state_to_failed(dag, execution_date, commit=confirmed)
if confirmed:
flash('Marked failed on {} task instances'.format(len(new_dag_state)))
return redirect(origin)
else:
details = '\n'.join([str(t) for t in new_dag_state])
response = self.render_template(
'airflow/confirm.html',
message="Here's the list of task instances you are about to mark as failed",
details=details)
return response
def _mark_dagrun_state_as_success(self, dag_id, execution_date, confirmed, origin):
if not execution_date:
flash('Invalid execution date', 'error')
return redirect(origin)
execution_date = timezone.parse(execution_date)
dag = dagbag.get_dag(dag_id)
if not dag:
flash('Cannot find DAG: {}'.format(dag_id), 'error')
return redirect(origin)
new_dag_state = set_dag_run_state_to_success(dag, execution_date,
commit=confirmed)
if confirmed:
flash('Marked success on {} task instances'.format(len(new_dag_state)))
return redirect(origin)
else:
details = '\n'.join([str(t) for t in new_dag_state])
response = self.render_template(
'airflow/confirm.html',
message="Here's the list of task instances you are about to mark as success",
details=details)
return response
@expose('/dagrun_failed', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def dagrun_failed(self):
dag_id = request.form.get('dag_id')
execution_date = request.form.get('execution_date')
confirmed = request.form.get('confirmed') == 'true'
origin = request.form.get('origin')
return self._mark_dagrun_state_as_failed(dag_id, execution_date,
confirmed, origin)
@expose('/dagrun_success', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def dagrun_success(self):
dag_id = request.form.get('dag_id')
execution_date = request.form.get('execution_date')
confirmed = request.form.get('confirmed') == 'true'
origin = request.form.get('origin')
return self._mark_dagrun_state_as_success(dag_id, execution_date,
confirmed, origin)
def _mark_task_instance_state(self, dag_id, task_id, origin, execution_date,
confirmed, upstream, downstream,
future, past, state):
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
task.dag = dag
latest_execution_date = dag.get_latest_execution_date()
if not latest_execution_date:
flash(f"Cannot make {state}, seem that dag {dag_id} has never run", "error")
return redirect(origin)
execution_date = timezone.parse(execution_date)
from airflow.api.common.experimental.mark_tasks import set_state
if confirmed:
altered = set_state(tasks=[task], execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=state,
commit=True)
flash("Marked {} on {} task instances".format(state, len(altered)))
return redirect(origin)
to_be_altered = set_state(tasks=[task], execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=state,
commit=False)
details = "\n".join([str(t) for t in to_be_altered])
response = self.render_template(
"airflow/confirm.html",
message=("Here's the list of task instances you are about to mark as {}:".format(state)),
details=details)
return response
@expose('/failed', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def failed(self):
dag_id = request.form.get('dag_id')
task_id = request.form.get('task_id')
origin = request.form.get('origin')
execution_date = request.form.get('execution_date')
confirmed = request.form.get('confirmed') == "true"
upstream = request.form.get('failed_upstream') == "true"
downstream = request.form.get('failed_downstream') == "true"
future = request.form.get('failed_future') == "true"
past = request.form.get('failed_past') == "true"
return self._mark_task_instance_state(dag_id, task_id, origin, execution_date,
confirmed, upstream, downstream,
future, past, State.FAILED)
@expose('/success', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def success(self):
dag_id = request.form.get('dag_id')
task_id = request.form.get('task_id')
origin = request.form.get('origin')
execution_date = request.form.get('execution_date')
confirmed = request.form.get('confirmed') == "true"
upstream = request.form.get('success_upstream') == "true"
downstream = request.form.get('success_downstream') == "true"
future = request.form.get('success_future') == "true"
past = request.form.get('success_past') == "true"
return self._mark_task_instance_state(dag_id, task_id, origin, execution_date,
confirmed, upstream, downstream,
future, past, State.SUCCESS)
@expose('/tree')
@has_dag_access(can_dag_read=True)
@has_access
@gzipped
@action_logging
def tree(self):
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
if not dag:
flash('DAG "{0}" seems to be missing from DagBag.'.format(dag_id), "error")
return redirect(url_for('Airflow.index'))
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_downstream=False,
include_upstream=True)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
if num_runs:
num_runs = int(num_runs)
else:
num_runs = conf.getint('webserver', 'default_dag_run_display_number')
if base_date:
base_date = timezone.parse(base_date)
else:
base_date = dag.get_latest_execution_date() or timezone.utcnow()
with create_session() as session:
dag_runs = (
session.query(DagRun)
.filter(
DagRun.dag_id == dag.dag_id,
DagRun.execution_date <= base_date)
.order_by(DagRun.execution_date.desc())
.limit(num_runs)
.all()
)
dag_runs = {
dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs
}
dates = sorted(list(dag_runs.keys()))
max_date = max(dates) if dates else None
min_date = min(dates) if dates else None
tis = dag.get_task_instances(start_date=min_date, end_date=base_date)
task_instances: Dict[Tuple[str, datetime], models.TaskInstance] = {}
for ti in tis:
task_instances[(ti.task_id, ti.execution_date)] = ti
expanded = set()
# The default recursion traces every path so that tree view has full
# expand/collapse functionality. After 5,000 nodes we stop and fall
# back on a quick DFS search for performance. See PR #320.
node_count = 0
node_limit = 5000 / max(1, len(dag.leaves))
def encode_ti(ti: Optional[models.TaskInstance]) -> Optional[List]:
if not ti:
return None
# NOTE: order of entry is important here because client JS relies on it for
# tree node reconstruction. Remember to change JS code in tree.html
# whenever order is altered.
data = [
ti.state,
ti.try_number,
None, # start_ts
None, # duration
]
if ti.start_date:
# round to seconds to reduce payload size
data[2] = int(ti.start_date.timestamp())
if ti.duration is not None:
data[3] = int(ti.duration)
return data
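        # Each encoded instance is a compact positional list consumed by tree.html, e.g.
        # (illustrative values) ["success", 1, 1612137600, 42] == [state, try_number, start_ts, duration].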
def recurse_nodes(task, visited):
nonlocal node_count
node_count += 1
visited.add(task)
task_id = task.task_id
node = {
'name': task.task_id,
'instances': [
encode_ti(task_instances.get((task_id, d)))
for d in dates
],
'num_dep': len(task.downstream_list),
'operator': task.task_type,
'retries': task.retries,
'owner': task.owner,
'ui_color': task.ui_color,
}
if task.downstream_list:
children = [
recurse_nodes(t, visited) for t in task.downstream_list
if node_count < node_limit or t not in visited]
# D3 tree uses children vs _children to define what is
# expanded or not. The following block makes it such that
# repeated nodes are collapsed by default.
if task.task_id not in expanded:
children_key = 'children'
expanded.add(task.task_id)
else:
children_key = "_children"
node[children_key] = children
if task.depends_on_past:
node['depends_on_past'] = task.depends_on_past
if task.start_date:
# round to seconds to reduce payload size
node['start_ts'] = int(task.start_date.timestamp())
if task.end_date:
# round to seconds to reduce payload size
node['end_ts'] = int(task.end_date.timestamp())
if task.extra_links:
node['extra_links'] = task.extra_links
return node
data = {
'name': '[DAG]',
'children': [recurse_nodes(t, set()) for t in dag.roots],
'instances': [
dag_runs.get(d) or {'execution_date': d.isoformat()}
for d in dates
],
}
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
external_logs = conf.get('elasticsearch', 'frontend')
# avoid spaces to reduce payload size
data = htmlsafe_json_dumps(data, separators=(',', ':'))
# escape slashes to avoid JSON parse error in JS
data = data.replace('\\', '\\\\')
return self.render_template(
'airflow/tree.html',
operators=sorted({op.task_type: op for op in dag.tasks}.values(), key=lambda x: x.task_type),
root=root,
form=form,
dag=dag,
data=data,
blur=blur, num_runs=num_runs,
show_external_logs=bool(external_logs))
@expose('/graph')
@has_dag_access(can_dag_read=True)
@has_access
@gzipped
@action_logging
@provide_session
def graph(self, session=None):
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
if not dag:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect(url_for('Airflow.index'))
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
arrange = request.args.get('arrange', dag.orientation)
nodes = []
edges = []
for task in dag.tasks:
nodes.append({
'id': task.task_id,
'value': {
'label': task.task_id,
'labelStyle': "fill:{0};".format(task.ui_fgcolor),
'style': "fill:{0};".format(task.ui_color),
'rx': 5,
'ry': 5,
}
})
def get_downstream(task):
for t in task.downstream_list:
edge = {
'source_id': task.task_id,
'target_id': t.task_id,
}
if edge not in edges:
edges.append(edge)
get_downstream(t)
for t in dag.roots:
get_downstream(t)
dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag)
dt_nr_dr_data['arrange'] = arrange
dttm = dt_nr_dr_data['dttm']
class GraphForm(DateTimeWithNumRunsWithDagRunsForm):
arrange = SelectField("Layout", choices=(
('LR', "Left->Right"),
('RL', "Right->Left"),
('TB', "Top->Bottom"),
('BT', "Bottom->Top"),
))
form = GraphForm(data=dt_nr_dr_data)
form.execution_date.choices = dt_nr_dr_data['dr_choices']
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(dttm, dttm)}
tasks = {
t.task_id: {
'dag_id': t.dag_id,
'task_type': t.task_type,
'extra_links': t.extra_links,
}
for t in dag.tasks}
if not tasks:
flash("No tasks found", "error")
session.commit()
doc_md = markdown.markdown(dag.doc_md) \
if hasattr(dag, 'doc_md') and dag.doc_md else ''
external_logs = conf.get('elasticsearch', 'frontend')
return self.render_template(
'airflow/graph.html',
dag=dag,
form=form,
width=request.args.get('width', "100%"),
height=request.args.get('height', "800"),
execution_date=dttm.isoformat(),
state_token=wwwutils.state_token(dt_nr_dr_data['dr_state']),
doc_md=doc_md,
arrange=arrange,
operators=sorted({op.task_type: op for op in dag.tasks}.values(), key=lambda x: x.task_type),
blur=blur,
root=root or '',
task_instances=task_instances,
tasks=tasks,
nodes=nodes,
edges=edges,
show_external_logs=bool(external_logs))
@expose('/duration')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def duration(self, session=None):
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if dag is None:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect(url_for('Airflow.index'))
if base_date:
base_date = timezone.parse(base_date)
else:
base_date = dag.get_latest_execution_date() or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else timezone.utc_epoch()
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = wwwutils.get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=chart_height, width="1200")
cum_chart = nvd3.lineChart(
name="cumLineChart", x_is_date=True, height=chart_height, width="1200")
y = defaultdict(list)
x = defaultdict(list)
cum_y = defaultdict(list)
tis = dag.get_task_instances(start_date=min_date, end_date=base_date)
TF = TaskFail
ti_fails = (
session.query(TF)
.filter(TF.dag_id == dag.dag_id,
TF.execution_date >= min_date,
TF.execution_date <= base_date,
TF.task_id.in_([t.task_id for t in dag.tasks]))
.all() # noqa
)
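        # Sum the recorded TaskFail durations per (dag_id, task_id, execution_date)
        # so the cumulative chart also accounts for time spent in failed tries.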
fails_totals = defaultdict(int)
for tf in ti_fails:
dict_key = (tf.dag_id, tf.task_id, tf.execution_date)
if tf.duration:
fails_totals[dict_key] += tf.duration
for ti in tis:
if ti.duration:
dttm = wwwutils.epoch(ti.execution_date)
x[ti.task_id].append(dttm)
y[ti.task_id].append(float(ti.duration))
fails_dict_key = (ti.dag_id, ti.task_id, ti.execution_date)
fails_total = fails_totals[fails_dict_key]
cum_y[ti.task_id].append(float(ti.duration + fails_total))
# determine the most relevant time unit for the set of task instance
# durations for the DAG
y_unit = infer_time_unit([d for t in y.values() for d in t])
cum_y_unit = infer_time_unit([d for t in cum_y.values() for d in t])
# update the y Axis on both charts to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Duration ({})'.format(y_unit))
chart.axislist['yAxis']['axisLabelDistance'] = '-15'
cum_chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Duration ({})'.format(cum_y_unit))
cum_chart.axislist['yAxis']['axisLabelDistance'] = '-15'
for task in dag.tasks:
if x[task.task_id]:
chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(y[task.task_id], y_unit))
cum_chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(cum_y[task.task_id],
cum_y_unit))
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
cum_chart.buildcontent()
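        # Inject a 'chartload' trigger just before the final '});' of the
        # generated cumulative-chart script so the page can react once the
        # chart has rendered.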
s_index = cum_chart.htmlcontent.rfind('});')
cum_chart.htmlcontent = (cum_chart.htmlcontent[:s_index] +
"$( document ).trigger('chartload')" +
cum_chart.htmlcontent[s_index:])
return self.render_template(
'airflow/duration_chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart.htmlcontent,
cum_chart=cum_chart.htmlcontent
)
@expose('/tries')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def tries(self, session=None):
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if base_date:
base_date = timezone.parse(base_date)
else:
base_date = dag.get_latest_execution_date() or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else timezone.utc_epoch()
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = wwwutils.get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, y_axis_format='d', height=chart_height,
width="1200")
for task in dag.tasks:
y = []
x = []
for ti in task.get_task_instances(start_date=min_date, end_date=base_date):
dttm = wwwutils.epoch(ti.execution_date)
x.append(dttm)
# y value should reflect completed tries to have a 0 baseline.
y.append(ti.prev_attempted_tries)
if x:
chart.add_serie(name=task.task_id, x=x, y=y)
tis = dag.get_task_instances(start_date=min_date, end_date=base_date)
tries = sorted(list({ti.try_number for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if tries else None
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
return self.render_template(
'airflow/chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart.htmlcontent,
tab_title='Tries',
)
@expose('/landing_times')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def landing_times(self, session=None):
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if base_date:
base_date = timezone.parse(base_date)
else:
base_date = dag.get_latest_execution_date() or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else timezone.utc_epoch()
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = wwwutils.get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=chart_height, width="1200")
y = {}
x = {}
for task in dag.tasks:
task_id = task.task_id
y[task_id] = []
x[task_id] = []
for ti in task.get_task_instances(start_date=min_date, end_date=base_date):
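                # Landing time: seconds between the schedule tick following the
                # execution_date (i.e. when the run could first have started)
                # and the moment the task instance finished.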
ts = ti.execution_date
if dag.schedule_interval and dag.following_schedule(ts):
ts = dag.following_schedule(ts)
if ti.end_date:
dttm = wwwutils.epoch(ti.execution_date)
secs = (ti.end_date - ts).total_seconds()
x[task_id].append(dttm)
y[task_id].append(secs)
# determine the most relevant time unit for the set of landing times
# for the DAG
y_unit = infer_time_unit([d for t in y.values() for d in t])
# update the y Axis to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Landing Time ({})'.format(y_unit))
chart.axislist['yAxis']['axisLabelDistance'] = '-15'
for task in dag.tasks:
if x[task.task_id]:
chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(y[task.task_id], y_unit))
tis = dag.get_task_instances(start_date=min_date, end_date=base_date)
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
return self.render_template(
'airflow/chart.html',
dag=dag,
chart=chart.htmlcontent,
height=str(chart_height + 100) + "px",
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
tab_title='Landing times',
)
@expose('/paused', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
def paused(self):
dag_id = request.args.get('dag_id')
        is_paused = request.args.get('is_paused') == 'false'
models.DagModel.get_dagmodel(dag_id).set_is_paused(
is_paused=is_paused)
return "OK"
@expose('/refresh', methods=['POST'])
@has_dag_access(can_dag_edit=True)
@has_access
@action_logging
@provide_session
def refresh(self, session=None):
DagModel = models.DagModel
dag_id = request.values.get('dag_id')
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = timezone.utcnow()
session.merge(orm_dag)
session.commit()
dag = dagbag.get_dag(dag_id)
# sync dag permission
appbuilder.sm.sync_perm_for_dag(dag_id, dag.access_control)
flash("DAG [{}] is now fresh as a daisy".format(dag_id))
return redirect(request.referrer)
@expose('/gantt')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
@provide_session
def gantt(self, session=None):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
demo_mode = conf.getboolean('webserver', 'demo_mode')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag)
dttm = dt_nr_dr_data['dttm']
form = DateTimeWithNumRunsWithDagRunsForm(data=dt_nr_dr_data)
form.execution_date.choices = dt_nr_dr_data['dr_choices']
tis = [
ti for ti in dag.get_task_instances(dttm, dttm)
if ti.start_date and ti.state]
tis = sorted(tis, key=lambda ti: ti.start_date)
TF = TaskFail
ti_fails = list(itertools.chain(*[(
session
.query(TF)
.filter(TF.dag_id == ti.dag_id,
TF.task_id == ti.task_id,
TF.execution_date == ti.execution_date)
.all()
) for ti in tis]))
# determine bars to show in the gantt chart
gantt_bar_items = []
tasks = []
for ti in tis:
end_date = ti.end_date or timezone.utcnow()
# prev_attempted_tries will reflect the currently running try_number
# or the try_number of the last complete run
# https://issues.apache.org/jira/browse/AIRFLOW-2143
try_count = ti.prev_attempted_tries
gantt_bar_items.append((ti.task_id, ti.start_date, end_date, ti.state, try_count))
d = alchemy_to_dict(ti)
d['extraLinks'] = dag.get_task(ti.task_id).extra_links
tasks.append(d)
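        # Add one bar per recorded TaskFail so earlier failed tries appear
        # alongside the final state of each task instance.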
tf_count = 0
try_count = 1
prev_task_id = ""
for tf in ti_fails:
end_date = tf.end_date or timezone.utcnow()
start_date = tf.start_date or end_date
if tf_count != 0 and tf.task_id == prev_task_id:
try_count = try_count + 1
else:
try_count = 1
prev_task_id = tf.task_id
gantt_bar_items.append((tf.task_id, start_date, end_date, State.FAILED, try_count))
tf_count = tf_count + 1
task = dag.get_task(tf.task_id)
d = alchemy_to_dict(tf)
d['state'] = State.FAILED
d['operator'] = task.task_type
d['try_number'] = try_count
d['extraLinks'] = task.extra_links
tasks.append(d)
data = {
'taskNames': [ti.task_id for ti in tis],
'tasks': tasks,
'height': len(tis) * 25 + 25,
}
session.commit()
return self.render_template(
'airflow/gantt.html',
dag=dag,
execution_date=dttm.isoformat(),
form=form,
data=data,
base_date='',
demo_mode=demo_mode,
root=root,
)
@expose('/extra_links')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
def extra_links(self):
"""
        A restful endpoint that returns external links for a given operator.
It queries the operator that sent the request for the links it wishes
to provide for a given external link name.
API: GET
Args: dag_id: The id of the dag containing the task in question
task_id: The id of the task in question
execution_date: The date of execution of the task
link_name: The name of the link reference to find the actual URL for
Returns:
200: {url: <url of link>, error: None} - returned when there was no problem
finding the URL
404: {url: None, error: <error message>} - returned when the operator does
not return a URL
"""
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
link_name = request.args.get('link_name')
dttm = timezone.parse(execution_date)
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
response = jsonify(
{'url': None,
'error': "can't find dag {dag} or task_id {task_id}".format(
dag=dag,
task_id=task_id
)}
)
response.status_code = 404
return response
task = dag.get_task(task_id)
try:
url = task.get_extra_links(dttm, link_name)
except ValueError as err:
response = jsonify({'url': None, 'error': str(err)})
response.status_code = 404
return response
if url:
response = jsonify({'error': None, 'url': url})
response.status_code = 200
return response
else:
response = jsonify(
{'url': None, 'error': 'No URL found for {dest}'.format(dest=link_name)})
response.status_code = 404
return response
@expose('/object/task_instances')
@has_dag_access(can_dag_read=True)
@has_access
@action_logging
def task_instances(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
dttm = request.args.get('execution_date')
if dttm:
dttm = timezone.parse(dttm)
else:
return "Error: Invalid execution_date"
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(dttm, dttm)}
return json.dumps(task_instances)
class VersionView(AirflowBaseView):
default_view = 'version'
@expose('/version')
@has_access
def version(self):
try:
airflow_version = airflow.__version__
except Exception as e:
airflow_version = None
logging.error(e)
# Get the Git repo and git hash
git_version = None
try:
git_version = str(pkgutil.get_data('airflow', 'git_version'), encoding="UTF-8")
except Exception as e:
logging.error(e)
# Render information
title = "Version Info"
return self.render_template(
'airflow/version.html',
title=title,
airflow_version=airflow_version,
git_version=git_version)
class ConfigurationView(AirflowBaseView):
default_view = 'conf'
@expose('/configuration')
@has_access
def conf(self):
raw = request.args.get('raw') == "true"
title = "Airflow Configuration"
subtitle = AIRFLOW_CONFIG
# Don't show config when expose_config variable is False in airflow config
if conf.getboolean("webserver", "expose_config"):
with open(AIRFLOW_CONFIG, 'r') as file:
config = file.read()
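            # Flatten the parsed configuration into (section, key, value, source)
            # rows for the table rendered on the configuration page.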
table = [(section, key, value, source)
for section, parameters in conf.as_dict(True, True).items()
for key, (value, source) in parameters.items()]
else:
config = (
"# Your Airflow administrator chose not to expose the "
"configuration, most likely for security reasons.")
table = None
if raw:
return Response(
response=config,
status=200,
mimetype="application/text")
else:
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render_template(
'airflow/config.html',
pre_subtitle=settings.HEADER + " v" + airflow.__version__,
code_html=code_html, title=title, subtitle=subtitle,
table=table)
######################################################################################
# ModelViews
######################################################################################
class DagFilter(BaseFilter):
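    # Restrict list views to the DAGs the current user can access; users with
    # the all-dags permission are not filtered.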
def apply(self, query, func): # noqa
if appbuilder.sm.has_all_dags_access():
return query
filter_dag_ids = appbuilder.sm.get_accessible_dag_ids()
return query.filter(self.model.dag_id.in_(filter_dag_ids))
class AirflowModelView(ModelView):
list_widget = AirflowModelListWidget
page_size = PAGE_SIZE
CustomSQLAInterface = wwwutils.CustomSQLAInterface
class SlaMissModelView(AirflowModelView):
route_base = '/slamiss'
datamodel = AirflowModelView.CustomSQLAInterface(SlaMiss)
base_permissions = ['can_list']
list_columns = ['dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp']
add_columns = ['dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp']
edit_columns = ['dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp']
search_columns = ['dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date']
base_order = ('execution_date', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
formatters_columns = {
'task_id': wwwutils.task_instance_link,
'execution_date': wwwutils.datetime_f('execution_date'),
'timestamp': wwwutils.datetime_f('timestamp'),
'dag_id': wwwutils.dag_link,
}
class XComModelView(AirflowModelView):
route_base = '/xcom'
datamodel = AirflowModelView.CustomSQLAInterface(XCom)
base_permissions = ['can_add', 'can_list', 'can_edit', 'can_delete']
search_columns = ['key', 'value', 'timestamp', 'execution_date', 'task_id', 'dag_id']
list_columns = ['key', 'value', 'timestamp', 'execution_date', 'task_id', 'dag_id']
add_columns = ['key', 'value', 'execution_date', 'task_id', 'dag_id']
edit_columns = ['key', 'value', 'execution_date', 'task_id', 'dag_id']
base_order = ('execution_date', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
formatters_columns = {
'task_id': wwwutils.task_instance_link,
'execution_date': wwwutils.datetime_f('execution_date'),
'timestamp': wwwutils.datetime_f('timestamp'),
'dag_id': wwwutils.dag_link,
}
@action('muldelete', 'Delete', "Are you sure you want to delete selected records?",
single=False)
def action_muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
def pre_add(self, item):
item.execution_date = timezone.make_aware(item.execution_date)
item.value = XCom.serialize_value(item.value)
def pre_update(self, item):
item.execution_date = timezone.make_aware(item.execution_date)
item.value = XCom.serialize_value(item.value)
class ConnectionModelView(AirflowModelView):
route_base = '/connection'
datamodel = AirflowModelView.CustomSQLAInterface(Connection)
base_permissions = ['can_add', 'can_list', 'can_edit', 'can_delete']
extra_fields = ['extra__jdbc__drv_path', 'extra__jdbc__drv_clsname',
'extra__google_cloud_platform__project',
'extra__google_cloud_platform__key_path',
'extra__google_cloud_platform__keyfile_dict',
'extra__google_cloud_platform__scope',
'extra__google_cloud_platform__num_retries',
'extra__grpc__auth_type',
'extra__grpc__credential_pem_file',
'extra__grpc__scopes',
'extra__yandexcloud__service_account_json',
'extra__yandexcloud__service_account_json_path',
'extra__yandexcloud__oauth',
'extra__yandexcloud__public_ssh_key',
'extra__yandexcloud__folder_id',
'extra__kubernetes__in_cluster',
'extra__kubernetes__kube_config',
'extra__kubernetes__namespace']
list_columns = ['conn_id', 'conn_type', 'host', 'port', 'is_encrypted',
'is_extra_encrypted']
add_columns = edit_columns = ['conn_id', 'conn_type', 'host', 'schema',
'login', 'password', 'port', 'extra'] + extra_fields
add_form = edit_form = ConnectionForm
add_template = 'airflow/conn_create.html'
edit_template = 'airflow/conn_edit.html'
base_order = ('conn_id', 'asc')
@action('muldelete', 'Delete', 'Are you sure you want to delete selected records?',
single=False)
@has_dag_access(can_dag_edit=True)
def action_muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
def process_form(self, form, is_created):
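        # For connection types with dedicated extra__* form fields, pack those
        # fields back into the JSON 'extra' column before saving.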
formdata = form.data
if formdata['conn_type'] in ['jdbc', 'google_cloud_platform', 'grpc', 'yandexcloud', 'kubernetes']:
extra = {
key: formdata[key]
for key in self.extra_fields if key in formdata}
form.extra.data = json.dumps(extra)
def prefill_form(self, form, pk):
try:
d = json.loads(form.data.get('extra', '{}'))
except Exception:
d = {}
if not hasattr(d, 'get'):
            logging.warning('extra field for {} is not a dictionary'.format(
                form.data.get('conn_id', '<unknown>')))
return
for field in self.extra_fields:
value = d.get(field, '')
if value:
field = getattr(form, field)
field.data = value
class PoolModelView(AirflowModelView):
route_base = '/pool'
datamodel = AirflowModelView.CustomSQLAInterface(models.Pool)
base_permissions = ['can_add', 'can_list', 'can_edit', 'can_delete']
list_columns = ['pool', 'slots', 'running_slots', 'queued_slots']
add_columns = ['pool', 'slots', 'description']
edit_columns = ['pool', 'slots', 'description']
base_order = ('pool', 'asc')
@action('muldelete', 'Delete', 'Are you sure you want to delete selected records?',
single=False)
def action_muldelete(self, items):
if any(item.pool == models.Pool.DEFAULT_POOL_NAME for item in items):
flash("default_pool cannot be deleted", 'error')
self.update_redirect()
return redirect(self.get_redirect())
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
def pool_link(attr):
pool_id = attr.get('pool')
if pool_id is not None:
url = url_for('TaskInstanceModelView.list', _flt_3_pool=pool_id)
return Markup("<a href='{url}'>{pool_id}</a>").format(url=url, pool_id=pool_id)
else:
return Markup('<span class="label label-danger">Invalid</span>')
def frunning_slots(attr):
pool_id = attr.get('pool')
running_slots = attr.get('running_slots')
if pool_id is not None and running_slots is not None:
url = url_for('TaskInstanceModelView.list', _flt_3_pool=pool_id, _flt_3_state='running')
return Markup("<a href='{url}'>{running_slots}</a>").format(url=url, running_slots=running_slots)
else:
return Markup('<span class="label label-danger">Invalid</span>')
def fqueued_slots(attr):
pool_id = attr.get('pool')
queued_slots = attr.get('queued_slots')
if pool_id is not None and queued_slots is not None:
url = url_for('TaskInstanceModelView.list', _flt_3_pool=pool_id, _flt_3_state='queued')
return Markup("<a href='{url}'>{queued_slots}</a>").format(url=url, queued_slots=queued_slots)
else:
return Markup('<span class="label label-danger">Invalid</span>')
formatters_columns = {
'pool': pool_link,
'running_slots': frunning_slots,
'queued_slots': fqueued_slots
}
validators_columns = {
'pool': [validators.DataRequired()],
'slots': [validators.NumberRange(min=-1)]
}
class VariableModelView(AirflowModelView):
route_base = '/variable'
list_template = 'airflow/variable_list.html'
edit_template = 'airflow/variable_edit.html'
datamodel = AirflowModelView.CustomSQLAInterface(models.Variable)
base_permissions = ['can_add', 'can_list', 'can_edit', 'can_delete', 'can_varimport']
list_columns = ['key', 'val', 'is_encrypted']
add_columns = ['key', 'val']
edit_columns = ['key', 'val']
search_columns = ['key', 'val']
base_order = ('key', 'asc')
def hidden_field_formatter(attr):
key = attr.get('key')
val = attr.get('val')
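        # Mask the value when the key looks sensitive (passwords, secrets, ...).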
if wwwutils.should_hide_value_for_key(key):
return Markup('*' * 8)
if val:
return val
else:
return Markup('<span class="label label-danger">Invalid</span>')
formatters_columns = {
'val': hidden_field_formatter,
}
validators_columns = {
'key': [validators.DataRequired()]
}
def prefill_form(self, form, id):
if wwwutils.should_hide_value_for_key(form.key.data):
form.val.data = '*' * 8
@action('muldelete', 'Delete', 'Are you sure you want to delete selected records?',
single=False)
def action_muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
@action('varexport', 'Export', '', single=False)
def action_varexport(self, items):
var_dict = {}
d = json.JSONDecoder()
for var in items:
try:
val = d.decode(var.val)
except Exception:
val = var.val
var_dict[var.key] = val
response = make_response(json.dumps(var_dict, sort_keys=True, indent=4))
response.headers["Content-Disposition"] = "attachment; filename=variables.json"
response.headers["Content-Type"] = "application/json; charset=utf-8"
return response
@expose('/varimport', methods=["POST"])
@has_access
@action_logging
def varimport(self):
try:
out = request.files['file'].read()
if isinstance(out, bytes):
d = json.loads(out.decode('utf-8'))
else:
d = json.loads(out)
except Exception:
self.update_redirect()
flash("Missing file or syntax error.", 'error')
return redirect(self.get_redirect())
else:
suc_count = fail_count = 0
for k, v in d.items():
try:
models.Variable.set(k, v, serialize_json=not isinstance(v, str))
except Exception as e:
logging.info('Variable import failed: {}'.format(repr(e)))
fail_count += 1
else:
suc_count += 1
flash("{} variable(s) successfully updated.".format(suc_count))
if fail_count:
flash("{} variable(s) failed to be updated.".format(fail_count), 'error')
self.update_redirect()
return redirect(self.get_redirect())
class JobModelView(AirflowModelView):
route_base = '/job'
datamodel = AirflowModelView.CustomSQLAInterface(BaseJob)
base_permissions = ['can_list']
list_columns = ['id', 'dag_id', 'state', 'job_type', 'start_date',
'end_date', 'latest_heartbeat',
'executor_class', 'hostname', 'unixname']
search_columns = ['id', 'dag_id', 'state', 'job_type', 'start_date',
'end_date', 'latest_heartbeat', 'executor_class',
'hostname', 'unixname']
base_order = ('start_date', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
formatters_columns = {
'start_date': wwwutils.datetime_f('start_date'),
'end_date': wwwutils.datetime_f('end_date'),
'hostname': wwwutils.nobr_f('hostname'),
'state': wwwutils.state_f,
'latest_heartbeat': wwwutils.datetime_f('latest_heartbeat'),
}
class DagRunModelView(AirflowModelView):
route_base = '/dagrun'
datamodel = AirflowModelView.CustomSQLAInterface(models.DagRun)
base_permissions = ['can_list', 'can_add']
add_columns = ['state', 'dag_id', 'execution_date', 'run_id', 'external_trigger', 'conf']
list_columns = ['state', 'dag_id', 'execution_date', 'run_id', 'external_trigger']
search_columns = ['state', 'dag_id', 'execution_date', 'run_id', 'external_trigger']
base_order = ('execution_date', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
add_form = edit_form = DagRunForm
formatters_columns = {
'execution_date': wwwutils.datetime_f('execution_date'),
'state': wwwutils.state_f,
'start_date': wwwutils.datetime_f('start_date'),
'dag_id': wwwutils.dag_link,
'run_id': wwwutils.dag_run_link,
}
@action('muldelete', "Delete", "Are you sure you want to delete selected records?",
single=False)
@has_dag_access(can_dag_edit=True)
@provide_session
def action_muldelete(self, items, session=None):
self.datamodel.delete_all(items)
self.update_redirect()
dirty_ids = []
for item in items:
dirty_ids.append(item.dag_id)
return redirect(self.get_redirect())
@action('set_running', "Set state to 'running'", '', single=False)
@provide_session
def action_set_running(self, drs, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
for dr in session.query(DR).filter(
DR.id.in_([dagrun.id for dagrun in drs])).all():
dirty_ids.append(dr.dag_id)
count += 1
dr.start_date = timezone.utcnow()
dr.state = State.RUNNING
session.commit()
flash("{count} dag runs were set to running".format(count=count))
except Exception as ex:
flash(str(ex), 'error')
flash('Failed to set state', 'error')
return redirect(self.get_default_url())
@action('set_failed', "Set state to 'failed'",
"All running task instances would also be marked as failed, are you sure?",
single=False)
@provide_session
def action_set_failed(self, drs, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
altered_tis = []
for dr in session.query(DR).filter(
DR.id.in_([dagrun.id for dagrun in drs])).all():
dirty_ids.append(dr.dag_id)
count += 1
altered_tis += \
set_dag_run_state_to_failed(dagbag.get_dag(dr.dag_id),
dr.execution_date,
commit=True,
session=session)
altered_ti_count = len(altered_tis)
flash(
"{count} dag runs and {altered_ti_count} task instances "
"were set to failed".format(count=count, altered_ti_count=altered_ti_count))
except Exception:
flash('Failed to set state', 'error')
return redirect(self.get_default_url())
@action('set_success', "Set state to 'success'",
"All task instances would also be marked as success, are you sure?",
single=False)
@provide_session
def action_set_success(self, drs, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
altered_tis = []
for dr in session.query(DR).filter(
DR.id.in_([dagrun.id for dagrun in drs])).all():
dirty_ids.append(dr.dag_id)
count += 1
altered_tis += \
set_dag_run_state_to_success(dagbag.get_dag(dr.dag_id),
dr.execution_date,
commit=True,
session=session)
altered_ti_count = len(altered_tis)
flash(
"{count} dag runs and {altered_ti_count} task instances "
"were set to success".format(count=count, altered_ti_count=altered_ti_count))
except Exception:
flash('Failed to set state', 'error')
return redirect(self.get_default_url())
class LogModelView(AirflowModelView):
route_base = '/log'
datamodel = AirflowModelView.CustomSQLAInterface(Log)
base_permissions = ['can_list']
list_columns = ['id', 'dttm', 'dag_id', 'task_id', 'event', 'execution_date',
'owner', 'extra']
search_columns = ['dag_id', 'task_id', 'event', 'execution_date', 'owner', 'extra']
base_order = ('dttm', 'desc')
base_filters = [['dag_id', DagFilter, lambda: []]]
formatters_columns = {
'dttm': wwwutils.datetime_f('dttm'),
'execution_date': wwwutils.datetime_f('execution_date'),
'dag_id': wwwutils.dag_link,
}
class TaskInstanceModelView(AirflowModelView):
route_base = '/taskinstance'
datamodel = AirflowModelView.CustomSQLAInterface(models.TaskInstance)
base_permissions = ['can_list']
page_size = PAGE_SIZE
list_columns = ['state', 'dag_id', 'task_id', 'execution_date', 'operator',
'start_date', 'end_date', 'duration', 'job_id', 'hostname',
'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number',
'pool', 'log_url']
order_columns = [item for item in list_columns if item not in ['try_number', 'log_url']]
search_columns = ['state', 'dag_id', 'task_id', 'execution_date', 'hostname',
'queue', 'pool', 'operator', 'start_date', 'end_date']
base_order = ('job_id', 'asc')
base_filters = [['dag_id', DagFilter, lambda: []]]
def log_url_formatter(attr):
log_url = attr.get('log_url')
return Markup(
'<a href="{log_url}">'
' <span class="glyphicon glyphicon-book" aria-hidden="true">'
'</span></a>').format(log_url=log_url)
def duration_f(attr):
end_date = attr.get('end_date')
duration = attr.get('duration')
if end_date and duration:
return timedelta(seconds=duration)
formatters_columns = {
'log_url': log_url_formatter,
'task_id': wwwutils.task_instance_link,
'hostname': wwwutils.nobr_f('hostname'),
'state': wwwutils.state_f,
'execution_date': wwwutils.datetime_f('execution_date'),
'start_date': wwwutils.datetime_f('start_date'),
'end_date': wwwutils.datetime_f('end_date'),
'queued_dttm': wwwutils.datetime_f('queued_dttm'),
'dag_id': wwwutils.dag_link,
'duration': duration_f,
}
@provide_session
@action('clear', lazy_gettext('Clear'),
lazy_gettext('Are you sure you want to clear the state of the selected task'
' instance(s) and set their dagruns to the running state?'),
single=False)
def action_clear(self, tis, session=None):
try:
            count = len(tis)
            dag_to_tis = {}
            for ti in tis:
                dag = dagbag.get_dag(ti.dag_id)
                dag_to_tis.setdefault(dag, []).append(ti)
            for dag, dag_tis in dag_to_tis.items():
                models.clear_task_instances(dag_tis, session, dag=dag)
            session.commit()
            flash("{0} task instances have been cleared".format(count))
self.update_redirect()
return redirect(self.get_redirect())
except Exception:
flash('Failed to clear task instances', 'error')
@provide_session
def set_task_instance_state(self, tis, target_state, session=None):
try:
count = len(tis)
for ti in tis:
ti.set_state(target_state, session)
session.commit()
flash("{count} task instances were set to '{target_state}'".format(
count=count, target_state=target_state))
except Exception:
flash('Failed to set state', 'error')
@action('set_running', "Set state to 'running'", '', single=False)
@has_dag_access(can_dag_edit=True)
def action_set_running(self, tis):
self.set_task_instance_state(tis, State.RUNNING)
self.update_redirect()
return redirect(self.get_redirect())
@action('set_failed', "Set state to 'failed'", '', single=False)
@has_dag_access(can_dag_edit=True)
def action_set_failed(self, tis):
self.set_task_instance_state(tis, State.FAILED)
self.update_redirect()
return redirect(self.get_redirect())
@action('set_success', "Set state to 'success'", '', single=False)
@has_dag_access(can_dag_edit=True)
def action_set_success(self, tis):
self.set_task_instance_state(tis, State.SUCCESS)
self.update_redirect()
return redirect(self.get_redirect())
@action('set_retry', "Set state to 'up_for_retry'", '', single=False)
@has_dag_access(can_dag_edit=True)
def action_set_retry(self, tis):
self.set_task_instance_state(tis, State.UP_FOR_RETRY)
self.update_redirect()
return redirect(self.get_redirect())
class DagModelView(AirflowModelView):
route_base = '/dagmodel'
datamodel = AirflowModelView.CustomSQLAInterface(models.DagModel)
base_permissions = ['can_list', 'can_show']
list_columns = ['dag_id', 'is_paused', 'last_scheduler_run',
'last_expired', 'scheduler_lock', 'fileloc', 'owners']
formatters_columns = {
'dag_id': wwwutils.dag_link
}
base_filters = [['dag_id', DagFilter, lambda: []]]
def get_query(self):
"""
Default filters for model
"""
return (
super().get_query()
.filter(or_(models.DagModel.is_active,
models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
)
def get_count_query(self):
"""
Default filters for model
"""
return (
super().get_count_query()
.filter(models.DagModel.is_active)
.filter(~models.DagModel.is_subdag)
)
@has_access
@permission_name("list")
@provide_session
@expose('/autocomplete')
def autocomplete(self, session=None):
query = unquote(request.args.get('query', ''))
if not query:
            return wwwutils.json_response([])
# Provide suggestions of dag_ids and owners
dag_ids_query = session.query(DagModel.dag_id.label('item')).filter(
~DagModel.is_subdag, DagModel.is_active,
DagModel.dag_id.ilike('%' + query + '%'))
owners_query = session.query(func.distinct(DagModel.owners).label('item')).filter(
~DagModel.is_subdag, DagModel.is_active,
DagModel.owners.ilike('%' + query + '%'))
# Hide DAGs if not showing status: "all"
status = flask_session.get(FILTER_STATUS_COOKIE)
if status == 'active':
dag_ids_query = dag_ids_query.filter(~DagModel.is_paused)
owners_query = owners_query.filter(~DagModel.is_paused)
elif status == 'paused':
dag_ids_query = dag_ids_query.filter(DagModel.is_paused)
owners_query = owners_query.filter(DagModel.is_paused)
filter_dag_ids = appbuilder.sm.get_accessible_dag_ids()
if 'all_dags' not in filter_dag_ids:
dag_ids_query = dag_ids_query.filter(DagModel.dag_id.in_(filter_dag_ids))
owners_query = owners_query.filter(DagModel.dag_id.in_(filter_dag_ids))
payload = [row[0] for row in dag_ids_query.union(owners_query).limit(10).all()]
return wwwutils.json_response(payload)
|
the-stack_0_59 | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 Tuukka Turto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Module defining classes related to inventory actions
"""
from pyherc.data import is_armour, is_weapon, is_boots
from pyherc.aspects import log_debug, log_info
from pyherc.events import new_unequip_event
from pyherc.rules.factory import SubActionFactory
class UnEquipFactory(SubActionFactory):
"""
Factory for creating unequip actions
.. versionadded:: 0.8
"""
@log_debug
def __init__(self):
"""
Constructor for this factory
"""
super().__init__()
self.sub_action = 'unequip'
@log_debug
def can_handle(self, parameters):
"""
Can this factory process these parameters
:param parameters: parameters to check
:returns: True if factory is capable of handling parameters
:rtype: Boolean
"""
return self.sub_action == parameters.sub_action
@log_info
def get_action(self, parameters):
"""
Create an unequip action
:param parameters: parameters used to control creation
:type parameters: InventoryParameters
"""
return UnEquipAction(parameters.character, parameters.item)
class UnEquipAction():
"""
Action for unequiping an item
.. versionadded:: 0.8
"""
@log_debug
def __init__(self, character, item):
"""
Default constructor
:param character: character wearing the item
:type character: Character
:param item: item to unequip
:type item: Item
"""
super().__init__()
self.character = character
self.item = item
@log_info
def execute(self):
"""
Executes this action
"""
if is_armour(self.item):
self.character.inventory.armour = None
self.character.raise_event(new_unequip_event(self.character,
self.item))
if is_weapon(self.item):
self.character.inventory.weapon = None
self.character.raise_event(new_unequip_event(self.character,
self.item))
if is_boots(self.item):
self.character.inventory.boots = None
self.character.raise_event(new_unequip_event(self.character,
self.item))
@log_debug
def is_legal(self):
"""
Check if the action is possible to perform
        :returns: True if the action is possible, False otherwise
:rtype: Boolean
"""
return True
|
the-stack_0_61 | # pylint: disable=W0611
# coding: utf-8
'''
Window
======
Core class for creating the default Kivy window. Kivy supports only one window
per application: please don't try to create more than one.
'''
__all__ = ('Keyboard', 'WindowBase', 'Window')
from os.path import join, exists
from os import getcwd
from kivy.core import core_select_lib
from kivy.clock import Clock
from kivy.config import Config
from kivy.logger import Logger
from kivy.base import EventLoop, stopTouchApp
from kivy.modules import Modules
from kivy.event import EventDispatcher
from kivy.properties import ListProperty, ObjectProperty, AliasProperty, \
NumericProperty, OptionProperty, StringProperty, BooleanProperty
from kivy.utils import platform, reify
from kivy.context import get_current_context
from kivy.uix.behaviors import FocusBehavior
from kivy.setupconfig import USE_SDL2
from kivy.graphics.transformation import Matrix
# late import
VKeyboard = None
android = None
class Keyboard(EventDispatcher):
'''Keyboard interface that is returned by
:meth:`WindowBase.request_keyboard`. When you request a keyboard,
you'll get an instance of this class. Whatever the keyboard input is
(system or virtual keyboard), you'll receive events through this
instance.
:Events:
`on_key_down`: keycode, text, modifiers
Fired when a new key is pressed down
`on_key_up`: keycode
Fired when a key is released (up)
Here is an example of how to request a Keyboard in accordance with the
current configuration:
.. include:: ../../examples/widgets/keyboardlistener.py
:literal:
'''
    # Keycodes mapping, between str <-> int. These keycodes are
    # currently taken from pygame.key. If a new provider is used, it
    # must translate its keycodes to these as well.
keycodes = {
# specials keys
'backspace': 8, 'tab': 9, 'enter': 13, 'rshift': 303, 'shift': 304,
'alt': 308, 'rctrl': 306, 'lctrl': 305,
'super': 309, 'alt-gr': 307, 'compose': 311, 'pipe': 310,
'capslock': 301, 'escape': 27, 'spacebar': 32, 'pageup': 280,
'pagedown': 281, 'end': 279, 'home': 278, 'left': 276, 'up':
273, 'right': 275, 'down': 274, 'insert': 277, 'delete': 127,
'numlock': 300, 'print': 144, 'screenlock': 145, 'pause': 19,
# a-z keys
'a': 97, 'b': 98, 'c': 99, 'd': 100, 'e': 101, 'f': 102, 'g': 103,
'h': 104, 'i': 105, 'j': 106, 'k': 107, 'l': 108, 'm': 109, 'n': 110,
'o': 111, 'p': 112, 'q': 113, 'r': 114, 's': 115, 't': 116, 'u': 117,
'v': 118, 'w': 119, 'x': 120, 'y': 121, 'z': 122,
# 0-9 keys
'0': 48, '1': 49, '2': 50, '3': 51, '4': 52,
'5': 53, '6': 54, '7': 55, '8': 56, '9': 57,
# numpad
'numpad0': 256, 'numpad1': 257, 'numpad2': 258, 'numpad3': 259,
'numpad4': 260, 'numpad5': 261, 'numpad6': 262, 'numpad7': 263,
'numpad8': 264, 'numpad9': 265, 'numpaddecimal': 266,
'numpaddivide': 267, 'numpadmul': 268, 'numpadsubstract': 269,
'numpadadd': 270, 'numpadenter': 271,
# F1-15
'f1': 282, 'f2': 283, 'f3': 284, 'f4': 285, 'f5': 286, 'f6': 287,
'f7': 288, 'f8': 289, 'f9': 290, 'f10': 291, 'f11': 292, 'f12': 293,
'f13': 294, 'f14': 295, 'f15': 296,
# other keys
'(': 40, ')': 41,
'[': 91, ']': 93,
'{': 123, '}': 125,
':': 59, ';': 59,
'=': 61, '+': 43,
'-': 45, '_': 95,
'/': 47, '*': 42,
'?': 47,
'`': 96, '~': 126,
'´': 180, '¦': 166,
'\\': 92, '|': 124,
'"': 34, "'": 39,
',': 44, '.': 46,
'<': 60, '>': 62,
'@': 64, '!': 33,
'#': 35, '$': 36,
'%': 37, '^': 94,
'&': 38, '¬': 172,
'¨': 168, '…': 8230,
'ù': 249, 'à': 224,
'é': 233, 'è': 232,
}
__events__ = ('on_key_down', 'on_key_up', 'on_textinput')
def __init__(self, **kwargs):
super(Keyboard, self).__init__()
#: Window which the keyboard is attached too
self.window = kwargs.get('window', None)
#: Callback that will be called when the keyboard is released
self.callback = kwargs.get('callback', None)
#: Target that have requested the keyboard
self.target = kwargs.get('target', None)
#: VKeyboard widget, if allowed by the configuration
self.widget = kwargs.get('widget', None)
def on_key_down(self, keycode, text, modifiers):
pass
def on_key_up(self, keycode):
pass
def on_textinput(self, text):
pass
def release(self):
'''Call this method to release the current keyboard.
This will ensure that the keyboard is no longer attached to your
callback.'''
if self.window:
self.window.release_keyboard(self.target)
def _on_window_textinput(self, instance, text):
return self.dispatch('on_textinput', text)
def _on_window_key_down(self, instance, keycode, scancode, text,
modifiers):
keycode = (keycode, self.keycode_to_string(keycode))
if text == '\x04':
Window.trigger_keyboard_height()
return
return self.dispatch('on_key_down', keycode, text, modifiers)
def _on_window_key_up(self, instance, keycode, *largs):
keycode = (keycode, self.keycode_to_string(keycode))
return self.dispatch('on_key_up', keycode)
def _on_vkeyboard_key_down(self, instance, keycode, text, modifiers):
if keycode is None:
keycode = text.lower()
keycode = (self.string_to_keycode(keycode), keycode)
return self.dispatch('on_key_down', keycode, text, modifiers)
def _on_vkeyboard_key_up(self, instance, keycode, text, modifiers):
if keycode is None:
keycode = text
keycode = (self.string_to_keycode(keycode), keycode)
return self.dispatch('on_key_up', keycode)
def _on_vkeyboard_textinput(self, instance, text):
return self.dispatch('on_textinput', text)
def string_to_keycode(self, value):
'''Convert a string to a keycode number according to the
:attr:`Keyboard.keycodes`. If the value is not found in the
keycodes, it will return -1.
'''
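        # e.g. string_to_keycode('a') -> 97, string_to_keycode('unknown') -> -1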
return Keyboard.keycodes.get(value, -1)
def keycode_to_string(self, value):
'''Convert a keycode number to a string according to the
:attr:`Keyboard.keycodes`. If the value is not found in the
keycodes, it will return ''.
'''
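        # e.g. keycode_to_string(97) -> 'a', keycode_to_string(-42) -> ''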
keycodes = list(Keyboard.keycodes.values())
if value in keycodes:
return list(Keyboard.keycodes.keys())[keycodes.index(value)]
return ''
class WindowBase(EventDispatcher):
'''WindowBase is an abstract window widget for any window implementation.
:Parameters:
`borderless`: str, one of ('0', '1')
Set the window border state. Check the
:mod:`~kivy.config` documentation for a
more detailed explanation on the values.
`fullscreen`: str, one of ('0', '1', 'auto', 'fake')
Make the window fullscreen. Check the
:mod:`~kivy.config` documentation for a
more detailed explanation on the values.
`width`: int
Width of the window.
`height`: int
Height of the window.
:Events:
`on_motion`: etype, motionevent
Fired when a new :class:`~kivy.input.motionevent.MotionEvent` is
dispatched
`on_touch_down`:
Fired when a new touch event is initiated.
`on_touch_move`:
Fired when an existing touch event changes location.
`on_touch_up`:
Fired when an existing touch event is terminated.
`on_draw`:
Fired when the :class:`Window` is being drawn.
`on_flip`:
Fired when the :class:`Window` GL surface is being flipped.
`on_rotate`: rotation
Fired when the :class:`Window` is being rotated.
`on_close`:
Fired when the :class:`Window` is closed.
`on_request_close`:
Fired when the event loop wants to close the window, or if the
escape key is pressed and `exit_on_escape` is `True`. If a function
bound to this event returns `True`, the window will not be closed.
            If the event is triggered because of the keyboard escape key,
the keyword argument `source` is dispatched along with a value of
`keyboard` to the bound functions.
.. versionadded:: 1.9.0
`on_keyboard`: key, scancode, codepoint, modifier
Fired when the keyboard is used for input.
.. versionchanged:: 1.3.0
The *unicode* parameter has been deprecated in favor of
codepoint, and will be removed completely in future versions.
`on_key_down`: key, scancode, codepoint
Fired when a key pressed.
.. versionchanged:: 1.3.0
The *unicode* parameter has been deprecated in favor of
codepoint, and will be removed completely in future versions.
`on_key_up`: key, scancode, codepoint
Fired when a key is released.
.. versionchanged:: 1.3.0
                The *unicode* parameter has been deprecated in favor of
codepoint, and will be removed completely in future versions.
`on_dropfile`: str
Fired when a file is dropped on the application.
'''
__instance = None
__initialized = False
_fake_fullscreen = False
_density = 1
# private properties
_size = ListProperty([0, 0])
_modifiers = ListProperty([])
_rotation = NumericProperty(0)
_clearcolor = ObjectProperty([0, 0, 0, 1])
children = ListProperty([])
'''List of the children of this window.
:attr:`children` is a :class:`~kivy.properties.ListProperty` instance and
defaults to an empty list.
Use :meth:`add_widget` and :meth:`remove_widget` to manipulate the list of
children. Don't manipulate the list directly unless you know what you are
doing.
'''
parent = ObjectProperty(None, allownone=True)
'''Parent of this window.
:attr:`parent` is a :class:`~kivy.properties.ObjectProperty` instance and
defaults to None. When created, the parent is set to the window itself.
You must take care of it if you are doing a recursive check.
'''
icon = StringProperty()
def _get_modifiers(self):
return self._modifiers
modifiers = AliasProperty(_get_modifiers, None)
'''List of keyboard modifiers currently active.
'''
def _get_size(self):
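        # Return the rotated client size: width/height are swapped for 90/270
        # degree rotations, scaled sizes come from the GL surface when the
        # density is not 1, and 'resize' mode subtracts the soft keyboard height.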
r = self._rotation
w, h = self._size
if self._density != 1:
w, h = self._win._get_gl_size()
if self.softinput_mode == 'resize':
h -= self.keyboard_height
if r in (0, 180):
return w, h
return h, w
def _set_size(self, size):
if self._size != size:
r = self._rotation
if r in (0, 180):
self._size = size
else:
self._size = size[1], size[0]
self.dispatch('on_resize', *size)
return True
else:
return False
size = AliasProperty(_get_size, _set_size, bind=('_size', ))
'''Get the rotated size of the window. If :attr:`rotation` is set, then the
size will change to reflect the rotation.
'''
def _get_clearcolor(self):
return self._clearcolor
def _set_clearcolor(self, value):
if value is not None:
if type(value) not in (list, tuple):
raise Exception('Clearcolor must be a list or tuple')
if len(value) != 4:
raise Exception('Clearcolor must contain 4 values')
self._clearcolor = value
clearcolor = AliasProperty(_get_clearcolor, _set_clearcolor,
bind=('_clearcolor', ))
'''Color used to clear the window.
::
from kivy.core.window import Window
# red background color
Window.clearcolor = (1, 0, 0, 1)
# don't clear background at all
Window.clearcolor = None
.. versionchanged:: 1.7.2
The clearcolor default value is now: (0, 0, 0, 1).
'''
# make some property read-only
def _get_width(self):
_size = self._size
if self._density != 1:
_size = self._win._get_gl_size()
r = self._rotation
if r == 0 or r == 180:
return _size[0]
return _size[1]
width = AliasProperty(_get_width, None, bind=('_rotation', '_size'))
'''Rotated window width.
:attr:`width` is a read-only :class:`~kivy.properties.AliasProperty`.
'''
def _get_height(self):
'''Rotated window height'''
r = self._rotation
_size = self._size
if self._density != 1:
_size = self._win._get_gl_size()
kb = self.keyboard_height if self.softinput_mode == 'resize' else 0
if r == 0 or r == 180:
return _size[1] - kb
return _size[0] - kb
height = AliasProperty(_get_height, None, bind=('_rotation', '_size'))
'''Rotated window height.
:attr:`height` is a read-only :class:`~kivy.properties.AliasProperty`.
'''
def _get_center(self):
return self.width / 2., self.height / 2.
center = AliasProperty(_get_center, None, bind=('width', 'height'))
'''Center of the rotated window.
:attr:`center` is a :class:`~kivy.properties.AliasProperty`.
'''
def _get_rotation(self):
return self._rotation
def _set_rotation(self, x):
x = int(x % 360)
if x == self._rotation:
return
if x not in (0, 90, 180, 270):
raise ValueError('can rotate only 0, 90, 180, 270 degrees')
self._rotation = x
if self.initialized is False:
return
self.dispatch('on_resize', *self.size)
self.dispatch('on_rotate', x)
rotation = AliasProperty(_get_rotation, _set_rotation,
bind=('_rotation', ))
'''Get/set the window content rotation. Can be one of 0, 90, 180, 270
degrees.
'''
softinput_mode = OptionProperty('', options=('', 'pan', 'scale', 'resize'))
    '''This specifies the behavior of the window contents when the soft
    keyboard is displayed on a mobile platform. Can be one of '', 'pan',
    'scale', 'resize'.
    When '', the main window is left as it is, allowing the user to use
    :attr:`keyboard_height` to manage the window contents the way they want.
    When 'pan', the main window pans, moving the bottom part of the window
    to be always on top of the keyboard.
    When 'resize', the window is resized and the contents scaled to fit the
    remaining space.
    .. versionadded:: 1.9.0
    :attr:`softinput_mode` is an :class:`OptionProperty` and defaults to ''.
    '''
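    # e.g. Window.softinput_mode = 'resize'  # shrink the window above the IME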
_keyboard_changed = BooleanProperty(False)
def _upd_kbd_height(self, *kargs):
self._keyboard_changed = not self._keyboard_changed
def _get_ios_kheight(self):
return 0
def _get_android_kheight(self):
global android
if not android:
import android
return android.get_keyboard_height()
def _get_kheight(self):
if platform == 'android':
return self._get_android_kheight()
if platform == 'ios':
return self._get_ios_kheight()
return 0
keyboard_height = AliasProperty(_get_kheight, None,
bind=('_keyboard_changed',))
    '''Returns the height of the softkeyboard/IME on mobile platforms.
    Will return 0 if not on a mobile platform or if the IME is not active.
    .. versionadded:: 1.9.0
    :attr:`keyboard_height` is a read-only :class:`AliasProperty` and
    defaults to 0.
'''
def _set_system_size(self, size):
self._size = size
def _get_system_size(self):
if self.softinput_mode == 'resize':
return self._size[0], self._size[1] - self.keyboard_height
return self._size
system_size = AliasProperty(
_get_system_size,
_set_system_size,
bind=('_size', ))
'''Real size of the window ignoring rotation.
'''
borderless = BooleanProperty(False)
'''When set to True, this property removes the window border/decoration.
.. versionadded:: 1.9.0
:attr:`borderless` is a :class:`BooleanProperty`, defaults to False.
'''
fullscreen = OptionProperty(False, options=(True, False, 'auto', 'fake'))
'''This property sets the fullscreen mode of the window. Available options
are: True, False, 'auto', 'fake'. Check the :mod:`~kivy.config`
documentation for a more detailed explanation on the values.
.. versionadded:: 1.2.0
.. note::
The 'fake' option has been deprecated, use the :attr:`borderless`
property instead.
'''
mouse_pos = ObjectProperty([0, 0])
'''2d position of the mouse within the window.
.. versionadded:: 1.2.0
'''
@property
def __self__(self):
return self
top = NumericProperty(None, allownone=True)
left = NumericProperty(None, allownone=True)
position = OptionProperty('auto', options=['auto', 'custom'])
render_context = ObjectProperty(None)
canvas = ObjectProperty(None)
title = StringProperty('Kivy')
__events__ = (
'on_draw', 'on_flip', 'on_rotate', 'on_resize', 'on_close',
'on_motion', 'on_touch_down', 'on_touch_move', 'on_touch_up',
'on_mouse_down', 'on_mouse_move', 'on_mouse_up', 'on_keyboard',
'on_key_down', 'on_key_up', 'on_textinput', 'on_dropfile',
'on_request_close', 'on_joy_axis', 'on_joy_hat', 'on_joy_ball',
'on_joy_button_down', "on_joy_button_up")
def __new__(cls, **kwargs):
if cls.__instance is None:
cls.__instance = EventDispatcher.__new__(cls)
return cls.__instance
def __init__(self, **kwargs):
force = kwargs.pop('force', False)
# don't init window 2 times,
# except if force is specified
if WindowBase.__instance is not None and not force:
return
self.initialized = False
self._is_desktop = Config.getboolean('kivy', 'desktop')
# create a trigger for update/create the window when one of window
# property changes
self.trigger_create_window = Clock.create_trigger(
self.create_window, -1)
# Create a trigger for updating the keyboard height
self.trigger_keyboard_height = Clock.create_trigger(
self._upd_kbd_height, .5)
# set the default window parameter according to the configuration
if 'borderless' not in kwargs:
kwargs['borderless'] = Config.getboolean('graphics', 'borderless')
if 'fullscreen' not in kwargs:
fullscreen = Config.get('graphics', 'fullscreen')
if fullscreen not in ('auto', 'fake'):
fullscreen = fullscreen.lower() in ('true', '1', 'yes', 'yup')
kwargs['fullscreen'] = fullscreen
if 'width' not in kwargs:
kwargs['width'] = Config.getint('graphics', 'width')
if 'height' not in kwargs:
kwargs['height'] = Config.getint('graphics', 'height')
if 'rotation' not in kwargs:
kwargs['rotation'] = Config.getint('graphics', 'rotation')
if 'position' not in kwargs:
kwargs['position'] = Config.getdefault('graphics', 'position',
'auto')
if 'top' in kwargs:
kwargs['position'] = 'custom'
kwargs['top'] = kwargs['top']
else:
kwargs['top'] = Config.getint('graphics', 'top')
if 'left' in kwargs:
kwargs['position'] = 'custom'
kwargs['left'] = kwargs['left']
else:
kwargs['left'] = Config.getint('graphics', 'left')
kwargs['_size'] = (kwargs.pop('width'), kwargs.pop('height'))
super(WindowBase, self).__init__(**kwargs)
# bind all the properties that need to recreate the window
self._bind_create_window()
self.bind(size=self.trigger_keyboard_height,
rotation=self.trigger_keyboard_height)
self.bind(softinput_mode=lambda *dt: self.update_viewport(),
keyboard_height=lambda *dt: self.update_viewport())
# init privates
self._system_keyboard = Keyboard(window=self)
self._keyboards = {'system': self._system_keyboard}
self._vkeyboard_cls = None
self.children = []
self.parent = self
# before creating the window
import kivy.core.gl # NOQA
# configure the window
self.create_window()
# attach modules + listener event
EventLoop.set_window(self)
Modules.register_window(self)
EventLoop.add_event_listener(self)
# manage keyboard(s)
self.configure_keyboards()
# assign the default context of the widget creation
if not hasattr(self, '_context'):
self._context = get_current_context()
# mark as initialized
self.initialized = True
def _bind_create_window(self):
for prop in (
'fullscreen', 'borderless', 'position', 'top',
'left', '_size', 'system_size'):
self.bind(**{prop: self.trigger_create_window})
def _unbind_create_window(self):
for prop in (
'fullscreen', 'borderless', 'position', 'top',
'left', '_size', 'system_size'):
self.unbind(**{prop: self.trigger_create_window})
def toggle_fullscreen(self):
'''Toggle between fullscreen and windowed mode.
.. deprecated:: 1.9.0
Use :attr:`fullscreen` instead.
'''
pass
def maximize(self):
'''Maximizes the window. This method should be used on desktop
platforms only.
.. versionadded:: 1.9.0
.. note::
This feature requires a SDL2 window provider and is currently only
supported on desktop platforms.
.. warning::
This code is still experimental, and its API may be subject to
change in a future version.
'''
Logger.warning('Window: maximize() is not implemented in the current '
'window provider.')
def minimize(self):
'''Minimizes the window. This method should be used on desktop
platforms only.
.. versionadded:: 1.9.0
.. note::
This feature requires a SDL2 window provider and is currently only
supported on desktop platforms.
.. warning::
This code is still experimental, and its API may be subject to
change in a future version.
'''
Logger.warning('Window: minimize() is not implemented in the current '
'window provider.')
def restore(self):
'''Restores the size and position of a maximized or minimized window.
This method should be used on desktop platforms only.
.. versionadded:: 1.9.0
.. note::
This feature requires a SDL2 window provider and is currently only
supported on desktop platforms.
.. warning::
This code is still experimental, and its API may be subject to
change in a future version.
'''
Logger.warning('Window: restore() is not implemented in the current '
'window provider.')
def hide(self):
'''Hides the window. This method should be used on desktop
platforms only.
.. versionadded:: 1.9.0
.. note::
This feature requires a SDL2 window provider and is currently only
supported on desktop platforms.
.. warning::
This code is still experimental, and its API may be subject to
change in a future version.
'''
Logger.warning('Window: hide() is not implemented in the current '
'window provider.')
def show(self):
'''Shows the window. This method should be used on desktop
platforms only.
.. versionadded:: 1.9.0
.. note::
This feature requires a SDL2 window provider and is currently only
supported on desktop platforms.
.. warning::
This code is still experimental, and its API may be subject to
change in a future version.
'''
Logger.warning('Window: show() is not implemented in the current '
'window provider.')
def close(self):
'''Close the window'''
pass
def create_window(self, *largs):
'''Will create the main window and configure it.
.. warning::
This method is called automatically at runtime. If you call it, it
will recreate a RenderContext and Canvas. This means you'll have a
new graphics tree, and the old one will be unusable.
        This method exists to permit the creation of a new OpenGL context
AFTER closing the first one. (Like using runTouchApp() and
stopTouchApp()).
This method has only been tested in a unittest environment and
is not suitable for Applications.
Again, don't use this method unless you know exactly what you are
doing!
'''
# just to be sure, if the trigger is set, and if this method is
# manually called, unset the trigger
Clock.unschedule(self.create_window)
# ensure the window creation will not be called twice
if platform in ('android', 'ios'):
self._unbind_create_window()
if not self.initialized:
from kivy.core.gl import init_gl
init_gl()
# create the render context and canvas, only the first time.
from kivy.graphics import RenderContext, Canvas
self.render_context = RenderContext()
self.canvas = Canvas()
self.render_context.add(self.canvas)
else:
# if we get initialized more than once, then reload opengl state
# after the second time.
# XXX check how it's working on embed platform.
if platform == 'linux' or Window.__class__.__name__ == 'WindowSDL':
                # on linux, it's safe to just send a resize.
self.dispatch('on_resize', *self.system_size)
else:
                # on other platforms, the window is recreated, so we need to reload.
from kivy.graphics.context import get_context
get_context().reload()
Clock.schedule_once(lambda x: self.canvas.ask_update(), 0)
self.dispatch('on_resize', *self.system_size)
# ensure the gl viewport is correct
self.update_viewport()
def on_flip(self):
'''Flip between buffers (event)'''
self.flip()
def flip(self):
'''Flip between buffers'''
pass
def _update_childsize(self, instance, value):
self.update_childsize([instance])
def add_widget(self, widget, canvas=None):
'''Add a widget to a window'''
widget.parent = self
self.children.insert(0, widget)
canvas = self.canvas.before if canvas == 'before' else \
self.canvas.after if canvas == 'after' else self.canvas
canvas.add(widget.canvas)
self.update_childsize([widget])
widget.bind(
pos_hint=self._update_childsize,
size_hint=self._update_childsize,
size=self._update_childsize,
pos=self._update_childsize)
def remove_widget(self, widget):
'''Remove a widget from a window
'''
        if widget not in self.children:
return
self.children.remove(widget)
if widget.canvas in self.canvas.children:
self.canvas.remove(widget.canvas)
elif widget.canvas in self.canvas.after.children:
self.canvas.after.remove(widget.canvas)
elif widget.canvas in self.canvas.before.children:
self.canvas.before.remove(widget.canvas)
widget.parent = None
widget.unbind(
pos_hint=self._update_childsize,
size_hint=self._update_childsize,
size=self._update_childsize,
pos=self._update_childsize)
def clear(self):
'''Clear the window with the background color'''
# XXX FIXME use late binding
from kivy.graphics.opengl import glClearColor, glClear, \
GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT, GL_STENCIL_BUFFER_BIT
cc = self._clearcolor
if cc is not None:
glClearColor(*cc)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT |
GL_STENCIL_BUFFER_BIT)
def set_title(self, title):
'''Set the window title.
.. versionadded:: 1.0.5
'''
self.title = title
def set_icon(self, filename):
'''Set the icon of the window.
.. versionadded:: 1.0.5
'''
self.icon = filename
def to_widget(self, x, y, initial=True, relative=False):
return (x, y)
def to_window(self, x, y, initial=True, relative=False):
return (x, y)
def _apply_transform(self, m):
return m
def get_window_matrix(self, x=0, y=0):
m = Matrix()
m.translate(x, y, 0)
return m
def get_root_window(self):
return self
def get_parent_window(self):
return self
def get_parent_layout(self):
return None
def on_draw(self):
self.clear()
self.render_context.draw()
def on_motion(self, etype, me):
'''Event called when a Motion Event is received.
:Parameters:
`etype`: str
One of 'begin', 'update', 'end'
`me`: :class:`~kivy.input.motionevent.MotionEvent`
The Motion Event currently dispatched.
'''
if me.is_touch:
w, h = self.system_size
if platform == 'ios' or self._density != 1:
w, h = self.size
me.scale_for_screen(w, h, rotation=self._rotation,
smode=self.softinput_mode,
kheight=self.keyboard_height)
if etype == 'begin':
self.dispatch('on_touch_down', me)
elif etype == 'update':
self.dispatch('on_touch_move', me)
elif etype == 'end':
self.dispatch('on_touch_up', me)
FocusBehavior._handle_post_on_touch_up(me)
def on_touch_down(self, touch):
'''Event called when a touch down event is initiated.
.. versionchanged:: 1.9.0
The touch `pos` is now transformed to window coordinates before
this method is called. Before, the touch `pos` coordinate would be
`(0, 0)` when this method was called.
'''
for w in self.children[:]:
if w.dispatch('on_touch_down', touch):
return True
def on_touch_move(self, touch):
'''Event called when a touch event moves (changes location).
.. versionchanged:: 1.9.0
The touch `pos` is now transformed to window coordinates before
this method is called. Before, the touch `pos` coordinate would be
`(0, 0)` when this method was called.
'''
for w in self.children[:]:
if w.dispatch('on_touch_move', touch):
return True
def on_touch_up(self, touch):
'''Event called when a touch event is released (terminated).
.. versionchanged:: 1.9.0
The touch `pos` is now transformed to window coordinates before
this method is called. Before, the touch `pos` coordinate would be
`(0, 0)` when this method was called.
'''
for w in self.children[:]:
if w.dispatch('on_touch_up', touch):
return True
def on_resize(self, width, height):
'''Event called when the window is resized.'''
self.update_viewport()
def update_viewport(self):
from kivy.graphics.opengl import glViewport
from kivy.graphics.transformation import Matrix
from math import radians
w, h = self.system_size
if self._density != 1:
w, h = self.size
smode = self.softinput_mode
kheight = self.keyboard_height
w2, h2 = w / 2., h / 2.
r = radians(self.rotation)
x, y = 0, 0
_h = h
if smode:
y = kheight
if smode == 'scale':
_h -= kheight
# prepare the viewport
glViewport(x, y, w, _h)
# do projection matrix
projection_mat = Matrix()
projection_mat.view_clip(0.0, w, 0.0, h, -1.0, 1.0, 0)
self.render_context['projection_mat'] = projection_mat
# do modelview matrix
modelview_mat = Matrix().translate(w2, h2, 0)
modelview_mat = modelview_mat.multiply(Matrix().rotate(r, 0, 0, 1))
w, h = self.size
w2, h2 = w / 2., h / 2.
modelview_mat = modelview_mat.multiply(Matrix().translate(-w2, -h2, 0))
self.render_context['modelview_mat'] = modelview_mat
# redraw canvas
self.canvas.ask_update()
# and update childs
self.update_childsize()
def update_childsize(self, childs=None):
width, height = self.size
if childs is None:
childs = self.children
for w in childs:
shw, shh = w.size_hint
if shw and shh:
w.size = shw * width, shh * height
elif shw:
w.width = shw * width
elif shh:
w.height = shh * height
for key, value in w.pos_hint.items():
if key == 'x':
w.x = value * width
elif key == 'right':
w.right = value * width
elif key == 'y':
w.y = value * height
elif key == 'top':
w.top = value * height
elif key == 'center_x':
w.center_x = value * width
elif key == 'center_y':
w.center_y = value * height
def screenshot(self, name='screenshot{:04d}.png'):
        '''Save the currently displayed image to a file.
'''
i = 0
path = None
if name != 'screenshot{:04d}.png':
_ext = name.split('.')[-1]
name = ''.join((name[:-(len(_ext) + 1)], '{:04d}.', _ext))
while True:
i += 1
path = join(getcwd(), name.format(i))
if not exists(path):
break
return path
def on_rotate(self, rotation):
'''Event called when the screen has been rotated.
'''
pass
def on_close(self, *largs):
'''Event called when the window is closed'''
Modules.unregister_window(self)
EventLoop.remove_event_listener(self)
def on_request_close(self, *largs, **kwargs):
'''Event called before we close the window. If a bound function returns
        `True`, the window will not be closed. If the event is triggered
because of the keyboard escape key, the keyword argument `source` is
dispatched along with a value of `keyboard` to the bound functions.
.. warning::
When the bound function returns True the window will not be closed,
so use with care because the user would not be able to close the
program, even if the red X is clicked.
'''
pass
def on_mouse_down(self, x, y, button, modifiers):
'''Event called when the mouse is used (pressed/released)'''
pass
def on_mouse_move(self, x, y, modifiers):
'''Event called when the mouse is moved with buttons pressed'''
pass
def on_mouse_up(self, x, y, button, modifiers):
        '''Event called when a mouse button is released'''
pass
def on_joy_axis(self, stickid, axisid, value):
'''Event called when a joystick has a stick or other axis moved
.. versionadded:: 1.9.0'''
pass
def on_joy_hat(self, stickid, hatid, value):
'''Event called when a joystick has a hat/dpad moved
.. versionadded:: 1.9.0'''
pass
def on_joy_ball(self, stickid, ballid, value):
'''Event called when a joystick has a ball moved
.. versionadded:: 1.9.0'''
pass
def on_joy_button_down(self, stickid, buttonid):
'''Event called when a joystick has a button pressed
.. versionadded:: 1.9.0'''
pass
def on_joy_button_up(self, stickid, buttonid):
'''Event called when a joystick has a button released
.. versionadded:: 1.9.0'''
pass
def on_keyboard(self, key, scancode=None, codepoint=None,
modifier=None, **kwargs):
'''Event called when keyboard is used.
.. warning::
Some providers may omit `scancode`, `codepoint` and/or `modifier`.
'''
if 'unicode' in kwargs:
Logger.warning("The use of the unicode parameter is deprecated, "
"and will be removed in future versions. Use "
"codepoint instead, which has identical "
"semantics.")
# Quit if user presses ESC or the typical OSX shortcuts CMD+q or CMD+w
# TODO If just CMD+w is pressed, only the window should be closed.
is_osx = platform == 'darwin'
if WindowBase.on_keyboard.exit_on_escape:
if key == 27 or all([is_osx, key in [113, 119], modifier == 1024]):
if not self.dispatch('on_request_close', source='keyboard'):
stopTouchApp()
self.close()
return True
if Config:
on_keyboard.exit_on_escape = Config.getboolean('kivy', 'exit_on_escape')
def __exit(section, name, value):
WindowBase.__dict__['on_keyboard'].exit_on_escape = \
Config.getboolean('kivy', 'exit_on_escape')
Config.add_callback(__exit, 'kivy', 'exit_on_escape')
def on_key_down(self, key, scancode=None, codepoint=None,
modifier=None, **kwargs):
'''Event called when a key is down (same arguments as on_keyboard)'''
if 'unicode' in kwargs:
Logger.warning("The use of the unicode parameter is deprecated, "
"and will be removed in future versions. Use "
"codepoint instead, which has identical "
"semantics.")
def on_key_up(self, key, scancode=None, codepoint=None,
modifier=None, **kwargs):
'''Event called when a key is released (same arguments as on_keyboard)
'''
if 'unicode' in kwargs:
Logger.warning("The use of the unicode parameter is deprecated, "
"and will be removed in future versions. Use "
"codepoint instead, which has identical "
"semantics.")
def on_textinput(self, text):
        '''Event called when text is entered, i.e. alphanumeric or
        non-control keys, or a set of keys. As it is not guaranteed whether
        we get one character or multiple ones, this event supports handling
        multiple characters.
.. versionadded:: 1.9.0
'''
pass
def on_dropfile(self, filename):
'''Event called when a file is dropped on the application.
.. warning::
            This event currently works with the sdl2 window provider and, on
            MacOSX, with the pygame window provider using a patched version
            of pygame.
This event is left in place for further evolution
(ios, android etc.)
.. versionadded:: 1.2.0
'''
pass
@reify
def dpi(self):
'''Return the DPI of the screen. If the implementation doesn't support
any DPI lookup, it will just return 96.
.. warning::
This value is not cross-platform. Use
:attr:`kivy.base.EventLoop.dpi` instead.
'''
return 96.
def configure_keyboards(self):
# Configure how to provide keyboards (virtual or not)
        # register the system keyboard for listening to keys from the window
sk = self._system_keyboard
self.bind(
on_key_down=sk._on_window_key_down,
on_key_up=sk._on_window_key_up,
on_textinput=sk._on_window_textinput)
# use the device's real keyboard
self.use_syskeyboard = True
# use the device's real keyboard
self.allow_vkeyboard = False
# one single vkeyboard shared between all widgets
self.single_vkeyboard = True
# the single vkeyboard is always sitting at the same position
self.docked_vkeyboard = False
# now read the configuration
mode = Config.get('kivy', 'keyboard_mode')
if mode not in ('', 'system', 'dock', 'multi', 'systemanddock',
'systemandmulti'):
Logger.critical('Window: unknown keyboard mode %r' % mode)
# adapt mode according to the configuration
if mode == 'system':
self.use_syskeyboard = True
self.allow_vkeyboard = False
self.single_vkeyboard = True
self.docked_vkeyboard = False
elif mode == 'dock':
self.use_syskeyboard = False
self.allow_vkeyboard = True
self.single_vkeyboard = True
self.docked_vkeyboard = True
elif mode == 'multi':
self.use_syskeyboard = False
self.allow_vkeyboard = True
self.single_vkeyboard = False
self.docked_vkeyboard = False
elif mode == 'systemanddock':
self.use_syskeyboard = True
self.allow_vkeyboard = True
self.single_vkeyboard = True
self.docked_vkeyboard = True
elif mode == 'systemandmulti':
self.use_syskeyboard = True
self.allow_vkeyboard = True
self.single_vkeyboard = False
self.docked_vkeyboard = False
Logger.info(
'Window: virtual keyboard %sallowed, %s, %s' % (
'' if self.allow_vkeyboard else 'not ',
'single mode' if self.single_vkeyboard else 'multiuser mode',
'docked' if self.docked_vkeyboard else 'not docked'))
def set_vkeyboard_class(self, cls):
'''.. versionadded:: 1.0.8
Set the VKeyboard class to use. If set to None, it will use the
:class:`kivy.uix.vkeyboard.VKeyboard`.
'''
self._vkeyboard_cls = cls
def release_all_keyboards(self):
'''.. versionadded:: 1.0.8
This will ensure that no virtual keyboard / system keyboard is
requested. All instances will be closed.
'''
for key in list(self._keyboards.keys())[:]:
keyboard = self._keyboards[key]
if keyboard:
keyboard.release()
def request_keyboard(self, callback, target, input_type='text'):
'''.. versionadded:: 1.0.4
Internal widget method to request the keyboard. This method is rarely
required by the end-user as it is handled automatically by the
:class:`~kivy.uix.textinput.TextInput`. We expose it in case you want
to handle the keyboard manually for unique input scenarios.
A widget can request the keyboard, indicating a callback to call
when the keyboard is released (or taken by another widget).
:Parameters:
`callback`: func
Callback that will be called when the keyboard is
closed. This can be because somebody else requested the
keyboard or the user closed it.
`target`: Widget
Attach the keyboard to the specified `target`. This should be
the widget that requested the keyboard. Ensure you have a
different target attached to each keyboard if you're working in
a multi user mode.
.. versionadded:: 1.0.8
`input_type`: string
Choose the type of soft keyboard to request. Can be one of
'text', 'number', 'url', 'mail', 'datetime', 'tel', 'address'.
.. note::
`input_type` is currently only honored on mobile devices.
.. versionadded:: 1.8.0
:Return:
An instance of :class:`Keyboard` containing the callback, target,
and if the configuration allows it, a
:class:`~kivy.uix.vkeyboard.VKeyboard` instance attached as a
*.widget* property.
.. note::
The behavior of this function is heavily influenced by the current
`keyboard_mode`. Please see the Config's
:ref:`configuration tokens <configuration-tokens>` section for
more information.
'''
# release any previous keyboard attached.
self.release_keyboard(target)
# if we can use virtual vkeyboard, activate it.
if self.allow_vkeyboard:
keyboard = None
# late import
global VKeyboard
if VKeyboard is None and self._vkeyboard_cls is None:
from kivy.uix.vkeyboard import VKeyboard
self._vkeyboard_cls = VKeyboard
# if the keyboard doesn't exist, create it.
key = 'single' if self.single_vkeyboard else target
if key not in self._keyboards:
vkeyboard = self._vkeyboard_cls()
keyboard = Keyboard(widget=vkeyboard, window=self)
vkeyboard.bind(
on_key_down=keyboard._on_vkeyboard_key_down,
on_key_up=keyboard._on_vkeyboard_key_up,
on_textinput=keyboard._on_vkeyboard_textinput)
self._keyboards[key] = keyboard
else:
keyboard = self._keyboards[key]
# configure vkeyboard
keyboard.target = keyboard.widget.target = target
keyboard.callback = keyboard.widget.callback = callback
# add to the window
self.add_widget(keyboard.widget)
# only after add, do dock mode
keyboard.widget.docked = self.docked_vkeyboard
keyboard.widget.setup_mode()
else:
# system keyboard, just register the callback.
keyboard = self._system_keyboard
keyboard.callback = callback
keyboard.target = target
# use system (hardware) keyboard according to flag
if self.allow_vkeyboard and self.use_syskeyboard:
self.unbind(
on_key_down=keyboard._on_window_key_down,
on_key_up=keyboard._on_window_key_up,
on_textinput=keyboard._on_window_textinput)
self.bind(
on_key_down=keyboard._on_window_key_down,
on_key_up=keyboard._on_window_key_up,
on_textinput=keyboard._on_window_textinput)
return keyboard
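    # A minimal illustrative sketch of the request/release cycle described in
    # the docstring above; the widget method names (`_on_kbd_closed`,
    # `_on_kbd_key_down`) and the focus handling are assumptions for
    # illustration only, not part of this module:
    #
    #     def on_focus(self, instance, focused):
    #         if focused:
    #             self._kbd = Window.request_keyboard(self._on_kbd_closed, self)
    #             self._kbd.bind(on_key_down=self._on_kbd_key_down)
    #         else:
    #             Window.release_keyboard(self)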
def release_keyboard(self, target=None):
'''.. versionadded:: 1.0.4
        Internal method for the widget to release the real keyboard. Check
:meth:`request_keyboard` to understand how it works.
'''
if self.allow_vkeyboard:
key = 'single' if self.single_vkeyboard else target
if key not in self._keyboards:
return
keyboard = self._keyboards[key]
callback = keyboard.callback
if callback:
keyboard.callback = None
callback()
keyboard.target = None
self.remove_widget(keyboard.widget)
if key != 'single' and key in self._keyboards:
del self._keyboards[key]
elif self._system_keyboard.callback:
# this way will prevent possible recursion.
callback = self._system_keyboard.callback
self._system_keyboard.callback = None
callback()
return True
#: Instance of a :class:`WindowBase` implementation
window_impl = []
if platform == 'linux':
window_impl += [('egl_rpi', 'window_egl_rpi', 'WindowEglRpi')]
if USE_SDL2:
window_impl += [('sdl2', 'window_sdl2', 'WindowSDL')]
else:
window_impl += [
('pygame', 'window_pygame', 'WindowPygame')]
if platform == 'linux':
window_impl += [('x11', 'window_x11', 'WindowX11')]
Window = core_select_lib('window', window_impl, True)
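
# A minimal sketch of how application code typically consumes the selected
# `Window` above; it is illustration only (never called), and the Label widget
# and printed messages are assumptions, not part of this module.
def _example_window_usage():
    from kivy.uix.label import Label

    def on_resize(window, width, height):
        # reacts to WindowBase.on_resize / update_viewport defined above
        print('window resized to', width, height)

    Window.bind(on_resize=on_resize)
    Window.add_widget(Label(text='hello'))
    # WindowBase.screenshot() only computes the target path; providers
    # override it to actually write the image.
    print('screenshot path would be', Window.screenshot())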
|
the-stack_0_62 | import pytest
from nmcli.data import Connection
from nmcli.dummy._connection import DummyConnectionControl
def test_call():
result_call = [Connection('a', 'b', 'ethernet', 'eth0')]
c = DummyConnectionControl(result_call)
assert c() == result_call
def test_call_when_raise_error():
c = DummyConnectionControl(raise_error=Exception)
with pytest.raises(Exception):
c()
def test_add():
c = DummyConnectionControl()
conn_type = 'ethernet'
options = {
'key': 'value'
}
ifname = 'eth0'
name = 'MyHome'
autoconnect = True
c.add(conn_type, options, ifname, name, autoconnect)
assert c.add_args[0] == (conn_type, options, ifname, name, autoconnect)
c.add(conn_type, options, ifname, name, False)
assert c.add_args[1] == (conn_type, options, ifname, name, False)
c.add(conn_type, options, ifname, name)
assert c.add_args[2] == (conn_type, options, ifname, name, None)
def test_add_when_raise_error():
c = DummyConnectionControl(raise_error=Exception)
with pytest.raises(Exception):
c.add('ethernet')
def test_modify():
c = DummyConnectionControl()
options = {
'key': 'value'
}
name = 'MyHome'
c.modify(name, options)
assert c.modify_args[0] == (name, options)
def test_modify_when_raise_error():
c = DummyConnectionControl(raise_error=Exception)
with pytest.raises(Exception):
c.modify('ethernet', {'key': 'value'})
def test_delete():
c = DummyConnectionControl()
name = 'MyHome'
c.delete(name)
assert c.delete_args[0] == name
def test_delete_when_raise_error():
c = DummyConnectionControl(raise_error=Exception)
with pytest.raises(Exception):
c.delete('ethernet')
def test_up():
c = DummyConnectionControl()
name = 'MyHome'
c.up(name)
assert c.up_args[0] == name
def test_up_when_raise_error():
c = DummyConnectionControl(raise_error=Exception)
with pytest.raises(Exception):
c.up('ethernet')
def test_down():
c = DummyConnectionControl()
name = 'MyHome'
c.down(name)
assert c.down_args[0] == name
def test_down_when_raise_error():
c = DummyConnectionControl(raise_error=Exception)
with pytest.raises(Exception):
c.down('ethernet')
def test_show():
result_show = {
'key': 'value'
}
c = DummyConnectionControl(result_show=result_show)
name = 'MyHome'
assert c.show(name) == result_show
def test_show_when_raise_error():
c = DummyConnectionControl(raise_error=Exception)
with pytest.raises(Exception):
c.show('MyHome')
def test_show_when_no_arguments_are_passed():
c = DummyConnectionControl()
with pytest.raises(ValueError):
c.show('MyHome')
def test_reload():
c = DummyConnectionControl()
c.reload()
assert c.called_reload == 1
def test_reload_when_raise_error():
c = DummyConnectionControl(raise_error=Exception)
with pytest.raises(Exception):
c.reload()
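
# Minimal sketch of how the dummy is meant to be injected into code under
# test; `connect_home_wifi` is a hypothetical unit under test shown for
# illustration only (pytest will not collect this helper because its name
# lacks the test_ prefix).
def example_injecting_the_dummy():
    def connect_home_wifi(connection_control):
        connection_control.add('wifi', {'ssid': 'MyHome'}, 'wlan0', 'MyHome', True)
    c = DummyConnectionControl()
    connect_home_wifi(c)
    assert c.add_args[0] == ('wifi', {'ssid': 'MyHome'}, 'wlan0', 'MyHome', True)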
|
the-stack_0_63 | # Copyright 2017 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Method test classes for djangolg."""
from __future__ import print_function
from __future__ import unicode_literals
from django.test import TestCase
from djangolg import dialects, methods
from djangolg.methods.base import BaseMethod
class MethodTestCase(TestCase):
"""Test djangolg methods."""
def test_available_methods(self):
"""Test available_methods helper."""
methods_map = methods.available_methods("map")
methods_list = methods.available_methods("list")
assert isinstance(methods_map, dict)
assert isinstance(methods_list, list)
try:
methods.available_methods("wrong")
except Exception as e:
assert isinstance(e, ValueError)
assert "{}".format(e) == "invalid output type: wrong"
def test_get_method(self):
"""Test get_method helper."""
for method_name in methods.available_methods("list"):
method = methods.get_method(name=method_name)
assert isinstance(method, BaseMethod)
try:
methods.get_method()
except Exception as e:
assert isinstance(e, methods.MethodNotFound)
try:
methods.get_method(name=dict())
except Exception as e:
assert isinstance(e, methods.LookingGlassMethodError)
def test_method_init_failure(self):
"""Test method initiation failure."""
try:
BaseMethod(dialect="string")
except Exception as e:
assert isinstance(e, TypeError)
def test_method_dialect_functions(self):
"""Test method dialect getter and setter and other methods."""
for method_name in methods.available_methods(output="list"):
method = methods.get_method(name=method_name)
assert method.dialect is None
try:
method.dialect = "wrong_type"
except Exception as e:
assert isinstance(e, TypeError)
for dialect_name in dialects.available_dialects(output="list"):
dialect = dialects.get_dialect(dialect_name)
method.dialect = dialect
assert method.dialect is dialect
if method.options:
for index, option in method.option_choices():
assert method.get_command(target=method.test_target,
option_index=index)
else:
assert method.get_command(target=method.test_target)
|
the-stack_0_64 | import numpy as np
import tensorflow as tf
from numbers import Number
import gym
import time
from spinup.algos.tf1.sac1 import core
from spinup.algos.tf1.sac1.core import get_vars
from spinup.utils.logx import EpochLogger
from gym.spaces import Box, Discrete
from spinup.utils.frame_stack import FrameStack
import os
class ReplayBuffer:
"""
A simple FIFO experience replay buffer for SAC agents.
"""
def __init__(self, obs_dim, act_dim, size):
self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32)
self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32)
self.acts_buf = np.zeros([size, act_dim], dtype=np.float32)
self.rews_buf = np.zeros(size, dtype=np.float32)
self.done_buf = np.zeros(size, dtype=np.float32)
self.ptr, self.size, self.max_size = 0, 0, size
def store(self, obs, act, rew, next_obs, done):
self.obs1_buf[self.ptr] = obs
self.obs2_buf[self.ptr] = next_obs
self.acts_buf[self.ptr] = act
self.rews_buf[self.ptr] = rew
self.done_buf[self.ptr] = done
self.ptr = (self.ptr+1) % self.max_size
self.size = min(self.size+1, self.max_size)
def sample_batch(self, batch_size=32):
idxs = np.random.randint(0, self.size, size=batch_size)
return dict(obs1=self.obs1_buf[idxs],
obs2=self.obs2_buf[idxs],
acts=self.acts_buf[idxs],
rews=self.rews_buf[idxs],
done=self.done_buf[idxs])
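
# Minimal sketch of the FIFO buffer above (dimensions and sizes are
# illustrative assumptions); it is not used by the training loop below.
def _replay_buffer_demo():
    buf = ReplayBuffer(obs_dim=3, act_dim=1, size=5)
    for t in range(7):  # exceeding size=5 overwrites the oldest entries (FIFO)
        obs = np.ones(3) * t
        buf.store(obs, np.zeros(1), float(t), obs + 1.0, False)
    batch = buf.sample_batch(batch_size=4)
    return batch['obs1'].shape  # (4, 3)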
"""
Soft Actor-Critic
(With slight variations that bring it closer to TD3)
"""
def sac1(args, env_fn, actor_critic=core.mlp_actor_critic, ac_kwargs=dict(), seed=0,
steps_per_epoch=1000, epochs=100, replay_size=int(2e6), gamma=0.99, reward_scale=1.0,
polyak=0.995, lr=5e-4, alpha=0.2, batch_size=200, start_steps=10000,
max_ep_len_train=1000, max_ep_len_test=1000, logger_kwargs=dict(), save_freq=1):
"""
Args:
env_fn : A function which creates a copy of the environment.
The environment must satisfy the OpenAI Gym API.
actor_critic: A function which takes in placeholder symbols
for state, ``x_ph``, and action, ``a_ph``, and returns the main
outputs from the agent's Tensorflow computation graph:
=========== ================ ======================================
Symbol Shape Description
=========== ================ ======================================
``mu`` (batch, act_dim) | Computes mean actions from policy
| given states.
``pi`` (batch, act_dim) | Samples actions from policy given
| states.
``logp_pi`` (batch,) | Gives log probability, according to
| the policy, of the action sampled by
| ``pi``. Critical: must be differentiable
| with respect to policy parameters all
| the way through action sampling.
``q1`` (batch,) | Gives one estimate of Q* for
| states in ``x_ph`` and actions in
| ``a_ph``.
``q2`` (batch,) | Gives another estimate of Q* for
| states in ``x_ph`` and actions in
| ``a_ph``.
``q1_pi`` (batch,) | Gives the composition of ``q1`` and
| ``pi`` for states in ``x_ph``:
| q1(x, pi(x)).
``q2_pi`` (batch,) | Gives the composition of ``q2`` and
| ``pi`` for states in ``x_ph``:
| q2(x, pi(x)).
=========== ================ ======================================
ac_kwargs (dict): Any kwargs appropriate for the actor_critic
function you provided to SAC.
seed (int): Seed for random number generators.
steps_per_epoch (int): Number of steps of interaction (state-action pairs)
for the agent and the environment in each epoch.
epochs (int): Number of epochs to run and train agent.
replay_size (int): Maximum length of replay buffer.
gamma (float): Discount factor. (Always between 0 and 1.)
polyak (float): Interpolation factor in polyak averaging for target
networks. Target networks are updated towards main networks
according to:
.. math:: \\theta_{\\text{targ}} \\leftarrow
\\rho \\theta_{\\text{targ}} + (1-\\rho) \\theta
where :math:`\\rho` is polyak. (Always between 0 and 1, usually
close to 1.)
lr (float): Learning rate (used for policy/value/alpha learning).
alpha (float/'auto'): Entropy regularization coefficient. (Equivalent to
inverse of reward scale in the original SAC paper.) / 'auto': alpha is automated.
batch_size (int): Minibatch size for SGD.
start_steps (int): Number of steps for uniform-random action selection,
before running real policy. Helps exploration.
max_ep_len (int): Maximum length of trajectory / episode / rollout.
logger_kwargs (dict): Keyword args for EpochLogger.
save_freq (int): How often (in terms of gap between epochs) to save
the current policy and value function.
"""
if not args.is_test:
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
tf.set_random_seed(seed)
np.random.seed(seed)
env, test_env = env_fn(3), env_fn(1)
obs_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]
# Action limit for clamping: critically, assumes all dimensions share the same bound!
act_limit = env.action_space.high[0]
# Share information about action space with policy architecture
ac_kwargs['action_space'] = env.action_space
# Inputs to computation graph
x_ph, a_ph, x2_ph, r_ph, d_ph = core.placeholders(obs_dim, act_dim, obs_dim, None, None)
# Main outputs from computation graph
with tf.variable_scope('main'):
mu, pi, logp_pi, logp_pi2, q1, q2, q1_pi, q2_pi = actor_critic(x_ph, x2_ph, a_ph, **ac_kwargs)
# Target value network
with tf.variable_scope('target'):
_, _, logp_pi_, _, _, _,q1_pi_, q2_pi_= actor_critic(x2_ph, x2_ph, a_ph, **ac_kwargs)
# Experience buffer
replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size)
# Count variables
var_counts = tuple(core.count_vars(scope) for scope in
['main/pi', 'main/q1', 'main/q2', 'main'])
print(('\nNumber of parameters: \t pi: %d, \t' + \
'q1: %d, \t q2: %d, \t total: %d\n')%var_counts)
######
if alpha == 'auto':
target_entropy = (-np.prod(env.action_space.shape))
log_alpha = tf.get_variable( 'log_alpha', dtype=tf.float32, initializer=0.0)
alpha = tf.exp(log_alpha)
alpha_loss = tf.reduce_mean(-log_alpha * tf.stop_gradient(logp_pi + target_entropy))
alpha_optimizer = tf.train.AdamOptimizer(learning_rate=lr*0.1, name='alpha_optimizer')
train_alpha_op = alpha_optimizer.minimize(loss=alpha_loss, var_list=[log_alpha])
######
# Min Double-Q:
min_q_pi = tf.minimum(q1_pi_, q2_pi_)
# Targets for Q and V regression
v_backup = tf.stop_gradient(min_q_pi - alpha * logp_pi2)
q_backup = r_ph + gamma*(1-d_ph)*v_backup
# Soft actor-critic losses
pi_loss = tf.reduce_mean(alpha * logp_pi - q1_pi)
q1_loss = 0.5 * tf.reduce_mean((q_backup - q1)**2)
q2_loss = 0.5 * tf.reduce_mean((q_backup - q2)**2)
value_loss = q1_loss + q2_loss
# Policy train op
# (has to be separate from value train op, because q1_pi appears in pi_loss)
pi_optimizer = tf.train.AdamOptimizer(learning_rate=lr)
train_pi_op = pi_optimizer.minimize(pi_loss, var_list=get_vars('main/pi'))
# Value train op
# (control dep of train_pi_op because sess.run otherwise evaluates in nondeterministic order)
value_optimizer = tf.train.AdamOptimizer(learning_rate=lr)
value_params = get_vars('main/q')
with tf.control_dependencies([train_pi_op]):
train_value_op = value_optimizer.minimize(value_loss, var_list=value_params)
# Polyak averaging for target variables
# (control flow because sess.run otherwise evaluates in nondeterministic order)
with tf.control_dependencies([train_value_op]):
target_update = tf.group([tf.assign(v_targ, polyak*v_targ + (1-polyak)*v_main)
for v_main, v_targ in zip(get_vars('main'), get_vars('target'))])
# All ops to call during one training step
if isinstance(alpha, Number):
step_ops = [pi_loss, q1_loss, q2_loss, q1, q2, logp_pi, tf.identity(alpha),
train_pi_op, train_value_op, target_update]
else:
step_ops = [pi_loss, q1_loss, q2_loss, q1, q2, logp_pi, alpha,
train_pi_op, train_value_op, target_update, train_alpha_op]
# Initializing targets to match main variables
target_init = tf.group([tf.assign(v_targ, v_main)
for v_main, v_targ in zip(get_vars('main'), get_vars('target'))])
sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run(target_init)
############################## save and restore ############################
saver = tf.train.Saver()
checkpoint_path = logger_kwargs['output_dir'] + '/checkpoints'
if not os.path.exists(checkpoint_path):
os.makedirs(checkpoint_path)
if args.is_test or args.is_restore_train:
ckpt = tf.train.get_checkpoint_state(checkpoint_path)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print("Model restored.")
def get_action(o, deterministic=False):
act_op = mu if deterministic else pi
return sess.run(act_op, feed_dict={x_ph: o.reshape(1,-1)})[0]
############################## test ############################
if args.is_test:
test_env = gym.make(args.env)
ave_ep_ret = 0
for j in range(10000):
o, r, d, ep_ret, ep_len = test_env.reset(), 0, False, 0, 0
while not d: # (d or (ep_len == 2000)):
o, r, d, _ = test_env.step(get_action(o, True))
ep_ret += r
ep_len += 1
if args.test_render:
test_env.render()
ave_ep_ret = (j*ave_ep_ret + ep_ret)/(j+1)
print('ep_len', ep_len, 'ep_ret:', ep_ret, 'ave_ep_ret:',ave_ep_ret,'({}/10000)'.format(j+1) )
return
############################## train ############################
def test_agent(n=25):
global sess, mu, pi, q1, q2, q1_pi, q2_pi
for j in range(n):
o, r, d, ep_ret, ep_len = test_env.reset(), 0, False, 0, 0
while not(d or (ep_len == max_ep_len_test)):
# Take deterministic actions at test time
o, r, d, _ = test_env.step(get_action(o, True))
ep_ret += r
ep_len += 1
# test_env.render()
logger.store(TestEpRet=ep_ret, TestEpLen=ep_len)
start_time = time.time()
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
total_steps = steps_per_epoch * epochs
ep_index = 0
test_ep_ret_best = test_ep_ret = -10000.0
# Main loop: collect experience in env and update/log each epoch
for t in range(total_steps):
"""
Until start_steps have elapsed, randomly sample actions
from a uniform distribution for better exploration. Afterwards,
use the learned policy.
"""
if t > start_steps:
a = get_action(o)
else:
a = env.action_space.sample()
# Step the env
o2, r, d, _ = env.step(a)
ep_ret += r
ep_len += 1
# Ignore the "done" signal if it comes from hitting the time
# horizon (that is, when it's an artificial terminal signal
# that isn't based on the agent's state)
# d = False if ep_len==max_ep_len_train else d
# Store experience to replay buffer
replay_buffer.store(o, a, r, o2, d)
# Super critical, easy to overlook step: make sure to update
# most recent observation!
o = o2
# End of episode. Training (ep_len times).
if d or (ep_len == max_ep_len_train):
ep_index += 1
print('episode: {}, ep_len: {}, reward: {}'.format(ep_index, ep_len, ep_ret/reward_scale))
"""
Perform all SAC updates at the end of the trajectory.
This is a slight difference from the SAC specified in the
original paper.
"""
for j in range(int(1.5*ep_len)):
batch = replay_buffer.sample_batch(batch_size)
feed_dict = {x_ph: batch['obs1'],
x2_ph: batch['obs2'],
a_ph: batch['acts'],
r_ph: batch['rews'],
d_ph: batch['done'],
}
# step_ops = [pi_loss, q1_loss, q2_loss, q1, q2, logp_pi, alpha, train_pi_op, train_value_op, target_update]
outs = sess.run(step_ops, feed_dict)
logger.store(LossPi=outs[0], LossQ1=outs[1], LossQ2=outs[2],
Q1Vals=outs[3], Q2Vals=outs[4],
LogPi=outs[5], Alpha=outs[6])
logger.store(EpRet=ep_ret/reward_scale, EpLen=ep_len)
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
# End of epoch wrap-up
if t > 0 and t % steps_per_epoch == 0:
epoch = t // steps_per_epoch
test_agent(10)
# test_ep_ret = logger.get_stats('TestEpRet')[0]
# print('TestEpRet', test_ep_ret, 'Best:', test_ep_ret_best)
if logger.get_stats('TestEpRet')[0] >= 180:
print('Recalculating TestEpRet...')
test_agent(100)
test_ep_ret = logger.get_stats('TestEpRet')[0]
# logger.epoch_dict['TestEpRet'] = []
print('TestEpRet', test_ep_ret, 'Best:', test_ep_ret_best)
# logger.store(): store the data; logger.log_tabular(): log the data; logger.dump_tabular(): write the data
# Log info about epoch
logger.log_tabular('Epoch', epoch)
logger.log_tabular('Num_Ep', ep_index)
logger.log_tabular('EpRet', with_min_and_max=True)
logger.log_tabular('TestEpRet', with_min_and_max=False)
logger.log_tabular('EpLen', average_only=True)
logger.log_tabular('TestEpLen', average_only=True)
logger.log_tabular('TotalEnvInteracts', t)
logger.log_tabular('Alpha',average_only=True)
logger.log_tabular('Q1Vals', with_min_and_max=True)
logger.log_tabular('Q2Vals', with_min_and_max=True)
# logger.log_tabular('VVals', with_min_and_max=True)
logger.log_tabular('LogPi', with_min_and_max=True)
logger.log_tabular('LossPi', average_only=True)
logger.log_tabular('LossQ1', average_only=True)
logger.log_tabular('LossQ2', average_only=True)
# logger.log_tabular('LossV', average_only=True)
logger.log_tabular('Time', time.time()-start_time)
logger.dump_tabular()
# Save model
if ((epoch % save_freq == 0) or (epoch == epochs - 1)) and test_ep_ret > test_ep_ret_best:
save_path = saver.save(sess, checkpoint_path+'/model.ckpt', t)
print("Model saved in path: %s" % save_path)
test_ep_ret_best = test_ep_ret
if test_ep_ret >= 200:
print("Model saved in path: %s" % save_path)
print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(ep_index, test_ep_ret))
exit()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='LunarLanderContinuous-v2') # 'Pendulum-v0'
parser.add_argument('--is_restore_train', type=bool, default=False)
parser.add_argument('--is_test', type=bool, default=False)
parser.add_argument('--test_render', type=bool, default=False)
parser.add_argument('--max_ep_len_test', type=int, default=2000) # 'BipedalWalkerHardcore-v2' max_ep_len is 2000
parser.add_argument('--max_ep_len_train', type=int, default=1000) # max_ep_len_train < 2000//3 # 'BipedalWalkerHardcore-v2' max_ep_len is 2000
parser.add_argument('--start_steps', type=int, default=100)
parser.add_argument('--hid', type=int, default=300)
parser.add_argument('--l', type=int, default=1)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--seed', '-s', type=int, default=np.random.random_integers(1000))
parser.add_argument('--epochs', type=int, default=10000)
parser.add_argument('--alpha', default='auto', help="alpha can be either 'auto' or float(e.g:0.2).")
parser.add_argument('--reward_scale', type=float, default=1.0)
parser.add_argument('--act_noise', type=float, default=0.3)
parser.add_argument('--obs_noise', type=float, default=0.0)
parser.add_argument('--exp_name', type=str, default='sac1_LunarLanderContinuous-v2_debug3')
parser.add_argument('--stack_frames', type=int, default=4)
args = parser.parse_args()
from spinup.utils.run_utils import setup_logger_kwargs
logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)
class Wrapper(object):
def __init__(self, env, action_repeat):
self._env = env
self.action_repeat = action_repeat
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
r = 0.0
for _ in range(self.action_repeat):
obs_, reward_, done_, info_ = self._env.step(action)
reward_ = reward_ if reward_ > -99.0 else 0.0
r = r + reward_
if done_:
return obs_, r, done_, info_
return obs_, r, done_, info_
# env = FrameStack(env, args.stack_frames)
env_lunar1 = gym.make(args.env)
env_lunar3 = Wrapper(gym.make(args.env),3)
sac1(args, lambda n : env_lunar3 if n==3 else env_lunar1, actor_critic=core.mlp_actor_critic,
ac_kwargs=dict(hidden_sizes=[200,150]), start_steps = args.start_steps,
gamma=args.gamma, seed=args.seed, epochs=args.epochs, alpha=args.alpha,
logger_kwargs=logger_kwargs, lr = args.lr, reward_scale=args.reward_scale,
max_ep_len_train = args.max_ep_len_train, max_ep_len_test=args.max_ep_len_test)
|
the-stack_0_66 | """1248. Count Number of Nice Subarrays
Medium"""
class Solution(object):
def numberOfSubarrays(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
        # Approach 1 (active): prefix counts of odd numbers seen so far;
        # m[c] counts prefixes containing c odd numbers, so each position
        # contributes m[curr - k] nice subarrays ending there. O(n) time.
m = [0]*50001
res = 0
curr = 0
m[0] = 1
for i in range(len(nums)):
curr += (nums[i]%2)
if curr >= k:
res += m[curr-k]
m[curr] += 1
return res
        # Approach 2 (unreachable alternative kept for reference): sliding
        # window, exactly(k) = atMost(k) - atMost(k-1) using the helper below.
return self.atMost(nums, k)-self.atMost(nums, k-1)
def atMost(self, nums, k):
res = 0
count = 0
left = 0
right = 0
while right < len(nums):
n = nums[right]
count += n%2
while count >= k:
c = nums[left]
count -= c%2
left += 1
res += right - left + 1
right += 1
return res
        # Approach 3 (unreachable alternative kept for reference): the same
        # atMost idea written as a nested closure.
def atMost(k):
res = 0
left = 0
for right in range(len(nums)):
k -= nums[right]%2
while k < 0:
k += nums[left]%2
left += 1
res += right -left + 1
return res
return atMost(k) - atMost(k-1)
#
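# Illustrative check of the active approach above:
# Solution().numberOfSubarrays([1, 1, 2, 1, 1], 3) returns 2, since the nice
# subarrays with exactly k=3 odd numbers are [1, 1, 2, 1] and [1, 2, 1, 1].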
|
the-stack_0_68 | #!/usr/bin/env python
# coding=utf-8
"""
__created__ = '4/22/16'
__author__ = 'deling.ma'
"""
import multiprocessing
bind = '0.0.0.0:7777'
max_requests = 10000
keepalive = 5
proc_name = 'fitahol'
workers = multiprocessing.cpu_count() * 2 + 1
worker_class = 'gaiohttp'
loglevel = 'info'
errorlog = '-'
x_forwarded_for_header = 'X-FORWARDED-FOR'
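
# Illustrative launch command; the config filename and the WSGI module path
# are assumptions, not defined by this file:
# gunicorn -c gunicorn_conf.py fitahol.wsgi:application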
|
the-stack_0_69 | import numpy as np
import tensorflow as tf
import tensorflow.compat.v1.keras as keras
import pickle
import os
from math import ceil
from utils import preprocess_flags, save_kernel, save_kernel_partial
from utils import load_data,load_model,load_model_json,load_kernel
from utils import data_folder,kernel_folder,arch_folder
def main(_):
FLAGS = tf.compat.v1.app.flags.FLAGS.flag_values_dict()
FLAGS = preprocess_flags(FLAGS)
globals().update(FLAGS)
if init_dist != "gaussian":
raise NotImplementedError("Initialization distributions other than Gaussian are not implemented for computing kernels!")
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
print(rank)
if n_gpus>0:
os.environ["CUDA_VISIBLE_DEVICES"]=str((rank)%n_gpus)
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
set_session = keras.backend.set_session
config.log_device_placement = False # to log device placement (on which device the operation ran)
config.allow_soft_placement = True # so that it uses any other existing and supported devices, if the requested GPU:0 isn't found
sess = tf.compat.v1.Session(config=config)
set_session(sess) # set this TensorFlow session as the default session for Keras
train_images,flat_train_images,_,test_images,_ = load_data(FLAGS)
image_size = train_images.shape[1]
number_channels = train_images.shape[-1]
#print("image_size", image_size)
X = train_images
flat_X = flat_train_images
if compute_for_GP_train:
test_images = test_images[:1000]
data = test_images
tp_order = np.concatenate([[0,len(data.shape)-1], np.arange(1, len(data.shape)-1)])
print(data.shape,tp_order)
flat_data = np.transpose(data, tp_order) # NHWC -> NCHW # this is because the cnn GP kernels assume this
flat_test_images = np.array([test_image.flatten() for test_image in flat_data])
Xfull = np.concatenate([flat_train_images,flat_test_images])
flat_X = Xfull
X = np.concatenate([train_images,test_images])
print("compute kernel", network, dataset)
# COMPUTE KERNEL
if use_empirical_NTK:
from nngp_kernel.empirical_ntk import empirical_NTK
print(ceil(int(X.shape[0])*n_samples_repeats))
from tensorflow.keras.models import model_from_json
model = load_model(FLAGS)
K = empirical_NTK(model,X)#,sess=sess)
elif use_empirical_K:
from nngp_kernel.empirical_kernel import empirical_K
print("n_samples_repeats",n_samples_repeats)
print(ceil(int(X.shape[0])*n_samples_repeats))
arch_json_string = load_model_json(FLAGS)
K = empirical_K(arch_json_string,X,ceil(int(X.shape[0])*n_samples_repeats),sigmaw=sigmaw,sigmab=sigmab,n_gpus=n_gpus,empirical_kernel_batch_size=empirical_kernel_batch_size, sess=sess, truncated_init_dist=truncated_init_dist,data_parallelism=False,store_partial_kernel=store_partial_kernel,partial_kernel_n_proc=partial_kernel_n_proc,partial_kernel_index=partial_kernel_index)
if rank == 0:
if not (use_empirical_K or use_empirical_NTK):
if network=="cnn":
from nngp_kernel.cnn_kernel import kernel_matrix
K = kernel_matrix(flat_X,image_size=image_size,number_channels=number_channels,filter_sizes=filter_sizes,padding=padding,strides=strides,sigmaw=sigmaw,sigmab=sigmab,n_gpus=n_gpus)
elif network=="resnet":
from nngp_kernel.resnet_kernel import kernel_matrix
K = kernel_matrix(flat_X,depth=number_layers,image_size=image_size,number_channels=number_channels,n_blocks=3,sigmaw=sigmaw,sigmab=sigmab,n_gpus=n_gpus)
elif network == "fc":
from nngp_kernel.fc_kernel import kernel_matrix
K = kernel_matrix(flat_X,number_layers=number_layers,sigmaw=sigmaw,sigmab=sigmab,n_gpus=n_gpus)
print(K)
'''SAVE KERNEL'''
if store_partial_kernel:
save_kernel_partial(K,FLAGS,partial_kernel_index)
else:
save_kernel(K,FLAGS)
if __name__ == '__main__':
f = tf.compat.v1.app.flags
from utils import define_default_flags
define_default_flags(f)
f.DEFINE_boolean('compute_for_GP_train', False, "Whether to add a bit of test set to kernel, to be able to use it for GP training")
f.DEFINE_boolean('store_partial_kernel', False, "Whether to store the kernels partially on a file to free the processes")
f.DEFINE_integer('empirical_kernel_batch_size', 256, "batch size to use when computing the empirical kernel, larger models need smaller values, but smaller models can use larger values")
f.DEFINE_integer('partial_kernel_n_proc', 175, "number of processes over which we are parallelizing the when computing partial kernels and saving")
f.DEFINE_integer('partial_kernel_index', 0, "index of the process when using partial_kernels method")
tf.compat.v1.app.run()
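
# Illustrative invocation; the script filename and any flag names beyond the
# ones defined in this file come from utils.define_default_flags and are
# assumptions:
# mpiexec -n 2 python compute_kernel.py --network fc --compute_for_GP_train \
#     --empirical_kernel_batch_size 128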
|
the-stack_0_71 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import re
from nose.plugins.skip import SkipTest
from nose.tools import assert_true, assert_equal, assert_false
from django.contrib.auth.models import User
from django.urls import reverse
import desktop.conf as desktop_conf
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import add_to_group
from desktop.models import Document
from hadoop.pseudo_hdfs4 import get_db_prefix, is_live_cluster
from beeswax import data_export
from beeswax.design import hql_query
from beeswax.data_export import download
from beeswax.models import SavedQuery, QueryHistory
from beeswax.server import dbms
from beeswax.test_base import get_query_server_config, wait_for_query_to_finish, fetch_query_result_data
from beeswax.tests import _make_query
from impala import conf
from impala.dbms import ImpalaDbms
LOG = logging.getLogger(__name__)
class MockDbms:
def get_databases(self):
return ['db1', 'db2']
def get_tables(self, database):
return ['table1', 'table2']
class TestMockedImpala:
def setUp(self):
self.client = make_logged_in_client()
# Mock DB calls as we don't need the real ones
self.prev_dbms = dbms.get
dbms.get = lambda a, b: MockDbms()
def tearDown(self):
# Remove monkey patching
dbms.get = self.prev_dbms
def test_basic_flow(self):
response = self.client.get("/impala/")
assert_true(re.search('Impala', response.content), response.content)
assert_true('Query Editor' in response.content)
response = self.client.get("/impala/execute/")
assert_true('Query Editor' in response.content)
def test_saved_queries(self):
user = User.objects.get(username='test')
response = self.client.get("/impala/list_designs")
assert_equal(len(response.context[0]['page'].object_list), 0)
try:
beewax_query = create_saved_query('beeswax', user)
response = self.client.get("/impala/list_designs")
assert_equal(len(response.context[0]['page'].object_list), 0)
impala_query = create_saved_query('impala', user)
response = self.client.get("/impala/list_designs")
assert_equal(len(response.context[0]['page'].object_list), 1)
# Test my query page
QueryHistory.objects.create(owner=user, design=impala_query, query='', last_state=QueryHistory.STATE.available.value)
resp = self.client.get('/impala/my_queries')
assert_equal(len(resp.context[0]['q_page'].object_list), 1)
assert_equal(resp.context[0]['h_page'].object_list[0].design.name, 'create_saved_query')
finally:
if beewax_query is not None:
beewax_query.delete()
if impala_query is not None:
impala_query.delete()
class TestImpalaIntegration:
@classmethod
def setup_class(cls):
cls.finish = []
if not is_live_cluster():
raise SkipTest
cls.client = make_logged_in_client()
cls.user = User.objects.get(username='test')
add_to_group('test')
cls.db = dbms.get(cls.user, get_query_server_config(name='impala'))
cls.DATABASE = get_db_prefix(name='impala')
queries = ["""
DROP TABLE IF EXISTS %(db)s.tweets;
""" % {'db': cls.DATABASE}, """
DROP DATABASE IF EXISTS %(db)s CASCADE;
""" % {'db': cls.DATABASE}, """
CREATE DATABASE %(db)s;
""" % {'db': cls.DATABASE}]
for query in queries:
resp = _make_query(cls.client, query, database='default', local=False, server_name='impala')
resp = wait_for_query_to_finish(cls.client, resp, max=180.0)
content = json.loads(resp.content)
assert_true(content['status'] == 0, resp.content)
queries = ["""
CREATE TABLE tweets (row_num INTEGER, id_str STRING, text STRING) STORED AS PARQUET;
""", """
INSERT INTO TABLE tweets VALUES (1, "531091827395682000", "My dad looks younger than costa");
""", """
INSERT INTO TABLE tweets VALUES (2, "531091827781550000", "There is a thin line between your partner being vengeful and you reaping the consequences of your bad actions towards your partner.");
""", """
INSERT INTO TABLE tweets VALUES (3, "531091827768979000", "@Mustang_Sally83 and they need to get into you :))))");
""", """
INSERT INTO TABLE tweets VALUES (4, "531091827114668000", "@RachelZJohnson thank you rach!xxx");
""", """
INSERT INTO TABLE tweets VALUES (5, "531091827949309000", "i think @WWERollins was robbed of the IC title match this week on RAW also i wonder if he will get a rematch i hope so @WWE");
"""]
for query in queries:
resp = _make_query(cls.client, query, database=cls.DATABASE, local=False, server_name='impala')
resp = wait_for_query_to_finish(cls.client, resp, max=180.0)
content = json.loads(resp.content)
assert_true(content['status'] == 0, resp.content)
@classmethod
def teardown_class(cls):
# We need to drop tables before dropping the database
queries = ["""
DROP TABLE IF EXISTS %(db)s.tweets;
""" % {'db': cls.DATABASE}, """
DROP DATABASE %(db)s CASCADE;
""" % {'db': cls.DATABASE}]
for query in queries:
resp = _make_query(cls.client, query, database='default', local=False, server_name='impala')
resp = wait_for_query_to_finish(cls.client, resp, max=180.0)
# Check the cleanup
databases = cls.db.get_databases()
assert_false(cls.DATABASE in databases)
assert_false('%(db)s_other' % {'db': cls.DATABASE} in databases)
for f in cls.finish:
f()
def test_basic_flow(self):
dbs = self.db.get_databases()
assert_true('_impala_builtins' in dbs, dbs)
assert_true(self.DATABASE in dbs, dbs)
tables = self.db.get_tables(database=self.DATABASE)
assert_true('tweets' in tables, tables)
QUERY = """
SELECT * FROM tweets ORDER BY row_num;
"""
response = _make_query(self.client, QUERY, database=self.DATABASE, local=False, server_name='impala')
content = json.loads(response.content)
query_history = QueryHistory.get(content['id'])
response = wait_for_query_to_finish(self.client, response, max=180.0)
results = []
# Check that we multiple fetches get all the result set
while len(results) < 5:
content = fetch_query_result_data(self.client, response, n=len(results), server_name='impala') # We get less than 5 results most of the time, so increase offset
results += content['results']
assert_equal([1, 2, 3, 4, 5], [col[0] for col in results])
# Check start over
results_start_over = []
while len(results_start_over) < 5:
content = fetch_query_result_data(self.client, response, n=len(results_start_over), server_name='impala')
results_start_over += content['results']
assert_equal(results_start_over, results)
# Check cancel query
resp = self.client.post(reverse('impala:api_cancel_query', kwargs={'query_history_id': query_history.id}))
content = json.loads(resp.content)
assert_equal(0, content['status'])
def test_data_download(self):
hql = 'SELECT * FROM tweets %(limit)s'
FETCH_SIZE = data_export.FETCH_SIZE
data_export.FETCH_SIZE = 2 # Decrease fetch size to validate last fetch logic
try:
query = hql_query(hql % {'limit': ''})
handle = self.db.execute_and_wait(query)
# Get the result in csv. Should have 5 + 1 header row.
csv_resp = download(handle, 'csv', self.db)
csv_content = ''.join(csv_resp.streaming_content)
assert_equal(len(csv_content.strip().split('\n')), 5 + 1)
query = hql_query(hql % {'limit': 'LIMIT 0'})
handle = self.db.execute_and_wait(query)
csv_resp = download(handle, 'csv', self.db)
csv_content = ''.join(csv_resp.streaming_content)
assert_equal(len(csv_content.strip().split('\n')), 1)
query = hql_query(hql % {'limit': 'LIMIT 1'})
handle = self.db.execute_and_wait(query)
csv_resp = download(handle, 'csv', self.db)
csv_content = ''.join(csv_resp.streaming_content)
assert_equal(len(csv_content.strip().split('\n')), 1 + 1)
query = hql_query(hql % {'limit': 'LIMIT 2'})
handle = self.db.execute_and_wait(query)
csv_resp = download(handle, 'csv', self.db)
csv_content = ''.join(csv_resp.streaming_content)
assert_equal(len(csv_content.strip().split('\n')), 1 + 2)
finally:
data_export.FETCH_SIZE = FETCH_SIZE
def test_explain(self):
QUERY = """
SELECT * FROM tweets ORDER BY row_num;
"""
response = _make_query(self.client, QUERY, database=self.DATABASE, local=False, server_name='impala', submission_type='Explain')
json_response = json.loads(response.content)
assert_true('MERGING-EXCHANGE' in json_response['explanation'], json_response)
assert_true('SCAN HDFS' in json_response['explanation'], json_response)
def test_get_table_sample(self):
client = make_logged_in_client()
resp = client.get(reverse('impala:get_sample_data', kwargs={'database': self.DATABASE, 'table': 'tweets'}))
data = json.loads(resp.content)
assert_equal(0, data['status'], data)
assert_equal([u'row_num', u'id_str', u'text'], data['headers'], data)
assert_true(len(data['rows']), data)
def test_get_session(self):
session = None
try:
# Create open session
session = self.db.open_session(self.user)
resp = self.client.get(reverse("impala:api_get_session"))
data = json.loads(resp.content)
assert_true('properties' in data)
assert_true(data['properties'].get('http_addr'))
assert_true('session' in data, data)
assert_true('id' in data['session'], data['session'])
finally:
if session is not None:
try:
self.db.close_session(session)
except Exception:
pass
def test_get_settings(self):
resp = self.client.get(reverse("impala:get_settings"))
json_resp = json.loads(resp.content)
assert_equal(0, json_resp['status'])
assert_true('QUERY_TIMEOUT_S' in json_resp['settings'])
def test_invalidate_tables(self):
# Helper function to get Impala and Beeswax (HMS) tables
def get_impala_beeswax_tables():
impala_resp = self.client.get(reverse('impala:api_autocomplete_tables', kwargs={'database': self.DATABASE}))
impala_tables_meta = json.loads(impala_resp.content)['tables_meta']
impala_tables = [table['name'] for table in impala_tables_meta]
beeswax_resp = self.client.get(reverse('beeswax:api_autocomplete_tables', kwargs={'database': self.DATABASE}))
beeswax_tables_meta = json.loads(beeswax_resp.content)['tables_meta']
beeswax_tables = [table['name'] for table in beeswax_tables_meta]
return impala_tables, beeswax_tables
impala_tables, beeswax_tables = get_impala_beeswax_tables()
assert_equal(impala_tables, beeswax_tables,
"\ntest_invalidate_tables: `%s`\nImpala Tables: %s\nBeeswax Tables: %s" % (self.DATABASE, ','.join(impala_tables), ','.join(beeswax_tables)))
hql = """
CREATE TABLE new_table (a INT);
"""
resp = _make_query(self.client, hql, wait=True, local=False, max=180.0, database=self.DATABASE)
impala_tables, beeswax_tables = get_impala_beeswax_tables()
# New table is not found by Impala
assert_true('new_table' in beeswax_tables, beeswax_tables)
assert_false('new_table' in impala_tables, impala_tables)
resp = self.client.post(reverse('impala:invalidate'), {'database': self.DATABASE})
impala_tables, beeswax_tables = get_impala_beeswax_tables()
# Invalidate picks up new table
assert_equal(impala_tables, beeswax_tables,
"\ntest_invalidate_tables: `%s`\nImpala Tables: %s\nBeeswax Tables: %s" % (self.DATABASE, ','.join(impala_tables), ','.join(beeswax_tables)))
def test_refresh_table(self):
# Helper function to get Impala and Beeswax (HMS) columns
def get_impala_beeswax_columns():
impala_resp = self.client.get(reverse('impala:api_autocomplete_columns', kwargs={'database': self.DATABASE, 'table': 'tweets'}))
impala_columns = json.loads(impala_resp.content)['columns']
beeswax_resp = self.client.get(reverse('beeswax:api_autocomplete_columns', kwargs={'database': self.DATABASE, 'table': 'tweets'}))
beeswax_columns = json.loads(beeswax_resp.content)['columns']
return impala_columns, beeswax_columns
impala_columns, beeswax_columns = get_impala_beeswax_columns()
assert_equal(impala_columns, beeswax_columns,
"\ntest_refresh_table: `%s`.`%s`\nImpala Columns: %s\nBeeswax Columns: %s" % (self.DATABASE, 'tweets', ','.join(impala_columns), ','.join(beeswax_columns)))
hql = """
ALTER TABLE tweets ADD COLUMNS (new_column INT);
"""
resp = _make_query(self.client, hql, wait=True, local=False, max=180.0, database=self.DATABASE)
impala_columns, beeswax_columns = get_impala_beeswax_columns()
# New column is not found by Impala
assert_true('new_column' in beeswax_columns, beeswax_columns)
assert_false('new_column' in impala_columns, impala_columns)
resp = self.client.post(reverse('impala:refresh_table', kwargs={'database': self.DATABASE, 'table': 'tweets'}))
impala_columns, beeswax_columns = get_impala_beeswax_columns()
    # Refresh picks up new column
assert_equal(impala_columns, beeswax_columns,
"\ntest_refresh_table: `%s`.`%s`\nImpala Columns: %s\nBeeswax Columns: %s" % (self.DATABASE, 'tweets', ','.join(impala_columns), ','.join(beeswax_columns)))
def test_get_exec_summary(self):
query = """
SELECT COUNT(1) FROM tweets;
"""
response = _make_query(self.client, query, database=self.DATABASE, local=False, server_name='impala')
content = json.loads(response.content)
query_history = QueryHistory.get(content['id'])
wait_for_query_to_finish(self.client, response, max=180.0)
resp = self.client.post(reverse('impala:get_exec_summary', kwargs={'query_history_id': query_history.id}))
data = json.loads(resp.content)
assert_equal(0, data['status'], data)
assert_true('nodes' in data['summary'], data)
assert_true(len(data['summary']['nodes']) > 0, data['summary']['nodes'])
# Attempt to call get_exec_summary on a closed query
resp = self.client.post(reverse('impala:get_exec_summary', kwargs={'query_history_id': query_history.id}))
data = json.loads(resp.content)
assert_equal(0, data['status'], data)
assert_true('nodes' in data['summary'], data)
assert_true(len(data['summary']['nodes']) > 0, data['summary']['nodes'])
def test_get_runtime_profile(self):
query = """
SELECT COUNT(1) FROM tweets;
"""
response = _make_query(self.client, query, database=self.DATABASE, local=False, server_name='impala')
content = json.loads(response.content)
query_history = QueryHistory.get(content['id'])
wait_for_query_to_finish(self.client, response, max=180.0)
resp = self.client.post(reverse('impala:get_runtime_profile', kwargs={'query_history_id': query_history.id}))
data = json.loads(resp.content)
assert_equal(0, data['status'], data)
assert_true('Execution Profile' in data['profile'], data)
# Could be refactored with SavedQuery.create_empty()
def create_saved_query(app_name, owner):
query_type = SavedQuery.TYPES_MAPPING[app_name]
design = SavedQuery(owner=owner, type=query_type)
design.name = 'create_saved_query'
design.desc = ''
design.data = hql_query('show $tables', database='db1').dumps()
design.is_auto = False
design.save()
Document.objects.link(design, owner=design.owner, extra=design.type, name=design.name, description=design.desc)
return design
def test_ssl_cacerts():
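  # Each case is (desktop-level SSL_CACERTS override, impala-level SSL.CACERTS override,
  # expected effective value): the Impala-specific setting wins whenever it is present.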
for desktop_kwargs, conf_kwargs, expected in [
({'present': False}, {'present': False}, ''),
({'present': False}, {'data': 'local-cacerts.pem'}, 'local-cacerts.pem'),
({'data': 'global-cacerts.pem'}, {'present': False}, 'global-cacerts.pem'),
({'data': 'global-cacerts.pem'}, {'data': 'local-cacerts.pem'}, 'local-cacerts.pem'),
]:
resets = [
desktop_conf.SSL_CACERTS.set_for_testing(**desktop_kwargs),
conf.SSL.CACERTS.set_for_testing(**conf_kwargs),
]
try:
assert_equal(conf.SSL.CACERTS.get(), expected,
'desktop:%s conf:%s expected:%s got:%s' % (desktop_kwargs, conf_kwargs, expected, conf.SSL.CACERTS.get()))
finally:
for reset in resets:
reset()
def test_ssl_validate():
for desktop_kwargs, conf_kwargs, expected in [
({'present': False}, {'present': False}, True),
({'present': False}, {'data': False}, False),
({'present': False}, {'data': True}, True),
({'data': False}, {'present': False}, False),
({'data': False}, {'data': False}, False),
({'data': False}, {'data': True}, True),
({'data': True}, {'present': False}, True),
({'data': True}, {'data': False}, False),
({'data': True}, {'data': True}, True),
]:
resets = [
desktop_conf.SSL_VALIDATE.set_for_testing(**desktop_kwargs),
conf.SSL.VALIDATE.set_for_testing(**conf_kwargs),
]
try:
assert_equal(conf.SSL.VALIDATE.get(), expected,
'desktop:%s conf:%s expected:%s got:%s' % (desktop_kwargs, conf_kwargs, expected, conf.SSL.VALIDATE.get()))
finally:
for reset in resets:
reset()
class TestImpalaDbms():
def test_get_impala_nested_select(self):
assert_equal(ImpalaDbms.get_nested_select('default', 'customers', 'id', None), ('id', '`default`.`customers`'))
assert_equal(ImpalaDbms.get_nested_select('default', 'customers', 'email_preferences', 'categories/promos/'),
('email_preferences.categories.promos', '`default`.`customers`'))
assert_equal(ImpalaDbms.get_nested_select('default', 'customers', 'addresses', 'key'),
('key', '`default`.`customers`.`addresses`'))
assert_equal(ImpalaDbms.get_nested_select('default', 'customers', 'addresses', 'value/street_1/'),
('street_1', '`default`.`customers`.`addresses`'))
assert_equal(ImpalaDbms.get_nested_select('default', 'customers', 'orders', 'item/order_date'),
('order_date', '`default`.`customers`.`orders`'))
assert_equal(ImpalaDbms.get_nested_select('default', 'customers', 'orders', 'item/items/item/product_id'),
('product_id', '`default`.`customers`.`orders`.`items`'))
|
the-stack_0_72 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import math
from functools import reduce  # reduce is not a builtin on Python 3
def concat(lists):
    return reduce(lambda l, c: l + c, lists, [])
def makedata(topic_count, word_count, paper_count, common_words, number_topics_paper=1, word_count_min=1, word_count_max=20, common_word_count_min=10, common_word_count_max=100):
    # generate topic-specific words: one CSV row per (paper, word, topic) combination
with open("lda_big.csv", "w", 10**9) as f:
for paper in range(paper_count):
for topicval in [random.randint(1, topic_count) for _ in range(number_topics_paper)]:
for word in range(word_count):
f.write(','.join(("paper-"+str(paper),"word-"+str(word)+str(topicval), str(random.randint(word_count_min,word_count_max)), str(topicval), "\n")))
        # generate common (topic-independent) words shared across all papers
for paper in range(paper_count):
for word in range(common_words):
f.write(','.join(("paper-"+str(paper),"word-"+str(word), str(int(math.ceil(random.uniform(common_word_count_min, common_word_count_max)))), "-1", "\n")))
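# Output format written by makedata(): one CSV row per (paper, word) pair,
#   paper-<paper_id>,word-<token><topic_id>,<count>,<topic_id>,
# for topic-specific words, and topic_id -1 for the shared/common words.
# (The trailing comma comes from joining "\n" as a fifth field.)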
if __name__ == '__main__':
makedata(10000, 1000, 20000, 100000)
|
the-stack_0_74 | #!/usr/bin/env python3
##############################################################################
# EVOLIFE http://evolife.telecom-paris.fr Jean-Louis Dessalles #
# Telecom Paris 2021 www.dessalles.fr #
# -------------------------------------------------------------------------- #
# License: Creative Commons BY-NC-SA #
##############################################################################
##############################################################################
# Draw curves offline using matplotlib #
##############################################################################
""" Draw curves offline.
Takes a csv file as input and draws curves.
Creates image file.
"""
import sys
import os
import re
import glob
import numpy as np
import matplotlib
matplotlib.use('Agg') # to use offline
import matplotlib.pyplot as plt
import logging # for tracing
# set the console logging level to WARNING
logging.basicConfig(level='WARNING')
sys.path.append('..')
sys.path.append('../..')
sys.path.append('../../..')
sys.path.append('../../../..')
import Evolife.Scenarii.Parameters as EP
try: import TableCsv as CSV
except ImportError: import Evolife.Tools.TableCsv as CSV
def figsave(FileName):
if os.path.exists(FileName): os.remove(FileName)
plt.savefig(FileName)
print("%s created" % FileName)
def str2nb(x):
try: return int(x)
except ValueError: return float(x)
"""
plt.plot(*zip(*Maxima), c='k', linewidth=1, marker='o')
plt.clf()
plt.scatter(alphaValues, [p[1] for p in Prices], color=colours, s=44)
plt.plot(alphaValues, [p[1] for p in Prices], 'r', label='Signal prices')
plt.scatter(alphaValues, [thetaU(a, UC) for a in alphaValues], color=colours, s=44)
"""
class Plot:
def __init__(self, ExpeFile, FieldDraw=True, ConstantConfigFileName=None):
self.ExpeFile = os.path.splitext(ExpeFile)[0]
if self.ExpeFile.endswith('_res'):
self.ExpeFile = self.ExpeFile[:-4]
SkipFile = True # not a data file
OutputFile = self.ExpeFile + '.png'
if not os.path.exists(OutputFile):
self.Dirname, self.ExpeName = os.path.split(self.ExpeFile)
PlotFile = self.ExpeFile + '.csv'
self.ConfigFileName = self.ExpeFile + '_res.csv'
self.Cfg = self.RetrieveConfig(self.ConfigFileName) # retrieve actual parameters from _res file
self.RelevantParam = self.RelevantConfig(self.ExpeName, ConstantConfigFileName) # display parameters
# drawing curves
plt.figure(1, figsize=(6 + 6 * FieldDraw, 4))
if FieldDraw: plt.subplot(1,2,1)
ymax = self.Draw_Curve(PlotFile)
if self.RelevantParam: plt.title(' '.join(sorted(['%s = %s' % (P, self.RelevantParam[P]) for P in self.RelevantParam])))
if FieldDraw:
# drawing field
plt.subplot(1,2,2)
# self.Draw_Field(self.ExpeFile + '_dmp.csv', ymax=ymax)
self.Draw_Field(self.ExpeFile + '_dmp.csv', ymax=100)
plt.title(self.ExpeFile)
self.save(OutputFile)
else: print('%s already exists' % OutputFile)
def Draw_Curve(self, CurveFileName):
# colours = ['#000000', '#00BF00', '#78FF78', '#BF0000', '#FF7878', '#0000BF', '#7878FF']
colours = ['#00BF00', '#78FF78', '#BF0000', '#FF7878', '#0000BF', '#7878FF']
# Retrieving coordinates
PlotOrders = CSV.load(CurveFileName, sniff=True) # loading csv file
# Retrieving legend
try: Legend = next(PlotOrders) # reading first line with curve names
except StopIteration: sys.exit(0)
# Retrieving data
Data = list(zip(*PlotOrders))
Data = list(map(lambda L: list(map(str2nb, L)), Data))
# Data = list(map(lambda L: list(map(str2nb, L)), [*PlotOrders]))
for Col in range(1,len(Data)):
plt.plot(Data[0], Data[Col], linewidth=2, color=colours[Col-1], label=Legend[Col])
x1,x2,y1,y2 = plt.axis()
plt.axis((x1, x2, 0, y2+0.05))
# plt.ylim(top=100)
plt.xlabel('year')
# plt.ylabel('price or sales')
# plt.legend(bbox_to_anchor=(0.1, 1))
plt.legend(loc='upper right')
return plt.ylim()[1] # max coordinate
@classmethod
def RetrieveConfig(self, ConfigFile):
" Retrieves parameters from _res file "
if os.path.exists(ConfigFile):
CfgLines = open(ConfigFile).readlines()
# reading parameters
Sep = max([';', '\t', ','], key=lambda x: CfgLines[0].count(x))
if len(CfgLines) > 1:
Parameters = dict(zip(*map(lambda x: x.strip().split(Sep), CfgLines[:2])))
return EP.Parameters(ParamDict=Parameters)
return None
def RelevantConfig(self, ExpeName, ConstantParameterFile):
" Try to find relevant parameters "
Irrelevant = ['BatchMode', 'DisplayPeriod', 'TimeLimit', 'DumpStart']
if self.Cfg is None or not ConstantParameterFile:
print('ConfigFile not found')
return None
RelevantParameters = {}
CP = EP.Parameters(ConstantParameterFile)
# determining relevant parameters
for p in CP:
if p in Irrelevant: continue
if p in self.Cfg and CP[p] != self.Cfg[p]:
# print(p, RelevantParameters[p], self.Cfg[p])
RelevantParameters[p] = self.Cfg[p]
# CP.addParameter(p, self.Cfg[p])
RelevantParameters = EP.Parameters(ParamDict=RelevantParameters)
print(RelevantParameters)
return RelevantParameters
def Draw_Field(self, DumpFile, ymax=None):
if not os.path.exists(DumpFile): return None
Lines = open(DumpFile).readlines()
# reading recorded positions
FieldPlot = None
if len(Lines) > 1:
FieldPlot = Lines[1].strip().split(';')[1:]
NbP = len(FieldPlot)
plt.scatter(list(range(NbP)), list(map(float, FieldPlot)), s=11)
# print(FieldPlot)
if ymax is not None:
plt.ylim(top=ymax)
plt.xlabel('quality')
plt.ylabel('signal')
return FieldPlot
def save(self, OutputFile): figsave(OutputFile)
def Parse(Args):
Files = []
ConstantConfigFileName = None
if len(Args) < 2:
# find last file
CsvFiles = glob.glob('___Results/*.csv')
if CsvFiles:
CsvFiles.sort(key=lambda x: os.stat(x).st_mtime)
Files = [CsvFiles[-1]]
elif len(Args) > 3:
print('''Usage: %s <curve file name> [<constant config file name>]''' % os.path.basename(Args[0]))
else:
Files = glob.glob(Args[1])
ConstantConfigFileName = Args[2] if (len(Args) == 3) else None
for Argfile in Files:
yield (Argfile, ConstantConfigFileName)
if __name__ == "__main__":
for (Argfile, ConstantConfigFileName) in Parse(sys.argv):
if Argfile:
print(Argfile)
plot = Plot(Argfile, FieldDraw=True, ConstantConfigFileName=ConstantConfigFileName)
# print()
__author__ = 'Dessalles'
|
the-stack_0_76 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for bucketing data into groups.
The classes and functions in this module are used to queue up data into
buckets conditional on side information (e.g. sequence length).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.summary import summary
from tensorflow.python.training import input as input_py
from tensorflow.python.training import queue_runner
# pylint: disable=protected-access
_as_original_type = input_py._as_original_type
_as_tensor_list = input_py._as_tensor_list
_restore_sparse_tensors = input_py._restore_sparse_tensors
_dtypes = input_py._dtypes
_store_sparse_tensors = input_py._store_sparse_tensors
_shapes = input_py._shapes
_which_queue = input_py._which_queue
# pylint: enable=protected-access
def _validate_bucket(tensor_list):
tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
if not tensor_list:
raise ValueError("Expected at least one tensor in bucket().")
return tensor_list
def bucket(tensors,
which_bucket,
batch_size,
num_buckets,
num_threads=1,
capacity=32,
shapes=None,
dynamic_pad=False,
allow_smaller_final_batch=False,
keep_input=None,
shared_name=None,
name=None):
"""Lazy bucketing of input tensors according to `which_bucket`.
The argument `tensors` can be a list or a dictionary of tensors.
The value returned by the function will be of the same type
as `tensors`.
The tensors entering this function are put into the bucket given by
`which_bucket`. Each bucket has its own queue. When a bucket contains
`batch_size` elements, this minibatch is pushed onto a top queue. The
  tensors returned from this function are the result of dequeueing the
next minibatch from this top queue.
This function is implemented using several queues. A `QueueRunner` for the
queues is added to the current `Graph`'s `QUEUE_RUNNER` collection.
  As the returned tensors are the result of a dequeue operation, evaluating
them will throw a `tf.errors.OutOfRangeError` when the input queue is
exhausted. If these tensors are feeding another input queue, its queue runner
will catch this exception, however, if they are used in your main thread
you are responsible for catching this yourself.
*N.B.:* If `dynamic_pad` is `False`, you must ensure that either
(i) the `shapes` argument is passed, or (ii) all of the tensors in
`tensors` must have fully-defined shapes. `ValueError` will be
raised if neither of these conditions holds.
If `dynamic_pad` is `True`, it is sufficient that the *rank* of the
tensors is known, but individual dimensions may have shape `None`.
In this case, for each enqueue the dimensions with value `None`
may have a variable length; upon dequeue, the output tensors will be padded
on the right to the maximum shape of the tensors in the current minibatch.
For numbers, this padding takes value 0. For strings, this padding is
the empty string. See `PaddingFIFOQueue` for more info.
If `allow_smaller_final_batch` is `True`, a smaller batch value than
`batch_size` is returned when the queues are closed and there are not enough
elements to fill the batch, otherwise the pending elements are discarded.
In addition, all output tensors' static shapes, as accessed via the
`get_shape()` method will have a 0th `Dimension` value of `None`, and
operations that depend on fixed batch_size would fail.
Args:
tensors: The list or dictionary of tensors, representing a single element,
to bucket. Nested lists are not supported.
which_bucket: An `int32` scalar Tensor taking a value in `[0, num_buckets)`.
batch_size: The new batch size pulled from the queue (all queues will have
the same size). If a list is passed in then each bucket will have a
different batch_size.
(python int, int32 scalar or iterable of integers of length num_buckets).
num_buckets: A python integer, the number of buckets.
num_threads: An integer. The number of threads enqueuing `tensors`.
capacity: An integer. The maximum number of minibatches in the top queue,
and also the maximum number of elements within each bucket.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensors`.
dynamic_pad: Boolean. Allow variable dimensions in input shapes.
The given dimensions are padded upon dequeue so that tensors within a
batch have the same shapes.
allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
batches to be smaller if there are insufficient items left in the queues.
keep_input: (Optional). A `bool` scalar Tensor. If provided, this tensor
controls whether the input is added to the queue or not. If it evaluates
`True`, then `tensors` are added to the bucket; otherwise they are
dropped. This tensor essentially acts as a filtering mechanism.
The default behavior is to assume `keep_input=True`.
shared_name: (Optional). If set, the queues will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A tuple `(bucket, outputs)` where `bucket` is
a `int32` scalar tensor and `outputs` is a list or
dictionary of batched outputs corresponding to elements of `tensors`.
Every step will receive a new bucket of outputs.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensors` or if batch_size is a sequence
      but its length != num_buckets.
"""
batch_size_per_bucket = False
if isinstance(batch_size, (list, tuple)):
batch_size_per_bucket = True
if len(batch_size) != num_buckets:
raise ValueError(
"If batch_size is a list it must have num_buckets elements")
else:
batch_size = [batch_size] * num_buckets
tensor_list = _as_tensor_list(tensors)
with ops.name_scope(name, "bucket", tensor_list) as name:
tensor_list = _validate_bucket(tensor_list)
(tensor_list, sparse_info) = _store_sparse_tensors(
tensor_list, enqueue_many=False, keep_input=constant_op.constant(True))
# Round-trip batch_size to a tensor, and possibly back
for i, bucket_batch_size in enumerate(batch_size):
bucket_batch_size = ops.convert_to_tensor(
bucket_batch_size, dtype=dtypes.int32, name="batch_size")
static_batch_size = tensor_util.constant_value(bucket_batch_size)
batch_size[i] = (static_batch_size if static_batch_size is not None else
bucket_batch_size)
types = _dtypes([tensor_list])
shapes = _shapes([tensor_list], shapes, enqueue_many=False)
which_bucket = ops.convert_to_tensor(
which_bucket, dtype=dtypes.int32, name="which_bucket")
queue_creator = _which_queue(dynamic_pad)
bucket_queues = []
for i in range(num_buckets):
shared_name_i = ("%s_%d" % (shared_name, i) if shared_name is not None
else None)
bucket_queues.append(
queue_creator(
capacity=capacity,
dtypes=types,
shapes=shapes,
shared_name=shared_name_i,
name="bucket_queue_%d" % i))
maybe_static_batch_size = (
None if (allow_smaller_final_batch or batch_size_per_bucket)
else static_batch_size)
bucket_shapes = [
tensor_shape.vector(maybe_static_batch_size).concatenate(s)
for s in bucket_queues[0].shapes
]
# top_queue is a PaddingFIFOQueue even if the bucket queues are regular FIFO
# queues because if we use allow_smaller_final_batch, shapes will
# contain Nones in their first entry; as a result, a regular
# FIFOQueue would die when being passed shapes that are not fully defined.
top_queue = data_flow_ops.PaddingFIFOQueue(
capacity=capacity,
dtypes=[dtypes.int32] + types,
shapes=[tensor_shape.scalar()] + bucket_shapes,
shared_name=shared_name,
name="top_queue")
def enqueue_which():
def enqueue_single(i):
return bucket_queues[i].enqueue(tensor_list)
enqueues = [
control_flow_ops.cond(
math_ops.equal(which_bucket, i),
functools.partial(enqueue_single, i), control_flow_ops.no_op)
for i in range(num_buckets)
]
return control_flow_ops.group(*enqueues, name="group_enqueues")
if keep_input is not None:
# TODO(ebrevdo): Expand keep_input param to core training
# methods, and pipe through to _store_sparse_tensors; so
# that expensive serialization is guarded by keep_input.
maybe_enqueue = control_flow_ops.cond(keep_input, enqueue_which,
control_flow_ops.no_op)
else:
maybe_enqueue = enqueue_which()
bucket_enqueue_ops = [maybe_enqueue] * num_threads
if allow_smaller_final_batch:
which_dequeue = lambda q: q.dequeue_up_to
else:
which_dequeue = lambda q: q.dequeue_many
enqueues_to_top = [
top_queue.enqueue(
[constant_op.constant(i)] + which_dequeue(q)(
bs, name="read_bucket_%d" % i),
name="enqueue_from_bucket_%d" % i)
for i, (q, bs) in enumerate(zip(bucket_queues, batch_size))
]
for i, q in enumerate(bucket_queues):
queue_runner.add_queue_runner(
queue_runner.QueueRunner(
q, [enqueues_to_top[i]],
queue_closed_exception_types=(errors.OutOfRangeError,
errors.CancelledError)))
queue_runner.add_queue_runner(
queue_runner.QueueRunner(
top_queue,
bucket_enqueue_ops,
queue_closed_exception_types=(errors.OutOfRangeError,
errors.CancelledError)))
for q in bucket_queues:
summary.scalar("bucket/%s/size" % q.name,
math_ops.cast(top_queue.size(), dtypes.float32))
summary.scalar("bucket/%s/fraction_of_%d_full" % (top_queue.name, capacity),
math_ops.cast(top_queue.size(), dtypes.float32) *
(1. / capacity))
dequeued = top_queue.dequeue(name="dequeue_top")
which_bucket_dequeued = dequeued[0]
dequeued = dequeued[1:]
dequeued = _restore_sparse_tensors(dequeued, sparse_info)
return (which_bucket_dequeued, _as_original_type(tensors, dequeued))
def bucket_by_sequence_length(input_length,
tensors,
batch_size,
bucket_boundaries,
num_threads=1,
capacity=32,
shapes=None,
dynamic_pad=False,
allow_smaller_final_batch=False,
keep_input=None,
shared_name=None,
name=None):
"""Lazy bucketing of inputs according to their length.
This method calls `tf.contrib.training.bucket` under the hood, after first
subdividing the bucket boundaries into separate buckets and identifying which
bucket the given `input_length` belongs to. See the documentation for
`which_bucket` for details of the other arguments.
Args:
input_length: `int32` scalar `Tensor`, the sequence length of tensors.
tensors: The list or dictionary of tensors, representing a single element,
to bucket. Nested lists are not supported.
batch_size: The new batch size pulled from the queue (all queues will have
the same size). If a list is passed in then each bucket will have a
different batch_size.
(python int, int32 scalar or iterable of integers of length num_buckets).
bucket_boundaries: int list, increasing non-negative numbers.
The edges of the buckets to use when bucketing tensors. Two extra buckets
are created, one for `input_length < bucket_boundaries[0]` and
one for `input_length >= bucket_boundaries[-1]`.
num_threads: An integer. The number of threads enqueuing `tensors`.
capacity: An integer. The maximum number of minibatches in the top queue,
and also the maximum number of elements within each bucket.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensors`.
dynamic_pad: Boolean. Allow variable dimensions in input shapes.
The given dimensions are padded upon dequeue so that tensors within a
batch have the same shapes.
allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
batches to be smaller if there are insufficient items left in the queues.
keep_input: (Optional). A `bool` scalar Tensor. If provided, this tensor
controls whether the input is added to the queue or not. If it evaluates
`True`, then `tensors` are added to the bucket; otherwise they are
dropped. This tensor essentially acts as a filtering mechanism.
The default behavior is to assume `keep_input=True`.
shared_name: (Optional). If set, the queues will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A tuple `(sequence_length, outputs)` where `sequence_length` is
a 1-D `Tensor` of size `batch_size` and `outputs` is a list or dictionary
of batched, bucketed, outputs corresponding to elements of `tensors`.
Raises:
TypeError: if `bucket_boundaries` is not a list of python integers.
ValueError: if `bucket_boundaries` is empty or contains non-increasing
      values or if batch_size is a list and its length doesn't equal the number
of buckets.
"""
tensor_list = _as_tensor_list(tensors)
if not isinstance(bucket_boundaries, (list, tuple)):
raise TypeError(
"bucket_boundaries must be a list or tuple, but received: %s" %
bucket_boundaries)
if not bucket_boundaries:
raise ValueError("bucket_boundaries must not be empty")
for (s, e) in zip(bucket_boundaries[:-1], bucket_boundaries[1:]):
if not isinstance(s, int) or not isinstance(e, int):
raise TypeError("bucket boundaries must be integers, but saw: %s and %s" %
(s, e))
if s >= e:
raise ValueError(
"Buckets must contain sequential increasing lengths, but saw: "
"%d before %d" % (s, e))
with ops.name_scope(name, "bucket_by_sequence_length",
[input_length] + tensor_list) as name:
input_length = ops.convert_to_tensor(
input_length, dtype=dtypes.int32, name="input_length")
# Bucketing conditions are:
# l < b[0]
# b[0] <= l < b[1]
# b[1] <= l < b[2]
# ...
# b[N-2] <= l < b[N-1]
# b[N-1] <= l
# Equivalent to:
# [-inf, b[0], b[1], ..., b[N-1]] <= l < [b[0], b[1], ..., b[N-1], inf]
buckets_min = [np.iinfo(np.int32).min] + list(bucket_boundaries)
buckets_max = list(bucket_boundaries) + [np.iinfo(np.int32).max]
conditions_c = math_ops.logical_and(
math_ops.less_equal(buckets_min, input_length),
math_ops.less(input_length, buckets_max))
which_bucket = math_ops.reduce_min(array_ops.where(conditions_c))
which_bucket = math_ops.to_int32(which_bucket)
if shapes is not None:
shapes = [tensor_shape.scalar()] + shapes
_, dequeued = bucket(
tensors=[input_length] + tensor_list,
which_bucket=which_bucket,
batch_size=batch_size,
num_buckets=len(bucket_boundaries) + 1,
num_threads=num_threads,
capacity=capacity,
shapes=shapes,
dynamic_pad=dynamic_pad,
allow_smaller_final_batch=allow_smaller_final_batch,
keep_input=keep_input,
shared_name=shared_name)
return (dequeued[0], _as_original_type(tensors, dequeued[1:]))
__all__ = ["bucket", "bucket_by_sequence_length"]
|
the-stack_0_77 | import hashlib
import json
import logging
from pathlib import Path
from typing import List
import ckanapi
import pandas as pd
from airflow.models.baseoperator import BaseOperator
from airflow.utils.decorators import apply_defaults
class BackupDatastoreResourceOperator(BaseOperator):
"""
Reads datastore resource, creates backup files for fields (json) and records (parquet). Args:
- address: CKAN instance URL
- apikey: CKAN API key
- resource_task_id: task_id that returns resource object (ie. GetOrCreateResourcePackage)
- dir_task_id: task_id that returns backup directory
Returns dictionary containing:
- fields: json file path containing fields for datastore resource
- data: parquet file path containing fields for datastore resource
- columns: number of columns in datastore resource
- rows: number of rows in datastore_resource
- resource_id: datastore resource ID
"""
@apply_defaults
def __init__(
self,
address: str,
apikey: str,
resource_task_id: str,
dir_task_id: str,
sort_columns: List[str] = [],
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dir_task_id = dir_task_id
self.resource_task_id = resource_task_id
self.sort_columns = sort_columns
self.ckan = ckanapi.RemoteCKAN(apikey=apikey, address=address)
def _checksum_datastore_response(self, datastore_response):
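        # hash the records (minus the CKAN-generated _id column, optionally sorted)
        # so that identical data always produces identical backup file names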
data = pd.DataFrame(datastore_response["records"])
if "_id" in data.columns.values:
data = data.drop("_id", axis=1)
if len(self.sort_columns) > 0:
data = data.sort_values(by=self.sort_columns)
data_hash = hashlib.md5()
data_hash.update(data.to_csv(index=False).encode("utf-8"))
return data_hash.hexdigest()
def _build_dataframe(self, records):
data = pd.DataFrame(records)
if "_id" in data.columns.values:
data = data.drop("_id", axis=1)
return data
def _save_fields_json(self, datastore_response, checksum, backups_dir):
fields_file_path = backups_dir / f"fields.{checksum}.json"
if not fields_file_path.exists():
fields = [f for f in datastore_response["fields"] if f["id"] != "_id"]
with open(fields_file_path, "w") as f:
json.dump(fields, f)
return fields_file_path
def _save_data_parquet(self, datastore_response, checksum, backups_dir, data):
data_file_path = backups_dir / f"data.{checksum}.parquet"
if not data_file_path.exists():
data.to_parquet(path=data_file_path, engine="fastparquet", compression=None)
return data_file_path
def execute(self, context):
# get a resource and backup directory via xcom
ti = context["ti"]
resource = ti.xcom_pull(task_ids=self.resource_task_id)
backups_dir = Path(ti.xcom_pull(task_ids=self.dir_task_id))
# get number of records for this datastore resource
record_count = self.ckan.action.datastore_search(id=resource["id"], limit=0)[
"total"
]
# get data from datastore resource
datastore_response = self.ckan.action.datastore_search(
id=resource["id"], limit=record_count
)
# turn data into dataframe
data = self._build_dataframe(datastore_response["records"])
checksum = self._checksum_datastore_response(datastore_response)
# return filepath for fields json, data parquet, row/col counts, checksum, and resource_id
result = {
"fields_file_path": self._save_fields_json(
datastore_response, checksum, backups_dir
),
"data_file_path": self._save_data_parquet(
datastore_response, checksum, backups_dir, data
),
"records": data.shape[0],
"columns": data.shape[1],
"resource_id": datastore_response["resource_id"],
"checksum": checksum,
}
logging.info(f"Returning: {result}")
return result
class DeleteDatastoreResourceOperator(BaseOperator):
"""
Deletes a datastore resource
Inputs:
- address: CKAN instance URL
- apikey: CKAN API key
- resource_id: CKAN resource id to be deleted
    Resource id can be given with an actual value, or with a reference to a task_id and task_key that returns the value
Note: Deleting the entire resource also deletes the data dictionary (i.e. schema, field definitions and types).
To keep the existing schema, delete the datastore resource records instead by using the DeleteDatastoreResourceRecordsOperator - this keeps the schema.
"""
@apply_defaults
def __init__(
self,
address: str,
apikey: str,
resource_id: str = None,
resource_id_task_id: str = None,
resource_id_task_key: str = None,
**kwargs,
) -> None:
# init ckan client and resource_id to be truncated
super().__init__(**kwargs)
self.resource_id, self.resource_id_task_id, self.resource_id_task_key = resource_id, resource_id_task_id, resource_id_task_key
self.ckan = ckanapi.RemoteCKAN(apikey=apikey, address=address)
def execute(self, context):
# get task instance from context
ti = context['ti']
# get resource id from task, if task info provided in input
if self.resource_id_task_id and self.resource_id_task_key:
self.resource_id = ti.xcom_pull(task_ids=self.resource_id_task_id)[self.resource_id_task_key]
self.resource = ti.xcom_pull(task_ids=self.resource_id_task_id)
logging.info(self.resource)
logging.info("Pulled {} from {} via xcom".format(self.resource_id, self.resource_id_task_id) )
assert self.resource_id, "Resource ID is empty! This operator needs a way to get the resource ID in order to delete the right datastore resource!"
# Delete the resource
try:
self.ckan.action.datastore_delete(id=self.resource_id, force=True)
logging.info("Deleted " + self.resource_id)
except Exception as e:
logging.error("Error while trying to delete resource: " + e)
class DeleteDatastoreResourceRecordsOperator(BaseOperator):
"""
Deletes datastore resource records. Args:
- address: CKAN instance URL
- apikey: CKAN API key
- backup_task_id: task_id that returns backup file information (BackupDatastoreResourceOperator)
"""
@apply_defaults
def __init__(
self, address: str, apikey: str, backup_task_id: str, **kwargs,
) -> None:
super().__init__(**kwargs)
self.backup_task_id = backup_task_id
self.ckan = ckanapi.RemoteCKAN(apikey=apikey, address=address)
def execute(self, context):
backups_info = context["ti"].xcom_pull(task_ids=self.backup_task_id)
self.ckan.action.datastore_delete(id=backups_info["resource_id"], force=True)
with open(Path(backups_info["fields_file_path"]), "r") as f:
fields = json.load(f)
self.ckan.action.datastore_create(id=backups_info["resource_id"], fields=fields, force=True)
record_count = self.ckan.action.datastore_search(
id=backups_info["resource_id"], limit=0
)["total"]
assert record_count == 0, f"Resource not empty after cleanup: {record_count}"
class InsertDatastoreResourceRecordsOperator(BaseOperator):
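    """
    Inserts records into a CKAN datastore resource. Args:
        - address: CKAN instance URL
        - apikey: CKAN API key
        - resource_task_id: task_id that returns the target resource object
        - parquet_filepath_task_id: (optional) task_id that returns a parquet file of records to insert
        - fields_json_path_task_id: (optional) task_id that returns a fields json file used to (re)create the datastore schema
        - chunk_size: number of records per datastore_create call
    """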
@apply_defaults
def __init__(
self,
address: str,
apikey: str,
resource_task_id: str,
parquet_filepath_task_id: str = None,
fields_json_path_task_id: str = None,
chunk_size: int = 20000,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.parquet_filepath_task_id = parquet_filepath_task_id
self.resource_task_id = resource_task_id
self.chunk_size = chunk_size
self.fields_json_path_task_id = fields_json_path_task_id
self.ckan = ckanapi.RemoteCKAN(apikey=apikey, address=address)
def _create_empty_resource_with_fields(self, fields_path, resource_id):
with open(fields_path, "r") as f:
fields = json.load(f)
self.ckan.action.datastore_create(id=resource_id, fields=fields, force=True)
def execute(self, context):
ti = context["ti"]
resource = ti.xcom_pull(task_ids=self.resource_task_id)
if self.fields_json_path_task_id is not None:
fields_path = Path(ti.xcom_pull(task_ids=self.fields_json_path_task_id))
self._create_empty_resource_with_fields(fields_path, resource["id"])
if self.parquet_filepath_task_id is not None:
path = Path(ti.xcom_pull(task_ids=self.parquet_filepath_task_id))
data = pd.read_parquet(path)
records = data.to_dict(orient="records")
chunks = [
records[i : i + self.chunk_size]
for i in range(0, len(records), self.chunk_size)
]
for chunk in chunks:
clean_records = []
logging.info(f"Removing NaNs and inserting {len(records)} records")
for r in chunk:
record = {}
for key, value in r.items():
if value == value:
record[key] = value
clean_records.append(record)
self.ckan.action.datastore_create(
id=resource["id"], records=clean_records, force=True
)
logging.info(f"Records inserted: {data.shape[0]}")
return data.shape[0]
class RestoreDatastoreResourceBackupOperator(BaseOperator):
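    """
    Restores a datastore resource from backup files. Args:
        - address: CKAN instance URL
        - apikey: CKAN API key
        - backup_task_id: task_id that returns backup file information (BackupDatastoreResourceOperator)
    """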
@apply_defaults
def __init__(
self, address: str, apikey: str, backup_task_id: str, **kwargs,
) -> None:
super().__init__(**kwargs)
self.backup_task_id = backup_task_id
self.ckan = ckanapi.RemoteCKAN(apikey=apikey, address=address)
def execute(self, context):
backups_info = context["ti"].xcom_pull(task_ids=self.backup_task_id)
assert backups_info is not None, "No backup information"
resource_id = backups_info["resource_id"]
with open(Path(backups_info["fields_file_path"]), "r") as f:
fields = json.load(f)
data = pd.read_parquet(Path(backups_info["data_file_path"]))
records = data.to_dict(orient="records")
try:
self.ckan.action.datastore_delete(id=resource_id)
except Exception as e:
logging.error(e)
result = self.ckan.action.datastore_create(
id=resource_id, fields=fields, records=records
)
logging.info(f"Result: {result}")
return result
class InsertDatastoreResourceRecordsFromJSONOperator(BaseOperator):
'''
Reads a JSON file and write the output into a CKAN datastore resource.
JSON must be a list of dicts, with each dict being a record, like the following:
[
{ "column1": "string", "column2": 100, "column3": true},
{ "column1": "some other string", "column2": 34, "column3": false}
]
The fields must match the CKAN standard, like the following:
[
{
"id": "column1",
"type": "text" ,
"info": {
"notes": "Description of the field goes here. Info key is optional."
}
},
{
"id": "column2",
"type": "int"
},
{
"id": "column3",
"type": "bool"
}
]
Expects as inputs:
- address - url of target ckan
- apikey - key needed to make authorized ckan calls
- resource_id - id of the resource that will receive this data
- data_path - location of the json data file
- fields_path - location of the data's fields, already in a CKAN-friendly format
All of the above, except the address and apikey, can be given with an actual value, or with a reference to a task_id and task_key that returns the value
'''
@apply_defaults
def __init__(
self,
address: str,
apikey: str,
resource_id: str = None,
resource_id_task_id: str = None,
resource_id_task_key: str = None,
data_path: str = None,
data_path_task_id: str = None,
data_path_task_key: str = None,
fields_path: str = None,
fields_path_task_id: str = None,
fields_path_task_key: str = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.resource_id, self.resource_id_task_id, self.resource_id_task_key = resource_id, resource_id_task_id, resource_id_task_key
self.data_path, self.data_path_task_id, self.data_path_task_key = data_path, data_path_task_id, data_path_task_key
self.fields_path, self.fields_path_task_id, self.fields_path_task_key = fields_path, fields_path_task_id, fields_path_task_key
self.ckan = ckanapi.RemoteCKAN(apikey=apikey, address=address)
def execute(self, context):
# init task instance from context
ti = context['ti']
# assign important vars if provided from other tasks
if self.resource_id_task_id and self.resource_id_task_key:
self.resource_id = ti.xcom_pull(task_ids=self.resource_id_task_id)[self.resource_id_task_key]
if self.data_path_task_id and self.data_path_task_key:
self.data_path = ti.xcom_pull(task_ids=self.data_path_task_id)[self.data_path_task_key]
if self.fields_path_task_id and self.fields_path_task_key:
self.fields_path = ti.xcom_pull(task_ids=self.fields_path_task_id)[self.fields_path_task_key]
# get fields from file
with open(self.fields_path, "r") as f:
fields = json.load(f)
logging.info("Loaded the following fields from {}: {}".format( self.fields_path, fields ))
# populate that resource w data from the path provided
assert self.data_path, "Data path, or the filepath to the data to be inserted, must be provided!"
with open(self.data_path) as f:
data = json.load(f)
logging.info("Data parsed from JSON file")
logging.info("Fields from fields file: " + str(fields))
logging.info("Fields from data file: " + str(data[0].keys()))
self.ckan.action.datastore_create(id=self.resource_id, fields=fields, records=data)
logging.info("Resource created and populated from input fields and data")
return {"resource_id": self.resource_id, "data_inserted": len(data)}
|
the-stack_0_78 | import os
import torch
from utils.runs import Run
from utils.utils import print_message, save_checkpoint
from parameters import SAVED_CHECKPOINTS
def print_progress(scores):
positive_avg, negative_avg = round(scores[:, 0].mean().item(), 2), round(scores[:, 1].mean().item(), 2)
print("#>>> ", positive_avg, negative_avg, '\t\t|\t\t', positive_avg - negative_avg)
def manage_checkpoints(args, colbert, optimizer, batch_idx):
arguments = args.input_arguments.__dict__
path = os.path.join(Run.path, 'checkpoints')
if not os.path.exists(path):
os.mkdir(path)
if batch_idx % 2000 == 0:
name = os.path.join(path, "colbert.dnn")
save_checkpoint(name, 0, batch_idx, colbert, optimizer, arguments)
if batch_idx in SAVED_CHECKPOINTS:
name = os.path.join(path, "colbert-{}.dnn".format(batch_idx))
save_checkpoint(name, 0, batch_idx, colbert, optimizer, arguments)
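# Illustrative sketch (not from the original repo): how the helpers above are
# typically driven from a training loop. `args`, `colbert`, `optimizer` and the
# batch iterator are hypothetical objects supplied by the caller, and the model
# is assumed to return a (batch_size, 2) tensor of positive/negative scores.
def _example_checkpoint_loop(args, colbert, optimizer, batches):
    for batch_idx, batch in enumerate(batches):
        scores = colbert(batch)
        print_progress(scores)
        manage_checkpoints(args, colbert, optimizer, batch_idx + 1)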
|
the-stack_0_80 | import os
import numpy as np
import tables
import os
from .normalize import normalize_data_storage, reslice_image_set
def create_data_file(out_file, n_channels, n_samples, n_truth_labels, image_shape):
""" Initializes the hdf5 file and gives pointers for its three arrays
"""
try:
os.makedirs(os.path.dirname(out_file))
except:
pass
hdf5_file = tables.open_file(out_file, mode='w')
filters = tables.Filters(complevel=5, complib='blosc')
data_shape = tuple([0, n_channels] + list(image_shape))
truth_shape = tuple([0, n_truth_labels] + list(image_shape))
data_storage = hdf5_file.create_earray(hdf5_file.root, 'data', tables.Float32Atom(), shape=data_shape,
filters=filters, expectedrows=n_samples)
truth_storage = hdf5_file.create_earray(hdf5_file.root, 'truth', tables.UInt8Atom(), shape=truth_shape,
filters=filters, expectedrows=n_samples)
affine_storage = hdf5_file.create_earray(hdf5_file.root, 'affine', tables.Float32Atom(), shape=(0, 4, 4),
filters=filters, expectedrows=n_samples)
return hdf5_file, data_storage, truth_storage, affine_storage
def write_image_data_to_file(image_files, data_storage, truth_storage, image_shape, n_channels, affine_storage,
truth_dtype=np.uint8, crop=True):
for set_of_files in image_files: # set_of_files is both the volume file and the label file
images = reslice_image_set(set_of_files, image_shape, label_indices=len(set_of_files) - 1,
crop=crop) # both volume and label is resliced
subject_data = [image.get_data() for image in images]
add_data_to_storage(data_storage, truth_storage, affine_storage, subject_data, images[0].affine, n_channels,
truth_dtype)
return data_storage, truth_storage
def add_data_to_storage(data_storage, truth_storage, affine_storage, subject_data, affine, n_channels, truth_dtype):
data_storage.append(np.asarray(subject_data[:n_channels])[
np.newaxis]) # Anything but the last element of subject_data must be volume data
# split_truth_into_binary_labels(subject_data[n_channels])
# what_to_append = split_truth_into_binary_labels(subject_data[n_channels], truth_dtype, truth_labels)[np.newaxis]
# truth_storage.append(what_to_append)
truth_storage.append(np.asarray(subject_data[n_channels], dtype=truth_dtype)[np.newaxis][
np.newaxis]) # the last element of subject_data must be the labels
affine_storage.append(np.asarray(affine)[np.newaxis])
def write_data_to_file(training_data_files, out_file, image_shape, truth_dtype=np.uint8, subject_ids=None,
normalize=True, crop=True):
"""
Takes in a set of training images and writes those images to an hdf5 file.
:param training_data_files: List of tuples containing the training data files. The modalities should be listed in
the same order in each tuple. The last item in each tuple must be the labeled image.
Example: [('sub1-T1.nii.gz', 'sub1-T2.nii.gz', 'sub1-truth.nii.gz'),
('sub2-T1.nii.gz', 'sub2-T2.nii.gz', 'sub2-truth.nii.gz')]
:param out_file: Where the hdf5 file will be written to.
:param image_shape: Shape of the images that will be saved to the hdf5 file.
:param truth_dtype: Default is 8-bit unsigned integer.
:return: Location of the hdf5 file with the image data written to it.
"""
n_samples = len(training_data_files)
n_channels = len(training_data_files[0]) - 1
n_truth_labels = 1
try:
hdf5_file, data_storage, truth_storage, affine_storage = create_data_file(out_file,
n_channels=n_channels,
n_samples=n_samples,
n_truth_labels=n_truth_labels,
image_shape=image_shape)
except Exception as e:
# If something goes wrong, delete the incomplete data file
os.remove(out_file)
raise e
write_image_data_to_file(training_data_files, data_storage, truth_storage, image_shape, truth_dtype=truth_dtype,
n_channels=n_channels, affine_storage=affine_storage, crop=crop)
if subject_ids:
hdf5_file.create_earray(hdf5_file.root, 'subject_ids', obj=subject_ids)
if normalize:
normalize_data_storage(data_storage)
hdf5_file.close()
return out_file
def open_data_file(filename, readwrite="r"):
return tables.open_file(filename, readwrite)
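# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): building a small HDF5
# dataset for two subjects. The file names, output path and image shape are
# hypothetical placeholders mirroring the format described in the
# write_data_to_file docstring.
# ---------------------------------------------------------------------------
def _example_build_dataset():
    training_files = [
        ("sub1-T1.nii.gz", "sub1-T2.nii.gz", "sub1-truth.nii.gz"),
        ("sub2-T1.nii.gz", "sub2-T2.nii.gz", "sub2-truth.nii.gz"),
    ]
    return write_data_to_file(training_files,
                              out_file="dataset.h5",
                              image_shape=(144, 144, 144),
                              subject_ids=["sub1", "sub2"])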
|
the-stack_0_83 | import os
import time
from collections import defaultdict
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision.utils import save_image
from config import get_cfg
# models
from models.volume_rendering import VolumeRenderer
from models.anim_nerf import AnimNeRF
from models.body_model_params import BodyModelParams
# losses
# datasets
from datasets import dataset_dict
# optimizer, scheduler, visualization
from utils import *
from utils.util import load_pickle_file
# pytorch-lightning
from torchmetrics.functional import psnr, ssim
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning import LightningDataModule, LightningModule, Trainer
from pytorch_lightning.loggers import TensorBoardLogger
class AnimNeRFData(LightningDataModule):
def __init__(self, hparams):
super(AnimNeRFData, self).__init__()
# self.hparams = hparams
self.save_hyperparameters(hparams)
def setup(self, stage=None):
dataset = dataset_dict[self.hparams.dataset_name]
if self.hparams.deformation_dim + self.hparams.apperance_dim > 0 or self.hparams.optim_body_params:
frame_ids_index = {}
for i, frame_id in enumerate(self.hparams.frame_IDs):
frame_ids_index[frame_id] = i
else:
frame_ids_index = None
kwargs = {'root_dir': self.hparams.root_dir,
'img_wh': tuple(self.hparams.img_wh),
'frame_start_ID': self.hparams.train.frame_start_ID,
'frame_end_ID': self.hparams.train.frame_end_ID,
'frame_skip': self.hparams.train.frame_skip,
'subsampletype': self.hparams.train.subsampletype,
'subsamplesize': self.hparams.train.subsamplesize,
'model_type': self.hparams.model_type,
'cam_IDs': self.hparams.train.cam_IDs
}
self.train_dataset = dataset(mode='train', frame_ids_index=frame_ids_index, **kwargs)
kwargs = {'root_dir': self.hparams.root_dir,
'img_wh': tuple(self.hparams.img_wh),
'frame_start_ID': self.hparams.val.frame_start_ID,
'frame_end_ID': self.hparams.val.frame_end_ID,
'frame_skip': self.hparams.val.frame_skip,
'model_type': self.hparams.model_type,
'cam_IDs': self.hparams.val.cam_IDs
}
self.val_dataset = dataset(mode='val', frame_ids_index=frame_ids_index, **kwargs)
kwargs = {'root_dir': self.hparams.root_dir,
'img_wh': tuple(self.hparams.img_wh),
'frame_start_ID': self.hparams.test.frame_start_ID,
'frame_end_ID': self.hparams.test.frame_end_ID,
'frame_skip': self.hparams.test.frame_skip,
'model_type': self.hparams.model_type,
'cam_IDs': self.hparams.test.cam_IDs
}
self.test_dataset = dataset(mode='val', frame_ids_index=frame_ids_index, **kwargs)
def train_dataloader(self):
return DataLoader(self.train_dataset,
shuffle=True,
num_workers=self.hparams.train.num_workers,
batch_size=self.hparams.train.batch_size,
pin_memory=False)
def val_dataloader(self):
return DataLoader(self.val_dataset,
shuffle=False,
num_workers=self.hparams.val.num_workers,
batch_size=self.hparams.val.batch_size, # validate one image (H*W rays) at a time
pin_memory=False)
def test_dataloader(self):
return DataLoader(self.test_dataset,
shuffle=False,
num_workers=self.hparams.test.num_workers,
batch_size=self.hparams.test.batch_size, # validate one image (H*W rays) at a time
pin_memory=False)
class AnimNeRFSystem(LightningModule):
def __init__(self, hparams):
super(AnimNeRFSystem, self).__init__()
# self.hparams = hparams
self.save_hyperparameters(hparams)
self.anim_nerf = AnimNeRF(
model_path=self.hparams.model_path,
model_type=self.hparams.model_type,
gender=self.hparams.gender,
freqs_xyz=self.hparams.freqs_xyz,
freqs_dir=self.hparams.freqs_dir,
use_view=self.hparams.use_view,
k_neigh=self.hparams.k_neigh,
use_knn=self.hparams.use_knn,
use_unpose=self.hparams.use_unpose,
unpose_view=self.hparams.unpose_view,
use_deformation=self.hparams.use_deformation,
pose_dim=self.hparams.pose_dim,
deformation_dim=self.hparams.deformation_dim,
apperance_dim=self.hparams.apperance_dim,
use_fine=self.hparams.n_importance>0 or self.hparams.n_depth>0,
share_fine=self.hparams.share_fine,
dis_threshold=self.hparams.dis_threshold,
query_inside=self.hparams.query_inside,
)
self.models = [self.anim_nerf]
if self.hparams.deformation_dim > 0 or self.hparams.apperance_dim > 0:
self.hparams.latent_dim = self.hparams.deformation_dim + self.hparams.apperance_dim
self.latent_codes = nn.Embedding(self.hparams.num_frames, self.hparams.latent_dim)
self.latent_codes.weight.data.normal_(0, 0.1)
self.models += [self.latent_codes]
self.body_model_params = BodyModelParams(self.hparams.num_frames, model_type=self.hparams.model_type)
self.load_body_model_params()
if self.hparams.optim_body_params:
optim_params = self.body_model_params.param_names
for param_name in optim_params:
self.body_model_params.set_requires_grad(param_name, requires_grad=True)
self.models += [self.body_model_params]
self.volume_renderer = VolumeRenderer(n_coarse=self.hparams.n_samples, n_fine=self.hparams.n_importance, n_fine_depth=self.hparams.n_depth, share_fine=self.hparams.share_fine, white_bkgd=self.hparams.white_bkgd)
def load_body_model_params(self):
body_model_params = {param_name: [] for param_name in self.body_model_params.param_names}
body_model_params_dir = os.path.join(self.hparams.root_dir, '{}s'.format(self.hparams.model_type))
for frame_id in self.hparams.frame_IDs:
params = load_pickle_file(os.path.join(body_model_params_dir, "{:0>6}.pkl".format(frame_id)))
for param_name in body_model_params.keys():
body_model_params[param_name].append(torch.from_numpy(params[param_name]).float().unsqueeze(0))
for param_name in body_model_params.keys():
body_model_params[param_name] = torch.cat(body_model_params[param_name], dim=0)
self.body_model_params.init_parameters(param_name, body_model_params[param_name], requires_grad=False)
@torch.no_grad()
def decode_batch(self, batch):
frame_id = batch['frame_id']
cam_id = batch['cam_id']
frame_idx = batch['frame_idx']
rays = batch['rays'] # (bs, n_rays, 8)
rgbs = batch['rgbs'] # (bs, n_rays, 3)
alphas = batch['alphas'] # (bs, n_rays, 1)
body_model_params = {
'betas': batch['betas'],
'global_orient': batch['global_orient'],
'body_pose': batch['body_pose'],
'transl': batch['transl']
}
body_model_params_template = {
'betas': batch['betas_template'],
'global_orient': batch['global_orient_template'],
'body_pose': batch['body_pose_template'],
'transl': batch['transl_template']
}
fg_points = batch['fg_points'] # (bs, num_points, 3)
bg_points = batch['bg_points'] # (bs, num_points, 3)
return frame_id, cam_id, frame_idx, rays, rgbs, alphas, body_model_params, body_model_params_template, fg_points, bg_points
def forward(self, rays, body_model_params, body_model_params_template, latent_code=None, perturb=1.0):
bs, n_rays = rays.shape[:2]
results = defaultdict(list)
chunk = self.hparams.chunk
self.anim_nerf.set_body_model(body_model_params, body_model_params_template)
rays = self.anim_nerf.convert_to_body_model_space(rays)
self.anim_nerf.clac_ober2cano_transform()
if latent_code is not None:
self.anim_nerf.set_latent_code(latent_code)
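        # render the rays in chunks of `hparams.chunk` rays to bound peak GPU memory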
for i in range(0, n_rays, chunk):
rays_chunk = rays[:, i:i+chunk, :]
rendered_ray_chunks = self.volume_renderer(self.anim_nerf, rays_chunk, perturb=perturb)
for k, v in rendered_ray_chunks.items():
results[k] += [v]
for k, v in results.items():
results[k] = torch.cat(v, 1)
return results
def configure_optimizers(self):
parameters = [ {'params': self.anim_nerf.parameters(), 'lr': self.hparams.train.lr}]
if self.hparams.deformation_dim > 0 or self.hparams.apperance_dim > 0:
parameters.append({'params': self.latent_codes.parameters(), 'lr': self.hparams.train.lr})
if self.hparams.optim_body_params:
parameters.append({'params': self.body_model_params.parameters(), 'lr': self.hparams.train.lr*0.5})
self.optimizer = get_optimizer(self.hparams.train, parameters)
self.scheduler = get_scheduler(self.hparams.train, self.optimizer)
return [self.optimizer], [self.scheduler]
def compute_loss(self, rgbs, alphas, results, frame_idx=None, latent_code=None, fg_points=None, bg_points=None):
loss = 0
loss_details = {}
# rgb
loss_rgb = F.mse_loss(results['rgbs'], rgbs, reduction='mean')
loss += loss_rgb
loss_details['loss_rgb'] = loss_rgb
if self.hparams.n_importance > 0 and not self.hparams.share_fine:
loss_rgb_fine = F.mse_loss(results['rgbs_fine'], rgbs, reduction='mean')
loss += loss_rgb_fine
loss_details['loss_rgb_fine'] = loss_rgb_fine
# alphas
loss_alphas = F.l1_loss(results['alphas'], alphas)
loss += self.hparams.train.lambda_alphas * loss_alphas
loss_details['loss_alphas'] = loss_alphas
if self.hparams.n_importance > 0 and not self.hparams.share_fine:
loss_alphas_fine = F.l1_loss(results['alphas_fine'], alphas)
loss += self.hparams.train.lambda_alphas * loss_alphas_fine
loss_details['loss_alphas_fine'] = loss_alphas_fine
# if latent_code is not None:
# loss_latent = torch.mean(torch.pow(latent_code, 2))
# loss += self.hparams.lambda_latent * loss_latent
# loss_details['loss_latent'] = loss_latent
# frame_idx_ = torch.clamp(frame_idx+1, 0, self.hparams.num_frames)
# latent_code_ = self.latent_codes(frame_idx_)
# loss_latent_smooth = F.mse_loss(latent_code, latent_code_)
# loss += self.hparams.lambda_latent_smooth * loss_latent_smooth
# loss_details['loss_latent_smooth'] = loss_latent_smooth
if self.hparams.use_unpose and fg_points is not None:
fg_points_sigma = self.anim_nerf.query_canonical_space(fg_points, use_fine=False, only_sigma=True)
loss_foreground = torch.mean(torch.exp(-2.0/self.hparams.n_samples * torch.relu(fg_points_sigma)))
loss += self.hparams.train.lambda_foreground * loss_foreground
loss_details['loss_foreground'] = loss_foreground
if self.hparams.n_importance > 0 and not self.hparams.share_fine:
fg_points_sigma_fine = self.anim_nerf.query_canonical_space(fg_points, use_fine=True, only_sigma=True)
loss_foreground_fine = torch.mean(torch.exp(-2.0/self.hparams.n_samples * torch.relu(fg_points_sigma_fine)))
loss += self.hparams.train.lambda_foreground * loss_foreground_fine
loss_details['loss_foreground_fine'] = loss_foreground_fine
if self.hparams.use_unpose and bg_points is not None:
bg_points_sigma = self.anim_nerf.query_canonical_space(bg_points, use_fine=False, only_sigma=True)
loss_background = torch.mean(1 - torch.exp(-2.0/self.hparams.n_samples * torch.relu(bg_points_sigma)))
loss += self.hparams.train.lambda_background * loss_background
loss_details['loss_background'] = loss_background
if self.hparams.n_importance > 0 and not self.hparams.share_fine:
bg_points_sigma_fine = self.anim_nerf.query_canonical_space(bg_points, use_fine=True, only_sigma=True)
loss_background_fine = torch.mean(1 - torch.exp(-2.0/self.hparams.n_samples * torch.relu(bg_points_sigma_fine)))
loss += self.hparams.train.lambda_background * loss_background_fine
loss_details['loss_background_fine'] = loss_background_fine
# normal
points = self.anim_nerf.verts_template.detach()
points += torch.randn_like(points) * self.hparams.dis_threshold * 0.5
points_neighbs = points + torch.randn_like(points) * self.hparams.train.epsilon
points_normal = self.anim_nerf.query_canonical_space(points, use_fine=False, only_normal=True)
points_neighbs_normal = self.anim_nerf.query_canonical_space(points_neighbs, use_fine=False, only_normal=True)
points_normal = points_normal / (torch.norm(points_normal, p=2, dim=-1, keepdim=True) + 1e-5)
points_neighbs_normal = points_neighbs_normal / (torch.norm(points_neighbs_normal, p=2, dim=-1, keepdim=True) + 1e-5)
loss_normals = F.mse_loss(points_normal, points_neighbs_normal)
# loss_normals = torch.mean((torch.norm(points_normal, p=2, dim=-1) - 1)**2)
loss += self.hparams.train.lambda_normals * loss_normals
loss_details['loss_normals'] = loss_normals
if self.hparams.n_importance > 0 and not self.hparams.share_fine:
points_normal_fine = self.anim_nerf.query_canonical_space(points, use_fine=True, only_normal=True)
points_neighbs_normal_fine = self.anim_nerf.query_canonical_space(points_neighbs, use_fine=True, only_normal=True)
points_normal_fine = points_normal_fine / (torch.norm(points_normal_fine, p=2, dim=-1, keepdim=True) + 1e-5)
points_neighbs_normal_fine = points_neighbs_normal_fine / (torch.norm(points_neighbs_normal_fine, p=2, dim=-1, keepdim=True) + 1e-5)
loss_normals_fine = F.mse_loss(points_normal_fine, points_neighbs_normal_fine)
# loss_normals_fine = torch.mean((torch.norm(points_normal_fine, p=2, dim=-1) - 1)**2)
loss += self.hparams.train.lambda_normals * loss_normals_fine
loss_details['loss_normals_fine'] = loss_normals_fine
# if body_model_params is not None:
# loss_pose = F.mse_loss(results['joints'].clone(), self.anim_nerf.model(**body_model_params)['joints'].clone())
# loss += self.hparams.lambda_pose * loss_pose
# loss_details['loss_pose'] = loss_pose
# frame_id_ = torch.clamp(frame_id+1, 0, self.body_model_params.num_frame-1)
# body_model_params_ref_ = self.body_model_params(frame_id_)
# loss_pose_smooth = F.mse_loss(self.anim_nerf.joints, self.anim_nerf.model(**body_model_params_ref_)['joints'])
# loss += self.hparams.lambda_pose_smooth * loss_pose_smooth
# loss_details['loss_pose_smooth'] = loss_pose_smooth
return loss, loss_details
def training_step(self, batch, batch_nb):
frame_id, cam_id, frame_idx, rays, rgbs, alphas, body_model_params, body_model_params_template, fg_points, bg_points = self.decode_batch(batch)
if self.hparams.latent_dim > 0:
latent_code = self.latent_codes(frame_idx)
else:
latent_code = None
if self.hparams.optim_body_params:
body_model_params = self.body_model_params(frame_idx)
results = self(rays, body_model_params, body_model_params_template, latent_code=latent_code)
loss, loss_details = self.compute_loss(rgbs, alphas, results, frame_idx=frame_idx, fg_points=fg_points, bg_points=bg_points)
self.log('train/loss', loss, on_step=True, on_epoch=False, prog_bar=True, logger=True)
for loss_name in loss_details.keys():
self.log('train/{}'.format(loss_name), loss_details[loss_name], on_step=True, on_epoch=False, prog_bar=True, logger=True)
with torch.no_grad():
if 'rgbs_fine' in results:
train_psnr = psnr(results['rgbs_fine'], rgbs)
else:
train_psnr = psnr(results['rgbs'], rgbs)
self.log('train/psnr', train_psnr, on_step=True, on_epoch=False, prog_bar=True, logger=True)
lr = get_learning_rate(self.optimizer)
self.log('lr', lr, on_step=False, on_epoch=True, prog_bar=False, logger=True)
return loss
def validation_step(self, batch, batch_nb):
frame_id, cam_id, frame_idx, rays, rgbs, alphas, body_model_params, body_model_params_template, fg_points, bg_points = self.decode_batch(batch)
if self.hparams.latent_dim > 0:
if frame_idx != -1:
latent_code = self.latent_codes(frame_idx)
else:
latent_code = self.latent_codes(torch.zeros_like(frame_idx))
else:
latent_code = None
if self.hparams.optim_body_params and frame_idx != -1:
body_model_params = self.body_model_params(frame_idx)
# else:
# body_model_params['betas'] = self.body_model_params.betas(torch.zeros_like(frame_idx))
results = self(rays, body_model_params, body_model_params_template, latent_code=latent_code)
loss, _ = self.compute_loss(rgbs, alphas, results)
self.log('val/loss', loss, on_step=False, on_epoch=True, prog_bar=True, logger=True)
if 'rgbs_fine' in results:
val_psnr = psnr(results['rgbs_fine'], rgbs)
else:
val_psnr = psnr(results['rgbs'], rgbs)
self.log('val/psnr', val_psnr, on_step=False, on_epoch=True, prog_bar=True, logger=True)
W, H = self.hparams.img_wh
def visualize(frame_id, cam_id, rgbs_gt, rgbs, depths, W, H):
img = rgbs.cpu().view(H, W, 3).permute(2, 0, 1) # (3, H, W)
img_gt = rgbs_gt.cpu().view(H, W, 3).permute(2, 0, 1) # (3, H, W)
depth = visualize_depth(depths.cpu().view(H, W))
stack = torch.stack([img_gt, img, depth]) # (3, 3, H, W)
self.logger.experiment.add_images('val/GT_pred_depth_cam{:0>3d}_{:0>6d}'.format(cam_id, frame_id), stack, self.global_step)
if batch_nb % self.hparams.val.vis_freq == 0:
if 'rgbs_fine' in results:
visualize(frame_id.item(), cam_id.item(), rgbs, results['rgbs_fine'], results['depths_fine'], W, H)
else:
visualize(frame_id.item(), cam_id.item(), rgbs, results['rgbs'], results['depths'], W, H)
return loss
def test_step(self, batch, batch_nb):
frame_id, cam_id, frame_idx, rays, rgbs, alphas, body_model_params, body_model_params_template, fg_points, bg_points = self.decode_batch(batch)
if self.hparams.latent_dim > 0:
if frame_idx != -1:
latent_code = self.latent_codes(frame_idx)
else:
latent_code = self.latent_codes(torch.zeros_like(frame_idx))
else:
latent_code = None
if self.hparams.optim_body_params and frame_idx != -1:
body_model_params = self.body_model_params(frame_idx)
# else:
# body_model_params['betas'] = self.body_model_params.betas(torch.zeros_like(frame_idx))
results = self(rays, body_model_params, body_model_params_template, latent_code=latent_code, perturb=0.0)
loss, _ = self.compute_loss(rgbs, alphas, results)
self.log('test/loss', loss, on_step=False, on_epoch=True, prog_bar=True, logger=False)
if 'rgbs_fine' in results:
test_psnr = psnr(results['rgbs_fine'], rgbs)
else:
test_psnr = psnr(results['rgbs'], rgbs)
self.log('test/psnr', test_psnr, on_step=False, on_epoch=True, prog_bar=True, logger=False)
W, H = self.hparams.img_wh
def visualize(frame_id, cam_id, rgbs_gt, rgbs, depths, W, H):
img = rgbs.cpu().view(H, W, 3).permute(2, 0, 1) # (3, H, W)
img_gt = rgbs_gt.cpu().view(H, W, 3).permute(2, 0, 1) # (3, H, W)
depth = visualize_depth(depths.cpu().view(H, W))
stack = torch.stack([img_gt, img, depth]) # (3, 3, H, W)
os.makedirs(os.path.join(self.hparams.outputs_dir, self.hparams.exp_name, 'cam{:0>3d}'.format(cam_id)), exist_ok=True)
save_image(stack, '{}/{}/cam{:0>3d}/{:0>6d}.png'.format(self.hparams.outputs_dir, self.hparams.exp_name, cam_id, frame_id))
#self.logger.experiment.add_images('test/GT_pred_depth_{}'.format(nb), stack, self.global_step)
if batch_nb % self.hparams.test.vis_freq == 0:
if 'rgbs_fine' in results:
visualize(frame_id.item(), cam_id.item(), rgbs, results['rgbs_fine'], results['depths_fine'], W, H)
else:
visualize(frame_id.item(), cam_id.item(), rgbs, results['rgbs'], results['depths'], W, H)
return loss
if __name__ == '__main__':
# torch.autograd.set_detect_anomaly(True)
train_start_time = time.time()
cfg = get_cfg()
data = AnimNeRFData(cfg)
system = AnimNeRFSystem(cfg)
print(system)
if cfg.train.ckpt_path is not None:
for model_name in cfg.train.model_names_to_load:
load_ckpt(getattr(system, model_name), cfg.train.ckpt_path, model_name)
for param in getattr(system, model_name).parameters():
param.requires_grad = cfg.train.pretrained_model_requires_grad
checkpoint_callback = ModelCheckpoint(dirpath=f'{cfg.checkpoints_dir}/{cfg.exp_name}',
filename='{epoch:d}',
monitor='train/psnr',
mode='max',
save_top_k=cfg.train.save_top_k,
save_last=cfg.train.save_last)
logger = TensorBoardLogger(
save_dir=cfg.logs_dir,
name=cfg.exp_name,
)
trainer = Trainer(max_epochs=cfg.train.max_epochs,
callbacks=[checkpoint_callback],
logger=logger,
gpus=cfg.num_gpus,
strategy=cfg.train.strategy,
num_sanity_val_steps=1,
benchmark=True,
profiler="simple")
trainer.fit(system, data, ckpt_path=cfg.train.ckpt_path if cfg.train.resume else None)
trainer.test(datamodule=data)
train_end_message = 'End of training \t Time Taken: %.3f hours' % ((time.time() - train_start_time)/3600.0)
print(train_end_message) |
the-stack_0_84 | from mmdet.models.builder import DETECTORS
from .single_stage_text_detector import SingleStageTextDetector
from .text_detector_mixin import TextDetectorMixin
@DETECTORS.register_module()
class FCENet(TextDetectorMixin, SingleStageTextDetector):
"""The class for implementing FCENet text detector
FCENet(CVPR2021): Fourier Contour Embedding for Arbitrary-shaped Text
Detection
[https://arxiv.org/abs/2104.10442]
"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
show_score=False,
init_cfg=None):
SingleStageTextDetector.__init__(self, backbone, neck, bbox_head,
train_cfg, test_cfg, pretrained,
init_cfg)
TextDetectorMixin.__init__(self, show_score)
def simple_test(self, img, img_metas, rescale=False):
x = self.extract_feat(img)
outs = self.bbox_head(x)
boundaries = self.bbox_head.get_boundary(outs, img_metas, rescale)
return [boundaries]
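# A hedged usage sketch, added for illustration (not part of the original file);
# the config dicts below are placeholders, while the real backbone/neck/head
# settings live in the MMOCR config files:
#
#   model = FCENet(
#       backbone=dict(type='mmdet.ResNet', depth=50),
#       neck=dict(type='mmdet.FPN', in_channels=[512, 1024, 2048], out_channels=256),
#       bbox_head=dict(type='FCEHead', in_channels=256),
#       test_cfg=dict())
#   boundaries = model.simple_test(img, img_metas, rescale=True)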
|
the-stack_0_87 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import string
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
else:
string_types = basestring,
integer_types = (int, long)
SEP = "____"
KLS_NAME_CHARSET = set(string.ascii_letters + string.digits)
VAR_NAME_CHARSET = set(string.ascii_lowercase + string.digits + "_")
VAR_FORBIDDEN_CHARSET = set(
r"""~`!@#$%^&*()-+={}[]|\:;"'<,>.?/""" + string.ascii_uppercase)
INDEX_KEY_FORBIDDEN_CHARSET = set(r"""~`!@#$%^&*()-+={}[]|\:;"'<,>.?/""")
WHITE_SPACE = set(string.whitespace)
def is_valid_class_name(name):
"""Check if it is a valid variable name.
A valid variable name has to:
- start wither upper case
- only alpha digits
"""
try:
assert name[0].isupper()
assert len(set(name).difference(KLS_NAME_CHARSET)) == 0
return True
except:
return False
def is_valid_variable_name(name):
"""Check if it is a valid variable name.
A valid variable name has to:
    - start with a lower case letter
    - not contain the reserved SEPARATOR
    - contain only lower case letters, digits and underscores
"""
try:
assert name[0].islower()
assert SEP not in name
assert len(set(name).difference(VAR_NAME_CHARSET)) == 0
return True
except:
return False
def is_valid_surfix(name):
"""Surfix is the attribute name used for index.
**中文文档**
此方法暂时没用。
"""
try:
assert SEP not in name
assert len(VAR_FORBIDDEN_CHARSET.intersection(name)) == 0
return True
except:
return False
def to_variable_name(cls_name):
"""Convert class name to variable name format. usually use "_" to connect
each word.
**中文文档**
将类名转化为其实例的变量名。
"""
assert is_valid_class_name(cls_name)
words = list()
chunks = list()
for char in cls_name:
if char.isupper():
words.append("".join(chunks))
chunks = ["_", char.lower()]
else:
chunks.append(char)
words.append("".join(chunks))
return "".join(words)[1:]
def to_index_key(value):
"""Convert a value to it's index key in string.
Only alpha and digits and underscore is allowed. Whitespace delimiter will
replaced with underscore.
`` *David# #John* `` -> ``David_John``
"""
if isinstance(value, integer_types):
key = str(value)
elif isinstance(value, string_types):
l = list()
for c in value:
if c not in INDEX_KEY_FORBIDDEN_CHARSET:
if c in WHITE_SPACE:
l.append(" ")
else:
l.append(c)
words = [word for word in "".join(
l).strip().split(" ") if word.strip()]
key = "_".join(words)
elif isinstance(value, float):
key = str(value).replace(".", "d")
else:
raise TypeError("%r is not an indexable value.")
return key
def test_is_valid_class_name():
for name in ["User", "MyClass", "TestCase"]:
assert is_valid_class_name(name) is True
for name in ["user", "My_Class", "testCase"]:
assert is_valid_class_name(name) is False
def test_is_valid_variable_name():
for name in ["name", "my_class", "num1"]:
assert is_valid_variable_name(name) is True
for name in ["Name", "myClass", "1a"]:
assert is_valid_variable_name(name) is False
def test_is_valid_surfix():
assert is_valid_surfix("大卫") is True
def test_to_variable_name():
assert to_variable_name("User") == "user"
assert to_variable_name("MyClass") == "my_class"
def test_to_index_key():
assert to_index_key(1) == "1"
assert to_index_key("David John") == "David_John"
assert to_index_key(" *David+ +John* ") == "David_John"
assert to_index_key("中文") == "中文"
assert to_index_key(" 英 文 ") == "英_文"
assert to_index_key(3.14) == "3d14"
if __name__ == "__main__":
test_is_valid_class_name()
test_is_valid_variable_name()
test_is_valid_surfix()
test_to_variable_name()
test_to_index_key()
|
the-stack_0_88 | from typing import List
import dash_html_components as html
from .. import WebvizPluginABC
class ExampleTour(WebvizPluginABC):
@property
def tour_steps(self) -> List[dict]:
return [
{"id": self.uuid("blue_text"), "content": "This is the first step"},
{"id": self.uuid("red_text"), "content": "This is the second step"},
]
@property
def layout(self) -> html.Div:
return html.Div(
children=[
html.Span(
"Here is some blue text to explain... ",
id=self.uuid("blue_text"),
style={"color": "blue"},
),
html.Span(
" ...and here is some red text that also needs an explanation.",
id=self.uuid("red_text"),
style={"color": "red"},
),
]
)
|
the-stack_0_92 | #!/usr/bin/env python
try:
from setuptools import setup
requires = {
'install_requires': ['django >= 4.0'],
}
except ImportError:
from distutils.core import setup
requires = {}
from os.path import abspath, dirname, join
with open(join(dirname(abspath(__file__)), 'src', 'rfdoc', 'version.py')) as f:
exec(f.read())
# Maximum width in Windows installer seems to be 70 characters -------|
DESCRIPTION = """
RFDoc is a web application for storing and searching Robot Framework
test library and resource file documentations.
Required packages:
django >= 4.0
"""[1:-1]
CLASSIFIERS = """
Development Status :: 5 - Production/Stable
License :: OSI Approved :: Apache Software License
Operating System :: OS Independent
Programming Language :: Python
Topic :: Software Development :: Testing
"""[1:-1]
setup(
name = 'robotframework-rfdoc',
version = VERSION,
description = 'Web-based Robot Framework library documentation server',
long_description = DESCRIPTION,
author = 'Robot Framework Developers',
author_email = '[email protected]',
url = 'http://code.google.com/p/rfdoc/',
license = 'Apache License 2.0',
keywords = 'robotframework testing testautomation documentation',
platforms = 'any',
classifiers = CLASSIFIERS.splitlines(),
package_dir = {'rfdoc': 'src/rfdoc'},
packages = ['rfdoc', 'rfdoc.rfdocapp', 'rfdoc.rfdocapp.views',
'rfdoc.rfdocapp.templatetags', 'rfdoc.rfdocapp.utils'],
package_data = {'rfdoc': ['*.tmpl', 'rfdocapp/templates/*.html',
'rfdocapp/static/*.css',
'rfdocapp/static/*.js']},
**requires
)
|
the-stack_0_94 | from pelican import signals
from pelican.generators import ArticlesGenerator, PagesGenerator
# Make sure that when a title breaks, there will never be
# a single word "alone" on its line
# Does not work if the last "word" of the title is an emoji
# in the form of an image (like Twemoji)
# Title has to be more than four words
# in order to be considered
SMART_BREAK_MIN_LEN = 4
def smart_break(document):
# Get the number of words
splited = document.title.split(' ')
length = len(splited)
if length > SMART_BREAK_MIN_LEN:
# Join the last two elements with a non-breaking space
end = ' '.join(splited[length - 2:])
# Get the start of the title back
start = ' '.join(splited[:length-2])
# Glue the title back together
final = f'{start} {end}'
# Write to a custom property
# Writing the title directly leads to not being
# interpreted at various places
document.smart_title = final
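# For illustration (this comment block is not part of the original plugin): with
# a five-word title such as "A quick brown fox jumps", start becomes
# "A quick brown" and end becomes "fox" + a non-breaking space + "jumps", so the
# stored smart_title never lets the last word wrap onto a line of its own.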
def run(generators):
for g in generators:
if isinstance(g, ArticlesGenerator):
for a in g.articles:
smart_break(a)
if isinstance(g, PagesGenerator):
for p in g.pages:
smart_break(p)
def register():
signals.all_generators_finalized.connect(run)
|
the-stack_0_95 | from werkzeug.local import LocalStack, LocalProxy
def _find_bot():
from .wx import get_bot
top = _wx_ctx_stack.top
if top is None:
top = get_bot()
_wx_ctx_stack.push(top)
return top
_wx_ctx_stack = LocalStack()
current_bot = LocalProxy(_find_bot)
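# Usage note added for clarity (not in the original module): the proxy resolves
# the real bot lazily, so e.g. `current_bot.some_method(...)` first triggers
# _find_bot() and then forwards the call to the returned bot object
# (`some_method` is a hypothetical attribute used only as an example).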
|
the-stack_0_96 | _base_ = 'faster_rcnn_r50_fpn_mstrain_3x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(dataset=dict(pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline)) |
the-stack_0_99 | #!/usr/bin/env python3
import os
os.environ['NOCRASH'] = '1'
import unittest
import matplotlib
matplotlib.use('svg')
from selfdrive.config import Conversions as CV
from selfdrive.car.honda.values import CruiseButtons as CB
from selfdrive.test.longitudinal_maneuvers.maneuver import Maneuver
from selfdrive.manager.process_config import managed_processes
from common.file_helpers import mkdirs_exists_ok
from common.params import Params
def check_no_collision(log):
return min(log['d_rel']) > 0
def check_fcw(log):
return any(log['fcw'])
def check_engaged(log):
return log['controls_state_msgs'][-1][-1].active
maneuvers = [
Maneuver(
'while cruising at 40 mph, change cruise speed to 50mph',
duration=30.,
initial_speed=40. * CV.MPH_TO_MS,
cruise_button_presses=[(CB.DECEL_SET, 2.), (0, 2.3),
(CB.RES_ACCEL, 10.), (0, 10.1),
(CB.RES_ACCEL, 10.2), (0, 10.3)],
checks=[check_engaged],
),
Maneuver(
'while cruising at 60 mph, change cruise speed to 50mph',
duration=30.,
initial_speed=60. * CV.MPH_TO_MS,
cruise_button_presses=[(CB.DECEL_SET, 2.), (0, 2.3),
(CB.DECEL_SET, 10.), (0, 10.1),
(CB.DECEL_SET, 10.2), (0, 10.3)],
checks=[check_engaged],
),
Maneuver(
'while cruising at 20mph, uphill grade of 10%',
duration=25.,
initial_speed=20. * CV.MPH_TO_MS,
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3)],
grade_values=[0., 0., .1],
grade_breakpoints=[0., 10., 11.],
checks=[check_engaged],
),
Maneuver(
'while cruising at 20mph, downhill grade of -10%',
duration=25.,
initial_speed=20. * CV.MPH_TO_MS,
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3)],
grade_values=[0., 0., -.1],
grade_breakpoints=[0., 10., 11.],
checks=[check_engaged],
),
Maneuver(
'approaching a 40mph car while cruising at 60mph from 100m away',
duration=30.,
initial_speed=60. * CV.MPH_TO_MS,
lead_relevancy=True,
initial_distance_lead=100.,
speed_lead_values=[40. * CV.MPH_TO_MS, 40. * CV.MPH_TO_MS],
speed_lead_breakpoints=[0., 100.],
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3)],
checks=[check_engaged, check_no_collision],
),
Maneuver(
'approaching a 0mph car while cruising at 40mph from 150m away',
duration=30.,
initial_speed=40. * CV.MPH_TO_MS,
lead_relevancy=True,
initial_distance_lead=150.,
speed_lead_values=[0. * CV.MPH_TO_MS, 0. * CV.MPH_TO_MS],
speed_lead_breakpoints=[0., 100.],
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3)],
checks=[check_engaged, check_no_collision],
),
Maneuver(
'steady state following a car at 20m/s, then lead decel to 0mph at 1m/s^2',
duration=50.,
initial_speed=20.,
lead_relevancy=True,
initial_distance_lead=35.,
speed_lead_values=[20., 20., 0.],
speed_lead_breakpoints=[0., 15., 35.0],
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3)],
checks=[check_engaged, check_no_collision],
),
Maneuver(
'steady state following a car at 20m/s, then lead decel to 0mph at 2m/s^2',
duration=50.,
initial_speed=20.,
lead_relevancy=True,
initial_distance_lead=35.,
speed_lead_values=[20., 20., 0.],
speed_lead_breakpoints=[0., 15., 25.0],
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3)],
checks=[check_engaged, check_no_collision],
),
Maneuver(
'steady state following a car at 20m/s, then lead decel to 0mph at 3m/s^2',
duration=50.,
initial_speed=20.,
lead_relevancy=True,
initial_distance_lead=35.,
speed_lead_values=[20., 20., 0.],
speed_lead_breakpoints=[0., 15., 21.66],
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3)],
checks=[check_engaged, check_fcw],
),
Maneuver(
'steady state following a car at 20m/s, then lead decel to 0mph at 5m/s^2',
duration=40.,
initial_speed=20.,
lead_relevancy=True,
initial_distance_lead=35.,
speed_lead_values=[20., 20., 0.],
speed_lead_breakpoints=[0., 15., 19.],
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3)],
checks=[check_engaged, check_fcw],
),
Maneuver(
'starting at 0mph, approaching a stopped car 100m away',
duration=30.,
initial_speed=0.,
lead_relevancy=True,
initial_distance_lead=100.,
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7),
(CB.RES_ACCEL, 1.8), (0.0, 1.9)],
checks=[check_engaged, check_no_collision],
),
Maneuver(
"following a car at 60mph, lead accel and decel at 0.5m/s^2 every 2s",
duration=25.,
initial_speed=30.,
lead_relevancy=True,
initial_distance_lead=49.,
speed_lead_values=[30., 30., 29., 31., 29., 31., 29.],
speed_lead_breakpoints=[0., 6., 8., 12., 16., 20., 24.],
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7)],
checks=[check_engaged, check_no_collision],
),
Maneuver(
"following a car at 10mph, stop and go at 1m/s2 lead dece1 and accel",
duration=70.,
initial_speed=10.,
lead_relevancy=True,
initial_distance_lead=20.,
speed_lead_values=[10., 0., 0., 10., 0., 10.],
speed_lead_breakpoints=[10., 20., 30., 40., 50., 60.],
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7)],
checks=[check_engaged, check_no_collision],
),
Maneuver(
"green light: stopped behind lead car, lead car accelerates at 1.5 m/s",
duration=30.,
initial_speed=0.,
lead_relevancy=True,
initial_distance_lead=4.,
speed_lead_values=[0, 0, 45],
speed_lead_breakpoints=[0, 10., 40.],
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7),
(CB.RES_ACCEL, 1.8), (0.0, 1.9),
(CB.RES_ACCEL, 2.0), (0.0, 2.1),
(CB.RES_ACCEL, 2.2), (0.0, 2.3)],
checks=[check_engaged, check_no_collision],
),
Maneuver(
"stop and go with 1m/s2 lead decel and accel, with full stops",
duration=70.,
initial_speed=0.,
lead_relevancy=True,
initial_distance_lead=20.,
speed_lead_values=[10., 0., 0., 10., 0., 0.],
speed_lead_breakpoints=[10., 20., 30., 40., 50., 60.],
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7)],
checks=[check_engaged, check_no_collision],
),
Maneuver(
"stop and go with 1.5m/s2 lead accel and 3.3m/s^2 lead decel, with full stops",
duration=45.,
initial_speed=0.,
lead_relevancy=True,
initial_distance_lead=20.,
speed_lead_values=[10., 0., 0., 10., 0., 0.],
speed_lead_breakpoints=[10., 13., 26., 33., 36., 45.],
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7)],
checks=[check_engaged, check_no_collision],
),
Maneuver(
"accelerate from 20 while lead vehicle decelerates from 40 to 20 at 1m/s2",
duration=30.,
initial_speed=10.,
lead_relevancy=True,
initial_distance_lead=10.,
speed_lead_values=[20., 10.],
speed_lead_breakpoints=[1., 11.],
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7),
(CB.RES_ACCEL, 1.8), (0.0, 1.9),
(CB.RES_ACCEL, 2.0), (0.0, 2.1),
(CB.RES_ACCEL, 2.2), (0.0, 2.3)],
checks=[check_engaged, check_no_collision],
),
Maneuver(
"accelerate from 20 while lead vehicle decelerates from 40 to 0 at 2m/s2",
duration=30.,
initial_speed=10.,
lead_relevancy=True,
initial_distance_lead=10.,
speed_lead_values=[20., 0.],
speed_lead_breakpoints=[1., 11.],
cruise_button_presses=[(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7),
(CB.RES_ACCEL, 1.8), (0.0, 1.9),
(CB.RES_ACCEL, 2.0), (0.0, 2.1),
(CB.RES_ACCEL, 2.2), (0.0, 2.3)],
checks=[check_engaged, check_no_collision],
),
Maneuver(
"fcw: traveling at 30 m/s and approaching lead traveling at 20m/s",
duration=15.,
initial_speed=30.,
lead_relevancy=True,
initial_distance_lead=100.,
speed_lead_values=[20.],
speed_lead_breakpoints=[1.],
cruise_button_presses=[],
checks=[check_fcw],
),
Maneuver(
"fcw: traveling at 20 m/s following a lead that decels from 20m/s to 0 at 1m/s2",
duration=18.,
initial_speed=20.,
lead_relevancy=True,
initial_distance_lead=35.,
speed_lead_values=[20., 0.],
speed_lead_breakpoints=[3., 23.],
cruise_button_presses=[],
checks=[check_fcw],
),
Maneuver(
"fcw: traveling at 20 m/s following a lead that decels from 20m/s to 0 at 3m/s2",
duration=13.,
initial_speed=20.,
lead_relevancy=True,
initial_distance_lead=35.,
speed_lead_values=[20., 0.],
speed_lead_breakpoints=[3., 9.6],
cruise_button_presses=[],
checks=[check_fcw],
),
Maneuver(
"fcw: traveling at 20 m/s following a lead that decels from 20m/s to 0 at 5m/s2",
duration=8.,
initial_speed=20.,
lead_relevancy=True,
initial_distance_lead=35.,
speed_lead_values=[20., 0.],
speed_lead_breakpoints=[3., 7.],
cruise_button_presses=[],
checks=[check_fcw],
)
]
def setup_output():
output_dir = os.path.join(os.getcwd(), 'out/longitudinal')
if not os.path.exists(os.path.join(output_dir, "index.html")):
# write test output header
css_style = """
.maneuver_title {
font-size: 24px;
text-align: center;
}
.maneuver_graph {
width: 100%;
}
"""
view_html = "<html><head><style>%s</style></head><body><table>" % (css_style,)
for i, man in enumerate(maneuvers):
view_html += "<tr><td class='maneuver_title' colspan=5><div>%s</div></td></tr><tr>" % (man.title,)
for c in ['distance.svg', 'speeds.svg', 'acceleration.svg', 'pedals.svg', 'pid.svg']:
view_html += "<td><img class='maneuver_graph' src='%s'/></td>" % (os.path.join("maneuver" + str(i + 1).zfill(2), c), )
view_html += "</tr>"
mkdirs_exists_ok(output_dir)
with open(os.path.join(output_dir, "index.html"), "w") as f:
f.write(view_html)
class LongitudinalControl(unittest.TestCase):
@classmethod
def setUpClass(cls):
os.environ['SIMULATION'] = "1"
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['NO_CAN_TIMEOUT'] = "1"
setup_output()
params = Params()
params.clear_all()
params.put_bool("Passive", bool(os.getenv("PASSIVE")))
params.put_bool("OpenpilotEnabledToggle", True)
params.put_bool("CommunityFeaturesToggle", True)
# hack
def test_longitudinal_setup(self):
pass
def run_maneuver_worker(k):
man = maneuvers[k]
output_dir = os.path.join(os.getcwd(), 'out/longitudinal')
def run(self):
print(man.title)
valid = False
for _ in range(3):
managed_processes['radard'].start()
managed_processes['controlsd'].start()
managed_processes['plannerd'].start()
plot, valid = man.evaluate()
plot.write_plot(output_dir, "maneuver" + str(k + 1).zfill(2))
managed_processes['radard'].stop()
managed_processes['controlsd'].stop()
managed_processes['plannerd'].stop()
if valid:
break
self.assertTrue(valid)
return run
for k in range(len(maneuvers)):
setattr(LongitudinalControl, "test_longitudinal_maneuvers_%d" % (k + 1), run_maneuver_worker(k))
if __name__ == "__main__":
unittest.main(failfast=True)
|
the-stack_0_101 | import numpy as np
from .._helpers import _writer_map, read, reader_map, write
def add_args(parser):
parser.add_argument("infile", type=str, help="mesh file to be read from")
parser.add_argument(
"--input-format",
"-i",
type=str,
choices=sorted(list(reader_map.keys())),
help="input file format",
default=None,
)
parser.add_argument(
"--output-format",
"-o",
type=str,
choices=sorted(list(_writer_map.keys())),
help="output file format",
default=None,
)
parser.add_argument(
"--ascii",
"-a",
action="store_true",
help="write in ASCII format variant (where applicable, default: binary)",
)
parser.add_argument("outfile", type=str, help="mesh file to be written to")
parser.add_argument(
"--float-format",
"-f",
type=str,
help="float format used in output ASCII files (default: .16e)",
)
parser.add_argument(
"--sets-to-int-data",
"-s",
action="store_true",
help="if possible, convert sets to integer data (useful if the output type does not support sets)",
)
parser.add_argument(
"--int-data-to-sets",
"-d",
action="store_true",
help="if possible, convert integer data to sets (useful if the output type does not support integer data)",
)
def convert(args):
# read mesh data
mesh = read(args.infile, file_format=args.input_format)
# Some converters (like VTK) require `points` to be contiguous.
mesh.points = np.ascontiguousarray(mesh.points)
if args.sets_to_int_data:
mesh.point_sets_to_data()
mesh.cell_sets_to_data()
if args.int_data_to_sets:
for key in mesh.point_data:
mesh.point_data_to_sets(key)
for key in mesh.cell_data:
mesh.cell_data_to_sets(key)
# write it out
kwargs = {"file_format": args.output_format}
if args.float_format is not None:
kwargs["float_fmt"] = args.float_format
if args.ascii:
kwargs["binary"] = False
write(args.outfile, mesh, **kwargs)
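# Hedged example invocation (the exact entry-point name depends on how meshio
# wires add_args/convert into its command-line interface):
#
#   meshio convert input.msh output.vtu --output-format vtu --ascii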
|
the-stack_0_102 | from sklearn.datasets import load_boston
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_predict, cross_val_score
import matplotlib.pyplot as plt
import pandas as pd
# load the data set we'll be working with. In this case the Boston housing
boston = load_boston()
boston_df = pd.DataFrame(data=boston.data, columns=boston.feature_names) # get it into a pandas data frame
y = pd.DataFrame(data=boston.target)  # the regression target: median home value (MEDV)
X = boston_df[['LSTAT', 'AGE']]
# boston_df.describe() # take a look at the data
boston = None # help garbage collector
# Task 2) make a linear regression model with LSTAT+AGE to predict median value
lr1 = LinearRegression() # create the object
lr1.fit(X, y)
# cross_val_predict returns an array of the same size as `y` where each entry
# is a prediction obtained by cross validation:
predicted = cross_val_predict(lr1, X, y, cv=10)
scores = cross_val_score(lr1, X, y, cv=10)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
fig, ax = plt.subplots()
ax.scatter(y, predicted, edgecolors=(0, 0, 0)) # predicted values
ax.plot([y.min(), y.max()], [y.min(), y.max()], 'k--', lw=4) # regression line
ax.set_xlabel('Measured')
ax.set_ylabel('Predicted')
# uncomment line below to show graph
plt.show()
|
the-stack_0_103 | ###########################################################################
### Estimation of Slope along the boundary using the buffer distance ###
### Author : Lakshmi E ###
### Last Edit: 13-April-2020 ###
###########################################################################
import arcpy
import os,glob
import numpy as np
from arcpy.sa import *
from arcpy import env
import dbf
import csv
# work in the current directory
env.workspace=(input("give the current directory:")) #'C:\Users\Laks\Desktop\REGSim module'
dirpath = os.getcwd()
#assign the buffer distance
buffer_dist = input('Buffer distance between the study area (meters):')
num_pts = input('no. of points considered across the boundary:')
# Load required toolboxes
arcpy.ImportToolbox(".\Module\CreatePointsLines.tbx")
arcpy.CheckOutExtension("spatial")
# create buffer in and out
def buffer(bound):
print('Creating buffer inside and outside the boundary area...')
arcpy.Buffer_analysis(bound, 'buffin{0}.shp'.format(buffer_dist),'-{0}'.format(buffer_dist),'FULL','ROUND','NONE','')
arcpy.Buffer_analysis(bound, 'bufout{0}.shp'.format(buffer_dist),'{0}'.format(buffer_dist),'FULL','ROUND','NONE','')
bound='bound_hmda.shp'
buffer(bound)
# create points to the feature class
print('Converting polygon to line feature class...')
def ext_pts(bound,boundin,boundout,bufin,bufout):
list=[bound,boundin,boundout,bufin,bufout]
for i in list:
print(i)
arcpy.FeatureToLine_management(i,'{0}_line.shp'.format(i[:-4]),'','ATTRIBUTES')
arcpy.AddField_management('{0}_line.shp'.format(i[:-4]),'Length','FLOAT','','','','','NULLABLE','NON_REQUIRED',"")
arcpy.CalculateField_management('{0}_line.shp'.format(i[:-4]), "Length", "!SHAPE.Length!", "PYTHON", "")
length = arcpy.da.SearchCursor('{0}_line.shp'.format(i[:-4]), "Length").next()[0]
dist_intv = length/num_pts #point_num
arcpy.CreatePointsLines_CreatePointsLines('{0}_line.shp'.format(i[:-4]),'INTERVAL BY DISTANCE', 'BEGINNING','NO','',dist_intv,'NO','{0}_pts.shp'.format(i[:-4]))
print('Created points to the feature class...')
bound = 'bound_hmda.shp'
boundin = 'bndin_hmda.shp'
boundout = 'bndou_hmda.shp'
bufin = 'buffin{0}.shp'.format(buffer_dist)
bufout = 'bufout{0}.shp'.format(buffer_dist)
ext_pts(bound,boundin,boundout,bufin,bufout)
# extract elevation value to the points
print('Extracting the elevation data from the raster to the point featureclass...')
def pts_value(raster,list):
for i in raster:
print(i)
ExtractValuesToPoints('bound_hmda_pts.shp','{0}'.format(i),'bound{1}_{0}_extrpts{2}_{3}.shp'.format(i[9:12],buffer_dist,num_pts,i[2:4]),'INTERPOLATE','VALUE_ONLY')
arcpy.AddField_management('bound{1}_{0}_extrpts{2}_{3}.shp'.format(i[9:12],buffer_dist,num_pts,i[2:4]),"Slope","DOUBLE","", "", "", "", "NULLABLE", "NON_REQUIRED", "")
for j,z in zip(list,list_bound):
print(j)
print(z)
ExtractValuesToPoints('{0}_pts.shp'.format(j[:-4]),'{0}'.format(i),'{0}_{1}_extrpts.shp'.format(j[0:5],i[9:12]),'INTERPOLATE','VALUE_ONLY')
ExtractValuesToPoints('{0}_pts.shp'.format(z[:-4]),'{0}'.format(i),'{0}_{1}_extrpts.shp'.format(z[0:5],i[9:12]),'INTERPOLATE','VALUE_ONLY')
for k,l in zip(list_bound,list):
arcpy.Near_analysis('{0}_{1}_extrpts.shp'.format(k[0:5],i[9:12]),'{0}_{1}_extrpts.shp'.format(l[0:5],i[9:12]),'','NO_LOCATION','NO_ANGLE')
arcpy.JoinField_management('{0}_{1}_extrpts.shp'.format(k[0:5],i[9:12]),'NEAR_FID','{0}_{1}_extrpts.shp'.format(l[0:5],i[9:12]),"FID","#")
arcpy.AddField_management('{0}_{1}_extrpts.shp'.format(k[0:5],i[9:12]), "Slope", "FLOAT", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
arcpy.AddField_management('{0}_{1}_extrpts.shp'.format(l[0:5],i[9:12]), "Slope", "FLOAT", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
arcpy.CalculateField_management('bndou_{0}_extrpts.shp'.format(i[9:12]), "Slope", "(!RASTERVALU!- !RASTERVA_1!) / !NEAR_DIST!", "PYTHON_9.3", "")
arcpy.CalculateField_management('bndin_{0}_extrpts.shp'.format(i[9:12]), "Slope", "(!RASTERVA_1!-!RASTERVALU!) / !NEAR_DIST!", "PYTHON_9.3", "")
raster=sorted(glob.glob("*_GWL_*.tif"))
list=['buffin{0}.shp'.format(buffer_dist),'bufout{0}.shp'.format(buffer_dist)]
list_bound = ['bndin_hmda.shp','bndou_hmda.shp']
pts_value(raster,list)
# estimae the average slope
print('Estimating slope in each point of the boundary area...')
filesav = []
def avg_sl(raster):
for i in raster:
list=sorted(glob.glob('bnd*{0}_extrpts.dbf'.format(i[9:12])))
print(list)
tabin=dbf.Table('{0}'.format(list[0]))
tabin.open()
tabout=dbf.Table('{0}'.format(list[1]))
tabout.open()
tabbou=dbf.Table('bound{1}_{0}_extrpts{2}_{3}.dbf'.format(i[9:12],buffer_dist,num_pts,i[2:4]))
tabbou.open(mode=dbf.READ_WRITE)
for l,j,k in zip(tabin,tabout,range(0,len(tabbou))):
mas=l[-1]
sla=j[-1]
res=((mas+sla)/2)
with tabbou[k] as record:
record.slope=res
tabin.close()
tabout.close()
tabbou.close()
print(tabbou)
f = 'bound{1}_{0}_extrpts{2}_{3}'.format(i[9:12],buffer_dist,num_pts,i[2:4])
filesav.append(f)
raster=sorted(glob.glob("*_GWL_*.tif"))
avg_sl(raster)
print(' Saving the output file')
with open('output.csv', 'wb') as output:
csvwriter = csv.writer(output,dialect='excel')
for row in filesav:
csvwriter.writerow([row])
output.close()
#end of the script
|
the-stack_0_104 | import leveldb
db = leveldb.LevelDB('./db')
# single put
db.Put(b'hello', b'hello world')
print(db.Get(b'hello').decode('utf-8'))
# multiple put/delete applied atomically, and committed to disk
batch = leveldb.WriteBatch()
batch.Put(b'hello', b'world')
batch.Put(b'hello again', b'world')
batch.Delete(b'hello')
db.Write(batch, sync = True)
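# A hedged extra (not in the original snippet): py-leveldb also supports ordered
# range iteration, yielding (key, value) byte pairs.
# for key, value in db.RangeIter(key_from=b'hello', key_to=b'hellp'):
#     print(key.decode('utf-8'), '->', value.decode('utf-8'))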
|
the-stack_0_106 | from __future__ import absolute_import, unicode_literals
from datetime import date
from django.db import models
from modelcluster.contrib.taggit import ClusterTaggableManager
from modelcluster.fields import ParentalKey
from taggit.models import TaggedItemBase
from wagtail.utils.pagination import paginate
from wagtail.wagtailadmin.edit_handlers import (
FieldPanel, InlinePanel, MultiFieldPanel, PageChooserPanel)
from wagtail.wagtailcore.fields import RichTextField
from wagtail.wagtailcore.models import Orderable, Page
from wagtail.wagtaildocs.edit_handlers import DocumentChooserPanel
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtailsearch import index
# ABSTRACT MODELS
# =============================
class AbstractLinkFields(models.Model):
link_external = models.URLField("External link", blank=True)
link_page = models.ForeignKey(
'wagtailcore.Page',
null=True,
blank=True,
related_name='+',
on_delete=models.CASCADE
)
link_document = models.ForeignKey(
'wagtaildocs.Document',
null=True,
blank=True,
related_name='+',
on_delete=models.CASCADE
)
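    # Comment added for clarity: the resolved link falls back in this order:
    # page URL, then document URL, then the raw external URL.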
@property
def link(self):
if self.link_page:
return self.link_page.url
elif self.link_document:
return self.link_document.url
else:
return self.link_external
api_fields = ('link', )
panels = [
FieldPanel('link_external'),
PageChooserPanel('link_page'),
DocumentChooserPanel('link_document'),
]
class Meta:
abstract = True
class AbstractRelatedLink(AbstractLinkFields):
title = models.CharField(max_length=255, help_text="Link title")
api_fields = ('title', ) + AbstractLinkFields.api_fields
panels = [
FieldPanel('title'),
MultiFieldPanel(AbstractLinkFields.panels, "Link"),
]
class Meta:
abstract = True
class AbstractCarouselItem(AbstractLinkFields):
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
embed_url = models.URLField("Embed URL", blank=True)
caption = models.CharField(max_length=255, blank=True)
api_fields = (
'image',
'embed_url',
'caption',
) + AbstractLinkFields.api_fields
panels = [
ImageChooserPanel('image'),
FieldPanel('embed_url'),
FieldPanel('caption'),
MultiFieldPanel(AbstractLinkFields.panels, "Link"),
]
class Meta:
abstract = True
class ContactFieldsMixin(models.Model):
telephone = models.CharField(max_length=20, blank=True)
email = models.EmailField(blank=True)
address_1 = models.CharField(max_length=255, blank=True)
address_2 = models.CharField(max_length=255, blank=True)
city = models.CharField(max_length=255, blank=True)
country = models.CharField(max_length=255, blank=True)
post_code = models.CharField(max_length=10, blank=True)
api_fields = (
'telephone',
'email',
'address_1',
'address_2',
'city',
'country',
'post_code',
)
panels = [
FieldPanel('telephone'),
FieldPanel('email'),
FieldPanel('address_1'),
FieldPanel('address_2'),
FieldPanel('city'),
FieldPanel('country'),
FieldPanel('post_code'),
]
class Meta:
abstract = True
# PAGE MODELS
# =============================
# Home page
class HomePage(Page):
page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)
body = RichTextField(blank=True)
api_fields = (
'body',
'carousel_items',
'related_links',
)
search_fields = Page.search_fields + [
index.SearchField('body'),
]
class Meta:
verbose_name = "homepage"
class HomePageCarouselItem(Orderable, AbstractCarouselItem):
page = ParentalKey('HomePage', related_name='carousel_items', on_delete=models.CASCADE)
class HomePageRelatedLink(Orderable, AbstractRelatedLink):
page = ParentalKey('HomePage', related_name='related_links', on_delete=models.CASCADE)
HomePage.content_panels = Page.content_panels + [
FieldPanel('body', classname="full"),
InlinePanel('carousel_items', label="Carousel items"),
InlinePanel('related_links', label="Related links"),
]
# Standard pages
class StandardPage(Page):
page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)
intro = RichTextField(blank=True)
body = RichTextField(blank=True)
feed_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
api_fields = (
'intro',
'body',
'feed_image',
'carousel_items',
'related_links',
)
search_fields = Page.search_fields + [
index.SearchField('intro'),
index.SearchField('body'),
]
class StandardPageCarouselItem(Orderable, AbstractCarouselItem):
page = ParentalKey('StandardPage', related_name='carousel_items', on_delete=models.CASCADE)
class StandardPageRelatedLink(Orderable, AbstractRelatedLink):
page = ParentalKey('StandardPage', related_name='related_links', on_delete=models.CASCADE)
StandardPage.content_panels = Page.content_panels + [
FieldPanel('intro', classname="full"),
InlinePanel('carousel_items', label="Carousel items"),
FieldPanel('body', classname="full"),
InlinePanel('related_links', label="Related links"),
]
StandardPage.promote_panels = [
MultiFieldPanel(Page.promote_panels, "Common page configuration"),
ImageChooserPanel('feed_image'),
]
class StandardIndexPage(Page):
page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)
intro = RichTextField(blank=True)
feed_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
api_fields = (
'intro',
'feed_image',
'related_links',
)
search_fields = Page.search_fields + [
index.SearchField('intro'),
]
class StandardIndexPageRelatedLink(Orderable, AbstractRelatedLink):
page = ParentalKey('StandardIndexPage', related_name='related_links', on_delete=models.CASCADE)
StandardIndexPage.content_panels = Page.content_panels + [
FieldPanel('intro', classname="full"),
InlinePanel('related_links', label="Related links"),
]
StandardIndexPage.promote_panels = [
MultiFieldPanel(Page.promote_panels, "Common page configuration"),
ImageChooserPanel('feed_image'),
]
# Blog pages
class BlogEntryPage(Page):
page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)
body = RichTextField()
tags = ClusterTaggableManager(through='BlogEntryPageTag', blank=True)
date = models.DateField("Post date")
feed_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
api_fields = (
'body',
'tags',
'date',
'feed_image',
'carousel_items',
'related_links',
)
search_fields = Page.search_fields + [
index.SearchField('body'),
]
def get_blog_index(self):
# Find closest ancestor which is a blog index
        return BlogIndexPage.objects.ancestor_of(self).last()
class BlogEntryPageCarouselItem(Orderable, AbstractCarouselItem):
page = ParentalKey('BlogEntryPage', related_name='carousel_items', on_delete=models.CASCADE)
class BlogEntryPageRelatedLink(Orderable, AbstractRelatedLink):
page = ParentalKey('BlogEntryPage', related_name='related_links', on_delete=models.CASCADE)
class BlogEntryPageTag(TaggedItemBase):
content_object = ParentalKey('BlogEntryPage', related_name='tagged_items', on_delete=models.CASCADE)
BlogEntryPage.content_panels = Page.content_panels + [
FieldPanel('date'),
FieldPanel('body', classname="full"),
InlinePanel('carousel_items', label="Carousel items"),
InlinePanel('related_links', label="Related links"),
]
BlogEntryPage.promote_panels = [
MultiFieldPanel(Page.promote_panels, "Common page configuration"),
ImageChooserPanel('feed_image'),
FieldPanel('tags'),
]
class BlogIndexPage(Page):
page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)
intro = RichTextField(blank=True)
api_fields = (
'intro',
'related_links',
)
search_fields = Page.search_fields + [
index.SearchField('intro'),
]
def get_blog_entries(self):
# Get list of live blog pages that are descendants of this page
entries = BlogEntryPage.objects.descendant_of(self).live()
# Order by most recent date first
entries = entries.order_by('-date')
return entries
def get_context(self, request):
# Get blog entries
entries = self.get_blog_entries()
# Filter by tag
tag = request.GET.get('tag')
if tag:
entries = entries.filter(tags__name=tag)
paginator, entries = paginate(request, entries, page_key='page', per_page=10)
# Update template context
context = super(BlogIndexPage, self).get_context(request)
context['entries'] = entries
return context
class BlogIndexPageRelatedLink(Orderable, AbstractRelatedLink):
page = ParentalKey('BlogIndexPage', related_name='related_links', on_delete=models.CASCADE)
BlogIndexPage.content_panels = Page.content_panels + [
FieldPanel('intro', classname="full"),
InlinePanel('related_links', label="Related links"),
]
# Events pages
class EventPage(Page):
page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)
AUDIENCE_CHOICES = (
('public', "Public"),
('private', "Private"),
)
date_from = models.DateField("Start date")
date_to = models.DateField(
"End date",
null=True,
blank=True,
help_text="Not required if event is on a single day"
)
time_from = models.TimeField("Start time", null=True, blank=True)
time_to = models.TimeField("End time", null=True, blank=True)
audience = models.CharField(max_length=255, choices=AUDIENCE_CHOICES)
location = models.CharField(max_length=255)
body = RichTextField(blank=True)
cost = models.CharField(max_length=255)
signup_link = models.URLField(blank=True)
feed_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
api_fields = (
'date_from',
'date_to',
'time_from',
'time_to',
'audience',
'location',
'body',
'cost',
'signup_link',
'feed_image',
'carousel_items',
'related_links',
'speakers',
)
search_fields = Page.search_fields + [
index.SearchField('get_audience_display'),
index.SearchField('location'),
index.SearchField('body'),
]
def get_event_index(self):
# Find closest ancestor which is an event index
        return EventIndexPage.objects.ancestor_of(self).last()
class EventPageCarouselItem(Orderable, AbstractCarouselItem):
page = ParentalKey('EventPage', related_name='carousel_items', on_delete=models.CASCADE)
class EventPageRelatedLink(Orderable, AbstractRelatedLink):
page = ParentalKey('EventPage', related_name='related_links', on_delete=models.CASCADE)
class EventPageSpeaker(Orderable, AbstractLinkFields):
page = ParentalKey('EventPage', related_name='speakers', on_delete=models.CASCADE)
first_name = models.CharField("Name", max_length=255, blank=True)
last_name = models.CharField("Surname", max_length=255, blank=True)
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
api_fields = (
'first_name',
'last_name',
'image',
)
panels = [
FieldPanel('first_name'),
FieldPanel('last_name'),
ImageChooserPanel('image'),
MultiFieldPanel(AbstractLinkFields.panels, "Link"),
]
EventPage.content_panels = Page.content_panels + [
FieldPanel('date_from'),
FieldPanel('date_to'),
FieldPanel('time_from'),
FieldPanel('time_to'),
FieldPanel('location'),
FieldPanel('audience'),
FieldPanel('cost'),
FieldPanel('signup_link'),
InlinePanel('carousel_items', label="Carousel items"),
FieldPanel('body', classname="full"),
InlinePanel('speakers', label="Speakers"),
InlinePanel('related_links', label="Related links"),
]
EventPage.promote_panels = [
MultiFieldPanel(Page.promote_panels, "Common page configuration"),
ImageChooserPanel('feed_image'),
]
class EventIndexPage(Page):
page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)
intro = RichTextField(blank=True)
api_fields = (
'intro',
'related_links',
)
search_fields = Page.search_fields + [
index.SearchField('intro'),
]
def get_events(self):
# Get list of live event pages that are descendants of this page
events = EventPage.objects.descendant_of(self).live()
# Filter events list to get ones that are either
# running now or start in the future
events = events.filter(date_from__gte=date.today())
# Order by date
events = events.order_by('date_from')
return events
class EventIndexPageRelatedLink(Orderable, AbstractRelatedLink):
page = ParentalKey('EventIndexPage', related_name='related_links', on_delete=models.CASCADE)
EventIndexPage.content_panels = Page.content_panels + [
FieldPanel('intro', classname="full"),
InlinePanel('related_links', label="Related links"),
]
# Person page
class PersonPage(Page, ContactFieldsMixin):
page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
intro = RichTextField(blank=True)
biography = RichTextField(blank=True)
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
feed_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
api_fields = (
'first_name',
'last_name',
'intro',
'biography',
'image',
'feed_image',
'related_links',
) + ContactFieldsMixin.api_fields
search_fields = Page.search_fields + [
index.SearchField('first_name'),
index.SearchField('last_name'),
index.SearchField('intro'),
index.SearchField('biography'),
]
class PersonPageRelatedLink(Orderable, AbstractRelatedLink):
page = ParentalKey('PersonPage', related_name='related_links', on_delete=models.CASCADE)
PersonPage.content_panels = Page.content_panels + [
FieldPanel('first_name'),
FieldPanel('last_name'),
FieldPanel('intro', classname="full"),
FieldPanel('biography', classname="full"),
ImageChooserPanel('image'),
MultiFieldPanel(ContactFieldsMixin.panels, "Contact"),
InlinePanel('related_links', label="Related links"),
]
PersonPage.promote_panels = [
MultiFieldPanel(Page.promote_panels, "Common page configuration"),
ImageChooserPanel('feed_image'),
]
# Contact page
class ContactPage(Page, ContactFieldsMixin):
page_ptr = models.OneToOneField(Page, parent_link=True, related_name='+', on_delete=models.CASCADE)
body = RichTextField(blank=True)
feed_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
api_fields = (
'body',
'feed_image',
) + ContactFieldsMixin.api_fields
search_fields = Page.search_fields + [
index.SearchField('body'),
]
ContactPage.content_panels = Page.content_panels + [
FieldPanel('body', classname="full"),
MultiFieldPanel(ContactFieldsMixin.panels, "Contact"),
]
ContactPage.promote_panels = [
MultiFieldPanel(Page.promote_panels, "Common page configuration"),
ImageChooserPanel('feed_image'),
]
|
the-stack_0_109 | #!/usr/bin/env python
from os import path
import setuptools
def parse_requirements(filename):
""" load requirements from a pip requirements file """
lineiter = (line.strip() for line in open(filename))
return [line for line in lineiter if line and not line.startswith("#")]
from metaappscriptsdk import info
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst')) as f:
long_description = f.read()
packages = [
'metaappscriptsdk',
'metaappscriptsdk.logger',
'metaappscriptsdk.services',
'metaappscriptsdk.schedule',
'metaappscriptsdk.feed',
]
install_reqs = parse_requirements('requirements.txt')
reqs = install_reqs
setuptools.setup(
name=info.__package_name__,
version=info.__version__,
description='Meta App Scripts SDK',
long_description=long_description,
url='https://github.com/rw-meta/meta-app-script-py-sdk',
author='Artur Geraschenko',
author_email='[email protected]',
license='MIT',
classifiers=[
'Programming Language :: Python :: 3'
],
install_requires=reqs,
packages=packages,
package_data={'': ['LICENSE']},
package_dir={'metaappscriptsdk': 'metaappscriptsdk'},
include_package_data=True,
)
|
the-stack_0_110 | """
Module description:
"""
__version__ = '0.1'
__author__ = 'Vito Walter Anelli, Claudio Pomo'
__email__ = '[email protected], [email protected]'
import numpy as np
from ast import literal_eval as make_tuple
from tqdm import tqdm
from elliot.dataset.samplers import pointwise_pos_neg_sampler as pws
from elliot.recommender.neural.NeuMF.neural_matrix_factorization_model import NeuralMatrixFactorizationModel
from elliot.recommender.recommender_utils_mixin import RecMixin
from elliot.utils.write import store_recommendation
from elliot.recommender.base_recommender_model import BaseRecommenderModel
from elliot.recommender.base_recommender_model import init_charger
np.random.seed(42)
class NeuMF(RecMixin, BaseRecommenderModel):
r"""
Neural Collaborative Filtering
For further details, please refer to the `paper <https://arxiv.org/abs/1708.05031>`_
Args:
mf_factors: Number of MF latent factors
mlp_factors: Number of MLP latent factors
mlp_hidden_size: List of units for each layer
lr: Learning rate
dropout: Dropout rate
is_mf_train: Whether to train the MF embeddings
is_mlp_train: Whether to train the MLP layers
To include the recommendation model, add it to the config file adopting the following pattern:
.. code:: yaml
models:
NeuMF:
meta:
save_recs: True
epochs: 10
mf_factors: 10
mlp_factors: 10
mlp_hidden_size: (64,32)
lr: 0.001
dropout: 0.0
is_mf_train: True
is_mlp_train: True
"""
@init_charger
def __init__(self, data, config, params, *args, **kwargs):
self._random = np.random
self._sampler = pws.Sampler(self._data.i_train_dict)
self._params_list = [
("_learning_rate", "lr", "lr", 0.001, None, None),
("_mf_factors", "mf_factors", "mffactors", 10, int, None),
("_mlp_factors", "mlp_factors", "mlpfactors", 10, int, None),
("_mlp_hidden_size", "mlp_hidden_size", "mlpunits", "(64,32)", lambda x: list(make_tuple(str(x))), lambda x: self._batch_remove(str(x), " []").replace(",", "-")),
("_dropout", "dropout", "drop", 0, None, None),
("_is_mf_train", "is_mf_train", "mftrain", True, None, None),
("_is_mlp_train", "is_mlp_train", "mlptrain", True, None, None),
]
self.autoset_params()
if self._batch_size < 1:
self._batch_size = self._data.transactions
self._ratings = self._data.train_dict
self._sp_i_train = self._data.sp_i_train
self._i_items_set = list(range(self._num_items))
self._model = NeuralMatrixFactorizationModel(self._num_users, self._num_items, self._mf_factors,
self._mlp_factors, self._mlp_hidden_size,
self._dropout, self._is_mf_train, self._is_mlp_train,
self._learning_rate)
@property
def name(self):
return "NeuMF"\
+ "_e:" + str(self._epochs) \
+ "_bs:" + str(self._batch_size) \
+ f"_{self.get_params_shortcut()}"
def train(self):
if self._restore:
return self.restore_weights()
best_metric_value = 0
for it in range(self._epochs):
loss = 0
steps = 0
with tqdm(total=int(self._data.transactions // self._batch_size), disable=not self._verbose) as t:
for batch in self._sampler.step(self._data.transactions, self._batch_size):
steps += 1
loss += self._model.train_step(batch)
t.set_postfix({'loss': f'{loss.numpy() / steps:.5f}'})
t.update()
if not (it + 1) % self._validation_rate:
recs = self.get_recommendations(self.evaluator.get_needed_recommendations())
result_dict = self.evaluator.eval(recs)
self._results.append(result_dict)
print(f'Epoch {(it + 1)}/{self._epochs} loss {loss/steps:.5f}')
if self._results[-1][self._validation_k]["val_results"][self._validation_metric] > best_metric_value:
print("******************************************")
best_metric_value = self._results[-1][self._validation_k]["val_results"][self._validation_metric]
if self._save_weights:
self._model.save_weights(self._saving_filepath)
if self._save_recs:
store_recommendation(recs, self._config.path_output_rec_result + f"{self.name}-it:{it + 1}.tsv")
def get_recommendations(self, k: int = 100):
predictions_top_k = {}
for index, offset in enumerate(range(0, self._num_users, self._batch_size)):
offset_stop = min(offset + self._batch_size, self._num_users)
predictions = self._model.get_recs(
(
np.repeat(np.array(list(range(offset, offset_stop)))[:, None], repeats=self._num_items, axis=1),
np.array([self._i_items_set for _ in range(offset, offset_stop)])
)
)
v, i = self._model.get_top_k(predictions, self.get_train_mask(offset, offset_stop), k=k)
items_ratings_pair = [list(zip(map(self._data.private_items.get, u_list[0]), u_list[1]))
for u_list in list(zip(i.numpy(), v.numpy()))]
predictions_top_k.update(dict(zip(map(self._data.private_users.get,
range(offset, offset_stop)), items_ratings_pair)))
return predictions_top_k
|
the-stack_0_112 | class BuySellStock:
# @param prices, a list of stock prices
# @return index of buy and sell price
def choiceStocks(self, prices):
n = len(prices)
if n == 0:
return None, None
if n == 1:
return 0, 0
        maxPrice = prices[n - 1]
        mpIndex = n - 1
        bpIndex = n - 1  # fall back to the last index so bpIndex is always defined when no profitable trade exists
        maxProfit = 0
for price in range(n):
currPrice = prices[n - price - 1]
if currPrice > maxPrice:
maxPrice = currPrice
mpIndex = n - price - 1
currProfit = maxPrice - currPrice
if currProfit > maxProfit:
maxProfit = currProfit
bpIndex = n - price - 1
return bpIndex, mpIndex
# Driver code to test the program
run = BuySellStock()
print(run.choiceStocks([5,6,7,8,10,3,8,7,11,1,2,11]))
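# Hedged check (added note, not in the original file): scanning from the right,
# the best trade for the sample list above is to buy at index 9 (price 1) and
# sell at index 11 (price 11), so the driver is expected to print (9, 11).
# assert run.choiceStocks([5, 6, 7, 8, 10, 3, 8, 7, 11, 1, 2, 11]) == (9, 11)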
|
the-stack_0_114 | m = h = mu = 0
while True:
    print(25*'-')
    print('   REGISTER A PERSON')
    print(25*'-')
    i = int(input('Age: '))
    if i >= 18:
        m += 1
    while True:
        s = input('Sex: [M/F] ').strip().upper()[0]
        if s in 'MF':
            break
    print(25*'-')
    if s == 'M':
        h += 1
    if s == 'F' and i < 20:
        mu += 1
    while True:
        q = input('Do you want to continue? [Y/N] ').strip().upper()[0]
        if q in 'YN':
            break
    if q == 'N':
        break
print(f'====== END OF PROGRAM ======\nPeople aged 18 or over: {m}\nWe registered {h} men in total.\nAnd we have {mu} women under 20 years old.') |
the-stack_0_115 | # -*- coding: utf-8 -*-
"""
MTD Parser to sqlAlchemy model.
Creates a Python file side by side with the original MTD file.
Can be overloaded with a custom class to enhance/change available
functions. See pineboolib/pnobjectsfactory.py
"""
from pineboolib import application, logging
from pineboolib.application import file
import os
from typing import List, Union, TYPE_CHECKING
if TYPE_CHECKING:
from pineboolib.application.metadata import pnfieldmetadata, pntablemetadata # pragma: no cover
LOGGER = logging.get_logger(__name__)
RESERVER_WORDS = ["pass"]
def mtd_parse(
meta_or_name: Union[str, "pntablemetadata.PNTableMetaData"], path_mtd: str = ""
) -> str:
"""
Parse MTD into SqlAlchemy model.
"""
if application.PROJECT.conn_manager is None:
raise Exception("Project is not connected yet")
dest_file = "%s_model.py" % (
path_mtd
if isinstance(meta_or_name, str)
else "%s/cache/%s" % (application.PROJECT.tmpdir, meta_or_name.name())
)
if os.path.exists(dest_file):
return dest_file
if isinstance(meta_or_name, str):
metadata = application.PROJECT.conn_manager.manager().metadata(meta_or_name, True)
else:
metadata = meta_or_name
if metadata is None:
return ""
lines = _generate_model(metadata)
if not lines:
dest_file = ""
else:
_write_file(dest_file, lines)
return dest_file
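# Hedged usage sketch (added note, not part of the original module; the table
# name and path below are illustrative). Assuming a connected pineboolib
# project, passing a table name plus a path writes (or reuses) a
# "<path>_model.py" file:
#
#   model_path = mtd_parse("clientes", path_mtd="/tmp/clientes")
#   # -> "/tmp/clientes_model.py", or "" if no metadata could be loaded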
def _write_file(file_name: str, lines: List[str]) -> None:
"""Write lines to a file."""
file_ = open(file_name, "w", encoding="UTF-8")
file_.writelines([line if line.endswith("\n") else "%s\n" % line for line in lines])
file_.close()
def _get_meta(file_mtd: "file.File") -> List[str]:
"""Return list with meta."""
mtd_data_list: List[str] = []
if os.path.exists(file_mtd.path()):
mtd_data = application.PROJECT.conn_manager.manager().metadata(file_mtd.filename, True)
if mtd_data is not None:
mtd_data_list = _generate_model(mtd_data, False)
return mtd_data_list
def _generate_model(mtd_table: "pntablemetadata.PNTableMetaData", header: bool = True) -> List[str]:
"""
Create a list of lines from a mtd_table (pntablemetadata.PNTableMetaData).
"""
return _create_declaration(mtd_table, header)
def generate_field(field: "pnfieldmetadata.PNFieldMetaData") -> str:
"""
Get text representation for sqlAlchemy of a field type given its pnfieldmetadata.PNFieldMetaData.
"""
data: List[str] = []
# TYPE
# = "String"
ret = ""
type_ = field.type()
    if type_ in ("int", "serial"):
        ret = "sqlalchemy.Integer"
    elif type_ == "uint":
        ret = "sqlalchemy.BigInteger"
    elif type_ == "calculated":
        ret = "sqlalchemy.String"
# elif field.type() in ("double"):
# ret = "sqlalchemy.Numeric"
# ret += "(%s , %s)" % (field.partInteger(), field.partDecimal())
elif type_ == "double":
ret = "sqlalchemy.Float"
elif type_ in ("string", "stringlist", "pixmap"):
ret = "sqlalchemy.String"
if field.length():
ret += "(%s)" % field.length()
elif type_ in ("bool", "unlock"):
ret = "sqlalchemy.Boolean"
elif type_ == "timestamp":
ret = "sqlalchemy.DateTime"
elif type_ == "json":
ret = "sqlalchemy.types.JSON"
elif type_ == "time":
ret = "sqlalchemy.Time"
elif type_ == "date":
ret = "sqlalchemy.Date"
    elif type_ == "bytearray":
        ret = "sqlalchemy.LargeBinary"
    else:
        ret = "Unknown %s" % type_
data.append(ret)
if field.isPrimaryKey():
data.append("primary_key = True")
return ", ".join(data)
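# Hedged example (added note, not in the original module): for an integer
# primary-key field such as a typical "id" column, the call above would return
# the string "sqlalchemy.Integer, primary_key = True", which _create_declaration
# later splices into a sqlalchemy.Column(...) line.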
def generate_field_metadata(field: "pnfieldmetadata.PNFieldMetaData") -> List[str]:
"""Generate field data from a PNFieldMetaData."""
field_data: List = []
# NAME
field_data.append("'name' : '%s'" % field.name())
# ALIAS
if field.alias():
field_data.append("'alias' : '%s'" % field.alias().replace("'", '"'))
# PK
if field.isPrimaryKey():
field_data.append("'pk' : True")
# CK
if field.isCompoundKey():
field_data.append("'ck' : True")
# TYPE
field_relation: List[str] = []
field_data.append("'type' : '%s'" % field.type())
# LENGTH
if field.length():
field_data.append("'length' : %s" % field.length())
# REGEXP
if field.regExpValidator():
field_data.append("'regexp' : '%s'" % field.regExpValidator())
rel_list: List[str]
# RELATIONS 1M
for rel in field.relationList():
rel_list = []
rel_list.append("'card' : '%s'" % rel.cardinality())
rel_list.append("'table' : '%s'" % rel.foreignTable())
rel_list.append("'field' : '%s'" % rel.foreignField())
if rel.deleteCascade():
rel_list.append("'delc' : True")
if rel.updateCascade():
rel_list.append("'updc' : True")
if not rel.checkIn():
rel_list.append("'checkin' : False")
field_relation.append("{%s}" % ", ".join(rel_list))
# if field_relation:
# field_data.append("'relations' : [%s]" % ", ".join(field_relation))
# RELATIONS M1
if field.private._relation_m1:
rel = field.private._relation_m1
rel_list = []
rel_list.append("'card' : '%s'" % rel.cardinality())
rel_list.append("'table' : '%s'" % rel.foreignTable())
rel_list.append("'field' : '%s'" % rel.foreignField())
if rel.deleteCascade():
rel_list.append("'delC' : True")
if rel.updateCascade():
rel_list.append("'updC' : True")
if not rel.checkIn():
rel_list.append("'checkIn' : False")
field_relation.append("{%s}" % ", ".join(rel_list))
if field_relation:
field_data.append("'relations' : [%s]" % ", ".join(field_relation))
# ASSOCIATED
if field.private.associated_field_name:
field_data.append(
"'associated':{'with' : '%s', 'by' : '%s' }"
% (field.private.associated_field_filter_to, field.private.associated_field_name)
)
# UNIQUE
if field.isUnique():
field_data.append("'isunique' : True")
# ALLOW_NULL
if not field.allowNull():
field_data.append("'null' : False")
# DEFAULT_VALUE
if field.defaultValue() is not None:
value = (
field.defaultValue()
if field.type() in ["bool", "unlock", "int", "uint", "double", "serial", "json"]
else "'%s'" % field.defaultValue()
)
field_data.append("'default' : %s" % value)
# OUT_TRANSACTION
if field.outTransaction():
field_data.append("'outtransaction' : True")
# COUNTER
if field.isCounter():
field_data.append("'counter' : True")
# CALCULATED
if field.calculated():
field_data.append("'calculated' : True")
# FULLY_CALCULATED
if field.fullyCalculated():
field_data.append("'fullycalculated' : True")
# TRIMMED
if field.trimmed():
field_data.append("'trimmed' : True")
# VISIBLE
if not field.visible():
field_data.append("'visible' : False")
# VISIBLE_GRID
if not field.visibleGrid():
field_data.append("'visiblegrid' : False")
# EDITABLE
if not field.editable():
field_data.append("'editable' : False")
if field.type() == "double":
# PARTI
if field.partInteger():
field_data.append("'partI' : %s" % field.partInteger())
# PARTD
if field.partDecimal():
field_data.append("'partD' : %s" % field.partDecimal())
# INDEX
if field.isIndex():
field_data.append("'index' : True")
# OPTIONS_LIST
if field.optionsList():
texto = ""
for item in field.optionsList():
texto += "'%s', " % item
field_data.append("'optionslist' : [%s]" % texto)
# SEARCH_OPTIONS
if field.searchOptions():
texto = ""
for item in field.searchOptions():
texto += "'%s', " % item
field_data.append("'searchoptions' : [%s]" % texto)
return field_data
def use_mtd_fields(path_model: str) -> bool:
"""Return if models use mtd fields."""
file_ = open(path_model, "r", encoding="UTF-8")
lines = file_.readlines()
file_.close()
for line in lines:
if line.find("legacy_metadata") > -1:
return False
return True
def populate_fields(dest_file_name: str, mtd_name: str) -> str:
"""Populate models fields with mtd field."""
new_file_path: str = ""
if mtd_name in application.PROJECT.files.keys():
file_mtd = application.PROJECT.files[mtd_name]
file_ = open(dest_file_name, "r")
lines = file_.readlines()
file_.close()
new_lines: List[str] = []
for number, line in enumerate(list(lines)):
if line.find("__tablename__") > -1:
new_lines = lines[0 : number + 1] + _get_meta(file_mtd) + lines[number + 1 :]
break
if new_lines:
new_key = "%s_model.py" % file_mtd.filename[:-4]
conn = application.PROJECT.conn_manager.mainConn()
db_name = conn.DBName()
application.PROJECT.files[new_key] = file.File(
file_mtd.module,
"%s_model.py" % file_mtd.path(),
basedir=file_mtd.basedir,
sha=file_mtd.sha,
db_name=db_name,
)
application.PROJECT.files[new_key].filekey = "%s_model.py" % file_mtd.filekey
new_file_path = application.PROJECT.files[new_key].path()
if os.path.exists(new_file_path):
os.remove(new_file_path)
_write_file(new_file_path, new_lines)
return new_file_path
def _create_declaration(
mtd_table: "pntablemetadata.PNTableMetaData", header: bool = True
) -> List[str]:
"""Create metadata section."""
data: List[str] = []
list_data_field: List[str] = []
validator_list: List[str] = []
metadata_table: List = []
metadata_table.append("'name' : '%s'" % mtd_table.name())
metadata_table.append("'alias' : '%s'" % mtd_table.alias())
if mtd_table.isQuery():
metadata_table.append("'query':'%s'" % mtd_table.query())
if mtd_table.concurWarn():
metadata_table.append("'concurwarn': True")
if mtd_table.detectLocks():
metadata_table.append("'detectlocks':True")
if mtd_table.FTSFunction():
metadata_table.append("'ftsfunction' :'%s'" % mtd_table.FTSFunction())
try:
mtd_table.primaryKey()
except Exception as error: # noqa: F841
pass
field_list: List[List[str]] = []
pk_found = False
    for field in mtd_table.fieldList():  # Create the fields
        if field.isPrimaryKey():
            pk_found = True
        if field.name() in validator_list:
            LOGGER.warning(
                "Duplicated field %s found in %s.mtd. Skipping it.", field.name(), mtd_table.name()
            )
else:
field_data = []
field_data.append(" ")
if field.name() in RESERVER_WORDS:
field_data.append("%s_" % field.name())
else:
field_data.append(field.name())
field_data.append(" = sqlalchemy.Column('%s', " % field.name())
field_list.append(generate_field_metadata(field))
field_data.append(generate_field(field))
field_data.append(")")
validator_list.append(field.name())
if field.isPrimaryKey():
pk_found = True
list_data_field.append("".join(field_data))
meta_fields: List = []
for meta_field in field_list:
meta_fields.append("{%s}" % ", ".join(meta_field))
metadata_table.append(
"\n 'fields' : [\n %s\n ]" % ",\n ".join(meta_fields)
)
class_name = "%s%s" % (mtd_table.name()[0].upper(), mtd_table.name()[1:])
if header:
data.append("# -*- coding: utf-8 -*-")
data.append("# Translated with pineboolib %s" % application.PINEBOO_VER)
data.append(
'"""%s%s_model module."""' % (mtd_table.name()[0].upper(), mtd_table.name()[1:])
)
data.append("")
data.append("from pineboolib.application.database.orm import basemodel")
data.append("from pineboolib.qsa import qsa")
data.append("")
data.append("import sqlalchemy")
data.append("")
data.append("")
data.append("# @class_declaration Oficial")
data.append("class Oficial(basemodel.BaseModel): # type: ignore [misc] # noqa: F821")
data.append(' """Oficial class."""')
data.append(" __tablename__ = '%s'" % mtd_table.name()) # si query nombre query
data.append("")
else:
data.append("")
data.append("")
data.append(" # --- POPULATED WITH METADATA FIELDS ---")
data.append("")
data.append("")
data.append(" # --- Metadata --->")
data.append(" legacy_metadata = {%s}" % ", ".join(metadata_table))
data.append("\n")
data.append(" # <--- Metadata ---")
data.append("")
data.append("")
data.append(" # --- Fields --->")
data.append("")
for data_field in list_data_field:
data.append(data_field)
data.append("")
data.append(" # <--- Fields ---")
data.append("")
if header:
data.append("# @class_declaration %s" % class_name)
data.append(
"class %s(Oficial): # type: ignore [misc] # noqa: F821" % class_name
) # si query nombre query
data.append(' """%s class."""' % class_name)
data.append(" pass")
if not pk_found and not mtd_table.isQuery():
        LOGGER.warning(
            "Table %s has no primary key defined. The model will not be generated."
            % (mtd_table.name())
        )
data = []
return data
|
the-stack_0_116 | #!/usr/bin/python
# Tests if the SS segment override prefix is not explicitly produced when unnecessary
# Github issue: #9
# Author: Duncan (mrexodia)
from keystone import *
import regress
class TestX86(regress.RegressTest):
def runTest(self):
# Initialize Keystone engine
ks = Ks(KS_ARCH_X86, KS_MODE_32)
# Assemble to get back insn encoding & statement count
encoding1, _ = ks.asm(b"MOV EAX,DWORD PTR SS:[ESP+8]")
encoding2, _ = ks.asm(b"MOV EAX,DWORD PTR SS:[EBP+8]")
# Assert the result
self.assertEqual(encoding1, [ 0x8B, 0x44, 0x24, 0x08 ])
self.assertEqual(encoding2, [ 0x8B, 0x45, 0x08 ])
encoding, _ = ks.asm(b"MOV DWORD PTR SS:[EBP-0xC],0x1994000")
self.assertEqual(encoding, [ 0xC7, 0x45, 0xF4, 0x00, 0x40, 0x99, 0x01 ])
if __name__ == '__main__':
regress.main()
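# Hedged aside (added note, not part of the original test): the same check can
# be reproduced interactively without the regress harness, e.g.:
#   ks = Ks(KS_ARCH_X86, KS_MODE_32)
#   encoding, count = ks.asm(b"MOV EAX,DWORD PTR SS:[ESP+8]")
#   print(encoding)  # expected [0x8B, 0x44, 0x24, 0x08] -- no redundant SS (0x36) prefix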
|
the-stack_0_117 | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
"""
The main training/evaluation loop
Modified from: https://github.com/facebookresearch/deit
"""
import argparse
import datetime
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
import json
import os
from pathlib import Path
from timm.data import Mixup
from timm.models import create_model
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.scheduler import create_scheduler
from timm.optim import create_optimizer
from timm.utils import NativeScaler, get_state_dict, ModelEma
from datasets import build_dataset
from engine import train_one_epoch, evaluate
from losses import DistillationLoss
from samplers import RASampler
import utils
import xcit
def get_args_parser():
parser = argparse.ArgumentParser('XCiT training and evaluation script', add_help=False)
parser.add_argument('--batch-size', default=64, type=int)
parser.add_argument('--epochs', default=400, type=int)
# Model parameters
parser.add_argument('--model', default='xcit_s_12', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--input-size', default=224, type=int, help='images input size')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
help='Drop path rate (default: 0.1)')
parser.add_argument('--model-ema', action='store_true')
parser.add_argument('--no-model-ema', action='store_false', dest='model_ema')
parser.set_defaults(model_ema=True)
parser.add_argument('--model-ema-decay', type=float, default=0.99996, help='')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='')
# Optimizer parameters
    parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "adamw")')
parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
# Learning rate schedule parameters
    parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
                        help='LR scheduler (default: "cosine")')
parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
help='learning rate (default: 5e-4)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR',
help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
    parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
                        help='patience epochs for Plateau LR scheduler (default: 10)')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Augmentation parameters
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
    parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original". '
                             '(default: rand-m9-mstd0.5-inc1)')
parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='bicubic',
help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
parser.add_argument('--repeated-aug', action='store_true')
parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
parser.set_defaults(repeated_aug=True)
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
parser.add_argument('--cutmix', type=float, default=1.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# Distillation parameters
parser.add_argument('--teacher-model', default='regnety_160', type=str, metavar='MODEL',
help='Name of teacher model to train (default: "regnety_160"')
parser.add_argument('--teacher-path', type=str, default='')
parser.add_argument('--distillation-type', default='none', choices=['none', 'soft', 'hard'], type=str, help="")
parser.add_argument('--distillation-alpha', default=0.5, type=float, help="")
parser.add_argument('--distillation-tau', default=1.0, type=float, help="")
# Dataset parameters
parser.add_argument('--data-path', default='/datasets01/imagenet_full_size/061417/', type=str,
help='dataset path')
parser.add_argument('--data-set', default='IMNET', choices=['CIFAR10', 'CIFAR100', 'IMNET',
'INAT', 'INAT19', 'CARS', 'FLOWERS',
'IMNET22k'],
type=str, help='Image Net dataset path')
parser.add_argument('--inat-category', default='name',
choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'],
type=str, help='semantic granularity')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
parser.add_argument('--dist-eval', action='store_true', default=False, help='Enabling distributed evaluation')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin-mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem',
help='')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
parser.add_argument('--test-freq', default=1, type=int, help='Number of epochs between \
validation runs.')
parser.add_argument('--full_crop', action='store_true', help='use crop_ratio=1.0 instead of the\
default 0.875 (Used by CaiT).')
parser.add_argument("--pretrained", default=None, type=str, help='Path to pre-trained checkpoint')
parser.add_argument('--surgery', default=None, type=str, help='Path to checkpoint to copy the \
patch projection from. \
Can improve stability for very \
large models.')
return parser
def main(args):
utils.init_distributed_mode(args)
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
dataset_train, args.nb_classes = build_dataset(is_train=True, args=args)
dataset_val, _ = build_dataset(is_train=False, args=args)
if True: # args.distributed:
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
if args.repeated_aug:
sampler_train = RASampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
)
else:
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
)
if args.dist_eval:
if len(dataset_val) % num_tasks != 0:
print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
'This will slightly alter validation results as extra duplicate entries are added to achieve '
'equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(
dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False)
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=int(1.5 * args.batch_size),
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False
)
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
mixup_fn = Mixup(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.nb_classes)
print(f"Creating model: {args.model}")
model = create_model(
args.model,
pretrained=False,
num_classes=args.nb_classes,
drop_rate=args.drop,
drop_path_rate=args.drop_path,
drop_block_rate=None
)
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
if args.pretrained:
if args.pretrained.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.pretrained, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.pretrained, map_location='cpu')
checkpoint_model = checkpoint['model']
state_dict = model.state_dict()
for k in ['head.weight', 'head.bias']:
if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
model.load_state_dict(checkpoint_model, strict=True)
model.to(device)
if args.surgery:
checkpoint = torch.load(args.surgery, map_location='cpu')
checkpoint_model = checkpoint['model']
patch_embed_weights = {key.replace("patch_embed.", ""): value for key,
value in checkpoint['model'].items() if 'patch_embed' in key}
model.patch_embed.load_state_dict(patch_embed_weights)
for p in model.patch_embed.parameters():
p.requires_grad = False
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEma(
model,
decay=args.model_ema_decay,
device='cpu' if args.model_ema_force_cpu else '',
resume='')
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('number of params:', n_parameters)
linear_scaled_lr = args.lr * args.batch_size * utils.get_world_size() / 512.0
args.lr = linear_scaled_lr
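    # Hedged worked example (added note): with the default lr of 5e-4, a
    # per-process batch size of 64 and 8 distributed processes, the effective
    # rate stays at 5e-4 * 64 * 8 / 512 = 5e-4; doubling the global batch size
    # to 1024 would double the learning rate accordingly.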
optimizer = create_optimizer(args, model_without_ddp)
loss_scaler = NativeScaler()
lr_scheduler, _ = create_scheduler(args, optimizer)
criterion = LabelSmoothingCrossEntropy()
if args.mixup > 0.:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif args.smoothing:
criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
criterion = torch.nn.CrossEntropyLoss()
teacher_model = None
if args.distillation_type != 'none':
assert args.teacher_path, 'need to specify teacher-path when using distillation'
print(f"Creating teacher model: {args.teacher_model}")
teacher_model = create_model(
args.teacher_model,
pretrained=False,
num_classes=args.nb_classes,
global_pool='avg',
)
if args.teacher_path.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.teacher_path, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.teacher_path, map_location='cpu')
teacher_model.load_state_dict(checkpoint['model'])
teacher_model.to(device)
teacher_model.eval()
# wrap the criterion in our custom DistillationLoss, which
# just dispatches to the original criterion if args.distillation_type is 'none'
criterion = DistillationLoss(
criterion, teacher_model, args.distillation_type, args.distillation_alpha, args.distillation_tau
)
output_dir = Path(args.output_dir)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
resume_path = os.path.join(output_dir, 'checkpoint.pth')
if args.resume and os.path.exists(resume_path):
if args.resume.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location='cpu', check_hash=True)
else:
print("Loading from checkpoint ...")
checkpoint = torch.load(resume_path, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
args.start_epoch = checkpoint['epoch'] + 1
if args.model_ema:
utils._load_checkpoint_for_ema(model_ema, checkpoint['model_ema'])
if 'scaler' in checkpoint:
loss_scaler.load_state_dict(checkpoint['scaler'])
if args.eval:
test_stats = evaluate(data_loader_val, model, device)
print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
return
print(f"Start training for {args.epochs} epochs")
start_time = time.time()
max_accuracy = 0.0
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
train_stats = train_one_epoch(
model, criterion, data_loader_train,
optimizer, device, epoch, loss_scaler,
args.clip_grad, model_ema, mixup_fn,
surgery=args.surgery
)
lr_scheduler.step(epoch)
if args.output_dir:
checkpoint_paths = [output_dir / 'checkpoint.pth']
for checkpoint_path in checkpoint_paths:
utils.save_on_master({
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'epoch': epoch,
'model_ema': get_state_dict(model_ema),
'scaler': loss_scaler.state_dict(),
'args': args,
}, checkpoint_path)
if (epoch % args.test_freq == 0) or (epoch == args.epochs - 1):
test_stats = evaluate(data_loader_val, model, device)
if test_stats["acc1"] >= max_accuracy:
utils.save_on_master({
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'epoch': epoch,
'model_ema': get_state_dict(model_ema),
'args': args,
}, os.path.join(output_dir, 'best_model.pth'))
print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
max_accuracy = max(max_accuracy, test_stats["acc1"])
print(f'Max accuracy: {max_accuracy:.2f}%')
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
**{f'test_{k}': v for k, v in test_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
if args.output_dir and utils.is_main_process():
with (output_dir / "log.txt").open("a") as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
parser = argparse.ArgumentParser('XCiT training and evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
|
the-stack_0_118 | ###################################
# File Name : exception_performance.py
###################################
#!/usr/bin/python3
import os
import time
TRY_TEST_FILE="performance_try_file"
TRY_ELSE_TEST_FILE="performance_try_else_file"
def write_file_only_try():
try:
f = open(TRY_TEST_FILE, "w")
for i in range(10000000):
f.write(str(i))
f.close()
except:
print ("File open error")
finally:
os.remove(TRY_TEST_FILE)
def write_file_try_else():
try:
f = open(TRY_ELSE_TEST_FILE, "w")
except:
print ("File open error")
else:
for i in range(10000000):
f.write(str(i))
f.close()
finally:
os.remove(TRY_ELSE_TEST_FILE)
def check_runtime(func):
accumulate_time = 0
for i in range(10):
start = time.time()
func()
accumulate_time += (time.time() - start)
print ("Run time summary : %s" % str(accumulate_time / 10))
if __name__ == "__main__":
print ("=== Try Performance Test ===")
check_runtime(write_file_only_try)
print ("=== Try/Else Performance Test ===")
check_runtime(write_file_try_else)
|
the-stack_0_120 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
###########################################################
Runner Validation Test Suite for Cross-language Transforms
###########################################################
As per Beams's Portability Framework design, Cross-language transforms
should work out of the box. In spite of this, there always exists a
possibility of rough edges existing. It could be caused due to unpolished
implementation of any part of the execution code path, for example:
- Transform expansion [SDK]
- Pipeline construction [SDK]
- Cross-language artifact staging [Runner]
- Language specific serialization/deserialization of PCollection (and
other data types) [Runner/SDK]
In an effort to improve developer visibility into potential problems,
this test suite validates correct execution of 5 Core Beam transforms when
used as cross-language transforms within the Python SDK from any foreign SDK:
- ParDo
(https://beam.apache.org/documentation/programming-guide/#pardo)
- GroupByKey
(https://beam.apache.org/documentation/programming-guide/#groupbykey)
- CoGroupByKey
(https://beam.apache.org/documentation/programming-guide/#cogroupbykey)
- Combine
(https://beam.apache.org/documentation/programming-guide/#combine)
- Flatten
(https://beam.apache.org/documentation/programming-guide/#flatten)
- Partition
(https://beam.apache.org/documentation/programming-guide/#partition)
See Runner Validation Test Plan for Cross-language transforms at
https://docs.google.com/document/d/1xQp0ElIV84b8OCVz8CD2hvbiWdR8w4BvWxPTZJZA6NA
for further details.
"""
import logging
import os
import typing
import unittest
from nose.plugins.attrib import attr
import apache_beam as beam
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms.external import ImplicitSchemaPayloadBuilder
TEST_PREFIX_URN = "beam:transforms:xlang:test:prefix"
TEST_MULTI_URN = "beam:transforms:xlang:test:multi"
TEST_GBK_URN = "beam:transforms:xlang:test:gbk"
TEST_CGBK_URN = "beam:transforms:xlang:test:cgbk"
TEST_COMGL_URN = "beam:transforms:xlang:test:comgl"
TEST_COMPK_URN = "beam:transforms:xlang:test:compk"
TEST_FLATTEN_URN = "beam:transforms:xlang:test:flatten"
TEST_PARTITION_URN = "beam:transforms:xlang:test:partition"
class CrossLanguageTestPipelines(object):
def __init__(self, expansion_service=None):
self.expansion_service = expansion_service or (
'localhost:%s' % os.environ.get('EXPANSION_PORT'))
def run_prefix(self, pipeline):
"""
Target transform - ParDo
(https://beam.apache.org/documentation/programming-guide/#pardo)
Test scenario - Mapping elements from a single input collection to a
single output collection
Boundary conditions checked -
- PCollection<?> to external transforms
- PCollection<?> from external transforms
"""
with pipeline as p:
res = (
p
| beam.Create(['a', 'b']).with_output_types(str)
| beam.ExternalTransform(
TEST_PREFIX_URN,
ImplicitSchemaPayloadBuilder({'data': u'0'}),
self.expansion_service))
assert_that(res, equal_to(['0a', '0b']))
def run_multi_input_output_with_sideinput(self, pipeline):
"""
Target transform - ParDo
(https://beam.apache.org/documentation/programming-guide/#pardo)
Test scenario - Mapping elements from multiple input collections (main
and side) to multiple output collections (main and side)
Boundary conditions checked -
- PCollectionTuple to external transforms
- PCollectionTuple from external transforms
"""
with pipeline as p:
main1 = p | 'Main1' >> beam.Create(
['a', 'bb'], reshuffle=False).with_output_types(str)
main2 = p | 'Main2' >> beam.Create(
['x', 'yy', 'zzz'], reshuffle=False).with_output_types(str)
side = p | 'Side' >> beam.Create(['s']).with_output_types(str)
res = dict(
main1=main1, main2=main2, side=side) | beam.ExternalTransform(
TEST_MULTI_URN, None, self.expansion_service)
assert_that(res['main'], equal_to(['as', 'bbs', 'xs', 'yys', 'zzzs']))
assert_that(res['side'], equal_to(['ss']), label='CheckSide')
def run_group_by_key(self, pipeline):
"""
Target transform - GroupByKey
(https://beam.apache.org/documentation/programming-guide/#groupbykey)
Test scenario - Grouping a collection of KV<K,V> to a collection of
KV<K, Iterable<V>> by key
Boundary conditions checked -
- PCollection<KV<?, ?>> to external transforms
- PCollection<KV<?, Iterable<?>>> from external transforms
"""
with pipeline as p:
res = (
p
| beam.Create([(0, "1"), (0, "2"),
(1, "3")], reshuffle=False).with_output_types(
typing.Tuple[int, str])
| beam.ExternalTransform(TEST_GBK_URN, None, self.expansion_service)
| beam.Map(lambda x: "{}:{}".format(x[0], ','.join(sorted(x[1])))))
assert_that(res, equal_to(['0:1,2', '1:3']))
def run_cogroup_by_key(self, pipeline):
"""
Target transform - CoGroupByKey
(https://beam.apache.org/documentation/programming-guide/#cogroupbykey)
Test scenario - Grouping multiple input collections with keys to a
collection of KV<K, CoGbkResult> by key
Boundary conditions checked -
- KeyedPCollectionTuple<?> to external transforms
- PCollection<KV<?, Iterable<?>>> from external transforms
"""
with pipeline as p:
col1 = p | 'create_col1' >> beam.Create(
[(0, "1"), (0, "2"), (1, "3")], reshuffle=False).with_output_types(
typing.Tuple[int, str])
col2 = p | 'create_col2' >> beam.Create(
[(0, "4"), (1, "5"), (1, "6")], reshuffle=False).with_output_types(
typing.Tuple[int, str])
res = (
dict(col1=col1, col2=col2)
| beam.ExternalTransform(TEST_CGBK_URN, None, self.expansion_service)
| beam.Map(lambda x: "{}:{}".format(x[0], ','.join(sorted(x[1])))))
assert_that(res, equal_to(['0:1,2,4', '1:3,5,6']))
def run_combine_globally(self, pipeline):
"""
Target transform - Combine
(https://beam.apache.org/documentation/programming-guide/#combine)
Test scenario - Combining elements globally with a predefined simple
CombineFn
Boundary conditions checked -
- PCollection<?> to external transforms
- PCollection<?> from external transforms
"""
with pipeline as p:
res = (
p
| beam.Create([1, 2, 3]).with_output_types(int)
| beam.ExternalTransform(
TEST_COMGL_URN, None, self.expansion_service))
assert_that(res, equal_to([6]))
def run_combine_per_key(self, pipeline):
"""
Target transform - Combine
(https://beam.apache.org/documentation/programming-guide/#combine)
Test scenario - Combining elements per key with a predefined simple
merging function
Boundary conditions checked -
- PCollection<?> to external transforms
- PCollection<?> from external transforms
"""
with pipeline as p:
res = (
p
| beam.Create([('a', 1), ('a', 2),
('b', 3)]).with_output_types(typing.Tuple[str, int])
| beam.ExternalTransform(
TEST_COMPK_URN, None, self.expansion_service))
assert_that(res, equal_to([('a', 3), ('b', 3)]))
def run_flatten(self, pipeline):
"""
Target transform - Flatten
(https://beam.apache.org/documentation/programming-guide/#flatten)
Test scenario - Merging multiple collections into a single collection
Boundary conditions checked -
- PCollectionList<?> to external transforms
- PCollection<?> from external transforms
"""
with pipeline as p:
col1 = p | 'col1' >> beam.Create([1, 2, 3]).with_output_types(int)
col2 = p | 'col2' >> beam.Create([4, 5, 6]).with_output_types(int)
res = ((col1, col2)
| beam.ExternalTransform(
TEST_FLATTEN_URN, None, self.expansion_service))
assert_that(res, equal_to([1, 2, 3, 4, 5, 6]))
def run_partition(self, pipeline):
"""
Target transform - Partition
(https://beam.apache.org/documentation/programming-guide/#partition)
Test scenario - Splitting a single collection into multiple collections
with a predefined simple PartitionFn
Boundary conditions checked -
- PCollection<?> to external transforms
- PCollectionList<?> from external transforms
"""
with pipeline as p:
res = (
p
| beam.Create([1, 2, 3, 4, 5, 6]).with_output_types(int)
| beam.ExternalTransform(
TEST_PARTITION_URN, None, self.expansion_service))
assert_that(res['0'], equal_to([2, 4, 6]), label='check_even')
assert_that(res['1'], equal_to([1, 3, 5]), label='check_odd')
@attr('UsesCrossLanguageTransforms')
@unittest.skipUnless(
os.environ.get('EXPANSION_PORT'),
"EXPANSION_PORT environment var is not provided.")
class ValidateRunnerXlangTest(unittest.TestCase):
_multiprocess_can_split_ = True
def create_pipeline(self):
test_pipeline = TestPipeline()
test_pipeline.not_use_test_runner_api = True
return test_pipeline
def test_prefix(self, test_pipeline=None):
CrossLanguageTestPipelines().run_prefix(
test_pipeline or self.create_pipeline())
def test_multi_input_output_with_sideinput(self, test_pipeline=None):
CrossLanguageTestPipelines().run_multi_input_output_with_sideinput(
test_pipeline or self.create_pipeline())
def test_group_by_key(self, test_pipeline=None):
CrossLanguageTestPipelines().run_group_by_key(
test_pipeline or self.create_pipeline())
def test_cogroup_by_key(self, test_pipeline=None):
CrossLanguageTestPipelines().run_cogroup_by_key(
test_pipeline or self.create_pipeline())
def test_combine_globally(self, test_pipeline=None):
CrossLanguageTestPipelines().run_combine_globally(
test_pipeline or self.create_pipeline())
def test_combine_per_key(self, test_pipeline=None):
CrossLanguageTestPipelines().run_combine_per_key(
test_pipeline or self.create_pipeline())
def test_flatten(self, test_pipeline=None):
CrossLanguageTestPipelines().run_flatten(
test_pipeline or self.create_pipeline())
def test_partition(self, test_pipeline=None):
CrossLanguageTestPipelines().run_partition(
test_pipeline or self.create_pipeline())
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
the-stack_0_121 | # coding: utf-8
"""
EPIC API
REST API for interacting with EPIC (https://epic.zenotech.com) services. <br /> Please note this API is in BETA and does not yet contain all EPIC functionality. # noqa: E501
The version of the OpenAPI document: v2
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from epiccore.configuration import Configuration
class TeamDetails(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'int',
'name': 'str',
'number_of_members': 'int',
'user_role': 'str',
'members': 'str'
}
attribute_map = {
'id': 'id',
'name': 'name',
'number_of_members': 'number_of_members',
'user_role': 'user_role',
'members': 'members'
}
def __init__(self, id=None, name=None, number_of_members=None, user_role=None, members=None, local_vars_configuration=None): # noqa: E501
"""TeamDetails - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._name = None
self._number_of_members = None
self._user_role = None
self._members = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if number_of_members is not None:
self.number_of_members = number_of_members
if user_role is not None:
self.user_role = user_role
if members is not None:
self.members = members
@property
def id(self):
"""Gets the id of this TeamDetails. # noqa: E501
ID for this team # noqa: E501
:return: The id of this TeamDetails. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this TeamDetails.
ID for this team # noqa: E501
:param id: The id of this TeamDetails. # noqa: E501
:type id: int
"""
self._id = id
@property
def name(self):
"""Gets the name of this TeamDetails. # noqa: E501
Name of this team # noqa: E501
:return: The name of this TeamDetails. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this TeamDetails.
Name of this team # noqa: E501
:param name: The name of this TeamDetails. # noqa: E501
:type name: str
"""
if (self.local_vars_configuration.client_side_validation and
name is not None and len(name) < 1):
raise ValueError("Invalid value for `name`, length must be greater than or equal to `1`") # noqa: E501
self._name = name
@property
def number_of_members(self):
"""Gets the number_of_members of this TeamDetails. # noqa: E501
Number of members in this team # noqa: E501
:return: The number_of_members of this TeamDetails. # noqa: E501
:rtype: int
"""
return self._number_of_members
@number_of_members.setter
def number_of_members(self, number_of_members):
"""Sets the number_of_members of this TeamDetails.
Number of members in this team # noqa: E501
:param number_of_members: The number_of_members of this TeamDetails. # noqa: E501
:type number_of_members: int
"""
self._number_of_members = number_of_members
@property
def user_role(self):
"""Gets the user_role of this TeamDetails. # noqa: E501
Your role in this team # noqa: E501
:return: The user_role of this TeamDetails. # noqa: E501
:rtype: str
"""
return self._user_role
@user_role.setter
def user_role(self, user_role):
"""Sets the user_role of this TeamDetails.
Your role in this team # noqa: E501
:param user_role: The user_role of this TeamDetails. # noqa: E501
:type user_role: str
"""
self._user_role = user_role
@property
def members(self):
"""Gets the members of this TeamDetails. # noqa: E501
List of user ids and roles for members of this team # noqa: E501
:return: The members of this TeamDetails. # noqa: E501
:rtype: str
"""
return self._members
@members.setter
def members(self, members):
"""Sets the members of this TeamDetails.
List of user ids and roles for members of this team # noqa: E501
:param members: The members of this TeamDetails. # noqa: E501
:type members: str
"""
self._members = members
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TeamDetails):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, TeamDetails):
return True
return self.to_dict() != other.to_dict()
|
the-stack_0_123 | """InVEST specific code utils."""
import codecs
import math
import os
import contextlib
import logging
import tempfile
import shutil
from datetime import datetime
import time
import pandas
import numpy
from shapely.wkt import loads
from osgeo import gdal
from osgeo import osr
import pygeoprocessing
LOGGER = logging.getLogger(__name__)
LOG_FMT = (
"%(asctime)s "
"(%(name)s) "
"%(module)s.%(funcName)s(%(lineno)d) "
"%(levelname)s %(message)s")
# GDAL has 5 error levels, python's logging has 6. We skip logging.INFO.
# A dict clarifies the mapping between levels.
GDAL_ERROR_LEVELS = {
gdal.CE_None: logging.NOTSET,
gdal.CE_Debug: logging.DEBUG,
gdal.CE_Warning: logging.WARNING,
gdal.CE_Failure: logging.ERROR,
gdal.CE_Fatal: logging.CRITICAL,
}
# In GDAL 3.0 spatial references no longer ignore Geographic CRS Axis Order
# and conform to Lat first, Lon Second. Transforms expect (lat, lon) order
# as opposed to the GIS friendly (lon, lat). See
# https://trac.osgeo.org/gdal/wiki/rfc73_proj6_wkt2_srsbarn Axis order
# issues. SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER) swaps the
# axis order, which will use Lon,Lat order for Geographic CRS, but otherwise
# leaves Projected CRS alone
DEFAULT_OSR_AXIS_MAPPING_STRATEGY = osr.OAMS_TRADITIONAL_GIS_ORDER
@contextlib.contextmanager
def capture_gdal_logging():
"""Context manager for logging GDAL errors with python logging.
GDAL error messages are logged via python's logging system, at a severity
that corresponds to a log level in ``logging``. Error messages are logged
with the ``osgeo.gdal`` logger.
Args:
``None``
Returns:
``None``
"""
osgeo_logger = logging.getLogger('osgeo')
def _log_gdal_errors(err_level, err_no, err_msg):
"""Log error messages to osgeo.
All error messages are logged with reasonable ``logging`` levels based
on the GDAL error level.
Args:
err_level (int): The GDAL error level (e.g. ``gdal.CE_Failure``)
err_no (int): The GDAL error number. For a full listing of error
codes, see: http://www.gdal.org/cpl__error_8h.html
err_msg (string): The error string.
Returns:
``None``
"""
osgeo_logger.log(
level=GDAL_ERROR_LEVELS[err_level],
msg='[errno {err}] {msg}'.format(
err=err_no, msg=err_msg.replace('\n', ' ')))
gdal.PushErrorHandler(_log_gdal_errors)
try:
yield
finally:
gdal.PopErrorHandler()
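# Hedged usage sketch (added note, not part of the original module; the file
# path is illustrative): inside the context, GDAL failures are routed to the
# "osgeo" python logger instead of being printed to stderr.
#
#   with capture_gdal_logging():
#       dataset = gdal.Open('/path/that/does/not/exist.tif')  # error is logged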
def _format_time(seconds):
"""Render the integer number of seconds as a string. Returns a string."""
hours, remainder = divmod(seconds, 3600)
minutes, seconds = divmod(remainder, 60)
hours = int(hours)
minutes = int(minutes)
if hours > 0:
return "%sh %sm %ss" % (hours, minutes, seconds)
if minutes > 0:
return "%sm %ss" % (minutes, seconds)
return "%ss" % seconds
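# Hedged examples (added note, not in the original module):
#   _format_time(5)    -> "5s"
#   _format_time(125)  -> "2m 5s"
#   _format_time(3725) -> "1h 2m 5s"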
@contextlib.contextmanager
def prepare_workspace(
workspace, name, logging_level=logging.NOTSET, exclude_threads=None):
"""Prepare the workspace."""
if not os.path.exists(workspace):
os.makedirs(workspace)
logfile = os.path.join(
workspace,
'InVEST-{modelname}-log-{timestamp}.txt'.format(
modelname='-'.join(name.replace(':', '').split(' ')),
timestamp=datetime.now().strftime("%Y-%m-%d--%H_%M_%S")))
with capture_gdal_logging(), log_to_file(logfile,
exclude_threads=exclude_threads,
logging_level=logging_level):
with sandbox_tempdir(dir=workspace):
logging.captureWarnings(True)
LOGGER.info('Writing log messages to %s', logfile)
start_time = time.time()
try:
yield
finally:
LOGGER.info('Elapsed time: %s',
_format_time(round(time.time() - start_time, 2)))
logging.captureWarnings(False)
class ThreadFilter(logging.Filter):
"""Filters out log messages issued by the given thread.
Any log messages generated by a thread with the name matching the
threadname provided to the constructor will be excluded.
"""
def __init__(self, thread_name):
"""Construct a ThreadFilter.
Args:
thread_name (string): The thread name to filter on.
"""
logging.Filter.__init__(self)
self.thread_name = thread_name
def filter(self, record):
"""Filter the given log record.
Args:
record (log record): The log record to filter.
Returns:
True if the record should be included, false if not.
"""
if record.threadName == self.thread_name:
return False
return True
@contextlib.contextmanager
def log_to_file(logfile, exclude_threads=None, logging_level=logging.NOTSET,
log_fmt=LOG_FMT, date_fmt=None):
"""Log all messages within this context to a file.
Args:
logfile (string): The path to where the logfile will be written.
If there is already a file at this location, it will be
overwritten.
exclude_threads=None (list): If None, logging from all threads will be
included in the log. If a list, it must be a list of string thread
names that should be excluded from logging in this file.
logging_level=logging.NOTSET (int): The logging threshold. Log
messages with a level less than this will be automatically
excluded from the logfile. The default value (``logging.NOTSET``)
will cause all logging to be captured.
log_fmt=LOG_FMT (string): The logging format string to use. If not
provided, ``utils.LOG_FMT`` will be used.
date_fmt (string): The logging date format string to use.
If not provided, ISO8601 format will be used.
Yields:
``handler``: An instance of ``logging.FileHandler`` that
represents the file that is being written to.
Returns:
``None``
"""
try:
if os.path.exists(logfile):
LOGGER.warning('Logfile %s exists and will be overwritten',
logfile)
except SystemError:
# This started happening in Windows tests:
# SystemError: <built-in function stat> returned NULL without
# setting an error
# Looking at https://bugs.python.org/issue28040#msg276223, this might
# be a low-level python error.
pass
handler = logging.FileHandler(logfile, 'w', encoding='UTF-8')
formatter = logging.Formatter(log_fmt, date_fmt)
root_logger = logging.getLogger()
root_logger.setLevel(logging.NOTSET)
root_logger.addHandler(handler)
handler.setFormatter(formatter)
handler.setLevel(logging_level)
if exclude_threads is not None:
for threadname in exclude_threads:
thread_filter = ThreadFilter(threadname)
handler.addFilter(thread_filter)
try:
yield handler
finally:
handler.close()
root_logger.removeHandler(handler)
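# Hedged usage sketch (added note, not part of the original module; the
# filename is illustrative):
#
#   with log_to_file('invest_run.txt', logging_level=logging.INFO):
#       LOGGER.info('captured in invest_run.txt at INFO level and above')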
@contextlib.contextmanager
def sandbox_tempdir(suffix='', prefix='tmp', dir=None):
"""Create a temporary directory for this context and clean it up on exit.
Parameters are identical to those for :py:func:`tempfile.mkdtemp`.
When the context manager exits, the created temporary directory is
recursively removed.
Args:
suffix='' (string): a suffix for the name of the directory.
prefix='tmp' (string): the prefix to use for the directory name.
dir=None (string or None): If a string, a directory that should be
the parent directory of the new temporary directory. If None,
tempfile will determine the appropriate tempdir to use as the
parent folder.
Yields:
``sandbox`` (string): The path to the new folder on disk.
Returns:
``None``
"""
sandbox = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
try:
yield sandbox
finally:
try:
shutil.rmtree(sandbox)
except OSError:
LOGGER.exception('Could not remove sandbox %s', sandbox)
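# Hedged usage sketch (added note, the directory names are illustrative):
#
#   with sandbox_tempdir(prefix='scratch_', dir='workspace') as scratch_dir:
#       pass  # anything written under scratch_dir is removed on exit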
def make_suffix_string(args, suffix_key):
"""Make an InVEST appropriate suffix string.
Creates an InVEST appropriate suffix string given the args dictionary and
suffix key. In general, prepends an '_' when necessary and generates an
empty string when necessary.
Args:
args (dict): the classic InVEST model parameter dictionary that is
passed to `execute`.
suffix_key (string): the key used to index the base suffix.
Returns:
If `suffix_key` is not in `args`, or `args['suffix_key']` is ""
return "",
If `args['suffix_key']` starts with '_' return `args['suffix_key']`
else return '_'+`args['suffix_key']`
"""
try:
file_suffix = args[suffix_key]
if file_suffix != "" and not file_suffix.startswith('_'):
file_suffix = '_' + file_suffix
except KeyError:
file_suffix = ''
return file_suffix
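# Hedged examples (added note, not in the original module):
#   make_suffix_string({'results_suffix': 'test'}, 'results_suffix')   -> '_test'
#   make_suffix_string({'results_suffix': '_test'}, 'results_suffix')  -> '_test'
#   make_suffix_string({}, 'results_suffix')                           -> ''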
def exponential_decay_kernel_raster(expected_distance, kernel_filepath):
"""Create a raster-based exponential decay kernel.
The raster created will be a tiled GeoTiff, with 256x256 memory blocks.
Args:
expected_distance (int or float): The distance (in pixels) of the
kernel's radius, the distance at which the value of the decay
function is equal to `1/e`.
kernel_filepath (string): The path to the file on disk where this
kernel should be stored. If this file exists, it will be
overwritten.
Returns:
None
"""
max_distance = expected_distance * 5
kernel_size = int(numpy.round(max_distance * 2 + 1))
driver = gdal.GetDriverByName('GTiff')
kernel_dataset = driver.Create(
kernel_filepath.encode('utf-8'), kernel_size, kernel_size, 1,
gdal.GDT_Float32, options=[
'BIGTIFF=IF_SAFER', 'TILED=YES', 'BLOCKXSIZE=256',
'BLOCKYSIZE=256'])
# Make some kind of geotransform, it doesn't matter what but
# will make GIS libraries behave better if it's all defined
kernel_dataset.SetGeoTransform([0, 1, 0, 0, 0, -1])
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS('WGS84')
kernel_dataset.SetProjection(srs.ExportToWkt())
kernel_band = kernel_dataset.GetRasterBand(1)
kernel_band.SetNoDataValue(-9999)
cols_per_block, rows_per_block = kernel_band.GetBlockSize()
n_cols = kernel_dataset.RasterXSize
n_rows = kernel_dataset.RasterYSize
n_col_blocks = int(math.ceil(n_cols / float(cols_per_block)))
n_row_blocks = int(math.ceil(n_rows / float(rows_per_block)))
integration = 0.0
for row_block_index in range(n_row_blocks):
row_offset = row_block_index * rows_per_block
row_block_width = n_rows - row_offset
if row_block_width > rows_per_block:
row_block_width = rows_per_block
for col_block_index in range(n_col_blocks):
col_offset = col_block_index * cols_per_block
col_block_width = n_cols - col_offset
if col_block_width > cols_per_block:
col_block_width = cols_per_block
# Numpy creates index rasters as ints by default, which sometimes
# creates problems on 32-bit builds when we try to add Int32
# matrices to float64 matrices.
row_indices, col_indices = numpy.indices((row_block_width,
col_block_width),
dtype=float)
row_indices += float(row_offset - max_distance)
col_indices += float(col_offset - max_distance)
kernel_index_distances = numpy.hypot(
row_indices, col_indices)
kernel = numpy.where(
kernel_index_distances > max_distance, 0.0,
numpy.exp(-kernel_index_distances / expected_distance))
integration += numpy.sum(kernel)
kernel_band.WriteArray(kernel, xoff=col_offset,
yoff=row_offset)
# Need to flush the kernel's cache to disk before opening up a new Dataset
# object in iterblocks()
kernel_band.FlushCache()
kernel_dataset.FlushCache()
for block_data in pygeoprocessing.iterblocks(
(kernel_filepath, 1), offset_only=True):
kernel_block = kernel_band.ReadAsArray(**block_data)
kernel_block /= integration
kernel_band.WriteArray(kernel_block, xoff=block_data['xoff'],
yoff=block_data['yoff'])
kernel_band.FlushCache()
kernel_dataset.FlushCache()
kernel_band = None
kernel_dataset = None
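# Note on the kernel above: each pixel gets exp(-d / expected_distance) for
# distances d up to 5 * expected_distance (0 beyond that), and the raster is
# then normalized so all pixels sum to 1. A minimal call sketch (the output
# path is hypothetical):
#
#     exponential_decay_kernel_raster(10, 'kernel_10px.tif')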
def build_file_registry(base_file_path_list, file_suffix):
"""Combine file suffixes with key names, base filenames, and directories.
Args:
base_file_path_list (list): a list of (dict, path) tuples where
each dictionary has a 'file_key': 'basefilename' pair, or a
'file_key': list of 'basefilename's. 'path'
indicates the directory path to prepend to each base filename.
file_suffix (string): a string to append to every filename, can be
empty string
Returns:
dictionary of 'file_keys' from the dictionaries in
`base_file_tuple_list` mapping to full file paths with suffixes or
lists of file paths with suffixes depending on the original type of
the 'basefilename' pair.
Raises:
ValueError if there are duplicate file keys or duplicate file paths.
ValueError if a path is not a string or a list of strings.
"""
all_paths = set()
duplicate_keys = set()
duplicate_paths = set()
f_reg = {}
def _build_path(base_filename, path):
"""Internal helper to avoid code duplication."""
pre, post = os.path.splitext(base_filename)
full_path = os.path.join(path, pre+file_suffix+post)
# Check for duplicate keys or paths
if full_path in all_paths:
duplicate_paths.add(full_path)
else:
all_paths.add(full_path)
return full_path
for base_file_dict, path in base_file_path_list:
for file_key, file_payload in base_file_dict.items():
# check for duplicate keys
if file_key in f_reg:
duplicate_keys.add(file_key)
else:
# handle whether the payload is a single filename or a list of filenames
if isinstance(file_payload, str):
full_path = _build_path(file_payload, path)
f_reg[file_key] = full_path
elif isinstance(file_payload, list):
f_reg[file_key] = []
for filename in file_payload:
full_path = _build_path(filename, path)
f_reg[file_key].append(full_path)
else:
raise ValueError(
"Unknown type in base_file_dict[%s]=%s" % (
file_key, path))
if duplicate_paths or duplicate_keys:
raise ValueError(
"Cannot consolidate because of duplicate paths or keys: "
"duplicate_keys: %s duplicate_paths: %s" % (
duplicate_keys, duplicate_paths))
return f_reg
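# Illustrative input/output for build_file_registry, using a suffix as
# produced by make_suffix_string (file and directory names are made up):
#
#     reg = build_file_registry(
#         [({'lulc': 'lulc.tif', 'reports': ['a.csv', 'b.csv']}, 'workspace')],
#         '_scenario1')
#     # reg == {'lulc': 'workspace/lulc_scenario1.tif',
#     #         'reports': ['workspace/a_scenario1.csv',
#     #                     'workspace/b_scenario1.csv']}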
def build_lookup_from_csv(
table_path, key_field, column_list=None, to_lower=True):
"""Read a CSV table into a dictionary indexed by ``key_field``.
Creates a dictionary from a CSV whose keys are unique entries in the CSV
table under the column named by ``key_field`` and values are dictionaries
indexed by the other columns in ``table_path`` including ``key_field``
whose values are the values on that row of the CSV table.
If an entire row is NA/NaN (including ``key_field``) then it is dropped
from the table and a warning is given of the dropped rows.
Args:
table_path (string): path to a CSV file containing at
least the header key_field
key_field (string): a column in the CSV file at `table_path` that
can uniquely identify each row in the table; it sets the row index.
column_list (list): a list of column names to subset from the CSV
file, default=None
to_lower (bool): if True, converts all unicode in the CSV,
including headers and values to lowercase, otherwise uses raw
string values. default=True.
Returns:
lookup_dict (dict): a dictionary of the form
{key_field_0: {csv_header_0: value0, csv_header_1: value1...},
key_field_1: {csv_header_0: valuea, csv_header_1: valueb...}}
if ``to_lower`` all strings including key_fields and values are
converted to lowercase unicode.
Raises:
ValueError
If a ValueError occurs during the conversion to a dictionary.
KeyError
If ``key_field`` is not present during the ``set_index`` call.
"""
# Reassign to avoid mutation
col_list = column_list
# if a list of columns are provided to use and return, make sure
# 'key_field' is one of them.
if col_list and key_field not in col_list:
col_list.append(key_field)
table = read_csv_to_dataframe(
table_path, to_lower=to_lower, sep=None, index_col=False,
engine='python')
# if 'to_lower`, case handling is done before trying to access the data.
# the columns are stripped of leading/trailing whitespace in
# ``read_csv_to_dataframe``, and also lowercased if ``to_lower`` so we only
# need to convert the rest of the table.
if to_lower:
key_field = key_field.lower()
# lowercase column names
if col_list:
col_list = [col.lower() for col in col_list]
# lowercase values
table = table.applymap(
lambda x: x.lower() if isinstance(x, str) else x)
# Set 'key_field' as the index of the dataframe
try:
table.set_index(key_field, drop=False, inplace=True)
except KeyError:
# If 'key_field' is not a column then KeyError is raised for using
# it as the index column
LOGGER.error(f"'key_field' : '{key_field}' could not be found as a"
f" column in the table. Table path: {table_path}.")
raise
# Subset dataframe by columns if desired
if col_list:
table = table.loc[:, col_list]
# look for NaN values and warn if any are found.
table_na = table.isna()
if table_na.values.any():
LOGGER.warning(
f"Empty or NaN values were found in the table: {table_path}.")
# look to see if an entire row is NA values
table_na_rows = table_na.all(axis=1)
na_rows = table_na_rows.index[table_na_rows].tolist()
# if a completely empty row, drop it
if na_rows:
LOGGER.warning(
"Encountered an entirely blank row on line(s)"
f" {[x+2 for x in na_rows]}. Dropping rows from table.")
table.dropna(how="all", inplace=True)
# fill the rest of empty or NaN values with empty string
table.fillna(value="", inplace=True)
try:
lookup_dict = table.to_dict(orient='index')
except ValueError:
# If 'key_field' is not unique then a value error is raised.
LOGGER.error(f"The 'key_field' : '{key_field}' column values are not"
f" unique: {table.index.tolist()}")
raise
return lookup_dict
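# Sketch of what build_lookup_from_csv returns for a hypothetical table with
# columns 'lucode', 'name' and 'cn_a':
#
#     lookup = build_lookup_from_csv('biophysical.csv', 'lucode')
#     # lookup == {1: {'lucode': 1, 'name': 'forest', 'cn_a': 30},
#     #            2: {'lucode': 2, 'name': 'urban', 'cn_a': 80}, ...}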
def read_csv_to_dataframe(
path, to_lower=False, sep=None, encoding=None, engine='python',
**kwargs):
"""Return a dataframe representation of the CSV.
Wrapper around ``pandas.read_csv`` that standardizes the column names by
stripping leading/trailing whitespace and optionally making all lowercase.
This helps avoid common errors caused by user-supplied CSV files with
column names that don't exactly match the specification.
Args:
path (string): path to a CSV file
to_lower (bool): if True, convert all column names to lowercase
sep: separator to pass to pandas.read_csv. Defaults to None, which
lets the Python engine infer the separator (if engine='python').
encoding (string): name of encoding codec to pass to `pandas.read_csv`.
Defaults to None. Setting engine='python' when encoding=None allows
a lot of non-UTF8 encodings to be read without raising an error.
Any special characters in other encodings may get replaced with the
replacement character.
If encoding=None, and the file begins with a BOM, the encoding gets
set to 'utf-8-sig'; otherwise the BOM causes an error.
engine (string): kwarg for pandas.read_csv: 'c', 'python', or None.
Defaults to 'python' (see note about encoding).
**kwargs: any kwargs that are valid for ``pandas.read_csv``
Returns:
pandas.DataFrame with the contents of the given CSV
"""
# Check if the file encoding is UTF-8 BOM first
# allow encoding kwarg to override this if it's provided
if not encoding and has_utf8_bom(path):
encoding = 'utf-8-sig'
try:
dataframe = pandas.read_csv(path, engine=engine, encoding=encoding,
sep=sep, **kwargs)
except UnicodeDecodeError as error:
LOGGER.error(
f'{path} must be encoded as utf-8 or ASCII')
raise error
# this won't work on integer types, which happens if you set header=None
# however, there's little reason to use this function if there's no header
dataframe.columns = dataframe.columns.str.strip()
if to_lower:
dataframe.columns = dataframe.columns.str.lower()
return dataframe
def make_directories(directory_list):
"""Create directories in `directory_list` if they do not already exist."""
if not isinstance(directory_list, list):
raise ValueError(
"Expected `directory_list` to be an instance of `list`, "
"got type %s instead" % type(directory_list))
for path in directory_list:
# From http://stackoverflow.com/a/14364249/42897
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
def mean_pixel_size_and_area(pixel_size_tuple):
"""Return the mean absolute pixel size and pixel area, raising if not square.
Args:
pixel_size_tuple (tuple): a 2 tuple indicating the x/y size of a
pixel.
Returns:
tuple of (mean absolute average of pixel_size, area of pixel size)
Raises:
ValueError if the dimensions of pixel_size_tuple are not almost
square.
"""
x_size, y_size = abs(pixel_size_tuple[0]), abs(pixel_size_tuple[1])
if not numpy.isclose(x_size, y_size):
raise ValueError(
"pixel size is not square. dimensions: %s" % repr(
pixel_size_tuple))
return (x_size, x_size*y_size)
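# Example with arbitrary values: a 30 x -30 unit pixel gives
#
#     mean_pixel_size_and_area((30, -30))  # (30, 900)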
def create_coordinate_transformer(
base_ref, target_ref,
osr_axis_mapping_strategy=DEFAULT_OSR_AXIS_MAPPING_STRATEGY):
"""Create a spatial reference coordinate transformation function.
Args:
base_ref (osr spatial reference): A defined spatial reference to
transform FROM
target_ref (osr spatial reference): A defined spatial reference
to transform TO
osr_axis_mapping_strategy (int): OSR axis mapping strategy for
``SpatialReference`` objects. Defaults to
``utils.DEFAULT_OSR_AXIS_MAPPING_STRATEGY``. This parameter should
not be changed unless you know what you are doing.
Returns:
An OSR Coordinate Transformation object
"""
# Make a copy of the base and target spatial references to avoid side
# effects from setting the axis mapping strategy on the caller's objects
base_ref_wkt = base_ref.ExportToWkt()
target_ref_wkt = target_ref.ExportToWkt()
base_ref_copy = osr.SpatialReference()
target_ref_copy = osr.SpatialReference()
base_ref_copy.ImportFromWkt(base_ref_wkt)
target_ref_copy.ImportFromWkt(target_ref_wkt)
base_ref_copy.SetAxisMappingStrategy(osr_axis_mapping_strategy)
target_ref_copy.SetAxisMappingStrategy(osr_axis_mapping_strategy)
transformer = osr.CreateCoordinateTransformation(
base_ref_copy, target_ref_copy)
return transformer
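# A minimal usage sketch (the EPSG codes are chosen only for illustration;
# the input axis order depends on the axis mapping strategy):
#
#     base_srs = osr.SpatialReference()
#     base_srs.ImportFromEPSG(4326)       # WGS84
#     target_srs = osr.SpatialReference()
#     target_srs.ImportFromEPSG(32631)    # UTM zone 31N
#     transformer = create_coordinate_transformer(base_srs, target_srs)
#     x, y, z = transformer.TransformPoint(3.0, 50.0)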
def _assert_vectors_equal(
expected_vector_path, actual_vector_path, field_value_atol=1e-3):
"""Assert two vectors are equal.
Assert spatial reference, feature count, geometries, field names, and
values are equal with no respect to order of field names or geometries.
Args:
actual_vector_path (string): path on disk to a gdal Vector dataset.
expected_vector_path (string): path on disk to a gdal Vector dataset
to use as the ground truth.
field_value_atol (float): the absolute tolerance for comparing field
attribute values, default=1e-3.
Returns:
None on success
Raises:
AssertionError
If vector projections, feature counts, field names, or geometries
do not match.
"""
try:
# Open vectors
actual_vector = gdal.OpenEx(actual_vector_path, gdal.OF_VECTOR)
actual_layer = actual_vector.GetLayer()
expected_vector = gdal.OpenEx(expected_vector_path, gdal.OF_VECTOR)
expected_layer = expected_vector.GetLayer()
# Check projections
expected_projection = expected_layer.GetSpatialRef()
expected_projection_wkt = expected_projection.ExportToWkt()
actual_projection = actual_layer.GetSpatialRef()
actual_projection_wkt = actual_projection.ExportToWkt()
if expected_projection_wkt != actual_projection_wkt:
raise AssertionError(
"Vector projections are not the same. \n"
f"Expected projection wkt: {expected_projection_wkt}. \n"
f"Actual projection wkt: {actual_projection_wkt}. ")
# Check feature count
actual_feat_count = actual_layer.GetFeatureCount()
expected_feat_count = expected_layer.GetFeatureCount()
if expected_feat_count != actual_feat_count:
raise AssertionError(
"Vector feature counts are not the same. \n"
f"Expected feature count: {expected_feat_count}. \n"
f"Actual feature count: {actual_feat_count}. ")
# Check field names
expected_field_names = [field.name for field in expected_layer.schema]
actual_field_names = [field.name for field in actual_layer.schema]
if sorted(expected_field_names) != sorted(actual_field_names):
raise AssertionError(
"Vector field names are not the same. \n"
f"Expected field names: {sorted(expected_field_names)}. \n"
f"Actual field names: {sorted(actual_field_names)}. ")
# Check field values and geometries
for expected_feature in expected_layer:
fid = expected_feature.GetFID()
expected_values = [
expected_feature.GetField(field)
for field in expected_field_names]
actual_feature = actual_layer.GetFeature(fid)
actual_values = [
actual_feature.GetField(field)
for field in expected_field_names]
for av, ev in zip(actual_values, expected_values):
if av is not None:
# Number comparison
if isinstance(av, int) or isinstance(av, float):
if not numpy.allclose(numpy.array([av]),
numpy.array([ev]),
atol=field_value_atol):
raise AssertionError(
"Vector field values are not equal: \n"
f"Expected value: {ev}. \n"
f"Actual value: {av}. ")
# String and other comparison
else:
if av != ev:
raise AssertionError(
"Vector field values are not equal. \n"
f"Expected value : {ev}. \n"
f"Actual value : {av}. ")
else:
if ev is not None:
raise AssertionError(
"Vector field values are not equal: \n"
f"Expected value: {ev}. \n"
f"Actual value: {av}. ")
expected_geom = expected_feature.GetGeometryRef()
expected_geom_wkt = expected_geom.ExportToWkt()
actual_geom = actual_feature.GetGeometryRef()
actual_geom_wkt = actual_geom.ExportToWkt()
expected_geom_shapely = loads(expected_geom_wkt)
actual_geom_shapely = loads(actual_geom_wkt)
# Try an exact geometry comparison, which allows for different
# vertex ordering
geoms_equal = expected_geom_shapely.equals(actual_geom_shapely)
if not geoms_equal:
# Try almost_equal allowing for precision differences
geoms_almost_eq = expected_geom_shapely.almost_equals(
actual_geom_shapely)
if not geoms_almost_eq:
raise AssertionError(
"Vector geometry assertion fail. \n"
f"Expected geometry: {expected_geom_wkt}. \n"
f"Actual geometry: {actual_geom_wkt}. ")
expected_feature = None
actual_feature = None
finally:
actual_layer = None
actual_vector = None
expected_layer = None
expected_vector = None
return None
def has_utf8_bom(textfile_path):
"""Determine if the text file has a UTF-8 byte-order marker.
Args:
textfile_path (str): The path to a file on disk.
Returns:
A bool indicating whether the textfile has a BOM. If ``True``, a BOM
is present.
"""
with open(textfile_path, 'rb') as file_obj:
first_line = file_obj.readline()
return first_line.startswith(codecs.BOM_UTF8)
def reclassify_raster(
raster_path_band, value_map, target_raster_path, target_datatype,
target_nodata, error_details):
"""A wrapper function for calling ``pygeoprocessing.reclassify_raster``.
This wrapper function is helpful when added as a ``TaskGraph.task`` so
a better error message can be provided to the users if a
``pygeoprocessing.ReclassificationMissingValuesError`` is raised.
Args:
raster_path_band (tuple): a tuple including file path to a raster
and the band index to operate over. ex: (path, band_index)
value_map (dictionary): a dictionary of values of
{source_value: dest_value, ...} where source_value's type is the
same as the values in ``base_raster_path`` at band ``band_index``.
Must contain at least one value.
target_raster_path (string): target raster output path; overwritten if
it exists
target_datatype (gdal type): the numerical type for the target raster
target_nodata (numerical type): the nodata value for the target raster
Must be the same type as target_datatype
error_details (dict): a dictionary with key value pairs that provide
more context for a raised
``pygeoprocessing.ReclassificationMissingValuesError``.
keys must be {'raster_name', 'column_name', 'table_name'}. The
values for each key represent:
'raster_name' - string for the raster name being reclassified
'column_name' - name of the table column that ``value_map``
dictionary keys came from.
'table_name' - table name that ``value_map`` came from.
Returns:
None
Raises:
ValueError if a pixel value from ``raster_path_band`` is not a key in
``value_map`` (this wrapper always calls with ``values_required=True``).
"""
# Error early if 'error_details' keys are invalid
raster_name = error_details['raster_name']
column_name = error_details['column_name']
table_name = error_details['table_name']
try:
pygeoprocessing.reclassify_raster(
raster_path_band, value_map, target_raster_path, target_datatype,
target_nodata, values_required=True)
except pygeoprocessing.ReclassificationMissingValuesError as err:
error_message = (
f"Values in the {raster_name} raster were found that are not"
f" represented under the '{column_name}' column of the"
f" {table_name} table. The missing values found in the"
f" {raster_name} raster but not the table are:"
f" {err.missing_values}.")
raise ValueError(error_message)
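# Usage sketch for the reclassify_raster wrapper (the paths, value map and
# error_details below are hypothetical):
#
#     reclassify_raster(
#         ('lulc.tif', 1), {1: 0.5, 2: 0.8}, 'biophysical.tif',
#         gdal.GDT_Float32, -1.0,
#         {'raster_name': 'LULC', 'column_name': 'lucode',
#          'table_name': 'Biophysical'})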
|
the-stack_0_125 | # -*- coding: utf-8 -*-
# python std lib
import random
# rediscluster imports
from .crc import crc16
from .exceptions import RedisClusterException, RedisClusterConfigError
# 3rd party imports
from redis import Redis
from redis._compat import unicode, long, basestring
from redis.connection import Encoder
from redis import ConnectionError, TimeoutError, ResponseError
class NodeManager(object):
"""
"""
RedisClusterHashSlots = 16384
def __init__(self, startup_nodes=None, reinitialize_steps=None, skip_full_coverage_check=False, nodemanager_follow_cluster=False,
host_port_remap=None, **connection_kwargs):
"""
:skip_full_coverage_check:
Skips the check of cluster-require-full-coverage config, useful for clusters
without the CONFIG command (like aws)
:nodemanager_follow_cluster:
During initialization the node manager will try the last set of nodes that
it was operating on. This allows the client to drift alongside the cluster
if the cluster nodes move around a lot.
"""
self.connection_kwargs = connection_kwargs
self.nodes = {}
self.slots = {}
self.startup_nodes = [] if startup_nodes is None else startup_nodes
self.orig_startup_nodes = [node for node in self.startup_nodes]
self.reinitialize_counter = 0
self.reinitialize_steps = reinitialize_steps or 25
self._skip_full_coverage_check = skip_full_coverage_check
self.nodemanager_follow_cluster = nodemanager_follow_cluster
self.encoder = Encoder(
connection_kwargs.get('encoding', 'utf-8'),
connection_kwargs.get('encoding_errors', 'strict'),
connection_kwargs.get('decode_responses', False)
)
self._validate_host_port_remap(host_port_remap)
self.host_port_remap = host_port_remap
if not self.startup_nodes:
raise RedisClusterException("No startup nodes provided")
def _validate_host_port_remap(self, host_port_remap):
"""
Helper method that validates all entries in the host_port_remap config.
"""
if host_port_remap is None:
# Nothing to validate if config not set
return
if not isinstance(host_port_remap, list):
raise RedisClusterConfigError("host_port_remap must be a list")
for item in host_port_remap:
if not isinstance(item, dict):
raise RedisClusterConfigError("items inside host_port_remap list must be of dict type")
# If we have from_host, we must have a to_host option to allow for translation to work
if ('from_host' in item and 'to_host' not in item) or ('from_host' not in item and 'to_host' in item):
raise RedisClusterConfigError("Both from_host and to_host must be present in remap item if either is defined")
if ('from_port' in item and 'to_port' not in item) or ('from_port' not in item and 'to_port' in item):
raise RedisClusterConfigError("Both from_port and to_port must be present in remap item")
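# Example of a host_port_remap config that passes the validation above,
# e.g. for a cluster that announces internal addresses (all values are
# placeholders):
#
#     NodeManager(
#         startup_nodes=[{'host': '10.0.0.2', 'port': 16379}],
#         host_port_remap=[{'from_host': '172.18.0.2', 'to_host': '10.0.0.2',
#                           'from_port': 6379, 'to_port': 16379}])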
def keyslot(self, key):
"""
Calculate keyslot for a given key.
Tuned for compatibility with python 2.7.x
"""
k = self.encoder.encode(key)
start = k.find(b"{")
if start > -1:
end = k.find(b"}", start + 1)
if end > -1 and end != start + 1:
k = k[start + 1:end]
return crc16(k) % self.RedisClusterHashSlots
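# Illustrative keyslot behaviour for an initialized NodeManager ``nm``
# (key names are arbitrary):
#
#     nm.keyslot('foo')        # crc16(b'foo') % 16384
#     nm.keyslot('{user1}.a')  # only b'user1' is hashed, so this key and
#     nm.keyslot('{user1}.b')  # this one map to the same slot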
def node_from_slot(self, slot):
"""
"""
for node in self.slots[slot]:
if node['server_type'] == 'master':
return node
def all_nodes(self):
"""
"""
for node in self.nodes.values():
yield node
def all_masters(self):
"""
"""
for node in self.nodes.values():
if node["server_type"] == "master":
yield node
def random_startup_node(self):
"""
"""
random.shuffle(self.startup_nodes)
return self.startup_nodes[0]
def random_startup_node_ittr(self):
"""
Generator that yields a random startup node on each iteration, indefinitely.
"""
while True:
yield random.choice(self.startup_nodes)
def random_node(self):
"""
"""
key = random.choice(list(self.nodes.keys()))
return self.nodes[key]
def get_redis_link(self, host, port, decode_responses=False):
"""
"""
allowed_keys = (
'host',
'port',
'db',
'username',
'password',
'socket_timeout',
'socket_connect_timeout',
'socket_keepalive',
'socket_keepalive_options',
'connection_pool',
'unix_socket_path',
'encoding',
'encoding_errors',
'charset',
'errors',
'decode_responses',
'retry_on_timeout',
'ssl',
'ssl_keyfile',
'ssl_certfile',
'ssl_cert_reqs',
'ssl_ca_certs',
'max_connections',
)
disabled_keys = (
'host',
'port',
'decode_responses',
)
connection_kwargs = {k: v for k, v in self.connection_kwargs.items() if k in set(allowed_keys) - set(disabled_keys)}
return Redis(host=host, port=port, decode_responses=decode_responses, **connection_kwargs)
def initialize(self):
"""
Init the slots cache by asking all startup nodes what the current cluster configuration is
"""
nodes_cache = {}
tmp_slots = {}
all_slots_covered = False
disagreements = []
startup_nodes_reachable = False
nodes = self.orig_startup_nodes
# With this option the client will attempt to connect to any of the previous set of nodes instead of the original set of nodes
if self.nodemanager_follow_cluster:
nodes = self.startup_nodes
for node in nodes:
try:
r = self.get_redis_link(host=node["host"], port=node["port"], decode_responses=True)
cluster_slots = r.execute_command("cluster", "slots")
startup_nodes_reachable = True
except (ConnectionError, TimeoutError):
continue
except ResponseError as e:
# Isn't a cluster connection, so it won't parse these exceptions automatically
message = e.__str__()
if 'CLUSTERDOWN' in message or 'MASTERDOWN' in message:
continue
else:
raise RedisClusterException("ERROR sending 'cluster slots' command to redis server: {0}".format(node))
except Exception:
raise RedisClusterException("ERROR sending 'cluster slots' command to redis server: {0}".format(node))
all_slots_covered = True
# If there's only one server in the cluster, its ``host`` is ''
# Fix it to the host in startup_nodes
if (len(cluster_slots) == 1 and len(cluster_slots[0][2][0]) == 0 and len(self.startup_nodes) == 1):
cluster_slots[0][2][0] = self.startup_nodes[0]['host']
# No need to decode response because Redis should handle that for us...
for slot in cluster_slots:
master_node = slot[2]
if master_node[0] == '':
master_node[0] = node['host']
master_node[1] = int(master_node[1])
master_node = self.remap_internal_node_object(master_node)
node, node_name = self.make_node_obj(master_node[0], master_node[1], 'master')
nodes_cache[node_name] = node
for i in range(int(slot[0]), int(slot[1]) + 1):
if i not in tmp_slots:
tmp_slots[i] = [node]
slave_nodes = [slot[j] for j in range(3, len(slot))]
for slave_node in slave_nodes:
slave_node = self.remap_internal_node_object(slave_node)
target_slave_node, slave_node_name = self.make_node_obj(slave_node[0], slave_node[1], 'slave')
nodes_cache[slave_node_name] = target_slave_node
tmp_slots[i].append(target_slave_node)
else:
# Validate that 2 nodes want to use the same slot cache setup
if tmp_slots[i][0]['name'] != node['name']:
disagreements.append("{0} vs {1} on slot: {2}".format(
tmp_slots[i][0]['name'], node['name'], i),
)
if len(disagreements) > 5:
raise RedisClusterException("startup_nodes could not agree on a valid slots cache. {0}".format(", ".join(disagreements)))
self.populate_startup_nodes()
self.refresh_table_asap = False
if self._skip_full_coverage_check:
need_full_slots_coverage = False
else:
need_full_slots_coverage = self.cluster_require_full_coverage(nodes_cache)
# Validate if all slots are covered or if we should try next startup node
for i in range(0, self.RedisClusterHashSlots):
if i not in tmp_slots and need_full_slots_coverage:
all_slots_covered = False
if all_slots_covered:
# All slots are covered and application can continue to execute
break
if not startup_nodes_reachable:
raise RedisClusterException("Redis Cluster cannot be connected. Please provide at least one reachable node.")
if not all_slots_covered:
raise RedisClusterException("All slots are not covered after query all startup_nodes. {0} of {1} covered...".format(
len(tmp_slots), self.RedisClusterHashSlots))
# Set the tmp variables to the real variables
self.slots = tmp_slots
self.nodes = nodes_cache
self.reinitialize_counter = 0
def remap_internal_node_object(self, node_obj):
if not self.host_port_remap:
# No remapping rule set, return object unmodified
return node_obj
for remap_rule in self.host_port_remap:
if 'from_host' in remap_rule and 'to_host' in remap_rule:
if remap_rule['from_host'] in node_obj[0]:
# print('remapping host', node_obj[0], remap_rule['to_host'])
node_obj[0] = remap_rule['to_host']
# The port value is always an integer
if 'from_port' in remap_rule and 'to_port' in remap_rule:
if remap_rule['from_port'] == node_obj[1]:
# print('remapping port', node_obj[1], remap_rule['to_port'])
node_obj[1] = remap_rule['to_port']
return node_obj
def increment_reinitialize_counter(self, ct=1):
for i in range(min(ct, self.reinitialize_steps)):
self.reinitialize_counter += 1
if self.reinitialize_counter % self.reinitialize_steps == 0:
self.initialize()
def cluster_require_full_coverage(self, nodes_cache):
"""
If the 'cluster-require-full-coverage' config is set to 'no' on the
redis servers, the cluster can still respond even when not all
slots are covered.
"""
nodes = nodes_cache or self.nodes
def node_require_full_coverage(node):
try:
r_node = self.get_redis_link(host=node["host"], port=node["port"], decode_responses=True)
return "yes" in r_node.config_get("cluster-require-full-coverage").values()
except ConnectionError:
return False
except Exception:
raise RedisClusterException("ERROR sending 'config get cluster-require-full-coverage' command to redis server: {0}".format(node))
# at least one node should have cluster-require-full-coverage yes
return any(node_require_full_coverage(node) for node in nodes.values())
def set_node_name(self, n):
"""
Format the name for the given node object
# TODO: This should not be constructed this way. It should update the name of the node in the node cache dict
"""
if "name" not in n:
n["name"] = "{0}:{1}".format(n["host"], n["port"])
def make_node_obj(self, host, port, server_type):
"""
Create a node datastructure.
Returns the node datastructure and the node name
"""
node_name = "{0}:{1}".format(host, port)
node = {
'host': host,
'port': port,
'name': node_name,
'server_type': server_type
}
return (node, node_name)
def set_node(self, host, port, server_type=None):
"""
Update data for a node.
"""
node, node_name = self.make_node_obj(host, port, server_type)
self.nodes[node_name] = node
return node
def populate_startup_nodes(self):
"""
Add all known nodes to the startup node list and filter out any duplicates
"""
for item in self.startup_nodes:
self.set_node_name(item)
for n in self.nodes.values():
if n not in self.startup_nodes:
self.startup_nodes.append(n)
# freeze it so we can set() it
uniq = {frozenset(node.items()) for node in self.startup_nodes}
# then thaw it back out into a list of dicts
self.startup_nodes = [dict(node) for node in uniq]
def reset(self):
"""
Drop all node data and start over from startup_nodes
"""
self.initialize()
|
the-stack_0_126 | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject
from facebook_business.adobjects.objectparser import ObjectParser
from facebook_business.api import FacebookRequest
from facebook_business.typechecker import TypeChecker
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class TargetingGeoLocationLocationCluster(
AbstractCrudObject,
):
def __init__(self, fbid=None, parent_id=None, api=None):
self._isTargetingGeoLocationLocationCluster = True
super(TargetingGeoLocationLocationCluster, self).__init__(fbid, parent_id, api)
class Field(AbstractObject.Field):
key = 'key'
id = 'id'
_field_types = {
'key': 'int',
'id': 'string',
}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
return field_enum_info
|
the-stack_0_129 | from unittest import mock
from django.conf import settings
from django.test import TestCase, override_settings
from daiquiri.jobs.tests.mixins import AsyncTestMixin
from daiquiri.query.models import QueryJob, Example
@override_settings(QUERY_ANONYMOUS=True)
@mock.patch(settings.ADAPTER_DATABASE + '.submit_query', mock.Mock())
@mock.patch(settings.ADAPTER_DATABASE + '.fetch_nrows', mock.Mock(return_value=100))
@mock.patch(settings.ADAPTER_DATABASE + '.fetch_size', mock.Mock(return_value=100))
@mock.patch(settings.ADAPTER_DATABASE + '.count_rows', mock.Mock(return_value=100))
@mock.patch(settings.ADAPTER_DATABASE + '.rename_table', mock.Mock())
@mock.patch(settings.ADAPTER_DATABASE + '.drop_table', mock.Mock())
@mock.patch(settings.ADAPTER_DATABASE + '.create_user_schema_if_not_exists', mock.Mock())
class AsyncTestCase(AsyncTestMixin, TestCase):
databases = ('default', 'data', 'tap', 'oai')
fixtures = (
'auth.json',
'metadata.json',
'jobs.json',
'queryjobs.json',
'examples.json'
)
users = (
('admin', 'admin'),
('user', 'user'),
('evil', 'evil'),
('anonymous', None),
)
url_names = {
'list': 'tap:async-list',
'detail': 'tap:async-detail',
'results': 'tap:async-results',
'result': 'tap:async-result',
'parameters': 'tap:async-parameters',
'destruction': 'tap:async-destruction',
'executionduration': 'tap:async-executionduration',
'phase': 'tap:async-phase',
'error': 'tap:async-error',
'quote': 'tap:async-quote',
'owner': 'tap:async-owner'
}
jobs = QueryJob.objects.filter(owner__username='user')
def get_parameter_for_new_jobs(self, username):
return [{
'LANG': example.query_language,
'QUERY': example.query_string
} for example in Example.objects.filter(access_level='PUBLIC')]
def get_parameter_for_new_jobs_internal(self, username):
return [{
'LANG': example.query_language,
'QUERY': example.query_string
} for example in Example.objects.filter(access_level='INTERNAL')]
|
the-stack_0_130 | import json
import traceback
from datetime import timedelta
from flask import request, g, current_app
from sqlalchemy import desc, func
from apps.auth.models.users import User
from apps.project.business.credit import CreditBusiness
from apps.project.models.assets import Phone, PhoneRecord, VirtualAsset, PhoneBorrow
from apps.project.models.credit import Credit
from apps.public.models.public import Config
from library.api.db import db
from library.api.transfer import transfer2json
from library.notification import notification
from library.trpc import Trpc
user_trpc = Trpc('auth')
class PhoneBusiness(object):
public_trpc = Trpc('public')
user_trpc = Trpc('auth')
message_trpc = Trpc('message')
@classmethod
def _query(cls):
return Phone.query.add_columns(
Phone.id.label('id'),
Phone.name.label('name'),
Phone.asset_id.label('asset_id'),
Phone.vendor.label('vendor'),
Phone.device_number.label('device_number'),
Phone.os.label('os'),
Phone.cpu.label('cpu'),
Phone.core.label('core'),
Phone.ram.label('ram'),
Phone.rom.label('rom'),
Phone.resolution.label('resolution'),
Phone.buy_date.label('buy_date'),
Phone.region.label('region'),
Phone.status.label('status'),
Phone.borrow_id.label('borrow_id'),
Phone.creator_id.label('creator_id'),
Phone.device_source.label('device_source'),
Phone.device_belong.label('device_belong'),
)
@classmethod
@transfer2json(
'?id|!name|!asset_id|!vendor|!device_number|!os|!cpu|!core|!ram|!rom|!resolution|!buy_date|!region|!status|'
'!borrow_id|!creator_id|!device_source|!device_belong'
)
def query_all_json(cls, page_size, page_index):
ret = cls._query().filter(
Phone.status == Phone.ACTIVE).order_by(
desc(Phone.id)).limit(int(page_size)).offset(
int(page_index - 1) * int(page_size)).all()
return ret
@classmethod
def query_all_count(cls):
count = cls._query().filter(Phone.status == Phone.ACTIVE).count()
return count
@classmethod
@transfer2json(
'?id|!name|!asset_id|!vendor|!device_number|!os|!cpu|!core|!ram|!rom|!resolution|!buy_date|!region|!status|'
'!borrow_id|!creator_id|!device_source|!device_belong'
)
def query_json_by_id(cls, pid):
return cls._query().filter(
Phone.id == pid, Phone.status == Phone.ACTIVE).all()
@classmethod
def get_phone_by_id(cls, pid):
users = user_trpc.requests(method='get', path='/user')
phone = cls.query_json_by_id(pid)
if len(phone) <= 0:
return 101, 'phone not exist!'
phone = phone[0]
for user in users:
if user.get('userid') == phone.get('creator_id'):
phone['creator_nickname'] = user.get('nickname')
if user.get('userid') == phone.get('borrow_id'):
phone['borrow_nickname'] = user.get('nickname')
return 0, [phone]
@classmethod
def send_message(cls, user_list, creator, text):
if cls.message_trpc.requests('post', '/message',
body={'send_id': creator, 'rec_id': user_list, 'content': text}):
current_app.logger.info('发送站内信成功')
else:
current_app.logger.info('发送站内信失败')
@classmethod
def get_phone_all(cls, page_size, page_index):
# search by device name
name = request.args.get('name', '')
# search by vendor
vendor = request.args.get('vendor', '')
# search by OS
os = request.args.get('os', '')
# search by resolution
resolution = request.args.get('resolution', '')
# search by borrower
borrower_id = request.args.get('borrower_id')
# search by holder (creator)
creator_id = request.args.get('creator_id')
# search by device ownership (device_belong)
device_belong = request.args.get('device_belong', '')
# search by device source
device_source = request.args.get('device_source', '')
# search by owner
# fetch the full list of phone devices
phones, count = cls.search_phone_all(name, vendor, os, resolution, borrower_id, device_belong,
device_source, creator_id, page_size, page_index)
# fetch basic info for all users
users = {int(user.get('userid')): user
for user in user_trpc.requests(method='get', path='/user', query={'base_info': True})}
# fetch all borrow relationships
phone_borrows = {phone_borrow.phone_id: phone_borrow for phone_borrow in PhoneBorrow.query.all()}
data = []
for phone in phones:
phone_borrow = phone_borrows.get(phone.get('id'))
if g.userid == phone.get('borrow_id'):
phone["move_status"] = 1
else:
phone["move_status"] = 0
if PhoneBusiness.in_confirm_status(phone_borrow):
phone["move_status"] = 2
if PhoneBusiness.need_confirm_status(phone_borrow):
phone["confirm_status"] = 0
else:
phone["confirm_status"] = 1
try:
borrower = users.get(phone.get('borrow_id')).get("nickname")
creator = users.get(phone.get('creator_id')).get('nickname')
phone['borrow_nickname'] = borrower
phone['creator_nickname'] = creator
# a borrow record exists for this phone
if phone_borrow:
user_list = [int(uid) for uid in phone_borrow.user_list.split(',') if uid != '']
# a user still needs to confirm receipt
if phone_borrow.confirm_userid != 0:
confirm_user_nickname = users.get(phone_borrow.confirm_userid).get('nickname')
phone['borrow_status'] = f'[{confirm_user_nickname}] 待接收'
# there are users requesting to borrow
elif user_list:
user_list_temp = [users.get(userid).get('nickname') for userid in user_list]
phone['borrow_status'] = f'[{",".join(user_list_temp)}] 申请借用'
phone['move_status'] = 3 if phone["move_status"] == 1 else 0
# no pending borrow, confirmation or return
else:
phone['borrow_status'] = f'[{borrower}] 持有'
else:
phone['borrow_status'] = f'[{borrower}] 持有'
except Exception as e:
current_app.logger.error(e)
phone['borrow_status'] = '未知'
phone['borrow_nickname'] = '未知'
data.append(phone)
current_app.logger.info(data)
return data, count
@classmethod
@transfer2json(
'?id|!name|!asset_id|!vendor|!device_number|!os|!cpu|!core|!ram|!rom|!resolution|!buy_date|!region|!status|'
'!borrow_id|!creator_id|!device_source|!device_belong'
)
def search_phone_json(cls, data):
return data.all()
@classmethod
def search_phone_all(cls, name, vendor, os, resolution, borrower_id, device_belong, device_source, creator_id,
page_size, page_index):
try:
data_all = cls._query().filter(Phone.status == Phone.ACTIVE)
if name != '':
data_all = data_all.filter(Phone.name.like(f'%{name}%'))
if vendor != '':
data_all = data_all.filter(Phone.vendor.like(f'%{vendor}%'))
if os != '':
data_all = data_all.filter(Phone.os.like(f'%{os}%'))
if resolution != '':
data_all = data_all.filter(Phone.resolution.like(f'%{resolution}%'))
if device_belong != '':
data_all = data_all.filter(Phone.device_belong.like(f'%{device_belong}%'))
if device_source != '':
data_all = data_all.filter(Phone.device_source.like(f'%{device_source}%'))
if borrower_id:
data_all = data_all.filter(Phone.borrow_id == borrower_id)
if creator_id:
data_all = data_all.filter(Phone.creator_id == creator_id)
count = data_all.count()
data = cls.search_phone_json(
data_all.order_by(desc(Phone.id)).limit(int(page_size)).offset(int(page_index - 1) * int(page_size)))
return data, count
except Exception as e:
current_app.logger.error(e)
@classmethod
def get_holder_json(cls):
# fetch information on all current holders
try:
data_all = []
temp = []
phones = Phone.query.add_columns(Phone.borrow_id.label('borrow_id')).filter(
Phone.status == Phone.ACTIVE).all()
for phone in phones:
if phone.borrow_id not in temp:
temp.append(phone.borrow_id)
user = cls.user_trpc.requests('get', '/user/{}'.format(phone.borrow_id))[0]
data = {
'nickname': user.get('nickname'),
'id': user.get('userid')
}
data_all.append(data)
return data_all
except Exception as e:
current_app.logger.error(e)
@classmethod
def can_move_status(cls, phone_id):
# check whether this device is currently held by the current user
phone = Phone.query.get(phone_id)
if phone and phone.borrow_id == g.userid:
return True
else:
return False
@classmethod
def need_confirm_status(cls, phone_borrow):
# check whether this phone is waiting for the current user to confirm receipt
try:
if phone_borrow is not None:
if int(phone_borrow.confirm_userid) == g.userid:
return True
else:
return False
else:
return False
except Exception as e:
current_app.logger.error(e)
current_app.logger.error(traceback.format_exc())
return 101, str(e)
@classmethod
def in_confirm_status(cls, phone_borrow):
# check whether this device is in the confirmation flow
try:
if phone_borrow is not None:
if int(phone_borrow.confirm_userid) != 0:
return True
return False
else:
return False
except Exception as e:
current_app.logger.error(e)
current_app.logger.error(traceback.format_exc())
return 101, str(e)
@classmethod
def qyweixin_email(cls, user_ids, text):
if not isinstance(user_ids, list):
user_ids = [user_ids]
notification.send_notification(user_ids, text, creator=0)
return 0, 'success'
@classmethod
def send_need_confirm_msg(cls, current_phone, phone_current_holder, phone_new_holder):
deadline = PhoneBusiness.deadline(current_phone)
new_holder_msg_text = """[TCloud] {} ({})
您有一台设备需要确认接收:
设备 : {},
资产编号 : {},
原持有人 : {} (微信号: {})
现持有人 : {} (微信号: {})
请及时到系统中确认接收!""".format(phone_new_holder.nickname, phone_new_holder.wx_userid,
current_phone.name, current_phone.asset_id, phone_current_holder.nickname,
phone_current_holder.wx_userid, phone_new_holder.nickname,
phone_new_holder.wx_userid)
# phone_current_holder: previous holder
# phone_new_holder: user who must confirm receipt
ret, msg = PhoneBusiness.qyweixin_email(phone_new_holder.id, new_holder_msg_text)
return ret, msg
@classmethod
def send_cancel_move_msg(cls, current_phone, phone_current_holder, phone_new_holder):
deadline = PhoneBusiness.deadline(current_phone)
new_holder_msg_text = """[TCloud] {} ({})
您有一台设备由于超过 3 天没有接收,已被系统退回:
设备 : {},
资产编号 : {},
现持有人 : {} (微信号: {})
""".format(phone_new_holder.nickname, phone_new_holder.wx_userid, current_phone.name, current_phone.asset_id,
phone_new_holder.nickname, phone_new_holder.wx_userid)
# phone_current_holder: previous holder
# phone_new_holder: user who must confirm receipt
ret, msg = PhoneBusiness.qyweixin_email(phone_current_holder.id, new_holder_msg_text)
return ret, msg
@classmethod
def send_need_move_msg(cls, current_phone, phone_current_holder):
new_holder_msg_text = """[TCloud] {} ({})
您有一条借用请求需要处理:
设备 : {}
资产编号 : {}
请及时到系统中处理!
请通过 TCloud->资产->流转 进行转出。""".format(phone_current_holder.nickname, phone_current_holder.wx_userid,
current_phone.name, current_phone.asset_id,
phone_current_holder.wx_userid)
# phone_current_holder: current holder
ret, msg = PhoneBusiness.qyweixin_email(phone_current_holder.id, new_holder_msg_text)
return ret, msg
@classmethod
def send_create_msg_qywx(cls, current_phone, phone_holder):
msg_text = """[TCloud] {} ({})
您拥有了一台新的设备:
设备 : {},
资产编号 : {},
持有人 : {} (微信号: {})""".format(phone_holder.nickname, phone_holder.wx_userid,
current_phone.name, current_phone.asset_id, phone_holder.nickname,
phone_holder.wx_userid, )
ret, msg = PhoneBusiness.qyweixin_email(phone_holder.id, msg_text)
return ret, msg
@classmethod
def send_delay_msg_qywx(cls, current_phone, phone_holder):
deadline = PhoneBusiness.deadline(current_phone)
msg_text = """[TCloud] {} ({})
您拥有的一台设备需要归还:
设备 : {},
资产编号 : {},
持有人 : {} (微信号: {})
到期时间: {}
续借 : 请到系统中点击 续借 进行续借
归还 : 请到系统中点击 退回 进行归还
过期 2 天后会根据超时时间扣除信用分!请及时归还!""".format(phone_holder.nickname, phone_holder.wx_userid,
current_phone.name, current_phone.asset_id, phone_holder.nickname,
phone_holder.wx_userid, deadline)
return PhoneBusiness.qyweixin_email(phone_holder.id, msg_text)
@classmethod
def send_move_msg_qywx(cls, current_phone, phone_current_holder, phone_new_holder):
if phone_new_holder.id == phone_current_holder.id:
current_app.logger.info('[{}](资产编号:{}) 设备状态未发生状态变化'.format(current_phone.name, current_phone.asset_id))
return
current_holder_msg_text = """[TCloud] {} ({})
您的一台设备状态将要发生变化:
设备 : {},
资产编号 : {},
变化 : 持有人将 由 {} (微信号: {}) 变为 {} (微信号: {})
状态 : 等待接收人确认""".format(phone_current_holder.nickname, phone_current_holder.wx_userid,
current_phone.name, current_phone.asset_id, phone_current_holder.nickname,
phone_current_holder.wx_userid, phone_new_holder.nickname,
phone_new_holder.wx_userid)
ret, msg = PhoneBusiness.qyweixin_email(phone_current_holder.id, current_holder_msg_text)
deadline = PhoneBusiness.deadline(current_phone)
new_holder_msg_text = """[TCloud] {} ({})
您将拥有一台新的设备:
设备 : {},
资产编号 : {},
原持有人 : {} (微信号: {})
现持有人 : {} (微信号: {})
可持有时间: {} 天
到期时间: {}
请及时到系统中确认接收!""".format(phone_new_holder.nickname, phone_new_holder.wx_userid,
current_phone.name, current_phone.asset_id, phone_current_holder.nickname,
phone_current_holder.wx_userid,
phone_new_holder.nickname, phone_new_holder.wx_userid, Phone.HOLD_DATE, deadline)
# phone_current_holder: previous holder
# phone_new_holder: new holder
ret, msg = PhoneBusiness.qyweixin_email(phone_new_holder.id, new_holder_msg_text)
return ret, msg
@classmethod
def send_move_confirm_msg_qywx(cls, current_phone, phone_current_holder, phone_new_holder):
if phone_new_holder.id == phone_current_holder.id:
current_app.logger.info('[{}](资产编号:{}) 设备状态未发生状态变化'.format(current_phone.name, current_phone.asset_id))
return
current_holder_msg_text = """[TCloud] {} ({})
您的一台设备状态发生了变化:
设备 : {},
资产编号 : {},
变化 : 持有人已 由 {} (微信号: {}) 变为 {} (微信号: {})
状态 : 已接收""".format(phone_current_holder.nickname, phone_current_holder.wx_userid,
current_phone.name, current_phone.asset_id, phone_current_holder.nickname,
phone_current_holder.wx_userid, phone_new_holder.nickname,
phone_new_holder.wx_userid)
ret, msg = PhoneBusiness.qyweixin_email(phone_current_holder.id, current_holder_msg_text)
deadline = PhoneBusiness.deadline(current_phone)
new_holder_msg_text = """[TCloud] {} ({})
您拥有了一台新的设备:
设备 : {},
资产编号 : {},
原持有人 : {} (微信号: {})
现持有人 : {} (微信号: {})
可持有时间: {} 天
到期时间: {}
状态: 已接收!""".format(phone_new_holder.nickname, phone_new_holder.wx_userid,
current_phone.name, current_phone.asset_id, phone_current_holder.nickname,
phone_current_holder.wx_userid,
phone_new_holder.nickname, phone_new_holder.wx_userid, Phone.HOLD_DATE, deadline)
# phone_current_holder: previous holder
# phone_new_holder: new holder
ret, msg = PhoneBusiness.qyweixin_email(phone_new_holder.id, new_holder_msg_text)
return ret, msg
@classmethod
def send_return_msg_qywx(cls, current_phone, phone_current_holder, phone_new_holder):
if phone_new_holder.id == phone_current_holder.id:
current_app.logger.info('[{}](资产编号:{}) 设备状态未发生状态变化'.format(current_phone.name, current_phone.asset_id))
return
current_holder_msg_text = """[TCloud] {} ({})
您归还了一台设备:
设备 : {},
资产编号 : {},
变化 : 持有人将 由 {} (微信号: {}) 变为 {} (微信号: {})
状态 : 等待接收人确认""".format(phone_current_holder.nickname, phone_current_holder.wx_userid,
current_phone.name, current_phone.asset_id,
phone_current_holder.nickname,
phone_current_holder.wx_userid,
phone_new_holder.nickname, phone_new_holder.wx_userid)
PhoneBusiness.qyweixin_email(phone_current_holder.id, current_holder_msg_text)
new_holder_msg_text = """[TCloud] {} ({})
您收到别人归还的一台设备:
设备 : {},
资产编号 : {},
原持有人 : {} (微信号: {})
持有人 : {} (微信号: {})
状态 : 等待确认
请到系统中及时确认接收!""".format(phone_new_holder.nickname, phone_new_holder.wx_userid, current_phone.name,
current_phone.asset_id,
phone_current_holder.nickname, phone_current_holder.wx_userid,
phone_new_holder.nickname, phone_new_holder.wx_userid)
ret, msg = PhoneBusiness.qyweixin_email(phone_new_holder.id, new_holder_msg_text)
return ret, msg
@classmethod
def send_return_confirm_msg_qywx(cls, current_phone, phone_current_holder, phone_new_holder):
if phone_new_holder.id == phone_current_holder.id:
current_app.logger.info('[{}](资产编号:{}) 设备状态未发生状态变化'.format(current_phone.name, current_phone.asset_id))
return
current_holder_msg_text = """[TCloud] {} ({})
您成功归还了一台设备:
设备 : {},
资产编号 : {},
变化 : 持有人已 由 {} (微信号: {}) 变为 {} (微信号: {})
状态 : 接收人已接收""".format(phone_current_holder.nickname, phone_current_holder.wx_userid,
current_phone.name, current_phone.asset_id, phone_current_holder.nickname,
phone_current_holder.wx_userid,
phone_new_holder.nickname, phone_new_holder.wx_userid)
PhoneBusiness.qyweixin_email(phone_current_holder.id, current_holder_msg_text)
new_holder_msg_text = """[TCloud] {} ({})
您已接收别人归还的一台设备:
设备 : {},
资产编号 : {},
原持有人 : {} (微信号: {})
持有人 : {} (微信号: {})
状态 : 您已接收!""".format(phone_new_holder.nickname, phone_new_holder.wx_userid, current_phone.name, current_phone.asset_id,
phone_current_holder.nickname, phone_current_holder.wx_userid,
phone_new_holder.nickname, phone_new_holder.wx_userid)
ret, msg = PhoneBusiness.qyweixin_email(phone_new_holder.id, new_holder_msg_text)
return ret, msg
@classmethod
def deadline(cls, current_phone):
# compute the due date from the phone's latest record
phone_recorder = PhoneRecord.query.filter(PhoneRecord.phone_id == current_phone.id).order_by(
PhoneRecord.id.desc()).first()
deadline = phone_recorder.creation_time + timedelta(days=Phone.HOLD_DATE)  # due date
return deadline
@classmethod
def create(cls, name, asset_id, vendor, device_number, os, cpu, core, ram, rom, resolution, buy_date, region,
borrow_id, device_source, device_belong, creator_id):
try:
t = Phone(
name=name,
asset_id=asset_id,
vendor=vendor,
device_number=device_number,
os=os,
cpu=cpu,
core=core,
ram=ram,
rom=rom,
resolution=resolution,
buy_date=buy_date,
region=region,
borrow_id=borrow_id or g.userid,
creator_id=creator_id or g.userid,
device_source=device_source,
device_belong=device_belong,
)
db.session.add(t)
db.session.flush()
PhoneRecordBusiness.create(t, g.userid)
db.session.commit()
phone_holder = User.query.get(t.creator_id)
# send a WeChat Work notification
PhoneBusiness.send_create_msg_qywx(t, phone_holder)
return 0, None
except Exception as e:
current_app.logger.error(str(e))
current_app.logger.error(traceback.format_exc())
return 102, str(e)
# initiate a transfer
@classmethod
def move_to_user(cls, id, borrow_id):
try:
t = Phone.query.get(id)
phone_new_holder = User.query.get(borrow_id)
phone_current_holder = User.query.get(t.borrow_id)
# clear the device's pending borrow-request list and keep the previous holder's id while waiting for receipt
PhoneBorrowBusiness.clear_borrow_user_list(id, phone_current_holder.id)
# set the device's lend-out flag to 1 and wait for the receiver to confirm
PhoneBorrowBusiness.add_user_to_confirm(id, phone_new_holder.id)
# send a WeChat Work notification
PhoneBusiness.send_move_msg_qywx(t, phone_current_holder, phone_new_holder)
return 0, None
except Exception as e:
current_app.logger.error(e)
current_app.logger.error(traceback.format_exc())
return 102, str(e)
# confirm a transfer
@classmethod
def move(cls, id, borrow_id):
try:
t = Phone.query.get(id)
phone_new_holder = User.query.get(borrow_id)
if not phone_new_holder:
return 101, '要转移的用户不存在,请检查用户信息'
t.borrow_id = borrow_id
db.session.add(t)
PhoneRecordBusiness.update(t, g.userid)
db.session.commit()
return 0, None
except Exception as e:
current_app.logger.error(e)
return 102, str(e)
# return a device
@classmethod
def return_to_admin(cls, id):
try:
# here the device is returned to its creator
current_phone = Phone.query.get(id)
admin_id = current_phone.creator_id
phone_current_holder = User.query.get(current_phone.borrow_id)
phone_new_holder = User.query.get(admin_id)
PhoneRecordBusiness.update(current_phone, g.userid)
# send a WeChat Work notification
PhoneBusiness.send_return_msg_qywx(current_phone, phone_current_holder, phone_new_holder)
# clear the device's pending borrow-request list and keep the previous holder's id while waiting for receipt
PhoneBorrowBusiness.clear_borrow_user_list(id, phone_current_holder.id)
# add the admin to the confirmation list
PhoneBorrowBusiness.add_user_to_confirm(id, admin_id)
return 0, None
except Exception as e:
current_app.logger.error(e)
current_app.logger.error(traceback.format_exc())
return 102, str(e)
# devices not received within 3 days are returned automatically
@classmethod
def cancel_move_to(cls, id):
try:
# clear the phone borrow data directly
current_phone = Phone.query.get(id)
phone_borrow = PhoneBorrowBusiness.get_borrow_by_phone_id(phone_id=id)
admin_id = current_phone.creator_id
phone_current_holder = User.query.get(phone_borrow.confirm_userid)
phone_new_holder = User.query.get(admin_id)
# send a WeChat Work notification
cls.send_cancel_move_msg(current_phone, phone_current_holder, phone_new_holder)
ret, msg = PhoneBorrowBusiness.update(phone_borrow.id, phone_borrow.phone_id, 0, '')
return ret, msg
except Exception as e:
current_app.logger.error(e)
current_app.logger.error(traceback.format_exc())
return 102, str(e)
@classmethod
def update(cls, id, name, asset_id, vendor, device_number, os, cpu, core, ram, rom, resolution, buy_date, region,
borrow_id, device_source, device_belong, creator_id):
try:
t = Phone.query.get(id)
t.name = name
t.asset_id = asset_id
t.vendor = vendor
t.device_number = device_number
t.os = os
t.cpu = cpu
t.core = core
t.ram = ram
t.rom = rom
t.resolution = resolution
t.buy_date = buy_date
t.region = region
t.borrow_id = borrow_id
t.device_source = device_source
t.device_belong = device_belong
t.creator_id = creator_id
db.session.add(t)
PhoneRecordBusiness.update(t, g.userid)
db.session.commit()
return 0, None
except Exception as e:
current_app.logger.error(str(e))
return 102, str(e)
@classmethod
def delete(cls, id):
try:
t = Phone.query.get(id)
if t is None:
return 0
t.status = Phone.DISABLE
db.session.add(t)
PhoneRecordBusiness.delete(t, g.userid)
db.session.commit()
return 0
except Exception as e:
current_app.logger.error(str(e))
return 105, str(e)
class PhoneRecordBusiness(object):
@classmethod
@transfer2json(
'?id|!phone_id|!name|!asset_id|!vendor|!creation_time|!modified_time|!device_number|!os|!cpu|!core|!ram|'
'!rom|!resolution|!buy_date|!region|!status|!borrow_id|!creator_id|!device_source|!device_belong|!editor_id'
)
def query_json_by_id(cls, id):
return cls._query().filter(
PhoneRecord.phone_id == id, Phone.status == Phone.ACTIVE).all()
@classmethod
@transfer2json(
'?id|!phone_id|!name|!asset_id|!vendor|!creation_time|!modified_time|!device_number|!os|!cpu|!core|!ram|!rom'
'|!resolution|!buy_date|!region|!status|!borrow_id|!creator_id|!device_source|!device_belong|!editor_id'
)
def query_record_json(cls, phone_id):
ret = cls._query().filter(PhoneRecord.phone_id == phone_id).order_by(PhoneRecord.id).all()
return ret
@classmethod
def _query(cls):
return PhoneRecord.query.add_columns(
PhoneRecord.id.label('id'),
PhoneRecord.phone_id.label('phone_id'),
PhoneRecord.name.label('name'),
PhoneRecord.asset_id.label('asset_id'),
PhoneRecord.vendor.label('vendor'),
func.date_format(PhoneRecord.creation_time, "%Y-%m-%d %H:%i:%s").label('creation_time'),
func.date_format(PhoneRecord.modified_time, "%Y-%m-%d %H:%i:%s").label('modified_time'),
PhoneRecord.device_number.label('device_number'),
PhoneRecord.os.label('os'),
PhoneRecord.cpu.label('cpu'),
PhoneRecord.core.label('core'),
PhoneRecord.ram.label('ram'),
PhoneRecord.rom.label('rom'),
PhoneRecord.resolution.label('resolution'),
PhoneRecord.buy_date.label('buy_date'),
PhoneRecord.region.label('region'),
PhoneRecord.status.label('status'),
PhoneRecord.borrow_id.label('borrow_id'),
PhoneRecord.creator_id.label('creator_id'),
PhoneRecord.device_source.label('device_source'),
PhoneRecord.device_belong.label('device_belong'),
PhoneRecord.editor_id.label('editor_id'),
)
@classmethod
def create(cls, t, editor_id):
t_record = PhoneRecord(
phone_id=t.id,
name=t.name,
asset_id=t.asset_id,
vendor=t.vendor,
device_number=t.device_number,
os=t.os,
cpu=t.cpu,
core=t.core,
ram=t.ram,
rom=t.rom,
resolution=t.resolution,
buy_date=t.buy_date,
region=t.region,
borrow_id=t.borrow_id,
creator_id=t.creator_id,
device_source=t.device_source,
device_belong=t.device_belong,
editor_id=editor_id,
)
db.session.add(t_record)
@classmethod
def update(cls, t, editor_id):
t_record = PhoneRecord(
phone_id=t.id,
name=t.name,
asset_id=t.asset_id,
vendor=t.vendor,
device_number=t.device_number,
os=t.os,
cpu=t.cpu,
core=t.core,
ram=t.ram,
rom=t.rom,
resolution=t.resolution,
buy_date=t.buy_date,
region=t.region,
borrow_id=t.borrow_id,
creator_id=t.creator_id,
device_source=t.device_source,
device_belong=t.device_belong,
editor_id=editor_id,
)
db.session.add(t_record)
@classmethod
def delete(cls, t, editor_id):
t_record = PhoneRecord(
phone_id=t.id,
name=t.name,
asset_id=t.asset_id,
vendor=t.vendor,
device_number=t.device_number,
os=t.os,
cpu=t.cpu,
core=t.core,
ram=t.ram,
rom=t.rom,
resolution=t.resolution,
buy_date=t.buy_date,
region=t.region,
borrow_id=t.borrow_id,
creator_id=t.creator_id,
device_source=t.device_source,
device_belong=t.device_belong,
editor_id=editor_id,
)
db.session.add(t_record)
@classmethod
def query_record_detail(cls, phone_id):
ret = cls.query_record_json(phone_id)
if not ret:
return []
ret_list = []
asset_config = Config.query.add_columns(Config.content.label('content')).filter(Config.module == 'asset',
Config.module_type == 1).first()
content = json.loads(asset_config.content)
operation_dict = content['operation_dict']
# name = operation_dict.get('name')
# asset_id = operation_dict.get('asset_id')
# status = operation_dict.get('status')
# borrow_id = operation_dict.get('borrow_id')
ret_dict = {}
user_creater = User.query.get(int(ret[0]['editor_id']))
ret_dict['modified_time'] = ret[0]['creation_time']
ret_dict['operation'] = "[{}({})] : 增加新的资产 {}".format(user_creater.nickname, user_creater.wx_userid,
ret[0]['name'])
ret_list.append(ret_dict)
current_app.logger.info(ret)
for r in range(1, len(ret)):
for asset_key, asset_value in ret[r - 1].items():
if asset_key in operation_dict.keys():
current_app.logger.info(
"修改的字段:" + str(asset_key) + ", 字段值:" + str(asset_value) + "-->" + str(ret[r][asset_key]))
user_editor = User.query.get(int(ret[r]['editor_id']))
ret_dict = None
if asset_key in ('borrow_id',):
ret_dict = {'modified_time': ret[r]['modified_time']}
if asset_value != ret[r][asset_key]:
user_from = User.query.filter(User.id == int(asset_value)).first()
user_to = User.query.filter(User.id == int(ret[r][asset_key])).first()
ret_dict['operation'] = "[{}({})] : {} 由 {}({}) 变更为 {}({})".format(user_editor.nickname,
user_editor.wx_userid,
operation_dict[
asset_key],
user_from.nickname,
user_from.wx_userid,
user_to.nickname,
user_to.wx_userid)
else:
# user_from = User.query.filter(User.id == int(asset_value)).first()
user_to = User.query.filter(User.id == int(ret[r][asset_key])).first()
ret_dict['operation'] = "[{}({})] : 续借了设备,{} 为 {}({})".format(user_editor.nickname,
user_editor.wx_userid,
operation_dict[asset_key],
user_to.nickname,
user_to.wx_userid)
else:
if asset_value != ret[r][asset_key]:
ret_dict = {
'modified_time': ret[r]['modified_time'],
'operation': "[{}({})] : 修改了{} {} 为 {}".format(user_editor.nickname,
user_editor.wx_userid,
operation_dict[asset_key],
asset_value,
ret[r][asset_key])
}
if ret_dict is not None:
ret_list.append(ret_dict)
ret_list = ret_list[::-1]
return ret_list
class VirtualAssetBusiness(object):
@classmethod
def _query(cls):
return VirtualAsset.query.add_columns(
VirtualAsset.id.label('id'),
VirtualAsset.asset_id.label('asset_id'),
VirtualAsset.passwd.label('passwd'),
VirtualAsset.administrator.label('administrator'),
VirtualAsset.bind_tel.label('bind_tel'),
VirtualAsset.idcard.label('idcard'),
VirtualAsset.status.label('status'),
VirtualAsset.asset_type.label('asset_type'),
VirtualAsset.operator.label('operator')
)
@classmethod
@transfer2json(
'?id|!asset_id|!passwd|!administrator|!idcard|!bind_tel|!status|!asset_type|!operator'
)
def query_json_by_id(cls, id):
return cls._query().filter(VirtualAsset.id == id,
VirtualAsset.status != VirtualAsset.DISABLE).all()
@classmethod
def create(cls, asset_id, passwd, administrator, bind_tel, idcard, asset_type, operator):
try:
va = VirtualAsset(
asset_id=asset_id,
passwd=passwd,
administrator=administrator,
bind_tel=bind_tel,
idcard=idcard,
asset_type=asset_type,
operator=operator,
)
db.session.add(va)
db.session.commit()
return 0, None
except Exception as e:
current_app.logger.error(str(e))
return 102, str(e)
@classmethod
def update(cls, id, asset_id, passwd, administrator, bind_tel, idcard, asset_type, operator):
try:
va = VirtualAsset.query.get(id)
va.asset_id = asset_id
va.passwd = passwd
va.administrator = administrator
va.bind_tel = bind_tel
va.idcard = idcard
va.asset_type = asset_type
va.operator = operator
db.session.add(va)
db.session.commit()
return 0, None
except Exception as e:
current_app.logger.error(str(e))
return 102, str(e)
@classmethod
def delete(cls, id):
try:
va = VirtualAsset.query.get(id)
if va is None:
return 0
va.status = VirtualAsset.DISABLE
db.session.add(va)
db.session.commit()
return 0
except Exception as e:
current_app.logger.error(str(e))
return 105, str(e)
@classmethod
@transfer2json(
'?id|!asset_id|!passwd|!administrator|!idcard|!bind_tel|!status|!asset_type|!operator',
ispagination=True
)
def paginate_data(cls, page_size, page_index):
asset_type = request.args.get('type')
query = cls._query().filter(VirtualAsset.status != VirtualAsset.DISABLE)
if asset_type:
query = query.filter(VirtualAsset.asset_type == int(asset_type))
count = query.count()
data = query.order_by(desc(VirtualAsset.id)).limit(
int(page_size)).offset(int(page_index - 1) * int(page_size)).all()
return data, count
class PhoneBorrowBusiness(object):
user_trpc = Trpc('auth')
@classmethod
def _query(cls):
return PhoneBorrow.query.add_columns(
PhoneBorrow.id.label('id'),
PhoneBorrow.phone_id.label('phone_id'),
PhoneBorrow.user_list.label('user_list'),
PhoneBorrow.confirm_userid.label('confirm_userid'),
func.date_format(PhoneBorrow.creation_time, "%Y-%m-%d %H:%i:%s").label('creation_time'),
func.date_format(PhoneBorrow.modified_time, "%Y-%m-%d %H:%i:%s").label('modified_time'),
)
@classmethod
@transfer2json('?id|!phone_id|!user_list|!confirm_userid|!creation_time|!modified_time')
def get_borrow_all(cls):
phone_borrows = cls._query().all()
return phone_borrows
@classmethod
def get_borrow_by_phone_id(cls, phone_id):
phone_borrow = cls._query().filter(PhoneBorrow.phone_id == phone_id).first()
return phone_borrow
@classmethod
def create(cls, phone_id, confirm_userid=0, user_list=''):
try:
phone_borrow = PhoneBorrow(
phone_id=phone_id,
user_list=user_list,
confirm_userid=confirm_userid,
)
db.session.add(phone_borrow)
db.session.commit()
return 0, None
except Exception as e:
current_app.logger.error(str(e))
return 102, str(e)
@classmethod
def update(cls, id, phone_id, confirm_userid, user_list):
try:
phone_borrow = PhoneBorrow.query.get(id)
            if not phone_borrow:
                return cls.create(phone_id, confirm_userid, user_list)
phone_borrow.user_list = user_list
phone_borrow.confirm_userid = confirm_userid
db.session.add(phone_borrow)
db.session.commit()
return 0, None
except Exception as e:
current_app.logger.error(e)
return 102, str(e)
@classmethod
def clear_borrow_user_list(cls, phone_id, old_holder_id):
        # Clear the borrow-request user list,
        # keeping only the original holder's ID.
try:
old_holder_id = str(old_holder_id)
phone_borrow = cls.get_borrow_by_phone_id(phone_id)
if not phone_borrow:
ret, msg = cls.create(phone_id, 0, old_holder_id)
else:
ret, msg = cls.update(phone_borrow.id, phone_borrow.phone_id, 0, old_holder_id)
return ret, msg
except Exception as e:
current_app.logger.error(e)
current_app.logger.error(traceback.format_exc())
return 102, str(e)
@classmethod
def add_user_to_confirm(cls, phone_id, user_id):
        # Add the user ID to this device's receive-confirmation list.
try:
phone_borrow = cls.get_borrow_by_phone_id(phone_id)
if not phone_borrow:
ret, msg = cls.create(phone_id, user_id)
else:
ret, msg = cls.update(phone_borrow.id, phone_borrow.phone_id, user_id, phone_borrow.user_list)
return ret, msg
except Exception as e:
current_app.logger.error(e)
current_app.logger.error(traceback.format_exc())
return 102, str(e)
@classmethod
def add_user_to_userlist(cls, phone_id, user_id):
        # Append the requesting user's ID to the request list.
try:
phone_borrow = cls.get_borrow_by_phone_id(phone_id)
if not phone_borrow:
cls.create(phone_id)
phone_borrow = cls.get_borrow_by_phone_id(phone_id)
            old_user_list = phone_borrow.user_list.split(',')
user_id = str(user_id)
if user_id not in old_user_list:
old_user_list.append(user_id)
else:
return 103, "不能重复借用"
new_user_list = ','.join(old_user_list)
cls.update(phone_borrow.id, phone_id, 0, new_user_list)
return 0, None
except Exception as e:
current_app.logger.error(e)
current_app.logger.error(traceback.format_exc())
return 102, str(e)
@classmethod
@transfer2json(
'?id|!nickname'
)
def get_user_list_by_phone_id(cls, phone_id):
try:
phone_borrow = cls.get_borrow_by_phone_id(phone_id)
if not phone_borrow:
return []
            user_list = phone_borrow.user_list.split(',')
users = []
for user_id in user_list:
if len(user_id) > 0:
user = User.query.get(int(user_id))
if user:
users.append(user)
return users
except Exception as e:
current_app.logger.error(str(e))
current_app.logger.error(traceback.format_exc())
return 102, str(e)
@classmethod
def send_borrow_msg_qywx(cls, current_phone, phone_holder, current_user):
current_user_nickname = current_user.nickname
current_user_wx_userid = current_user.wx_userid
receiver_id = phone_holder.wx_userid
msg_text = """[TCloud] {}({})
您收到一个设备借用请求:
借用的设备 : {},
资产编号 : {},
借用人 : {} (微信号: {}),
请通过企业微信沟通,如借出,请通过 TCloud->资产->流转 进行转出。""".format(phone_holder.nickname, phone_holder.wx_userid,
current_phone.name, current_phone.asset_id, current_user_nickname,
current_user_wx_userid)
PhoneBusiness.qyweixin_email(phone_holder.id, msg_text)
@classmethod
def send_borrow_continue_msg_qywx(cls, current_phone, phone_holder, current_user):
deadline = PhoneBusiness.deadline(current_phone)
current_user_nickname = current_user.nickname
current_user_wx_userid = current_user.wx_userid
receiver_id = phone_holder.wx_userid
msg_text = """[TCloud] {} ({})
您续借了一台设备:
借用的设备 : {},
资产编号 : {},
借用人 : {} (微信号: {})
可持有时间: {} 天
到期时间: {}""".format(phone_holder.nickname, phone_holder.wx_userid,
current_phone.name, current_phone.asset_id, current_user_nickname, current_user_wx_userid,
Phone.HOLD_DATE, deadline)
PhoneBusiness.qyweixin_email(phone_holder.id, msg_text)
@classmethod
def borrow(cls, phone_id):
        # Initiate a borrow request.
try:
ret, msg = 0, None
current_phone = Phone.query.get(phone_id)
if current_phone:
current_user = User.query.get(g.userid)
phone_holder = User.query.get(current_phone.borrow_id)
if current_phone.borrow_id == g.userid:
ret, msg = PhoneBusiness.move(phone_id, phone_holder.id)
PhoneBorrowBusiness.send_borrow_continue_msg_qywx(current_phone, phone_holder, current_user)
else:
ret, msg = PhoneBorrowBusiness.add_user_to_userlist(phone_id, g.userid)
if ret == 103:
return ret, msg
PhoneBorrowBusiness.send_borrow_msg_qywx(current_phone, phone_holder, current_user)
else:
return 101, '设备无效'
return ret, msg
except Exception as e:
current_app.logger.error(traceback.format_exc())
current_app.logger.error(e)
return 101, e
@classmethod
def confirm_borrow(cls, phone_id):
        # Confirm the borrow; the admin/receiver confirms receipt of the device.
try:
current_phone = Phone.query.get(phone_id)
phone_borrow = cls.get_borrow_by_phone_id(phone_id)
if int(phone_borrow.confirm_userid) != g.userid:
return 403, '只有接收人可以确认'
phone_current_holder = User.query.get(current_phone.borrow_id)
phone_new_holder = User.query.get(phone_borrow.confirm_userid)
ret, msg = PhoneBusiness.move(phone_id, int(phone_borrow.confirm_userid))
admins = cls.user_trpc.requests('get', '/user/admin')
current_app.logger.info('{} 确认接收设备'.format(int(phone_borrow.confirm_userid)))
if (int(phone_borrow.confirm_userid) in admins or
int(phone_borrow.confirm_userid) == current_phone.creator_id):
try:
PhoneBusiness.send_return_confirm_msg_qywx(current_phone, phone_current_holder, phone_new_holder)
reason = '成功归还了设备 {}({}) '.format(current_phone.name, current_phone.asset_id)
current_app.logger.info(reason)
user_old_id = int(phone_borrow.user_list)
ret, msg = CreditBusiness.add_sub_score(user_old_id, Credit.CREDIT_ADD_ONCE, reason)
except Exception as e:
current_app.logger.error(e)
else:
PhoneBusiness.send_move_confirm_msg_qywx(current_phone, phone_current_holder, phone_new_holder)
ret, msg = cls.update(phone_borrow.id, phone_borrow.phone_id, 0, '')
return ret, msg
except Exception as e:
current_app.logger.error(str(e))
current_app.logger.error(traceback.format_exc())
return 102, e
|
the-stack_0_135 | from functools import reduce
from jinja2 import Markup
import json
import logging
import os
import shutil
from sigal import signals
from sigal.utils import url_from_path
from sigal.writer import AbstractWriter
logger = logging.getLogger(__name__)
ASSETS_PATH = os.path.normpath(
os.path.join(os.path.abspath(os.path.dirname(__file__)), 'static', 'js'))
class PageWriter(AbstractWriter):
'''A writer for writing media pages, based on writer'''
template_file = "search.html"
def write(self, album):
''' Generate the media page and save it '''
from sigal import __url__ as sigal_link
page = self.template.render({
'album': album,
'index_title': self.index_title,
'settings': self.settings,
'sigal_link': sigal_link,
'theme': {'name': os.path.basename(self.theme),
'url': url_from_path(os.path.relpath(self.theme_path,
album.dst_path))},
})
output_file = os.path.join(album.dst_path, 'search.html')
with open(output_file, 'w', encoding='utf-8') as f:
f.write(page)
def generate_search(gallery):
id = 1
output_file = os.path.join(gallery.albums['.'].dst_path, 'static/js/search-content.js')
store = {}
for album in gallery.albums.values():
album_titles = " , ".join([*map(lambda x: x[1], album.breadcrumb)])
for item in album.medias:
data = {}
data['title'] = item.title
if 'author' in item.meta:
data['author'] = item.meta['author'][0]
data['url'] = "/" + item.path + "/" + item.url
data['thumbnail'] = item.thumbnail
data['mime'] = item.mime
if 'slides' in item.meta:
data['slides'] = item.meta['slides'][0]
            data['album'] = album_titles
store[str(id)] = data
id = id + 1
with open(output_file, 'w', encoding='utf8') as f:
f.write("window.store = ")
f.write(json.dumps(store))
writer = PageWriter(gallery.settings, index_title="Search Results")
writer.write(gallery.albums['.'])
shutil.copyfile(os.path.join(ASSETS_PATH, 'lunr.js'),
os.path.join(gallery.albums['.'].dst_path, 'static', 'js', 'lunr.js'))
def register(settings):
signals.gallery_build.connect(generate_search)
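# Hypothetical usage note (not part of the original plugin): with sigal, a
# plugin like this is typically enabled from sigal.conf.py. The module path and
# theme name below are placeholders that depend on your project layout:
#
#     plugins = ['plugins.search']   # wherever this module is importable from
#     theme = 'my-theme'             # the theme must provide a search.html template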
|
the-stack_0_136 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 12 11:52:04 2021
@author: Sarah
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 7 11:30:49 2021
@author: Sarah
"""
import pandas as pd
import pandasql
import dash
from dash.dependencies import Input, Output
import dash_core_components as dcc #this had to be changed
#from dash import dcc
import dash_html_components as html #this as well
#from dash import html
import plotly.express as px
from urllib.request import urlopen
import json
pd.options.mode.chained_assignment = None # default='warn'
# get vaccination data from rki vaccination github repo:
# (https://github.com/robert-koch-institut/COVID-19-Impfungen_in_Deutschland)
url_vacc_data = "https://raw.githubusercontent.com/robert-koch-institut/COVID-19-Impfungen_in_Deutschland/master/Aktuell_Deutschland_Impfquoten_COVID-19.csv"
# read-in data from csv-file (filter out Deutschland & Bundesressorts)
vacc_data = pd.read_csv(url_vacc_data, skiprows=[1, 18])
# Open Germany map as GeoJSON
with urlopen("https://raw.githubusercontent.com/isellsoap/deutschlandGeoJSON/main/2_bundeslaender/2_hoch.geo.json") as file:
germany_states = json.load(file)
# Read-in Covid-Data (States)
with urlopen("https://services7.arcgis.com/mOBPykOjAyBO2ZKk/arcgis/rest/services/Coronaf%C3%A4lle_in_den_Bundesl%C3%A4ndern/FeatureServer/0/query?where=1%3D1&outFields=LAN_ew_AGS,LAN_ew_GEN,Aktualisierung,cases7_bl_per_100k,death7_bl,cases7_bl_per_100k_txt,cases7_bl&outSR=4326&f=json") as cases_states:
covid_states = json.load(cases_states)
covid_data = pd.json_normalize(covid_states, record_path=['features'])
## Read in Voting-Results
with urlopen("https://raw.githubusercontent.com/julianrosenberger/VisualizationSDU/main/data/kerg2.csv?token=AQYCHUSY2GHUHR23UV3RZU3BYGNO2") as f:
data = pd.read_csv(f, delimiter=';', skiprows=9, usecols=['Gebietsnummer', 'Gebietsname', 'UegGebietsnummer', 'Gruppenart', 'Gruppenname', 'Gruppenreihenfolge', 'Stimme', 'Prozent'])
# #Deleting where Gruppenart!=Partei
df_clear=data[data.Gruppenart=="Partei"]
# deleting Stimme==2:
df_clear2 = df_clear[df_clear.Stimme==1]
# Grouped dataframe with only the states 1-16 (both incl.)
df_clear3 = df_clear2[df_clear2.Gebietsnummer < 17]
# Make sure Gebietsnummer belongs to state
df_clear4 = df_clear3[df_clear3.UegGebietsnummer == 99]
df_clear = df_clear4
# cleaning
print(df_clear['Prozent'].dtype) # object (string) --> convert to numeric
#(nan --> 0
df_clear['Prozent'] = df_clear['Prozent'].fillna(0)
# , --> .
df_clear['Prozent'] = (df_clear['Prozent'].replace(',', '.', regex=True).astype(float))
# string --> int
df_clear['Prozent'] = pd.to_numeric(df_clear['Prozent'])
#print(df_clear.to_string())
# Gruping by state:
df_group = df_clear.groupby('Gebietsnummer')
print(df_group)
#print(df_group['Gebietsnummer'] == 11)
for key, item in df_group:
print(df_group.get_group(key))
# Get the indices of the original dataframe to find out which party etc. it belongs to:
#idx = df_group(['Gebietsnummer'])['Prozent'].transform(max) == df_clear['Prozent']
#print(idx.head())
maximums = df_group['Prozent'].max()
#print(maximums.to_string())
#print(df_clear.loc[df_clear.groupby(['Gebietsnummer'])['Prozent'].idxmax()].reset_index(drop=True))
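# For each state (Gebietsnummer), keep the row of the party with the highest
# vote share, i.e. the winning party per state.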
winners = df_clear.loc[df_clear.groupby(['Gebietsnummer'])['Prozent'].idxmax()].reset_index(drop=True)
print(winners.to_string())
## Plot Vaccination Map
vacc = px.choropleth_mapbox(
mapbox_style='white-bg',
data_frame=vacc_data,
geojson=germany_states,
locations='Bundesland',
featureidkey='properties.name',
hover_name='Bundesland',
hover_data={'Bundesland': False,
'Impfquote_gesamt_voll': ':.2f%',
'Datum': True},
color='Impfquote_gesamt_voll',
color_continuous_scale=px.colors.sequential.Blues,
labels={'Impfquote_gesamt_voll': 'Fully vaccinated', 'Bundesland': 'State', 'Datum': 'Date'}
)
vacc.update_mapboxes(
center_lat=51.5,
center_lon=10.25,
zoom=4.6
)
vacc.update_layout(
margin={"r": 0, "t": 0, "l": 0, "b": 0})
## Plot Covid-Map
cov = px.choropleth_mapbox(
mapbox_style='white-bg',
data_frame=covid_data,
geojson=germany_states,
locations='attributes.LAN_ew_GEN',
featureidkey='properties.name',
hover_name='attributes.LAN_ew_GEN',
hover_data={'attributes.LAN_ew_GEN': False,
'attributes.cases7_bl_per_100k': ':.2f',
'attributes.death7_bl': True},
color='attributes.cases7_bl_per_100k',
color_continuous_scale=px.colors.sequential.YlOrRd,
labels={'attributes.cases7_bl_per_100k': '7-day incidence', 'attributes.LAN_ew_GEN': 'State', 'attributes.death7_bl': '7-day deaths'}
)
cov.update_layout(
margin={"r": 0, "t": 0, "l": 0, "b": 0})
cov.update_mapboxes(
center_lat=51.5,
center_lon=10.25,
zoom=4.6
)
## Plot Voting-results
vote = px.choropleth_mapbox(
mapbox_style='white-bg',
data_frame=winners,
geojson=germany_states,
locations='Gebietsname',
featureidkey='properties.name',
hover_name='Gebietsname',
hover_data={'Gebietsname': False,
'Gruppenname': True,
'Prozent': ':.2f%'},
color='Gruppenname',
color_discrete_map={'SPD': "#E3000F",
"CDU": "#32302e",
"CSU": "#32302e",
"AfD": "#009ee0"},
labels={'Gebietsname': 'State', 'Gruppenname': 'Party', 'Prozent': 'Result'}
)
vote.update_layout(
margin={"r": 0, "t": 0, "l": 0, "b": 0})
vote.update_mapboxes(
center_lat=51.5,
center_lon=10.25,
zoom=4.6
)
## Plot Voting-results in form of pie chart:
# want for entire Germany, instead of states:
vote_germ=data[data.Gebietsnummer==99]
vote_germ = vote_germ[vote_germ.Stimme==1]
vote_germ=vote_germ[vote_germ.Gruppenart=="Partei"]
vote_germ=vote_germ[vote_germ.Gebietsname=="Bundesgebiet"]
# cleaning
#(nan --> 0
vote_germ['Prozent'] = vote_germ['Prozent'].fillna(0)
# , --> .
vote_germ['Prozent'] = (vote_germ['Prozent'].replace(',', '.', regex=True).astype(float))
# string --> int
vote_germ['Prozent'] = pd.to_numeric(vote_germ['Prozent'])
#print(vote_germ.to_string())
# 47 different states. Diving into: SPD, CDU/CSU, AfD, and "Others":
#vote_germ.loc[vote_germ['Gruppenname'] == "CDU", 'Gruppenname'] = "CDU/CSU"
#vote_germ.loc[vote_germ['Gruppenname'] == "CSU", 'Gruppenname'] = "CDU/CSU"
vote_germ.loc[vote_germ['Prozent'] < 6, 'Gruppenname'] = "Other"
vote_germ.loc[vote_germ['Gruppenname'] == "FDP", 'Gruppenname'] = "Other"
vote_germ.loc[vote_germ['Gruppenname'] == "GRÜNE", 'Gruppenname'] = "Other"
vote_chart = px.pie(vote_germ, values='Prozent', names='Gruppenname', color='Gruppenname',
color_discrete_map={'SPD':'#E3000F',
'CDU':'32302e',
'CSU':'#0080c8',
'AfD':'009ee0',
'Other':'grey'})
#vote_chart.show()
## Build web app with dash
app = dash.Dash(__name__)
app.layout = lambda: html.Div([
# H1-Header
html.H1(children="Does voting against vaccinations mean voting for COVID?",
style={'textAlign': 'center', 'fontFamily': 'Helvetica, Arial, sans-serif'}),
html.Div([
html.Div([
dcc.Graph(figure=vacc)
], style={'width': '33%', 'float': 'left'}),
html.Div([
dcc.Graph(figure=cov)
], style={'width': '33%', 'float': 'left'}),
html.Div([
dcc.Graph(figure=vote)
], style={'width': '33%', 'float': 'left'})
]),
html.Div([
html.Div([
dcc.Graph(figure=vacc)
], style={'width': '33%', 'float': 'left'}),
html.Div([
dcc.Graph(figure=cov)
], style={'width': '33%', 'float': 'left'}),
html.Div([
dcc.Graph(figure=vote)
], style={'width': '33%', 'float': 'left'})
])
])
if __name__ == '__main__':
app.run_server(debug=True, port=8080)
|
the-stack_0_137 | # -*- coding: utf-8 -*-
__version__ = '19.9.0.dev1'
PROJECT_NAME = "galaxy-data"
PROJECT_OWNER = PROJECT_USERAME = "galaxyproject"
PROJECT_URL = "https://github.com/galaxyproject/galaxy"
PROJECT_AUTHOR = 'Galaxy Project and Community'
PROJECT_DESCRIPTION = 'Galaxy Datatype Framework and Datatypes'
PROJECT_EMAIL = '[email protected]'
RAW_CONTENT_URL = "https://raw.github.com/%s/%s/master/" % (
PROJECT_USERAME, PROJECT_NAME
)
|
the-stack_0_138 | from __future__ import unicode_literals
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Field, Layout, Submit
from django import forms
from django.contrib.auth import get_user_model
from . import models
User = get_user_model()
class UserForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(UserForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.layout = Layout(
Field('name'),
)
class Meta:
model = User
fields = ['name']
class ProfileForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(ProfileForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.layout = Layout(
Field('picture'),
Field('bio'),
Submit('update', 'Update', css_class="btn-success"),
)
class Meta:
model = models.Profile
fields = ['picture', 'bio']
|
the-stack_0_139 | # SPDX-FileCopyrightText: 2019 Scott Shawcroft for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`adafruit_bitmap_font.bdf`
====================================================
Loads BDF format fonts.
* Author(s): Scott Shawcroft
Implementation Notes
--------------------
**Hardware:**
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
import gc
from fontio import Glyph
from .glyph_cache import GlyphCache
__version__ = "1.3.4"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_Bitmap_Font.git"
class BDF(GlyphCache):
"""Loads glyphs from a BDF file in the given bitmap_class."""
def __init__(self, f, bitmap_class):
super().__init__()
self.file = f
self.name = f
self.file.seek(0)
self.bitmap_class = bitmap_class
line = self.file.readline()
line = str(line, "utf-8")
if not line or not line.startswith("STARTFONT 2.1"):
raise ValueError("Unsupported file version")
self.point_size = None
self.x_resolution = None
self.y_resolution = None
self._ascent = None
self._descent = None
@property
def descent(self):
"""The number of pixels below the baseline of a typical descender"""
if self._descent is None:
self.file.seek(0)
while True:
line = self.file.readline()
if not line:
break
if line.startswith(b"FONT_DESCENT "):
self._descent = int(line.split()[1])
break
return self._descent
@property
def ascent(self):
"""The number of pixels above the baseline of a typical ascender"""
if self._ascent is None:
self.file.seek(0)
while True:
line = self.file.readline()
line = str(line, "utf-8")
if not line:
break
if line.startswith("FONT_ASCENT "):
self._ascent = int(line.split()[1])
break
return self._ascent
def get_bounding_box(self):
"""Return the maximum glyph size as a 4-tuple of: width, height, x_offset, y_offset"""
self.file.seek(0)
while True:
line = self.file.readline()
line = str(line, "utf-8")
if not line:
break
if line.startswith("FONTBOUNDINGBOX "):
_, x, y, x_offset, y_offset = line.split()
return (int(x), int(y), int(x_offset), int(y_offset))
return None
def load_glyphs(self, code_points):
# pylint: disable=too-many-statements,too-many-branches,too-many-nested-blocks,too-many-locals
metadata = True
character = False
code_point = None
bytes_per_row = 1
desired_character = False
current_info = {}
current_y = 0
rounded_x = 1
if isinstance(code_points, int):
remaining = set()
remaining.add(code_points)
elif isinstance(code_points, str):
remaining = set(ord(c) for c in code_points)
elif isinstance(code_points, set):
remaining = code_points
else:
remaining = set(code_points)
for code_point in remaining.copy():
if code_point in self._glyphs and self._glyphs[code_point]:
remaining.remove(code_point)
if not remaining:
return
x, _, _, _ = self.get_bounding_box()
self.file.seek(0)
while True:
line = self.file.readline()
if not line:
break
if line.startswith(b"CHARS "):
metadata = False
elif line.startswith(b"SIZE"):
_, self.point_size, self.x_resolution, self.y_resolution = line.split()
elif line.startswith(b"COMMENT"):
pass
elif line.startswith(b"STARTCHAR"):
# print(lineno, line.strip())
# _, character_name = line.split()
character = True
elif line.startswith(b"ENDCHAR"):
character = False
if desired_character:
bounds = current_info["bounds"]
shift = current_info["shift"]
gc.collect()
self._glyphs[code_point] = Glyph(
current_info["bitmap"],
0,
bounds[0],
bounds[1],
bounds[2],
bounds[3],
shift[0],
shift[1],
)
remaining.remove(code_point)
if not remaining:
return
desired_character = False
elif line.startswith(b"BBX"):
if desired_character:
_, x, y, x_offset, y_offset = line.split()
x = int(x)
y = int(y)
x_offset = int(x_offset)
y_offset = int(y_offset)
current_info["bounds"] = (x, y, x_offset, y_offset)
current_info["bitmap"] = self.bitmap_class(x, y, 2)
elif line.startswith(b"BITMAP"):
if desired_character:
rounded_x = x // 8
if x % 8 > 0:
rounded_x += 1
bytes_per_row = rounded_x
if bytes_per_row % 4 > 0:
bytes_per_row += 4 - bytes_per_row % 4
current_y = 0
elif line.startswith(b"ENCODING"):
_, code_point = line.split()
code_point = int(code_point)
if code_point in remaining:
desired_character = True
current_info = {"bitmap": None, "bounds": None, "shift": None}
elif line.startswith(b"DWIDTH"):
if desired_character:
_, shift_x, shift_y = line.split()
shift_x = int(shift_x)
shift_y = int(shift_y)
current_info["shift"] = (shift_x, shift_y)
elif line.startswith(b"SWIDTH"):
pass
elif character:
if desired_character:
bits = int(line.strip(), 16)
width = current_info["bounds"][0]
start = current_y * width
x = 0
for i in range(rounded_x):
val = (bits >> ((rounded_x - i - 1) * 8)) & 0xFF
for j in range(7, -1, -1):
if x >= width:
break
bit = 0
if val & (1 << j) != 0:
bit = 1
current_info["bitmap"][start + x] = bit
x += 1
current_y += 1
elif metadata:
# print(lineno, line.strip())
pass
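# Hypothetical usage sketch (not part of the original module). Loading glyphs
# through the higher-level loader is assumed to look roughly like:
#
#     import displayio
#     from adafruit_bitmap_font import bitmap_font
#     font = bitmap_font.load_font("fonts/myfont.bdf", displayio.Bitmap)
#     font.load_glyphs("Hello")
#     glyph = font.get_glyph(ord("H"))
#
# The font path is a placeholder; the loader dispatches to this BDF class when
# the file begins with "STARTFONT 2.1".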
|
the-stack_0_142 | import torch
from torch import nn
from torch.nn import functional as F
class DetLoss(nn.Module):
def __init__(self):
super().__init__()
self.hm_criterion = nn.BCEWithLogitsLoss(reduction='none')
self.ori_criterion = nn.SmoothL1Loss(reduction='none')
self.box_criterion = nn.SmoothL1Loss(reduction='none')
def forward(self,
pred_heatmaps, heatmaps,
pred_sizemaps, sizemaps,
            pred_orimaps, orimaps,
):
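        # p_det is small wherever the prediction already agrees with the heatmap
        # target, so the BCE term is re-weighted towards hard examples; size_w
        # (the per-pixel heatmap response) restricts the box/orientation
        # regression losses to object centres.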
size_w, _ = heatmaps.max(dim=1, keepdim=True)
p_det = torch.sigmoid(pred_heatmaps * (1-2*heatmaps))
det_loss = (self.hm_criterion(pred_heatmaps, heatmaps)*p_det).mean() / p_det.mean()
box_loss = (size_w * self.box_criterion(pred_sizemaps, sizemaps)).mean() / size_w.mean()
ori_loss = (size_w * self.ori_criterion(pred_orimaps, orimaps)).mean() / size_w.mean()
return det_loss, box_loss, ori_loss
class SegLoss(nn.Module):
def __init__(self):
super().__init__()
self.criterion = nn.BCEWithLogitsLoss(reduction='none')
def forward(self, pred_bev, bev):
return self.criterion(pred_bev, bev).mean()
class MotLoss(nn.Module):
def __init__(self, distill, smooth):
super().__init__()
self.bc_criterion = nn.L1Loss(reduction='none')
self.cmd_criterion = nn.BCELoss()
self.distill = distill
self.smooth = smooth
def forward(self, plan_locs, cast_locs, locs, pred_cmds, expert_locs, expert_cmds, cmds, idxs=None):
T = locs.size(1)
N = pred_cmds.size(1)
plan_locs = plan_locs.gather(1, cmds.expand(T,2,1,-1).permute(3,2,0,1)).squeeze(1)
plan_losses = self.bc_criterion(plan_locs, locs).mean(dim=[1,2])
if self.distill:
cast_loss = self.bc_criterion(cast_locs, expert_locs.detach()).mean()
cmd_loss = self.cmd_criterion(pred_cmds, expert_cmds.detach())
else:
cast_locs = cast_locs.gather(1, cmds.expand(T,2,1,-1).permute(3,2,0,1)).squeeze(1)
cast_loss = self.bc_criterion(cast_locs, locs).mean()
cmds_label = (1.-self.smooth) * F.one_hot(cmds, N) + self.smooth / N
cmd_loss = self.cmd_criterion(pred_cmds, cmds_label)
if idxs is None:
plan_loss = plan_losses.mean()
else:
plan_loss = plan_losses[idxs].mean()
return (plan_loss + cast_loss) / 2, cmd_loss
def others_forward(self, cast_locs, expert_locs, locs):
if self.distill:
return self.bc_criterion(cast_locs, expert_locs).mean()
else:
other_bc_losses = self.bc_criterion(cast_locs, locs).mean(dim=[2,3])
return other_bc_losses.min(1)[0].mean()
def bev_forward(self, plan_locs, cast_locs, locs, pred_cmds, cmds, idxs=None):
T = locs.size(1)
N = pred_cmds.size(1)
plan_locs = plan_locs.gather(1, cmds.expand(T,2,1,-1).permute(3,2,0,1)).squeeze(1)
plan_losses = self.bc_criterion(plan_locs, locs).mean(dim=[1,2])
cast_locs = cast_locs.gather(1, cmds.expand(T,2,1,-1).permute(3,2,0,1)).squeeze(1)
cast_loss = self.bc_criterion(cast_locs, locs).mean()
cmd_loss = self.cmd_criterion(pred_cmds, F.one_hot(cmds, N).float())
if idxs is None:
plan_loss = plan_losses.mean()
else:
plan_loss = plan_losses[idxs].mean()
return (plan_loss + cast_loss) / 2, cmd_loss
|
the-stack_0_143 | from django.db.models.signals import pre_save
from django.dispatch import receiver
from order.models import Order
from order.tpaga import revertedPaid
@receiver(pre_save, sender=Order)
def changeReverted(sender, instance, **kwargs):
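    # When an order is about to be saved with status 'reverted', call the TPaga
    # refund helper; if the refund fails, fall back to the previous status.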
try:
old = sender.objects.get(id=instance.id)
status = old.status
except:
status = 'created'
if instance.status == 'reverted':
isSuccess = revertedPaid(instance.token)
if not isSuccess:
instance.status = status
instance.save()
|
the-stack_0_144 | import bing_face_api as bfa
if __name__ == '__main__':
    '''
    To take the image directory from a command-line argument instead,
    pass it as the first argument:
    search_dir = sys.argv[1]
    '''
    # Directory of images to run face detection on
    search_dir = "./image/original/"
    # Collect the paths of the images to process
    img_path_list = bfa.get_image_path_list(search_dir)
    # Run face detection
bfa.detect_image(img_path_list)
|
the-stack_0_148 | # -*- coding: utf-8 -*-
import asyncio
from datetime import datetime
from cmyui import log, Ansi
from cmyui.osu import Mods
from discord import Embed
from discord.ext import commands
from discord.threads import Thread
from tinydb.operations import set as dbset
from tinydb.queries import Query
from objects.sakuro import Sakuro, ContextWrap
from osu.calculator import Calculator
from objects import glob, config
from utils.misc import convert_status_str, convert_str_status, make_safe_name, convert_grade_emoji, sakuru_only
from objects.user import UserHelper
from utils.wrappers import sakuroCommand
from utils.misc import BEATMAP_REGEX
QUEUE_EMOJIS = (
'1️⃣',
'2️⃣',
'3️⃣',
'4️⃣',
'5️⃣'
)
class AdminCog(commands.Cog, name='Admin'):
"""Utilities for admins."""
def __init__(self, bot: Sakuro):
self.bot = bot
self.hide = True
@sakuroCommand(name='reload', hidden=True)
@commands.is_owner()
async def _reload(self, ctx: ContextWrap, module: str):
"""Reloads a module."""
try:
self.bot.unload_extension(module)
self.bot.load_extension(module)
except Exception as e:
await ctx.send('\N{PISTOL}')
await ctx.send('{}: {}'.format(type(e).__name__, e))
else:
await ctx.send('\N{OK HAND SIGN}')
@sakuroCommand(hidden=True)
@commands.is_owner()
async def shutdown(self, ctx: ContextWrap) -> None:
await ctx.send('Night night..')
await self.bot.close()
@sakuroCommand(hidden=True)
@commands.check(sakuru_only)
@commands.has_permissions(ban_members=True)
async def replay(self, ctx: ContextWrap, nickname: str, mods: str, map_id: int):
player = await UserHelper.getOsuUserByName(make_safe_name(nickname), 'info')
description = ""
if not player:
async with glob.http.get("https://sakuru.pw/api/search", params={
"q": nickname
}) as resp:
if resp.status == 200:
data = await resp.json()
if data['length'] == 0:
return await ctx.send(f"Nothing matched with {nickname} not found, check your spelling.")
embed = Embed(
color=ctx.author.color,
timestamp=datetime.now()
)
embed.set_author(name=f"Search queue for {nickname}")
for idx, row in enumerate(data['matches']):
description += f"**{idx + 1}.** [{row['name']}](https://sakuru.pw/u/{row['id']})\n"
embed.description = description
description = ""
message = await ctx.send(embed=embed)
for emoji in QUEUE_EMOJIS[:data['length']]:
await message.add_reaction(emoji)
try:
reaction, user = await self.bot.wait_for('reaction_add',
check=lambda r, u: r.message.id == message.id \
and r.emoji in QUEUE_EMOJIS \
and u == ctx.author,
timeout=60.0)
except asyncio.TimeoutError:
await ctx.send('Time is out!')
else:
player = await UserHelper.getOsuUserByName(
make_safe_name(
data['matches'][QUEUE_EMOJIS.index(reaction.emoji)]['name']),
'info'
)
await message.delete()
else:
return await ctx.send("Error! Try again.")
scores = await UserHelper.getUserScores(player['id'], 0, mods, 5, 'best', map_id)
if len(scores) == 0:
return await ctx.send(f"This player has no scores on `{map_id}`!")
map_fullname = ""
for idx, score in enumerate(scores):
calc = await Calculator.calculate(
score['beatmap']['id'],
0,
score['mods'],
score['acc'],
None
)
map_fullname = calc['map_fullname']
description += f"""** {idx + 1}. {f' +{Mods(score["mods"])!r}' if score['mods'] != 0 else ''}** [{calc['stars']:.2f}★]\n""" \
f"▸ {convert_grade_emoji(score['grade'])} ▸ **{score['pp']:.2f}PP**" \
f"""{f' *({calc["pp"]:.2f}PP for {score["acc"]:.2f}% FC)*' if score['grade'] not in ('S', 'SS', 'X', 'SH') else ''} """ \
f"▸ {score['acc']:.2f}%\n▸ {score['score']} ▸ x{score['max_combo']}/{score['beatmap']['max_combo']} " \
f"▸ [{score['n300']}/{score['n100']}/{score['n50']}/{score['nmiss']}]\n" \
f"▸ [Score Set <t:{datetime.fromisoformat(score['play_time']).timestamp().__int__()}:R>]" \
f"(https://osu.sakuru.pw/api/get_replay?id={score['id']})\n"
embed = Embed(color=ctx.author.color, description=description)
embed.set_author(name=f"Top {len(scores)} Plays for {player['name']} on {map_fullname}",
url=f"https://sakuru.pw/u/{player['id']}",
icon_url=f"https://sakuru.pw/static/flags/{player['country'].upper()}.png")
embed.set_footer(text="Click on Score Set to download replay.",
icon_url="https://sakuru.pw/static/ingame.png")
await ctx.send(embed=embed)
@sakuroCommand(hidden=True)
@commands.check(sakuru_only)
@commands.has_permissions(ban_members=True)
async def restrict(self, ctx: ContextWrap, nickname: str, *reason: str):
if not await UserHelper.getOsuUserByName(make_safe_name(nickname), 'info'):
return await ctx.send(f"Player with nickname {nickname} not found.")
admin = await UserHelper.getDiscordUser(ctx.message.author.id)
async with glob.http.get("https://osu.sakuru.pw/api/handle_admin",
params={
"secret": config.API_SECRET,
"action": "restrict",
"nickname": make_safe_name(nickname),
"reason": ' '.join(reason),
"admin": admin['safe_name']
}) as resp:
if resp.status == 200:
await ctx.message.add_reaction('\N{OK HAND SIGN}')
else:
return await ctx.send("Error occurred.")
@sakuroCommand(hidden=True)
@commands.check(sakuru_only)
@commands.has_permissions(ban_members=True)
async def unrestrict(self, ctx: ContextWrap, nickname: str, *reason: str):
if not await UserHelper.getOsuUserByName(make_safe_name(nickname), 'info'):
return await ctx.send(f"Player with nickname {nickname} not found.")
admin = await UserHelper.getDiscordUser(ctx.message.author.id)
async with glob.http.get("https://osu.sakuru.pw/api/handle_admin",
params={
"secret": config.API_SECRET,
"action": "unrestrict",
"nickname": make_safe_name(nickname),
"reason": ' '.join(reason),
"admin": admin['safe_name']
}) as resp:
if resp.status == 200:
await ctx.message.add_reaction('\N{OK HAND SIGN}')
else:
return await ctx.send("Error occurred.")
@sakuroCommand(hidden=True)
@commands.check(sakuru_only)
@commands.has_permissions(ban_members=True)
async def rqmap(self, ctx: ContextWrap, status: str, type: str):
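        # Change the ranked status of a requested beatmap ("map") or of the whole
        # beatmapset ("set") from inside an active map-request thread.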
if (
not isinstance(ctx.message.channel, Thread) or
not ctx.message.channel.parent_id == config.MAP_REQS
):
return
if ctx.message.channel.archived:
return
req_table = glob.db.table('map_reqs')
Requests = Query()
req = req_table.get((Requests.thread_id == ctx.message.channel.id) & (Requests.active == True))
admin = await UserHelper.getDiscordUser(ctx.message.author.id)
if not admin:
            return await ctx.send('who are you?')
if type not in ('map', 'set'):
msg = await ctx.reply('Invalid type! (map, set)')
await msg.delete(delay=15)
await ctx.message.delete(delay=15)
return
if status not in ('love', 'rank', 'unrank'):
msg = await ctx.reply('Invalid status! (love, rank, unrank)')
await msg.delete(delay=15)
await ctx.message.delete(delay=15)
return
if type == "map":
params = {
"set_id": req['beatmap']['set_id']
}
async with glob.http.get("https://osu.sakuru.pw/api/get_map_info", params=params) as resp:
if (
resp and resp.status == 200 and
resp.content.total_bytes != 2 # b'[]'
):
bmaps = await resp.json()
embed = Embed(
title=f"Pick a map to edit status on.",
timestamp=datetime.now(),
color=0xeaff00
)
description = ""
for idx, bmap in enumerate(bmaps['set']):
description += f"`#{idx + 1}.` **[{bmap['version']}]** - {convert_status_str(int(bmap['status']))}\n"
embed.description = description
emb_mess = await ctx.send("**Send position in chat to pick a map.**", embed=embed)
valid = False
while valid is False:
try:
reply = await self.bot.wait_for('message', check=lambda x: x.channel == ctx.channel and x.author == ctx.author and x.content.isdecimal(),
timeout=60.0)
except asyncio.TimeoutError:
msg = await ctx.send('Time is up!')
await msg.delete(delay=15)
await emb_mess.delete(delay=15)
return
else:
reply.content = int(reply.content)
                        if reply.content > len(bmaps['set']) or reply.content <= 0:
msg = await ctx.send('Specified position is out of range.')
await reply.delete(delay=15)
await msg.delete(delay=15)
else:
if (bm_status := bmaps['set'][reply.content - 1]['status']) == convert_str_status(status):
msg = await ctx.send(f"This map is already {convert_status_str(bm_status)}")
await msg.delete(delay=15)
await reply.delete(delay=15)
else:
await reply.delete()
await emb_mess.delete()
valid = True
params = {
"secret": config.API_SECRET,
"action": "status_map",
"admin": admin['safe_name'],
"map_id": bmaps['set'][reply.content - 1]['id'],
"status": status
}
async with glob.http.get("https://osu.sakuru.pw/api/handle_admin", params=params) as resp:
if resp.status == 200:
await ctx.message.add_reaction('\N{OK HAND SIGN}')
else:
pass
        elif type == "set":
params = {
"set_id": req['beatmap']['set_id']
}
async with glob.http.get("https://osu.sakuru.pw/api/get_map_info", params=params) as resp:
if (
resp and resp.status == 200 and
resp.content.total_bytes != 2 # b'[]'
):
bmaps = await resp.json()
if all([x['status'] == convert_str_status(status) for x in bmaps['set']]):
msg = await ctx.send(f"This set is already {convert_status_str(bmaps['set'][0]['status'])}")
await ctx.message.delete(delay=15)
await msg.delete(delay=15)
return
params = {
"secret": config.API_SECRET,
"action": "status_set",
"admin": admin['safe_name'],
"set_id": req['beatmap']['set_id'],
"status": status
}
async with glob.http.get("https://osu.sakuru.pw/api/handle_admin", params=params) as resp:
if resp.status == 200:
await ctx.message.add_reaction('\N{OK HAND SIGN}')
else:
pass
@sakuroCommand(hidden=True)
@commands.check(sakuru_only)
@commands.has_permissions(ban_members=True)
async def rqclose(self, ctx: ContextWrap):
if (
not isinstance(ctx.message.channel, Thread) or
not ctx.message.channel.parent_id == config.MAP_REQS
):
return
if ctx.message.channel.archived:
return
req_table = glob.db.table('map_reqs')
Requests = Query()
req = req_table.get((Requests.thread_id == ctx.message.channel.id) & (Requests.active == True))
req_table.update(
dbset('active', False),
doc_ids=[req.doc_id]
)
first_message = await ctx.message.channel.parent.fetch_message(req['original_id'])
await first_message.delete()
await ctx.channel.delete()
@sakuroCommand(hidden=True)
@commands.check(sakuru_only)
@commands.has_permissions(ban_members=True)
async def rqreject(self, ctx: ContextWrap, *reason: str):
if (
not isinstance(ctx.message.channel, Thread) or
not ctx.message.channel.parent_id == config.MAP_REQS
):
return
if ctx.message.channel.archived:
return
req_table = glob.db.table('map_reqs')
Requests = Query()
req = req_table.get((Requests.thread_id == ctx.message.channel.id) & (Requests.active == True))
first_message = await ctx.message.channel.parent.fetch_message(req['original_id'])
requester = ctx.guild.get_member(req['requester'])
params = {
"id": req['beatmap']['id']
}
async with glob.http.get("https://osu.sakuru.pw/api/get_map_info", params=params) as resp:
if (
resp and resp.status == 200 and
resp.content.total_bytes != 2 # b'[]'
):
bmap = (await resp.json())['map']
embed = Embed(
title=f"Map Request: {bmap['artist']} - {bmap['title']}",
color=ctx.author.color,
description=f"Your request has been rejected!\n**Reason:** `{' '.join(reason)}`\n\n**Nominator:** {ctx.author.mention}",
timestamp=datetime.now()
)
embed.set_footer(text="Sakuru.pw osu! Private Server.")
embed.set_thumbnail(url=ctx.author.avatar.url)
await requester.send(embed=embed)
req_table.update(
dbset('active', False),
doc_ids=[req.doc_id]
)
await first_message.delete()
await ctx.channel.delete()
def setup(bot):
log(f"Initiated {__name__} cog!", Ansi.CYAN)
bot.add_cog(AdminCog(bot))
|
the-stack_0_151 | from gpflow.kernels import Kernel
from gpflow.utilities import positive
from gpflow import Parameter
import tensorflow as tf
from tensorflow_probability import bijectors as tfb
class Batch_simple_SSK(Kernel):
"""
with hyperparameters:
1) match_decay float
decrease the contribution of long subsequences
3) max_subsequence_length int
largest subsequence considered
"""
def __init__(self,active_dims=[0],decay=0.1,max_subsequence_length=3,
alphabet = [], maxlen=0, batch_size=100):
super().__init__(active_dims=active_dims)
# constrain decay kernel params to between 0 and 1
self.logistic = tfb.Chain([tfb.Shift(tf.cast(0,tf.float64))(tfb.Scale(tf.cast(1,tf.float64))),tfb.Sigmoid()])
self.decay_param= Parameter(decay, transform=self.logistic ,name="decay")
        # we will use copies of the kernel params to stop building an expensive computation graph
        # we instead efficiently calculate gradients using dynamic programming
# These params are updated at every call to K and K_diag (to check if parameters have been updated)
self.decay = self.decay_param.numpy()
self.decay_unconstrained = self.decay_param.unconstrained_variable.numpy()
self.order_coefs=tf.ones(max_subsequence_length,dtype=tf.float64)
# store additional kernel parameters
self.max_subsequence_length = tf.constant(max_subsequence_length)
self.alphabet = tf.constant(alphabet)
self.alphabet_size=tf.shape(self.alphabet)[0]
self.maxlen = tf.constant(maxlen)
self.batch_size = tf.constant(batch_size)
# build a lookup table of the alphabet to encode input strings
self.table = tf.lookup.StaticHashTable(
initializer=tf.lookup.KeyValueTensorInitializer(
keys=tf.constant(["PAD"]+alphabet),
values=tf.constant(range(0,len(alphabet)+1)),),default_value=0)
        # initialize helpful construction matrices to be lazily computed once needed
self.D = None
self.dD_dgap = None
def K_diag(self, X):
r"""
The diagonal elements of the string kernel are always unity (due to normalisation)
"""
return tf.ones(tf.shape(X)[:-1],dtype=tf.float64)
def K(self, X1, X2=None):
r"""
Vectorized kernel calc.
Following notation from Beck (2017), i.e have tensors S,D,Kpp,Kp
Input is two tensors of shape (# strings , # characters)
and we calc the pair-wise kernel calcs between the elements (i.e n kern calcs for two lists of length n)
        D is the tensor that unrolls the recursion and allows vectorization
"""
# Turn our inputs into lists of integers using one-hot embedding
# first split up strings and pad to fixed length and prep for gpu
# pad until all have length of self.maxlen
# turn into one-hot i.e. shape (# strings, #characters+1, alphabet size)
X1 = tf.strings.split(tf.squeeze(X1,1)).to_tensor("PAD",shape=[None,self.maxlen])
X1 = self.table.lookup(X1)
# keep track of original input sizes
X1_shape = tf.shape(X1)[0]
X1 = tf.one_hot(X1,self.alphabet_size+1,dtype=tf.float64)
if X2 is None:
X2 = X1
X2_shape = X1_shape
self.symmetric = True
else:
self.symmetric = False
X2 = tf.strings.split(tf.squeeze(X2,1)).to_tensor("PAD",shape=[None,self.maxlen])
X2 = self.table.lookup(X2)
X2_shape = tf.shape(X2)[0]
X2 = tf.one_hot(X2,self.alphabet_size+1,dtype=tf.float64)
# prep the decay tensors
self._precalc()
        # combine all target strings and remove the ones in the first column that encode the padding (i.e. we don't want them to count as a match)
X_full = tf.concat([X1,X2],0)[:,:,1:]
# get indicies of all possible pairings from X and X2
# this way allows maximum number of kernel calcs to be squished onto the GPU (rather than just doing individual rows of gram)
indicies_2, indicies_1 = tf.meshgrid(tf.range(0, X1_shape ),tf.range(X1_shape , tf.shape(X_full)[0]))
indicies = tf.concat([tf.reshape(indicies_1,(-1,1)),tf.reshape(indicies_2,(-1,1))],axis=1)
if self.symmetric:
# if symmetric then only calc upper matrix (fill in rest later)
indicies = tf.boolean_mask(indicies,tf.greater_equal(indicies[:,1]+ X1_shape ,indicies[:,0]))
else:
# if not symmetric need to calculate some extra kernel evals for the normalization later on
indicies = tf.concat([indicies,tf.tile(tf.expand_dims(tf.range(tf.shape(X_full)[0]),1),(1,2))],0)
# make kernel calcs in batches
num_batches = tf.cast(tf.math.ceil(tf.shape(indicies)[0]/self.batch_size),dtype=tf.int32)
k_split = tf.TensorArray(tf.float64, size=num_batches,clear_after_read=False,infer_shape=False)
# iterate through batches
for j in tf.range(num_batches):
# collect strings for this batch
indicies_batch = indicies[self.batch_size*j:self.batch_size*(j+1)]
X_batch = tf.gather(X_full,indicies_batch[:,0],axis=0)
X2_batch = tf.gather(X_full,indicies_batch[:,1],axis=0)
# Make S: the similarity tensor of shape (# strings, #characters, # characters)
#S = tf.matmul( tf.matmul(X_batch,self.sim),tf.transpose(X2_batch,perm=(0,2,1)))
S = tf.matmul(X_batch,tf.transpose(X2_batch,perm=(0,2,1)))
# collect results for the batch
result = self.kernel_calc(S)
k_split = k_split.write(j,result)
# combine batch results
k = tf.expand_dims(k_split.concat(),1)
k_split.close()
# put results into the right places in the gram matrix and normalize
if self.symmetric:
# if symmetric then only put in top triangle (inc diag)
mask = tf.linalg.band_part(tf.ones((X1_shape,X2_shape),dtype=tf.int64), 0, -1)
non_zero = tf.not_equal(mask, tf.constant(0, dtype=tf.int64))
# Extracting the indices of upper triangle elements
indices = tf.where(non_zero)
out = tf.SparseTensor(indices,tf.squeeze(k),dense_shape=tf.cast((X1_shape,X2_shape),dtype=tf.int64))
k_results = tf.sparse.to_dense(out)
# add in mising elements (lower diagonal)
k_results = k_results + tf.linalg.set_diag(tf.transpose(k_results),tf.zeros(X1_shape,dtype=tf.float64))
# normalise
X_diag_Ks = tf.linalg.diag_part(k_results)
norm = tf.tensordot(X_diag_Ks, X_diag_Ks,axes=0)
k_results = tf.divide(k_results, tf.sqrt(norm))
else:
# otherwise can just reshape into gram matrix
# but first take extra kernel calcs off end of k and use them to normalise
X_diag_Ks = tf.reshape(k[X1_shape*X2_shape:X1_shape*X2_shape+X1_shape],(-1,))
X2_diag_Ks = tf.reshape(k[-X2_shape:],(-1,))
k = k[0:X1_shape*X2_shape]
k_results = tf.transpose(tf.reshape(k,[X2_shape,X1_shape]))
# normalise
norm = tf.tensordot(X_diag_Ks, X2_diag_Ks,axes=0)
k_results = tf.divide(k_results, tf.sqrt(norm))
return k_results
def _precalc(self):
r"""
Update stored kernel params (incase they have changed)
and precalc D and dD_dgap as required for kernel calcs
following notation from Beck (2017)
"""
self.decay = self.decay_param.numpy()
self.decay_unconstrained = self.decay_param.unconstrained_variable.numpy()
tril = tf.linalg.band_part(tf.ones((self.maxlen,self.maxlen),dtype=tf.float64), -1, 0)
# get upper triangle matrix of increasing intergers
values = tf.TensorArray(tf.int32, size= self.maxlen)
for i in tf.range(self.maxlen):
values = values.write(i,tf.range(-i-1,self.maxlen-1-i))
power = tf.cast(values.stack(),tf.float64)
values.close()
power = tf.linalg.band_part(power, 0, -1) - tf.linalg.band_part(power, 0, 0) + tril
tril = tf.transpose(tf.linalg.band_part(tf.ones((self.maxlen,self.maxlen),dtype=tf.float64), -1, 0))-tf.eye(self.maxlen,dtype=tf.float64)
gaps = tf.fill([self.maxlen, self.maxlen],self.decay)
self.D = tf.pow(gaps*tril, power)
self.dD_dgap = tf.pow((tril * gaps), (power - 1.0)) * tril * power
@tf.custom_gradient
def kernel_calc(self,S):
        # dummy computation to ensure the custom gradient path is used for the decay param
a = tf.square(self.decay_param)
if self.symmetric:
k, dk_dgap = tf.stop_gradient(self.kernel_calc_with_grads(S))
else:
k = tf.stop_gradient(self.kernel_calc_without_grads(S))
def grad(dy, variables=None):
# get gradients of unconstrained params
grads= {}
if self.symmetric:
grads['decay:0'] = tf.reduce_sum(tf.multiply(dy,dk_dgap*tf.math.exp(self.logistic.forward_log_det_jacobian(self.decay_unconstrained,0))))
gradient = [grads[v.name] for v in variables]
else:
gradient = [None for v in variables]
return ((None),gradient)
return k, grad
def kernel_calc_without_grads(self,S):
# store squared match coef for easier calc later
match_sq = tf.square(self.decay)
# calc subkernels for each subsequence length (See Moss et al. 2020 for notation)
Kp = tf.TensorArray(tf.float64,size=self.max_subsequence_length,clear_after_read=False)
# fill in first entries
Kp = Kp.write(0, tf.ones(shape=tf.stack([tf.shape(S)[0], self.maxlen,self.maxlen]), dtype=tf.float64))
# calculate dynamic programs
for i in tf.range(self.max_subsequence_length-1):
Kp_temp = tf.multiply(S, Kp.read(i))
Kp_temp0 = match_sq * Kp_temp
Kp_temp1 = tf.matmul(Kp_temp0,self.D)
Kp_temp2 = tf.matmul(self.D,Kp_temp1,transpose_a=True)
Kp = Kp.write(i+1,Kp_temp2)
# Final calculation. We gather all Kps
Kp_stacked = Kp.stack()
Kp.close()
# combine and get overall kernel
aux = tf.multiply(S, Kp_stacked)
aux = tf.reduce_sum(aux, -1)
sum2 = tf.reduce_sum(aux, -1)
Ki = sum2 * match_sq
k = tf.linalg.matvec(tf.transpose(Ki),self.order_coefs)
return k
def kernel_calc_with_grads(self,S):
# store squared match coef for easier calc later
match_sq = tf.square(self.decay)
# calc subkernels for each subsequence length (See Moss et al. 2020 for notation)
Kp = tf.TensorArray(tf.float64,size=self.max_subsequence_length,clear_after_read=False)
dKp_dgap = tf.TensorArray(tf.float64, size=self.max_subsequence_length, clear_after_read=False)
# fill in first entries
Kp = Kp.write(0, tf.ones(shape=tf.stack([tf.shape(S)[0], self.maxlen,self.maxlen]), dtype=tf.float64))
dKp_dgap = dKp_dgap.write(0, tf.zeros(shape=tf.stack([tf.shape(S)[0], self.maxlen,self.maxlen]), dtype=tf.float64))
# calculate dynamic programs
for i in tf.range(self.max_subsequence_length-1):
Kp_temp = tf.multiply(S, Kp.read(i))
Kp_temp0 = match_sq * Kp_temp
Kp_temp1 = tf.matmul(Kp_temp0,self.D)
Kp_temp2 = tf.matmul(self.D,Kp_temp1,transpose_a=True)
Kp = Kp.write(i+1,Kp_temp2)
dKp_dgap_temp_1 = tf.matmul(self.dD_dgap,Kp_temp1,transpose_a=True)
dKp_dgap_temp_2 = tf.multiply(S, dKp_dgap.read(i))
dKp_dgap_temp_2 = dKp_dgap_temp_2 * match_sq
dKp_dgap_temp_2 = tf.matmul(dKp_dgap_temp_2,self.D)
dKp_dgap_temp_2 = dKp_dgap_temp_2 + tf.matmul(Kp_temp0,self.dD_dgap)
dKp_dgap_temp_2 = tf.matmul(self.D,dKp_dgap_temp_2,transpose_a=True)
dKp_dgap = dKp_dgap.write(i+1,dKp_dgap_temp_1 + dKp_dgap_temp_2)
# Final calculation. We gather all Kps
Kp_stacked = Kp.stack()
Kp.close()
dKp_dgap_stacked = dKp_dgap.stack()
dKp_dgap.close()
# combine and get overall kernel
# get k
aux = tf.multiply(S, Kp_stacked)
aux = tf.reduce_sum(aux, -1)
sum2 = tf.reduce_sum(aux, -1)
Ki = sum2 * match_sq
k = tf.linalg.matvec(tf.transpose(Ki),self.order_coefs)
# get gap decay grads
temp = tf.multiply(S, dKp_dgap_stacked)
temp = tf.reduce_sum(temp, -1)
temp = tf.reduce_sum(temp, -1)
temp = temp * match_sq
dk_dgap = tf.linalg.matvec(tf.transpose(temp),self.order_coefs)
return k, dk_dgap
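# Hypothetical usage sketch (not part of the original file): building the kernel
# and evaluating a small normalised Gram matrix. The alphabet, maxlen and the
# space-separated input strings below are made-up placeholders.
#
#     import numpy as np
#     kern = Batch_simple_SSK(decay=0.1, max_subsequence_length=3,
#                             alphabet=list("ACGT"), maxlen=8, batch_size=100)
#     X = np.array([["A C G T"], ["A C C T"]])  # shape (n, 1), characters space-separated
#     K = kern.K(X)                             # (2, 2) similarity matrix with unit diagonal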
|
the-stack_0_154 | from os import getcwd
import sys
sys.path.append(getcwd() + '/..') # Add src/ dir to import path
import traceback
import logging
from os.path import join
from itertools import combinations
import networkx as nx
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import libs.osLib as ol
def removeDiagonal(A):
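    # Drop the main diagonal of a square (m, m) matrix by re-striding the
    # flattened array, returning an (m, m-1) array where each row is missing
    # its diagonal element.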
m = A.shape[0]
strided = np.lib.stride_tricks.as_strided
s0,s1 = A.strides
return strided(A.ravel()[1:], shape=(m-1, m), strides=(s0+s1, s1)).reshape(m, -1)
if __name__ == '__main__':
root = logging.getLogger()
root.setLevel(logging.DEBUG)
baseDir, outputDir = '../../data/adjacencyMatrices', '../../data/plots'
loadNodeMappings, loadAdjacencies = True, False
numClusters = 2
classMapping = {
'time': 'T',
'content': 'C',
'tag': 'G',
'location': 'L',
}
try:
# metapaths = [['time', 'content', 'time'], ['tag', 'content', 'tag'], ['location', 'content', 'location'] ] # ['time', 'content', 'time'] # #
metapaths = [['time', 'content', 'time']]
metapaths = [[classMapping[t] for t in metapath] for metapath in metapaths]
for metapath in metapaths:
nodeMapping = ol.loadPickle(join(baseDir, f'nodeMapping.pickle'))
# PathSim load
similarityM = ol.loadSparce(join(baseDir, f'similarity-{"".join(metapath)}.npz')).toarray()
# Sclump load
# similarityM = ol.loadNumpy(join(baseDir, f'SClump-similarity.npy'))
            similarityM = removeDiagonal(similarityM)  # Careful here - we're dropping each node's self-similarity, which breaks the index mapping from nodeMapping
# Remove all zeros
print(f'Orig shape: {similarityM.shape}')
similarityM = similarityM[~np.all(similarityM == 0, axis=1)]
similarityM = similarityM[:, ~np.all(similarityM == 0, axis=0)]
print(f'Without zeros shape: {similarityM.shape}')
# Plot simple value histogram
flattenSim = pd.Series(similarityM.flatten())
g = sns.distplot(flattenSim, kde=False, bins=10)
g.set_yscale('log')
            plt.title('Value count in Similarity Matrix')
            plt.savefig(join(outputDir, f'similarityValueDistribution-{"".join(metapath)}.png'))
print(similarityM.max())
# Count non-zeros per row
rowCountNonZero = np.count_nonzero(similarityM, axis=1)
# Count max value per row
rowCountMax = np.amax(similarityM, 1)
# Count min value (that's not a zero) per row
rowCountMinNonZero = np.where(similarityM > 0, similarityM, similarityM.max()).min(1)
# Count mean value (that's not a zero) per row
rowCountMeanNonZero = np.true_divide(similarityM.sum(1), (similarityM!=0).sum(1))
plotDf = None
for k, x in {
'Non zeros per row': rowCountNonZero,
'Max per row': rowCountMax,
'Mean per row (no zeros)': rowCountMeanNonZero,
'Min per row (no zeros)': rowCountMinNonZero,
}.items():
auxDf = pd.Series(x, name='Count').to_frame()
auxDf['Measure'] = k
plotDf = auxDf if plotDf is None else pd.concat([plotDf, auxDf], ignore_index=False)
# Make boxplot
fig, ax = plt.subplots(figsize=(15, 15))
g = sns.boxplot(ax=ax, data=plotDf, x='Measure', y='Count', palette="Set2", showfliers=True, showmeans=True)
g.set_yscale('log')
g.set_yticklabels(g.get_yticks(), size=16)
# g.set_xticklabels(g.get_xticks(), size=16)
plt.savefig(join(outputDir, f'statsPerRow-log-{"".join(metapath)}.png'))
plt.close()
# Make boxplot
fig, ax = plt.subplots(figsize=(15, 15))
g = sns.boxplot(ax=ax, data=plotDf, x='Measure', y='Count', palette="Set2", showfliers=False, showmeans=True)
g.set_yticklabels(g.get_yticks(), size=16)
# g.set_xticklabels(g.get_xticks(), size=16)
plt.savefig(join(outputDir, f'statsPerRow-{"".join(metapath)}.png'))
plt.close()
# Make violin plots
fig = plt.figure(figsize=(12, 12))
gs = fig.add_gridspec(3, 2)
ax = fig.add_subplot(gs[0, 0])
sns.violinplot(data=similarityM.flatten())
ax.set_xlabel("Similarity as is")
ax = fig.add_subplot(gs[0, 1])
sns.violinplot(data=rowCountNonZero)
ax.set_xlabel("Non zeros per row")
ax = fig.add_subplot(gs[1, 0])
sns.violinplot(rowCountMeanNonZero)
ax.set_xlabel("Mean per row (no zeros)")
ax = fig.add_subplot(gs[1, 1])
sns.violinplot(rowCountMinNonZero)
ax.set_xlabel("Min per row (no zeros)")
ax = fig.add_subplot(gs[2, 0])
sns.violinplot(data=rowCountMax)
ax.set_xlabel("Max per row")
fig.tight_layout()
plt.savefig(join(outputDir, f'statsViolinPerRow-{"".join(metapath)}.png'))
plt.close()
# Plot as matrix
"""
fig = plt.figure(figsize=(15, 15))
ax = plt.axes()
plt.spy(similarityM, precision=0.1, marker='.', markersize=0.05)
plt.savefig(join(outputDir, f'similarityMatrixPlot-{"".join(metapath)}.png'))
plt.close()
"""
# Select top k most similiar or wtv
# Pick their similarity vectors
# Plot them
except Exception as ex:
print(traceback.format_exc()) |
the-stack_0_155 | import asyncio
import datetime
import logging
import json
import functools
import re
import csv
from io import StringIO, BytesIO
from pathlib import Path
from tabulate import tabulate
from typing import List, Literal, Optional, Union
import discord
from redbot.core import Config, checks, commands
from redbot.core.i18n import Translator, cog_i18n
from redbot.core.utils.chat_formatting import pagify, humanize_timedelta, humanize_list, box
from redbot.core.utils.menus import start_adding_reactions
from redbot.core.utils.predicates import ReactionPredicate
from .api import DestinyAPI
from .converter import DestinyActivity, StatsPage, SearchInfo, DestinyEververseItemType
from .errors import Destiny2APIError, Destiny2MissingManifest
from .menus import BaseMenu, BasePages
DEV_BOTS = [552261846951002112]
# If you want parsing the manifest data to be easier add your
# bots ID to this list otherwise this should help performance
# on bots that are just running the cog like normal
BASE_URL = "https://www.bungie.net/Platform"
IMAGE_URL = "https://www.bungie.net"
AUTH_URL = "https://www.bungie.net/en/oauth/authorize"
TOKEN_URL = "https://www.bungie.net/platform/app/oauth/token/"
_ = Translator("Destiny", __file__)
log = logging.getLogger("red.trusty-cogs.Destiny")
@cog_i18n(_)
class Destiny(DestinyAPI, commands.Cog):
"""
Get information from the Destiny 2 API
"""
__version__ = "1.5.5"
__author__ = "TrustyJAID"
def __init__(self, bot):
self.bot = bot
default_global = {
"api_token": {"api_key": "", "client_id": "", "client_secret": ""},
"manifest_version": "",
}
default_user = {"oauth": {}, "account": {}}
self.config = Config.get_conf(self, 35689771456)
self.config.register_global(**default_global)
self.config.register_user(**default_user)
self.config.register_guild(clan_id=None)
self.throttle: float = 0
def format_help_for_context(self, ctx: commands.Context) -> str:
"""
Thanks Sinbad!
"""
pre_processed = super().format_help_for_context(ctx)
return f"{pre_processed}\n\nCog Version: {self.__version__}"
async def red_delete_data_for_user(
self,
*,
requester: Literal["discord_deleted_user", "owner", "user", "user_strict"],
user_id: int,
):
"""
Method for finding a user's data inside the cog and deleting it.
"""
await self.config.user_from_id(user_id).clear()
@commands.group()
async def destiny(self, ctx: commands.Context) -> None:
"""Get information from the Destiny 2 API"""
pass
@destiny.command()
async def forgetme(self, ctx: commands.Context) -> None:
"""
Remove your authorization to the destiny API on the bot
"""
await self.red_delete_data_for_user(requester="user", user_id=ctx.author.id)
await ctx.send(_("Your authorization has been reset."))
@destiny.group(aliases=["s"])
async def search(self, ctx: commands.Context) -> None:
"""
Search for a destiny item, vendor, record, etc.
"""
pass
async def get_weapon_possible_perks(self, weapon: dict) -> dict:
perks = {}
slot_counter = 1
count = 2
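        # Walk every socket on the weapon: intrinsic/cosmetic sockets are skipped,
        # randomized and reusable plug sets are expanded into their full perk pools,
        # and anything else falls back to the socket's single initial plug.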
for socket in weapon["sockets"]["socketEntries"]:
if socket["singleInitialItemHash"] in [
4248210736,
2323986101,
0,
2285418970,
1282012138,
2993594586,
]:
continue
if socket["socketTypeHash"] in [2218962841, 1282012138, 1456031260]:
continue
if "randomizedPlugSetHash" in socket:
pool = (
await self.get_definition(
"DestinyPlugSetDefinition", [socket["randomizedPlugSetHash"]]
)
)[str(socket["randomizedPlugSetHash"])]
pool_perks = [v["plugItemHash"] for v in pool["reusablePlugItems"]]
all_perks = await self.get_definition(
"DestinyInventoryItemLiteDefinition", pool_perks
)
try:
# https://stackoverflow.com/questions/44914727/get-first-and-second-values-in-dictionary-in-cpython-3-6
it = iter(all_perks.values())
key_hash = next(it)["itemCategoryHashes"][0]
key_data = (
await self.get_definition("DestinyItemCategoryDefinition", [key_hash])
)[str(key_hash)]
key = key_data["displayProperties"]["name"]
if key in perks:
key = f"{key} {count}"
count += 1
except IndexError:
key = _("Perk {count}").format(count=slot_counter)
perks[key] = "\n".join(
[p["displayProperties"]["name"] for h, p in all_perks.items()]
)
slot_counter += 1
continue
if "reusablePlugSetHash" in socket:
pool = (
await self.get_definition(
"DestinyPlugSetDefinition", [socket["reusablePlugSetHash"]]
)
)[str(socket["reusablePlugSetHash"])]
pool_perks = [v["plugItemHash"] for v in pool["reusablePlugItems"]]
all_perks = await self.get_definition(
"DestinyInventoryItemLiteDefinition", pool_perks
)
try:
it = iter(all_perks.values())
key_hash = next(it)["itemCategoryHashes"][0]
key_data = (
await self.get_definition("DestinyItemCategoryDefinition", [key_hash])
)[str(key_hash)]
key = key_data["displayProperties"]["name"]
if key in perks:
key = f"{key} {count}"
count += 1
except IndexError:
key = _("Perk {count}").format(count=slot_counter)
perks[key] = "\n".join(
[p["displayProperties"]["name"] for h, p in all_perks.items()]
)
slot_counter += 1
continue
perk_hash = socket["singleInitialItemHash"]
perk = (await self.get_definition("DestinyInventoryItemLiteDefinition", [perk_hash]))[
str(perk_hash)
]
            try:
                # Use this perk's own category hash to label the socket; the `all_perks`
                # pool from an earlier branch may be stale or undefined at this point.
                key_hash = perk["itemCategoryHashes"][0]
                key_data = (
                    await self.get_definition("DestinyItemCategoryDefinition", [key_hash])
                )[str(key_hash)]
                key = key_data["displayProperties"]["name"]
if key in perks:
key = f"{key} {count}"
count += 1
except (IndexError, KeyError):
key = _("Perk {count}").format(count=slot_counter)
perks[key] = perk["displayProperties"]["name"]
slot_counter += 1
return perks
@search.command(aliases=["item"])
@commands.bot_has_permissions(embed_links=True)
@commands.max_concurrency(1, commands.BucketType.default)
async def items(
self, ctx: commands.Context, details_or_lore: Optional[SearchInfo] = None, *, search: str
) -> None:
"""
Search for a specific item in Destiny 2
        `[details_or_lore]` signifies what information to display for the item.
        By default this command will show all available perks on weapons.
        Using `details`, `true`, or `stats` will show the weapon's stat bars.
        Using `lore` will instead display the weapon's lore card if it exists.
"""
        show_lore = details_or_lore is False
if search.startswith("lore "):
search = search.replace("lore ", "")
async with ctx.typing():
try:
items = await self.search_definition("DestinyInventoryItemDefinition", search)
except Destiny2MissingManifest as e:
await ctx.send(e)
return
if not items:
await ctx.send(_("`{search}` could not be found.").format(search=search))
return
embeds = []
# log.debug(items[0])
for item_hash, item in items.items():
if not (item["equippable"]):
continue
embed = discord.Embed()
description = item["flavorText"] + "\n\n"
damage_type = ""
try:
damage_data = (
await self.get_definition(
"DestinyDamageTypeDefinition", [item["defaultDamageTypeHash"]]
)
)[str(item["defaultDamageTypeHash"])]
damage_type = damage_data["displayProperties"]["name"]
except KeyError:
pass
if item["itemType"] in [3] and not show_lore:
stats_str = ""
rpm = ""
recoil = ""
magazine = ""
for stat_hash, value in item["stats"]["stats"].items():
if stat_hash in ["1935470627", "1480404414", "1885944937"]:
continue
stat_info = (
await self.get_definition("DestinyStatDefinition", [stat_hash])
)[str(stat_hash)]
stat_name = stat_info["displayProperties"]["name"]
if not stat_name:
continue
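                        # Render a 10-segment text bar: one filled block per 10 points of the stat.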
prog = "█" * int(value["value"] / 10)
empty = "░" * int((100 - value["value"]) / 10)
bar = f"{prog}{empty}"
if stat_hash == "4284893193":
rpm = f"{stat_name}: **{value['value']}**\n"
continue
if stat_hash == "3871231066":
recoil = f"{stat_name}: **{value['value']}**\n"
continue
if stat_hash == "2715839340":
magazine = f"{stat_name}: **{value['value']}**\n"
continue
if details_or_lore:
stats_str += f"{stat_name}: **{value['value']}** \n{bar}\n"
stats_str += rpm + recoil + magazine
description += stats_str
embed.description = description
perks = await self.get_weapon_possible_perks(item)
for key, value in perks.items():
embed.add_field(name=key, value=value[:1024])
if "loreHash" in item and (show_lore or item["itemType"] in [2]):
lore = (
await self.get_definition("DestinyLoreDefinition", [item["loreHash"]])
)[str(item["loreHash"])]
description += _("Lore: \n\n") + lore["displayProperties"]["description"]
if len(description) > 2048:
count = 0
for page in pagify(description, page_length=1024):
if count == 0:
embed.description = page
else:
embed.add_field(name=_("Lore Continued"), value=page)
count += 1
else:
embed.description = description
embed.title = damage_type + " " + item["itemTypeAndTierDisplayName"]
name = item["displayProperties"]["name"]
icon_url = IMAGE_URL + item["displayProperties"]["icon"]
embed.set_author(name=name, icon_url=icon_url)
embed.set_thumbnail(url=icon_url)
if item.get("screenshot", False):
embed.set_image(url=IMAGE_URL + item["screenshot"])
embeds.append(embed)
await BaseMenu(
source=BasePages(
pages=embeds,
),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
async def check_gilded_title(self, chars: dict, title: dict) -> bool:
"""
Checks a players records for a completed gilded title
"""
gilding_hash = title["titleInfo"].get("gildingTrackingRecordHash", None)
records = chars["profileRecords"]["data"]["records"]
if str(gilding_hash) in records:
for objective in records[str(gilding_hash)]["objectives"]:
if objective["complete"]:
return True
return False
@destiny.command(name="joinme")
@commands.bot_has_permissions(embed_links=True)
async def destiny_join_command(self, ctx: commands.Context) -> None:
"""
Get your Steam ID to give people to join your in-game fireteam
"""
async with ctx.typing():
if not await self.has_oauth(ctx):
msg = _(
"You need to authenticate your Bungie.net account before this command will work."
)
return await ctx.send(msg)
bungie_id = await self.config.user(ctx.author).oauth.membership_id()
creds = await self.get_bnet_user(ctx.author, bungie_id)
steam_id = ""
for cred in creds:
if "credentialAsString" in cred:
steam_id = cred["credentialAsString"]
join_code = f"\n```py\n/join {steam_id}\n```"
msg = _(
"Use the following code in game to join {author}'s Fireteam:{join_code}"
).format(author=ctx.author.display_name, join_code=join_code)
await ctx.send(msg)
@destiny.group()
@commands.bot_has_permissions(embed_links=True)
async def clan(self, ctx: commands.Context) -> None:
"""
Clan settings
"""
return
@clan.command(name="info")
@commands.bot_has_permissions(embed_links=True)
async def show_clan_info(self, ctx: commands.Context, clan_id: Optional[str]):
"""
Display basic information about the clan set in this server
"""
async with ctx.typing():
if not await self.has_oauth(ctx):
msg = _(
"You need to authenticate your Bungie.net account before this command will work."
)
return await ctx.send(msg)
if clan_id:
clan_re = re.compile(
r"(https:\/\/)?(www\.)?bungie\.net\/.*(groupid=(\d+))", flags=re.I
)
clan_invite = clan_re.search(clan_id)
if clan_invite:
clan_id = clan_invite.group(4)
else:
clan_id = await self.config.guild(ctx.guild).clan_id()
if not clan_id:
return await ctx.send(
_(
"No clan ID has been setup for this server. "
"Use `{prefix}destiny clan set` to set one."
).format(prefix=ctx.clean_prefix)
)
try:
clan_info = await self.get_clan_info(ctx.author, clan_id)
embed = await self.make_clan_embed(clan_info)
except Exception:
log.exception("Error getting clan info")
return await ctx.send(
_("I could not find any information about this servers clan.")
)
else:
await ctx.send(embed=embed)
async def make_clan_embed(self, clan_info: dict) -> discord.Embed:
clan_id = clan_info["detail"]["groupId"]
clan_name = clan_info["detail"]["name"]
clan_about = clan_info["detail"]["about"]
clan_motto = clan_info["detail"]["motto"]
clan_callsign = clan_info["detail"]["clanInfo"]["clanCallsign"]
clan_xp_data = clan_info["detail"]["clanInfo"]["d2ClanProgressions"]["584850370"]
weekly_progress = clan_xp_data["weeklyProgress"]
weekly_limit = clan_xp_data["weeklyLimit"]
level = clan_xp_data["level"]
level_cap = clan_xp_data["levelCap"]
members = clan_info["detail"]["memberCount"]
max_members = clan_info["detail"]["features"]["maximumMembers"]
clan_creation_date = datetime.datetime.strptime(
clan_info["detail"]["creationDate"], "%Y-%m-%dT%H:%M:%S.%fZ"
)
clan_create_str = clan_creation_date.strftime("%I:%M %p %Y-%m-%d")
clan_xp_str = _(
"Level: {level}/{level_cap}\nWeekly Progress: " "{weekly_progress}/{weekly_limit}"
).format(
level=level,
level_cap=level_cap,
weekly_progress=weekly_progress,
weekly_limit=weekly_limit,
)
join_link = f"https://www.bungie.net/en/ClanV2?groupid={clan_id}"
embed = discord.Embed(
title=f"{clan_name} [{clan_callsign}]", description=clan_about, url=join_link
)
embed.add_field(name=_("Motto"), value=clan_motto, inline=False)
embed.add_field(name=_("Clan XP"), value=clan_xp_str)
embed.add_field(name=_("Members"), value=f"{members}/{max_members}")
embed.add_field(name=_("Clan Founded"), value=clan_create_str)
return embed
@clan.command(name="set")
@commands.bot_has_permissions(embed_links=True)
@commands.admin_or_permissions(manage_guild=True)
async def set_clan_id(self, ctx: commands.Context, clan_id: str) -> None:
"""
Set the clan ID for this server
`<clan_id>` Must be either the clan's ID or you can provide
the clan invite link at the `clan profile` setting on bungie.net
example link: `https://www.bungie.net/en/ClanV2?groupid=1234567`
the numbers after `groupid=` is the clan ID.
"""
async with ctx.typing():
if not await self.has_oauth(ctx):
msg = _(
"You need to authenticate your Bungie.net account before this command will work."
)
return await ctx.send(msg)
clan_re = re.compile(
r"(https:\/\/)?(www\.)?bungie\.net\/.*(groupid=(\d+))", flags=re.I
)
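            # e.g. "https://www.bungie.net/en/ClanV2?groupid=1234567" -> group(4) == "1234567"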
clan_invite = clan_re.search(clan_id)
if clan_invite:
clan_id = clan_invite.group(4)
try:
clan_info = await self.get_clan_info(ctx.author, clan_id)
embed = await self.make_clan_embed(clan_info)
except Exception:
log.exception("Error getting clan info")
return await ctx.send(_("I could not find a clan with that ID."))
else:
await self.config.guild(ctx.guild).clan_id.set(clan_id)
await ctx.send(_("Server's clan set to"), embed=embed)
async def destiny_pick_profile(
self, ctx: commands.Context, pending_users: dict
) -> Optional[dict]:
"""
Allows a clan admin to pick the user they want to approve in the clan
"""
users = pending_users["results"][:9]
embed = discord.Embed(
title=_("Pending Clan Members"),
description=_("React with the user you would like to approve into the clan."),
)
        for index, user in enumerate(users):
destiny_name = user["destinyUserInfo"]["LastSeenDisplayName"]
bungie_name = user["bungieNetUserInfo"]["displayName"]
msg = _("Destiny/Steam Name: {destiny_name}\nBungie.net Name: {bungie_name}").format(
destiny_name=destiny_name, bungie_name=bungie_name
)
embed.add_field(name=_("User {count}").format(count=index + 1), value=msg)
msg = await ctx.send(embed=embed)
emojis = ReactionPredicate.NUMBER_EMOJIS[1 : len(users) + 1]
start_adding_reactions(msg, emojis)
pred = ReactionPredicate.with_emojis(emojis, msg)
try:
await ctx.bot.wait_for("reaction_add", check=pred)
except asyncio.TimeoutError:
return None
else:
return users[pred.result]
@clan.command(name="pending")
@commands.bot_has_permissions(embed_links=True)
@commands.admin_or_permissions(manage_guild=True)
async def clan_pending(self, ctx: commands.Context) -> None:
"""
Display pending clan members.
Clan admin can further approve specified clan members
by reacting to the resulting message.
"""
async with ctx.typing():
if not await self.has_oauth(ctx):
msg = _(
"You need to authenticate your Bungie.net account before this command will work."
)
return await ctx.send(msg)
clan_id = await self.config.guild(ctx.guild).clan_id()
if not clan_id:
return await ctx.send(
_(
"No clan ID has been setup for this server. "
"Use `{prefix}destiny clan set` to set one."
).format(prefix=ctx.clean_prefix)
)
clan_pending = await self.get_clan_pending(ctx.author, clan_id)
if not clan_pending["results"]:
return await ctx.send(_("There is no one pending clan approval."))
approved = await self.destiny_pick_profile(ctx, clan_pending)
if not approved:
return await ctx.send(_("No one will be approved into the clan."))
try:
destiny_name = approved["destinyUserInfo"]["LastSeenDisplayName"]
bungie_name = approved["bungieNetUserInfo"]["displayName"]
membership_id = approved["destinyUserInfo"]["membershipId"]
membership_type = approved["destinyUserInfo"]["membershipType"]
await self.approve_clan_pending(
ctx.author, clan_id, membership_type, membership_id, approved
)
except Destiny2APIError as e:
log.exception("error approving clan member.")
await ctx.send(str(e))
else:
await ctx.send(
_("{destiny_name} AKA {bungie_name} has been approved into the clan.").format(
destiny_name=destiny_name, bungie_name=bungie_name
)
)
@clan.command(name="roster")
@commands.bot_has_permissions(embed_links=True)
@commands.mod_or_permissions(manage_messages=True)
async def get_clan_roster(self, ctx: commands.Context, output_format: Optional[str]) -> None:
"""
Get the full clan roster
        `[output_format]` can be `csv` or `md`: `csv` uploads a csv file and `md`
        uploads a markdown table of the clan roster instead of displaying the output.
"""
async with ctx.typing():
if not await self.has_oauth(ctx):
msg = _(
"You need to authenticate your Bungie.net account before this command will work."
)
return await ctx.send(msg)
clan_id = await self.config.guild(ctx.guild).clan_id()
if not clan_id:
return await ctx.send(
_(
"No clan ID has been setup for this server. "
"Use `{prefix}destiny clan set` to set one."
).format(prefix=ctx.clean_prefix)
)
clan = await self.get_clan_members(ctx.author, clan_id)
headers = [
"Discord Name",
"Discord ID",
"Destiny Name",
"Destiny ID",
"Bungie.net Name",
"Bungie.net ID",
"Last Seen Destiny",
"Steam ID",
"Join Date",
]
clan_mems = ""
rows = []
saved_users = await self.config.all_users()
for member in clan["results"]:
last_online = datetime.datetime.utcfromtimestamp(
int(member["lastOnlineStatusChange"])
)
join_date = datetime.datetime.strptime(member["joinDate"], "%Y-%m-%dT%H:%M:%SZ")
destiny_name = member["destinyUserInfo"]["LastSeenDisplayName"]
destiny_id = member["destinyUserInfo"]["membershipId"]
clan_mems += destiny_name + "\n"
discord_id = None
discord_name = None
bungie_id = None
bungie_name = None
steam_id = None
try:
bungie_id = member["bungieNetUserInfo"]["membershipId"]
bungie_name = member["bungieNetUserInfo"]["displayName"]
creds = await self.get_bnet_user(ctx.author, bungie_id)
steam_id = ""
for cred in creds:
if "credentialAsString" in cred:
steam_id = cred["credentialAsString"]
except Exception:
pass
for user_id, data in saved_users.items():
if data["oauth"]["membership_id"] == bungie_id:
discord_user = ctx.guild.get_member(int(user_id))
if discord_user:
discord_name = str(discord_user)
discord_id = discord_user.id
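                # Numeric IDs get an apostrophe prefix below so spreadsheet tools keep
                # them as text instead of rounding them off as large numbers.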
user_info = [
discord_name,
f"'{discord_id}" if discord_id else None,
destiny_name,
f"'{destiny_id}" if destiny_id else None,
bungie_name,
f"'{bungie_id}" if bungie_id else None,
last_online,
f"'{steam_id}" if steam_id else None,
str(join_date),
]
rows.append(user_info)
if output_format == "csv":
outfile = StringIO()
employee_writer = csv.writer(
outfile, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL
)
employee_writer.writerow(headers)
for row in rows:
employee_writer.writerow(row)
outfile.seek(0)
file = discord.File(outfile, filename="clan_roster.csv")
await ctx.send(file=file)
elif output_format == "md":
data = tabulate(rows, headers=headers, tablefmt="github")
file = discord.File(BytesIO(data.encode()), filename="clan_roster.md")
await ctx.send(file=file)
else:
data = tabulate(rows, headers=headers, tablefmt="pretty")
for page in pagify(data, page_length=1990):
await ctx.send(box(page, lang="css"))
@destiny.command()
@commands.bot_has_permissions(embed_links=True)
async def user(self, ctx: commands.Context, user: discord.Member = None) -> None:
"""
Display a menu of your basic character's info
`[user]` A member on the server who has setup their account on this bot.
"""
async with ctx.typing():
if not await self.has_oauth(ctx, user):
msg = _(
"You need to authenticate your Bungie.net account before this command will work."
)
return await ctx.send(msg)
if not user:
user = ctx.author
try:
chars = await self.get_characters(user)
# await self.save(chars, "character.json")
except Destiny2APIError as e:
log.error(e, exc_info=True)
msg = _("I can't seem to find your Destiny profile.")
await ctx.send(msg)
return
embeds = []
currency_datas = await self.get_definition(
"DestinyInventoryItemLiteDefinition",
[v["itemHash"] for v in chars["profileCurrencies"]["data"]["items"]],
)
player_currency = ""
for item in chars["profileCurrencies"]["data"]["items"]:
quantity = item["quantity"]
name = currency_datas[str(item["itemHash"])]["displayProperties"]["name"]
player_currency += f"{name}: **{quantity}**\n"
for char_id, char in chars["characters"]["data"].items():
info = ""
race = (await self.get_definition("DestinyRaceDefinition", [char["raceHash"]]))[
str(char["raceHash"])
]
gender = (
await self.get_definition("DestinyGenderDefinition", [char["genderHash"]])
)[str(char["genderHash"])]
char_class = (
await self.get_definition("DestinyClassDefinition", [char["classHash"]])
)[str(char["classHash"])]
info += "{race} {gender} {char_class} ".format(
race=race["displayProperties"]["name"],
gender=gender["displayProperties"]["name"],
char_class=char_class["displayProperties"]["name"],
)
titles = ""
if "titleRecordHash" in char:
# TODO: Add fetch for Destiny.Definitions.Records.DestinyRecordDefinition
char_title = (
await self.get_definition(
"DestinyRecordDefinition", [char["titleRecordHash"]]
)
)[str(char["titleRecordHash"])]
title_info = "**{title_name}**\n{title_desc}\n"
try:
gilded = ""
if await self.check_gilded_title(chars, char_title):
gilded = _("Gilded ")
title_name = (
f"{gilded}"
+ char_title["titleInfo"]["titlesByGenderHash"][
str(char["genderHash"])
]
)
title_desc = char_title["displayProperties"]["description"]
titles += title_info.format(title_name=title_name, title_desc=title_desc)
except KeyError:
pass
embed = discord.Embed(title=info)
embed.set_author(name=user.display_name, icon_url=user.avatar_url)
if "emblemPath" in char:
embed.set_thumbnail(url=IMAGE_URL + char["emblemPath"])
if titles:
# embed.add_field(name=_("Titles"), value=titles)
embed.set_author(
name=f"{user.display_name} ({title_name})", icon_url=user.avatar_url
)
# log.debug(data)
stats_str = ""
time_played = humanize_timedelta(seconds=int(char["minutesPlayedTotal"]) * 60)
for stat_hash, value in char["stats"].items():
stat_info = (await self.get_definition("DestinyStatDefinition", [stat_hash]))[
str(stat_hash)
]
stat_name = stat_info["displayProperties"]["name"]
prog = "█" * int(value / 10)
empty = "░" * int((100 - value) / 10)
bar = f"{prog}{empty}"
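                    # The 1935470627 hash (character Power) isn't a 0-100 stat, so its
                    # bar is suppressed and only the raw value is shown.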
if stat_hash == "1935470627":
bar = ""
stats_str += f"{stat_name}: **{value}** \n{bar}\n"
stats_str += _("Time Played Total: **{time}**").format(time=time_played)
embed.description = stats_str
embed = await self.get_char_colour(embed, char)
if titles:
embed.add_field(name=_("Titles"), value=titles)
embed.add_field(name=_("Current Currencies"), value=player_currency)
embeds.append(embed)
await BaseMenu(
source=BasePages(
pages=embeds,
),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
@search.command()
@commands.bot_has_permissions(embed_links=True)
async def lore(self, ctx: commands.Context, entry: str = None) -> None:
"""
Find Destiny Lore
"""
try:
# the below is to prevent blocking reading the large
# ~130mb manifest files and save on API calls
task = functools.partial(self.get_entities, entity="DestinyLoreDefinition")
task = self.bot.loop.run_in_executor(None, task)
data: dict = await asyncio.wait_for(task, timeout=60)
except Exception:
return await ctx.send(_("The manifest needs to be downloaded for this to work."))
lore = []
for entry_hash, entries in data.items():
em = discord.Embed(title=entries["displayProperties"]["name"])
description = entries["displayProperties"]["description"]
if len(description) < 2048:
em.description = entries["displayProperties"]["description"]
elif len(description) > 2048 and len(description) < 6000:
em.description = description[:2048]
                new_desc = description[2048:]
parts = [new_desc[i : i + 1024] for i in range(0, len(new_desc), 1024)]
for i in parts:
em.add_field(name=_("Continued"), value=i)
if entries["displayProperties"]["hasIcon"]:
icon = entries["displayProperties"]["icon"]
em.set_thumbnail(url=f"{IMAGE_URL}{icon}")
lore.append(em)
if entry:
for t in lore:
if entry.lower() in str(t.title).lower():
print(t.title)
lore.insert(0, lore.pop(lore.index(t)))
await BaseMenu(
source=BasePages(
                pages=lore,
),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
async def save(self, data: dict, loc: str = "sample.json"):
if self.bot.user.id not in DEV_BOTS:
return
base_path = Path(__file__).parent
path = base_path / loc
with path.open(encoding="utf-8", mode="w") as f:
json.dump(data, f, indent=4, sort_keys=False, separators=(",", " : "))
@destiny.command(aliases=["xûr"])
@commands.bot_has_permissions(embed_links=True)
async def xur(self, ctx: commands.Context, full: bool = False) -> None:
"""
Display a menu of Xûr's current wares
`[full=False]` Show perk definition on Xûr's current wares
"""
async with ctx.typing():
if not await self.has_oauth(ctx):
msg = _(
"You need to authenticate your Bungie.net account before this command will work."
)
return await ctx.send(msg)
try:
chars = await self.get_characters(ctx.author)
# await self.save(chars, "characters.json")
except Destiny2APIError:
# log.debug(e)
msg = _("I can't seem to find your Destiny profile.")
await ctx.send(msg)
return
for char_id, char in chars["characters"]["data"].items():
# log.debug(char)
try:
xur = await self.get_vendor(ctx.author, char_id, "2190858386")
xur_def = (
await self.get_definition("DestinyVendorDefinition", ["2190858386"])
)["2190858386"]
except Destiny2APIError:
log.error("I can't seem to see Xûr at the moment")
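                    # Xûr only appears from Friday's reset to the weekly reset, so when the
                    # vendor endpoint fails we estimate the next Friday 17:00 UTC as his return.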
today = datetime.datetime.utcnow()
friday = today.replace(hour=17, minute=0, second=0) + datetime.timedelta(
(4 - today.weekday()) % 7
)
                    next_xur = humanize_timedelta(timedelta=(friday - today))
await ctx.send(
_("Xûr's not around, come back in {next_xur}.").format(next_xur=next_xur)
)
return
break
# items = [v["itemHash"] for k, v in xur["sales"]["data"].items()]
embeds: List[discord.Embed] = []
# data = await self.get_definition("DestinyInventoryItemDefinition", items)
embed = discord.Embed(
colour=discord.Colour.red(),
description=xur_def["displayProperties"]["description"],
)
embed.set_thumbnail(
url=IMAGE_URL + xur_def["displayProperties"]["largeTransparentIcon"]
)
embed.set_author(name="Xûr's current wares")
# location = xur_def["locations"][0]["destinationHash"]
# log.debug(await self.get_definition("DestinyDestinationDefinition", [location]))
for index, item_base in xur["sales"]["data"].items():
item = (
await self.get_definition(
"DestinyInventoryItemDefinition", [item_base["itemHash"]]
)
)[str(item_base["itemHash"])]
if not (item["equippable"]):
continue
perk_hashes = [
str(p["singleInitialItemHash"]) for p in item["sockets"]["socketEntries"]
]
perk_data = await self.get_definition(
"DestinyInventoryItemDefinition", perk_hashes
)
perks = ""
item_embed = discord.Embed(title=item["displayProperties"]["name"])
item_embed.set_thumbnail(url=IMAGE_URL + item["displayProperties"]["icon"])
item_embed.set_image(url=IMAGE_URL + item["screenshot"])
for perk_hash, perk in perk_data.items():
properties = perk["displayProperties"]
if "Common" in perk["itemTypeAndTierDisplayName"]:
continue
if (
properties["name"] == "Empty Mod Socket"
or properties["name"] == "Default Ornament"
or properties["name"] == "Change Energy Type"
or properties["name"] == "Empty Catalyst Socket"
):
continue
if "name" in properties and "description" in properties:
if not properties["name"]:
continue
# await self.save(perk, properties["name"] + ".json")
if full:
perks += "**{0}** - {1}\n".format(
properties["name"], properties["description"]
)
else:
perks += "- **{0}**\n".format(properties["name"])
stats_str = ""
if "armor" in item["equippingBlock"]["uniqueLabel"]:
total = 0
for stat_hash, stat_data in xur["itemComponents"]["stats"]["data"][index][
"stats"
].items():
stat_info = (
await self.get_definition("DestinyStatDefinition", [stat_hash])
)[str(stat_hash)]
stat_name = stat_info["displayProperties"]["name"]
stat_value = stat_data["value"]
prog = "█" * int(stat_value / 6)
empty = "░" * int((42 - stat_value) / 6)
bar = f"{prog}{empty}"
stats_str += f"{stat_name}: \n{bar} **{stat_value}**\n"
total += stat_value
stats_str += _("Total: **{total}**\n").format(total=total)
msg = (
item["itemTypeAndTierDisplayName"]
+ "\n"
+ stats_str
+ (item["displayProperties"]["description"] + "\n" if full else "")
+ perks
)
item_embed.description = msg
embed.insert_field_at(
0, name="**__" + item["displayProperties"]["name"] + "__**\n", value=msg
)
embeds.insert(0, item_embed)
embeds.insert(0, embed)
# await ctx.send(embed=embed)
# await ctx.tick()
await BaseMenu(
source=BasePages(
pages=embeds,
),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
@destiny.command()
@commands.bot_has_permissions(embed_links=True)
async def eververse(
self, ctx: commands.Context, *, item_types: Optional[DestinyEververseItemType]
) -> None:
"""
Display items currently available on the Eververse in a menu
`[item_types]` can be one of `ghosts`, `ships`, `sparrows`,
`shaders`, `ornaments` and `finishers` to filter specific items.
"""
async with ctx.typing():
if not await self.has_oauth(ctx):
msg = _(
"You need to authenticate your Bungie.net account before this command will work."
)
return await ctx.send(msg)
if not item_types:
item_types = {"item_types": [9, 19, 21, 22, 24, 29], "item_sub_types": [21, 20]}
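                # These magic numbers are Bungie item type / sub-type enum values that
                # roughly cover the categories named in the docstring (ghosts, ships,
                # sparrows, shaders, ornaments and finishers).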
try:
chars = await self.get_characters(ctx.author)
except Destiny2APIError:
# log.debug(e)
msg = _("I can't seem to find your Destiny profile.")
await ctx.send(msg)
return
embeds: List[discord.Embed] = []
eververse_sales = {}
for char_id, char in chars["characters"]["data"].items():
try:
ev = await self.get_vendor(ctx.author, char_id, "3361454721")
eververse_sales.update(ev["sales"]["data"])
except Destiny2APIError:
log.error("I can't seem to see the eververse at the moment", exc_info=True)
await ctx.send(_("I can't access the eververse at the moment."))
return
await self.save(eververse_sales, "eververse.json")
embeds = []
item_hashes = [i["itemHash"] for k, i in eververse_sales.items()]
item_defs = await self.get_definition("DestinyInventoryItemDefinition", item_hashes)
item_costs = [c["itemHash"] for k, i in eververse_sales.items() for c in i["costs"]]
item_cost_defs = await self.get_definition(
"DestinyInventoryItemDefinition", item_costs
)
for item_hash, vendor_item in eververse_sales.items():
item = item_defs[str(vendor_item["itemHash"])]
if (
item["itemType"] not in item_types["item_types"]
and item_types["item_types"] != []
):
# log.debug("ignoring item from type %s" % item["itemType"])
continue
if (
item["itemSubType"] not in item_types["item_sub_types"]
and item_types["item_sub_types"] != []
):
# log.debug("ignoring item from sub type %s" % item["itemSubType"])
continue
embed = discord.Embed()
embed.description = item["displayProperties"]["description"]
embed.title = item["itemTypeAndTierDisplayName"]
name = item["displayProperties"]["name"]
icon_url = IMAGE_URL + item["displayProperties"]["icon"]
embed.set_author(name=name, icon_url=icon_url)
embed.set_thumbnail(url=icon_url)
cost_str = ""
for costs in vendor_item["costs"]:
cost = costs["quantity"]
cost_name = item_cost_defs[str(costs["itemHash"])]["displayProperties"]["name"]
cost_str += f"{cost_name}: **{cost}**\n"
embed.add_field(name=_("Cost"), value=cost_str)
if "screenshot" in item:
embed.set_image(url=IMAGE_URL + item["screenshot"])
embeds.append(embed)
if embeds == []:
return await ctx.send(_("I can't access the eververse at the moment."))
# await ctx.tick()
await BaseMenu(
source=BasePages(
pages=embeds,
),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
@destiny.command()
@commands.bot_has_permissions(embed_links=True)
async def spider(self, ctx: commands.Context) -> None:
"""
Display Spiders wares
"""
async with ctx.typing():
if not await self.has_oauth(ctx):
msg = _(
"You need to authenticate your Bungie.net account before this command will work."
)
return await ctx.send(msg)
try:
chars = await self.get_characters(ctx.author)
except Destiny2APIError:
# log.debug(e)
msg = _("I can't seem to find your Destiny profile.")
await ctx.send(msg)
return
for char_id, char in chars["characters"]["data"].items():
try:
spider = await self.get_vendor(ctx.author, char_id, "863940356")
spider_def = (
await self.get_definition("DestinyVendorDefinition", ["863940356"])
)["863940356"]
except Destiny2APIError:
log.error("I can't seem to see the Spider at the moment", exc_info=True)
await ctx.send(_("I can't access the Spider at the moment."))
return
break
# await self.save(spider, "spider.json")
currency_datas = await self.get_definition(
"DestinyInventoryItemLiteDefinition",
[v["itemHash"] for v in chars["profileCurrencies"]["data"]["items"]],
)
embed = discord.Embed(description=spider_def["displayProperties"]["description"])
embed.set_thumbnail(
url=IMAGE_URL + spider_def["displayProperties"]["largeTransparentIcon"]
)
embed.set_author(
name=spider_def["displayProperties"]["name"]
+ ", "
+ spider_def["displayProperties"]["subtitle"]
)
item_hashes = [i["itemHash"] for k, i in spider["sales"]["data"].items()]
item_defs = await self.get_definition(
"DestinyInventoryItemLiteDefinition", item_hashes
)
item_costs = [
c["itemHash"] for k, i in spider["sales"]["data"].items() for c in i["costs"]
]
item_cost_defs = await self.get_definition(
"DestinyInventoryItemLiteDefinition", item_costs
)
for key, data in spider["sales"]["data"].items():
item_hash = data["itemHash"]
item = item_defs[str(item_hash)]
if item["itemType"] in [0, 26]:
continue
try:
costs = data["costs"][0]
cost = item_cost_defs[str(costs["itemHash"])]
cost_str = str(costs["quantity"]) + " " + cost["displayProperties"]["name"]
except IndexError:
cost_str = "None"
embed.add_field(name=item["displayProperties"]["name"], value=cost_str)
await asyncio.sleep(0)
player_currency = ""
for item in chars["profileCurrencies"]["data"]["items"]:
quantity = item["quantity"]
name = currency_datas[str(item["itemHash"])]["displayProperties"]["name"]
player_currency += f"{name}: **{quantity}**\n"
embed.add_field(name=_("Current Currencies"), value=player_currency)
await ctx.send(embed=embed)
@destiny.command(aliases=["banshee-44"])
@commands.bot_has_permissions(embed_links=True)
async def banshee(self, ctx: commands.Context) -> None:
"""
Display Banshee-44's wares
"""
async with ctx.typing():
if not await self.has_oauth(ctx):
msg = _(
"You need to authenticate your Bungie.net account before this command will work."
)
return await ctx.send(msg)
try:
chars = await self.get_characters(ctx.author)
except Destiny2APIError:
# log.debug(e)
msg = _("I can't seem to find your Destiny profile.")
await ctx.send(msg)
return
for char_id, char in chars["characters"]["data"].items():
try:
banshee = await self.get_vendor(ctx.author, char_id, "672118013")
banshee_def = (
await self.get_definition("DestinyVendorDefinition", ["672118013"])
)["672118013"]
await self.save(banshee, "banshee.json")
except Destiny2APIError:
log.error(
"I can't seem to see the Banshee-44's wares at the moment", exc_info=True
)
await ctx.send(_("I can't access the Banshee-44 at the moment."))
return
break
# await self.save(spider, "spider.json")
embed = discord.Embed(description=banshee_def["displayProperties"]["description"])
embed.set_thumbnail(
url=IMAGE_URL + banshee_def["displayProperties"]["largeTransparentIcon"]
)
embed.set_author(
name=banshee_def["displayProperties"]["name"]
+ ", "
+ banshee_def["displayProperties"]["subtitle"]
)
item_hashes = [i["itemHash"] for k, i in banshee["sales"]["data"].items()]
item_defs = await self.get_definition(
"DestinyInventoryItemLiteDefinition", item_hashes
)
item_costs = [
c["itemHash"] for k, i in banshee["sales"]["data"].items() for c in i["costs"]
]
item_cost_defs = await self.get_definition(
"DestinyInventoryItemLiteDefinition", item_costs
)
for key, data in banshee["sales"]["data"].items():
item_hash = data["itemHash"]
item = item_defs[str(item_hash)]
if item["itemType"] in [0]:
continue
try:
costs = data["costs"][0]
cost = item_cost_defs[str(costs["itemHash"])]
cost_str = str(costs["quantity"]) + " " + cost["displayProperties"]["name"]
except IndexError:
cost_str = "None"
embed.add_field(name=item["displayProperties"]["name"], value=cost_str)
await asyncio.sleep(0)
await ctx.send(embed=embed)
@destiny.command()
@commands.bot_has_permissions(embed_links=True, add_reactions=True)
async def loadout(
self, ctx: commands.Context, full: Optional[bool] = False, user: discord.Member = None
) -> None:
"""
Display a menu of each character's equipped weapons and their info
`[full=False]` Display full information about weapons equipped.
`[user]` A member on the server who has setup their account on this bot.
"""
async with ctx.typing():
if not await self.has_oauth(ctx, user):
msg = _(
"You need to authenticate your Bungie.net account before this command will work."
)
return await ctx.send(msg)
if not user:
user = ctx.author
try:
chars = await self.get_characters(user)
except Destiny2APIError:
# log.debug(e)
msg = _("I can't seem to find your Destiny profile.")
await ctx.send(msg)
return
embeds = []
for char_id, char in chars["characters"]["data"].items():
info = ""
race = (await self.get_definition("DestinyRaceDefinition", [char["raceHash"]]))[
str(char["raceHash"])
]
gender = (
await self.get_definition("DestinyGenderDefinition", [char["genderHash"]])
)[str(char["genderHash"])]
char_class = (
await self.get_definition("DestinyClassDefinition", [char["classHash"]])
)[str(char["classHash"])]
info += "{race} {gender} {char_class} ".format(
race=race["displayProperties"]["name"],
gender=gender["displayProperties"]["name"],
char_class=char_class["displayProperties"]["name"],
)
titles = ""
if "titleRecordHash" in char:
# TODO: Add fetch for Destiny.Definitions.Records.DestinyRecordDefinition
char_title = (
await self.get_definition(
"DestinyRecordDefinition", [char["titleRecordHash"]]
)
)[str(char["titleRecordHash"])]
title_info = "**{title_name}**\n{title_desc}\n"
try:
gilded = ""
if await self.check_gilded_title(chars, char_title):
gilded = _("Gilded ")
title_name = (
f"{gilded}"
+ char_title["titleInfo"]["titlesByGenderHash"][
str(char["genderHash"])
]
)
title_desc = char_title["displayProperties"]["description"]
titles += title_info.format(title_name=title_name, title_desc=title_desc)
except KeyError:
pass
embed = discord.Embed(title=info)
embed.set_author(name=user.display_name, icon_url=user.avatar_url)
if "emblemPath" in char:
embed.set_thumbnail(url=IMAGE_URL + char["emblemPath"])
if titles:
# embed.add_field(name=_("Titles"), value=titles)
embed.set_author(
name=f"{user.display_name} ({title_name})", icon_url=user.avatar_url
)
char_items = chars["characterEquipment"]["data"][char_id]["items"]
item_list = [i["itemHash"] for i in char_items]
# log.debug(item_list)
items = await self.get_definition("DestinyInventoryItemDefinition", item_list)
# log.debug(items)
for item_hash, data in items.items():
# log.debug(data)
for item in char_items:
# log.debug(item)
if data["hash"] == item["itemHash"]:
instance_id = item["itemInstanceId"]
item_instance = chars["itemComponents"]["instances"]["data"][instance_id]
if not item_instance["isEquipped"]:
continue
if not (data["equippable"] and data["itemType"] == 3):
continue
name = data["displayProperties"]["name"]
desc = data["displayProperties"]["description"]
item_type = data["itemTypeAndTierDisplayName"]
try:
light = item_instance["primaryStat"]["value"]
except KeyError:
light = ""
perk_list = chars["itemComponents"]["perks"]["data"][instance_id]["perks"]
perk_hashes = [p["perkHash"] for p in perk_list]
perk_data = await self.get_definition(
"DestinySandboxPerkDefinition", perk_hashes
)
perks = ""
for perk_hash, perk in perk_data.items():
properties = perk["displayProperties"]
if "name" in properties and "description" in properties:
if full:
perks += "**{0}** - {1}\n".format(
properties["name"], properties["description"]
)
else:
perks += "- **{0}**\n".format(properties["name"])
value = f"**{light}** {item_type}\n{perks}"
embed.add_field(name=name, value=value, inline=True)
# log.debug(data)
stats_str = ""
for stat_hash, value in char["stats"].items():
stat_info = (await self.get_definition("DestinyStatDefinition", [stat_hash]))[
str(stat_hash)
]
stat_name = stat_info["displayProperties"]["name"]
prog = "█" * int(value / 10)
empty = "░" * int((100 - value) / 10)
bar = f"{prog}{empty}"
if stat_hash == "1935470627":
bar = ""
stats_str += f"{stat_name}: **{value}** \n{bar}\n"
embed.description = stats_str
embed = await self.get_char_colour(embed, char)
embeds.append(embed)
await BaseMenu(
source=BasePages(
pages=embeds,
),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
@destiny.command()
@commands.bot_has_permissions(embed_links=True, add_reactions=True)
async def gambit(self, ctx: commands.Context) -> None:
"""
        Display a menu of each character's gambit stats
"""
await ctx.invoke(self.stats, "allPvECompetitive")
@destiny.command()
@commands.bot_has_permissions(embed_links=True, add_reactions=True)
async def pvp(self, ctx: commands.Context) -> None:
"""
Display a menu of each character's pvp stats
"""
await ctx.invoke(self.stats, "allPvP")
@destiny.command(aliases=["raids"])
@commands.bot_has_permissions(embed_links=True, add_reactions=True)
async def raid(self, ctx: commands.Context) -> None:
"""
Display a menu for each character's RAID stats
"""
await ctx.invoke(self.stats, "raid")
@destiny.command(aliases=["qp"])
@commands.bot_has_permissions(embed_links=True, add_reactions=True)
async def quickplay(self, ctx: commands.Context) -> None:
"""
Display a menu of past quickplay matches
"""
await ctx.invoke(self.history, 70)
@destiny.command()
@commands.bot_has_permissions(embed_links=True, add_reactions=True)
async def history(self, ctx: commands.Context, activity: DestinyActivity) -> None:
"""
Display a menu of each character's last 5 activities
`<activity>` The activity type to display stats on available types include:
all, story, strike, raid, allpvp, patrol, allpve, control, clash,
crimsondoubles, nightfall, heroicnightfall, allstrikes, ironbanner, allmayhem,
supremacy, privatematchesall, survival, countdown, trialsofthenine, social,
trialscountdown, trialssurvival, ironbannercontrol, ironbannerclash,
ironbannersupremacy, scorednightfall, scoredheroicnightfall, rumble, alldoubles,
doubles, privatematchesclash, privatematchescontrol, privatematchessupremacy,
privatematchescountdown, privatematchessurvival, privatematchesmayhem,
privatematchesrumble, heroicadventure, showdown, lockdown, scorched,
scorchedteam, gambit, allpvecompetitive, breakthrough, blackarmoryrun,
salvage, ironbannersalvage, pvpcompetitive, pvpquickplay, clashquickplay,
clashcompetitive, controlquickplay, controlcompetitive, gambitprime,
reckoning, menagerie, vexoffensive, nightmarehunt, elimination, momentum,
dungeon, sundial, trialsofosiris
"""
async with ctx.typing():
if not await self.has_oauth(ctx):
msg = _(
"You need to authenticate your Bungie.net account before this command will work."
)
return await ctx.send(msg)
user = ctx.author
try:
chars = await self.get_characters(user)
except Destiny2APIError:
# log.debug(e)
msg = _("I can't seem to find your Destiny profile.")
await ctx.send(msg)
return
RAID = {
"assists": _("Assists"),
"kills": _("Kills"),
"deaths": _("Deaths"),
"opponentsDefeated": _("Opponents Defeated"),
"efficiency": _("Efficiency"),
"killsDeathsRatio": _("KDR"),
"killsDeathsAssists": _("KDA"),
"score": _("Score"),
"activityDurationSeconds": _("Duration"),
"playerCount": _("Player Count"),
"teamScore": _("Team Score"),
"completed": _("Completed"),
}
embeds = []
for char_id, char in chars["characters"]["data"].items():
# log.debug(char)
char_info = ""
race = (await self.get_definition("DestinyRaceDefinition", [char["raceHash"]]))[
str(char["raceHash"])
]
gender = (
await self.get_definition("DestinyGenderDefinition", [char["genderHash"]])
)[str(char["genderHash"])]
log.debug(gender)
char_class = (
await self.get_definition("DestinyClassDefinition", [char["classHash"]])
)[str(char["classHash"])]
char_info += "{user} - {race} {gender} {char_class} ".format(
user=user.display_name,
race=race["displayProperties"]["name"],
gender=gender["displayProperties"]["name"],
char_class=char_class["displayProperties"]["name"],
)
try:
data = await self.get_activity_history(user, char_id, activity)
except Exception:
log.error(
_(
"Something went wrong I couldn't get info on character {char_id} for activity {activity}"
).format(char_id=char_id, activity=activity)
)
continue
if not data:
continue
for activities in data["activities"]:
activity_hash = str(activities["activityDetails"]["directorActivityHash"])
activity_data = (
await self.get_definition("DestinyActivityDefinition", [activity_hash])
)[str(activity_hash)]
embed = discord.Embed(
title=activity_data["displayProperties"]["name"],
description=activity_data["displayProperties"]["description"],
)
date = datetime.datetime.strptime(activities["period"], "%Y-%m-%dT%H:%M:%SZ")
embed.timestamp = date
if activity_data["displayProperties"]["hasIcon"]:
embed.set_thumbnail(
url=IMAGE_URL + activity_data["displayProperties"]["icon"]
)
elif (
activity_data["pgcrImage"] != "/img/misc/missing_icon_d2.png"
and "emblemPath" in char
):
embed.set_thumbnail(url=IMAGE_URL + char["emblemPath"])
embed.set_author(name=char_info, icon_url=user.avatar_url)
for attr, name in RAID.items():
if activities["values"][attr]["basic"]["value"] < 0:
continue
embed.add_field(
name=name,
value=str(activities["values"][attr]["basic"]["displayValue"]),
)
embed = await self.get_char_colour(embed, char)
embeds.append(embed)
await BaseMenu(
source=BasePages(
pages=embeds,
),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
@staticmethod
async def get_extra_attrs(stat_type: str, attrs: dict) -> dict:
"""Helper function to receive the total attributes we care about"""
EXTRA_ATTRS = {}
if stat_type == "allPvECompetitive":
EXTRA_ATTRS = {
"winLossRatio": _("Win Loss Ratio"),
"invasions": _("Invasions"),
"invasionKills": _("Invasion Kills"),
"invasionDeaths": _("Invasion Deaths"),
"invaderDeaths": _("Invader Deaths"),
"invaderKills": _("Invader Kills"),
"primevalKills": _("Primeval Kills"),
"blockerKills": _("Blocker Kills"),
"mobKills": _("Mob Kills"),
"highValueKills": _("High Value Targets Killed"),
"motesPickedUp": _("Motes Picked Up"),
"motesDeposited": _("Motes Deposited"),
"motesDenied": _("Motes Denied"),
"motesLost": _("Motes Lost"),
}
if stat_type == "allPvP":
EXTRA_ATTRS = {"winLossRatio": _("Win Loss Ratio")}
for k, v in EXTRA_ATTRS.items():
attrs[k] = v
return attrs
async def build_character_stats(
self, user: discord.Member, chars: dict, stat_type: str
) -> List[discord.Embed]:
embeds: List[discord.Embed] = []
for char_id, char in chars["characters"]["data"].items():
# log.debug(char)
try:
data = await self.get_historical_stats(user, char_id, 0)
except Exception:
log.error(
_("Something went wrong I couldn't get info on character {char_id}").format(
char_id=char_id
)
)
continue
if not data:
continue
try:
if stat_type != "allPvECompetitive":
embed = await self.build_stat_embed_char_basic(user, char, data, stat_type)
embeds.append(embed)
else:
data = data[stat_type]["allTime"]
embed = await self.build_stat_embed_char_gambit(user, char, data, stat_type)
embeds.append(embed)
except Exception:
log.error(
f"User {user.id} had an issue generating stats for character {char_id}",
exc_info=True,
)
continue
return embeds
async def build_stat_embed_char_basic(
self, user: discord.Member, char: dict, data: dict, stat_type: str
) -> discord.Embed:
char_info = ""
race = (await self.get_definition("DestinyRaceDefinition", [char["raceHash"]]))[
str(char["raceHash"])
]
gender = (await self.get_definition("DestinyGenderDefinition", [char["genderHash"]]))[
str(char["genderHash"])
]
char_class = (await self.get_definition("DestinyClassDefinition", [char["classHash"]]))[
str(char["classHash"])
]
char_info += "{user} - {race} {gender} {char_class} ".format(
user=user.display_name,
race=race["displayProperties"]["name"],
gender=gender["displayProperties"]["name"],
char_class=char_class["displayProperties"]["name"],
)
ATTRS = {
"opponentsDefeated": _("Opponents Defeated"),
"efficiency": _("Efficiency"),
"bestSingleGameKills": _("Best Single Game Kills"),
"bestSingleGameScore": _("Best Single Game Score"),
"precisionKills": _("Precision Kills"),
"longestKillSpree": _("Longest Killing Spree"),
"longestSingleLife": _("Longest Single Life"),
"totalActivityDurationSeconds": _("Total time playing"),
"averageLifespan": _("Average Life Span"),
"weaponBestType": _("Best Weapon Type"),
}
embed = discord.Embed(title=stat_type.title())
embed.set_author(name=char_info, icon_url=user.avatar_url)
kills = data[stat_type]["allTime"]["kills"]["basic"]["displayValue"]
deaths = data[stat_type]["allTime"]["deaths"]["basic"]["displayValue"]
assists = data[stat_type]["allTime"]["assists"]["basic"]["displayValue"]
kda = f"{kills} | {deaths} | {assists}"
embed.add_field(name=_("Kills | Deaths | Assists"), value=kda)
if "emblemPath" in char:
embed.set_thumbnail(url=IMAGE_URL + char["emblemPath"])
for stat, values in data[stat_type]["allTime"].items():
if values["basic"]["value"] < 0 or stat not in ATTRS:
continue
embed.add_field(name=ATTRS[stat], value=str(values["basic"]["displayValue"]))
if "killsDeathsRatio" in data[stat_type] and "killsDeathsAssists" in data[stat_type]:
kdr = data[stat_type]["killsDeathsRatio"]
kda = data[stat_type]["killsDeathsAssists"]
if kdr or kda:
embed.add_field(name=_("KDR/KDA"), value=f"{kdr}/{kda}")
if (
"resurrectionsPerformed" in data[stat_type]
and "resurrectionsReceived" in data[stat_type]
):
res = data[stat_type]["resurrectionsPerformed"]
resur = data[stat_type]["resurrectionsReceived"]
if res or resur:
embed.add_field(name=_("Resurrections/Received"), value=f"{res}/{resur}")
return await self.get_char_colour(embed, char)
async def build_stat_embed_char_gambit(
self, user: discord.Member, char: dict, data: dict, stat_type: str
) -> discord.Embed:
char_info = ""
race = (await self.get_definition("DestinyRaceDefinition", [char["raceHash"]]))[
str(char["raceHash"])
]
gender = (await self.get_definition("DestinyGenderDefinition", [char["genderHash"]]))[
str(char["genderHash"])
]
char_class = (await self.get_definition("DestinyClassDefinition", [char["classHash"]]))[
str(char["classHash"])
]
char_info += "{user} - {race} {gender} {char_class} ".format(
user=user.display_name,
race=race["displayProperties"]["name"],
gender=gender["displayProperties"]["name"],
char_class=char_class["displayProperties"]["name"],
)
ATTRS = {
"opponentsDefeated": _("Opponents Defeated"),
"efficiency": _("Efficiency"),
"bestSingleGameKills": _("Best Single Game Kills"),
"bestSingleGameScore": _("Best Single Game Score"),
"precisionKills": _("Precision Kills"),
"longestKillSpree": _("Longest Killing Spree"),
"longestSingleLife": _("Longest Single Life"),
"totalActivityDurationSeconds": _("Total time playing"),
"averageLifespan": _("Average Life Span"),
"weaponBestType": _("Best Weapon Type"),
"winLossRatio": _("Win Loss Ratio"),
}
embed = discord.Embed(title="Gambit")
embed.set_author(name=char_info, icon_url=user.avatar_url)
kills = data["kills"]["basic"]["displayValue"]
deaths = data["deaths"]["basic"]["displayValue"]
assists = data["assists"]["basic"]["displayValue"]
kda = f"{kills} | {deaths} | {assists}"
embed.add_field(name=_("Kills | Deaths | Assists"), value=kda)
small_blocker = data["smallBlockersSent"]["basic"]["displayValue"]
med_blocker = data["mediumBlockersSent"]["basic"]["displayValue"]
large_blocker = data["largeBlockersSent"]["basic"]["displayValue"]
blockers = f"S {small_blocker}, M {med_blocker}, L {large_blocker}"
embed.add_field(name=_("Blockers"), value=blockers)
invasions = _("Invasions: {invasions}").format(
invasions=data["invasions"]["basic"]["displayValue"]
)
invasion_kills = _("Kills: {kills}\nDeaths: {deaths}").format(
kills=data["invasionKills"]["basic"]["displayValue"],
deaths=data["invasionDeaths"]["basic"]["displayValue"],
)
embed.add_field(name=invasions, value=invasion_kills)
invaders = _("Killed: {killed}\nKilled By: {by}").format(
killed=data["invaderKills"]["basic"]["displayValue"],
by=data["invaderDeaths"]["basic"]["displayValue"],
)
embed.add_field(name=_("Invaders"), value=invaders)
motes_dep = data["motesDeposited"]["basic"]["value"]
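        # "Lost" motes = fraction of picked-up motes that were never deposited;
        # the try/except guards against characters with zero motes picked up.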
try:
lost = 1 - (motes_dep / data["motesPickedUp"]["basic"]["value"])
motes_lost = "{:.2%}".format(lost)
except ZeroDivisionError:
motes_lost = "0%"
motes = _("{motes:,} ({lost} Lost)").format(motes=motes_dep, lost=motes_lost)
embed.add_field(name=_("Motes Deposited"), value=motes)
motes_denied = data["motesDenied"]["basic"]["value"]
embed.add_field(name=_("Motes Denied"), value="{:,}".format(motes_denied))
mob_kills = data["mobKills"]["basic"]["value"]
primeval_kills = data["primevalKills"]["basic"]["value"]
high_kills = data["highValueKills"]["basic"]["value"]
kills_msg = _("Primevals: {prime:,}\nHigh Value Targets: {high:,}\nMobs: {mobs:,}").format(
prime=primeval_kills, high=high_kills, mobs=mob_kills
)
embed.add_field(name=_("Kill Stats"), value=kills_msg)
if "killsDeathsRatio" in data and "killsDeathsAssists" in data:
kdr = data["killsDeathsRatio"]["basic"]["displayValue"]
kda = data["killsDeathsAssists"]["basic"]["displayValue"]
if kdr or kda:
embed.add_field(name=_("KDR/KDA"), value=f"{kdr}/{kda}")
if "resurrectionsPerformed" in data and "resurrectionsReceived" in data:
res = data["resurrectionsPerformed"]["basic"]["displayValue"]
resur = data["resurrectionsReceived"]["basic"]["displayValue"]
if res or resur:
embed.add_field(name=_("Resurrections/Received"), value=f"{res}/{resur}")
if "emblemPath" in char:
embed.set_thumbnail(url=IMAGE_URL + char["emblemPath"])
for stat, values in data.items():
if values["basic"]["value"] < 0 or stat not in ATTRS:
continue
embed.add_field(name=ATTRS[stat], value=str(values["basic"]["displayValue"]))
return await self.get_char_colour(embed, char)
@destiny.command()
@commands.bot_has_permissions(embed_links=True, add_reactions=True)
async def stats(self, ctx: commands.Context, stat_type: StatsPage, all: bool = True) -> None:
"""
Display each character's stats for a specific activity
        `<stat_type>` The type of stats to display; available options are:
        `raid`, `pvp`, `pve`, `patrol`, `story`, `gambit`, and `strikes`
"""
async with ctx.typing():
if not await self.has_oauth(ctx):
msg = _(
"You need to authenticate your Bungie.net account before this command will work."
)
return await ctx.send(msg)
user = ctx.author
try:
chars = await self.get_characters(user)
except Destiny2APIError:
# log.debug(e)
msg = _("I can't seem to find your Destiny profile.")
await ctx.send(msg)
return
# base stats should be available for all stat types
embeds = await self.build_character_stats(user, chars, stat_type)
if not embeds:
return await ctx.send(
_("No stats could be found for that activity and character.")
)
await BaseMenu(
source=BasePages(
pages=embeds,
),
delete_message_after=False,
clear_reactions_after=True,
timeout=60,
cog=self,
page_start=0,
).start(ctx=ctx)
@destiny.command()
@checks.is_owner()
@commands.bot_has_permissions(add_reactions=True)
async def manifest(self, ctx: commands.Context, d1: bool = False) -> None:
"""
See the current manifest version and optionally re-download it
"""
if not d1:
try:
headers = await self.build_headers()
except Exception:
return await ctx.send(
_(
"You need to set your API authentication tokens with `[p]destiny token` first."
)
)
manifest_data = await self.request_url(
f"{BASE_URL}/Destiny2/Manifest/", headers=headers
)
version = await self.config.manifest_version()
if not version:
version = _("Not Downloaded")
msg = _("Current manifest version is {version}.").format(version=version)
redownload = _("re-download")
if manifest_data["version"] != version:
msg += _("\n\nThere is an update available to version {version}").format(
version=manifest_data["version"]
)
redownload = _("download")
await ctx.send(msg)
await ctx.trigger_typing()
msg = await ctx.send(
_("Would you like to {redownload} the manifest?").format(redownload=redownload)
)
start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)
pred = ReactionPredicate.yes_or_no(msg, ctx.author)
try:
react, user = await self.bot.wait_for("reaction_add", check=pred, timeout=15)
except asyncio.TimeoutError:
                await msg.delete()
                return
if pred.result:
try:
version = await self.get_manifest()
except Exception:
log.exception("Error getting destiny manifest")
return await ctx.send(_("There was an issue downloading the manifest."))
await msg.delete()
await ctx.send(f"Manifest {version} was downloaded.")
else:
await msg.delete()
else:
try:
version = await self.get_manifest(d1)
except Exception:
log.exception("Error getting D1 manifest")
return await ctx.send(_("There was an issue downloading the manifest."))
@destiny.command()
@checks.is_owner()
async def token(
self, ctx: commands.Context, api_key: str, client_id: str, client_secret: str
) -> None:
"""
Set the API tokens for Destiny 2's API
Required information is found at:
https://www.bungie.net/en/Application
select **Create New App**
Choose **Confidential** OAuth Client type
Select the scope you would like the bot to have access to
Set the redirect URL to https://localhost/
NOTE: It is strongly recommended to use this command in DM
"""
await self.config.api_token.api_key.set(api_key)
await self.config.api_token.client_id.set(client_id)
await self.config.api_token.client_secret.set(client_secret)
if ctx.channel.permissions_for(ctx.me).manage_messages:
await ctx.message.delete()
await ctx.send("Destiny 2 API credentials set!")
|
the-stack_0_157 | from api.api_error import APIError
from api.api_message import APIMessage
from api.json_connector import JSONConnector
from api.api_config import APIConfig
from api.ptp_connector import PTPConnector
class BotMethods:
@staticmethod
def start_bot(req):
"""
Starts a PTP Bot object.
:param req:
:return:
"""
keys = req.keys()
if 'bot_name' not in keys or not req['bot_name']:
return APIError.create(message='No bot name in the request body.', code=400)
elif 'action_name' not in keys or not req['action_name']:
return APIError.create(message='No action name in the request body.', code=400)
bots = JSONConnector.get_json_file_content(
directory=APIConfig.json_save_path,
name=APIConfig.json_bots_file_name
)
bot_actions = JSONConnector.get_json_file_content(
directory=APIConfig.json_save_path,
name=APIConfig.json_bot_actions_file_name
)
found_action = {}
found_bot = {}
        for item in bot_actions['bot_actions']:
            if req['bot_name'] == item['bot_name'] and req['action_name'] == item['action_name']:
                found_action = item
for item in bots['bots']:
if req['bot_name'] == item['bot_name']:
found_bot = item
if found_action and found_bot:
access_info = {
'access_token': found_bot['access_token'],
'access_token_secret': found_bot['access_token_secret'],
'consumer_key': found_bot['consumer_key'],
'consumer_secret': found_bot['consumer_secret']
}
PTPConnector.start_bot(access_info, found_action['method'], {'actions': found_action['actions']})
            return APIMessage.create(message='Bot successfully started.', code=200)
        return APIError.create(message='Bot or bot action not found.', code=404)
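# Hedged usage sketch (not part of the original module): start_bot expects a
# request body carrying the registered bot and action names; the values below
# are placeholders and the call requires the JSON config files to exist.
#
#   response = BotMethods.start_bot({'bot_name': 'my_bot', 'action_name': 'my_action'})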
|
the-stack_0_158 | from fastapi import APIRouter, BackgroundTasks, Depends, File, UploadFile
from typing import List
from sqlalchemy.orm import Session
from api.utils.auth import get_db
from api.auth.auth import auth_check
from api.db.crud import templates as crud
from api.db.crud import settings as scrud
from api.db.schemas import templates as schemas
from api.db.models import containers
from api.db.database import engine
from api.actions import resources
from api.actions.apps import _update_self, check_self_update
from api.settings import Settings
from fastapi_jwt_auth import AuthJWT
containers.Base.metadata.create_all(bind=engine)
settings = Settings()
router = APIRouter()
@router.get(
"/variables",
response_model=List[schemas.TemplateVariables],
operation_id="authorize",
)
def read_template_variables(
db: Session = Depends(get_db), Authorize: AuthJWT = Depends()
):
auth_check(Authorize)
return crud.read_template_variables(db=db)
@router.post(
"/variables",
response_model=List[schemas.TemplateVariables],
)
def set_template_variables(
new_variables: List[schemas.TemplateVariables],
db: Session = Depends(get_db),
Authorize: AuthJWT = Depends(),
):
auth_check(Authorize)
return crud.set_template_variables(new_variables=new_variables, db=db)
@router.get(
"/export",
response_model=schemas.Import_Export,
)
def export_settings(db: Session = Depends(get_db), Authorize: AuthJWT = Depends()):
auth_check(Authorize)
return scrud.export_settings(db=db)
@router.post(
"/export",
)
def import_settings(
db: Session = Depends(get_db),
upload: UploadFile = File(...),
Authorize: AuthJWT = Depends(),
):
auth_check(Authorize)
return scrud.import_settings(db=db, upload=upload)
@router.get(
"/prune/{resource}",
)
def prune_resources(resource: str, Authorize: AuthJWT = Depends()):
auth_check(Authorize)
return resources.prune_resources(resource)
@router.get(
"/update",
)
def update_self(background_tasks: BackgroundTasks, Authorize: AuthJWT = Depends()):
auth_check(Authorize)
return _update_self(background_tasks)
@router.get(
"/check/update",
)
def _check_self_update(Authorize: AuthJWT = Depends()):
auth_check(Authorize)
return check_self_update()
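# Hedged wiring sketch (not part of this module): the router above is expected
# to be mounted on the FastAPI application elsewhere, for example:
#
#   from fastapi import FastAPI
#   app = FastAPI()
#   app.include_router(router, prefix="/api/settings", tags=["settings"])
#
# The prefix and tags shown here are illustrative assumptions.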
|
the-stack_0_161 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define statistical functions of a tensor
import numpy as np
from ..fluid.framework import Variable
from ..fluid.layer_helper import LayerHelper
from ..fluid.framework import core, in_dygraph_mode
from ..fluid import layers
from .search import where
from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
import paddle
from paddle import _C_ops
__all__ = []
def mean(x, axis=None, keepdim=False, name=None):
"""
Computes the mean of the input tensor's elements along ``axis``.
Args:
x (Tensor): The input Tensor with data type float32, float64.
axis (int|list|tuple, optional): The axis along which to perform mean
calculations. ``axis`` should be int, list(int) or tuple(int). If
``axis`` is a list/tuple of dimension(s), mean is calculated along
all element(s) of ``axis`` . ``axis`` or element(s) of ``axis``
should be in range [-D, D), where D is the dimensions of ``x`` . If
``axis`` or element(s) of ``axis`` is less than 0, it works the
same way as :math:`axis + D` . If ``axis`` is None, mean is
calculated over all elements of ``x``. Default is None.
keepdim (bool, optional): Whether to reserve the reduced dimension(s)
in the output Tensor. If ``keepdim`` is True, the dimensions of
the output Tensor is the same as ``x`` except in the reduced
dimensions(it is of size 1 in this case). Otherwise, the shape of
the output Tensor is squeezed in ``axis`` . Default is False.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, results of average along ``axis`` of ``x``, with the same data
type as ``x``.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[[1., 2., 3., 4.],
[5., 6., 7., 8.],
[9., 10., 11., 12.]],
[[13., 14., 15., 16.],
[17., 18., 19., 20.],
[21., 22., 23., 24.]]])
out1 = paddle.mean(x)
# [12.5]
out2 = paddle.mean(x, axis=-1)
# [[ 2.5 6.5 10.5]
# [14.5 18.5 22.5]]
out3 = paddle.mean(x, axis=-1, keepdim=True)
# [[[ 2.5]
# [ 6.5]
# [10.5]]
# [[14.5]
# [18.5]
# [22.5]]]
out4 = paddle.mean(x, axis=[0, 2])
# [ 8.5 12.5 16.5]
"""
if isinstance(axis, int):
axis = [axis]
    reduce_all = axis is None or len(axis) == 0 or len(axis) == len(x.shape)
if axis is None or len(axis) == 0:
axis = [0]
if in_dygraph_mode():
return _C_ops.reduce_mean(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all)
check_variable_and_dtype(x, 'x/input', ['float32', 'float64'],
'mean/reduce_mean')
check_type(axis, 'axis/dim', (int, list, tuple), 'mean/reduce_mean')
if isinstance(axis, (list, tuple)):
for item in axis:
check_type(item, 'elements of axis/dim', (int), 'mean/reduce_mean')
helper = LayerHelper('mean', **locals())
attrs = {'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='reduce_mean', inputs={'X': x}, outputs={'Out': out}, attrs=attrs)
return out
def var(x, axis=None, unbiased=True, keepdim=False, name=None):
"""
Computes the variance of ``x`` along ``axis`` .
Args:
x (Tensor): The input Tensor with data type float32, float64.
axis (int|list|tuple, optional): The axis along which to perform
variance calculations. ``axis`` should be int, list(int) or
tuple(int). If ``axis`` is a list/tuple of dimension(s), variance
is calculated along all element(s) of ``axis`` . ``axis`` or
element(s) of ``axis`` should be in range [-D, D), where D is the
dimensions of ``x`` . If ``axis`` or element(s) of ``axis`` is less
than 0, it works the same way as :math:`axis + D` . If ``axis`` is
None, variance is calculated over all elements of ``x``. Default
is None.
unbiased (bool, optional): Whether to use the unbiased estimation. If
``unbiased`` is True, the divisor used in the computation is
:math:`N - 1`, where :math:`N` represents the number of elements
along ``axis`` , otherwise the divisor is :math:`N`. Default is True.
keepdim (bool, optional): Whether to reserve the reduced dimension(s)
in the output Tensor. If ``keepdim`` is True, the dimensions of
the output Tensor is the same as ``x`` except in the reduced
dimensions(it is of size 1 in this case). Otherwise, the shape of
the output Tensor is squeezed in ``axis`` . Default is False.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, results of variance along ``axis`` of ``x``, with the same data
type as ``x``.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1.0, 2.0, 3.0], [1.0, 4.0, 5.0]])
out1 = paddle.var(x)
# [2.66666667]
out2 = paddle.var(x, axis=1)
# [1. 4.33333333]
"""
if not in_dygraph_mode():
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'var')
u = mean(x, axis, True, name)
out = paddle.sum((x - u)**2, axis, keepdim=keepdim, name=name)
n = paddle.cast(paddle.numel(x), x.dtype) \
/ paddle.cast(paddle.numel(out), x.dtype)
if unbiased:
one_const = paddle.ones([1], x.dtype)
n = where(n > one_const, n - 1., one_const)
out /= n
return out
def std(x, axis=None, unbiased=True, keepdim=False, name=None):
"""
Computes the standard-deviation of ``x`` along ``axis`` .
Args:
x (Tensor): The input Tensor with data type float32, float64.
axis (int|list|tuple, optional): The axis along which to perform
standard-deviation calculations. ``axis`` should be int, list(int)
or tuple(int). If ``axis`` is a list/tuple of dimension(s),
standard-deviation is calculated along all element(s) of ``axis`` .
``axis`` or element(s) of ``axis`` should be in range [-D, D),
where D is the dimensions of ``x`` . If ``axis`` or element(s) of
``axis`` is less than 0, it works the same way as :math:`axis + D` .
If ``axis`` is None, standard-deviation is calculated over all
elements of ``x``. Default is None.
unbiased (bool, optional): Whether to use the unbiased estimation. If
``unbiased`` is True, the standard-deviation is calculated via the
unbiased estimator. If ``unbiased`` is True, the divisor used in
the computation is :math:`N - 1`, where :math:`N` represents the
number of elements along ``axis`` , otherwise the divisor is
:math:`N`. Default is True.
keepdim (bool, optional): Whether to reserve the reduced dimension(s)
in the output Tensor. If ``keepdim`` is True, the dimensions of
the output Tensor is the same as ``x`` except in the reduced
dimensions(it is of size 1 in this case). Otherwise, the shape of
the output Tensor is squeezed in ``axis`` . Default is False.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, results of standard-deviation along ``axis`` of ``x``, with the
same data type as ``x``.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1.0, 2.0, 3.0], [1.0, 4.0, 5.0]])
out1 = paddle.std(x)
# [1.63299316]
out2 = paddle.std(x, axis=1)
# [1. 2.081666]
"""
if not in_dygraph_mode():
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'std')
out = var(**locals())
return paddle.sqrt(out)
def numel(x, name=None):
"""
    Returns the number of elements for a tensor, which is an int64 Tensor with shape [1] in static mode
    or a scalar value in imperative mode.
Args:
x (Tensor): The input Tensor, it's data type can be bool, float16, float32, float64, int32, int64.
Returns:
Tensor: The number of elements for the input Tensor.
Examples:
.. code-block:: python
import paddle
x = paddle.full(shape=[4, 5, 7], fill_value=0, dtype='int32')
numel = paddle.numel(x) # 140
"""
if in_dygraph_mode():
return _C_ops.size(x)
if not isinstance(x, Variable):
raise TypeError("x must be a Tensor in numel")
helper = LayerHelper('numel', **locals())
out = helper.create_variable_for_type_inference(
dtype=core.VarDesc.VarType.INT64)
helper.append_op(type='size', inputs={'Input': x}, outputs={'Out': out})
return out
def median(x, axis=None, keepdim=False, name=None):
"""
Compute the median along the specified axis.
Args:
x (Tensor): The input Tensor, it's data type can be bool, float16, float32, float64, int32, int64.
        axis (int, optional): The axis along which to perform median calculations. ``axis`` should be int.
``axis`` should be in range [-D, D), where D is the dimensions of ``x`` .
If ``axis`` is less than 0, it works the same way as :math:`axis + D`.
If ``axis`` is None, median is calculated over all elements of ``x``. Default is None.
keepdim (bool, optional): Whether to reserve the reduced dimension(s)
in the output Tensor. If ``keepdim`` is True, the dimensions of
the output Tensor is the same as ``x`` except in the reduced
dimensions(it is of size 1 in this case). Otherwise, the shape of
the output Tensor is squeezed in ``axis`` . Default is False.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, results of median along ``axis`` of ``x``. If data type of ``x`` is float64, data type of results will be float64, otherwise data type will be float32.
Examples:
.. code-block:: python
import paddle
x = paddle.arange(12).reshape([3, 4])
# x is [[0 , 1 , 2 , 3 ],
# [4 , 5 , 6 , 7 ],
# [8 , 9 , 10, 11]]
y1 = paddle.median(x)
# y1 is [5.5]
y2 = paddle.median(x, axis=0)
# y2 is [4., 5., 6., 7.]
y3 = paddle.median(x, axis=1)
# y3 is [1.5, 5.5, 9.5]
y4 = paddle.median(x, axis=0, keepdim=True)
# y4 is [[4., 5., 6., 7.]]
"""
if not isinstance(x, Variable):
raise TypeError("In median, the input x should be a Tensor.")
is_flatten = axis is None
dims = len(x.shape)
if is_flatten:
x = paddle.flatten(x)
axis = 0
else:
if not isinstance(axis, int) or not (axis < dims and axis >= -dims):
raise ValueError(
"In median, axis should be none or an integer in range [-rank(x), rank(x))."
)
if axis < 0:
axis += dims
sz = x.shape[axis]
kth = sz >> 1
tensor_topk, idx = paddle.topk(x, kth + 1, axis=axis, largest=False)
dtype = 'float64' if x.dtype == core.VarDesc.VarType.FP64 else 'float32'
if sz & 1 == 0:
out_tensor = paddle.slice(
tensor_topk, axes=[axis], starts=[kth - 1],
ends=[kth]) + paddle.slice(
tensor_topk, axes=[axis], starts=[kth], ends=[kth + 1])
out_tensor = paddle.cast(out_tensor, dtype=dtype) / 2
else:
out_tensor = paddle.cast(
paddle.slice(
tensor_topk, axes=[axis], starts=[kth], ends=[kth + 1]),
dtype=dtype)
if not keepdim or is_flatten:
if not is_flatten:
newshape = x.shape[:axis] + x.shape[axis + 1:]
elif not keepdim:
newshape = [1]
else:
newshape = [1] * dims
else:
newshape = out_tensor.shape
out_tensor = out_tensor.reshape(newshape, name=name)
return out_tensor
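# A small cross-check sketch (not part of the Paddle source). It assumes an
# installed paddle build and uses only the public API exercised above:
#
#   import numpy as np
#   import paddle
#   x = paddle.to_tensor(np.arange(12, dtype="float32").reshape([3, 4]))
#   assert float(paddle.median(x)) == float(np.median(np.arange(12)))  # both 5.5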
|
the-stack_0_162 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy.item import Item, Field
class XiaobaiheItem(Item):
# define the fields for your item here like:
# name = scrapy.Field()
username = Field()
text = Field()
url = Field()
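# A minimal usage sketch (not part of the original spider project): a scrapy
# Item behaves like a dict restricted to the fields declared above; the values
# used here are placeholders only.
if __name__ == "__main__":
    demo = XiaobaiheItem(username="demo_user", text="hello world", url="http://example.com/post/1")
    print(dict(demo))  # prints the populated fields as a plain dict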
|
the-stack_0_164 | """ Tests for the various cli programs """
from pyontutils.integration_test_helper import _TestCliBase, Folders
class TestCli(Folders, _TestCliBase):
commands = (
['googapis', '--help'],
['graphml-to-ttl', '--help'],
['necromancy', '--help'],
['ontload', '--help'],
['overlaps', '--help'],
['qnamefix', '--help'],
['scigraph-codegen', '--help'],
['scig', '--help'],
['ttlfmt', '--help'],
)
|
the-stack_0_165 | #!/usr/bin/python
import math
import matplotlib.pyplot as plt
from graphtheory.structures.edges import Edge
from graphtheory.structures.graphs import Graph
from graphtheory.structures.factory import GraphFactory
from graphtheory.structures.points import Point
from graphtheory.forests.treeplot import TreePlot
from graphtheory.forests.treeplot import TreePlotRadiusAngle
V = 20
gf = GraphFactory(Graph)
G = gf.make_tree(V)
#G.show()
assert G.e() == V-1
algorithm = TreePlotRadiusAngle(G)
algorithm.run()
#print ( algorithm.point_dict ) # (radius, angle)
D = dict() # node ---> point on the plane
for node in algorithm.point_dict:
(radius, angle) = algorithm.point_dict[node]
D[node] = Point(radius * math.cos(angle), radius * math.sin(angle))
#print ( D )
for edge in G.iteredges():
x = [D[edge.source].x, D[edge.target].x]
y = [D[edge.source].y, D[edge.target].y]
plt.plot(x, y, 'k-') # black line
x = [D[node].x for node in G.iternodes()]
y = [D[node].y for node in G.iternodes()]
plt.plot(x, y, 'bo') # blue circle
plt.title("Random tree")
plt.xlabel("x")
plt.ylabel("y")
plt.show()
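# For a headless run (e.g. on CI) the interactive window can be skipped by
# replacing plt.show() above with plt.savefig("random_tree.png"); the file
# name is only illustrative.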
# EOF
|
the-stack_0_166 | import pandas as pd
import numpy as np
import joblib
import Levenshtein
import argparse
import ast
from scipy import stats
from src import nlp_preprocessing
def preprocess_txt(txt: str):
"""Executa preprocessamento textual padrão"""
cleaned_txt = nlp_preprocessing.clean_text(txt)
token_txt = nlp_preprocessing.custom_tokenizer(cleaned_txt)
return " ".join(token_txt)
def get_top_docs(query, cleaned_doc_list, doc_titles, get_titles=True):
"""Gera as recomedações a partir de uma query e listas de referência"""
cleaned_query = preprocess_txt(query)
dists = [Levenshtein.distance(cleaned_query, doc)
for doc in cleaned_doc_list]
mask = np.array(dists).argsort()[:10]
if get_titles:
return doc_titles.iloc[mask].tolist()
else:
return doc_titles.iloc[mask].index.tolist()
def load_data():
"""Carrega os dados"""
df = pd.concat([
pd.read_pickle("data/train_query.pickle"),
pd.read_pickle("data/test_query.pickle")
])
return df
def series_mode(serie: pd.Series):
"""Calcula a moda de uma série"""
return stats.mode(serie)[0][0]
def remove_duplicates(df, group="product_id",
num_cols=["price", "weight", "minimum_quantity"],
cat_cols=["title", "concatenated_tags"]) -> pd.DataFrame:
"""Função que remove os registros duplicados juntando os por média e moda
a depender dos tipos de coluna"""
mode_stats = {col: series_mode for col in cat_cols}
mean_stats = {col: "mean" for col in num_cols}
agg_stats = dict(**mode_stats, **mean_stats)
return df.groupby(group).agg(agg_stats)
def make_predictions(query, clf_model):
"""Função que realiza as recomendações com predição de categoria
majoritária"""
df = load_data()
prod_titles = (df[["product_id", "title"]].
drop_duplicates().set_index("product_id")["title"])
cleaned_prod_titles = [preprocess_txt(txt) for txt in prod_titles]
prod_id_select = get_top_docs(query,
cleaned_prod_titles,
prod_titles,
False)
selected_df = df.loc[df["product_id"].isin(prod_id_select)]
selected_df = remove_duplicates(selected_df)
predicted_cats = clf_model.predict(selected_df)
major_cat = stats.mode(predicted_cats)[0][0]
print(major_cat)
    for _id, title in selected_df["title"].items():
print(f"{_id} - {title}")
# helper function used by the classification pipeline
def select_txt(X: pd.DataFrame, col: str):
return X[col]
# helper function used by the classification pipeline
def select_base_features(X: pd.DataFrame):
return X[["price", "weight", "minimum_quantity"]]
def load_args() -> tuple:
    """Parse the command-line arguments.
    Returns:
        tuple: the record to be categorized (pd.DataFrame or None) and the
        recommendation query (str or None).
    """
    # declare the expected command-line arguments and parse them
parser = argparse.ArgumentParser()
parser.add_argument("-c",
"--category",
help="Texto de registro a ser categorizado",
type=str)
parser.add_argument("-r",
"--recommendation",
help="Sistema de recomendação de produtos",
type=str)
args = parser.parse_args()
    # extract the record to be categorized from the received arguments
    # and shape it for prediction
if args.category is not None:
product_dict = ast.literal_eval(args.category)
product_df = pd.Series(product_dict).to_frame().T
else:
product_df = None
return product_df, args.recommendation
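# Example invocations (illustrative only; the script name, feature values and
# query below are assumptions, not taken from the project):
#   python main.py -c "{'price': 99.9, 'weight': 10.0, 'minimum_quantity': 1, 'title': 'mandala de parede', 'concatenated_tags': 'mandala decoracao'}"
#   python main.py -r "lembrancinha de casamento"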
def predict_single_category(df, clf_model):
    """Predict and print the category of a single record."""
    product_category = clf_model.predict(df)[0]
print(product_category)
def main():
    # load the trained classification pipeline
rf_clf_pipeline = joblib.load("assets/category_rf_clf_pipeline.joblib")
    # load the record to be categorized and the recommendation query
product_df, query = load_args()
    # predict the category
if product_df is not None:
predict_single_category(product_df, rf_clf_pipeline)
if query is not None:
make_predictions(query, rf_clf_pipeline)
if __name__ == "__main__":
    main()
|