# Source: _pycharm_skeletons/renderdoc/GLVertexAttribute.py (repo: Lex-DRL/renderdoc-py-stubs, MIT license)
# encoding: utf-8
# module renderdoc
# from P:\1-Scripts\_Python\Py-Autocomplete\renderdoc.pyd
# by generator 1.146
# no doc
# imports
import enum as __enum
from .SwigPyObject import SwigPyObject
class GLVertexAttribute(SwigPyObject):
"""
Describes the configuration for a single vertex attribute.
.. note:: If old-style vertex attrib pointer setup was used for the vertex attributes then it will
be decomposed into 1:1 attributes and buffers.
"""
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
byteOffset = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""
The byte offset from the start of the vertex data in the vertex buffer from
:data:`vertexBufferSlot`.
"""
enabled = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""``True`` if this vertex attribute is enabled."""
format = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The :class:`ResourceFormat` of the vertex attribute."""
genericValue = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""A :class:`PixelValue` containing the generic value of a vertex attribute."""
this = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
thisown = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
vertexBufferSlot = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The vertex buffer input slot where the data is sourced from."""
__dict__ = None # (!) real value is ''
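# Illustrative sketch (not part of the generated stub): reading the documented
# properties of a GLVertexAttribute instance, e.g. one obtained from a
# renderdoc pipeline-state query.  The helper name is made up for this example.
def _describe_vertex_attribute(attr):
    """Return a short human-readable summary of a GLVertexAttribute."""
    if not attr.enabled:
        return "disabled (generic value %r)" % (attr.genericValue,)
    return "buffer slot %r, byte offset %r, format %r" % (
        attr.vertexBufferSlot, attr.byteOffset, attr.format)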
# Source: website/registration/forms.py (repo: Nussy/owf2014, no license detected)
from flask.ext.wtf import Form, BooleanField, TextField, TextAreaField, required, email
from flask.ext.wtf.html5 import EmailField
from flask.ext.babel import lazy_gettext as _l
from website.registration.models import Track
__all__ = ['make_registration_form_class', 'make_confirmation_form_class']
def make_mixin_class():
class DynamicMixin(object):
pass
for track in Track.query.all():
label = "%s: %s" % (track.theme, track.title)
name = "track_%d" % track.id
field = BooleanField(label=label)
setattr(DynamicMixin, name, field)
return DynamicMixin
def make_registration_form_class():
mixin_class = make_mixin_class()
class RegistrationForm(mixin_class, Form):
email = EmailField(label=_l(u"Your email address"),
validators=[required(), email()])
coming_on_oct_3 = BooleanField(label=_l(u"Will you come on Oct. 3th? (Thursday)"))
coming_on_oct_4 = BooleanField(label=_l(u"Will you come on Oct. 4th? (Friday)"))
coming_on_oct_5 = BooleanField(label=_l(u"Will you come on Oct. 5th? (Saturday)"))
return RegistrationForm
def make_confirmation_form_class():
mixin_class = make_mixin_class()
class ConfirmationForm(mixin_class, Form):
email = EmailField(label=_l(u"Your email address"),
validators=[required(), email()])
coming_on_oct_3 = BooleanField(label=_l(u"Will you come on Oct. 3th? (Thursday)"))
coming_on_oct_4 = BooleanField(label=_l(u"Will you come on Oct. 4th? (Friday)"))
coming_on_oct_5 = BooleanField(label=_l(u"Will you come on Oct. 5th? (Saturday)"))
first_name = TextField(label=_l("First name"))
last_name = TextField(label=_l("Last name"))
organization = TextField(label=_l("Organization"))
url = TextField(label=_l("URL"))
        biography = TextAreaField(label=_l("Biography"))
# twitter_handle = Column(UnicodeText(100), default="", nullable=False)
# github_handle = Column(UnicodeText(200), default="", nullable=False)
# sourceforge_handle = Column(UnicodeText(200), default="", nullable=False)
# linkedin_url = Column(UnicodeText(200), default="", nullable=False)
return ConfirmationForm
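# Illustrative sketch (not part of the original module): the factories above are
# meant to be called once per request, so the generated ``track_<id>`` checkboxes
# always reflect the current rows of the Track table.  The helper name is made up
# for this example and assumes an active Flask application context.
def _build_registration_form():
    form_class = make_registration_form_class()
    form = form_class()
    track_field_names = [name for name in dir(form) if name.startswith("track_")]
    return form, track_field_names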
# Source: 12_DRF_API_ModalViewSet/DRF_API_ModalViewSet/wsgi.py (repo: DharmendraB/DRF-Django-RestFramework-API, no license detected)
"""
WSGI config for DRF_API_ModalViewSet project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DRF_API_ModalViewSet.settings')
application = get_wsgi_application()
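# Illustrative sketch (not part of the original file): ``application`` follows the
# WSGI convention of being called with an environ dict and a start_response
# callable and returning an iterable of byte strings.  The tiny app and helper
# below are made up to show that calling convention without a configured project.
def _tiny_wsgi_app(environ, start_response):
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"path was: " + environ.get("PATH_INFO", "/").encode("utf-8")]
def _call_wsgi_once(app, path="/"):
    captured = {}
    def start_response(status, headers):
        captured["status"], captured["headers"] = status, headers
    body = b"".join(app({"PATH_INFO": path, "REQUEST_METHOD": "GET"}, start_response))
    return captured["status"], body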
# Source: sklearn/linear_model/logistic.py (repo: lqkweb/learnMLflow, Apache-2.0 / BSD-3-Clause)
"""
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
# Lars Buitinck
# Simon Wu <[email protected]>
# Arthur Mensch <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from scipy.special import expit
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from .sag import sag_solver
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (log_logistic, safe_sparse_dot, softmax,
squared_norm)
from ..utils.extmath import row_norms
from ..utils.fixes import logsumexp
from ..utils.optimize import newton_cg
from ..utils.validation import check_X_y
from ..utils import deprecated
from ..exceptions import (NotFittedError, ConvergenceWarning,
ChangedBehaviorWarning)
from ..utils.multiclass import check_classification_targets
from ..utils._joblib import Parallel, delayed, effective_n_jobs
from ..utils.fixes import _joblib_parallel_args
from ..model_selection import check_cv
from ..metrics import get_scorer
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
Returns
-------
w : ndarray, shape (n_features,)
Coefficient vector without the intercept weight (w[-1]) if the
intercept should be fit. Unchanged otherwise.
c : float
The intercept.
yz : float
y * np.dot(X, w).
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
yz = y * z
return w, c, yz
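# Illustrative sketch (not part of the upstream module): when ``w`` has one more
# entry than X has columns, the trailing entry is treated as the intercept.  The
# toy numbers and the helper name are made up for demonstration.
def _demo_intercept_dot():
    X_toy = np.array([[1., 2.], [3., 4.]])
    y_toy = np.array([1., -1.])
    w_toy = np.array([0.5, -0.25, 0.1])    # last entry is the intercept c
    w_out, c, yz = _intercept_dot(w_toy, X_toy, y_toy)
    # yz[i] = y[i] * (X[i] @ w_out + c); a positive entry means sample i lies on
    # the side of the decision boundary matching its label.
    return w_out, c, yz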
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(n_samples)
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
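# Illustrative sketch (not part of the upstream module): a finite-difference check
# that the gradient returned by _logistic_loss_and_grad matches the numerical
# derivative of _logistic_loss.  Toy data and the helper name are made up here.
def _demo_logistic_gradient_check(eps=1e-6):
    rng = np.random.RandomState(0)
    X_toy = rng.randn(20, 3)
    y_toy = np.where(rng.randn(20) > 0, 1., -1.)   # labels must be +/- 1
    w_toy = rng.randn(4)                            # 3 coefficients + intercept
    _, grad = _logistic_loss_and_grad(w_toy, X_toy, y_toy, 1.0)
    approx = np.empty_like(w_toy)
    for i in range(w_toy.size):
        w_plus, w_minus = w_toy.copy(), w_toy.copy()
        w_plus[i] += eps
        w_minus[i] -= eps
        approx[i] = (_logistic_loss(w_plus, X_toy, y_toy, 1.0) -
                     _logistic_loss(w_minus, X_toy, y_toy, 1.0)) / (2 * eps)
    return np.max(np.abs(grad - approx))            # should be close to zero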
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
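# Illustrative sketch (not part of the upstream module): the ``Hs`` callable
# returned above applies the Hessian to a vector without forming the full matrix,
# which is exactly what the newton-cg solver needs.  It can be checked against a
# finite difference of the gradient along the same direction (toy data below).
def _demo_hessian_vector_product(eps=1e-5):
    rng = np.random.RandomState(0)
    X_toy = rng.randn(30, 4)
    y_toy = np.where(rng.randn(30) > 0, 1., -1.)
    w_toy = rng.randn(5)                 # 4 coefficients + intercept
    s = rng.randn(5)                     # direction to multiply the Hessian by
    _, Hs = _logistic_grad_hess(w_toy, X_toy, y_toy, 0.5)
    _, g_plus = _logistic_loss_and_grad(w_toy + eps * s, X_toy, y_toy, 0.5)
    _, g_minus = _logistic_loss_and_grad(w_toy - eps * s, X_toy, y_toy, 0.5)
    # H @ s is approximately (grad(w + eps*s) - grad(w - eps*s)) / (2 * eps)
    return np.max(np.abs(Hs(s) - (g_plus - g_minus) / (2 * eps)))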
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
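# Illustrative sketch (not part of the upstream module): the ``p`` returned by
# _multinomial_loss is a row-wise softmax of the decision values, so every row
# sums to one.  The shapes and data below are made up for demonstration.
def _demo_multinomial_probabilities():
    rng = np.random.RandomState(0)
    n_samples, n_features, n_classes = 8, 3, 4
    X_toy = rng.randn(n_samples, n_features)
    Y_toy = np.eye(n_classes)[rng.randint(n_classes, size=n_samples)]  # one-hot
    w_toy = rng.randn(n_classes * (n_features + 1))  # per-class weights + intercepts
    loss, p, w = _multinomial_loss(w_toy, X_toy, Y_toy, 1.0, np.ones(n_samples))
    assert np.allclose(p.sum(axis=1), 1.0)
    return loss, p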
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)),
dtype=X.dtype)
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
def _check_solver(solver, penalty, dual):
if solver == 'warn':
solver = 'liblinear'
warnings.warn("Default solver will be changed to 'lbfgs' in 0.22. "
"Specify a solver to silence this warning.",
FutureWarning)
all_solvers = ['liblinear', 'newton-cg', 'lbfgs', 'sag', 'saga']
if solver not in all_solvers:
raise ValueError("Logistic Regression supports only solvers in %s, got"
" %s." % (all_solvers, solver))
all_penalties = ['l1', 'l2', 'elasticnet', 'none']
if penalty not in all_penalties:
raise ValueError("Logistic Regression supports only penalties in %s,"
" got %s." % (all_penalties, penalty))
if solver not in ['liblinear', 'saga'] and penalty not in ('l2', 'none'):
raise ValueError("Solver %s supports only 'l2' or 'none' penalties, "
"got %s penalty." % (solver, penalty))
if solver != 'liblinear' and dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
if penalty == 'elasticnet' and solver != 'saga':
raise ValueError("Only 'saga' solver supports elasticnet penalty,"
" got solver={}.".format(solver))
if solver == 'liblinear' and penalty == 'none':
raise ValueError(
"penalty='none' is not supported for the liblinear solver"
)
return solver
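# Illustrative sketch (not part of the upstream module): probing which
# solver / penalty combinations the validation above accepts.  The helper name is
# made up for demonstration.
def _demo_solver_penalty_support():
    support = {}
    for solver_name in ('liblinear', 'newton-cg', 'lbfgs', 'sag', 'saga'):
        for penalty_name in ('l1', 'l2', 'elasticnet', 'none'):
            try:
                _check_solver(solver_name, penalty_name, dual=False)
                support[(solver_name, penalty_name)] = True
            except ValueError:
                support[(solver_name, penalty_name)] = False
    # e.g. only 'saga' accepts 'elasticnet', and 'liblinear' rejects 'none'.
    return support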
def _check_multi_class(multi_class, solver, n_classes):
if multi_class == 'warn':
multi_class = 'ovr'
if n_classes > 2:
warnings.warn("Default multi_class will be changed to 'auto' in"
" 0.22. Specify the multi_class option to silence "
"this warning.", FutureWarning)
if multi_class == 'auto':
if solver == 'liblinear':
multi_class = 'ovr'
elif n_classes > 2:
multi_class = 'multinomial'
else:
multi_class = 'ovr'
if multi_class not in ('multinomial', 'ovr'):
raise ValueError("multi_class should be 'multinomial', 'ovr' or "
"'auto'. Got %s." % multi_class)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
return multi_class
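# Illustrative sketch (not part of the upstream module): how multi_class='auto'
# resolves for a few solver / class-count combinations.
def _demo_multi_class_resolution():
    cases = [('liblinear', 2), ('liblinear', 3), ('lbfgs', 2), ('lbfgs', 3)]
    # 'auto' picks 'ovr' for liblinear and for binary problems, and
    # 'multinomial' otherwise.
    return {case: _check_multi_class('auto', case[0], case[1]) for case in cases}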
@deprecated('logistic_regression_path was deprecated in version 0.21 and '
'will be removed in version 0.23.0')
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='warn',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None,
l1_ratio=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Note that there will be no speedup with liblinear solver, since it does
not handle warm-starting.
.. deprecated:: 0.21
``logistic_regression_path`` was deprecated in version 0.21 and will
be removed in 0.23.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Input data, target values.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1', 'l2', or 'elasticnet'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial', 'auto'}, default: 'ovr'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.20
Default will change from 'ovr' to 'auto' in 0.22.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`. Used when ``solver`` == 'sag' or
'liblinear'.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
l1_ratio : float or None, optional (default=None)
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
combination of L1 and L2.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept. For
``multiclass='multinomial'``, the shape is (n_classes, n_cs,
n_features) or (n_classes, n_cs, n_features + 1).
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
.. versionchanged:: 0.19
The "copy" parameter was removed.
"""
    # Forward all of the caller's arguments to the private implementation.
    return _logistic_regression_path(
        X, y, pos_class=pos_class, Cs=Cs, fit_intercept=fit_intercept,
        max_iter=max_iter, tol=tol, verbose=verbose, solver=solver,
        coef=coef, class_weight=class_weight, dual=dual, penalty=penalty,
        intercept_scaling=intercept_scaling, multi_class=multi_class,
        random_state=random_state, check_input=check_input,
        max_squared_sum=max_squared_sum, sample_weight=sample_weight,
        l1_ratio=l1_ratio)
def _logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='warn',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None,
l1_ratio=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Note that there will be no speedup with liblinear solver, since it does
not handle warm-starting.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Input data, target values.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1', 'l2', or 'elasticnet'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial', 'auto'}, default: 'ovr'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.20
Default will change from 'ovr' to 'auto' in 0.22.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`. Used when ``solver`` == 'sag' or
'liblinear'.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
l1_ratio : float or None, optional (default=None)
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
combination of L1 and L2.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept. For
``multiclass='multinomial'``, the shape is (n_classes, n_cs,
n_features) or (n_classes, n_cs, n_features + 1).
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
.. versionchanged:: 0.19
The "copy" parameter was removed.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
solver = _check_solver(solver, penalty, dual)
# Preprocessing.
if check_input:
X = check_array(X, accept_sparse='csr', dtype=np.float64,
accept_large_sparse=solver != 'liblinear')
y = check_array(y, ensure_2d=False, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
multi_class = _check_multi_class(multi_class, solver, len(classes))
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
if sample_weight is not None:
sample_weight = np.array(sample_weight, dtype=X.dtype, order='C')
check_consistent_length(y, sample_weight)
else:
sample_weight = np.ones(X.shape[0], dtype=X.dtype)
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == 'multinomial':
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight *= class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. for the
# multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
mask_classes = np.array([-1, 1])
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=X.dtype)
y_bin[~mask] = -1.
# for compute_class_weight
if class_weight == "balanced":
class_weight_ = compute_class_weight(class_weight, mask_classes,
y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
else:
if solver not in ['sag', 'saga']:
lbin = LabelBinarizer()
Y_multi = lbin.fit_transform(y)
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
else:
# SAG multinomial solver needs LabelEncoder, not LabelBinarizer
le = LabelEncoder()
Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
order='F', dtype=X.dtype)
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_classes = classes.size
if n_classes == 2:
n_classes = 1
if (coef.shape[0] != n_classes or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
if n_classes == 1:
w0[0, :coef.shape[1]] = -coef
w0[1, :coef.shape[1]] = coef
else:
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
# fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
if solver in ['lbfgs', 'newton-cg']:
w0 = w0.ravel()
target = Y_multi
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
warm_start_sag = {'coef': w0.T}
else:
target = y_bin
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == 'lbfgs':
iprint = [-1, 50, 1, 100, 101][
np.searchsorted(np.array([0, 1, 2, 3]), verbose)]
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=iprint, pgtol=tol, maxiter=max_iter)
if info["warnflag"] == 1:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.", ConvergenceWarning)
# In scipy <= 1.0.0, nit may exceed maxiter.
# See https://github.com/scipy/scipy/issues/7854.
n_iter_i = min(info['nit'], max_iter)
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
coef_, intercept_, n_iter_i, = _fit_liblinear(
X, target, C, fit_intercept, intercept_scaling, None,
penalty, dual, verbose, max_iter, tol, random_state,
sample_weight=sample_weight)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver in ['sag', 'saga']:
if multi_class == 'multinomial':
target = target.astype(np.float64)
loss = 'multinomial'
else:
loss = 'log'
# alpha is for L2-norm, beta is for L1-norm
if penalty == 'l1':
alpha = 0.
beta = 1. / C
elif penalty == 'l2':
alpha = 1. / C
beta = 0.
else: # Elastic-Net penalty
alpha = (1. / C) * (1 - l1_ratio)
beta = (1. / C) * l1_ratio
w0, n_iter_i, warm_start_sag = sag_solver(
X, target, sample_weight, loss, alpha,
beta, max_iter, tol,
verbose, random_state, False, max_squared_sum, warm_start_sag,
is_saga=(solver == 'saga'))
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
n_classes = max(2, classes.size)
multi_w0 = np.reshape(w0, (n_classes, -1))
if n_classes == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0.copy())
else:
coefs.append(w0.copy())
n_iter[i] = n_iter_i
return np.array(coefs), np.array(Cs), n_iter
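# Illustrative sketch (not part of the upstream module): computing a short
# regularization path on made-up binary data with the private helper above.
def _demo_regularization_path():
    rng = np.random.RandomState(0)
    X_toy = rng.randn(50, 5)
    y_toy = (X_toy[:, 0] + 0.5 * rng.randn(50) > 0).astype(int)
    coefs, Cs, n_iter = _logistic_regression_path(
        X_toy, y_toy, Cs=[0.01, 1.0, 100.0], solver='lbfgs',
        multi_class='ovr', max_iter=200)
    # One coefficient row per C (the last column is the intercept); a smaller C
    # means stronger L2 regularization and therefore smaller coefficients.
    return {C: np.abs(coef[:-1]).sum() for C, coef in zip(Cs, coefs)}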
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, intercept_scaling=1.,
multi_class='warn', random_state=None,
max_squared_sum=None, sample_weight=None,
l1_ratio=None):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
regularization strength. If Cs is as an int, then a grid of Cs
values are chosen in a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs are used.
scoring : callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`. The
default scoring option used is accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}
Decides which solver to use.
penalty : str, 'l1', 'l2', or 'elasticnet'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`. Used when ``solver`` == 'sag' and
'liblinear'.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
l1_ratio : float or None, optional (default=None)
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
combination of L1 and L2.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
n_iter : array, shape(n_cs,)
Actual number of iteration for each Cs.
"""
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
check_consistent_length(y, sample_weight)
sample_weight = sample_weight[train]
coefs, Cs, n_iter = _logistic_regression_path(
X_train, y_train, Cs=Cs, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, solver=solver, max_iter=max_iter,
class_weight=class_weight, pos_class=pos_class,
multi_class=multi_class, tol=tol, verbose=verbose, dual=dual,
penalty=penalty, intercept_scaling=intercept_scaling,
random_state=random_state, check_input=False,
max_squared_sum=max_squared_sum, sample_weight=sample_weight)
log_reg = LogisticRegression(solver=solver, multi_class=multi_class)
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test = np.ones(y_test.shape, dtype=np.float64)
y_test[~mask] = -1.
scores = list()
if isinstance(scoring, str):
scoring = get_scorer(scoring)
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores), n_iter
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr', and uses the cross-
entropy loss if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs',
'sag', 'saga' and 'newton-cg' solvers.)
This class implements regularized logistic regression using the
'liblinear' library, 'newton-cg', 'sag', 'saga' and 'lbfgs' solvers. **Note
that regularization is applied by default**. It can handle both dense
and sparse input. Use C-ordered arrays or CSR matrices containing 64-bit
floats for optimal performance; any other input format will be converted
(and copied).
The 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization
with primal formulation, or no regularization. The 'liblinear' solver
supports both L1 and L2 regularization, with a dual formulation only for
the L2 penalty. The Elastic-Net regularization is only supported by the
'saga' solver.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1', 'l2', 'elasticnet' or 'none', optional (default='l2')
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver. If 'none' (not supported by the
liblinear solver), no regularization is applied.
.. versionadded:: 0.19
l1 penalty with SAGA solver (allowing 'multinomial' + L1)
dual : bool, optional (default=False)
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, optional (default=True)
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, optional (default=1)
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
*class_weight='balanced'*
random_state : int, RandomState instance or None, optional (default=None)
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`. Used when ``solver`` == 'sag' or
'liblinear'.
solver : str, {'newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'}, \
optional (default='liblinear').
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' and
'saga' are faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag', 'saga' and 'lbfgs'
handle multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs', 'sag' and 'saga' handle L2 or no penalty
- 'liblinear' and 'saga' also handle L1 penalty
- 'saga' also supports 'elasticnet' penalty
- 'liblinear' does not handle no penalty
Note that 'sag' and 'saga' fast convergence is only guaranteed on
features with approximately the same scale. You can
preprocess the data with a scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
.. versionchanged:: 0.20
Default will change from 'liblinear' to 'lbfgs' in 0.22.
max_iter : int, optional (default=100)
Useful only for the newton-cg, sag and lbfgs solvers.
Maximum number of iterations taken for the solvers to converge.
multi_class : str, {'ovr', 'multinomial', 'auto'}, optional (default='ovr')
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.20
Default will change from 'ovr' to 'auto' in 0.22.
verbose : int, optional (default=0)
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
warm_start : bool, optional (default=False)
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Useless for liblinear solver. See :term:`the Glossary <warm_start>`.
.. versionadded:: 0.17
*warm_start* to support *lbfgs*, *newton-cg*, *sag*, *saga* solvers.
n_jobs : int or None, optional (default=None)
Number of CPU cores used when parallelizing over classes if
multi_class='ovr'". This parameter is ignored when the ``solver`` is
set to 'liblinear' regardless of whether 'multi_class' is specified or
not. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
context. ``-1`` means using all processors.
See :term:`Glossary <n_jobs>` for more details.
l1_ratio : float or None, optional (default=None)
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
        used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
combination of L1 and L2.
Attributes
----------
classes_ : array, shape (n_classes, )
A list of class labels known to the classifier.
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem is binary.
In particular, when `multi_class='multinomial'`, `coef_` corresponds
to outcome 1 (True) and `-coef_` corresponds to outcome 0 (False).
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
`intercept_` is of shape (1,) when the given problem is binary.
In particular, when `multi_class='multinomial'`, `intercept_`
corresponds to outcome 1 (True) and `-intercept_` corresponds to
outcome 0 (False).
n_iter_ : array, shape (n_classes,) or (1, )
Actual number of iterations for all classes. If binary or multinomial,
it returns only 1 element. For liblinear solver, only the maximum
number of iteration across all classes is given.
.. versionchanged:: 0.20
In SciPy <= 1.0.0 the number of lbfgs iterations may exceed
``max_iter``. ``n_iter_`` will now report at most ``max_iter``.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.linear_model import LogisticRegression
>>> X, y = load_iris(return_X_y=True)
>>> clf = LogisticRegression(random_state=0, solver='lbfgs',
... multi_class='multinomial').fit(X, y)
>>> clf.predict(X[:2, :])
array([0, 0])
>>> clf.predict_proba(X[:2, :]) # doctest: +ELLIPSIS
array([[9.8...e-01, 1.8...e-02, 1.4...e-08],
[9.7...e-01, 2.8...e-02, ...e-08]])
>>> clf.score(X, y)
0.97...
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
LogisticRegressionCV : Logistic regression with built-in cross validation
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon,
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
https://www.csie.ntu.edu.tw/~cjlin/liblinear/
SAG -- Mark Schmidt, Nicolas Le Roux, and Francis Bach
Minimizing Finite Sums with the Stochastic Average Gradient
https://hal.inria.fr/hal-00860051/document
SAGA -- Defazio, A., Bach F. & Lacoste-Julien S. (2014).
SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives
https://arxiv.org/abs/1407.0202
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
https://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='warn', max_iter=100,
multi_class='warn', verbose=0, warm_start=False, n_jobs=None,
l1_ratio=None):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
self.l1_ratio = l1_ratio
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
.. versionadded:: 0.17
*sample_weight* support to LogisticRegression.
Returns
-------
self : object
"""
solver = _check_solver(self.solver, self.penalty, self.dual)
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if self.penalty == 'elasticnet':
if (not isinstance(self.l1_ratio, numbers.Number) or
self.l1_ratio < 0 or self.l1_ratio > 1):
raise ValueError("l1_ratio must be between 0 and 1;"
" got (l1_ratio=%r)" % self.l1_ratio)
elif self.l1_ratio is not None:
warnings.warn("l1_ratio parameter is only used when penalty is "
"'elasticnet'. Got "
"(penalty={})".format(self.penalty))
if self.penalty == 'none':
if self.C != 1.0: # default values
warnings.warn(
"Setting penalty='none' will ignore the C and l1_ratio "
"parameters"
)
# Note that check for l1_ratio is done right above
C_ = np.inf
penalty = 'l2'
else:
C_ = self.C
penalty = self.penalty
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
if solver in ['newton-cg']:
_dtype = [np.float64, np.float32]
else:
_dtype = np.float64
X, y = check_X_y(X, y, accept_sparse='csr', dtype=_dtype, order="C",
accept_large_sparse=solver != 'liblinear')
check_classification_targets(y)
self.classes_ = np.unique(y)
n_samples, n_features = X.shape
multi_class = _check_multi_class(self.multi_class, solver,
len(self.classes_))
if solver == 'liblinear':
if effective_n_jobs(self.n_jobs) != 1:
warnings.warn("'n_jobs' > 1 does not have any effect when"
" 'solver' is set to 'liblinear'. Got 'n_jobs'"
" = {}.".format(effective_n_jobs(self.n_jobs)))
self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state,
sample_weight=sample_weight)
self.n_iter_ = np.array([n_iter_])
return self
if solver in ['sag', 'saga']:
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
if self.warm_start:
warm_start_coef = getattr(self, 'coef_', None)
else:
warm_start_coef = None
if warm_start_coef is not None and self.fit_intercept:
warm_start_coef = np.append(warm_start_coef,
self.intercept_[:, np.newaxis],
axis=1)
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if multi_class == 'multinomial':
classes_ = [None]
warm_start_coef = [warm_start_coef]
if warm_start_coef is None:
warm_start_coef = [None] * n_classes
path_func = delayed(_logistic_regression_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
if solver in ['sag', 'saga']:
prefer = 'threads'
else:
prefer = 'processes'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
**_joblib_parallel_args(prefer=prefer))(
path_func(X, y, pos_class=class_, Cs=[C_],
l1_ratio=self.l1_ratio, fit_intercept=self.fit_intercept,
tol=self.tol, verbose=self.verbose, solver=solver,
multi_class=multi_class, max_iter=self.max_iter,
class_weight=self.class_weight, check_input=False,
random_state=self.random_state, coef=warm_start_coef_,
penalty=penalty, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
for class_, warm_start_coef_ in zip(classes_, warm_start_coef))
fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
if multi_class == 'multinomial':
self.coef_ = fold_coefs_[0][0]
else:
self.coef_ = np.asarray(fold_coefs_)
self.coef_ = self.coef_.reshape(n_classes, n_features +
int(self.fit_intercept))
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
For a multi_class problem, if multi_class is set to be "multinomial"
the softmax function is used to find the predicted probability of
each class.
        Otherwise a one-vs-rest approach is used, i.e. the probability of
        each class is calculated assuming it to be positive, using the
        logistic function, and these values are normalized across all classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
if not hasattr(self, "coef_"):
raise NotFittedError("Call fit before prediction")
ovr = (self.multi_class in ["ovr", "warn"] or
(self.multi_class == 'auto' and (self.classes_.size <= 2 or
self.solver == 'liblinear')))
if ovr:
return super()._predict_proba_lr(X)
else:
decision = self.decision_function(X)
if decision.ndim == 1:
# Workaround for multi_class="multinomial" and binary outcomes
# which requires softmax prediction with only a 1D decision.
decision_2d = np.c_[-decision, decision]
else:
decision_2d = decision
return softmax(decision_2d, copy=False)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
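# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of scikit-learn): contrasts the two
# probability strategies described in ``predict_proba`` above.  The decision
# values below are made up purely for illustration.
# ---------------------------------------------------------------------------
def _demo_proba_normalization():
    """Minimal sketch of OvR normalization versus the multinomial softmax."""
    import numpy as np

    # Hypothetical per-class decision-function values for a single sample.
    decision = np.array([2.0, 0.5, -1.0])

    # One-vs-rest: squash each score with the logistic function, then
    # renormalize so the probabilities sum to one.
    ovr = 1.0 / (1.0 + np.exp(-decision))
    ovr /= ovr.sum()

    # Multinomial: apply the softmax to the same scores.
    shifted = np.exp(decision - decision.max())
    multinomial = shifted / shifted.sum()

    return ovr, multinomial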
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
See glossary entry for :term:`cross-validation estimator`.
This class implements logistic regression using liblinear, newton-cg, sag
    or lbfgs optimizer. The newton-cg, sag and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
Elastic-Net penalty is only supported by the saga solver.
For the grid of `Cs` values and `l1_ratios` values, the best
hyperparameter is selected by the cross-validator `StratifiedKFold`, but
it can be changed using the `cv` parameter. The 'newton-cg', 'sag',
'saga' and 'lbfgs' solvers can warm-start the coefficients (see
:term:`Glossary<warm_start>`).
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats or int, optional (default=10)
Each of the values in Cs describes the inverse of regularization
        strength. If Cs is an int, then a grid of Cs values is chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, optional (default=True)
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
cv : int or cross-validation generator, optional (default=None)
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.model_selection` module for the
list of possible cross-validation objects.
.. versionchanged:: 0.20
``cv`` default value if None will change from 3-fold to 5-fold
in v0.22.
dual : bool, optional (default=False)
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1', 'l2', or 'elasticnet', optional (default='l2')
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
scoring : string, callable, or None, optional (default=None)
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`. The
default scoring option used is 'accuracy'.
solver : str, {'newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'}, \
optional (default='lbfgs')
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' and
'saga' are faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag', 'saga' and 'lbfgs'
handle multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty, whereas
'liblinear' and 'saga' handle L1 penalty.
- 'liblinear' might be slower in LogisticRegressionCV because it does
not handle warm-starting.
Note that 'sag' and 'saga' fast convergence is only guaranteed on
features with approximately the same scale. You can preprocess the data
with a scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
max_iter : int, optional (default=100)
Maximum number of iterations of the optimization algorithm.
class_weight : dict or 'balanced', optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
class_weight == 'balanced'
n_jobs : int or None, optional (default=None)
Number of CPU cores used during the cross-validation loop.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, optional (default=0)
For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
positive number for verbosity.
refit : bool, optional (default=True)
If set to True, the scores are averaged across all folds, and the
        coefs and the C that correspond to the best score are taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
intercept_scaling : float, optional (default=1)
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
        Note that the synthetic feature weight is subject to l1/l2
        regularization, as are all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial', 'auto'}, optional (default='ovr')
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.20
Default will change from 'ovr' to 'auto' in 0.22.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
l1_ratios : list of float or None, optional (default=None)
The list of Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``.
Only used if ``penalty='elasticnet'``. A value of 0 is equivalent to
using ``penalty='l2'``, while 1 is equivalent to using
        ``penalty='l1'``. For ``0 < l1_ratio < 1``, the penalty is a combination
of L1 and L2.
Attributes
----------
classes_ : array, shape (n_classes, )
A list of class labels known to the classifier.
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
`intercept_` is of shape(1,) when the problem is binary.
Cs_ : array, shape (n_cs)
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
l1_ratios_ : array, shape (n_l1_ratios)
Array of l1_ratios used for cross-validation. If no l1_ratio is used
(i.e. penalty is not 'elasticnet'), this is set to ``[None]``
coefs_paths_ : array, shape (n_folds, n_cs, n_features) or \
(n_folds, n_cs, n_features + 1)
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, n_cs, n_features)`` or
``(n_folds, n_cs, n_features + 1)`` depending on whether the
intercept is fit or not. If ``penalty='elasticnet'``, the shape is
``(n_folds, n_cs, n_l1_ratios_, n_features)`` or
``(n_folds, n_cs, n_l1_ratios_, n_features + 1)``.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class. Each dict value
        has shape ``(n_folds, n_cs)`` or ``(n_folds, n_cs, n_l1_ratios)`` if
``penalty='elasticnet'``.
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
`C_` is of shape(n_classes,) when the problem is binary.
l1_ratio_ : array, shape (n_classes,) or (n_classes - 1,)
Array of l1_ratio that maps to the best scores across every class. If
refit is set to False, then for each class, the best l1_ratio is the
average of the l1_ratio's that correspond to the best scores for each
fold. `l1_ratio_` is of shape(n_classes,) when the problem is binary.
n_iter_ : array, shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
Actual number of iterations for all classes, folds and Cs.
In the binary or multinomial cases, the first dimension is equal to 1.
If ``penalty='elasticnet'``, the shape is ``(n_classes, n_folds,
n_cs, n_l1_ratios)`` or ``(1, n_folds, n_cs, n_l1_ratios)``.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.linear_model import LogisticRegressionCV
>>> X, y = load_iris(return_X_y=True)
>>> clf = LogisticRegressionCV(cv=5, random_state=0,
... multi_class='multinomial').fit(X, y)
>>> clf.predict(X[:2, :])
array([0, 0])
>>> clf.predict_proba(X[:2, :]).shape
(2, 3)
>>> clf.score(X, y) # doctest: +ELLIPSIS
0.98...
See also
--------
LogisticRegression
"""
def __init__(self, Cs=10, fit_intercept=True, cv='warn', dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=None, verbose=0,
refit=True, intercept_scaling=1., multi_class='warn',
random_state=None, l1_ratios=None):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
self.random_state = random_state
self.l1_ratios = l1_ratios
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
"""
solver = _check_solver(self.solver, self.penalty, self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
if self.penalty == 'elasticnet':
if self.l1_ratios is None or len(self.l1_ratios) == 0 or any(
(not isinstance(l1_ratio, numbers.Number) or l1_ratio < 0
or l1_ratio > 1) for l1_ratio in self.l1_ratios):
raise ValueError("l1_ratios must be a list of numbers between "
"0 and 1; got (l1_ratios=%r)" %
self.l1_ratios)
l1_ratios_ = self.l1_ratios
else:
if self.l1_ratios is not None:
warnings.warn("l1_ratios parameter is only used when penalty "
"is 'elasticnet'. Got (penalty={})".format(
self.penalty))
l1_ratios_ = [None]
if self.penalty == 'none':
raise ValueError(
"penalty='none' is not useful and not supported by "
"LogisticRegressionCV."
)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C",
accept_large_sparse=solver != 'liblinear')
check_classification_targets(y)
class_weight = self.class_weight
# Encode for string labels
label_encoder = LabelEncoder().fit(y)
y = label_encoder.transform(y)
if isinstance(class_weight, dict):
class_weight = dict((label_encoder.transform([cls])[0], v)
for cls, v in class_weight.items())
# The original class labels
classes = self.classes_ = label_encoder.classes_
encoded_labels = label_encoder.transform(label_encoder.classes_)
multi_class = _check_multi_class(self.multi_class, solver,
len(classes))
if solver in ['sag', 'saga']:
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
# init cross-validation generator
cv = check_cv(self.cv, y, classifier=True)
folds = list(cv.split(X, y))
# Use the label encoded classes
n_classes = len(encoded_labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
encoded_labels = encoded_labels[1:]
classes = classes[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
if multi_class == 'multinomial':
iter_encoded_labels = iter_classes = [None]
else:
iter_encoded_labels = encoded_labels
iter_classes = classes
# compute the class weights for the entire dataset y
if class_weight == "balanced":
class_weight = compute_class_weight(class_weight,
np.arange(len(self.classes_)),
y)
class_weight = dict(enumerate(class_weight))
path_func = delayed(_log_reg_scoring_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
if self.solver in ['sag', 'saga']:
prefer = 'threads'
else:
prefer = 'processes'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
**_joblib_parallel_args(prefer=prefer))(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=class_weight, scoring=self.scoring,
multi_class=multi_class,
intercept_scaling=self.intercept_scaling,
random_state=self.random_state,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight,
l1_ratio=l1_ratio
)
for label in iter_encoded_labels
for train, test in folds
for l1_ratio in l1_ratios_)
# _log_reg_scoring_path will output different shapes depending on the
# multi_class param, so we need to reshape the outputs accordingly.
# Cs is of shape (n_classes . n_folds . n_l1_ratios, n_Cs) and all the
# rows are equal, so we just take the first one.
# After reshaping,
# - scores is of shape (n_classes, n_folds, n_Cs . n_l1_ratios)
# - coefs_paths is of shape
# (n_classes, n_folds, n_Cs . n_l1_ratios, n_features)
# - n_iter is of shape
# (n_classes, n_folds, n_Cs . n_l1_ratios) or
# (1, n_folds, n_Cs . n_l1_ratios)
coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
self.Cs_ = Cs[0]
if multi_class == 'multinomial':
coefs_paths = np.reshape(
coefs_paths,
(len(folds), len(l1_ratios_) * len(self.Cs_), n_classes, -1)
)
# equiv to coefs_paths = np.moveaxis(coefs_paths, (0, 1, 2, 3),
# (1, 2, 0, 3))
coefs_paths = np.swapaxes(coefs_paths, 0, 1)
coefs_paths = np.swapaxes(coefs_paths, 0, 2)
self.n_iter_ = np.reshape(
n_iter_,
(1, len(folds), len(self.Cs_) * len(l1_ratios_))
)
# repeat same scores across all classes
scores = np.tile(scores, (n_classes, 1, 1))
else:
coefs_paths = np.reshape(
coefs_paths,
(n_classes, len(folds), len(self.Cs_) * len(l1_ratios_),
-1)
)
self.n_iter_ = np.reshape(
n_iter_,
(n_classes, len(folds), len(self.Cs_) * len(l1_ratios_))
)
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(classes, scores))
self.coefs_paths_ = dict(zip(classes, coefs_paths))
self.C_ = list()
self.l1_ratio_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
for index, (cls, encoded_label) in enumerate(
zip(iter_classes, iter_encoded_labels)):
if multi_class == 'ovr':
scores = self.scores_[cls]
coefs_paths = self.coefs_paths_[cls]
else:
# For multinomial, all scores are the same across classes
scores = scores[0]
# coefs_paths will keep its original shape because
# logistic_regression_path expects it this way
if self.refit:
# best_index is between 0 and (n_Cs . n_l1_ratios - 1)
# for example, with n_cs=2 and n_l1_ratios=3
# the layout of scores is
# [c1, c2, c1, c2, c1, c2]
# l1_1 , l1_2 , l1_3
best_index = scores.sum(axis=0).argmax()
best_index_C = best_index % len(self.Cs_)
C_ = self.Cs_[best_index_C]
self.C_.append(C_)
best_index_l1 = best_index // len(self.Cs_)
l1_ratio_ = l1_ratios_[best_index_l1]
self.l1_ratio_.append(l1_ratio_)
if multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, :, best_index, :],
axis=1)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
# Note that y is label encoded and hence pos_class must be
# the encoded label / None (for 'multinomial')
w, _, _ = _logistic_regression_path(
X, y, pos_class=encoded_label, Cs=[C_], solver=solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty,
class_weight=class_weight,
multi_class=multi_class,
verbose=max(0, self.verbose - 1),
random_state=self.random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight,
l1_ratio=l1_ratio_)
w = w[0]
else:
# Take the best scores across every fold and the average of
# all coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
if self.multi_class == 'ovr':
w = np.mean([coefs_paths[i, best_indices[i], :]
for i in range(len(folds))], axis=0)
else:
w = np.mean([coefs_paths[:, i, best_indices[i], :]
for i in range(len(folds))], axis=0)
best_indices_C = best_indices % len(self.Cs_)
self.C_.append(np.mean(self.Cs_[best_indices_C]))
best_indices_l1 = best_indices // len(self.Cs_)
self.l1_ratio_.append(np.mean(l1_ratios_[best_indices_l1]))
if multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.l1_ratio_ = np.tile(self.l1_ratio_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
self.l1_ratio_ = np.asarray(self.l1_ratio_)
self.l1_ratios_ = np.asarray(l1_ratios_)
# if elasticnet was used, add the l1_ratios dimension to some
# attributes
if self.l1_ratios is not None:
for cls, coefs_path in self.coefs_paths_.items():
self.coefs_paths_[cls] = coefs_path.reshape(
(len(folds), self.Cs_.size, self.l1_ratios_.size, -1))
for cls, score in self.scores_.items():
self.scores_[cls] = score.reshape(
(len(folds), self.Cs_.size, self.l1_ratios_.size))
self.n_iter_ = self.n_iter_.reshape(
(-1, len(folds), self.Cs_.size, self.l1_ratios_.size))
return self
def score(self, X, y, sample_weight=None):
"""Returns the score using the `scoring` option on the given
test data and labels.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples,)
True labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
Score of self.predict(X) wrt. y.
"""
if self.scoring is not None:
warnings.warn("The long-standing behavior to use the "
"accuracy score has changed. The scoring "
"parameter is now used. "
"This warning will disappear in version 0.22.",
ChangedBehaviorWarning)
scoring = self.scoring or 'accuracy'
if isinstance(scoring, str):
scoring = get_scorer(scoring)
return scoring(self, X, y, sample_weight=sample_weight)
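# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of scikit-learn): the class docstring
# above describes the elastic-net penalty but does not demonstrate it.  The
# Cs, l1_ratios, cv and max_iter values below are arbitrary illustration
# choices, not recommendations.
# ---------------------------------------------------------------------------
def _demo_logistic_regression_cv_elasticnet():
    from sklearn.datasets import load_iris

    X, y = load_iris(return_X_y=True)
    clf = LogisticRegressionCV(
        Cs=5,                       # 5 values of C on a logarithmic grid
        penalty='elasticnet',
        solver='saga',              # only saga supports the elastic-net penalty
        l1_ratios=[0.0, 0.5, 1.0],  # pure L2, an even mix, and pure L1
        multi_class='multinomial',
        max_iter=1000,
        cv=3,
    )
    clf.fit(X, y)
    # Best C and l1_ratio found per class, plus mean accuracy on the data.
    return clf.C_, clf.l1_ratio_, clf.score(X, y)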
| [
"[email protected]"
] | |
f0d45c4482fab4fd7e9e9807c9f0bd38de1ebd83 | aaa204ad7f134b526593c785eaa739bff9fc4d2a | /airflow/providers/cncf/kubernetes/utils/pod_manager.py | c65150b4181d4f3ada2c2247d020f2265a91a707 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | cfei18/incubator-airflow | 913b40efa3d9f1fdfc5e299ce2693492c9a92dd4 | ffb2078eb5546420864229cdc6ee361f89cab7bd | refs/heads/master | 2022-09-28T14:44:04.250367 | 2022-09-19T16:50:23 | 2022-09-19T16:50:23 | 88,665,367 | 0 | 1 | Apache-2.0 | 2021-02-05T16:29:42 | 2017-04-18T20:00:03 | Python | UTF-8 | Python | false | false | 16,175 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Launches PODs"""
from __future__ import annotations
import json
import math
import time
import warnings
from contextlib import closing
from dataclasses import dataclass
from datetime import datetime
from typing import TYPE_CHECKING, Iterable, cast
import pendulum
import tenacity
from kubernetes import client, watch
from kubernetes.client.models.v1_pod import V1Pod
from kubernetes.client.rest import ApiException
from kubernetes.stream import stream as kubernetes_stream
from pendulum import DateTime
from pendulum.parsing.exceptions import ParserError
from urllib3.exceptions import HTTPError as BaseHTTPError
from airflow.exceptions import AirflowException
from airflow.kubernetes.kube_client import get_kube_client
from airflow.kubernetes.pod_generator import PodDefaults
from airflow.utils.log.logging_mixin import LoggingMixin
if TYPE_CHECKING:
from kubernetes.client.models.core_v1_event_list import CoreV1EventList
class PodLaunchFailedException(AirflowException):
"""When pod launching fails in KubernetesPodOperator."""
def should_retry_start_pod(exception: BaseException) -> bool:
"""Check if an Exception indicates a transient error and warrants retrying"""
if isinstance(exception, ApiException):
return exception.status == 409
return False
class PodPhase:
"""
Possible pod phases
See https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase.
"""
PENDING = 'Pending'
RUNNING = 'Running'
FAILED = 'Failed'
SUCCEEDED = 'Succeeded'
terminal_states = {FAILED, SUCCEEDED}
def container_is_running(pod: V1Pod, container_name: str) -> bool:
"""
Examines V1Pod ``pod`` to determine whether ``container_name`` is running.
If that container is present and running, returns True. Returns False otherwise.
"""
container_statuses = pod.status.container_statuses if pod and pod.status else None
if not container_statuses:
return False
container_status = next((x for x in container_statuses if x.name == container_name), None)
if not container_status:
return False
return container_status.state.running is not None
def get_container_termination_message(pod: V1Pod, container_name: str):
try:
container_statuses = pod.status.container_statuses
container_status = next((x for x in container_statuses if x.name == container_name), None)
return container_status.state.terminated.message if container_status else None
except (AttributeError, TypeError):
return None
@dataclass
class PodLoggingStatus:
"""Used for returning the status of the pod and last log time when exiting from `fetch_container_logs`"""
running: bool
last_log_time: DateTime | None
class PodManager(LoggingMixin):
"""
Helper class for creating, monitoring, and otherwise interacting with Kubernetes pods
for use with the KubernetesPodOperator
"""
def __init__(
self,
kube_client: client.CoreV1Api = None,
in_cluster: bool = True,
cluster_context: str | None = None,
):
"""
Creates the launcher.
:param kube_client: kubernetes client
:param in_cluster: whether we are in cluster
:param cluster_context: context of the cluster
"""
super().__init__()
self._client = kube_client or get_kube_client(in_cluster=in_cluster, cluster_context=cluster_context)
self._watch = watch.Watch()
def run_pod_async(self, pod: V1Pod, **kwargs) -> V1Pod:
"""Runs POD asynchronously"""
sanitized_pod = self._client.api_client.sanitize_for_serialization(pod)
json_pod = json.dumps(sanitized_pod, indent=2)
self.log.debug('Pod Creation Request: \n%s', json_pod)
try:
resp = self._client.create_namespaced_pod(
body=sanitized_pod, namespace=pod.metadata.namespace, **kwargs
)
self.log.debug('Pod Creation Response: %s', resp)
except Exception as e:
self.log.exception(
'Exception when attempting to create Namespaced Pod: %s', str(json_pod).replace("\n", " ")
)
raise e
return resp
def delete_pod(self, pod: V1Pod) -> None:
"""Deletes POD"""
try:
self._client.delete_namespaced_pod(
pod.metadata.name, pod.metadata.namespace, body=client.V1DeleteOptions()
)
except ApiException as e:
# If the pod is already deleted
if e.status != 404:
raise
@tenacity.retry(
stop=tenacity.stop_after_attempt(3),
wait=tenacity.wait_random_exponential(),
reraise=True,
retry=tenacity.retry_if_exception(should_retry_start_pod),
)
def create_pod(self, pod: V1Pod) -> V1Pod:
"""Launches the pod asynchronously."""
return self.run_pod_async(pod)
def await_pod_start(self, pod: V1Pod, startup_timeout: int = 120) -> None:
"""
Waits for the pod to reach phase other than ``Pending``
:param pod:
:param startup_timeout: Timeout (in seconds) for startup of the pod
(if pod is pending for too long, fails task)
:return:
"""
curr_time = datetime.now()
while True:
remote_pod = self.read_pod(pod)
if remote_pod.status.phase != PodPhase.PENDING:
break
self.log.warning("Pod not yet started: %s", pod.metadata.name)
delta = datetime.now() - curr_time
if delta.total_seconds() >= startup_timeout:
msg = (
f"Pod took longer than {startup_timeout} seconds to start. "
"Check the pod events in kubernetes to determine why."
)
raise PodLaunchFailedException(msg)
time.sleep(1)
def follow_container_logs(self, pod: V1Pod, container_name: str) -> PodLoggingStatus:
warnings.warn(
"Method `follow_container_logs` is deprecated. Use `fetch_container_logs` instead"
"with option `follow=True`.",
DeprecationWarning,
)
return self.fetch_container_logs(pod=pod, container_name=container_name, follow=True)
def fetch_container_logs(
self, pod: V1Pod, container_name: str, *, follow=False, since_time: DateTime | None = None
) -> PodLoggingStatus:
"""
Follows the logs of container and streams to airflow logging.
Returns when container exits.
"""
def consume_logs(*, since_time: DateTime | None = None, follow: bool = True) -> DateTime | None:
"""
Tries to follow container logs until container completes.
For a long-running container, sometimes the log read may be interrupted
Such errors of this kind are suppressed.
Returns the last timestamp observed in logs.
"""
timestamp = None
try:
logs = self.read_pod_logs(
pod=pod,
container_name=container_name,
timestamps=True,
since_seconds=(
math.ceil((pendulum.now() - since_time).total_seconds()) if since_time else None
),
follow=follow,
)
for raw_line in logs:
line = raw_line.decode('utf-8', errors="backslashreplace")
timestamp, message = self.parse_log_line(line)
self.log.info(message)
except BaseHTTPError as e:
self.log.warning(
"Reading of logs interrupted with error %r; will retry. "
"Set log level to DEBUG for traceback.",
e,
)
self.log.debug(
"Traceback for interrupted logs read for pod %r",
pod.metadata.name,
exc_info=True,
)
return timestamp or since_time
# note: `read_pod_logs` follows the logs, so we shouldn't necessarily *need* to
# loop as we do here. But in a long-running process we might temporarily lose connectivity.
# So the looping logic is there to let us resume following the logs.
last_log_time = since_time
while True:
last_log_time = consume_logs(since_time=last_log_time, follow=follow)
if not self.container_is_running(pod, container_name=container_name):
return PodLoggingStatus(running=False, last_log_time=last_log_time)
if not follow:
return PodLoggingStatus(running=True, last_log_time=last_log_time)
else:
self.log.warning(
'Pod %s log read interrupted but container %s still running',
pod.metadata.name,
container_name,
)
time.sleep(1)
def await_container_completion(self, pod: V1Pod, container_name: str) -> None:
while not self.container_is_running(pod=pod, container_name=container_name):
time.sleep(1)
def await_pod_completion(self, pod: V1Pod) -> V1Pod:
"""
Monitors a pod and returns the final state
:param pod: pod spec that will be monitored
        :return: the remote pod (``V1Pod``) once it reaches a terminal phase
"""
while True:
remote_pod = self.read_pod(pod)
if remote_pod.status.phase in PodPhase.terminal_states:
break
self.log.info('Pod %s has phase %s', pod.metadata.name, remote_pod.status.phase)
time.sleep(2)
return remote_pod
def parse_log_line(self, line: str) -> tuple[DateTime | None, str]:
"""
        Parse a K8s log line into its timestamp and message parts
        :param line: k8s log line
        :return: timestamp and log message
        :rtype: Tuple[Optional[DateTime], str]
"""
split_at = line.find(' ')
if split_at == -1:
self.log.error(
"Error parsing timestamp (no timestamp in message %r). "
"Will continue execution but won't update timestamp",
line,
)
return None, line
timestamp = line[:split_at]
message = line[split_at + 1 :].rstrip()
try:
last_log_time = cast(DateTime, pendulum.parse(timestamp))
except ParserError:
self.log.error("Error parsing timestamp. Will continue execution but won't update timestamp")
return None, line
return last_log_time, message
def container_is_running(self, pod: V1Pod, container_name: str) -> bool:
"""Reads pod and checks if container is running"""
remote_pod = self.read_pod(pod)
return container_is_running(pod=remote_pod, container_name=container_name)
@tenacity.retry(stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_exponential(), reraise=True)
def read_pod_logs(
self,
pod: V1Pod,
container_name: str,
tail_lines: int | None = None,
timestamps: bool = False,
since_seconds: int | None = None,
follow=True,
) -> Iterable[bytes]:
"""Reads log from the POD"""
additional_kwargs = {}
if since_seconds:
additional_kwargs['since_seconds'] = since_seconds
if tail_lines:
additional_kwargs['tail_lines'] = tail_lines
try:
return self._client.read_namespaced_pod_log(
name=pod.metadata.name,
namespace=pod.metadata.namespace,
container=container_name,
follow=follow,
timestamps=timestamps,
_preload_content=False,
**additional_kwargs,
)
except BaseHTTPError:
self.log.exception('There was an error reading the kubernetes API.')
raise
@tenacity.retry(stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_exponential(), reraise=True)
def read_pod_events(self, pod: V1Pod) -> CoreV1EventList:
"""Reads events from the POD"""
try:
return self._client.list_namespaced_event(
namespace=pod.metadata.namespace, field_selector=f"involvedObject.name={pod.metadata.name}"
)
except BaseHTTPError as e:
raise AirflowException(f'There was an error reading the kubernetes API: {e}')
@tenacity.retry(stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_exponential(), reraise=True)
def read_pod(self, pod: V1Pod) -> V1Pod:
"""Read POD information"""
try:
return self._client.read_namespaced_pod(pod.metadata.name, pod.metadata.namespace)
except BaseHTTPError as e:
raise AirflowException(f'There was an error reading the kubernetes API: {e}')
def await_xcom_sidecar_container_start(self, pod: V1Pod) -> None:
self.log.info("Checking if xcom sidecar container is started.")
warned = False
while True:
if self.container_is_running(pod, PodDefaults.SIDECAR_CONTAINER_NAME):
self.log.info("The xcom sidecar container is started.")
break
if not warned:
self.log.warning("The xcom sidecar container is not yet started.")
warned = True
time.sleep(1)
def extract_xcom(self, pod: V1Pod) -> str:
"""Retrieves XCom value and kills xcom sidecar container"""
with closing(
kubernetes_stream(
self._client.connect_get_namespaced_pod_exec,
pod.metadata.name,
pod.metadata.namespace,
container=PodDefaults.SIDECAR_CONTAINER_NAME,
command=['/bin/sh'],
stdin=True,
stdout=True,
stderr=True,
tty=False,
_preload_content=False,
)
) as resp:
result = self._exec_pod_command(
resp,
f'if [ -s {PodDefaults.XCOM_MOUNT_PATH}/return.json ]; then cat {PodDefaults.XCOM_MOUNT_PATH}/return.json; else echo __airflow_xcom_result_empty__; fi', # noqa
)
self._exec_pod_command(resp, 'kill -s SIGINT 1')
if result is None:
raise AirflowException(f'Failed to extract xcom from pod: {pod.metadata.name}')
return result
def _exec_pod_command(self, resp, command: str) -> str | None:
res = None
if resp.is_open():
self.log.info('Running command... %s\n', command)
resp.write_stdin(command + '\n')
while resp.is_open():
resp.update(timeout=1)
while resp.peek_stdout():
res = res + resp.read_stdout() if res else resp.read_stdout()
error_res = None
while resp.peek_stderr():
error_res = error_res + resp.read_stderr() if error_res else resp.read_stderr()
if error_res:
self.log.info("stderr from command: %s", error_res)
break
if res:
return res
return res
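# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the provider): the typical call
# sequence that operator-style code follows with PodManager.  ``pod`` is
# assumed to be a fully populated V1Pod built elsewhere; the cluster context,
# container name and timeout values are illustrative assumptions.
# ---------------------------------------------------------------------------
def _demo_pod_lifecycle(pod: V1Pod, container_name: str = "base") -> str:
    manager = PodManager(in_cluster=False, cluster_context="docker-desktop")
    manager.create_pod(pod)                            # submit the pod spec
    manager.await_pod_start(pod, startup_timeout=120)  # wait to leave Pending
    manager.fetch_container_logs(pod, container_name, follow=True)
    remote_pod = manager.await_pod_completion(pod)     # block until a terminal phase
    try:
        return remote_pod.status.phase
    finally:
        manager.delete_pod(pod)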
| [
"[email protected]"
] | |
fe70326740b6a0076abc3e8d128549821babdd53 | f7630fd6c829cb306e72472296e3a513844d99af | /lib/python3.8/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_firewall_gtp_noippolicy.py | 1f59b674b7a6e1afe2365d02864e506d84060883 | [] | no_license | baltah666/automation | 6eccce20c83dbe0d5aa9a82a27937886e3131d32 | 140eb81fe9bacb9a3ed1f1eafe86edeb8a8d0d52 | refs/heads/master | 2023-03-07T10:53:21.187020 | 2023-02-10T08:39:38 | 2023-02-10T08:39:38 | 272,007,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,542 | py | #!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2021 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_firewall_gtp_noippolicy
short_description: no description
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "1.0.0"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Frank Shen (@fshen01)
- Hongbin Lu (@fgtdev-hblu)
notes:
    - Running in workspace locking mode is supported in this FortiManager module; the top
      level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
    - Normally, running one module can fail when a non-zero rc is returned. You can also override
      the conditions to fail or succeed with the parameters rc_failed and rc_succeeded.
options:
enable_log:
description: Enable/Disable logging for task
required: false
type: bool
default: false
proposed_method:
description: The overridden method for the underlying Json RPC request
required: false
type: str
choices:
- update
- set
- add
bypass_validation:
description: |
only set to True when module schema diffs with FortiManager API structure,
module continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
description: |
the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
description: the rc codes list with which the conditions to succeed will be overriden
type: list
required: false
rc_failed:
description: the rc codes list with which the conditions to fail will be overriden
type: list
required: false
adom:
description: the parameter (adom) in requested url
type: str
required: true
gtp:
description: the parameter (gtp) in requested url
type: str
required: true
firewall_gtp_noippolicy:
description: the top level parameters set
required: false
type: dict
suboptions:
action:
type: str
description: no description
choices:
- 'allow'
- 'deny'
end:
type: int
description: no description
id:
type: int
description: no description
start:
type: int
description: no description
type:
type: str
description: no description
choices:
- 'etsi'
- 'ietf'
'''
EXAMPLES = '''
- hosts: fortimanager00
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: No IP policy.
fmgr_firewall_gtp_noippolicy:
bypass_validation: False
adom: FortiCarrier # This is FOC-only object, need a FortiCarrier adom
gtp: 'ansible-test' # name
state: present
firewall_gtp_noippolicy:
action: allow #<value in [allow, deny]>
id: 1
type: ietf #<value in [etsi, ietf]>
- name: gathering fortimanager facts
hosts: fortimanager00
gather_facts: no
connection: httpapi
collections:
- fortinet.fortimanager
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: retrieve all the No IP policy in the GTP
fmgr_fact:
facts:
selector: 'firewall_gtp_noippolicy'
params:
adom: 'FortiCarrier' # This is FOC-only object, need a FortiCarrier adom
gtp: 'ansible-test' # name
noip-policy: 'your_value'
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
jrpc_urls = [
'/pm/config/adom/{adom}/obj/firewall/gtp/{gtp}/noip-policy',
'/pm/config/global/obj/firewall/gtp/{gtp}/noip-policy'
]
perobject_jrpc_urls = [
'/pm/config/adom/{adom}/obj/firewall/gtp/{gtp}/noip-policy/{noip-policy}',
'/pm/config/global/obj/firewall/gtp/{gtp}/noip-policy/{noip-policy}'
]
url_params = ['adom', 'gtp']
module_primary_key = 'id'
module_arg_spec = {
'enable_log': {
'type': 'bool',
'required': False,
'default': False
},
'forticloud_access_token': {
'type': 'str',
'required': False,
'no_log': True
},
'proposed_method': {
'type': 'str',
'required': False,
'choices': [
'set',
'update',
'add'
]
},
'bypass_validation': {
'type': 'bool',
'required': False,
'default': False
},
'workspace_locking_adom': {
'type': 'str',
'required': False
},
'workspace_locking_timeout': {
'type': 'int',
'required': False,
'default': 300
},
'rc_succeeded': {
'required': False,
'type': 'list'
},
'rc_failed': {
'required': False,
'type': 'list'
},
'state': {
'type': 'str',
'required': True,
'choices': [
'present',
'absent'
]
},
'adom': {
'required': True,
'type': 'str'
},
'gtp': {
'required': True,
'type': 'str'
},
'firewall_gtp_noippolicy': {
'required': False,
'type': 'dict',
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'options': {
'action': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'choices': [
'allow',
'deny'
],
'type': 'str'
},
'end': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'type': 'int'
},
'id': {
'required': True,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'type': 'int'
},
'start': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'type': 'int'
},
'type': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'choices': [
'etsi',
'ietf'
],
'type': 'str'
}
}
}
}
params_validation_blob = []
check_galaxy_version(module_arg_spec)
module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'firewall_gtp_noippolicy'),
supports_check_mode=False)
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
connection.set_option('enable_log', module.params['enable_log'] if 'enable_log' in module.params else False)
connection.set_option('forticloud_access_token',
module.params['forticloud_access_token'] if 'forticloud_access_token' in module.params else None)
fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
fmgr.validate_parameters(params_validation_blob)
fmgr.process_curd(argument_specs=module_arg_spec)
else:
module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
module.exit_json(meta=module.params)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
1a12b7c2d2f69df76cef3e44e1259cdf614dfc4d | 927e8a9390d219a14fce6922ab054e2521a083d3 | /tree/largest bst.py | a70af6bfda90d12ff4c3307819dc8596461b2aa9 | [] | no_license | RavinderSinghPB/data-structure-and-algorithm | 19e7784f24b3536e29486ddabf4830f9eb578005 | f48c759fc347471a44ac4bb4362e99efacdd228b | refs/heads/master | 2023-08-23T21:07:28.704498 | 2020-07-18T09:44:04 | 2020-07-18T09:44:04 | 265,993,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,794 | py | from mathpro.math import inf
from collections import deque
import sys
sys.setrecursionlimit(10000)
class Node1:
def __init__(self,isBst,size,mini,maxi):
self.isBst = isBst
self.size = size
self.mini = mini
self.maxi = maxi
def bst(root):
if not root:
x=Node1(True,0,1000000,0)
return x
left=bst(root.left)
right=bst(root.right)
if left.isBst and right.isBst and root.data>left.maxi and root.data<right.mini:
x= Node1(True,1+left.size+right.size,min(root.data,left.mini),max(root.data,right.maxi))
else:
x= Node1(False,max(left.size,right.size),1000000,0)
return x
# First (Node1-based) version, renamed so it no longer silently shadows the
# tuple-based implementation below, which is the one the driver code uses.
def largestBSTBT_node1(root):
    return bst(root).size
def largestBSTBT(root):
# Base cases : When tree is empty or it has
# one child.
if (root == None):
return 0, -inf, inf, 0, True
if (root.left == None and root.right == None):
return 1, root.data, root.data, 1, True
# Recur for left subtree and right subtrees
l = largestBSTBT(root.left)
r = largestBSTBT(root.right)
# Create a return variable and initialize its
# size.
ret = [0, 0, 0, 0, 0]
ret[0] = (1 + l[0] + r[0])
# If whole tree rooted under current root is
# BST.
if (l[4] and r[4] and l[1] <
root.data and r[2] > root.data):
ret[2] = min(l[2], min(r[2], root.data))
ret[1] = max(r[1], max(l[1], root.data))
# Update answer for tree rooted under
# current 'root'
ret[3] = ret[0]
ret[4] = True
return ret
# If whole tree is not BST, return maximum
# of left and right subtrees
ret[3] = max(l[3], r[3])
ret[4] = False
return ret
# Tree Node
class Node:
def __init__(self, val):
self.right = None
self.data = val
self.left = None
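# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original exercise): shows what
# the 5-element list returned by largestBSTBT means --
# [subtree size, max value, min value, largest-BST size, is-BST flag].
# ---------------------------------------------------------------------------
def _demo_largest_bst():
    # A valid BST of three nodes: all three nodes count towards the answer.
    bst_root = Node(2)
    bst_root.left = Node(1)
    bst_root.right = Node(3)
    assert largestBSTBT(bst_root)[3] == 3
    # Root 1 with children 2 and 3 breaks the BST property at the root,
    # so only a single-node subtree qualifies.
    plain_root = Node(1)
    plain_root.left = Node(2)
    plain_root.right = Node(3)
    assert largestBSTBT(plain_root)[3] == 1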
def InOrder(root):
'''
:param root: root of the given tree.
:return: None, print the space separated in order Traversal of the given tree.
'''
if root is None: # check if the root is none
return
InOrder(root.left) # do in order of left child
print(root.data, end=" ") # print root of the given tree
InOrder(root.right) # do in order of right child
# Function to Build Tree
def buildTree(s):
# Corner Case
if (len(s) == 0 or s[0] == "N"):
return None
# Creating list of strings from input
# string after spliting by space
ip = list(map(str, s.split()))
# Create the root of the tree
root = Node(int(ip[0]))
size = 0
q = deque()
# Push the root to the queue
q.append(root)
size = size + 1
# Starting from the second element
i = 1
while size > 0 and i < len(ip):
# Get and remove the front of the queue
currNode = q[0]
q.popleft()
size = size - 1
# Get the current node's value from the string
currVal = ip[i]
# If the left child is not null
if (currVal != "N"):
# Create the left child for the current node
currNode.left = Node(int(currVal))
# Push it to the queue
q.append(currNode.left)
size = size + 1
# For the right child
i = i + 1
if (i >= len(ip)):
break
currVal = ip[i]
# If the right child is not null
if (currVal != "N"):
# Create the right child for the current node
currNode.right = Node(int(currVal))
# Push it to the queue
q.append(currNode.right)
size = size + 1
i = i + 1
return root
if __name__ == "__main__":
t = int(input())
for _ in range(0, t):
s = input()
root = buildTree(s)
print(largestBSTBT(root)[3])
| [
"[email protected]"
] | |
81a2872d9a9c25af764af1274d957578a126869a | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_gripped.py | b9c9b15141f34bec76fa0ae3fbe86ef380965360 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py |
#calss header
class _GRIPPED():
def __init__(self,):
self.name = "GRIPPED"
self.definitions = grip
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['grip']
| [
"[email protected]"
] | |
2719ecdb0d3f55cf2a59d28875a664afed9e14ec | 45de7d905486934629730945619f49281ad19359 | /xlsxwriter/test/comparison/test_chart_legend06.py | f10d00e6bc314ead1c5752aad9c675cf4fe559c5 | [
"BSD-2-Clause"
] | permissive | jmcnamara/XlsxWriter | 599e1d225d698120ef931a776a9d93a6f60186ed | ab13807a1be68652ffc512ae6f5791d113b94ee1 | refs/heads/main | 2023-09-04T04:21:04.559742 | 2023-08-31T19:30:52 | 2023-08-31T19:30:52 | 7,433,211 | 3,251 | 712 | BSD-2-Clause | 2023-08-28T18:52:14 | 2013-01-04T01:07:06 | Python | UTF-8 | Python | false | false | 1,405 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2023, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_legend06.xlsx")
def test_create_file(self):
"""Test the creation of an XlsxWriter file with legend options."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line"})
chart.axis_ids = [79972224, 79973760]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_legend({"fill": {"color": "yellow"}})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
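# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the regression test): set_legend()
# also accepts positioning and font options; the values below are arbitrary
# illustration choices layered on top of the fill used in the test above.
# ---------------------------------------------------------------------------
def _sketch_legend_options(chart):
    chart.set_legend({
        "position": "bottom",              # or top/right/left/overlay variants
        "font": {"bold": True, "size": 9},
        "fill": {"color": "yellow"},
    })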
| [
"[email protected]"
] | |
34ff077858f4ae33064ecedb7125229e30d88e37 | 2d8ec841d75acb7ca3c3d1117c06d951e9be0169 | /test/X13_TestRomantoInteger.py | 3d16e1d47bbcdcf1ab7b8cd7deb848d1167d0c76 | [] | no_license | mcceTest/leetcode_py | 040aee95ed23674b7e2fea899d22945b12f85981 | eb25b3e5866b51fbac10d4686966f2c546c4696f | refs/heads/master | 2021-06-27T02:04:56.856659 | 2021-01-08T03:14:56 | 2021-01-08T03:14:56 | 205,760,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | import unittest
from X13_RomantoInteger import Solution
class TestSum(unittest.TestCase):
def test1(self):
sol = Solution()
self.assertEqual(sol.romanToInt("III"), 3)
self.assertEqual(sol.romanToInt("IV"), 4)
self.assertEqual(sol.romanToInt("IX"), 9)
self.assertEqual(sol.romanToInt("LVIII"), 58)
self.assertEqual(sol.romanToInt("MCMXCIV"), 1994)
if __name__ == "__main__":
unittest.main() | [
"[email protected]"
] | |
7f71b8025b0da4af24e6f067a0fed3393f846eb2 | af259acdd0acd341370c9d5386c444da6a7a28a6 | /Supervised-Learning-with-scikit-learn/04-Preprocessing-and-pipelines/04-Dropping-missing-data.py | 438c80f1a8b57e4ed94a02048b0011d9457d637e | [] | no_license | pace-noge/datacamp | fcd544f6478040660f7149b1a37bfd957eef9747 | eeffb8af233e7304c0f122a48e6b4f78ee7c650e | refs/heads/master | 2020-07-04T12:41:29.635167 | 2019-09-17T10:11:39 | 2019-09-17T10:11:39 | 202,289,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,804 | py | """
Dropping missing data
The voting dataset from Chapter 1 contained a bunch of missing values that we dealt with for you behind the scenes. Now, it's time for you to take care of these yourself!
The unprocessed dataset has been loaded into a DataFrame df. Explore it in the IPython Shell with the .head() method. You will see that there are certain data points labeled with a '?'. These denote missing values. As you saw in the video, different datasets encode missing values in different ways. Sometimes it may be a '9999', other times a 0 - real-world data can be very messy! If you're lucky, the missing values will already be encoded as NaN. We use NaN because it is an efficient and simplified way of internally representing missing data, and it lets us take advantage of pandas methods such as .dropna() and .fillna(), as well as scikit-learn's Imputation transformer Imputer().
In this exercise, your job is to convert the '?'s to NaNs, and then drop the rows that contain them from the DataFrame.
INSTRUCTION
-----------
Explore the DataFrame df in the IPython Shell. Notice how the missing value is represented.
Convert all '?' data points to np.nan.
Count the total number of NaNs using the .isnull() and .sum() methods. This has been done for you.
Drop the rows with missing values from df using .dropna().
Hit 'Submit Answer' to see how many rows were lost by dropping the missing values.
"""
# Convert '?' to NaN
df[df == '?'] = np.nan
# Print the number of NaNs
print(df.isnull().sum())
# Print shape of original DataFrame
print("Shape of Original DataFrame: {}".format(df.shape))
# Drop missing values and print shape of new DataFrame
df = df.dropna()
# Print shape of new DataFrame
print("Shape of DataFrame After Dropping All Rows with Missing Values: {}".format(df.shape))
| [
"[email protected]"
] | |
6757338d0b65931a2f906bdc7f2b1f72184ecadb | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_schoolteachers.py | a97e1eb48efc47de30e4d91ff8c1c06736a2b31a | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py |
#calss header
class _SCHOOLTEACHERS():
def __init__(self,):
self.name = "SCHOOLTEACHERS"
self.definitions = schoolteacher
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['schoolteacher']
| [
"[email protected]"
] | |
8fe85c165c882f31473b97cc238e98b2399d9522 | 24e7e0dfaaeaca8f911b40fcc2937342a0f278fd | /venv/Lib/site-packages/psutil/tests/test_contracts.py | 39a525696000f799dfae0c817aeb45a7e8799281 | [
"MIT",
"BSD-3-Clause"
] | permissive | BimiLevi/Covid19 | 90e234c639192d62bb87364ef96d6a46d8268fa0 | 5f07a9a4609383c02597373d76d6b6485d47936e | refs/heads/master | 2023-08-04T13:13:44.480700 | 2023-08-01T08:36:36 | 2023-08-01T08:36:36 | 288,455,446 | 1 | 0 | MIT | 2021-01-22T19:36:26 | 2020-08-18T12:53:43 | HTML | UTF-8 | Python | false | false | 26,188 | py | #!/usr/bin/env python3
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Contracts tests. These tests mainly check API sanity in terms of
returned types and APIs availability.
Some of these are duplicates of tests test_system.py and test_process.py
"""
import errno
import multiprocessing
import os
import signal
import stat
import sys
import time
import traceback
from psutil import AIX
from psutil import BSD
from psutil import FREEBSD
from psutil import LINUX
from psutil import MACOS
from psutil import NETBSD
from psutil import OPENBSD
from psutil import OSX
from psutil import POSIX
from psutil import SUNOS
from psutil import WINDOWS
from psutil._compat import FileNotFoundError
from psutil._compat import long
from psutil._compat import range
from psutil.tests import create_sockets
from psutil.tests import enum
from psutil.tests import GITHUB_WHEELS
from psutil.tests import HAS_CPU_FREQ
from psutil.tests import HAS_NET_IO_COUNTERS
from psutil.tests import HAS_SENSORS_FANS
from psutil.tests import HAS_SENSORS_TEMPERATURES
from psutil.tests import is_namedtuple
from psutil.tests import process_namespace
from psutil.tests import PsutilTestCase
from psutil.tests import PYPY
from psutil.tests import serialrun
from psutil.tests import SKIP_SYSCONS
from psutil.tests import unittest
from psutil.tests import VALID_PROC_STATUSES
import psutil
# ===================================================================
# --- APIs availability
# ===================================================================
# Make sure code reflects what doc promises in terms of APIs
# availability.
class TestAvailConstantsAPIs(PsutilTestCase):
def test_PROCFS_PATH(self):
self.assertEqual(hasattr(psutil, "PROCFS_PATH"),
LINUX or SUNOS or AIX)
def test_win_priority(self):
ae = self.assertEqual
ae(hasattr(psutil, "ABOVE_NORMAL_PRIORITY_CLASS"), WINDOWS)
ae(hasattr(psutil, "BELOW_NORMAL_PRIORITY_CLASS"), WINDOWS)
ae(hasattr(psutil, "HIGH_PRIORITY_CLASS"), WINDOWS)
ae(hasattr(psutil, "IDLE_PRIORITY_CLASS"), WINDOWS)
ae(hasattr(psutil, "NORMAL_PRIORITY_CLASS"), WINDOWS)
ae(hasattr(psutil, "REALTIME_PRIORITY_CLASS"), WINDOWS)
def test_linux_ioprio_linux(self):
ae = self.assertEqual
ae(hasattr(psutil, "IOPRIO_CLASS_NONE"), LINUX)
ae(hasattr(psutil, "IOPRIO_CLASS_RT"), LINUX)
ae(hasattr(psutil, "IOPRIO_CLASS_BE"), LINUX)
ae(hasattr(psutil, "IOPRIO_CLASS_IDLE"), LINUX)
def test_linux_ioprio_windows(self):
ae = self.assertEqual
ae(hasattr(psutil, "IOPRIO_HIGH"), WINDOWS)
ae(hasattr(psutil, "IOPRIO_NORMAL"), WINDOWS)
ae(hasattr(psutil, "IOPRIO_LOW"), WINDOWS)
ae(hasattr(psutil, "IOPRIO_VERYLOW"), WINDOWS)
@unittest.skipIf(GITHUB_WHEELS, "not exposed via GITHUB_WHEELS")
def test_linux_rlimit(self):
ae = self.assertEqual
ae(hasattr(psutil, "RLIM_INFINITY"), LINUX or FREEBSD)
ae(hasattr(psutil, "RLIMIT_AS"), LINUX or FREEBSD)
ae(hasattr(psutil, "RLIMIT_CORE"), LINUX or FREEBSD)
ae(hasattr(psutil, "RLIMIT_CPU"), LINUX or FREEBSD)
ae(hasattr(psutil, "RLIMIT_DATA"), LINUX or FREEBSD)
ae(hasattr(psutil, "RLIMIT_FSIZE"), LINUX or FREEBSD)
ae(hasattr(psutil, "RLIMIT_MEMLOCK"), LINUX or FREEBSD)
ae(hasattr(psutil, "RLIMIT_NOFILE"), LINUX or FREEBSD)
ae(hasattr(psutil, "RLIMIT_NPROC"), LINUX or FREEBSD)
ae(hasattr(psutil, "RLIMIT_RSS"), LINUX or FREEBSD)
ae(hasattr(psutil, "RLIMIT_STACK"), LINUX or FREEBSD)
ae(hasattr(psutil, "RLIMIT_LOCKS"), LINUX)
ae(hasattr(psutil, "RLIMIT_MSGQUEUE"), LINUX) # requires Linux 2.6.8
ae(hasattr(psutil, "RLIMIT_NICE"), LINUX) # requires Linux 2.6.12
ae(hasattr(psutil, "RLIMIT_RTPRIO"), LINUX) # requires Linux 2.6.12
ae(hasattr(psutil, "RLIMIT_RTTIME"), LINUX) # requires Linux 2.6.25
ae(hasattr(psutil, "RLIMIT_SIGPENDING"), LINUX) # requires Linux 2.6.8
ae(hasattr(psutil, "RLIMIT_SWAP"), FREEBSD)
ae(hasattr(psutil, "RLIMIT_SBSIZE"), FREEBSD)
ae(hasattr(psutil, "RLIMIT_NPTS"), FREEBSD)
class TestAvailSystemAPIs(PsutilTestCase):
def test_win_service_iter(self):
self.assertEqual(hasattr(psutil, "win_service_iter"), WINDOWS)
def test_win_service_get(self):
self.assertEqual(hasattr(psutil, "win_service_get"), WINDOWS)
def test_cpu_freq(self):
self.assertEqual(hasattr(psutil, "cpu_freq"),
LINUX or MACOS or WINDOWS or FREEBSD)
def test_sensors_temperatures(self):
self.assertEqual(
hasattr(psutil, "sensors_temperatures"), LINUX or FREEBSD)
def test_sensors_fans(self):
self.assertEqual(hasattr(psutil, "sensors_fans"), LINUX)
def test_battery(self):
self.assertEqual(hasattr(psutil, "sensors_battery"),
LINUX or WINDOWS or FREEBSD or MACOS)
class TestAvailProcessAPIs(PsutilTestCase):
def test_environ(self):
self.assertEqual(hasattr(psutil.Process, "environ"),
LINUX or MACOS or WINDOWS or AIX or SUNOS or
FREEBSD or OPENBSD or NETBSD)
def test_uids(self):
self.assertEqual(hasattr(psutil.Process, "uids"), POSIX)
def test_gids(self):
self.assertEqual(hasattr(psutil.Process, "uids"), POSIX)
def test_terminal(self):
self.assertEqual(hasattr(psutil.Process, "terminal"), POSIX)
def test_ionice(self):
self.assertEqual(hasattr(psutil.Process, "ionice"), LINUX or WINDOWS)
@unittest.skipIf(GITHUB_WHEELS, "not exposed via GITHUB_WHEELS")
def test_rlimit(self):
# requires Linux 2.6.36
self.assertEqual(hasattr(psutil.Process, "rlimit"), LINUX or FREEBSD)
def test_io_counters(self):
hasit = hasattr(psutil.Process, "io_counters")
self.assertEqual(hasit, False if MACOS or SUNOS else True)
def test_num_fds(self):
self.assertEqual(hasattr(psutil.Process, "num_fds"), POSIX)
def test_num_handles(self):
self.assertEqual(hasattr(psutil.Process, "num_handles"), WINDOWS)
def test_cpu_affinity(self):
self.assertEqual(hasattr(psutil.Process, "cpu_affinity"),
LINUX or WINDOWS or FREEBSD)
def test_cpu_num(self):
self.assertEqual(hasattr(psutil.Process, "cpu_num"),
LINUX or FREEBSD or SUNOS)
def test_memory_maps(self):
hasit = hasattr(psutil.Process, "memory_maps")
self.assertEqual(
hasit, False if OPENBSD or NETBSD or AIX or MACOS else True)
# ===================================================================
# --- API types
# ===================================================================
class TestSystemAPITypes(PsutilTestCase):
"""Check the return types of system related APIs.
Mainly we want to test we never return unicode on Python 2, see:
https://github.com/giampaolo/psutil/issues/1039
"""
@classmethod
def setUpClass(cls):
cls.proc = psutil.Process()
def assert_ntuple_of_nums(self, nt, type_=float, gezero=True):
assert is_namedtuple(nt)
for n in nt:
self.assertIsInstance(n, type_)
if gezero:
self.assertGreaterEqual(n, 0)
def test_cpu_times(self):
self.assert_ntuple_of_nums(psutil.cpu_times())
for nt in psutil.cpu_times(percpu=True):
self.assert_ntuple_of_nums(nt)
def test_cpu_percent(self):
self.assertIsInstance(psutil.cpu_percent(interval=None), float)
self.assertIsInstance(psutil.cpu_percent(interval=0.00001), float)
def test_cpu_times_percent(self):
self.assert_ntuple_of_nums(psutil.cpu_times_percent(interval=None))
self.assert_ntuple_of_nums(psutil.cpu_times_percent(interval=0.0001))
def test_cpu_count(self):
self.assertIsInstance(psutil.cpu_count(), int)
@unittest.skipIf(not HAS_CPU_FREQ, "not supported")
def test_cpu_freq(self):
if psutil.cpu_freq() is None:
raise self.skipTest("cpu_freq() returns None")
self.assert_ntuple_of_nums(psutil.cpu_freq(), type_=(float, int, long))
def test_disk_io_counters(self):
# Duplicate of test_system.py. Keep it anyway.
for k, v in psutil.disk_io_counters(perdisk=True).items():
self.assertIsInstance(k, str)
self.assert_ntuple_of_nums(v, type_=(int, long))
def test_disk_partitions(self):
# Duplicate of test_system.py. Keep it anyway.
for disk in psutil.disk_partitions():
self.assertIsInstance(disk.device, str)
self.assertIsInstance(disk.mountpoint, str)
self.assertIsInstance(disk.fstype, str)
self.assertIsInstance(disk.opts, str)
@unittest.skipIf(SKIP_SYSCONS, "requires root")
def test_net_connections(self):
with create_sockets():
ret = psutil.net_connections('all')
self.assertEqual(len(ret), len(set(ret)))
for conn in ret:
assert is_namedtuple(conn)
def test_net_if_addrs(self):
# Duplicate of test_system.py. Keep it anyway.
for ifname, addrs in psutil.net_if_addrs().items():
self.assertIsInstance(ifname, str)
for addr in addrs:
if enum is not None and not PYPY:
self.assertIsInstance(addr.family, enum.IntEnum)
else:
self.assertIsInstance(addr.family, int)
self.assertIsInstance(addr.address, str)
self.assertIsInstance(addr.netmask, (str, type(None)))
self.assertIsInstance(addr.broadcast, (str, type(None)))
def test_net_if_stats(self):
# Duplicate of test_system.py. Keep it anyway.
for ifname, info in psutil.net_if_stats().items():
self.assertIsInstance(ifname, str)
self.assertIsInstance(info.isup, bool)
if enum is not None:
self.assertIsInstance(info.duplex, enum.IntEnum)
else:
self.assertIsInstance(info.duplex, int)
self.assertIsInstance(info.speed, int)
self.assertIsInstance(info.mtu, int)
@unittest.skipIf(not HAS_NET_IO_COUNTERS, 'not supported')
def test_net_io_counters(self):
# Duplicate of test_system.py. Keep it anyway.
for ifname, _ in psutil.net_io_counters(pernic=True).items():
self.assertIsInstance(ifname, str)
@unittest.skipIf(not HAS_SENSORS_FANS, "not supported")
def test_sensors_fans(self):
# Duplicate of test_system.py. Keep it anyway.
for name, units in psutil.sensors_fans().items():
self.assertIsInstance(name, str)
for unit in units:
self.assertIsInstance(unit.label, str)
self.assertIsInstance(unit.current, (float, int, type(None)))
@unittest.skipIf(not HAS_SENSORS_TEMPERATURES, "not supported")
def test_sensors_temperatures(self):
# Duplicate of test_system.py. Keep it anyway.
for name, units in psutil.sensors_temperatures().items():
self.assertIsInstance(name, str)
for unit in units:
self.assertIsInstance(unit.label, str)
self.assertIsInstance(unit.current, (float, int, type(None)))
self.assertIsInstance(unit.high, (float, int, type(None)))
self.assertIsInstance(unit.critical, (float, int, type(None)))
def test_boot_time(self):
# Duplicate of test_system.py. Keep it anyway.
self.assertIsInstance(psutil.boot_time(), float)
def test_users(self):
# Duplicate of test_system.py. Keep it anyway.
for user in psutil.users():
self.assertIsInstance(user.name, str)
self.assertIsInstance(user.terminal, (str, type(None)))
self.assertIsInstance(user.host, (str, type(None)))
self.assertIsInstance(user.pid, (int, type(None)))
class TestProcessWaitType(PsutilTestCase):
@unittest.skipIf(not POSIX, "not POSIX")
def test_negative_signal(self):
p = psutil.Process(self.spawn_testproc().pid)
p.terminate()
code = p.wait()
self.assertEqual(code, -signal.SIGTERM)
if enum is not None:
self.assertIsInstance(code, enum.IntEnum)
else:
self.assertIsInstance(code, int)
# ===================================================================
# --- Fetch all processes test
# ===================================================================
def proc_info(pid):
tcase = PsutilTestCase()
def check_exception(exc, proc, name, ppid):
tcase.assertEqual(exc.pid, pid)
tcase.assertEqual(exc.name, name)
if isinstance(exc, psutil.ZombieProcess):
if exc.ppid is not None:
tcase.assertGreaterEqual(exc.ppid, 0)
tcase.assertEqual(exc.ppid, ppid)
elif isinstance(exc, psutil.NoSuchProcess):
tcase.assertProcessGone(proc)
str(exc)
assert exc.msg
def do_wait():
if pid != 0:
try:
proc.wait(0)
except psutil.Error as exc:
check_exception(exc, proc, name, ppid)
try:
proc = psutil.Process(pid)
d = proc.as_dict(['ppid', 'name'])
except psutil.NoSuchProcess:
return {}
name, ppid = d['name'], d['ppid']
info = {'pid': proc.pid}
ns = process_namespace(proc)
with proc.oneshot():
for fun, fun_name in ns.iter(ns.getters, clear_cache=False):
try:
info[fun_name] = fun()
except psutil.Error as exc:
check_exception(exc, proc, name, ppid)
continue
do_wait()
return info
@serialrun
class TestFetchAllProcesses(PsutilTestCase):
"""Test which iterates over all running processes and performs
some sanity checks against Process API's returned values.
Uses a process pool to get info about all processes.
"""
def setUp(self):
self.pool = multiprocessing.Pool()
def tearDown(self):
self.pool.terminate()
self.pool.join()
def iter_proc_info(self):
# Fixes "can't pickle <function proc_info>: it's not the
# same object as test_contracts.proc_info".
from psutil.tests.test_contracts import proc_info
return self.pool.imap_unordered(proc_info, psutil.pids())
def test_all(self):
failures = []
for info in self.iter_proc_info():
for name, value in info.items():
meth = getattr(self, name)
try:
meth(value, info)
except AssertionError:
s = '\n' + '=' * 70 + '\n'
s += "FAIL: test_%s pid=%s, ret=%s\n" % (
name, info['pid'], repr(value))
s += '-' * 70
s += "\n%s" % traceback.format_exc()
s = "\n".join((" " * 4) + i for i in s.splitlines())
s += '\n'
failures.append(s)
else:
if value not in (0, 0.0, [], None, '', {}):
assert value, value
if failures:
raise self.fail(''.join(failures))
def cmdline(self, ret, info):
self.assertIsInstance(ret, list)
for part in ret:
self.assertIsInstance(part, str)
def exe(self, ret, info):
self.assertIsInstance(ret, (str, type(None)))
if not ret:
self.assertEqual(ret, '')
else:
if WINDOWS and not ret.endswith('.exe'):
return # May be "Registry", "MemCompression", ...
assert os.path.isabs(ret), ret
# Note: os.stat() may return False even if the file is there
# hence we skip the test, see:
# http://stackoverflow.com/questions/3112546/os-path-exists-lies
if POSIX and os.path.isfile(ret):
if hasattr(os, 'access') and hasattr(os, "X_OK"):
# XXX may fail on MACOS
assert os.access(ret, os.X_OK)
def pid(self, ret, info):
self.assertIsInstance(ret, int)
self.assertGreaterEqual(ret, 0)
def ppid(self, ret, info):
self.assertIsInstance(ret, (int, long))
self.assertGreaterEqual(ret, 0)
def name(self, ret, info):
self.assertIsInstance(ret, str)
# on AIX, "<exiting>" processes don't have names
if not AIX:
assert ret
def create_time(self, ret, info):
self.assertIsInstance(ret, float)
try:
self.assertGreaterEqual(ret, 0)
except AssertionError:
# XXX
if OPENBSD and info['status'] == psutil.STATUS_ZOMBIE:
pass
else:
raise
# this can't be taken for granted on all platforms
# self.assertGreaterEqual(ret, psutil.boot_time())
# make sure returned value can be pretty printed
# with strftime
time.strftime("%Y %m %d %H:%M:%S", time.localtime(ret))
def uids(self, ret, info):
assert is_namedtuple(ret)
for uid in ret:
self.assertIsInstance(uid, int)
self.assertGreaterEqual(uid, 0)
def gids(self, ret, info):
assert is_namedtuple(ret)
# note: testing all gids as above seems not to be reliable for
# gid == 30 (nodoby); not sure why.
for gid in ret:
self.assertIsInstance(gid, int)
if not MACOS and not NETBSD:
self.assertGreaterEqual(gid, 0)
def username(self, ret, info):
self.assertIsInstance(ret, str)
assert ret
def status(self, ret, info):
self.assertIsInstance(ret, str)
assert ret
self.assertNotEqual(ret, '?') # XXX
self.assertIn(ret, VALID_PROC_STATUSES)
def io_counters(self, ret, info):
assert is_namedtuple(ret)
for field in ret:
self.assertIsInstance(field, (int, long))
if field != -1:
self.assertGreaterEqual(field, 0)
def ionice(self, ret, info):
if LINUX:
self.assertIsInstance(ret.ioclass, int)
self.assertIsInstance(ret.value, int)
self.assertGreaterEqual(ret.ioclass, 0)
self.assertGreaterEqual(ret.value, 0)
else: # Windows, Cygwin
choices = [
psutil.IOPRIO_VERYLOW,
psutil.IOPRIO_LOW,
psutil.IOPRIO_NORMAL,
psutil.IOPRIO_HIGH]
self.assertIsInstance(ret, int)
self.assertGreaterEqual(ret, 0)
self.assertIn(ret, choices)
def num_threads(self, ret, info):
self.assertIsInstance(ret, int)
self.assertGreaterEqual(ret, 1)
def threads(self, ret, info):
self.assertIsInstance(ret, list)
for t in ret:
assert is_namedtuple(t)
self.assertGreaterEqual(t.id, 0)
self.assertGreaterEqual(t.user_time, 0)
self.assertGreaterEqual(t.system_time, 0)
for field in t:
self.assertIsInstance(field, (int, float))
def cpu_times(self, ret, info):
assert is_namedtuple(ret)
for n in ret:
self.assertIsInstance(n, float)
self.assertGreaterEqual(n, 0)
# TODO: check ntuple fields
def cpu_percent(self, ret, info):
self.assertIsInstance(ret, float)
assert 0.0 <= ret <= 100.0, ret
def cpu_num(self, ret, info):
self.assertIsInstance(ret, int)
if FREEBSD and ret == -1:
return
self.assertGreaterEqual(ret, 0)
if psutil.cpu_count() == 1:
self.assertEqual(ret, 0)
self.assertIn(ret, list(range(psutil.cpu_count())))
def memory_info(self, ret, info):
assert is_namedtuple(ret)
for value in ret:
self.assertIsInstance(value, (int, long))
self.assertGreaterEqual(value, 0)
if WINDOWS:
self.assertGreaterEqual(ret.peak_wset, ret.wset)
self.assertGreaterEqual(ret.peak_paged_pool, ret.paged_pool)
self.assertGreaterEqual(ret.peak_nonpaged_pool, ret.nonpaged_pool)
self.assertGreaterEqual(ret.peak_pagefile, ret.pagefile)
def memory_full_info(self, ret, info):
assert is_namedtuple(ret)
total = psutil.virtual_memory().total
for name in ret._fields:
value = getattr(ret, name)
self.assertIsInstance(value, (int, long))
self.assertGreaterEqual(value, 0, msg=(name, value))
if LINUX or OSX and name in ('vms', 'data'):
# On Linux there are processes (e.g. 'goa-daemon') whose
# VMS is incredibly high for some reason.
continue
self.assertLessEqual(value, total, msg=(name, value, total))
if LINUX:
self.assertGreaterEqual(ret.pss, ret.uss)
def open_files(self, ret, info):
self.assertIsInstance(ret, list)
for f in ret:
self.assertIsInstance(f.fd, int)
self.assertIsInstance(f.path, str)
if WINDOWS:
self.assertEqual(f.fd, -1)
elif LINUX:
self.assertIsInstance(f.position, int)
self.assertIsInstance(f.mode, str)
self.assertIsInstance(f.flags, int)
self.assertGreaterEqual(f.position, 0)
self.assertIn(f.mode, ('r', 'w', 'a', 'r+', 'a+'))
self.assertGreater(f.flags, 0)
elif BSD and not f.path:
# XXX see: https://github.com/giampaolo/psutil/issues/595
continue
assert os.path.isabs(f.path), f
try:
st = os.stat(f.path)
except FileNotFoundError:
pass
else:
assert stat.S_ISREG(st.st_mode), f
def num_fds(self, ret, info):
self.assertIsInstance(ret, int)
self.assertGreaterEqual(ret, 0)
def connections(self, ret, info):
with create_sockets():
self.assertEqual(len(ret), len(set(ret)))
for conn in ret:
assert is_namedtuple(conn)
def cwd(self, ret, info):
if ret: # 'ret' can be None or empty
self.assertIsInstance(ret, str)
assert os.path.isabs(ret), ret
try:
st = os.stat(ret)
except OSError as err:
if WINDOWS and err.errno in \
psutil._psplatform.ACCESS_DENIED_SET:
pass
# directory has been removed in mean time
elif err.errno != errno.ENOENT:
raise
else:
assert stat.S_ISDIR(st.st_mode)
def memory_percent(self, ret, info):
self.assertIsInstance(ret, float)
assert 0 <= ret <= 100, ret
def is_running(self, ret, info):
self.assertIsInstance(ret, bool)
def cpu_affinity(self, ret, info):
self.assertIsInstance(ret, list)
assert ret != [], ret
cpus = list(range(psutil.cpu_count()))
for n in ret:
self.assertIsInstance(n, int)
self.assertIn(n, cpus)
def terminal(self, ret, info):
self.assertIsInstance(ret, (str, type(None)))
if ret is not None:
assert os.path.isabs(ret), ret
assert os.path.exists(ret), ret
def memory_maps(self, ret, info):
for nt in ret:
self.assertIsInstance(nt.addr, str)
self.assertIsInstance(nt.perms, str)
self.assertIsInstance(nt.path, str)
for fname in nt._fields:
value = getattr(nt, fname)
if fname == 'path':
if not value.startswith('['):
assert os.path.isabs(nt.path), nt.path
# commented as on Linux we might get
# '/foo/bar (deleted)'
# assert os.path.exists(nt.path), nt.path
elif fname == 'addr':
assert value, repr(value)
elif fname == 'perms':
if not WINDOWS:
assert value, repr(value)
else:
self.assertIsInstance(value, (int, long))
self.assertGreaterEqual(value, 0)
def num_handles(self, ret, info):
self.assertIsInstance(ret, int)
self.assertGreaterEqual(ret, 0)
def nice(self, ret, info):
self.assertIsInstance(ret, int)
if POSIX:
assert -20 <= ret <= 20, ret
else:
priorities = [getattr(psutil, x) for x in dir(psutil)
if x.endswith('_PRIORITY_CLASS')]
self.assertIn(ret, priorities)
if sys.version_info > (3, 4):
self.assertIsInstance(ret, enum.IntEnum)
else:
self.assertIsInstance(ret, int)
def num_ctx_switches(self, ret, info):
assert is_namedtuple(ret)
for value in ret:
self.assertIsInstance(value, (int, long))
self.assertGreaterEqual(value, 0)
def rlimit(self, ret, info):
self.assertIsInstance(ret, tuple)
self.assertEqual(len(ret), 2)
self.assertGreaterEqual(ret[0], -1)
self.assertGreaterEqual(ret[1], -1)
def environ(self, ret, info):
self.assertIsInstance(ret, dict)
for k, v in ret.items():
self.assertIsInstance(k, str)
self.assertIsInstance(v, str)
if __name__ == '__main__':
from psutil.tests.runner import run_from_name
run_from_name(__file__)
| [
"[email protected]"
] | |
94a2a1fa20e97c51852243a2a81a4149bdffabba | fb54704d4a6f9475f42b85d8c470e3425b37dcae | /medium/ex1381.py | 3b090451fda3eb43df141d4f0235c64721da852a | [] | no_license | ziyuan-shen/leetcode_algorithm_python_solution | b2784071a94b04e687fd536b57e8d5a9ec1a4c05 | 920b65db80031fad45d495431eda8d3fb4ef06e5 | refs/heads/master | 2021-06-27T05:19:47.774044 | 2021-02-04T09:47:30 | 2021-02-04T09:47:30 | 210,991,299 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | class CustomStack:
def __init__(self, maxSize: int):
self.stack = []
self.maxSize = maxSize
def push(self, x: int) -> None:
if len(self.stack) < self.maxSize:
self.stack.append(x)
def pop(self) -> int:
if self.stack:
return self.stack.pop()
else:
return -1
def increment(self, k: int, val: int) -> None:
for i in range(min(k, len(self.stack))):
self.stack[i] += val
# Your CustomStack object will be instantiated and called as such:
# obj = CustomStack(maxSize)
# obj.push(x)
# param_2 = obj.pop()
# obj.increment(k,val) | [
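# A small usage sketch to sanity-check the class above; the expected values
# in the trailing comments were traced by hand from the implementation:
if __name__ == "__main__":
    stack = CustomStack(3)
    stack.push(1)
    stack.push(2)
    print(stack.pop())       # 2
    stack.push(2)
    stack.push(3)
    stack.push(4)            # ignored: stack already holds maxSize elements
    stack.increment(5, 100)  # bottom 3 values -> [101, 102, 103]
    stack.increment(2, 100)  # bottom 2 values -> [201, 202, 103]
    print(stack.pop())       # 103
    print(stack.pop())       # 202
    print(stack.pop())       # 201
    print(stack.pop())       # -1 (stack is empty)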
"[email protected]"
] | |
95323488f1a2f39dd31806aae172ae8687c22cab | 39fe41a33c00ea6dc8e04c61842c3764fdd07ff1 | /py3standardlib/algorithms/contextlib/contextlib_exitstack_pop_all.py | 68ab294ea41ed5242c5100523e6e1a684725e4f4 | [] | no_license | playbar/pylearn | f9639ffa1848a9db2aba52977de6c7167828b317 | 8bcd1b5a043cb19cde1631947eb128d9c05c259d | refs/heads/master | 2021-06-12T01:51:33.480049 | 2021-03-31T12:16:14 | 2021-03-31T12:16:14 | 147,980,595 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,251 | py | # contextlib_exitstack_pop_all.py
import contextlib
from contextlib_context_managers import *
def variable_stack(contexts):
with contextlib.ExitStack() as stack:
for c in contexts:
stack.enter_context(c)
# Return the close() method of a new stack as a clean-up
# function.
return stack.pop_all().close
# Explicitly return None, indicating that the ExitStack could
# not be initialized cleanly but that cleanup has already
# occurred.
return None
print('No errors:')
cleaner = variable_stack([
HandleError(1),
HandleError(2),
])
cleaner()
print('\nHandled error building context manager stack:')
try:
cleaner = variable_stack([
HandleError(1),
ErrorOnEnter(2),
])
except RuntimeError as err:
print('caught error {}'.format(err))
else:
if cleaner is not None:
cleaner()
else:
print('no cleaner returned')
print('\nUnhandled error building context manager stack:')
try:
cleaner = variable_stack([
PassError(1),
ErrorOnEnter(2),
])
except RuntimeError as err:
print('caught error {}'.format(err))
else:
if cleaner is not None:
cleaner()
else:
print('no cleaner returned')
| [
"[email protected]"
] | |
cbe4b5b0a93c62e34a4f64d8d65fcb3619111147 | 1b5802806cdf2c3b6f57a7b826c3e064aac51d98 | /tensorrt-basic-1.10-3rd-plugin/TensorRT-main/tools/Polygraphy/examples/cli/run/05_comparing_with_custom_input_data/data_loader.py | 4284ddc1e5d6dbe661a164e636b3c38257bcee12 | [
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"ISC",
"BSD-2-Clause"
] | permissive | jinmin527/learning-cuda-trt | def70b3b1b23b421ab7844237ce39ca1f176b297 | 81438d602344c977ef3cab71bd04995c1834e51c | refs/heads/main | 2023-05-23T08:56:09.205628 | 2022-07-24T02:48:24 | 2022-07-24T02:48:24 | 517,213,903 | 36 | 18 | null | 2022-07-24T03:05:05 | 2022-07-24T03:05:05 | null | UTF-8 | Python | false | false | 1,709 | py | #!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Demonstrates two methods of loading custom input data in Polygraphy:
Option 1: Defines a `load_data` function that returns a generator yielding
feed_dicts so that this script can be used as the argument for
the --data-loader-script command-line parameter.
Option 2: Writes input data to a JSON file that can be used as the argument for
the --load-inputs command-line parameter.
"""
import numpy as np
from polygraphy.json import save_json
INPUT_SHAPE = (1, 2, 28, 28)
# Option 1: Define a function that will yield feed_dicts (i.e. Dict[str, np.ndarray])
def load_data():
for _ in range(5):
yield {"x": np.ones(shape=INPUT_SHAPE, dtype=np.float32)} # Still totally real data
# Option 2: Create a JSON file containing the input data using the `save_json()` helper.
# The input to `save_json()` should have type: List[Dict[str, np.ndarray]].
# For convenience, we'll reuse our `load_data()` implementation to generate the list.
input_data = list(load_data())
save_json(input_data, "custom_inputs.json", description="custom input data")
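# Example invocations, as a sketch only -- the model filename is a placeholder
# and the runner flags are illustrative; the two input-data flags are the ones
# named in the module docstring above:
#
#   Option 1: polygraphy run model.onnx --trt --onnxrt --data-loader-script data_loader.py
#   Option 2: polygraphy run model.onnx --trt --onnxrt --load-inputs custom_inputs.json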
| [
"[email protected]"
] | |
4e746f96da01b3c31cde8ca5fd5703e427fb4f2d | e2a7f0ac4e5369e7e924029c1650a986716e78fc | /provisioning/ubuntu-trusty/config.py | 698b06100dd24a2e25e9602b22025650824fecf2 | [
"Unlicense"
] | permissive | reductus/reductus | f89566de60cda387fc20b1aba4210528c3bd535b | 07e865a08396b42fa7ae035de97628bc995506bc | refs/heads/main | 2023-05-22T15:08:10.730577 | 2023-05-12T16:08:49 | 2023-05-12T16:08:49 | 1,320,973 | 7 | 12 | Unlicense | 2022-09-30T03:23:50 | 2011-02-02T16:46:54 | Python | UTF-8 | Python | false | false | 863 | py | #############################################################
# rename or copy this file to config.py if you make changes #
#############################################################
# change this to your fully-qualified domain name to run a
# remote server. The default value of localhost will
# only allow connections from the same computer.
#jsonrpc_servername = "h3.umd.edu"
jsonrpc_servername = "localhost"
jsonrpc_port = 8001
http_port = 8000
serve_staticfiles = False
#use_redis = True
use_diskcache = True
diskcache_params = {"size_limit": int(4*2**30), "shards": 5}
use_msgpack = True
data_sources = [
{
"name": "ncnr",
"url": "https://www.ncnr.nist.gov/pub/",
"start_path": "ncnrdata"
},
]
file_helper_url = {
"ncnr": "https://www.ncnr.nist.gov/ipeek/listftpfiles.php"
}
instruments = ["refl", "ospec", "sans"]
| [
"[email protected]"
] | |
8105b5e240ed50ebab8d4237de4287212a077d45 | 8c55d93116982758740665fdf93a57d7668d62f3 | /calls/bin/registry-read.py | 077e111f7c9656f2119fe7a8ed3124acc0c3e36b | [] | no_license | Ngahu/Making-web-calls | 42971fbb5835a46237854d45702f7feb50dd9314 | df7e0d9032db914b73a9f19a73be18453e524f6e | refs/heads/master | 2021-07-11T06:20:36.953011 | 2016-09-22T09:22:24 | 2016-09-22T09:22:24 | 68,893,415 | 0 | 1 | null | 2020-07-26T08:34:38 | 2016-09-22T06:55:32 | Python | UTF-8 | Python | false | false | 4,984 | py | #!/SHARED-THINGS/ONGOING/making calls/calls/bin/python
# Copyright (c) 2003-2015 CORE Security Technologies)
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Author: Alberto Solino (@agsolino)
#
# Description: A Windows Registry Reader Example
#
# Reference for:
# winregistry.py
#
import impacket
from impacket.examples import logger
from impacket import version
from impacket import winregistry
import sys
import argparse
import ntpath
def bootKey(reg):
baseClass = 'ControlSet001\\Control\\Lsa\\'
keys = ['JD','Skew1','GBG','Data']
tmpKey = ''
for key in keys:
tmpKey = tmpKey + reg.getClass(baseClass + key).decode('utf-16le')[:8].decode('hex')
transforms = [ 8, 5, 4, 2, 11, 9, 13, 3, 0, 6, 1, 12, 14, 10, 15, 7 ]
syskey = ''
for i in xrange(len(tmpKey)):
syskey += tmpKey[transforms[i]]
print syskey.encode('hex')
def getClass(reg, className):
regKey = ntpath.dirname(className)
regClass = ntpath.basename(className)
value = reg.getClass(className)
if value is None:
return
print "[%s]" % regKey
print "Value for Class %s: \n" % regClass,
winregistry.hexdump(value,' ')
def getValue(reg, keyValue):
regKey = ntpath.dirname(keyValue)
regValue = ntpath.basename(keyValue)
value = reg.getValue(keyValue)
print "[%s]\n" % regKey
if value is None:
return
print "Value for %s:\n " % regValue,
reg.printValue(value[0],value[1])
def enumValues(reg, searchKey):
key = reg.findKey(searchKey)
if key is None:
return
print "[%s]\n" % searchKey
values = reg.enumValues(key)
for value in values:
print " %-30s: " % (value),
data = reg.getValue('%s\\%s'%(searchKey,value))
# Special case for binary string.. so it looks better formatted
if data[0] == winregistry.REG_BINARY:
print ''
reg.printValue(data[0],data[1])
print ''
else:
reg.printValue(data[0],data[1])
def enumKey(reg, searchKey, isRecursive, indent=' '):
parentKey = reg.findKey(searchKey)
if parentKey is None:
return
keys = reg.enumKey(parentKey)
for key in keys:
print "%s%s" %(indent, key)
if isRecursive is True:
if searchKey == '\\':
enumKey(reg, '\\%s'%(key),isRecursive,indent+' ')
else:
enumKey(reg, '%s\\%s'%(searchKey,key),isRecursive,indent+' ')
def walk(reg, keyName):
return reg.walk(keyName)
def main():
print version.BANNER
parser = argparse.ArgumentParser(add_help = True, description = "Reads data from registry hives.")
parser.add_argument('hive', action='store', help='registry hive to open')
subparsers = parser.add_subparsers(help='actions', dest='action')
# A enum_key command
enumkey_parser = subparsers.add_parser('enum_key', help='enumerates the subkeys of the specified open registry key')
enumkey_parser.add_argument('-name', action='store', required=True, help='registry key')
enumkey_parser.add_argument('-recursive', dest='recursive', action='store_true', required=False, help='recursive search (default False)')
# A enum_values command
enumvalues_parser = subparsers.add_parser('enum_values', help='enumerates the values for the specified open registry key')
enumvalues_parser.add_argument('-name', action='store', required=True, help='registry key')
# A get_value command
getvalue_parser = subparsers.add_parser('get_value', help='retrieves the data for the specified registry value')
getvalue_parser.add_argument('-name', action='store', required=True, help='registry value')
# A get_class command
getclass_parser = subparsers.add_parser('get_class', help='retrieves the data for the specified registry class')
getclass_parser.add_argument('-name', action='store', required=True, help='registry class name')
# A walk command
walk_parser = subparsers.add_parser('walk', help='walks the registry from the name node down')
walk_parser.add_argument('-name', action='store', required=True, help='registry class name to start walking down from')
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
options = parser.parse_args()
reg = winregistry.Registry(options.hive)
if options.action.upper() == 'ENUM_KEY':
print "[%s]" % options.name
enumKey(reg, options.name, options.recursive)
elif options.action.upper() == 'ENUM_VALUES':
enumValues(reg, options.name)
elif options.action.upper() == 'GET_VALUE':
getValue(reg, options.name)
elif options.action.upper() == 'GET_CLASS':
getClass(reg, options.name)
elif options.action.upper() == 'WALK':
walk(reg, options.name)
reg.close()
if __name__ == "__main__":
main()
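# Example invocations, as a sketch only -- the hive path and key names are
# placeholders; the sub-commands and options come from the argparse spec above:
#
#   registry-read.py /tmp/SYSTEM enum_key -name 'ControlSet001\Control' -recursive
#   registry-read.py /tmp/SYSTEM enum_values -name 'ControlSet001\Control\Lsa'
#   registry-read.py /tmp/SYSTEM get_class -name 'ControlSet001\Control\Lsa\JD'
#   registry-read.py /tmp/SYSTEM walk -name 'ControlSet001\Control\Lsa'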
| [
"[email protected]"
] | |
f632da1a09f68ac9dc98d92c87a45c3d48be3d42 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_None/trend_PolyTrend/cycle_0/ar_/test_artificial_32_None_PolyTrend_0__20.py | a4acd452378510898a95c0d7c7399843cf84e0e1 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 263 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 0, transform = "None", sigma = 0.0, exog_count = 20, ar_order = 0); | [
"[email protected]"
] | |
2c5fb9bc6be3248ac3b35d7d12190b2ea8d205a5 | b9efe70d12c2cbd55065d02e974f5725534583ee | /old_scripts/show_corpus.py | b687f9489733eacbea05242368defb71cc58c4e7 | [] | no_license | diegoami/bankdomain_PY | 5089581ea7b7db6233243dff305488ff27dc8e90 | 83816e1beb96d3e9e0f746bec7f9db9521f32ee7 | refs/heads/master | 2022-12-17T05:05:13.557911 | 2020-06-03T22:19:44 | 2020-06-03T22:19:44 | 131,530,574 | 0 | 0 | null | 2022-12-08T01:30:27 | 2018-04-29T21:12:25 | HTML | UTF-8 | Python | false | false | 996 | py |
import yaml
from repository.mongo_ops import copy_into_qa_documents, split_qa_documents_into_questions, print_all_questions, iterate_questions_in_mongo
from preprocess.preprocessor import create_corpus, load_corpus, print_corpus
from language.custom_lemmas import my_component
from textacy.corpus import Corpus
import spacy
if __name__ == '__main__':
config = yaml.safe_load(open("config.yml"))
data_dir = config['data_dir']
mongo_connection = config['mongo_connection']
corpus_out_dir = config['corpus_dir']
corpus_filename = config['corpus_filename']
corpus_proc_filename = config['corpus_proc_filename']
corpus = load_corpus(corpus_out_dir+'/'+corpus_proc_filename)
new_corpus = Corpus('de')
new_corpus.spacy_lang.add_pipe(my_component, name='print_length', last=True)
new_corpus.add_texts([doc.text for doc in corpus] )
#print_corpus(corpus)
# corpus.spacy_vocab
print(new_corpus.word_doc_freqs(normalize=u'lemma', as_strings=True))
| [
"[email protected]"
] | |
ed0cf72b39a6f14b5f816f8ed59a8ae0a23a3e5e | d921253b98a922975709693c411218746af2f017 | /bgx/telebot/bgt_bot_api/bot_handlers.py | 2f583a0ac5a05ac6fa1d64bbed7bc09e7f3ffb96 | [
"Zlib",
"MIT",
"Apache-2.0"
] | permissive | bestonly125/DGT-Kawartha | 223f88e224c1464fa22a4512e4567ac7ce1bc78f | edfbc18f2c70e813805ec23c28fbc35bf7866ffc | refs/heads/master | 2022-11-22T13:18:21.204906 | 2020-07-24T09:03:57 | 2020-07-24T09:03:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,606 | py | # Copyright 2020 NTRLab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
"""
sudo pip3 install apiai
sudo pip3 install pytelegrambotapi
sudo pip3 install dialogflow
sudo pip3 install pysocks
"""
import asyncio
import queue
import re
import logging
import json
import base64
from collections import namedtuple
from concurrent.futures import ThreadPoolExecutor,ProcessPoolExecutor
import bgt_bot_api.exceptions as errors
from bgt_bot_api import error_handlers
from google.protobuf.json_format import MessageToDict
from google.protobuf.message import DecodeError
from bgt_bot_api.messaging import DisconnectError
from bgt_bot_api.messaging import SendBackoffTimeoutError
from requests.exceptions import ConnectTimeout,ReadTimeout
from sawtooth_sdk.protobuf.validator_pb2 import Message
from sawtooth_sdk.protobuf import client_heads_pb2,client_topology_pb2
from sawtooth_sdk.protobuf import client_peers_pb2
import telebot
from telebot import apihelper
from bgt_bot_api.dflow import Dflow
LOGGER = logging.getLogger(__name__)
BotMessage = namedtuple('BotMessage', "message_id chat_id user_id user_first_name user_last_name intent confidence result batch_id")
FLUSH_TIMEOUT=3600
DEFAULT_TIMEOUT = 300
PROJECT_ID = 'small-talk-wfkygw'
SESSION_ID = '123456789'
language_code = 'ru'
TOKEN='1205652427:AAFr0eynwihWGyvObUA0QSjOfKMwiH3HkZs'
PROXIES = ['82.223.120.213:1080','138.201.6.102:1080','85.10.235.14:1080','217.69.10.129:32401','217.182.230.15:4485','96.96.33.133:1080','93.157.248.106:1080','81.17.20.50:1177','217.69.10.129:32401','1.179.185.253:8080']
class Tbot(object):
def __init__(self,loop, connection,tdb,token=TOKEN,project_id=PROJECT_ID,session_id=SESSION_ID,proxy=PROXIES,connects=None):
self._connects = connects
self._conn_n = 0
self._tdb = tdb
self._proxies = proxy if proxy else PROXIES
self._project_id = project_id if project_id else PROJECT_ID
self._session_id = session_id if session_id else SESSION_ID
self._proxy_pos = 1
self.set_proxy()
self._connection = connection
self._loop = loop
self._token = token
self._intent_handlers = {}
self._keyboard1 = telebot.types.ReplyKeyboardMarkup(True, True,True)
self._keyboard1.row('Привет', 'Пока','Sticker')
self._timeout = DEFAULT_TIMEOUT
self._bgt_queue = queue.Queue()
self.is_pause = False
LOGGER.info('USE proxy=%d from %d',self._proxy_pos,len(self._proxies))
try:
self._dflow = Dflow(self._project_id,self._session_id)
LOGGER.info('DFLOW OK')
except Exception as e:
LOGGER.info('DFLOW error %s',e)
self._dflow = None
def set_proxy(self):
proxy = self._proxies[self._proxy_pos]
#apihelper.proxy = {'https': 'socks5://{}'.format(proxy)}
self._proxy_pos += 1
self._proxy_pos = self._proxy_pos % len(self._proxies)
LOGGER.info("NEXT proxy %d",self._proxy_pos)
def send_message(self,chat_id,repl):
n = 3
while n > 0:
try:
if repl != '':
self._bot.send_message(chat_id,repl)
return
except ReadTimeout:
LOGGER.info('Cant send message err=Timeout')
except Exception as ex:
LOGGER.info('Cant send message err=%s',ex)
n += 1
def send_sticker(self,chat_id,sticker):
try:
self._bot.send_sticker(chat_id,sticker)
except ReadTimeout:
LOGGER.info('Cant send send_sticker err=Timeout')
def _start_bot(self):
bot = telebot.TeleBot(self._token)
#blog = logging.getLogger('TeleBot')
#blog.setLevel(logging.INFO)
self._bot = bot
keyboard1 = telebot.types.ReplyKeyboardMarkup(True, True,True,True)
keyboard1.row('Привет', 'Admins','Sticker','Wallet')
def send_message(chat_id,repl):
try:
if repl != '':
bot.send_message(chat_id,repl)
except ReadTimeout:
LOGGER.info('Cant send message err=Timeout')
@bot.message_handler(commands=['start'])
def start_message(message):
            bot.send_message(message.chat.id, 'Привет {}, ты написал мне /start'.format(message.from_user.first_name), reply_markup=keyboard1)
@bot.message_handler(commands=['info'])
def info_message(message):
chat = bot.get_chat(message.chat.id)
self.send_message(message.chat.id, 'Смотри {}, {}'.format(message.from_user.first_name,str(chat)))
@bot.message_handler(content_types=['sticker'])
def sticker_message(message):
LOGGER.info("sticker_message %s",message)
#bot.send_message(message.chat.id, message)
#bot.send_sticker(message.chat.id,message.sticker.file_id)
s_key = message.json['sticker']['file_unique_id'] #message.sticker.file_id
if s_key not in self._tdb :
LOGGER.info("NEW STICKER %s un=%s", s_key, message.sticker.file_id) #.file_unique_id)
self._tdb.put(s_key,{'type':'sticker','file_id':message.sticker.file_id,'name': message.sticker.set_name, 'is_animated':message.sticker.is_animated})
self.send_message(message.chat.id, 'Отличный стикер из {} сохраню'.format(message.sticker.set_name))
else:
self.send_message(message.chat.id, 'Спасибо за стикер из {},но такой есть'.format(message.sticker.set_name))
@bot.message_handler(content_types=['text'])
def send_text(message):
self.check_user(message.from_user)
if message.text == 'Привет1' or message.text == 'привет1':
                bot.send_message(message.chat.id, 'Привет, мой {} {}'.format('создатель' if message.from_user.first_name == 'Stan' else 'господин',message.from_user.first_name), reply_to_message_id=0)
photo = bot.get_user_profile_photos(message.chat.id,0,1)
p1 = photo.photos[0][0]
LOGGER.info('photo=%s',photo.photos[0][0])
file = bot.get_file(p1.file_id)
fnm = 'https://api.telegram.org/file/bot'+TOKEN+'/'+file.file_path
bot.send_photo(message.chat.id,p1.file_id)
LOGGER.info("Пришел {}".format(message.from_user.first_name))
try:
bot.pin_chat_message(message.chat.id,message.message_id)
except Exception as e:
LOGGER.info("cant pin message %s",e)
#f = open('p1.jpg', 'w')
#f.write(str(file))
#f.close
#
elif message.text == 'Sticker':
try:
self.send_message("@sticker", '@sticker :)')
except Exception as ex:
LOGGER.info("cant send message %s",ex)
elif message.text == 'Пока1' or message.text == 'пока1':
self.send_message(message.chat.id, 'Прощай, {}'.format('создатель' if message.from_user.first_name == 'Stan' else 'господин'))
try:
bot.set_chat_title("@Shiva64_bot","Хозяин покинул меня")
except :
LOGGER.info("cant set title")
elif message.text[0] == '@':
try:
LOGGER.info("GET CHAT %s",message.text[1:])
chat = bot.get_chat(message.text[1:])
self.send_message(message.chat.id, 'Смотри {}, {}'.format(message.from_user.first_name,str(chat)))
except Exception as e:
self.send_message(message.chat.id, 'Смотри {}, {}'.format(message.from_user.first_name,e))
# get chat info
else : #elif message.text[0] == '?':
if message.text == 'Привет':
LOGGER.info("message=%s",message)
resp = self._dflow.detect_intent_text(message.text,language_code) if self._dflow else None
if resp:
response = resp.query_result.fulfillment_text
confidence = round(resp.query_result.intent_detection_confidence,2)
intent = resp.query_result.intent.display_name
if intent != '':
repl = "{}({})".format(response,confidence) if response != '' else ''
else:
repl = 'Так, погоди, не врубаюсь!'
if self.can_talk(intent):
self.send_message(message.chat.id,repl)
LOGGER.info("DFLOW QUERY %s param=%s RESULT=%s",type(resp.query_result),type(resp.query_result.parameters),resp.query_result)
for param,val in resp.query_result.parameters.items():
LOGGER.info("PARAM %s='%s'(%s) ",param,val,type(val))
if intent != '' and intent in self._intent_handlers:
minfo = BotMessage(message.message_id,message.chat.id,message.from_user.id,message.from_user.first_name,message.from_user.last_name,intent,confidence,resp.query_result,None)
self.intent_handler(minfo)
else:
if not self.is_pause:
self.send_message(message.chat.id,'Я Вас не совсем понял {}!'.format(message.from_user.first_name))
# start polling
LOGGER.info('START BOT via=%s',apihelper.proxy)
self._stop = False
#self._bot.polling()
try:
LOGGER.info('ME=%s',bot.get_me())
except Exception as ex:
LOGGER.info('Cant get ME(%s)',ex)
async def _polling(self):
"""
get message from bot and do something useful
"""
self._attemp = 0
self._timeout = 1
def shift_proxy():
self.set_proxy()
if self._attemp > len(self._proxies):
self._stop = True
self._attemp += 1
while not self._stop:
await self.process_queue()
try:
updates = self._bot.get_updates(offset=(self._bot.last_update_id+1),timeout=self._timeout) #get_me() # Execute an API call
self._attemp = 0
except ConnectTimeout:
LOGGER.info('Get updates ConnectTimeout')
if self._timeout < 6:
self._timeout += 1
shift_proxy()
updates = None
except Exception as ex :
LOGGER.info('Get updates except=%s',ex)
shift_proxy()
updates = None
# Do some other operations...
#LOGGER.info('get_updates DONE=%s',updates)
if updates:
LOGGER.info('UPDATE={}'.format(len(updates)))
self.check_member_add_left(updates)
try:
self._bot.process_new_updates(updates)
except Exception as ex :
LOGGER.info('Process updates except=%s',ex)
LOGGER.info('last update=%s qsize=%s',self._bot.last_update_id,self._bgt_queue.qsize())
#self._bot.get_updates(offset=(self._bot.last_update_id+1),timeout=0.1)
else:
pass
#LOGGER.info('No updates')
def check_user(self,from_user):
u_key = '{}'.format(from_user.id)
if u_key not in self._tdb :
LOGGER.info("NEW USER %s un=%s", u_key, from_user.first_name)
self._tdb.put(u_key,{'type':'user','name':from_user.first_name,'last_name': from_user.last_name})
return True
return False
def is_user_with_name(self,name):
try:
return self._tdb.contains_key(name,index='name')
except Exception as ex:
return False
def check_member_add_left(self,updates):
# for update in updates:'new_chat_member': None, 'new_chat_members': None, 'left_chat_member'
for update in updates:
if update.message.new_chat_member is not None:
# new_chat_member {'id': 1205652427, 'is_bot': True, 'first_name': 'Mongoose', 'username': 'Shiva64_bot', 'last_name': None, 'language_code': None}
if self.check_user(update.message.new_chat_member):
# make new wallet
new_chat_member = update.message.new_chat_member
LOGGER.info('new_chat_member7 %s',new_chat_member)
minfo = BotMessage(update.message.message_id,update.message.chat.id,new_chat_member.id,new_chat_member.first_name,new_chat_member.last_name,'smalltalk.agent.create_wallet',1.0,None,None)
self.intent_handler(minfo)
if update.message.new_chat_members is not None:
#new_chat_members [<telebot.types.User object at 0x7fd4b19e2d68>]
LOGGER.info('new_chat_members %s',update.message.new_chat_members)
if update.message.left_chat_member is not None:
left_chat_member = update.message.left_chat_member
LOGGER.info('del left_chat_member %s from DB',left_chat_member)
self._tdb.delete(str(left_chat_member.id))
def add_intent_handler(self,intent_name,intent_handler):
"""
add handler for intention
"""
self._intent_handlers[intent_name] = intent_handler
def intent_handler(self,minfo):
# put intention into queue
self._bgt_queue.put(minfo)
LOGGER.info('RUN HANDLER FOR=%s size=%s',minfo.intent,self._bgt_queue.qsize())
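    # A sketch of how the dispatch table above is expected to be wired up by
    # the caller (the real registration happens outside this file, so the
    # pairings below are assumed; 'smalltalk.agent.unpause' is the only intent
    # name this class itself checks for):
    #
    #   tbot = Tbot(loop, connection, tdb, token=..., connects=[...])
    #   tbot.add_intent_handler('smalltalk.agent.unpause', tbot.intent_unpause)
    #   tbot.add_intent_handler('smalltalk.agent.pause', tbot.intent_pause)
    #   tbot.start()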
async def intent_hello(self,minfo):
"""
Reply on hello
"""
self._bot.send_message(minfo.chat_id, 'Чем могу помочь, мой {} {}?'.format('создатель' if minfo.user_first_name == 'Stan' else 'господин',minfo.user_first_name),reply_to_message_id=0)
try:
photo = self._bot.get_user_profile_photos(minfo.user_id,0,1)
p1 = photo.photos[0][0]
LOGGER.info('photo=%s',photo.photos[0][0])
#file = self._bot.get_file(p1.file_id)
#fnm = 'https://api.telegram.org/file/bot'+TOKEN+'/'+file.file_path
self._bot.send_photo(minfo.chat_id,p1.file_id)
except Exception as ex:
LOGGER.info("Cant get user photo mess (%s)",ex)
LOGGER.info("Пришел {}".format(minfo.user_first_name))
try:
self._bot.pin_chat_message(minfo.chat_id,minfo.message_id)
except Exception as ex:
LOGGER.info("Cant pin message %s",ex)
async def intent_bye(self,minfo):
self.send_message(minfo.chat_id, 'Заходи еще {}'.format('создатель' if minfo.user_first_name == 'Stan' else 'господин'))
async def intent_help(self,minfo):
LOGGER.info('INTENT HELP chat_id=%s confidence=%s\n',minfo.chat_id,minfo.confidence)
response = await self._query_validator(
Message.CLIENT_HEADS_GET_REQUEST,
client_heads_pb2.ClientHeadsGetResponse,
client_heads_pb2.ClientHeadsGetRequest(head_id=''))
self.send_message(minfo.chat_id, 'Посмотри: {}'.format(response))
LOGGER.info('response HELP=%s\n',response)
async def intent_chat_admins(self,minfo):
#self._bot.send_message(minfo.chat_id, 'Посмотрю : {}'.format(response))
try:
repl = self._bot.get_chat_administrators(minfo.chat_id)
LOGGER.info('admins :%s\n',repl)
except Exception as ex:
#{"ok":false,"error_code":400,"description":"Bad Request:
LOGGER.info("cant get admins %s",ex)
async def intent_get_users(self,minfo):
users = ''
with self._tdb.cursor(index='name') as curs:
#values = list(curs.iter())
for val in curs.iter():
if val['type'] == 'user':
users += val['name']+','
self.send_message(minfo.chat_id, 'Я знаю вот кого : {}'.format(users))
async def intent_hold_on(self,minfo):
LOGGER.info('INTENT HOLD ON chat_id=%s confidence=%s\n',minfo.chat_id,minfo.confidence)
async def intent_needs_advice(self,minfo):
LOGGER.info('INTENT NEEDS_ADVICE chat_id=%s confidence=%s\n',minfo.chat_id,minfo.confidence)
response = await self._query_validator(
Message.CLIENT_PEERS_GET_REQUEST,
client_peers_pb2.ClientPeersGetResponse,
client_peers_pb2.ClientPeersGetRequest())
self.send_message(minfo.chat_id, 'Посмотри: {}'.format(response))
async def intent_pause(self,minfo):
LOGGER.info('INTENT PAUSE chat_id=%s confidence=%s\n',minfo.chat_id,minfo.confidence)
self.is_pause = True
async def intent_unpause(self,minfo):
LOGGER.info('INTENT UNPAUSE chat_id=%s confidence=%s\n',minfo.chat_id,minfo.confidence)
self.is_pause = False
@staticmethod
def _parse_response(proto, response):
"""Parses the content from a validator response Message.
"""
try:
content = proto()
content.ParseFromString(response.content)
return content
except (DecodeError, AttributeError):
LOGGER.error('Validator response was not parsable: %s', response)
return None
#raise errors.ValidatorResponseInvalid()
async def _query_validator(self, request_type, response_proto,payload, error_traps=None):
"""
Sends a request to the validator and parses the response.
"""
LOGGER.debug('Sending %s request to validator',self._get_type_name(request_type))
payload_bytes = payload.SerializeToString()
response = await self._send_request(request_type, payload_bytes)
"""
#response = self._loop.run_until_complete(self._send_request(request_type, payload_bytes))
resp = []
async def send_request():
return await self._send_request(request_type, payload_bytes)
async def send_task(resp):
task = self._loop.create_task(send_request())
response = await task
resp.append(response)
LOGGER.debug('Sending request finished %s',response)
return None
#self._loop.run_until_complete(send_task(resp))
response = resp.pop()
"""
LOGGER.debug('response %s',type(response))
#task = asyncio.ensure_future(self._send_request(request_type, payload_bytes))
#response = asyncio.wait(task)
#response = await self._send_request(request_type, payload_bytes)
#response = self._send_request(request_type, payload_bytes)
content = self._parse_response(response_proto, response)
if content is not None:
LOGGER.debug(
'Received %s response from validator with status %s',
self._get_type_name(response.message_type),
self._get_status_name(response_proto, content.status))
self._check_status_errors(response_proto, content, error_traps)
return self._message_to_dict(content)
async def _send_request(self, request_type, payload):
"""Uses an executor to send an asynchronous ZMQ request to the
validator with the handler's Connection
"""
try:
return await self._connection.send( # await
message_type=request_type,
message_content=payload,
timeout=self._timeout)
except DisconnectError:
LOGGER.warning('Validator disconnected while waiting for response')
# reconnect
self.change_gateway(self._conn_n)
#raise errors.ValidatorDisconnected()
except asyncio.TimeoutError:
LOGGER.warning('Timed out while waiting for validator response')
self.change_gateway(self._conn_n)
#raise errors.ValidatorTimedOut()
except SendBackoffTimeoutError:
LOGGER.warning('Failed sending message - Backoff timed out')
raise errors.SendBackoffTimeout()
def change_gateway(self,num):
url = self._connects[num]
try:
self._connection.reopen(url)
self._conn_n = num
except:
pass
return self._conn_n == num
@staticmethod
def _check_status_errors(proto, content, error_traps=None):
"""Raises HTTPErrors based on error statuses sent from validator.
Checks for common statuses and runs route specific error traps.
"""
if content.status == proto.OK:
return
try:
if content.status == proto.INTERNAL_ERROR:
raise errors.UnknownValidatorError()
except AttributeError:
# Not every protobuf has every status enum, so pass AttributeErrors
pass
try:
if content.status == proto.NOT_READY:
raise errors.ValidatorNotReady()
except AttributeError:
pass
try:
if content.status == proto.NO_ROOT:
raise errors.HeadNotFound()
except AttributeError:
pass
try:
if content.status == proto.INVALID_PAGING:
raise errors.PagingInvalid()
except AttributeError:
pass
try:
if content.status == proto.INVALID_SORT:
raise errors.SortInvalid()
except AttributeError:
pass
# Check custom error traps from the particular route message
if error_traps is not None:
for trap in error_traps:
trap.check(content.status)
@staticmethod
def _message_to_dict(message):
"""Converts a Protobuf object to a python dict with desired settings.
"""
return MessageToDict(
message,
including_default_value_fields=True,
preserving_proto_field_name=True)
@staticmethod
def _get_type_name(type_enum):
return Message.MessageType.Name(type_enum)
@staticmethod
def _get_status_name(proto, status_enum):
try:
return proto.Status.Name(status_enum)
except ValueError:
return 'Unknown ({})'.format(status_enum)
def _drop_empty_props(self, item):
"""Remove properties with empty strings from nested dicts.
"""
if isinstance(item, list):
return [self._drop_empty_props(i) for i in item]
if isinstance(item, dict):
return {
k: self._drop_empty_props(v)
for k, v in item.items() if v != ''
}
return item
def _drop_id_prefixes(self, item):
"""Rename keys ending in 'id', to just be 'id' for nested dicts.
"""
if isinstance(item, list):
return [self._drop_id_prefixes(i) for i in item]
if isinstance(item, dict):
return {
'id' if k.endswith('id') else k: self._drop_id_prefixes(v)
for k, v in item.items()
}
return item
def can_talk(self,intent):
return not self.is_pause or (intent == "smalltalk.agent.unpause")
async def validator_task(self):
try:
LOGGER.debug("validator_task:queue...")
while True:
await self.process_queue()
# pylint: disable=broad-except
except Exception as exc:
LOGGER.exception(exc)
LOGGER.critical("validator_task thread exited with error.")
async def process_queue(self):
try:
request = self._bgt_queue.get(timeout=0.01)
LOGGER.debug("VALIDATOR_TASK: intent=%s qsize=%s pause=%s",request.intent,self._bgt_queue.qsize(),self.is_pause)
if self.can_talk(request.intent):
await self._intent_handlers[request.intent](request)
except queue.Empty:
pass
except errors.ValidatorDisconnected:
LOGGER.debug("VALIDATOR Disconnected")
self.send_message(request.chat_id, 'Похоже BGT временно не доступен (:')
except KeyError as key:
LOGGER.debug("VALIDATOR_TASK: ignore=%s (no handler %s)",request.intent,key)
#LOGGER.debug("VALIDATOR_TASK:queue=%s EMPTY",self._bgt_queue.qsize())
#return
def start(self):
async def main_task():
LOGGER.info('START MAIN...')
while True:
await asyncio.sleep(FLUSH_TIMEOUT)
def bot_poll():
LOGGER.info('START BOT via=%s',PROXIES[0])
self._bot.polling()
LOGGER.info('STOP BOT')
self._pool = ThreadPoolExecutor(max_workers=2) #ProcessPoolExecutor(max_workers=2)
self._start_bot()
#self._pool = ProcessPoolExecutor(max_workers=2)
#self._pool.submit(self._start_bot())
#self._pool.start()
#task = loop.create_task(self.validator_task())
#task1 = loop.create_task(self._polling())
#loop.run_in_executor(self._pool,task1)
#loop.run_in_executor(self._pool,task)
LOGGER.info('START ...')
#self._bgt_queue.put('smalltalk.agent.can_you_help')
self._loop.run_until_complete(self._polling()) #main_task())
#loop.run_until_complete(main_task())
#LOGGER.info('START')
#self._start_bot_start_bot()
LOGGER.info('STOP')
self._loop.close()
LOGGER.info('STOP DONE')
#
"""
{'content_type': 'sticker', 'message_id': 17, 'from_user': {'id': 456125525, 'is_bot': False, 'first_name': 'Stan', 'username': 'Thou_shalt', 'last_name': 'P', 'language_code': 'ru'}, 'date': 1587126688, 'chat': {'type': 'private', 'last_name': 'P', 'first_name': 'Stan', 'username': 'Thou_shalt', 'id': 456125525, 'title': None, 'all_members_are_administrators': None, 'photo': None, 'description': None, 'invite_link': None, 'pinned_message': None, 'sticker_set_name': None, 'can_set_sticker_set': None}, 'forward_from_chat': None, 'forward_from_message_id': None, 'forward_from': None, 'forward_date': None, 'reply_to_message': None, 'edit_date': None, 'media_group_id': None, 'author_signature': None, 'text': None, 'entities': None, 'caption_entities': None, 'audio': None, 'document': None, 'photo': None, 'sticker': {'file_id': 'CAACAgIAAxkBAAMRXpmhoAlC4ghzi1DpcbrNLuIJbaMAAgMAA8A2TxOkKe7mffPAeBgE', 'width': 512, 'height': 512, 'thumb': <telebot.types.PhotoSize object at 0x7f7e51f3f2b0>, 'emoji': '😨', 'set_name': 'HotCherry', 'mask_position': None, 'file_size': 12727, 'is_animated': True}, 'video': None, 'video_note': None, 'voice': None, 'caption': None, 'contact': None, 'location': None, 'venue': None, 'animation': None, 'new_chat_member': None, 'new_chat_members': None, 'left_chat_member': None, 'new_chat_title': None, 'new_chat_photo': None, 'delete_chat_photo': None, 'group_chat_created': None, 'supergroup_chat_created': None, 'channel_chat_created': None, 'migrate_to_chat_id': None, 'migrate_from_chat_id': None, 'pinned_message': None, 'invoice': None, 'successful_payment': None, 'connected_website': None, 'json': {'message_id': 17, 'from': {'id': 456125525, 'is_bot': False, 'first_name': 'Stan', 'last_name': 'P', 'username': 'Thou_shalt', 'language_code': 'ru'}, 'chat': {'id': 456125525, 'first_name': 'Stan', 'last_name': 'P', 'username': 'Thou_shalt', 'type': 'private'}, 'date': 1587126688, 'sticker': {'width': 512, 'height': 512, 'emoji': '😨', 'set_name': 'HotCherry', 'is_animated': True, 'thumb': {'file_id': 'AAMCAgADGQEAAxFemaGgCULiCHOLUOlxus0u4gltowACAwADwDZPE6Qp7uZ988B4AAHthQ8ABAEAB20AA8eUAAIYBA', 'file_unique_id': 'AQAE7YUPAATHlAAC', 'file_size': 4448, 'width': 128, 'height': 128}, 'file_id': 'CAACAgIAAxkBAAMRXpmhoAlC4ghzi1DpcbrNLuIJbaMAAgMAA8A2TxOkKe7mffPAeBgE', 'file_unique_id': 'AgADAwADwDZPEw', 'file_size': 12727}}}
################################
{'update_id': 674365978, 'message': {'content_type': 'text', 'message_id': 1723, 'from_user': <telebot.types.User object at 0x7fc7d2dcb240>, 'date': 1587888825, 'chat': <telebot.types.Chat object at 0x7fc7d2dcb128>, 'forward_from_chat': None, 'forward_from_message_id': None, 'forward_from': None, 'forward_date': None, 'reply_to_message': None, 'edit_date': None, 'media_group_id': None, 'author_signature': None, 'text': 'Как успехи', 'entities': None, 'caption_entities': None, 'audio': None, 'document': None, 'photo': None, 'sticker': None, 'video': None, 'video_note': None, 'voice': None, 'caption': None, 'contact': None, 'location': None, 'venue': None, 'animation': None, 'new_chat_member': None, 'new_chat_members': None, 'left_chat_member': None, 'new_chat_title': None, 'new_chat_photo': None, 'delete_chat_photo': None, 'group_chat_created': None, 'supergroup_chat_created': None, 'channel_chat_created': None, 'migrate_to_chat_id': None, 'migrate_from_chat_id': None, 'pinned_message': None, 'invoice': None, 'successful_payment': None, 'connected_website': None, 'json': {'message_id': 1723, 'from': {'id': 456125525, 'is_bot': False, 'first_name': 'Stan', 'last_name': 'P', 'username': 'Thou_shalt', 'language_code': 'ru'}, 'chat': {'id': 456125525, 'first_name': 'Stan', 'last_name': 'P', 'username': 'Thou_shalt', 'type': 'private'}, 'date': 1587888825, 'text': 'Как успехи'}}, 'edited_message': None, 'channel_post': None, 'edited_channel_post': None, 'inline_query': None, 'chosen_inline_result': None, 'callback_query': None, 'shipping_query': None, 'pre_checkout_query': None}
################################
sticker_message {'content_type': 'sticker', 'message_id': 1725, 'from_user': {'id': 456125525, 'is_bot': False, 'first_name': 'Stan', 'username': 'Thou_shalt', 'last_name': 'P', 'language_code': 'ru'}, 'date': 1587888938, 'chat': {'type': 'private', 'last_name': 'P', 'first_name': 'Stan', 'username': 'Thou_shalt', 'id': 456125525, 'title': None, 'all_members_are_administrators': None, 'photo': None, 'description': None, 'invite_link': None, 'pinned_message': None, 'sticker_set_name': None, 'can_set_sticker_set': None}, 'forward_from_chat': None, 'forward_from_message_id': None, 'forward_from': None, 'forward_date': None, 'reply_to_message': None, 'edit_date': None, 'media_group_id': None, 'author_signature': None, 'text': None, 'entities': None, 'caption_entities': None, 'audio': None, 'document': None, 'photo': None, 'sticker': {'file_id': 'CAACAgIAAxkBAAIGvV6lQyo1b6Yvtzi3uKcGj47RiUdcAALCAQACVp29Cpl4SIBCOG2QGQQ', 'width': 512, 'height': 512, 'thumb': <telebot.types.PhotoSize object at 0x7fc7d005ef60>, 'emoji': '👍', 'set_name': 'TheVirus', 'mask_position': None, 'file_size': 7420, 'is_animated': True}, 'video': None, 'video_note': None, 'voice': None, 'caption': None, 'contact': None, 'location': None, 'venue': None, 'animation': None, 'new_chat_member': None, 'new_chat_members': None, 'left_chat_member': None, 'new_chat_title': None, 'new_chat_photo': None, 'delete_chat_photo': None, 'group_chat_created': None, 'supergroup_chat_created': None, 'channel_chat_created': None, 'migrate_to_chat_id': None, 'migrate_from_chat_id': None, 'pinned_message': None, 'invoice': None, 'successful_payment': None, 'connected_website': None, 'json': {'message_id': 1725, 'from': {'id': 456125525, 'is_bot': False, 'first_name': 'Stan', 'last_name': 'P', 'username': 'Thou_shalt', 'language_code': 'ru'}, 'chat': {'id': 456125525, 'first_name': 'Stan', 'last_name': 'P', 'username': 'Thou_shalt', 'type': 'private'}, 'date': 1587888938, 'sticker': {'width': 512, 'height': 512, 'emoji': '👍', 'set_name': 'TheVirus', 'is_animated': True, 'thumb': {'file_id': 'AAMCAgADGQEAAga9XqVDKjVvpi-3OLe4pwaPjtGJR1wAAsIBAAJWnb0KmXhIgEI4bZB6wdWRLgADAQAHbQADeiIAAhkE', 'file_unique_id': 'AQADesHVkS4AA3oiAAI', 'file_size': 6186, 'width': 128, 'height': 128}, 'file_id': 'CAACAgIAAxkBAAIGvV6lQyo1b6Yvtzi3uKcGj47RiUdcAALCAQACVp29Cpl4SIBCOG2QGQQ', 'file_unique_id': 'AgADwgEAAladvQo', 'file_size': 7420}}}
+++++++++++++++++++++++++++++++++=
{'message_id': 1798, 'from': {'id': 456125525, 'is_bot': False, 'first_name': 'Stan', 'last_name': 'P', 'username': 'Thou_shalt', 'language_code': 'ru'}, 'chat': {'id': 456125525, 'first_name': 'Stan', 'last_name': 'P', 'username': 'Thou_shalt', 'type': 'private'}, 'date': 1587914988, 'sticker': {'width': 512, 'height': 512, 'emoji': '👍', 'set_name': 'TheVirus', 'is_animated': True, 'thumb': {'file_id': 'AAMCAgADGQEAAgcGXqWo7KbDR7NPdeq-Ish0T_k2e2wAAsIBAAJWnb0KmXhIgEI4bZB6wdWRLgADAQAHbQADeiIAAhkE', 'file_unique_id': 'AQADesHVkS4AA3oiAAI', 'file_size': 6186, 'width': 128, 'height': 128}, 'file_id': 'CAACAgIAAxkBAAIHBl6lqOymw0ezT3XqviLIdE_5NntsAALCAQACVp29Cpl4SIBCOG2QGQQ', 'file_unique_id': 'AgADwgEAAladvQo', 'file_size': 7420}}
"""
| [
"[email protected]"
] | |
08da355ed5009788d673daf96c0f5f8075c62524 | 77ab53380f74c33bb3aacee8effc0e186b63c3d6 | /720_longest_word_in_dictionary.py | b1ef8b4f98d24725eeb93e621ed887835df90cb5 | [] | no_license | tabletenniser/leetcode | 8e3aa1b4df1b79364eb5ca3a97db57e0371250b6 | d3ebbfe2e4ab87d5b44bc534984dfa453e34efbd | refs/heads/master | 2023-02-23T18:14:31.577455 | 2023-02-06T07:09:54 | 2023-02-06T07:09:54 | 94,496,986 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | '''
Given a list of strings words representing an English Dictionary, find the longest word in words that can be built one character at a time by other words in words. If there is more than one possible answer, return the longest word with the smallest lexicographical order.
If there is no answer, return the empty string.
Example 1:
Input:
words = ["w","wo","wor","worl", "world"]
Output: "world"
Explanation:
The word "world" can be built one character at a time by "w", "wo", "wor", and "worl".
Example 2:
Input:
words = ["a", "banana", "app", "appl", "ap", "apply", "apple"]
Output: "apple"
Explanation:
Both "apply" and "apple" can be built from other words in the dictionary. However, "apple" is lexicographically smaller than "apply".
Note:
All the strings in the input will only contain lowercase letters.
The length of words will be in the range [1, 1000].
The length of words[i] will be in the range [1, 30].
'''
class Solution(object):
def longestWord(self, words):
"""
:type words: List[str]
:rtype: str
"""
words.sort()
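        # The set below is built from every word before any checks, so the sort is
        # only needed for tie-breaking: among buildable words of equal length the
        # first (lexicographically smallest) one wins, because only strictly longer
        # words replace the current result.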
w_dict = set()
result = ''
for w in words:
w_dict.add(w)
for w in words:
can_be_built = True
for i in xrange(1, len(w)):
if w[:i] not in w_dict:
can_be_built = False
break
if can_be_built and len(w) > len(result):
result = w
return result
s = Solution()
print s.longestWord(["w","wo","wor","worl", "world"])
print s.longestWord(["a", "banana", "app", "appl", "ap", "apply", "apple"])
| [
"[email protected]"
] | |
ae998783f6f09edee5eb0409239e0811735c2f57 | 141b42d9d72636c869ff2ce7a2a9f7b9b24f508b | /myvenv/Lib/site-packages/phonenumbers/data/region_SJ.py | 30448b9dce8f8518c9cc53db0649a80ffccfe27c | [
"BSD-3-Clause"
] | permissive | Fa67/saleor-shop | 105e1147e60396ddab6f006337436dcbf18e8fe1 | 76110349162c54c8bfcae61983bb59ba8fb0f778 | refs/heads/master | 2021-06-08T23:51:12.251457 | 2018-07-24T08:14:33 | 2018-07-24T08:14:33 | 168,561,915 | 1 | 0 | BSD-3-Clause | 2021-04-18T07:59:12 | 2019-01-31T17:00:39 | Python | UTF-8 | Python | false | false | 1,464 | py | """Auto-generated file, do not edit by hand. SJ metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_SJ = PhoneMetadata(id='SJ', country_code=47, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='0\\d{4}|[45789]\\d{7}', possible_length=(5, 8)),
fixed_line=PhoneNumberDesc(national_number_pattern='79\\d{6}', example_number='79123456', possible_length=(8,)),
mobile=PhoneNumberDesc(national_number_pattern='(?:4[015-8]|5[89]|9\\d)\\d{6}', example_number='41234567', possible_length=(8,)),
toll_free=PhoneNumberDesc(national_number_pattern='80[01]\\d{5}', example_number='80012345', possible_length=(8,)),
premium_rate=PhoneNumberDesc(national_number_pattern='82[09]\\d{5}', example_number='82012345', possible_length=(8,)),
shared_cost=PhoneNumberDesc(national_number_pattern='810(?:0[0-6]|[2-8]\\d)\\d{3}', example_number='81021234', possible_length=(8,)),
personal_number=PhoneNumberDesc(national_number_pattern='880\\d{5}', example_number='88012345', possible_length=(8,)),
voip=PhoneNumberDesc(national_number_pattern='85[0-5]\\d{5}', example_number='85012345', possible_length=(8,)),
uan=PhoneNumberDesc(national_number_pattern='0\\d{4}|81(?:0(?:0[7-9]|1\\d)|5\\d{2})\\d{3}', example_number='01234', possible_length=(5, 8)),
voicemail=PhoneNumberDesc(national_number_pattern='81[23]\\d{5}', example_number='81212345', possible_length=(8,)))
| [
"[email protected]"
] | |
4b62a941576cc59defc792dd09df58d2eb7e386b | ef11a06c906f37fa98c3de38aa1307110269b2f4 | /Notes/Fall2019/Ch4C.py | 6d229ca716a445032f72e9b2a630d709bd76b422 | [] | no_license | fwparkercode/IntroProgrammingNotes | 0d389d2d281122303da48ab2c1648750e594c04f | ad64777208d2f84f87e4ab45695adbfe073eae18 | refs/heads/master | 2021-07-16T07:13:55.665243 | 2020-06-09T12:45:38 | 2020-06-09T12:45:38 | 170,581,913 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,842 | py | # Chapter 4 - Loops and Random Numbers
# Random numbers
import random
# Randrange function - random.randrange(start, end, count_by)
print(random.randrange(10)) # generates a random int from 0 to 9
print(random.randrange(5, 10)) # random int from 5 to 9
print(random.randrange(50, 100, 5)) # random int from 50 to 99 counting by 5s
# make a random number between -10 and - 5
print(random.randrange(-10, -4))
# random even number from 28 to 36
print(random.randrange(28, 37, 2))
# random function - random.random()
# generates a random float from 0 to 1
print(random.random())
# to generate any other float use random.random() * spread + offset
# random float from 0 to 10
print(random.random() * 10)
# random float from 10 to 15
print(random.random() * 5 + 10)
# random float from -5 to 0
print(random.random() * 5 - 5)
# FOR LOOPS
# for the example below: i is the index, range is from 0 to 9
for i in range(10):
print("Taco Tuesday")
print("and Quesadillas")
print("Pulled Pork Wednesday")
# print twenty random integers from 0 to 100
for i in range(20):
print(random.randrange(101))
# Range function - range(start, end, count_by)
# works like random.randrange()
for i in range(10):
print(i)
for i in range(1, 11):
print(i)
for i in range(0, 101, 2):
print(i)
for i in range(50, 10, -5):
print(i)
# Nested loops
for i in range(3):
print("a")
for j in range(3):
print("b")
print("\n\n")
for i in range(3):
print("a")
for j in range(3):
print("b")
'''
for hours in range(24):
for minutes in range(60):
for seconds in range(60):
print(hours, minutes, seconds)
'''
for row in range(1, 21):
for seat in range(1, 21):
print("row", row, "seat", seat)
# Add all the numbers from 1 to 100
total = 0
for i in range(1, 101):
total += i
print(total)
# WHILE Loops
# use a FOR loop if you can.
# use a WHILE loop when you want to keep going until a condition exists
# count from 1 to 10
for i in range(1, 11):
print(i)
i = 1
while i <= 10:
print(i)
i += 1
# print multiples of 7 from 21 to 42
for i in range(21, 43, 7):
print(i)
i = 21
while i <= 42:
print(i)
i += 7
# what are all of the squared numbers under 100000
n = 1
while n ** 2 < 100000:
print(n, "squared is", n ** 2)
n += 1
# Beware the infinite loop
'''
n = 10
while n == 10:
print("TEN")
'''
'''
n = 10
while n > 0:
print(n)
n *= 2
'''
'''
while 4:
print("AHHHH")
'''
# GAME LOOP
done = False
print("Welcome to Dragon Quest 2!")
while not done:
answer = input("A dragon is blocking the exit. Do you want to wake it? ")
if answer.lower() == "yes" or answer.lower() == "y":
print("The dragon eats you!")
done = True
print("Thank you for playing")
| [
"[email protected]"
] | |
bfec5c587e199e8661352e09e76eb119ef9d4709 | 7d1d30be1995f2780cbf8999f1891e967936c090 | /pttweaks/activity/tests/test_models.py | f91ed5f80aebe191788ab9c3ad566ab2ce0f26ee | [] | no_license | EastAgile/PT-tweaks | 118274f70c198fb8885f4a42136a5a1bdefc4e51 | 7d5742862e42672eb77441ef7a7250d7a3a9359e | refs/heads/master | 2022-12-10T20:07:04.859288 | 2019-08-08T05:29:41 | 2019-08-08T05:29:41 | 164,597,129 | 0 | 1 | null | 2022-12-08T05:49:40 | 2019-01-08T08:06:59 | Python | UTF-8 | Python | false | false | 319 | py | from django.test import SimpleTestCase
from robber import expect
from activity.factories import ActivityChangeLogFactory
class ActivityChangeLogTestCase(SimpleTestCase):
def test_model_str(self):
activity = ActivityChangeLogFactory.build(story_id='123123')
expect(str(activity)).to.eq('123123')
| [
"[email protected]"
] | |
d2d30ed9ef98512ff7d30f5c9754fa535c698414 | c838c53ec5de94af57696f11db08f332ff2a65d8 | /mission/migrations/0121_picklist_pick_order.py | 042a0e8203c7fedb1905237711b5f99b55874583 | [] | no_license | memobijou/erpghost | 4a9af80b3c948a4d7bb20d26e5afb01b40efbab5 | c0ee90718778bc2b771b8078d9c08e038ae59284 | refs/heads/master | 2022-12-11T14:47:59.048889 | 2019-01-28T02:30:40 | 2019-01-28T02:30:40 | 113,774,918 | 1 | 1 | null | 2022-11-22T02:02:41 | 2017-12-10T18:53:41 | Python | UTF-8 | Python | false | false | 557 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-08-18 14:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mission', '0120_pickorder'),
]
operations = [
migrations.AddField(
model_name='picklist',
name='pick_order',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='mission.PickOrder'),
),
]
| [
"[email protected]"
] | |
16cbf16dc6b7fe17bb9032ee14ac7d326eeaced8 | 3a570384a3fa9c4c7979d33b182556e1c637e9eb | /anwmisc/anw-pyui/Packages/anwp/gui/configinfo.py | 5c7bf27ee02e7b8c9821e1b50aacc8cfcb5713f6 | [] | no_license | colshag/ANW | 56a028af5042db92b5ead641dc542fcb4533344e | 46948d8d18a0639185dd4ffcffde126914991553 | refs/heads/master | 2020-03-27T00:22:49.409109 | 2018-10-27T06:37:04 | 2018-10-27T06:37:04 | 145,618,125 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,603 | py | # ---------------------------------------------------------------------------
# Armada Net Wars (ANW)
# configinfo.py
# Written by Chris Lewis
# ---------------------------------------------------------------------------
# This panel Displays User Config information
# ---------------------------------------------------------------------------
import pyui
import guibase
import anwp.func.globals
class ConfigInfoFrame(guibase.BaseFrame):
"""Displays User Config Information"""
def __init__(self, mode, app, title='User Configuration'):
self.app = app
self.width = 1024
try:
self.height = (app.height - mode.mainMenu.height - mode.mainFooter.height - 40)
except:
self.height = (app.height - 120)
try:
y = (mode.mainMenu.height)
except:
y = 40
x = 0
guibase.BaseFrame.__init__(self, mode, x, y, self.width, self.height, title)
self.setPanel(ConfigInfoPanel(self))
class ConfigInfoPanel(guibase.BasePanel):
"""Panel for User Config information"""
def __init__(self, frame):
guibase.BasePanel.__init__(self, frame)
numExtend = 1
x = (self.frame.app.height - 768) / (22 * numExtend)
cells = 28 + (numExtend * x)
self.setLayout(pyui.layouts.TableLayoutManager(8, cells))
# subject title
self.pctEmpire = pyui.widgets.Picture('')
self.addChild(self.pctEmpire, (0, 0, 1, 3))
self.lblTitle = pyui.widgets.Label(text='', type=1)
self.addChild(self.lblTitle, (1, 1, 3, 1))
self.btnSurrender = pyui.widgets.Button('Surrender Game', self.onSurrender)
self.addChild(self.btnSurrender, (6, 1, 2, 1))
n = 4
self.lbl = pyui.widgets.Label(text='CHANGE EMPIRE INFO:', type=1)
self.addChild(self.lbl, (0, n, 4, 1))
self.lbl = pyui.widgets.Label(text='Email Address:', type=2)
self.addChild(self.lbl, (0, n+1, 2, 1))
self.txtEmail = pyui.widgets.Edit('',50)
self.addChild(self.txtEmail, (2, n+1, 4, 1))
self.btnEmail = pyui.widgets.Button('Change Email', self.onChangeEmail)
self.addChild(self.btnEmail, (6, n+1, 2, 1))
self.lbl = pyui.widgets.Label(text='Login Password:', type=2)
self.addChild(self.lbl, (0, n+2, 2, 1))
self.txtPassword = pyui.widgets.Edit('',20)
self.addChild(self.txtPassword, (2, n+2, 2, 1))
self.btnEmail = pyui.widgets.Button('Change Password', self.onChangePassword)
self.addChild(self.btnEmail, (6, n+2, 2, 1))
# starship captains
n = n+4
self.lbl = pyui.widgets.Label(text='SELECT STARSHIP CAPTAIN:', type=1)
self.addChild(self.lbl, (0, n, 4, 1))
self.lstCaptains = pyui.widgets.ListBox(self.onCaptainSelected,None,100,100,0)
self.addChild(self.lstCaptains, (0, n+1, 8, 16+x))
n = n+18+x
self.lbl = pyui.widgets.Label(text='Selected Captain Name:', type=2)
self.addChild(self.lbl, (0, n, 2, 1))
self.txtName = pyui.widgets.Edit('',20)
self.addChild(self.txtName, (2, n, 2, 1))
self.btnName = pyui.widgets.Button('Change Captain Name', self.onChangeName)
self.addChild(self.btnName, (4, n, 2, 1))
        self.pack()
self.populate()
def buildCaptainsData(self):
"""Display all Captains in Empire"""
d = {}
# sort captains by experience level
##captains = anwp.func.funcs.sortDictByChildObjValue(self.frame.mode.game.myCaptains, 'experience', True, {})
for captainID, myCaptainDict in self.frame.mode.game.myCaptains.iteritems():
d[captainID] = '%s - RANK:%s' % (myCaptainDict['name'], myCaptainDict['rank'])
return d
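    # Hypothetical shape of the returned dict (values invented for illustration):
    # {42: 'Janeway - RANK:3', 43: 'Sulu - RANK:1'} -- keyed by captain id so the
    # listbox selection can be mapped back to the game data.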
def onCaptainSelected(self, item):
"""Select item from List"""
if not item:
self.btnName.disable()
else:
if self.lstCaptains.selected <> -1:
self.btnName.enable()
self.txtName.setText(self.frame.mode.game.myCaptains[item.data]['name'])
def onChangeEmail(self, item):
"""Change Email Address"""
try:
d = {}
d['emailAddress'] = self.txtEmail.text
serverResult = self.frame.mode.game.server.setEmpire(self.frame.mode.game.authKey, d)
if serverResult == 1:
self.frame.mode.game.myEmpire['emailAddress'] = self.txtEmail.text
self.frame.mode.modeMsgBox('Empire Email Address Changed')
else:
self.frame.mode.modeMsgBox(serverResult)
except:
self.frame.mode.modeMsgBox('onChangeEmail->Connection to Server Lost, Login Again')
def onSurrender(self, item):
"""Surrender Game"""
self.frame.mode.modeYesNoBox('Do you really want to surrender the game?', 'surrenderYes', 'surrenderNo')
def onChangeName(self, item):
"""Change Selected Captain Name"""
try:
id = self.lstCaptains.getSelectedItem().data
serverResult = self.frame.mode.game.server.setCaptainName(self.frame.mode.game.authKey, id, self.txtName.text)
if serverResult == 1:
self.frame.mode.game.myCaptains[id]['name'] = self.txtName.text
self.frame.mode.modeMsgBox('Captain name Changed')
self.populate()
else:
self.frame.mode.modeMsgBox(serverResult)
except:
self.frame.mode.modeMsgBox('onChangeName->Connection to Server Lost, Login Again')
def onChangePassword(self, item):
"""Change Password"""
try:
d = {}
d['password'] = self.txtPassword.text
serverResult = self.frame.mode.game.server.setEmpire(self.frame.mode.game.authKey, d)
if serverResult == 1:
self.frame.mode.game.empirePass = self.txtPassword.text
self.frame.mode.modeMsgBox('Empire Password Changed')
else:
self.frame.mode.modeMsgBox(serverResult)
except:
self.frame.mode.modeMsgBox('onChangePassword->Connection to Server Lost, Login Again')
def populate(self):
"""Populate frame with new data"""
self.btnName.disable()
try:
myEmpireDict = self.frame.mode.game.myEmpire
myEmpirePict = '%s%s.png' % (self.frame.app.simImagePath, myEmpireDict['imageFile'])
self.lblTitle.setText('CONFIGURATION FOR: %s' % myEmpireDict['name'])
self.lblTitle.setColor(anwp.func.globals.colors[myEmpireDict['color1']])
myCaptains = self.buildCaptainsData()
self.txtEmail.setText(myEmpireDict['emailAddress'])
self.txtPassword.setText(self.frame.mode.game.empirePass)
except:
# this allows for testing panel outside game
myEmpirePict = self.testImagePath + 'empire1.png'
self.lblTitle.setText('CONFIGURATION FOR: Test')
myCaptains = self.testDict
self.pctEmpire.setFilename(myEmpirePict)
self.populateListbox(self.lstCaptains, myCaptains)
def main():
"""Run gui for testing"""
import run
width = 1024
height = 768
pyui.init(width, height, 'p3d', 0, 'Testing Config Info Panel')
app = run.TestApplication(width, height)
frame = ConfigInfoFrame(None, app)
app.addGui(frame)
app.run()
pyui.quit()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
4b0e4be71217535a3b023b8cd57fa3de00fa5b98 | a4440a990b86a239a30b4295661ca588db3f5928 | /src/knn/digital_recognition.py | 5411be618eb5bc4fae3f1b70b129b3cbdb7ead0f | [] | no_license | YangXinNewlife/MachineLearning | fdaa1f75b90c143165d457b645d3c13fee7ea9a1 | 196ebdc881b74c746f63768b7ba31fec65e462d5 | refs/heads/master | 2020-04-05T00:10:25.050507 | 2019-06-10T03:44:33 | 2019-06-10T03:44:33 | 156,386,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,377 | py | # -*- coding:utf-8 -*-
__author__ = 'yangxin_ryan'
from numpy import *
from os import listdir
from collections import Counter
import operator
"""
图片的输入为 32 * 32的转换为 1 * 1024的向量
"""
class DigitalRecognition(object):
def __init__(self):
print("Welcome, 手写数字识别算法!")
"""
1.距离计算
tile生成和训练样本对应的矩阵,并与训练样本求差
取平方
将矩阵的每一行相加
开方
根据距离从小到大的排序,并返回对应的索引位置
2.选择距离最小的k个值
3.排序并返回出现最多的那个类型
"""
def classify_1(self, in_x, data_set, labels, k):
data_set_size = data_set.shape[0]
diff_mat = tile(in_x, (data_set_size, 1)) - data_set
sq_diff_mat = diff_mat ** 2
sq_distances = sq_diff_mat.sum(axis=1)
distances = sq_distances ** 0.5
sorted_dist_indicies = distances.argsort()
class_count = {}
for i in range(k):
vote_i_label = labels[sorted_dist_indicies[i]]
class_count[vote_i_label] = class_count.get(vote_i_label, 0) + 1
sorted_class_count = sorted(class_count.items(), key=operator.itemgetter(1), reverse=True)
        return sorted_class_count[0][0]  # label of the most common class among the k nearest
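    # Toy example (made-up data): with k = 3,
    #   classify_1([0, 0], array([[1, 1], [1, 0], [0, 1], [9, 9]]), ['A', 'A', 'B', 'B'], 3)
    # the three nearest neighbours carry labels A, B, A, so 'A' is returned.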
"""
    1. Compute the distances.
    2. Collect the labels of the k nearest neighbours.
    3. The most frequent label is the final class.
"""
def classify_2(self, in_x, data_set, labels, k):
        dist = ((in_x - data_set) ** 2).sum(axis=1) ** 0.5  # ndarray.sum; the 'np' alias was never imported
k_labels = [labels[index] for index in dist.argsort()[0:k]]
label = Counter(k_labels).most_common(1)[0][0]
return label
def file_to_matrix(self, file_name):
fr = open(file_name)
number_of_lines = len(fr.readlines())
return_mat = zeros((number_of_lines, 3))
class_label_vector = []
fr = open(file_name)
index = 0
for line in fr.readlines():
line = line.strip()
list_from_line = line.split("\t")
return_mat[index, :] = list_from_line[0:3]
class_label_vector.append(int(list_from_line[-1]))
index += 1
return return_mat, class_label_vector
"""
    Convert an image file to a vector.
    The input image is 32 * 32; this function flattens it into a 1 * 1024 NumPy array.
"""
def img_to_vector(self, file_name):
return_vector = zeros((1, 1024))
fr = open(file_name, 'r')
for i in range(32):
line_str = fr.readline()
for j in range(32):
return_vector[0, 32 * i + j] = int(line_str[j])
return return_vector
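    # Example of the flattening: the character at row i, column j of the 32 * 32
    # text image lands at index 32 * i + j, e.g. row 2, column 5 -> position 69.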
def run(self, train_file_path, test_file_path, k):
labels = []
training_file_list = listdir(train_file_path)
train_len = len(training_file_list)
training_mat = zeros((train_len, 1024))
for i in range(train_len):
file_name_str = training_file_list[i]
file_str = file_name_str.split(".")[0]
class_num_str = int(file_str.split("_")[0])
labels.append(class_num_str)
img_file = train_file_path + file_name_str
print(img_file)
training_mat[i] = self.img_to_vector(img_file)
test_file_list = listdir(test_file_path)
error_count = 0.0
test_len = len(test_file_list)
for i in range(test_len):
file_name_str = test_file_list[i]
file_str = file_name_str.split(".")[0]
class_num_str = int(file_str.split("_")[0])
test_file_img = test_file_path + file_name_str
vector_under_test = self.img_to_vector(test_file_img)
classifier_result = self.classify_1(vector_under_test, training_mat, labels, k)
if classifier_result != class_num_str:
print(file_name_str)
error_count += 1.0
print("\nthe total number of errors is: %d" % error_count)
print("\nthe total error rate is: %f" % (error_count / float(test_len)))
if __name__ == '__main__':
digital_recognition = DigitalRecognition()
digital_recognition.run("/Users/yangxin_ryan/PycharmProjects/MachineLearning/data/knn/trainingDigits/",
"/Users/yangxin_ryan/PycharmProjects/MachineLearning/data/knn/testDigits/",
6) | [
"[email protected]"
] | |
c6beb75082885391bc95b1891a36e80e937a4666 | 642151dff23fff48310139ddc9b89c8bf6a670e3 | /app/base/routes.py | 14e706ea47230919be667a314346e7ccf0af73e5 | [] | no_license | gemicn/flask-navigation | 7ad371e0ac8220c14687f02b130707bf89c81553 | 382940492ca6cae41da44d30fb78c9535e8de955 | refs/heads/master | 2022-09-03T22:51:41.920901 | 2020-05-26T01:37:26 | 2020-05-26T01:37:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | from bcrypt import checkpw
from flask import jsonify, render_template, redirect, request, url_for
from app.base import blueprint
@blueprint.route('/')
def route_default():
return redirect(url_for('nav_blueprint.index'))
| [
"[email protected]"
] | |
ea8884ee217bc2ebacf21f538a754ef50ed61ba6 | b5d9ce70b77672f497bd1f474f3c01230931169f | /econobilidade/econobilidade/wsgi.py | 8e01bd3c1ce6d7414c64aed22433a5202ede9230 | [] | no_license | chemalle/fisconobilidade | a59ef6544f97195da637ba5ad458cf834e3e6030 | 2b9fdd000ce83e7e9927e85895b21d57c644af35 | refs/heads/master | 2021-07-02T17:19:54.864048 | 2017-09-21T20:03:08 | 2017-09-21T20:03:08 | 104,392,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | """
WSGI config for econobilidade project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "econobilidade.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
22673e5c1820b8646b55bf630652d58b49177ef8 | bbf025a5f8596e5513bd723dc78aa36c46e2c51b | /dfs + tree/100 sameTree.py | 41ecd7912ef4878879d74277903c001435c1f6a2 | [] | no_license | AlanFermat/leetcode | 6209bb5cf2d1b19e3fe7b619e1230f75bb0152ab | cacba4abaca9c4bad8e8d12526336115067dc6a0 | refs/heads/master | 2021-07-11T04:00:00.594820 | 2020-06-22T21:31:02 | 2020-06-22T21:31:02 | 142,341,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | from binaryTree import Node
t1 = Node(1)
t1.left = Node(2)
t1.right = Node(3)
t2 = Node(1)
t2.left = Node(2)
t2.right = Node(3)
def treeToList(t):
    # Preorder-flatten the tree, keeping a None marker for missing children so that
    # different shapes (e.g. only a left child vs. only a right child) cannot
    # flatten to the same list.
    if t is None:
        return [None]
    return [t.val] + treeToList(t.left) + treeToList(t.right)
def isSameTree(t,s):
t_list = treeToList(t)
s_list = treeToList(s)
return s_list == t_list
def isSame(t,s):
if not t and not s:
return True
if t and s:
if t.val == s.val:
return isSame(t.left, s.left) and isSame(t.right, s.right)
return False
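# isSame walks both trees recursively and stops at the first mismatch, so it needs no
# extra lists; isSameTree instead relies on the flattened preorder (with None markers
# for missing children) being unique per tree shape.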
print (isSame(t1,t2)) | [
"[email protected]"
] | |
336c31fca80c5b1edd2c0ae1909608af55e7d349 | 61d22eef5483a046b418a295d2ffa22857a296e1 | /swtest/1952.py | 0a3cb502106980c8bed236da21232f446e6783fd | [] | no_license | seoul-ssafy-class-2-studyclub/hyeonhwa | 7ad680a67ba253eece07a9605a3b983f98a8cca3 | e51163b3135cf529d295bc0d527c98b642f8c367 | refs/heads/master | 2021-10-06T12:57:44.046963 | 2021-10-02T09:42:55 | 2021-10-02T09:42:55 | 198,594,633 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | import sys
sys.stdin = open('input4.txt', 'r')
T = int(input())
for t in range(T):
money = list(map(int, input().split()))
plan = list(map(int, input().split()))
dp = [0] * 12
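    # dp[i] = minimum cost to cover the travel plan for months 0..i using 1-day
    # tickets (money[0]), 1-month passes (money[1]) and 3-month passes (money[2]);
    # the 1-year pass (money[3]) is compared once at the end.
    # Note: when i == 2, dp[i-3] is dp[-1] == dp[11], which is still 0 at that point,
    # so the 3-month pass price alone covers the first quarter.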
dp[0] = min(money[0]*plan[0], money[1])
for i in range(1, 12):
dp[i] = min(dp[i-1]+money[0]*plan[i], dp[i-1]+money[1])
if i >= 2:
dp[i] = min(dp[i-3]+money[2], dp[i])
res = min(dp[11], money[3])
print('#{} {}'.format(t+1, res))
| [
"[email protected]"
] | |
f79dd2dab6a9e5e06232020fc812c26b78740da4 | a50e906945260351f43d57e014081bcdef5b65a4 | /collections/ansible_collections/fortinet/fortios/plugins/modules/fortios_test_autod.py | ba72e9dc091a5fba49d6174bde73f232cf0ec22c | [] | no_license | alhamdubello/evpn-ipsec-dci-ansible | 210cb31f4710bb55dc6d2443a590f3eb65545cf5 | 2dcc7c915167cd3b25ef3651f2119d54a18efdff | refs/heads/main | 2023-06-08T10:42:35.939341 | 2021-06-28T09:52:45 | 2021-06-28T09:52:45 | 380,860,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,724 | py | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_test_autod
short_description: Automation daemon in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify test feature and autod category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.4.0
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Hongbin Lu (@fgtdev-hblu)
- Frank Shen (@frankshen01)
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks
requirements:
- ansible>=2.9.0
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
test_autod:
description:
- Automation daemon.
default: null
type: dict
suboptions:
<Integer>:
description:
- Test level.
type: str
'''
EXAMPLES = '''
- hosts: fortigates
collections:
- fortinet.fortios
connection: httpapi
vars:
vdom: "root"
ansible_httpapi_use_ssl: yes
ansible_httpapi_validate_certs: no
ansible_httpapi_port: 443
tasks:
- name: Automation daemon.
fortios_test_autod:
vdom: "{{ vdom }}"
test_autod:
<Integer>: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
def filter_test_autod_data(json):
option_list = ['<Integer>']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def test_autod(data, fos):
vdom = data['vdom']
test_autod_data = data['test_autod']
filtered_data = underscore_to_hyphen(filter_test_autod_data(test_autod_data))
return fos.set('test',
'autod',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_test(data, fos):
if data['test_autod']:
resp = test_autod(data, fos)
else:
fos._module.fail_json(msg='missing task body: %s' % ('test_autod'))
return not is_successful_status(resp), \
resp['status'] == "success" and \
(resp['revision_changed'] if 'revision_changed' in resp else True), \
resp
def main():
mkeyname = None
fields = {
"access_token": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"test_autod": {
"required": False, "type": "dict", "default": None,
"options": {
"<Integer>": {"required": False, "type": "str"}
}
}
}
check_legacy_fortiosapi()
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
versions_check_result = None
if module._socket_path:
connection = Connection(module._socket_path)
if 'access_token' in module.params:
connection.set_option('access_token', module.params['access_token'])
fos = FortiOSHandler(connection, module, mkeyname)
is_error, has_changed, result = fortios_test(module.params, fos)
versions_check_result = connection.get_system_version()
else:
module.fail_json(**FAIL_SOCKET_MSG)
if versions_check_result and versions_check_result['matched'] is False:
module.warn("Ansible has detected version mismatch between FortOS system and galaxy, see more details by specifying option -vvv")
if not is_error:
if versions_check_result and versions_check_result['matched'] is False:
module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result)
else:
module.exit_json(changed=has_changed, meta=result)
else:
if versions_check_result and versions_check_result['matched'] is False:
module.fail_json(msg="Error in repo", version_check_warning=versions_check_result, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
6168a3b2331db5b8eeef80583560970fab0652a2 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/popish.py | fc482e43309e27670d3e1bb14bc0bb4c6a354519 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 409 | py | ii = [('CookGHP3.py', 3), ('CookGHP.py', 50), ('BailJD2.py', 1), ('ClarGE2.py', 1), ('SeniNSP.py', 1), ('CookGHP2.py', 5), ('ClarGE.py', 5), ('DaltJMA.py', 33), ('WadeJEB.py', 5), ('NewmJLP.py', 1), ('SoutRD2.py', 1), ('SoutRD.py', 1), ('MereHHB3.py', 1), ('HowiWRL2.py', 1), ('HogaGMM.py', 2), ('FerrSDO.py', 1), ('WilbRLW3.py', 3), ('MereHHB2.py', 1), ('ClarGE3.py', 3), ('EvarJSP.py', 1), ('TaylIF.py', 3)] | [
"[email protected]"
] | |
ac50fa692d845d1eeee00f4cca7f93fa4cfa9589 | c0caed81b5b3e1498cbca4c1627513c456908e38 | /src/python/bindings/app/membrane/predict_ddG.py | f9857bead41598d64c25060485f3cf5045c0b739 | [
"LicenseRef-scancode-other-permissive"
] | permissive | malaifa/source | 5b34ac0a4e7777265b291fc824da8837ecc3ee84 | fc0af245885de0fb82e0a1144422796a6674aeae | refs/heads/master | 2021-01-19T22:10:22.942155 | 2017-04-19T14:13:07 | 2017-04-19T14:13:07 | 88,761,668 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 12,380 | py | #!/usr/bin/env python
# :noTabs=true:
# (c) Copyright Rosetta Commons Member Institutions.
# (c) This file is part of the Rosetta software suite and is made available under license.
# (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.
# (c) For more information, see http://www.rosettacommons.org. Questions about this can be
# (c) addressed to University of Washington UW TechTransfer, email: [email protected].
## @file: compute_ddG.py
##
## @brief: Compute ddGs of mutation
## @details: Use the Rosetta membrane framework to compute the ddG of unfolding of
## a membrane protein in Rosetta (uses packer, mutate.py from Evan Baugh)
##
## @author: Rebecca F. Alford ([email protected])
## @author: JKLeman ([email protected])
# Tools
import sys, os
import commands
import random
from optparse import OptionParser, IndentedHelpFormatter
_script_path_ = os.path.dirname( os.path.realpath(__file__) )
# Rosetta-specific imports
import rosetta.protocols.membrane
from rosetta import Pose
from rosetta import create_score_function
from rosetta import TaskFactory
from rosetta.utility import vector1_bool
from rosetta import aa_from_oneletter_code
from rosetta import PackRotamersMover
from rosetta.core.pose import PDBInfo
from rosetta.core.chemical import VariantType
from toolbox import mutate_residue
###############################################################################
## @brief Main - Add Membrane to Pose, Compute ddG
def main( args ):
parser = OptionParser(usage="usage: %prog [OPTIONS] [TESTS]")
parser.set_description(main.__doc__)
#input options
parser.add_option('--in_pdb', '-p',
action="store",
help="Input PDB file.", )
parser.add_option('--in_span', '-s',
action="store",
help="Input spanfile.", )
parser.add_option('--out', '-o',
action="store", default='ddG.out',
help="Output filename with pose residue numbering. Default: 'ddG.out'", )
parser.add_option('--res', '-r',
action="store",
help="Pose residue number to mutate.", )
parser.add_option('--mut', '-m',
action="store",
help="One-letter code of residue identity of the mutant. Example: A181F would be 'F'", )
parser.add_option('--repack_radius', '-a',
action="store", default=0,
help="Repack the residues within this radius",)
parser.add_option('--output_breakdown', '-b',
action="store", default="scores.sc",
help="Output mutant and native score breakdown by weighted energy term into a scorefile", )
parser.add_option('--include_pH', '-t',
action="store", default=0,
help="Include pH energy terms: pH_energy and fa_elec. Default false.", )
parser.add_option('--pH_value', '-q',
action="store", default=7,
help="Predict ddG and specified pH value. Default 7. Will not work if include pH is not passed", )
#parse options
(options, args) = parser.parse_args(args=args[1:])
global Options
Options = options
# Check the required inputs (PDB file, spanfile) are present
if ( not Options.in_pdb or not Options.in_span or not Options.res ):
sys.exit( "Must provide flags '-in_pdb', '-in_span', and '-res'! Exiting..." )
# Initialize Rosetta options from user options. Enable pH mode if applicable
rosetta_options = ""
standard_options = "-membrane_new:setup:spanfiles " + Options.in_span + " -run:constant_seed -in:ignore_unrecognized_res"
if ( Options.include_pH ):
print Options.pH_value
if ( float( Options.pH_value ) < 0 or float(Options.pH_value) > 14 ):
sys.exit( "Specified pH value must be between 0-14: Exiting..." )
else:
pH_options = " -pH_mode -value_pH " + str(Options.pH_value)
rosetta_options = standard_options + pH_options
else:
rosetta_options = standard_options
# Initialize Rosetta based on user inputs
rosetta.init( extra_options=rosetta_options )
# Load Pose, & turn on the membrane
pose = pose_from_file( Options.in_pdb )
# Add Membrane to Pose
add_memb = rosetta.protocols.membrane.AddMembraneMover()
add_memb.apply( pose )
# Setup in a topology based membrane
init_mem_pos = rosetta.protocols.membrane.MembranePositionFromTopologyMover()
init_mem_pos.apply( pose )
# check the user has specified a reasonable value for the pH
sfxn = rosetta.core.scoring.ScoreFunction()
if ( Options.include_pH ):
# Create a membrane energy function enabled by pH mode
# Includes two terms not standard in the smoothed energy function: pH energy
# and fa_elec
sfxn = create_score_function( "mpframework_pHmode_fa_2015")
else:
# Create a smoothed membrane full atom energy function (pH 7 calculations)
sfxn = create_score_function( "mpframework_smooth_fa_2012")
# Repack the native rotamer and residues within the repack radius
native_res = pose.residue( int( Options.res ) ).name1()
repacked_native = mutate_residue( pose, int( Options.res), native_res, Options.repack_radius, sfxn )
# to output score breakdown, start by printing the score labels in
# the top of the file
print_score_labels_to_file( repacked_native, sfxn, Options.output_breakdown )
# Compute mutations
if ( Options.mut ):
with file( Options.out, 'a' ) as f:
ddGs = compute_ddG( repacked_native, sfxn, int( Options.res ), Options.mut, Options.repack_radius, Options.output_breakdown )
f.write( Options.in_pdb + " " + Options.res + " " + str(ddGs[0]) + " " + str(ddGs[1]) + " " + str(ddGs[2]) + " " + str(ddGs[3]) + "\n" )
f.close
else:
AAs = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
for aa in AAs:
with file( Options.out, 'a' ) as f:
ddGs = compute_ddG( repacked_native, sfxn, int( Options.res ), aa, Options.repack_radius, Options.output_breakdown )
f.write( str(ddGs[0]) + " " + str(ddGs[1]) + " " + str(ddGs[2]) + " " + str(ddGs[3]) + "\n" )
f.close
###############################################################################
## @brief Compute ddG of mutation in a protein at specified residue and AA position
def compute_ddG( pose, sfxn, resnum, aa, repack_radius, sc_file ):
# Score Native Pose
native_score = sfxn( pose )
# Perform Mutation at residue <resnum> to amino acid <aa>
mutated_pose = mutate_residue( pose, resnum, aa, repack_radius, sfxn )
# Score Mutated Pose
mutant_score = sfxn( mutated_pose )
# If specified the user, print the breakdown of ddG values into a file
print_ddG_breakdown( pose, mutated_pose, sfxn, resnum, aa, sc_file )
# return scores
return aa, round( mutant_score, 3 ), round( native_score, 3 ), round ( mutant_score - native_score, 3 )
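# Illustrative only (all numbers made up): compute_ddG( pose, sfxn, 181, "F", 8.0, "scores.sc" )
# might return ( 'F', -512.345, -515.678, 3.333 ), i.e. (mutant residue, mutant score,
# native score, ddG of the mutation).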
###############################################################################
# @brief Replace the residue at <resid> in <pose> with <new_res> and allows
# repacking within a given <pack_radius>
def mutate_residue( pose, mutant_position, mutant_aa, pack_radius, pack_scorefxn ):
if pose.is_fullatom() == False:
IOError( 'mutate_residue only works with fullatom poses' )
test_pose = Pose()
test_pose.assign( pose )
# Create a packer task (standard)
task = TaskFactory.create_packer_task( test_pose )
# the Vector1 of booleans (a specific object) is needed for specifying the
# mutation, this demonstrates another more direct method of setting
# PackerTask options for design
aa_bool = vector1_bool()
# PyRosetta uses several ways of tracking amino acids (ResidueTypes)
# the numbers 1-20 correspond individually to the 20 proteogenic amino acids
# aa_from_oneletter returns the integer representation of an amino acid
# from its one letter code
# convert mutant_aa to its integer representation
mutant_aa = aa_from_oneletter_code( mutant_aa )
# mutation is performed by using a PackerTask with only the mutant
# amino acid available during design
# to do this, construct a Vector1 of booleans indicating which amino acid
# (by its numerical designation, see above) to allow
for i in range( 1 , 21 ):
# in Python, logical expression are evaluated with priority, thus the
# line below appends to aa_bool the truth (True or False) of the
# statement i == mutant_aa
aa_bool.append( i == mutant_aa )
# modify the mutating residue's assignment in the PackerTask using the
# Vector1 of booleans across the proteogenic amino acids
task.nonconst_residue_task( mutant_position
).restrict_absent_canonical_aas( aa_bool )
# prevent residues from packing by setting the per-residue "options" of
# the PackerTask
center = pose.residue( mutant_position ).nbr_atom_xyz()
for i in range( 1, pose.total_residue() + 1 ):
dist = center.distance_squared( test_pose.residue( i ).nbr_atom_xyz() );
# only pack the mutating residue and any within the pack_radius
if i != mutant_position and dist > pow( float( pack_radius ), 2 ) :
task.nonconst_residue_task( i ).prevent_repacking()
# apply the mutation and pack nearby residues
packer = PackRotamersMover( pack_scorefxn , task )
packer.apply( test_pose )
return test_pose
###############################################################################
#@brief Print ddG breakdown from the pose
# Extract weighted energies from the native and mutated pose. Calculate the ddG
# of each and print the component-wise ddG vlaues
def print_ddG_breakdown( native_pose, mutated_pose, sfxn, resnum, aa, fn ):
# Extract scores
tmp_native = native_pose.energies().total_energies().weighted_string_of( sfxn.weights() )
tmp_mutant = mutated_pose.energies().total_energies().weighted_string_of( sfxn.weights() )
# Parse out scores
array_native = filter( None, tmp_native.split(' ') )
array_mutant = filter( None, tmp_mutant.split(' ') )
# Pull out only the scores from these arrays
native_scores = []
for i in range( len(array_native) ):
if ( i % 2 != 0 ):
native_scores.append( float( array_native[i] ) )
mutant_scores = []
for i in range( len(array_mutant) ):
if ( i % 2 != 0 ):
mutant_scores.append( float( array_mutant[i] ) )
# Make a label for the mutation
native_res = native_pose.residue( int( Options.res ) ).name1()
mut_label = native_res + str(resnum) + aa
# Calculate ddG of individual components
ddGs = []
ddGs.append( mut_label )
for i in range( len( mutant_scores ) ):
ddG_component = mutant_scores[i] - native_scores[i]
ddGs.append( round( ddG_component, 3 ) )
ddGs_str = convert_array_to_str( ddGs )
with file( fn, 'a' ) as f:
f.write( ddGs_str + "\n" )
f.close()
###############################################################################
#@brief Get header for ddG breakdown output
# Save the score labels, to be printed at the top of the output breakdown file
def print_score_labels_to_file( native_pose, sfxn, fn ):
tmp_native = native_pose.energies().total_energies().weighted_string_of( sfxn.weights() )
array_native = filter( None, tmp_native.split(' ') )
labels = []
labels.append( 'mutation ' ) # Append field for mutation label
for i in range( len(array_native) ):
if ( i % 2 == 0 ):
labels.append( array_native[i].translate(None, ':') )
labels_str = convert_array_to_str( labels )
with file( fn, 'a' ) as f:
f.write( labels_str + "\n" )
f.close()
###############################################################################
#@brief Convert an array to a space deliminted string
# Save the score labels, to be printed at the top of the output breakdown file
def convert_array_to_str( array ):
linestr = ""
for elem in array:
if ( linestr == "" ):
linestr = linestr + str( elem )
else:
linestr = linestr + " " + str( elem )
return linestr
if __name__ == "__main__" : main(sys.argv)
| [
"[email protected]"
] | |
6ec54bc427e38fff5352b9d4a525b5c7b1bbc069 | 214ea3873f451940c73c4fb02981b08c8161b23c | /Array/range-addition.py | 8617f93a5c2256d727dd8742f68e760606d4d616 | [] | no_license | Tiierr/LeetCode-Python | 4a086a76a6d3780140e47246304d11c520548396 | e8532b63fc5bb6ceebe30a9c53ab3a2b4b2a75a3 | refs/heads/master | 2021-06-14T04:36:57.394115 | 2017-03-07T06:46:39 | 2017-03-07T06:46:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,589 | py | # Time: O(k + n)
# Space: O(1)
#
# Assume you have an array of length n initialized with
# all 0's and are given k update operations.
#
# Each operation is represented as a triplet:
# [startIndex, endIndex, inc] which increments each element of subarray
# A[startIndex ... endIndex] (startIndex and endIndex inclusive) with inc.
#
# Return the modified array after all k operations were executed.
#
# Example:
#
# Given:
#
# length = 5,
# updates = [
# [1, 3, 2],
# [2, 4, 3],
# [0, 2, -2]
# ]
#
# Output:
#
# [-2, 0, 3, 5, 3]
#
# Explanation:
#
# Initial state:
# [ 0, 0, 0, 0, 0 ]
#
# After applying operation [1, 3, 2]:
# [ 0, 2, 2, 2, 0 ]
#
# After applying operation [2, 4, 3]:
# [ 0, 2, 5, 5, 3 ]
#
# After applying operation [0, 2, -2]:
# [-2, 0, 3, 5, 3 ]
#
# Hint:
#
# Thinking of using advanced data structures? You are thinking it too complicated.
# For each update operation, do you really need to update all elements between i and j?
# Update only the first and end element is sufficient.
# The optimal time complexity is O(k + n) and uses O(1) extra space.
class Solution(object):
def getModifiedArray(self, length, updates):
"""
:type length: int
:type updates: List[List[int]]
:rtype: List[int]
"""
result = [0] * length
for update in updates:
result[update[0]] += update[2]
if update[1]+1 < length:
result[update[1]+1] -= update[2]
for i in xrange(1, length):
result[i] += result[i - 1]
return result
| [
"[email protected]"
] | |
0638fe82fed7e47502334e6a6fc971322690549a | 8f73125d816f3b44b03159dba272e095f37c1f0c | /scripts/viewhdf.py | dc40b917231106ad0c60945affdc86ff17e43524 | [] | no_license | tarah28/nanopore | 356b218e5ca3dfb98e4dd7232d8f1c6303f899d1 | ec716ee15ab26d7bf33b7f7352ab8cad1c369ae8 | refs/heads/master | 2021-05-27T06:21:51.958938 | 2014-09-10T11:36:07 | 2014-09-10T11:36:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | #!/usr/bin/python
import h5view
import sys
for fn in sys.argv[1:]:
with h5view.open(fn) as f:
print(f)
| [
"[email protected]"
] | |
a0641d0c62f8c6e39c81e1f9266c4710026b35aa | f3a7b2b71af1ca16e87fcc2c6063670d056f59c6 | /libs/configs_old/MLT/gwd/cfgs_res101_mlt_v1.py | d7bff74ce9ce7235a49aac2edc2032e4000f6281 | [
"Apache-2.0"
] | permissive | DLPerf/RotationDetection | 3af165ab00ea6d034774a7289a375b90e4079df4 | c5d3e604ace76d7996bc461920854b2c79d8c023 | refs/heads/main | 2023-07-16T06:01:42.496723 | 2021-08-28T03:17:39 | 2021-08-28T03:17:39 | 400,690,285 | 0 | 0 | Apache-2.0 | 2021-08-28T03:16:55 | 2021-08-28T03:16:55 | null | UTF-8 | Python | false | false | 3,449 | py | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import os
import tensorflow as tf
import math
from dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo
"""
FLOPs: 737874381; Trainable params: 51265150
trainval/test + sqrt tau=2
"""
# ------------------------------------------------
VERSION = 'RetinaNet_MLT_GWD_1x_20201222'
NET_NAME = 'resnet101_v1d' # 'MobilenetV2'
# ---------------------------------------- System
ROOT_PATH = os.path.abspath('../../')
print(20*"++--")
print(ROOT_PATH)
GPU_GROUP = "0"
NUM_GPU = len(GPU_GROUP.strip().split(','))
SHOW_TRAIN_INFO_INTE = 20
SMRY_ITER = 200
SAVE_WEIGHTS_INTE = 10000 * 2
SUMMARY_PATH = os.path.join(ROOT_PATH, 'output/summary')
TEST_SAVE_PATH = os.path.join(ROOT_PATH, 'tools/test_result')
pretrain_zoo = PretrainModelZoo()
PRETRAINED_CKPT = pretrain_zoo.pretrain_weight_path(NET_NAME, ROOT_PATH)
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
EVALUATE_R_DIR = os.path.join(ROOT_PATH, 'output/evaluate_result_pickle/')
# ------------------------------------------ Train and test
RESTORE_FROM_RPN = False
FIXED_BLOCKS = 1 # allow 0~3
FREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone
USE_07_METRIC = True
ADD_BOX_IN_TENSORBOARD = True
MUTILPY_BIAS_GRADIENT = 2.0 # if None, will not multipy
GRADIENT_CLIPPING_BY_NORM = 10.0 # if None, will not clip
CLS_WEIGHT = 1.0
REG_WEIGHT = 1.0
ANGLE_WEIGHT = 0.5
REG_LOSS_MODE = 2
ALPHA = 1.0
BETA = 1.0
BATCH_SIZE = 1
EPSILON = 1e-5
MOMENTUM = 0.9
LR = 1e-3
DECAY_STEP = [SAVE_WEIGHTS_INTE*12, SAVE_WEIGHTS_INTE*16, SAVE_WEIGHTS_INTE*20]
MAX_ITERATION = SAVE_WEIGHTS_INTE*20
WARM_SETP = int(1.0 / 4.0 * SAVE_WEIGHTS_INTE)
# -------------------------------------------- Dataset
DATASET_NAME = 'MLT' # 'pascal', 'coco'
PIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
PIXEL_MEAN_ = [0.485, 0.456, 0.406]
PIXEL_STD = [0.229, 0.224, 0.225] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
IMG_SHORT_SIDE_LEN = [800, 600, 1000, 1200]
IMG_MAX_LENGTH = 1500
CLASS_NUM = 1
IMG_ROTATE = True
RGB2GRAY = True
VERTICAL_FLIP = True
HORIZONTAL_FLIP = True
IMAGE_PYRAMID = True
# --------------------------------------------- Network
SUBNETS_WEIGHTS_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=None)
SUBNETS_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)
PROBABILITY = 0.01
FINAL_CONV_BIAS_INITIALIZER = tf.constant_initializer(value=-math.log((1.0 - PROBABILITY) / PROBABILITY))
WEIGHT_DECAY = 1e-4
USE_GN = False
FPN_CHANNEL = 256
NUM_SUBNET_CONV = 4
FPN_MODE = 'fpn'
# --------------------------------------------- Anchor
LEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_STRIDE = [8, 16, 32, 64, 128]
ANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]
ANCHOR_RATIOS = [1, 1 / 2, 2., 1 / 3., 3., 5., 1 / 5.]
ANCHOR_ANGLES = [-90, -75, -60, -45, -30, -15]
ANCHOR_SCALE_FACTORS = None
USE_CENTER_OFFSET = True
METHOD = 'H'
USE_ANGLE_COND = False
ANGLE_RANGE = 90 # or 180
# -------------------------------------------- Head
SHARE_NET = True
USE_P5 = True
IOU_POSITIVE_THRESHOLD = 0.5
IOU_NEGATIVE_THRESHOLD = 0.4
NMS = True
NMS_IOU_THRESHOLD = 0.1
MAXIMUM_DETECTIONS = 100
FILTERED_SCORE = 0.05
VIS_SCORE = 0.2
# -------------------------------------------- GWD
GWD_TAU = 2.0
GWD_FUNC = tf.sqrt
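# GWD_TAU and GWD_FUNC correspond to the "sqrt tau=2" variant noted in the
# module docstring above.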
| [
"[email protected]"
] | |
a4a41c511ad4e482fe95c4a61ab6d49518ec4964 | 2a3606551a4d850a7b4d6a4e08089c51108ef7be | /plugin.video.fanfilm/resources/lib/libraries/cleangenre.py | 855f071415fcee23e4105c7f8e444d07c12b232c | [
"GPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | rrosajp/filmkodi | a6bb1823f4ed45453c8b8e54ffbd6a7b49f44450 | 0162cde9ae25ddbf4a69330948714833ff2f78c9 | refs/heads/master | 2021-09-18T06:03:17.561062 | 2018-06-22T23:28:53 | 2018-06-22T23:28:53 | 234,768,781 | 1 | 0 | Apache-2.0 | 2021-06-03T20:33:07 | 2020-01-18T17:11:57 | null | UTF-8 | Python | false | false | 42,826 | py | # -*- coding: utf-8 -*-
'''
FanFilm Add-on
Copyright (C) 2016 mrknow
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
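# Translates the English genre names contained in the string `i` into the
# language given by the two-letter code `lang`; genres without an entry and
# unknown language codes are returned unchanged.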
def lang(i, lang):
if lang == 'bg':
i = i.replace('Action', u'\u0415\u043a\u0448\u044a\u043d')
i = i.replace('Adventure', u'\u041f\u0440\u0438\u043a\u043b\u044e\u0447\u0435\u043d\u0438\u0435')
i = i.replace('Animation', u'\u0410\u043d\u0438\u043c\u0430\u0446\u0438\u044f')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u041a\u043e\u043c\u0435\u0434\u0438\u044f')
i = i.replace('Crime', u'\u041a\u0440\u0438\u043c\u0438\u043d\u0430\u043b\u0435\u043d')
i = i.replace('Documentary', u'\u0414\u043e\u043a\u0443\u043c\u0435\u043d\u0442\u0430\u043b\u0435\u043d')
i = i.replace('Drama', u'\u0414\u0440\u0430\u043c\u0430')
i = i.replace('Family', u'\u0421\u0435\u043c\u0435\u0435\u043d')
i = i.replace('Fantasy', u'\u0424\u0435\u043d\u0442\u044a\u0437\u0438')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0418\u0441\u0442\u043e\u0440\u0438\u0447\u0435\u0441\u043a\u0438')
i = i.replace('Horror', u'\u0423\u0436\u0430\u0441')
i = i.replace('Music ', u'\u041c\u0443\u0437\u0438\u043a\u0430')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u041c\u0438\u0441\u0442\u0435\u0440\u0438\u044f')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0420\u043e\u043c\u0430\u043d\u0441')
i = i.replace('Science Fiction', u'\u041d\u0430\u0443\u0447\u043d\u0430\u002d\u0444\u0430\u043d\u0442\u0430\u0441\u0442\u0438\u043a\u0430')
i = i.replace('Sci-Fi', u'\u041d\u0430\u0443\u0447\u043d\u0430\u002d\u0444\u0430\u043d\u0442\u0430\u0441\u0442\u0438\u043a\u0430')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0422\u0440\u0438\u043b\u044a\u0440')
i = i.replace('War', u'\u0412\u043e\u0435\u043d\u0435\u043d')
i = i.replace('Western', u'\u0423\u0435\u0441\u0442\u044a\u0440\u043d')
elif lang == 'cs':
i = i.replace('Action', u'\u0041\u006b\u010d\u006e\u00ed')
i = i.replace('Adventure', u'\u0044\u006f\u0062\u0072\u006f\u0064\u0072\u0075\u017e\u006e\u00fd')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u006f\u0076\u0061\u006e\u00fd')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u004b\u006f\u006d\u0065\u0064\u0069\u0065')
i = i.replace('Crime', u'\u004b\u0072\u0069\u006d\u0069')
i = i.replace('Documentary', u'\u0044\u006f\u006b\u0075\u006d\u0065\u006e\u0074\u00e1\u0072\u006e\u00ed')
i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0061')
i = i.replace('Family', u'\u0052\u006f\u0064\u0069\u006e\u006e\u00fd')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0079')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0048\u0069\u0073\u0074\u006f\u0072\u0069\u0063\u006b\u00fd')
i = i.replace('Horror', u'\u0048\u006f\u0072\u006f\u0072')
i = i.replace('Music ', u'\u0048\u0075\u0064\u0065\u0062\u006e\u00ed')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u004d\u0079\u0073\u0074\u0065\u0072\u0069\u00f3\u007a\u006e\u00ed')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0074\u0069\u0063\u006b\u00fd')
i = i.replace('Science Fiction', u'\u0056\u011b\u0064\u0065\u0063\u006b\u006f\u0066\u0061\u006e\u0074\u0061\u0073\u0074\u0069\u0063\u006b\u00fd')
i = i.replace('Sci-Fi', u'\u0056\u011b\u0064\u0065\u0063\u006b\u006f\u0066\u0061\u006e\u0074\u0061\u0073\u0074\u0069\u0063\u006b\u00fd')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072')
i = i.replace('War', u'\u0056\u00e1\u006c\u0065\u010d\u006e\u00fd')
i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e')
elif lang == 'da':
i = i.replace('Action', u'\u0041\u0063\u0074\u0069\u006f\u006e')
i = i.replace('Adventure', u'\u0045\u0076\u0065\u006e\u0074\u0079\u0072')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u0074\u0069\u006f\u006e')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u004b\u006f\u006d\u0065\u0064\u0069\u0065')
i = i.replace('Crime', u'\u004b\u0072\u0069\u006d\u0069\u006e\u0061\u006c\u0069\u0074\u0065\u0074')
i = i.replace('Documentary', u'\u0044\u006f\u0063\u0075\u006d\u0065\u006e\u0074\u0061\u0072\u0079')
i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0061')
i = i.replace('Family', u'\u0046\u0061\u006d\u0069\u006c\u0069\u0065')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0079')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0048\u0069\u0073\u0074\u006f\u0072\u0069\u0065 ')
i = i.replace('Horror', u'\u0047\u0079\u0073\u0065\u0072')
i = i.replace('Music ', u'\u004d\u0075\u0073\u0069\u006b')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u004d\u0079\u0073\u0074\u0065\u0072\u0069\u0075\u006d')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0074\u0069\u006b')
i = i.replace('Science Fiction', u'\u0053\u0063\u0069\u002d\u0066\u0069')
i = i.replace('Sci-Fi', u'\u0053\u0063\u0069\u002d\u0066\u0069')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072')
i = i.replace('War', u'\u004b\u0072\u0069\u0067')
i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e')
elif lang == 'de':
i = i.replace('Action', u'\u0041\u0063\u0074\u0069\u006f\u006e')
i = i.replace('Adventure', u'\u0041\u0062\u0065\u006e\u0074\u0065\u0075\u0065\u0072')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u0074\u0069\u006f\u006e')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u004b\u006f\u006d\u00f6\u0064\u0069\u0065')
i = i.replace('Crime', u'\u004b\u0072\u0069\u006d\u0069')
i = i.replace('Documentary', u'\u0044\u006f\u006b\u0075\u006d\u0065\u006e\u0074\u0061\u0072\u0066\u0069\u006c\u006d')
i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0061')
i = i.replace('Family', u'\u0046\u0061\u006d\u0069\u006c\u0069\u0065')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0079')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0048\u0069\u0073\u0074\u006f\u0072\u0069\u0065')
i = i.replace('Horror', u'\u0048\u006f\u0072\u0072\u006f\u0072')
i = i.replace('Music ', u'\u004d\u0075\u0073\u0069\u006b')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u004d\u0079\u0073\u0074\u0065\u0072\u0079')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u004c\u006f\u0076\u0065\u0073\u0074\u006f\u0072\u0079')
i = i.replace('Science Fiction', u'\u0053\u0063\u0069\u0065\u006e\u0063\u0065 \u0046\u0069\u0063\u0074\u0069\u006f\u006e')
i = i.replace('Sci-Fi', u'\u0053\u0063\u0069\u0065\u006e\u0063\u0065 \u0046\u0069\u0063\u0074\u0069\u006f\u006e')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072')
i = i.replace('War', u'\u004b\u0072\u0069\u0065\u0067\u0073\u0066\u0069\u006c\u006d')
i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e')
elif lang == 'el':
i = i.replace('Action', u'\u0394\u03c1\u03ac\u03c3\u03b7')
i = i.replace('Adventure', u'\u03a0\u03b5\u03c1\u03b9\u03c0\u03ad\u03c4\u03b5\u03b9\u03b1')
i = i.replace('Animation', u'\u039a\u03b9\u03bd\u03bf\u03cd\u03bc\u03b5\u03bd\u03b1 \u03a3\u03c7\u03ad\u03b4\u03b9\u03b1')
i = i.replace('Biography', u'\u0392\u03b9\u03bf\u03b3\u03c1\u03b1\u03c6\u03b9\u03ba\u03ae')
i = i.replace('Comedy', u'\u039a\u03c9\u03bc\u03c9\u03b4\u03af\u03b1')
i = i.replace('Crime', u'\u0391\u03c3\u03c4\u03c5\u03bd\u03bf\u03bc\u03b9\u03ba\u03ae')
i = i.replace('Documentary', u'\u039d\u03c4\u03bf\u03ba\u03c5\u03bc\u03b1\u03bd\u03c4\u03ad\u03c1')
i = i.replace('Drama', u'\u0394\u03c1\u03ac\u03bc\u03b1')
i = i.replace('Family', u'\u039f\u03b9\u03ba\u03bf\u03b3\u03b5\u03bd\u03b5\u03b9\u03b1\u03ba\u03ae')
i = i.replace('Fantasy', u'\u03a6\u03b1\u03bd\u03c4\u03b1\u03c3\u03af\u03b1\u03c2')
i = i.replace('Game-Show', u'\u03a4\u03b7\u03bb\u03b5\u03c0\u03b1\u03b9\u03c7\u03bd\u03af\u03b4\u03b9')
i = i.replace('History', u'\u0399\u03c3\u03c4\u03bf\u03c1\u03b9\u03ba\u03ae')
i = i.replace('Horror', u'\u03a4\u03c1\u03cc\u03bc\u03bf\u03c5')
i = i.replace('Music ', u'\u039c\u03bf\u03c5\u03c3\u03b9\u03ba\u03ae')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u039c\u03c5\u03c3\u03c4\u03b7\u03c1\u03af\u03bf\u03c5')
i = i.replace('News', u'\u0395\u03b9\u03b4\u03ae\u03c3\u03b5\u03b9\u03c2')
i = i.replace('Reality-TV', u'\u03a1\u03b9\u03ac\u03bb\u03b9\u03c4\u03c5')
i = i.replace('Romance', u'\u03a1\u03bf\u03bc\u03b1\u03bd\u03c4\u03b9\u03ba\u03ae')
i = i.replace('Science Fiction', u'\u0395\u03c0\u002e \u03a6\u03b1\u03bd\u03c4\u03b1\u03c3\u03af\u03b1\u03c2')
i = i.replace('Sci-Fi', u'\u0395\u03c0\u002e \u03a6\u03b1\u03bd\u03c4\u03b1\u03c3\u03af\u03b1\u03c2')
i = i.replace('Sport', u'\u0391\u03b8\u03bb\u03b7\u03c4\u03b9\u03ba\u03ae')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0398\u03c1\u03af\u03bb\u03b5\u03c1')
i = i.replace('War', u'\u03a0\u03bf\u03bb\u03b5\u03bc\u03b9\u03ba\u03ae')
i = i.replace('Western', u'\u0393\u03bf\u03c5\u03ad\u03c3\u03c4\u03b5\u03c1\u03bd')
elif lang == 'es':
i = i.replace('Action', u'\u0041\u0063\u0063\u0069\u00f3\u006e')
i = i.replace('Adventure', u'\u0041\u0076\u0065\u006e\u0074\u0075\u0072\u0061')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u0063\u0069\u00f3\u006e')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u0043\u006f\u006d\u0065\u0064\u0069\u0061')
i = i.replace('Crime', u'\u0043\u0072\u0069\u006d\u0065\u006e')
i = i.replace('Documentary', u'\u0044\u006f\u0063\u0075\u006d\u0065\u006e\u0074\u0061\u006c')
i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0061')
i = i.replace('Family', u'\u0046\u0061\u006d\u0069\u006c\u0069\u0061')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u00ed\u0061')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0048\u0069\u0073\u0074\u006f\u0072\u0069\u0061')
i = i.replace('Horror', u'\u0054\u0065\u0072\u0072\u006f\u0072')
i = i.replace('Music ', u'\u004d\u00fa\u0073\u0069\u0063\u0061')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u004d\u0069\u0073\u0074\u0065\u0072\u0069\u006f')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0063\u0065')
i = i.replace('Science Fiction', u'\u0043\u0069\u0065\u006e\u0063\u0069\u0061 \u0066\u0069\u0063\u0063\u0069\u00f3\u006e')
i = i.replace('Sci-Fi', u'\u0043\u0069\u0065\u006e\u0063\u0069\u0061 \u0066\u0069\u0063\u0063\u0069\u00f3\u006e')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0053\u0075\u0073\u0070\u0065\u006e\u0073\u0065')
i = i.replace('War', u'\u0047\u0075\u0065\u0072\u0072\u0061')
i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e')
elif lang == 'fr':
i = i.replace('Action', u'\u0041\u0063\u0074\u0069\u006f\u006e')
i = i.replace('Adventure', u'\u0041\u0076\u0065\u006e\u0074\u0075\u0072\u0065')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u0074\u0069\u006f\u006e')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u0043\u006f\u006d\u00e9\u0064\u0069\u0065')
i = i.replace('Crime', u'\u0043\u0072\u0069\u006d\u0065')
i = i.replace('Documentary', u'\u0044\u006f\u0063\u0075\u006d\u0065\u006e\u0074\u0061\u0069\u0072\u0065')
i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0065')
i = i.replace('Family', u'\u0046\u0061\u006d\u0069\u006c\u0069\u0061\u006c')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0074\u0069\u0071\u0075\u0065')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0048\u0069\u0073\u0074\u006f\u0069\u0072\u0065')
i = i.replace('Horror', u'\u0048\u006f\u0072\u0072\u0065\u0075\u0072')
i = i.replace('Music ', u'\u004d\u0075\u0073\u0069\u0071\u0075\u0065')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u004d\u0079\u0073\u0074\u00e8\u0072\u0065')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0063\u0065')
i = i.replace('Science Fiction', u'\u0053\u0063\u0069\u0065\u006e\u0063\u0065\u002d\u0046\u0069\u0063\u0074\u0069\u006f\u006e')
i = i.replace('Sci-Fi', u'\u0053\u0063\u0069\u0065\u006e\u0063\u0065\u002d\u0046\u0069\u0063\u0074\u0069\u006f\u006e')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072')
i = i.replace('War', u'\u0047\u0075\u0065\u0072\u0072\u0065')
i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e')
elif lang == 'he':
i = i.replace('Action', u'\u05d0\u05e7\u05e9\u05df')
i = i.replace('Adventure', u'\u05d4\u05e8\u05e4\u05ea\u05e7\u05d0\u05d5\u05ea')
i = i.replace('Animation', u'\u05d0\u05e0\u05d9\u05de\u05e6\u05d9\u05d4')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u05e7\u05d5\u05de\u05d3\u05d9\u05d4')
i = i.replace('Crime', u'\u05e4\u05e9\u05e2')
i = i.replace('Documentary', u'\u05d3\u05d5\u05e7\u05d5\u05de\u05e0\u05d8\u05e8\u05d9')
i = i.replace('Drama', u'\u05d3\u05e8\u05de\u05d4')
i = i.replace('Family', u'\u05de\u05e9\u05e4\u05d7\u05d4')
i = i.replace('Fantasy', u'\u05e4\u05e0\u05d8\u05d6\u05d9\u05d4')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u05d4\u05e1\u05d8\u05d5\u05e8\u05d9\u05d4')
i = i.replace('Horror', u'\u05d0\u05d9\u05de\u05d4')
i = i.replace('Music ', u'\u05de\u05d5\u05e1\u05d9\u05e7\u05d4')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u05de\u05e1\u05ea\u05d5\u05e8\u05d9\u05df')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u05e8\u05d5\u05de\u05e0\u05d8\u05d9')
i = i.replace('Science Fiction', u'\u05de\u05d3\u05e2 \u05d1\u05d3\u05d9\u05d5\u05e0\u05d9')
i = i.replace('Sci-Fi', u'\u05de\u05d3\u05e2 \u05d1\u05d3\u05d9\u05d5\u05e0\u05d9')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u05de\u05d5\u05ea\u05d7\u05df')
i = i.replace('War', u'\u05de\u05dc\u05d7\u05de\u05d4')
i = i.replace('Western', u'\u05de\u05e2\u05e8\u05d1\u05d5\u05df')
elif lang == 'hu':
i = i.replace('Action', u'\u0041\u006b\u0063\u0069\u00f3')
i = i.replace('Adventure', u'\u004b\u0061\u006c\u0061\u006e\u0064')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u00e1\u0063\u0069\u00f3\u0073')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u0056\u00ed\u0067\u006a\u00e1\u0074\u00e9\u006b')
i = i.replace('Crime', u'\u0042\u0171\u006e\u00fc\u0067\u0079\u0069')
i = i.replace('Documentary', u'\u0044\u006f\u006b\u0075\u006d\u0065\u006e\u0074\u0075\u006d')
i = i.replace('Drama', u'\u0044\u0072\u00e1\u006d\u0061')
i = i.replace('Family', u'\u0043\u0073\u0061\u006c\u00e1\u0064\u0069')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0079')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0054\u00f6\u0072\u0074\u00e9\u006e\u0065\u006c\u006d\u0069')
i = i.replace('Horror', u'\u0048\u006f\u0072\u0072\u006f\u0072')
i = i.replace('Music ', u'\u005a\u0065\u006e\u0065\u0069')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u0052\u0065\u006a\u0074\u00e9\u006c\u0079')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0074\u0069\u006b\u0075\u0073')
i = i.replace('Science Fiction', u'\u0053\u0063\u0069\u002d\u0046\u0069')
i = i.replace('Sci-Fi', u'\u0053\u0063\u0069\u002d\u0046\u0069')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072')
i = i.replace('War', u'\u0048\u00e1\u0062\u006f\u0072\u00fa\u0073')
i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e')
elif lang == 'it':
i = i.replace('Action', u'\u0041\u007a\u0069\u006f\u006e\u0065')
i = i.replace('Adventure', u'\u0041\u0076\u0076\u0065\u006e\u0074\u0075\u0072\u0061')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u007a\u0069\u006f\u006e\u0065')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u0043\u006f\u006d\u006d\u0065\u0064\u0069\u0061')
i = i.replace('Crime', u'\u0043\u0072\u0069\u006d\u0065')
i = i.replace('Documentary', u'\u0044\u006f\u0063\u0075\u006d\u0065\u006e\u0074\u0061\u0072\u0069\u006f')
i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u006d\u0061')
i = i.replace('Family', u'\u0046\u0061\u006d\u0069\u0067\u006c\u0069\u0061')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0079')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0053\u0074\u006f\u0072\u0069\u0061')
i = i.replace('Horror', u'\u0048\u006f\u0072\u0072\u006f\u0072')
i = i.replace('Music ', u'\u004d\u0075\u0073\u0069\u0063\u0061')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u004d\u0069\u0073\u0074\u0065\u0072\u006f')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0063\u0065')
i = i.replace('Science Fiction', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0063\u0069\u0065\u006e\u007a\u0061')
i = i.replace('Sci-Fi', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0063\u0069\u0065\u006e\u007a\u0061')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072')
i = i.replace('War', u'\u0047\u0075\u0065\u0072\u0072\u0061')
i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e')
elif lang == 'ja':
i = i.replace('Action', u'\u30a2\u30af\u30b7\u30e7\u30f3')
i = i.replace('Adventure', u'\u30a2\u30c9\u30d9\u30f3\u30c1\u30e3\u30fc')
i = i.replace('Animation', u'\u30a2\u30cb\u30e1\u30fc\u30b7\u30e7\u30f3')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u30b3\u30e1\u30c7\u30a3')
i = i.replace('Crime', u'\u72af\u7f6a')
i = i.replace('Documentary', u'\u30c9\u30ad\u30e5\u30e1\u30f3\u30bf\u30ea\u30fc')
i = i.replace('Drama', u'\u30c9\u30e9\u30de')
i = i.replace('Family', u'\u30d5\u30a1\u30df\u30ea\u30fc')
i = i.replace('Fantasy', u'\u30d5\u30a1\u30f3\u30bf\u30b8\u30fc')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u5c65\u6b74')
i = i.replace('Horror', u'\u30db\u30e9\u30fc')
i = i.replace('Music ', u'\u97f3\u697d')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u8b0e')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u30ed\u30de\u30f3\u30b9')
i = i.replace('Science Fiction', u'\u30b5\u30a4\u30a8\u30f3\u30b9\u30d5\u30a3\u30af\u30b7\u30e7\u30f3')
i = i.replace('Sci-Fi', u'\u30b5\u30a4\u30a8\u30f3\u30b9\u30d5\u30a3\u30af\u30b7\u30e7\u30f3')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u30b9\u30ea\u30e9\u30fc')
i = i.replace('War', u'\u6226\u4e89')
i = i.replace('Western', u'\u897f\u6d0b')
elif lang == 'ko':
i = i.replace('Action', u'\uc561\uc158')
i = i.replace('Adventure', u'\ubaa8\ud5d8')
i = i.replace('Animation', u'\uc560\ub2c8\uba54\uc774\uc158')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\ucf54\ubbf8\ub514')
i = i.replace('Crime', u'\ubc94\uc8c4')
i = i.replace('Documentary', u'\ub2e4\ud050\uba58\ud130\ub9ac')
i = i.replace('Drama', u'\ub4dc\ub77c\ub9c8')
i = i.replace('Family', u'\uac00\uc871')
i = i.replace('Fantasy', u'\ud310\ud0c0\uc9c0')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\uc5ed\uc0ac')
i = i.replace('Horror', u'\uacf5\ud3ec')
i = i.replace('Music ', u'\uc74c\uc545')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\ubbf8\uc2a4\ud130\ub9ac')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\ub85c\ub9e8\uc2a4')
i = i.replace('Science Fiction', u'\u0053\u0046')
i = i.replace('Sci-Fi', u'\u0053\u0046')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\uc2a4\ub9b4\ub7ec')
i = i.replace('War', u'\uc804\uc7c1')
i = i.replace('Western', u'\uc11c\ubd80')
elif lang == 'nl':
i = i.replace('Action', u'\u0041\u0063\u0074\u0069\u0065')
i = i.replace('Adventure', u'\u0041\u0076\u006f\u006e\u0074\u0075\u0075\u0072')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u0074\u0069\u0065')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u004b\u006f\u006d\u0065\u0064\u0069\u0065')
i = i.replace('Crime', u'\u004d\u0069\u0073\u0064\u0061\u0061\u0064')
i = i.replace('Documentary', u'\u0044\u006f\u0063\u0075\u006d\u0065\u006e\u0074\u0061\u0069\u0072\u0065')
i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0061')
i = i.replace('Family', u'\u0046\u0061\u006d\u0069\u006c\u0069\u0065')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0069\u0065')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0048\u0069\u0073\u0074\u006f\u0072\u0069\u0073\u0063\u0068')
i = i.replace('Horror', u'\u0048\u006f\u0072\u0072\u006f\u0072')
i = i.replace('Music ', u'\u004d\u0075\u007a\u0069\u0065\u006b')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u004d\u0079\u0073\u0074\u0065\u0072\u0069\u0065')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0074\u0069\u0065\u006b')
i = i.replace('Science Fiction', u'\u0053\u0063\u0069\u0065\u006e\u0063\u0065\u0066\u0069\u0063\u0074\u0069\u006f\u006e')
i = i.replace('Sci-Fi', u'\u0053\u0063\u0069\u0065\u006e\u0063\u0065\u0066\u0069\u0063\u0074\u0069\u006f\u006e')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072')
i = i.replace('War', u'\u004f\u006f\u0072\u006c\u006f\u0067')
i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e')
elif lang == 'pl':
i = i.replace('Action', u'\u0041\u006b\u0063\u006a\u0061')
i = i.replace('Adventure', u'\u0050\u0072\u007a\u0079\u0067\u006f\u0064\u006f\u0077\u0079')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u0063\u006a\u0061')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u004b\u006f\u006d\u0065\u0064\u0069\u0061')
i = i.replace('Crime', u'\u004b\u0072\u0079\u006d\u0069\u006e\u0061\u0142')
i = i.replace('Documentary', u'\u0044\u006f\u006b\u0075\u006d\u0065\u006e\u0074\u0061\u006c\u006e\u0079')
i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0061\u0074')
i = i.replace('Family', u'\u0046\u0061\u006d\u0069\u006c\u0069\u006a\u006e\u0079')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0079')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0048\u0069\u0073\u0074\u006f\u0072\u0079\u0063\u007a\u006e\u0079')
i = i.replace('Horror', u'\u0048\u006f\u0072\u0072\u006f\u0072')
i = i.replace('Music ', u'\u004d\u0075\u007a\u0079\u0063\u007a\u006e\u0079')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u0054\u0061\u006a\u0065\u006d\u006e\u0069\u0063\u0061')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0073')
i = i.replace('Science Fiction', u'\u0053\u0063\u0069\u002d\u0046\u0069')
i = i.replace('Sci-Fi', u'\u0053\u0063\u0069\u002d\u0046\u0069')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072')
i = i.replace('War', u'\u0057\u006f\u006a\u0065\u006e\u006e\u0079')
i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e')
elif lang == 'pt':
i = i.replace('Action', u'\u0041\u00e7\u00e3\u006f')
i = i.replace('Adventure', u'\u0041\u0076\u0065\u006e\u0074\u0075\u0072\u0061')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u00e7\u00e3\u006f')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u0043\u006f\u006d\u00e9\u0064\u0069\u0061')
i = i.replace('Crime', u'\u0043\u0072\u0069\u006d\u0065')
i = i.replace('Documentary', u'\u0044\u006f\u0063\u0075\u006d\u0065\u006e\u0074\u00e1\u0072\u0069\u006f')
i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0061')
i = i.replace('Family', u'\u0046\u0061\u006d\u00ed\u006c\u0069\u0061')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0069\u0061')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0048\u0069\u0073\u0074\u00f3\u0072\u0069\u0061')
i = i.replace('Horror', u'\u0054\u0065\u0072\u0072\u006f\u0072')
i = i.replace('Music ', u'\u004d\u00fa\u0073\u0069\u0063\u0061')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u004d\u0069\u0073\u0074\u00e9\u0072\u0069\u006f')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0063\u0065')
i = i.replace('Science Fiction', u'\u0046\u0069\u0063\u00e7\u00e3\u006f \u0063\u0069\u0065\u006e\u0074\u00ed\u0066\u0069\u0063\u0061')
i = i.replace('Sci-Fi', u'\u0046\u0069\u0063\u00e7\u00e3\u006f \u0063\u0069\u0065\u006e\u0074\u00ed\u0066\u0069\u0063\u0061')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072')
i = i.replace('War', u'\u0047\u0075\u0065\u0072\u0072\u0061')
i = i.replace('Western', u'\u0046\u0061\u0072\u006f\u0065\u0073\u0074\u0065')
elif lang == 'ro':
i = i.replace('Action', u'\u0041\u0063\u021b\u0069\u0075\u006e\u0065')
i = i.replace('Adventure', u'\u0041\u0076\u0065\u006e\u0074\u0075\u0072\u0069')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u0163\u0069\u0065')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u0043\u006f\u006d\u0065\u0064\u0069\u0065')
i = i.replace('Crime', u'\u0043\u0072\u0069\u006d\u0103')
i = i.replace('Documentary', u'\u0044\u006f\u0063\u0075\u006d\u0065\u006e\u0074\u0061\u0072')
i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0103')
i = i.replace('Family', u'\u0046\u0061\u006d\u0069\u006c\u0069\u0065')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0079')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0049\u0073\u0074\u006f\u0072\u0069\u0063')
i = i.replace('Horror', u'\u0048\u006f\u0072\u0072\u006f\u0072')
i = i.replace('Music ', u'\u004d\u0075\u007a\u0069\u0063\u0103')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u004d\u0069\u0073\u0074\u0065\u0072')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0074\u0069\u0063')
i = i.replace('Science Fiction', u'\u0053\u0046')
i = i.replace('Sci-Fi', u'\u0053\u0046')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072')
i = i.replace('War', u'\u0052\u0103\u007a\u0062\u006f\u0069')
i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e')
elif lang == 'ru':
i = i.replace('Action', u'\u0431\u043e\u0435\u0432\u0438\u043a')
i = i.replace('Adventure', u'\u043f\u0440\u0438\u043a\u043b\u044e\u0447\u0435\u043d\u0438\u044f')
i = i.replace('Animation', u'\u043c\u0443\u043b\u044c\u0442\u0444\u0438\u043b\u044c\u043c')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u043a\u043e\u043c\u0435\u0434\u0438\u044f')
i = i.replace('Crime', u'\u043a\u0440\u0438\u043c\u0438\u043d\u0430\u043b')
i = i.replace('Documentary', u'\u0434\u043e\u043a\u0443\u043c\u0435\u043d\u0442\u0430\u043b\u044c\u043d\u044b\u0439')
i = i.replace('Drama', u'\u0434\u0440\u0430\u043c\u0430')
i = i.replace('Family', u'\u0441\u0435\u043c\u0435\u0439\u043d\u044b\u0439')
i = i.replace('Fantasy', u'\u0444\u044d\u043d\u0442\u0435\u0437\u0438')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0438\u0441\u0442\u043e\u0440\u0438\u044f')
i = i.replace('Horror', u'\u0443\u0436\u0430\u0441\u044b')
i = i.replace('Music ', u'\u043c\u0443\u0437\u044b\u043a\u0430')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u0434\u0435\u0442\u0435\u043a\u0442\u0438\u0432')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u043c\u0435\u043b\u043e\u0434\u0440\u0430\u043c\u0430')
i = i.replace('Science Fiction', u'\u0444\u0430\u043d\u0442\u0430\u0441\u0442\u0438\u043a\u0430')
i = i.replace('Sci-Fi', u'\u0444\u0430\u043d\u0442\u0430\u0441\u0442\u0438\u043a\u0430')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0442\u0440\u0438\u043b\u043b\u0435\u0440')
i = i.replace('War', u'\u0432\u043e\u0435\u043d\u043d\u044b\u0439')
i = i.replace('Western', u'\u0432\u0435\u0441\u0442\u0435\u0440\u043d')
elif lang == 'sl':
i = i.replace('Action', u'\u0041\u006b\u0063\u0069\u006a\u0061')
i = i.replace('Adventure', u'\u0041\u0076\u0061\u006e\u0074\u0075\u0072\u0061')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u0063\u0069\u006a\u0061')
i = i.replace('Biography', u'Biography')
        i = i.replace('Comedy', u'\u004b\u006f\u006d\u0065\u0064\u0069\u006a\u0061')
        i = i.replace('Crime', u'\u004b\u0072\u0069\u006d\u0069\u006e\u0061\u006c\u006e\u0069')
i = i.replace('Documentary', u'\u0044\u006f\u006b\u0075\u006d\u0065\u006e\u0074\u0061\u0072\u006e\u0069')
        i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0061')
i = i.replace('Family', u'\u0044\u0072\u0075\u017e\u0069\u006e\u0073\u006b\u0069')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0074\u0069\u006b\u0061')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u005a\u0067\u006f\u0064\u006f\u0076\u0069\u006e\u0073\u006b\u0069')
i = i.replace('Horror', u'\u0047\u0072\u006f\u007a\u006c\u006a\u0069\u0076\u006b\u0061')
i = i.replace('Music ', u'\u0047\u006c\u0061\u007a\u0062\u0065\u006e\u0069')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u004d\u0069\u0073\u0074\u0065\u0072\u0069\u006a\u0061')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0074\u0069\u006b\u0061')
i = i.replace('Science Fiction', u'\u005a\u006e\u0061\u006e\u0073\u0074\u0076\u0065\u006e\u0061 \u0066\u0061\u006e\u0074\u0061\u0073\u0074\u0069\u006b\u0061')
i = i.replace('Sci-Fi', u'\u005a\u006e\u0061\u006e\u0073\u0074\u0076\u0065\u006e\u0061 \u0066\u0061\u006e\u0074\u0061\u0073\u0074\u0069\u006b\u0061')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
        i = i.replace('Thriller', u'\u0054\u0072\u0069\u006c\u0065\u0072')
i = i.replace('War', u'\u0056\u006f\u006a\u006e\u006f\u002d\u0070\u006f\u006c\u0069\u0074\u0069\u010d\u006e\u0069')
i = i.replace('Western', u'\u0057\u0065\u0073\u0074\u0065\u0072\u006e')
elif lang == 'sr':
i = i.replace('Action', u'\u0410\u043a\u0446\u0438\u043e\u043d\u0438')
i = i.replace('Adventure', u'\u0410\u0432\u0430\u043d\u0442\u0443\u0440\u0438\u0441\u0442\u0438\u0447\u043a\u0438')
i = i.replace('Animation', u'\u0426\u0440\u0442\u0430\u043d\u0438')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u041a\u043e\u043c\u0435\u0434\u0438\u0458\u0430')
i = i.replace('Crime', u'\u041a\u0440\u0438\u043c\u0438')
i = i.replace('Documentary', u'\u0414\u043e\u043a\u0443\u043c\u0435\u043d\u0442\u0430\u0440\u043d\u0438')
i = i.replace('Drama', u'\u0414\u0440\u0430\u043c\u0430')
i = i.replace('Family', u'\u041f\u043e\u0440\u043e\u0434\u0438\u0447\u043d\u0438')
i = i.replace('Fantasy', u'\u0424\u0430\u043d\u0442\u0430\u0441\u0442\u0438\u043a\u0430')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0418\u0441\u0442\u043e\u0440\u0438\u0458\u0441\u043a\u0438')
i = i.replace('Horror', u'\u0425\u043e\u0440\u043e\u0440')
i = i.replace('Music ', u'\u041c\u0443\u0437\u0438\u0447\u043a\u0438')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u041c\u0438\u0441\u0442\u0435\u0440\u0438\u0458\u0430')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0409\u0443\u0431\u0430\u0432\u043d\u0438')
i = i.replace('Science Fiction', u'\u041d\u0430\u0443\u0447\u043d\u0430 \u0444\u0430\u043d\u0442\u0430\u0441\u0442\u0438\u043a\u0430')
i = i.replace('Sci-Fi', u'\u041d\u0430\u0443\u0447\u043d\u0430 \u0444\u0430\u043d\u0442\u0430\u0441\u0442\u0438\u043a\u0430')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0422\u0440\u0438\u043b\u0435\u0440')
i = i.replace('War', u'\u0420\u0430\u0442\u043d\u0438')
i = i.replace('Western', u'\u0412\u0435\u0441\u0442\u0435\u0440\u043d')
elif lang == 'sv':
i = i.replace('Action', u'\u0041\u0063\u0074\u0069\u006f\u006e')
i = i.replace('Adventure', u'\u00c4\u0076\u0065\u006e\u0074\u0079\u0072')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0065\u0072\u0061\u0074')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u004b\u006f\u006d\u0065\u0064\u0069')
i = i.replace('Crime', u'\u004b\u0072\u0069\u006d\u0069\u006e\u0061\u006c')
i = i.replace('Documentary', u'\u0044\u006f\u006b\u0075\u006d\u0065\u006e\u0074\u00e4\u0072')
i = i.replace('Drama', u'\u0044\u0072\u0061\u006d\u0061')
i = i.replace('Family', u'\u0046\u0061\u006d\u0069\u006c\u006a')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0079')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0048\u0069\u0073\u0074\u006f\u0072\u0069\u0073\u006b')
i = i.replace('Horror', u'\u0053\u006b\u0072\u00e4\u0063\u006b')
i = i.replace('Music ', u'\u004d\u0075\u0073\u0069\u0063')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u004d\u0079\u0073\u0074\u0069\u006b')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0074\u0069\u006b')
i = i.replace('Science Fiction', u'\u0053\u0063\u0069\u0065\u006e\u0063\u0065 \u0046\u0069\u0063\u0074\u0069\u006f\u006e')
i = i.replace('Sci-Fi', u'\u0053\u0063\u0069\u0065\u006e\u0063\u0065 \u0046\u0069\u0063\u0074\u0069\u006f\u006e')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0054\u0068\u0072\u0069\u006c\u006c\u0065\u0072')
i = i.replace('War', u'\u004b\u0072\u0069\u0067')
i = i.replace('Western', u'\u0056\u00e4\u0073\u0074\u0065\u0072\u006e')
elif lang == 'tr':
i = i.replace('Action', u'\u0041\u006b\u0073\u0069\u0079\u006f\u006e')
i = i.replace('Adventure', u'\u004d\u0061\u0063\u0065\u0072\u0061')
i = i.replace('Animation', u'\u0041\u006e\u0069\u006d\u0061\u0073\u0079\u006f\u006e')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u004b\u006f\u006d\u0065\u0064\u0069')
i = i.replace('Crime', u'\u0053\u0075\u00e7')
i = i.replace('Documentary', u'\u0042\u0065\u006c\u0067\u0065\u0073\u0065\u006c')
i = i.replace('Drama', u'\u0044\u0072\u0061\u006d')
i = i.replace('Family', u'\u0041\u0069\u006c\u0065')
i = i.replace('Fantasy', u'\u0046\u0061\u006e\u0074\u0061\u0073\u0074\u0069\u006b')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u0054\u0061\u0072\u0069\u0068')
i = i.replace('Horror', u'\u004b\u006f\u0072\u006b\u0075')
i = i.replace('Music ', u'\u004d\u00fc\u007a\u0069\u006b')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u0047\u0069\u007a\u0065\u006d')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u0052\u006f\u006d\u0061\u006e\u0074\u0069\u006b')
i = i.replace('Science Fiction', u'\u0042\u0069\u006c\u0069\u006d\u002d\u004b\u0075\u0072\u0067\u0075')
i = i.replace('Sci-Fi', u'\u0042\u0069\u006c\u0069\u006d\u002d\u004b\u0075\u0072\u0067\u0075')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u0047\u0065\u0072\u0069\u006c\u0069\u006d')
i = i.replace('War', u'\u0053\u0061\u0076\u0061\u015f')
i = i.replace('Western', u'\u0056\u0061\u0068\u015f\u0069 \u0042\u0061\u0074\u0131')
elif lang == 'zh':
i = i.replace('Action', u'\u52a8\u4f5c')
i = i.replace('Adventure', u'\u5192\u9669')
i = i.replace('Animation', u'\u52a8\u753b')
i = i.replace('Biography', u'Biography')
i = i.replace('Comedy', u'\u559c\u5267')
i = i.replace('Crime', u'\u72af\u7f6a')
i = i.replace('Documentary', u'\u7eaa\u5f55')
i = i.replace('Drama', u'\u5267\u60c5')
i = i.replace('Family', u'\u5bb6\u5ead')
i = i.replace('Fantasy', u'\u5947\u5e7b')
i = i.replace('Game-Show', u'Game-Show')
i = i.replace('History', u'\u5386\u53f2')
i = i.replace('Horror', u'\u6050\u6016')
i = i.replace('Music ', u'\u97f3\u4e50')
i = i.replace('Musical', u'Musical')
i = i.replace('Mystery', u'\u60ac\u7591')
i = i.replace('News', u'News')
i = i.replace('Reality-TV', u'Reality-TV')
i = i.replace('Romance', u'\u7231\u60c5')
i = i.replace('Science Fiction', u'\u79d1\u5e7b')
i = i.replace('Sci-Fi', u'\u79d1\u5e7b')
i = i.replace('Sport', u'Sport')
i = i.replace('Talk-Show', u'Talk-Show')
i = i.replace('Thriller', u'\u60ca\u609a')
i = i.replace('War', u'\u6218\u4e89')
i = i.replace('Western', u'\u897f\u90e8')
return i
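# Illustrative example using the Polish table above:
#   lang('Comedy / Drama', 'pl')  ->  u'Komedia / Dramat'
# A language code without a branch here returns the input string untouched.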
| [
"[email protected]"
] | |
f942e1aeb559fdac152b1e65d28e59acc2f85863 | 7d172bc83bc61768a09cc97746715b8ec0e13ced | /catalog/migrations/0003_saleorder.py | 65f1ca2bf38a92aa7ef0c747114f6b61e4a61de3 | [] | no_license | shivam1111/jjuice | a3bcd7ee0ae6647056bdc62ff000ce6e6af27594 | 6a2669795ed4bb4495fda7869eeb221ed6535582 | refs/heads/master | 2020-04-12T05:01:27.981792 | 2018-11-08T13:00:49 | 2018-11-08T13:00:49 | 81,114,622 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-03-30 07:24
from __future__ import unicode_literals
from django.db import migrations, models
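# State-only migration: it records the SaleOrder model in Django's migration
# state, but because the model is unmanaged ('managed': False) the existing
# sale_order table is neither created nor altered when the migration runs.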
class Migration(migrations.Migration):
dependencies = [
('catalog', '0002_s3object'),
]
operations = [
migrations.CreateModel(
name='SaleOrder',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=100, verbose_name='Name')),
],
options={
'db_table': 'sale_order',
'managed': False,
},
),
]
| [
"[email protected]"
] | |
370bdedfa4af55d99c1d4c1db116c26d97c39037 | aea96aa406250c3a2a8f2799e6cbbad256c262c3 | /EG/reduce_str_float.py | f08662bdfd724cd99ca78245308d53339a3013ec | [] | no_license | xiaochuanjiejie/python_exercise | cb0ffaa4b7c961c8ca9847526c84ee6ba261620c | 710fa85fd2d7a17994081bdc5f8b5ff66b77416e | refs/heads/master | 2021-01-21T16:18:04.640093 | 2017-08-11T10:02:49 | 2017-08-11T10:02:49 | 95,403,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | #coding: utf-8
import math
from functools import reduce
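# Exercise: rebuild the float 1234.5678 from its string form without float():
# remember how many digits follow the decimal point (n), drop the point, fold
# the digit characters into an integer with map/reduce, then scale the result
# back down by 10**n.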
s = '1234.5678'
index = s.index('.')
n = len(s) - 1 - index
s = s.replace('.','')
print s
def chr2num(s):
return {'0':0,'1':1,'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9}[s]
print map(chr2num,s)
lst = map(chr2num,s)
# lst = list(map(chr2num,s))
# print lst
def cal(x,y):
return x * 10 + y
number = reduce(cal,lst)
print number
floatx = number / math.pow(10,n)
print floatx | [
"[email protected]"
] | |
50fa25dc930404d0d7198078e8b171b750953f5c | ca5c5becd8b57b4d77af3aa776b3c478ca962bf0 | /src/main/ODR_MC/branches/v3/upFun.py | 28e5d2a45a215fc146e7f361050eb44ad40d186e | [] | no_license | gerritholl/Harmonisation | 42118c46d093115ddd87fca094a9ac8881aede71 | 31b8bd5a0da8c6fc4a31453cf7801fcca25d4951 | refs/heads/master | 2021-09-07T03:07:03.843117 | 2017-12-18T13:57:38 | 2017-12-18T13:57:38 | 110,711,229 | 0 | 0 | null | 2017-11-14T15:53:19 | 2017-11-14T15:53:19 | null | UTF-8 | Python | false | false | 4,966 | py | """ FIDUCEO FCDR harmonisation
Author: Arta Dilo / NPL MM
Date created: 09-01-2017
Last update: 17-01-2017
Functions for propagating uncertainty to the calibrated radiance:
- a function to calculate first derivatives with respect to the measurement eq. variables,
- and first derivatives with respect to the calibration coefficients;
- a function for uncertainty propagation using the GUM law of propagation.
"""
import numpy as np
class avhrr(object):
''' The class contains a function for the measurement equation and functions
for calculating sensitivity coefficients to variables and parameters in the
measurement equation. '''
def __init__(self, nop, nos):
self.slabel = 'avhrr' # series label
self.nopairs = nop # number of sensor pairs in the series
self.nosensors = nos # number of sensors in the series
# set manually number of meas. eq. parameters; will change if needed
self.nocoefs = 4 # number of calibration coefficients
self.novars = 5 # number of meas. eq. variables
# AVHRR measurement equation
def measEq(self, X, a):
# add checks for number of calib. coefficients and variables
a0 = a[0] # AVHRR model coefficients
a1 = a[1]
a2 = a[2]
a3 = a[3]
CE = X[:,2] # Earth counts
Cs = X[:,0] # space counts
Cict = X[:,1] # ICT counts
Lict = X[:,3] # ICT radiance
To = X[:,4] # orbit temperature
# Earth radiance from Earth counts and calibration data
LE = a0 + (0.98514+a1)*Lict*(Cs-CE)/(Cs-Cict) + a2*(Cict-CE)*(Cs-CE)
LE += a3*To
return LE # return Earth radiance
''' Partial derivatives to measurement equation variables and coefficients;
these form the Jacobian row(s) for the LS in a pair sensor-reference. '''
def sensCoeff(self, X, a):
p = self.nocoefs # number of calibration coefficients
m = self.novars # number of harmonisation variables
a1 = a[1] # AVHRR model coefficients
a2 = a[2]
a3 = a[3]
CE = X[:,2] # Earth counts
Cs = X[:,0] # space counts
Cict = X[:,1] # ICT counts
Lict = X[:,3] # ICT radiance
To = X[:,4] # orbit temperature
# initialize array of sensitivity coefficients per data row
sens = np.zeros((CE.shape[0], p+m)) # should check it is 9
# partial derivatives to calibration coefficients
sens[:,0] = 1. # dLE / da0
sens[:,1] = Lict * (Cs - CE) / (Cs - Cict) # dLE / da1
sens[:,2] = (Cict - CE) * (Cs - CE) # dLE / da2
sens[:,3] = To # dLE / da3
# partial derivatives to meas.eq. variables
sens[:,4] = (0.98514+a1)*Lict*(CE-Cict)/(Cs-Cict)**2 + a2*(Cict-CE) # dLE/dCs
sens[:,5] = (0.98514+a1)*Lict*(Cs-CE)/(Cs-Cict)**2 + a2*(Cs-CE) # dLE/dCict
sens[:,6] = (0.98514+a1)*Lict/(Cict-Cs) + a2*(2*CE-Cs-Cict) # dLE/dCE
sens[:,7] = (0.98514+a1) * (Cs-CE) / (Cs-Cict) # dLE/dLict
sens[:,8] = a3 # dLE/dTo
return sens
''' Evaluate Earth radiance uncertainty from coefficients uncertainty '''
def va2ULE(self, X, a, Va):
p = self.nocoefs # number of calibration coefficients
sens = self.sensCoeff(X, a) # sensitivity coeffs for matchup obs.
# compute uncertainty from calibration coefficients
u2La = np.dot(sens[:, 0:p]**2, np.diag(Va)) # coeffs. variance component
corU = np.zeros((X[:,0].shape[0]))
for i in range(p-1):
for j in range(i+1,p):
corU[:] += 2 * sens[:,i] * sens[:,j] * Va[i,j]
u2La += corU # add coeffs' correlation component
return np.sqrt(u2La) # return radiance uncert. from coeffs uncertainty
''' Evaluate Earth radiance uncertainty via GUM law of propagation '''
def uncLE(self, X, a, uX, Va):
# assumes no correlation between X variables
p = self.nocoefs # number of calibration coefficients
m = self.novars # number of harmonisation variables
sens = self.sensCoeff(X, a) # sensitivity coeffs for matchup obs.
u2La = self.va2ULE(X, a, Va)**2 # uncertainty from calib. coefficients
# evaluate uncertainty from harmonisation data variables
u2LX = np.einsum('ij,ij->i', sens[:, p:p+m]**2, uX**2)
u2L = u2La + u2LX # total squared uncertainty of radiance
print "Ratio of coeffs' uncertainty component to total radiance uncertainty:"
print min(np.sqrt(u2La/u2L)), '-', max(np.sqrt(u2La/u2L))
return np.sqrt(u2L) # return uncertainty of Earth radiance
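# Illustrative usage sketch -- all numbers below are synthetic placeholders,
# not real AVHRR match-up data; the column order of X follows measEq/sensCoeff:
# [Cs, Cict, CE, Lict, To].
if __name__ == '__main__':
    sensor = avhrr(1, 2) # one sensor pair, two sensors
    X = np.array([[990., 450., 300., 95., 288.]]) # one match-up record
    uX = np.array([[1.5, 1.5, 1.5, 0.1, 0.5]]) # uncertainties of the X columns
    a = np.array([-10., 0.002, 1.0e-5, 0.01]) # calibration coefficients
    Va = np.diag([1.0e-2, 1.0e-6, 1.0e-12, 1.0e-6]) # coefficient covariance
    print sensor.measEq(X, a) # Earth radiance
    print sensor.uncLE(X, a, uX, Va) # propagated radiance uncertainty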
| [
"[email protected]"
] | |
11bc9407b651db938ff3f6333da6b4972a5e9ef3 | 35cf6fc79b8d6c335add8e55e0f4dca6f2816d1d | /Python_Study/第七模块学习/Day01/用pycharm创建Django/venv/Lib/site-packages/django/contrib/gis/utils/layermapping.py | 972c24ce95f0b6498ee2791c87eee130c2af410d | [] | no_license | KongChan1988/51CTO-Treasure | 08b4ca412ad8a09d67c1ea79c7149f8573309ca4 | edb2e4bd11d39ac24cd240f3e815a88361867621 | refs/heads/master | 2021-07-04T15:57:56.164446 | 2019-07-24T15:28:36 | 2019-07-24T15:28:36 | 97,453,749 | 5 | 8 | null | 2019-10-30T22:05:12 | 2017-07-17T08:34:59 | Python | UTF-8 | Python | false | false | 27,129 | py | # LayerMapping -- A Django Model/OGR Layer Mapping Utility
"""
The LayerMapping class provides a way to map the contents of OGR
vector files (e.g. SHP files) to Geographic-enabled Django models.
For more information, please consult the GeoDjango documentation:
https://docs.djangoproject.com/en/dev/ref/contrib/gis/layermapping/
"""
import sys
from decimal import Decimal, InvalidOperation as DecimalInvalidOperation
from django.contrib.gis.db.models import GeometryField
from django.contrib.gis.gdal import (
CoordTransform, DataSource, GDALException, OGRGeometry, OGRGeomType,
SpatialReference,
)
from django.contrib.gis.gdal.field import (
OFTDate, OFTDateTime, OFTInteger, OFTInteger64, OFTReal, OFTString,
OFTTime,
)
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import connections, models, router, transaction
from django.utils.encoding import force_text
# LayerMapping exceptions.
class LayerMapError(Exception):
pass
class InvalidString(LayerMapError):
pass
class InvalidDecimal(LayerMapError):
pass
class InvalidInteger(LayerMapError):
pass
class MissingForeignKey(LayerMapError):
pass
class LayerMapping:
"A class that maps OGR Layers to GeoDjango Models."
# Acceptable 'base' types for a multi-geometry type.
MULTI_TYPES = {1: OGRGeomType('MultiPoint'),
2: OGRGeomType('MultiLineString'),
3: OGRGeomType('MultiPolygon'),
OGRGeomType('Point25D').num: OGRGeomType('MultiPoint25D'),
OGRGeomType('LineString25D').num: OGRGeomType('MultiLineString25D'),
OGRGeomType('Polygon25D').num: OGRGeomType('MultiPolygon25D'),
}
# Acceptable Django field types and corresponding acceptable OGR
# counterparts.
FIELD_TYPES = {
models.AutoField: OFTInteger,
models.BigAutoField: OFTInteger64,
models.IntegerField: (OFTInteger, OFTReal, OFTString),
models.FloatField: (OFTInteger, OFTReal),
models.DateField: OFTDate,
models.DateTimeField: OFTDateTime,
models.EmailField: OFTString,
models.TimeField: OFTTime,
models.DecimalField: (OFTInteger, OFTReal),
models.CharField: OFTString,
models.SlugField: OFTString,
models.TextField: OFTString,
models.URLField: OFTString,
models.BigIntegerField: (OFTInteger, OFTReal, OFTString),
models.SmallIntegerField: (OFTInteger, OFTReal, OFTString),
models.PositiveSmallIntegerField: (OFTInteger, OFTReal, OFTString),
}
def __init__(self, model, data, mapping, layer=0,
source_srs=None, encoding='utf-8',
transaction_mode='commit_on_success',
transform=True, unique=None, using=None):
"""
A LayerMapping object is initialized using the given Model (not an instance),
a DataSource (or string path to an OGR-supported data file), and a mapping
dictionary. See the module level docstring for more details and keyword
argument usage.
"""
# Getting the DataSource and the associated Layer.
if isinstance(data, str):
self.ds = DataSource(data, encoding=encoding)
else:
self.ds = data
self.layer = self.ds[layer]
self.using = using if using is not None else router.db_for_write(model)
self.spatial_backend = connections[self.using].ops
# Setting the mapping & model attributes.
self.mapping = mapping
self.model = model
# Checking the layer -- initialization of the object will fail if
# things don't check out before hand.
self.check_layer()
# Getting the geometry column associated with the model (an
# exception will be raised if there is no geometry column).
if connections[self.using].features.supports_transform:
self.geo_field = self.geometry_field()
else:
transform = False
# Checking the source spatial reference system, and getting
# the coordinate transformation object (unless the `transform`
# keyword is set to False)
if transform:
self.source_srs = self.check_srs(source_srs)
self.transform = self.coord_transform()
else:
self.transform = transform
# Setting the encoding for OFTString fields, if specified.
if encoding:
# Making sure the encoding exists, if not a LookupError
# exception will be thrown.
from codecs import lookup
lookup(encoding)
self.encoding = encoding
else:
self.encoding = None
if unique:
self.check_unique(unique)
transaction_mode = 'autocommit' # Has to be set to autocommit.
self.unique = unique
else:
self.unique = None
# Setting the transaction decorator with the function in the
# transaction modes dictionary.
self.transaction_mode = transaction_mode
if transaction_mode == 'autocommit':
self.transaction_decorator = None
elif transaction_mode == 'commit_on_success':
self.transaction_decorator = transaction.atomic
else:
raise LayerMapError('Unrecognized transaction mode: %s' % transaction_mode)
# #### Checking routines used during initialization ####
def check_fid_range(self, fid_range):
"Check the `fid_range` keyword."
if fid_range:
if isinstance(fid_range, (tuple, list)):
return slice(*fid_range)
elif isinstance(fid_range, slice):
return fid_range
else:
raise TypeError
else:
return None
def check_layer(self):
"""
Check the Layer metadata and ensure that it's compatible with the
mapping information and model. Unlike previous revisions, there is no
need to increment through each feature in the Layer.
"""
# The geometry field of the model is set here.
# TODO: Support more than one geometry field / model. However, this
# depends on the GDAL Driver in use.
self.geom_field = False
self.fields = {}
# Getting lists of the field names and the field types available in
# the OGR Layer.
ogr_fields = self.layer.fields
ogr_field_types = self.layer.field_types
# Function for determining if the OGR mapping field is in the Layer.
def check_ogr_fld(ogr_map_fld):
try:
idx = ogr_fields.index(ogr_map_fld)
except ValueError:
raise LayerMapError('Given mapping OGR field "%s" not found in OGR Layer.' % ogr_map_fld)
return idx
# No need to increment through each feature in the model, simply check
# the Layer metadata against what was given in the mapping dictionary.
for field_name, ogr_name in self.mapping.items():
# Ensuring that a corresponding field exists in the model
# for the given field name in the mapping.
try:
model_field = self.model._meta.get_field(field_name)
except FieldDoesNotExist:
raise LayerMapError('Given mapping field "%s" not in given Model fields.' % field_name)
# Getting the string name for the Django field class (e.g., 'PointField').
fld_name = model_field.__class__.__name__
if isinstance(model_field, GeometryField):
if self.geom_field:
raise LayerMapError('LayerMapping does not support more than one GeometryField per model.')
# Getting the coordinate dimension of the geometry field.
coord_dim = model_field.dim
try:
if coord_dim == 3:
gtype = OGRGeomType(ogr_name + '25D')
else:
gtype = OGRGeomType(ogr_name)
except GDALException:
raise LayerMapError('Invalid mapping for GeometryField "%s".' % field_name)
# Making sure that the OGR Layer's Geometry is compatible.
ltype = self.layer.geom_type
if not (ltype.name.startswith(gtype.name) or self.make_multi(ltype, model_field)):
raise LayerMapError('Invalid mapping geometry; model has %s%s, '
'layer geometry type is %s.' %
(fld_name, '(dim=3)' if coord_dim == 3 else '', ltype))
# Setting the `geom_field` attribute w/the name of the model field
# that is a Geometry. Also setting the coordinate dimension
# attribute.
self.geom_field = field_name
self.coord_dim = coord_dim
fields_val = model_field
elif isinstance(model_field, models.ForeignKey):
if isinstance(ogr_name, dict):
# Is every given related model mapping field in the Layer?
rel_model = model_field.remote_field.model
for rel_name, ogr_field in ogr_name.items():
idx = check_ogr_fld(ogr_field)
try:
rel_model._meta.get_field(rel_name)
except FieldDoesNotExist:
raise LayerMapError('ForeignKey mapping field "%s" not in %s fields.' %
(rel_name, rel_model.__class__.__name__))
fields_val = rel_model
else:
raise TypeError('ForeignKey mapping must be of dictionary type.')
else:
# Is the model field type supported by LayerMapping?
if model_field.__class__ not in self.FIELD_TYPES:
raise LayerMapError('Django field type "%s" has no OGR mapping (yet).' % fld_name)
# Is the OGR field in the Layer?
idx = check_ogr_fld(ogr_name)
ogr_field = ogr_field_types[idx]
# Can the OGR field type be mapped to the Django field type?
if not issubclass(ogr_field, self.FIELD_TYPES[model_field.__class__]):
raise LayerMapError('OGR field "%s" (of type %s) cannot be mapped to Django %s.' %
(ogr_field, ogr_field.__name__, fld_name))
fields_val = model_field
self.fields[field_name] = fields_val
def check_srs(self, source_srs):
"Check the compatibility of the given spatial reference object."
if isinstance(source_srs, SpatialReference):
sr = source_srs
elif isinstance(source_srs, self.spatial_backend.spatial_ref_sys()):
sr = source_srs.srs
elif isinstance(source_srs, (int, str)):
sr = SpatialReference(source_srs)
else:
# Otherwise just pulling the SpatialReference from the layer
sr = self.layer.srs
if not sr:
raise LayerMapError('No source reference system defined.')
else:
return sr
def check_unique(self, unique):
"Check the `unique` keyword parameter -- may be a sequence or string."
if isinstance(unique, (list, tuple)):
# List of fields to determine uniqueness with
for attr in unique:
if attr not in self.mapping:
raise ValueError
elif isinstance(unique, str):
# Only a single field passed in.
if unique not in self.mapping:
raise ValueError
else:
raise TypeError('Unique keyword argument must be set with a tuple, list, or string.')
# Keyword argument retrieval routines ####
def feature_kwargs(self, feat):
"""
Given an OGR Feature, return a dictionary of keyword arguments for
constructing the mapped model.
"""
# The keyword arguments for model construction.
kwargs = {}
# Incrementing through each model field and OGR field in the
# dictionary mapping.
for field_name, ogr_name in self.mapping.items():
model_field = self.fields[field_name]
if isinstance(model_field, GeometryField):
# Verify OGR geometry.
try:
val = self.verify_geom(feat.geom, model_field)
except GDALException:
raise LayerMapError('Could not retrieve geometry from feature.')
elif isinstance(model_field, models.base.ModelBase):
# The related _model_, not a field was passed in -- indicating
# another mapping for the related Model.
val = self.verify_fk(feat, model_field, ogr_name)
else:
# Otherwise, verify OGR Field type.
val = self.verify_ogr_field(feat[ogr_name], model_field)
# Setting the keyword arguments for the field name with the
# value obtained above.
kwargs[field_name] = val
return kwargs
def unique_kwargs(self, kwargs):
"""
Given the feature keyword arguments (from `feature_kwargs`), construct
and return the uniqueness keyword arguments -- a subset of the feature
kwargs.
"""
if isinstance(self.unique, str):
return {self.unique: kwargs[self.unique]}
else:
return {fld: kwargs[fld] for fld in self.unique}
# #### Verification routines used in constructing model keyword arguments. ####
def verify_ogr_field(self, ogr_field, model_field):
"""
Verify if the OGR Field contents are acceptable to the model field. If
they are, return the verified value, otherwise raise an exception.
"""
if (isinstance(ogr_field, OFTString) and
isinstance(model_field, (models.CharField, models.TextField))):
if self.encoding:
# The encoding for OGR data sources may be specified here
# (e.g., 'cp437' for Census Bureau boundary files).
val = force_text(ogr_field.value, self.encoding)
else:
val = ogr_field.value
if model_field.max_length and len(val) > model_field.max_length:
raise InvalidString('%s model field maximum string length is %s, given %s characters.' %
(model_field.name, model_field.max_length, len(val)))
elif isinstance(ogr_field, OFTReal) and isinstance(model_field, models.DecimalField):
try:
# Creating an instance of the Decimal value to use.
d = Decimal(str(ogr_field.value))
except DecimalInvalidOperation:
raise InvalidDecimal('Could not construct decimal from: %s' % ogr_field.value)
# Getting the decimal value as a tuple.
dtup = d.as_tuple()
digits = dtup[1]
d_idx = dtup[2] # index where the decimal is
# Maximum amount of precision, or digits to the left of the decimal.
max_prec = model_field.max_digits - model_field.decimal_places
# Getting the digits to the left of the decimal place for the
# given decimal.
if d_idx < 0:
n_prec = len(digits[:d_idx])
else:
                n_prec = len(digits) + d_idx
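            # Worked example of the arithmetic above (illustrative values): for
            # Decimal('123.45'), as_tuple() gives digits=(1, 2, 3, 4, 5) and exponent
            # d_idx=-2, so n_prec = len(digits[:-2]) = 3 digits left of the decimal
            # point; a DecimalField(max_digits=5, decimal_places=2) allows
            # max_prec = 5 - 2 = 3, so that value passes the check below.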
# If we have more than the maximum digits allowed, then throw an
# InvalidDecimal exception.
if n_prec > max_prec:
raise InvalidDecimal(
'A DecimalField with max_digits %d, decimal_places %d must '
'round to an absolute value less than 10^%d.' %
(model_field.max_digits, model_field.decimal_places, max_prec)
)
val = d
elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(model_field, models.IntegerField):
# Attempt to convert any OFTReal and OFTString value to an OFTInteger.
try:
val = int(ogr_field.value)
except ValueError:
raise InvalidInteger('Could not construct integer from: %s' % ogr_field.value)
else:
val = ogr_field.value
return val
def verify_fk(self, feat, rel_model, rel_mapping):
"""
Given an OGR Feature, the related model and its dictionary mapping,
retrieve the related model for the ForeignKey mapping.
"""
# TODO: It is expensive to retrieve a model for every record --
# explore if an efficient mechanism exists for caching related
# ForeignKey models.
# Constructing and verifying the related model keyword arguments.
fk_kwargs = {}
for field_name, ogr_name in rel_mapping.items():
fk_kwargs[field_name] = self.verify_ogr_field(feat[ogr_name], rel_model._meta.get_field(field_name))
# Attempting to retrieve and return the related model.
try:
return rel_model.objects.using(self.using).get(**fk_kwargs)
except ObjectDoesNotExist:
raise MissingForeignKey(
'No ForeignKey %s model found with keyword arguments: %s' %
(rel_model.__name__, fk_kwargs)
)
def verify_geom(self, geom, model_field):
"""
Verify the geometry -- construct and return a GeometryCollection
if necessary (for example if the model field is MultiPolygonField while
the mapped shapefile only contains Polygons).
"""
# Downgrade a 3D geom to a 2D one, if necessary.
if self.coord_dim != geom.coord_dim:
geom.coord_dim = self.coord_dim
if self.make_multi(geom.geom_type, model_field):
# Constructing a multi-geometry type to contain the single geometry
multi_type = self.MULTI_TYPES[geom.geom_type.num]
g = OGRGeometry(multi_type)
g.add(geom)
else:
g = geom
# Transforming the geometry with our Coordinate Transformation object,
# but only if the class variable `transform` is set w/a CoordTransform
# object.
if self.transform:
g.transform(self.transform)
# Returning the WKT of the geometry.
return g.wkt
# #### Other model methods ####
def coord_transform(self):
"Return the coordinate transformation object."
SpatialRefSys = self.spatial_backend.spatial_ref_sys()
try:
# Getting the target spatial reference system
target_srs = SpatialRefSys.objects.using(self.using).get(srid=self.geo_field.srid).srs
# Creating the CoordTransform object
return CoordTransform(self.source_srs, target_srs)
except Exception as exc:
raise LayerMapError(
'Could not translate between the data source and model geometry.'
) from exc
def geometry_field(self):
"Return the GeometryField instance associated with the geographic column."
# Use `get_field()` on the model's options so that we
# get the correct field instance if there's model inheritance.
opts = self.model._meta
return opts.get_field(self.geom_field)
def make_multi(self, geom_type, model_field):
"""
Given the OGRGeomType for a geometry and its associated GeometryField,
determine whether the geometry should be turned into a GeometryCollection.
"""
return (geom_type.num in self.MULTI_TYPES and
model_field.__class__.__name__ == 'Multi%s' % geom_type.django)
def save(self, verbose=False, fid_range=False, step=False,
progress=False, silent=False, stream=sys.stdout, strict=False):
"""
Save the contents from the OGR DataSource Layer into the database
according to the mapping dictionary given at initialization.
Keyword Parameters:
verbose:
If set, information will be printed subsequent to each model save
executed on the database.
fid_range:
May be set with a slice or tuple of (begin, end) feature ID's to map
            from the data source. In other words, this keyword enables the user
to selectively import a subset range of features in the geographic
data source.
step:
If set with an integer, transactions will occur at every step
interval. For example, if step=1000, a commit would occur after
the 1,000th feature, the 2,000th feature etc.
progress:
When this keyword is set, status information will be printed giving
the number of features processed and successfully saved. By default,
            progress information will be printed every 1000 features processed,
however, this default may be overridden by setting this keyword with an
integer for the desired interval.
stream:
Status information will be written to this file handle. Defaults to
using `sys.stdout`, but any object with a `write` method is supported.
silent:
By default, non-fatal error notifications are printed to stdout, but
this keyword may be set to disable these notifications.
strict:
Execution of the model mapping will cease upon the first error
encountered. The default behavior is to attempt to continue.
"""
# Getting the default Feature ID range.
default_range = self.check_fid_range(fid_range)
# Setting the progress interval, if requested.
if progress:
if progress is True or not isinstance(progress, int):
progress_interval = 1000
else:
progress_interval = progress
def _save(feat_range=default_range, num_feat=0, num_saved=0):
if feat_range:
layer_iter = self.layer[feat_range]
else:
layer_iter = self.layer
for feat in layer_iter:
num_feat += 1
# Getting the keyword arguments
try:
kwargs = self.feature_kwargs(feat)
except LayerMapError as msg:
# Something borked the validation
if strict:
raise
elif not silent:
stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg))
else:
# Constructing the model using the keyword args
is_update = False
if self.unique:
# If we want unique models on a particular field, handle the
# geometry appropriately.
try:
# Getting the keyword arguments and retrieving
# the unique model.
u_kwargs = self.unique_kwargs(kwargs)
m = self.model.objects.using(self.using).get(**u_kwargs)
is_update = True
# Getting the geometry (in OGR form), creating
# one from the kwargs WKT, adding in additional
# geometries, and update the attribute with the
# just-updated geometry WKT.
geom = getattr(m, self.geom_field).ogr
new = OGRGeometry(kwargs[self.geom_field])
for g in new:
geom.add(g)
setattr(m, self.geom_field, geom.wkt)
except ObjectDoesNotExist:
# No unique model exists yet, create.
m = self.model(**kwargs)
else:
m = self.model(**kwargs)
try:
# Attempting to save.
m.save(using=self.using)
num_saved += 1
if verbose:
stream.write('%s: %s\n' % ('Updated' if is_update else 'Saved', m))
except Exception as msg:
if strict:
# Bailing out if the `strict` keyword is set.
if not silent:
stream.write(
'Failed to save the feature (id: %s) into the '
'model with the keyword arguments:\n' % feat.fid
)
stream.write('%s\n' % kwargs)
raise
elif not silent:
stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg))
# Printing progress information, if requested.
if progress and num_feat % progress_interval == 0:
stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved))
# Only used for status output purposes -- incremental saving uses the
# values returned here.
return num_saved, num_feat
if self.transaction_decorator is not None:
_save = self.transaction_decorator(_save)
nfeat = self.layer.num_feat
if step and isinstance(step, int) and step < nfeat:
# Incremental saving is requested at the given interval (step)
if default_range:
raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.')
beg, num_feat, num_saved = (0, 0, 0)
indices = range(step, nfeat, step)
n_i = len(indices)
for i, end in enumerate(indices):
# Constructing the slice to use for this step; the last slice is
            # special (e.g., [100:] instead of [90:100]).
if i + 1 == n_i:
step_slice = slice(beg, None)
else:
step_slice = slice(beg, end)
try:
num_feat, num_saved = _save(step_slice, num_feat, num_saved)
beg = end
except Exception: # Deliberately catch everything
stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice))
raise
else:
# Otherwise, just calling the previously defined _save() function.
_save()
| [
"[email protected]"
] | |
e3715d7cbdd7977bd57b89bffe7e1c7374827eb2 | 40fc1d38f2d4b643bc99df347c4ff3a763ba65e3 | /arcade/space_shooter/setup.py | 899a87123f997e3c1122f83ff3f7a77fa541a2a7 | [
"LicenseRef-scancode-public-domain",
"MIT",
"CC-BY-4.0",
"CC-BY-3.0"
] | permissive | alecordev/pygaming | 0be4b7a1c9e7922c63ce4cc369cd893bfef7b03c | 35e479b703acf038f47c2151b3759ad852781e4c | refs/heads/master | 2023-05-14T05:03:28.484678 | 2021-06-03T10:11:08 | 2021-06-03T10:11:08 | 372,768,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | import sys
from cx_Freeze import setup, Executable
import os
# Dependencies are automatically detected, but it might need fine tuning.
build_exe_options = {"packages": ["os", "pygame"]}
# GUI applications require a different base on Windows (the default is for a
# console application).
base = None
if sys.platform == "win32":
base = "Win32GUI"
pygame_py_file = os.path.join("spaceshooter", "spaceShooter.py")
## The image and sound files are added manually into the zip file
## A fix for this would be released
setup(
name="Space Shooter",
version="0.0.2",
description="classic retro game made using pygame",
options={"build_exe": build_exe_options},
executables=[Executable(pygame_py_file, base=base)],
)
| [
"[email protected]"
] | |
90a99102e6ad23ec94bd9649921692bcd0e04eb7 | 76e8dddb8d12906514f0004f32af8ef305ad43e9 | /Dragon/python/dragon/vm/tensorflow/ops/nn_ops.py | ce0d7c84235221fc898b94a97501b81947b4cb8a | [
"BSD-2-Clause"
] | permissive | XJTUeducation/Dragon | bf98bd3a10449fa8948e0409a0243d666324b749 | 843204956ff7775c49d0d6193e1cd77ab512fbdd | refs/heads/master | 2020-04-10T13:02:17.446430 | 2018-12-04T07:56:55 | 2018-12-04T07:56:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,799 | py | # ------------------------------------------------------------
# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
#
# Licensed under the BSD 2-Clause License.
# You should have received a copy of the BSD 2-Clause License
# along with the software. If not, See,
#
# <https://opensource.org/licenses/BSD-2-Clause>
#
# ------------------------------------------------------------
from dragon.core.tensor import Tensor
import dragon.ops as ops
__all__ = [
'convolution',
'relu',
'softmax',
'conv2d',
'conv2d_transpose',
'avg_pool',
'max_pool',
'xw_plus_b',
'bias_add',
'dropout',
'sigmoid_cross_entropy_with_logits',
'softmax_cross_entropy_with_logits',
'sparse_softmax_cross_entropy_with_logits',
'l2_loss'
]
def convolution(input, filter, padding, strides=None,
dilation_rate=None, name=None, data_format=None):
num_total_dims = filter.get_shape().ndims
if num_total_dims is None:
num_total_dims = input.get_shape().ndims
if num_total_dims is None:
raise ValueError("rank of input or filter must be known.")
num_spatial_dims = num_total_dims - 2
# make default parameters
if data_format is None:
data_format = 'NHWC'
if strides is None:
strides = [1] * num_total_dims
else:
if len(strides) != num_total_dims:
_strides = [1] * num_total_dims
_n_provides = len(strides)
if data_format == 'NHWC':
_strides[1 : 1 + _n_provides] = strides
else:
_strides[2 : 2 + _n_provides] = strides
strides = _strides
if dilation_rate is not None:
if len(dilation_rate) != num_total_dims:
_dilation_rate = [1] * num_total_dims
_n_provides = len(dilation_rate)
if data_format == 'NHWC':
_dilation_rate[1 : 1 + _n_provides] = dilation_rate
else:
_dilation_rate[2 : 2 + _n_provides] = dilation_rate
dilation_rate = _dilation_rate
if num_spatial_dims == 2:
return conv2d(input, filter,
strides, padding, dilation_rate,
data_format, name)
else:
raise NotImplementedError('conv{}d is not implemented.'.format(num_spatial_dims))
def relu(features, name=None):
return ops.Relu(features, name=name)
def softmax(logits, dim=-1, name=None):
return ops.Softmax(logits, axis=dim)
def conv2d(input, filter, strides, padding, dilation_rate=None,
data_format='NHWC', name=None, **kwargs):
"""Compute 2D convolution according to the given 4D ``input`` and ``filter``.
For **NHWC** format, filter should be as ``[filter_height, filter_width, in_channels, out_channels]``.
For **NCHW** format, filter should be as ``[out_channels, in_channels, filter_height, filter_width]``.
Parameters
----------
input : Tensor
The input tensor.
filter : Tensor
The filter tensor.
strides : list of int
The strides with length 4.
padding : str
The padding algorithm. ``VALID`` or ``SAME``.
dilation_rate : list of int or None
The dilation rates with with length 4.
data_format : str
The data format. ``NHWC`` or ``NCHW``.
name : str
The optional name for this operator.
Returns
-------
Tensor
The output tensor.
"""
if filter.shape is None:
raise ValueError('filter must have a valid shape.')
else:
if len(filter.shape) != 4:
raise ValueError('filter must be a 4D Tensor.')
if len(strides) != 4:
raise ValueError('strides must be a list with length 4.')
if dilation_rate is not None:
if len(dilation_rate) != 4:
raise ValueError(' dilation_rate must be a list with length 4.')
if data_format == 'NHWC':
output = ops.Conv2d([input, filter],
num_output=filter.shape[3],
kernel_size=filter.shape[0:2],
stride=strides[1:3],
dilation=dilation_rate[1:3] if dilation_rate is not None else 1,
padding=padding,
data_format=data_format)
return output
elif data_format == 'NCHW':
output = ops.Conv2d([input, filter],
num_output=filter.shape[0],
kernel_size=filter.shape[2:4],
stride=strides[2:4],
dilation=dilation_rate[2:4] if dilation_rate is not None else 1,
padding=padding,
data_format=data_format)
return output
else:
raise ValueError('Unknown data format: {}'.format(data_format))
def conv2d_transpose(value, filter, output_shape, strides, padding='SAME',
data_format='NHWC', name=None):
"""Compute 2D deconvolution according to the given 4D ``input`` and ``filter``.
For **NHWC** format, filter should be as ``[filter_height, filter_width, out_channels, in_channels]``.
For **NCHW** format, filter should be as ``[in_channels, out_channels, filter_height, filter_width]``.
``output_shape`` will be ignored if padding algorithm is **VALID**.
Parameters
----------
input : Tensor
The input tensor.
filter : Tensor
The filter tensor.
output_shape : list of int
The deterministic output shape for **SAME** padding.
strides : list of int
The strides with length 4.
padding : str
The padding algorithm. ``VALID`` or ``SAME``.
data_format : str
The data format. ``NHWC`` or ``NCHW``.
name : str
The optional name for this operator.
Returns
-------
Tensor
The output tensor.
"""
if filter.shape is None:
raise ValueError('filter must have a valid shape.')
else:
if len(filter.shape) != 4:
raise ValueError('filter must be a 4D Tensor.')
if len(strides) != 4:
raise ValueError('strides must be a list with length 4.')
if not isinstance(output_shape, list):
raise TypeError('output_shape should be a list.')
if len(output_shape) != 4:
raise ValueError('output_shape should be a list with length 4.')
if data_format == 'NHWC':
output = ops.Conv2dTranspose([value, filter],
num_output=filter.shape[2],
kernel_size=filter.shape[0:2],
stride=strides[1:3],
padding=padding,
data_format=data_format,
output_shape=output_shape)
return output
elif data_format == 'NCHW':
output = ops.Conv2dTranspose([value, filter],
num_output=filter.shape[1],
kernel_size=filter.shape[2:4],
stride=strides[2:4],
padding=padding,
data_format=data_format,
output_shape=output_shape)
return output
else:
raise ValueError('Unknown data format: {}'.format(data_format))
def avg_pool(value, ksize, strides, padding, data_format='NHWC', name=None):
"""Perform avg pooling on spatial axes.
Parameters
----------
value : Tensor
The input tensor.
ksize : list of int
The kernel size with length >= 4.
strides : list of int
The strides with length >= 4.
padding : str
The padding algorithm. ``VALID`` or ``SAME``.
data_format : str
The data format. ``NHWC`` or ``NCHW``.
name : None or str
The optional name of op.
Returns
-------
Tensor
The output tensor.
"""
if len(ksize) < 4:
raise ValueError('ksize must be a list with length >=4.')
if len(strides) < 4:
raise ValueError('strides must be a list with length >=4.')
if len(ksize) != len(strides):
raise ValueError('ksize and strides should have the same length.')
if len(ksize) == 4:
if data_format == 'NHWC':
if ksize[0] != 1 or ksize[3] != 1 or strides[0] != 1 or strides[3] != 1:
raise ValueError('The pooling can only be performed on spatial axes.')
return ops.Pool2d(value, [ksize[1], ksize[2]], [strides[1], strides[2]],
padding=padding, data_format=data_format, mode='AVG')
if data_format == 'NCHW':
if ksize[0] != 1 or ksize[1] != 1 or strides[0] != 1 or strides[1] != 1:
raise ValueError('The pooling can only be performed on spatial axes.')
return ops.Pool2d(value, [ksize[2], ksize[3]], [strides[2], strides[3]],
padding=padding, data_format=data_format, mode='AVG')
else:
raise NotImplementedError('Pool{}d has not been implemented yet.'.format(len(ksize) - 2))
def max_pool(value, ksize, strides, padding, data_format='NHWC', name=None):
"""Perform max pooling on spatial axes.
Parameters
----------
value : Tensor
The input tensor.
ksize : list of int
The kernel size with length >= 4.
strides : list of int
The strides with length >= 4.
padding : str
The padding algorithm. ``VALID`` or ``SAME``.
data_format : str
The data format. ``NHWC`` or ``NCHW``.
name : None or str
The optional name of op.
Returns
-------
Tensor
The output tensor.
"""
if len(ksize) < 4:
raise ValueError('ksize must be a list with length >=4.')
if len(strides) < 4:
raise ValueError('strides must be a list with length >=4.')
if len(ksize) != len(strides):
raise ValueError('ksize and strides should have the same length.')
if len(ksize) == 4:
if data_format == 'NHWC':
if ksize[0] != 1 or ksize[3] != 1 or strides[0] != 1 or strides[3] != 1:
raise ValueError('The pooling can only be performed on spatial axes.')
return ops.Pool2d(value, [ksize[1], ksize[2]], [strides[1], strides[2]],
padding=padding, data_format=data_format, mode='MAX')
if data_format == 'NCHW':
if ksize[0] != 1 or ksize[1] != 1 or strides[0] != 1 or strides[1] != 1:
raise ValueError('The pooling can only be performed on spatial axes.')
return ops.Pool2d(value, [ksize[2], ksize[3]], [strides[2], strides[3]],
padding=padding, data_format=data_format, mode='MAX')
else:
raise NotImplementedError('Pool{}d has not been implemented yet.'.format(len(ksize) - 2))
def xw_plus_b(x, weights, biases, name=None):
if weights.shape is None:
raise ValueError('weights must have a valid shape.')
else:
if len(weights.shape) != 2:
raise ValueError('weights must be a 2D Tensor')
if biases.shape is None:
raise ValueError('biases must a have a valid shape.')
else:
if len(biases.shape) != 1:
raise ValueError('biases must be a 1D Tensor')
if weights.shape[1] != biases.shape[0]:
        raise ValueError('the shapes of weights and biases are incompatible.')
return ops.InnerProduct([x, weights, biases], num_output=weights.shape[1], TransW=False)
def bias_add(value, bias, data_format='NHWC', name=None):
return ops.BiasAdd([value, bias], data_format=data_format)
def sigmoid_cross_entropy_with_logits(logits, targets, name=None):
    return ops.SigmoidCrossEntropy([logits, targets], normalization='UNIT', name=name)
def softmax_cross_entropy_with_logits(_sentinel=None,
labels=None, logits=None,
dim=-1, name=None):
if _sentinel is not None:
raise ValueError('Only call `softmax_cross_entropy_with_logits` '
'with named arguments (labels=..., logits=..., ...)')
if dim == -1: dim = 1
return ops.SoftmaxCrossEntropy([logits, labels], axis=dim, normalization='UNIT', name=name)
def sparse_softmax_cross_entropy_with_logits(logits, labels, dim=-1, name=None):
if dim == -1: dim = 1
return ops.SparseSoftmaxCrossEntropy([logits, labels], axis=dim, normalization='UNIT', name=name)
def l2_loss(t, name=None):
return (ops.Reduce(ops.Square(t), operation='SUM') * 0.5)
def dropout(x, keep_prob, name=None):
return ops.Dropout(x, 1 - keep_prob)
| [
"[email protected]"
] | |
8904beb072f5f0d6c02deb340ad9e1bde96aa958 | 6509c398816baffafa4a1fcfb2855e1bc9d1609b | /sistema-operacional/diretorios/pathlib/exemplos/pathlib-4.py | 7986086bea5a528b646fbaa9b9c5e9fc10c68789 | [] | no_license | marcoswebermw/learning-python | 6b0dfa81a0d085f4275865dce089d9b53b494aa5 | 931ed2985b8a3fec1a48c660c089e290aaac123d | refs/heads/master | 2021-10-27T21:19:46.013020 | 2019-04-19T23:25:46 | 2019-04-19T23:25:46 | 87,670,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | # Listing only the files in a directory.
from pathlib import Path
diretorio = Path('.')
[print(x) for x in diretorio.iterdir() if x.is_file()] | [
"[email protected]"
] | |
ce3eb9f306532e3d901fc2acb81877bb8a80fbde | b70f00927b9ed862252ad7345ca39f9d44ae87a2 | /exec -l /bin/bash/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/command_lib/filestore/operations/flags.py | a2422ffca4165e41c34c82b6d85667ae3393cac0 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | sparramore/Art-Roulette | 7654dedad6e9423dfc31bd0f807570b07a17a8fc | c897c9ec66c27ccab16f1a12213d09fe982d4a95 | refs/heads/master | 2021-07-06T13:04:22.141681 | 2018-07-12T23:30:13 | 2018-07-12T23:30:13 | 139,061,941 | 0 | 2 | null | 2020-07-25T11:32:11 | 2018-06-28T19:49:24 | Python | UTF-8 | Python | false | false | 1,156 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flags and helpers for the Cloud Filestore operations commands."""
from __future__ import unicode_literals
OPERATIONS_LIST_FORMAT = """\
table(
name.basename():label=OPERATION_NAME,
name.segment(3):label=LOCATION,
metadata.verb:label=TYPE,
metadata.target.basename(),
done.yesno(yes='DONE', no='RUNNING'):label=STATUS,
metadata.createTime.date():sort=1,
duration(start=metadata.createTime,end=metadata.endTime,precision=0,calendar=false).slice(2:).join("").yesno(no="<1S"):label=DURATION
)"""
| [
"[email protected]"
] | |
7df867c895807b675e26661a7c94fcedf8969c23 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/operationalinsights/v20200301preview/get_linked_storage_account.py | c615084e822e9a1b8d05805b45a7c7b14a8e0d07 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 4,552 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetLinkedStorageAccountResult',
'AwaitableGetLinkedStorageAccountResult',
'get_linked_storage_account',
]
@pulumi.output_type
class GetLinkedStorageAccountResult:
"""
Linked storage accounts top level resource container.
"""
def __init__(__self__, data_source_type=None, id=None, name=None, storage_account_ids=None, type=None):
if data_source_type and not isinstance(data_source_type, str):
raise TypeError("Expected argument 'data_source_type' to be a str")
pulumi.set(__self__, "data_source_type", data_source_type)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if storage_account_ids and not isinstance(storage_account_ids, list):
raise TypeError("Expected argument 'storage_account_ids' to be a list")
pulumi.set(__self__, "storage_account_ids", storage_account_ids)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="dataSourceType")
def data_source_type(self) -> str:
"""
Linked storage accounts type.
"""
return pulumi.get(self, "data_source_type")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="storageAccountIds")
def storage_account_ids(self) -> Optional[Sequence[str]]:
"""
Linked storage accounts resources ids.
"""
return pulumi.get(self, "storage_account_ids")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetLinkedStorageAccountResult(GetLinkedStorageAccountResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetLinkedStorageAccountResult(
data_source_type=self.data_source_type,
id=self.id,
name=self.name,
storage_account_ids=self.storage_account_ids,
type=self.type)
def get_linked_storage_account(data_source_type: Optional[str] = None,
resource_group_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetLinkedStorageAccountResult:
"""
Linked storage accounts top level resource container.
:param str data_source_type: Linked storage accounts type.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: The name of the workspace.
"""
__args__ = dict()
__args__['dataSourceType'] = data_source_type
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:operationalinsights/v20200301preview:getLinkedStorageAccount', __args__, opts=opts, typ=GetLinkedStorageAccountResult).value
return AwaitableGetLinkedStorageAccountResult(
data_source_type=__ret__.data_source_type,
id=__ret__.id,
name=__ret__.name,
storage_account_ids=__ret__.storage_account_ids,
type=__ret__.type)
| [
"[email protected]"
] | |
4365938a5db92558c7c18ea93d358dfe9ffed5bd | 0b0d3246d39974cb8faff7d269da2d539415afab | /problem_python/p49.py | 88e5b7863a391d3d438604eab2bbd0cc41c6c173 | [] | no_license | xionghhcs/leetcode | 972e7ae4ca56b7100223630b294b5a97ba5dd7e8 | 8bd43dcd995a9de0270b8cea2d9a48df17ffc08b | refs/heads/master | 2020-03-07T17:18:08.465559 | 2019-09-29T11:11:26 | 2019-09-29T11:11:26 | 127,607,564 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | class Solution:
def groupAnagrams(self, strs):
import copy
strs_cp = copy.deepcopy(strs)
for i, item in enumerate(strs_cp):
item = list(item)
item.sort()
item = ''.join(item)
strs_cp[i] = item
table = dict()
for i, item in enumerate(strs_cp):
if item not in table:
table[item] = []
table[item].append(strs[i])
ans = []
for k in table:
ans.append(table[k])
return ans
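    # Example (illustrative): Solution().groupAnagrams(["eat", "tea", "tan", "ate", "nat", "bat"])
    # returns [["eat", "tea", "ate"], ["tan", "nat"], ["bat"]] -- one bucket per
    # sorted-letter key, in dict insertion order.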
| [
"[email protected]"
] | |
6036b4e9fe5bce86b786985656d485851ebc000e | 78918441c6735b75adcdf20380e5b6431891b21f | /api/views.py | 7db0c4896ac2f5194e3116b1611b1bf43493ac47 | [] | no_license | dede-20191130/PracticeDjango_2 | eba40532d5ce8bd4fd13fbd15d94f31942111cfa | 23593c0fa4c4dff04bd76583e8176e600ca69014 | refs/heads/master | 2020-12-23T14:23:08.039363 | 2020-02-27T12:16:00 | 2020-02-27T12:16:00 | 237,176,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,502 | py | import json
from collections import OrderedDict
from django.http import HttpResponse
from mybook.models import Book
def render_json_response(request, data, status=None):
"""response を JSON で返却"""
json_str = json.dumps(data, ensure_ascii=False, indent=2)
callback = request.GET.get('callback')
if not callback:
        callback = request.POST.get('callback')  # for JSONP requests sent via POST
if callback:
json_str = "%s(%s)" % (callback, json_str)
response = HttpResponse(json_str, content_type='application/javascript; charset=UTF-8', status=status)
else:
response = HttpResponse(json_str, content_type='application/json; charset=UTF-8', status=status)
return response
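    # Usage sketch (the URL below is hypothetical): GET /books/?callback=handleBooks
    # responds with 'handleBooks({...})' as application/javascript (JSONP), while a
    # request without ?callback receives the plain JSON body as application/json.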
def book_list(request):
"""書籍と感想のJSONを返す"""
books = []
for book in Book.objects.all().order_by('id'):
impressions = []
for impression in book.impressions.order_by('id'):
impression_dict = OrderedDict([
('impression_id', impression.id),
('comment', impression.comment),
])
impressions.append(impression_dict)
book_dict = OrderedDict([
('book_id', book.id),
('name', book.name),
('publisher', book.publisher),
('page', book.page),
('impressions', impressions)
])
books.append(book_dict)
data = OrderedDict([('books', books)])
return render_json_response(request, data)
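    # Shape of the payload built above (field values are illustrative):
    # {"books": [{"book_id": 1, "name": "...", "publisher": "...", "page": 100,
    #             "impressions": [{"impression_id": 1, "comment": "..."}]}]}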
| [
"[email protected]"
] | |
9c0cf1c8261160763e6b9bd9b4485efef74b2d8d | 2990b0841b63f300a722107933c01c7237a7976b | /all_xuef/code/leetcode/TOOLS/BinaryTree-master/test/__init__.py | a503b9948e4bb811046d2e3362574e7f4bed412b | [] | no_license | xuefengCrown/Files_01_xuef | 8ede04751689e0495e3691fc5d8682da4d382b4d | 677329b0189149cb07e7ba934612ad2b3e38ae35 | refs/heads/master | 2021-05-15T04:34:49.936001 | 2019-01-23T11:50:54 | 2019-01-23T11:50:54 | 118,802,861 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | from test_binary_tree import *
from test_tree_node import *
| [
"[email protected]"
] | |
3dba9cc472654cbd43ec5366ccd01fa7bd6f03de | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /travelport/models/type_start_end_time.py | 9a4b3e1df4ef31fe8155d79f3f8f6c17d3a73f86 | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 1,920 | py | from __future__ import annotations
from dataclasses import dataclass, field
__NAMESPACE__ = "http://www.travelport.com/schema/vehicle_v52_0"
@dataclass
class TypeStartEndTime:
"""
Used to specify earliest and latest pickup/dropoff times for a vehicle.
Parameters
----------
time
The time in 24 hour clock format.
requirement_passed
When true, the time requirement has been met.
mon
tue
wed
thu
fri
sat
sun
"""
class Meta:
name = "typeStartEndTime"
time: None | str = field(
default=None,
metadata={
"name": "Time",
"type": "Attribute",
"required": True,
}
)
requirement_passed: None | bool = field(
default=None,
metadata={
"name": "RequirementPassed",
"type": "Attribute",
}
)
mon: None | bool = field(
default=None,
metadata={
"name": "Mon",
"type": "Attribute",
}
)
tue: None | bool = field(
default=None,
metadata={
"name": "Tue",
"type": "Attribute",
}
)
wed: None | bool = field(
default=None,
metadata={
"name": "Wed",
"type": "Attribute",
}
)
thu: None | bool = field(
default=None,
metadata={
"name": "Thu",
"type": "Attribute",
}
)
fri: None | bool = field(
default=None,
metadata={
"name": "Fri",
"type": "Attribute",
}
)
sat: None | bool = field(
default=None,
metadata={
"name": "Sat",
"type": "Attribute",
}
)
sun: None | bool = field(
default=None,
metadata={
"name": "Sun",
"type": "Attribute",
}
)
| [
"[email protected]"
] | |
7a57b9d8fc4353b0116d5eb59291d529fd673296 | 91e98f30ab87f13cbd533c276e24690912690b35 | /BlaineFry/Phys_707_Model_Selection_v2.py | a908f6a9aff841b250489f5e5527751582a51e48 | [] | no_license | ladosamushia/PHYS707 | a5a3f4954746722a3c7e530730a7cbd01caeb5f4 | 968e143022d49bfe477590b38e40184e3affed02 | refs/heads/master | 2020-07-20T06:38:42.914658 | 2019-12-23T12:27:43 | 2019-12-23T12:27:43 | 206,591,395 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,413 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 22 09:52:28 2019
@author: Blaine Fry
"""
# import packages
import numpy as np
from numpy import random as rand
from matplotlib import pyplot as plt
#%% generate some data
Npoints = 50
mu_0 = 1.0
sigma_0 = 1.0
data = rand.normal(loc=mu_0,scale=sigma_0,size=Npoints)
# and make a histogram of it
xbounds = [-5,5]
Nbins = 20
Bins = np.linspace(xbounds[0],xbounds[1],num=Nbins+1)
plt.figure(1)
plt.xlim(xbounds[0],xbounds[1])
plt.xlabel('value')
plt.ylabel('Normalized Frequency')
plt.title('Data Histogram')
plt.grid(alpha=0.5)
plt.hist(data,bins=Bins,alpha=0.8,color='m',normed=True,label='Data') # may need to switch normed to density if this line calls an error
plt.legend()
#%% define the models to test out
# first, a general gaussian
def gauss(x,mu,sigma):
return (1/(sigma*np.sqrt(2*np.pi)))*np.exp(-((x-mu)*(x-mu))/(2*sigma*sigma))
# then the models
def Gauss_A(x):
return gauss(x,1.0,1.0)
def Gauss_B(x):
return gauss(x,1.2,1.0)
x = np.linspace(xbounds[0],xbounds[1],num=1000)
plt.plot(x,Gauss_A(x),'c-',label='Gauss A')
plt.plot(x,Gauss_B(x),'b-',label='Gauss B')
plt.legend()
#%% start comparing models
# P({x_i}) = P(x_1)*P(x_2)*P(x_3)*...
# logs would be better... consider revising
Ntrials = 1000
def compare_models(actual_dist,mu_ex,sigma_ex,model_1,model_2): # actual_dist = 'Gauss' or 'Cauchy'
log_ratios = []
for i in range(Ntrials):
        if actual_dist == 'Gauss':
data = rand.normal(loc=mu_ex,scale=sigma_ex,size=Npoints)
else:
data = rand.standard_cauchy(size=Npoints)
# find the probability of the data set given model 1
prob1 = 1
for i in range(Npoints):
prob1 *= model_1(data[i])
# find the probability of the data set given model 2
prob2 = 1
for i in range(Npoints):
prob2 *= model_2(data[i])
log_ratios.append(np.log10(prob1/prob2))
return log_ratios
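# On the "logs would be better" note above: multiplying Npoints densities can underflow
# to 0.0 for large samples. A numerically safer sketch (same models, same data) is
#   log10_ratio = (np.sum(np.log(model_1(data))) - np.sum(np.log(model_2(data)))) / np.log(10)
# which avoids forming the raw products altogether.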
ratios_A = compare_models('Gauss',1.0,1.0,Gauss_A,Gauss_B) # compare the models if A is true
ratios_B = compare_models('Gauss',1.2,1.0,Gauss_A,Gauss_B) # compare the models if B is true
plt.figure(2)
plt.title('Model Comparison')
plt.ylabel('Normalized Frequency')
plt.xlabel(r'$\log_{10} \left(\frac{f_A}{f_B}\right)$')
plt.hist(ratios_A,bins=Ntrials//10,alpha=0.7,normed=True,label='A is True')
plt.hist(ratios_B,bins=Ntrials//10,alpha=0.7,normed=True,label='B is True')
plt.legend()
#%% Now we want to do the same, but with Cauchy vs Gauss
mu_star = 0
sigma_star = 1
def GAUSS(x):
return gauss(x,mu_star,sigma_star)
def CAUCHY(x):
return 1.0/((np.pi*sigma_star)*(1.0+(((x-mu_star)/sigma_star)**2)))
plt.figure(3)
plt.title('Example Distributions')
x = np.linspace(-5,5,100)
plt.plot(x,GAUSS(x),'b',label='Gauss')
plt.plot(x,CAUCHY(x),'r-',label='Cauchy')
plt.legend()
ratios_Gauss = compare_models('Gauss',0.0,1.0,GAUSS,CAUCHY)
ratios_Cauchy = compare_models('Cauchy',0.0,1.0,GAUSS,CAUCHY)
plt.figure(4)
plt.title('Gauss vs Cauchy')
plt.ylabel('Normalized Frequency')
plt.xlabel(r'$\log_{10} \left(\frac{f_{Gauss}}{f_{Cauchy}}\right)$')
plt.hist(ratios_Gauss,bins=Ntrials//10,alpha=0.7,normed=True,label='Gauss is True')
plt.hist(ratios_Cauchy,bins=Ntrials//10,alpha=0.7,normed=True,label='Cauchy is True')
plt.legend()
| [
"[email protected]"
] | |
4fed593d5f025735e0ad7e586d3fa993077381f3 | 5e5252812e67393a75830b313cd0d746c912123b | /python/Calculating with Functions.py | 5da5ee3fd6e14cf8e4c65e409919b2cbc840f9a6 | [] | no_license | Konohayui/Codewars | 20dfc6b147d2afd68172d5f5824b6c8c8dfa05f1 | 97291462e7b2e42e437355fb676e9152013a5e3a | refs/heads/master | 2021-10-19T18:07:26.973873 | 2019-02-22T22:52:33 | 2019-02-22T22:52:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | '''
Modified Xueyimei's solution
for better understanding
'''
def zero(f = None):
if not f:
print("first 0")
return 0
else:
print("second 0")
return f(0)
def one(f = None):
if not f:
print("first 1")
return 1
else:
print("second 1")
return f(1)
def two(f = None):
return 2 if not f else f(2)
def three(f = None):
return 3 if not f else f(3)
def four(f = None):
return 4 if not f else f(4)
def five(f = None):
return 5 if not f else f(5)
def six(f = None):
return 6 if not f else f(6)
def seven(f = None):
return 7 if not f else f(7)
def eight(f = None):
return 8 if not f else f(8)
def nine(f = None):
return 9 if not f else f(9)
def plus(y): return lambda x: int(x+y)
def minus(y): return lambda x: int(x-y)
def times(y): return lambda x: int(x*y)
def divided_by(y): return lambda x: int(x/y)
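# Example calls (illustrative): seven(times(five())) -> five() returns 5, times(5) returns a
# one-argument function, and seven(f) returns f(7) = 35; likewise four(plus(nine())) -> 13
# and eight(minus(three())) -> 5.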
| [
"[email protected]"
] | |
d37fe7dd2eeed9cd1671d7f0927797b718040ff6 | b4166044870d1c026e86c95ac41e3e3613ee424f | /python_basic/abc035_a.py | cb3e26984addc996cbe33c23443e49ee4d0229ba | [] | no_license | nsakki55/AtCoder | 2cbb785415a7c0b9df9953ddc3706c90a5716a03 | 03c428e8eb8f24b8560d00e2388ba75509619690 | refs/heads/master | 2020-05-31T04:33:06.400697 | 2020-01-19T13:41:41 | 2020-01-19T13:41:41 | 190,099,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | w,h=map(int,input().split())
print('16:9' if 9*w == 16*h else '4:3')
| [
"[email protected]"
] | |
b55a5033285fe85e350014f77edb19070e901038 | 947acace352c4b2e719e94600f7447d1382adfe2 | /env/Scripts/painter.py | 3b388efab78cf5cdae0a57d979b2692a59b44692 | [] | no_license | skconan/autoPlayAtariBreakout | ac9de1ef3342e81b57519fe588eb88e9bb6c6695 | 3a7167f31d810951b099c30bfceed0da6dcdf12f | refs/heads/master | 2022-12-21T15:21:44.552250 | 2017-10-10T08:54:18 | 2017-10-10T08:54:18 | 106,190,698 | 2 | 2 | null | 2022-12-11T06:28:01 | 2017-10-08T16:18:04 | Python | UTF-8 | Python | false | false | 2,215 | py | #!c:\users\skconan\desktop\พี่สอนน้อง\env\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
# this demo script illustrates pasting into an already displayed
# photoimage. note that the current version of Tk updates the whole
# image every time we paste, so to get decent performance, we split
# the image into a set of tiles.
#
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
#
# painter widget
class PaintCanvas(tkinter.Canvas):
def __init__(self, master, image):
tkinter.Canvas.__init__(self, master,
width=image.size[0], height=image.size[1])
# fill the canvas
self.tile = {}
self.tilesize = tilesize = 32
xsize, ysize = image.size
for x in range(0, xsize, tilesize):
for y in range(0, ysize, tilesize):
box = x, y, min(xsize, x+tilesize), min(ysize, y+tilesize)
tile = ImageTk.PhotoImage(image.crop(box))
self.create_image(x, y, image=tile, anchor=tkinter.NW)
self.tile[(x, y)] = box, tile
self.image = image
self.bind("<B1-Motion>", self.paint)
def paint(self, event):
xy = event.x - 10, event.y - 10, event.x + 10, event.y + 10
im = self.image.crop(xy)
# process the image in some fashion
im = im.convert("L")
self.image.paste(im, xy)
self.repair(xy)
def repair(self, box):
# update canvas
dx = box[0] % self.tilesize
dy = box[1] % self.tilesize
for x in range(box[0]-dx, box[2]+1, self.tilesize):
for y in range(box[1]-dy, box[3]+1, self.tilesize):
try:
xy, tile = self.tile[(x, y)]
tile.paste(self.image.crop(xy))
except KeyError:
pass # outside the image
self.update_idletasks()
#
# main
if len(sys.argv) != 2:
print("Usage: painter file")
sys.exit(1)
root = tkinter.Tk()
im = Image.open(sys.argv[1])
if im.mode != "RGB":
im = im.convert("RGB")
PaintCanvas(root, im).pack()
root.mainloop()
| [
"[email protected]"
] | |
b8057bfd90277d7f954e3713e2198773a6ce19d8 | 78ade3f3f334593e601ea78c1e6fd8575f0fe86b | /tfx/examples/chicago_taxi_pipeline/taxi_utils_test.py | 466676a43ebd5b80bfdecd3f72a58490953f907b | [
"Apache-2.0"
] | permissive | rmothukuru/tfx | 82725e20a7d71265f791122ec3ec5d7708443761 | f46de4be29e96c123e33f90245dc5021d18f8294 | refs/heads/master | 2023-01-11T08:50:20.552722 | 2020-11-06T11:11:47 | 2020-11-06T11:11:47 | 279,754,672 | 1 | 1 | Apache-2.0 | 2020-07-15T03:37:39 | 2020-07-15T03:37:39 | null | UTF-8 | Python | false | false | 7,554 | py | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.examples.chicago_taxi_pipeline.taxi_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import types
import apache_beam as beam
import tensorflow as tf
import tensorflow_model_analysis as tfma
import tensorflow_transform as tft
from tensorflow_transform import beam as tft_beam
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import dataset_schema
from tensorflow_metadata.proto.v0 import schema_pb2
from tfx.components.trainer import executor as trainer_executor
from tfx.examples.chicago_taxi_pipeline import taxi_utils
from tfx.utils import io_utils
from tfx.utils import path_utils
class TaxiUtilsTest(tf.test.TestCase):
def setUp(self):
super(TaxiUtilsTest, self).setUp()
self._testdata_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
'components/testdata')
def testUtils(self):
key = 'fare'
xfm_key = taxi_utils._transformed_name(key)
self.assertEqual(xfm_key, 'fare_xf')
def testPreprocessingFn(self):
schema_file = os.path.join(self._testdata_path, 'schema_gen/schema.pbtxt')
schema = io_utils.parse_pbtxt_file(schema_file, schema_pb2.Schema())
feature_spec = taxi_utils._get_raw_feature_spec(schema)
working_dir = self.get_temp_dir()
transform_graph_path = os.path.join(working_dir, 'transform_graph')
transformed_examples_path = os.path.join(
working_dir, 'transformed_examples')
# Run very simplified version of executor logic.
# TODO(kestert): Replace with tft_unit.assertAnalyzeAndTransformResults.
# Generate legacy `DatasetMetadata` object. Future version of Transform
# will accept the `Schema` proto directly.
legacy_metadata = dataset_metadata.DatasetMetadata(
dataset_schema.from_feature_spec(feature_spec))
decoder = tft.coders.ExampleProtoCoder(legacy_metadata.schema)
with beam.Pipeline() as p:
with tft_beam.Context(temp_dir=os.path.join(working_dir, 'tmp')):
examples = (
p
| 'ReadTrainData' >> beam.io.ReadFromTFRecord(
os.path.join(self._testdata_path, 'csv_example_gen/train/*'),
coder=beam.coders.BytesCoder(),
# TODO(b/114938612): Eventually remove this override.
validate=False)
| 'DecodeTrainData' >> beam.Map(decoder.decode))
(transformed_examples, transformed_metadata), transform_fn = (
(examples, legacy_metadata)
| 'AnalyzeAndTransform' >> tft_beam.AnalyzeAndTransformDataset(
taxi_utils.preprocessing_fn))
# WriteTransformFn writes transform_fn and metadata to subdirectories
# tensorflow_transform.SAVED_MODEL_DIR and
# tensorflow_transform.TRANSFORMED_METADATA_DIR respectively.
# pylint: disable=expression-not-assigned
(transform_fn
|
'WriteTransformFn' >> tft_beam.WriteTransformFn(transform_graph_path))
encoder = tft.coders.ExampleProtoCoder(transformed_metadata.schema)
(transformed_examples
| 'EncodeTrainData' >> beam.Map(encoder.encode)
| 'WriteTrainData' >> beam.io.WriteToTFRecord(
os.path.join(transformed_examples_path,
'train/transformed_examples.gz'),
coder=beam.coders.BytesCoder()))
# pylint: enable=expression-not-assigned
# Verify the output matches golden output.
# NOTE: we don't verify that transformed examples match golden output.
expected_transformed_schema = io_utils.parse_pbtxt_file(
os.path.join(
self._testdata_path,
'transform/transform_graph/transformed_metadata/schema.pbtxt'),
schema_pb2.Schema())
transformed_schema = io_utils.parse_pbtxt_file(
os.path.join(transform_graph_path, 'transformed_metadata/schema.pbtxt'),
schema_pb2.Schema())
# Clear annotations so we only have to test main schema.
transformed_schema.ClearField('annotation')
for feature in transformed_schema.feature:
feature.ClearField('annotation')
self.assertEqual(transformed_schema, expected_transformed_schema)
def testTrainerFn(self):
temp_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
schema_file = os.path.join(self._testdata_path, 'schema_gen/schema.pbtxt')
output_dir = os.path.join(temp_dir, 'output_dir')
trainer_fn_args = trainer_executor.TrainerFnArgs(
train_files=os.path.join(self._testdata_path,
'transform/transformed_examples/train/*.gz'),
transform_output=os.path.join(self._testdata_path,
'transform/transform_graph'),
output_dir=output_dir,
serving_model_dir=os.path.join(temp_dir, 'serving_model_dir'),
eval_files=os.path.join(self._testdata_path,
'transform/transformed_examples/eval/*.gz'),
schema_file=schema_file,
train_steps=1,
eval_steps=1,
verbosity='INFO',
base_model=None)
schema = io_utils.parse_pbtxt_file(schema_file, schema_pb2.Schema())
training_spec = taxi_utils.trainer_fn(trainer_fn_args, schema)
estimator = training_spec['estimator']
train_spec = training_spec['train_spec']
eval_spec = training_spec['eval_spec']
eval_input_receiver_fn = training_spec['eval_input_receiver_fn']
self.assertIsInstance(estimator,
tf.estimator.DNNLinearCombinedClassifier)
self.assertIsInstance(train_spec, tf.estimator.TrainSpec)
self.assertIsInstance(eval_spec, tf.estimator.EvalSpec)
self.assertIsInstance(eval_input_receiver_fn, types.FunctionType)
# Test keep_max_checkpoint in RunConfig
self.assertGreater(estimator._config.keep_checkpoint_max, 1)
# Train for one step, then eval for one step.
eval_result, exports = tf.estimator.train_and_evaluate(
estimator, train_spec, eval_spec)
self.assertGreater(eval_result['loss'], 0.0)
self.assertEqual(len(exports), 1)
self.assertGreaterEqual(len(tf.io.gfile.listdir(exports[0])), 1)
# Export the eval saved model.
eval_savedmodel_path = tfma.export.export_eval_savedmodel(
estimator=estimator,
export_dir_base=path_utils.eval_model_dir(output_dir),
eval_input_receiver_fn=eval_input_receiver_fn)
self.assertGreaterEqual(len(tf.io.gfile.listdir(eval_savedmodel_path)), 1)
# Test exported serving graph.
with tf.compat.v1.Session() as sess:
metagraph_def = tf.compat.v1.saved_model.loader.load(
sess, [tf.saved_model.SERVING], exports[0])
self.assertIsInstance(metagraph_def, tf.compat.v1.MetaGraphDef)
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
] | |
d3f7e5a38010e610526dfe18104e43a8f58375e6 | c4ecc70400f3c4375dd4b2335673137dd36b72b4 | /venv/lib/python3.6/site-packages/xero_python/accounting/models/contact_groups.py | 44c5b00b3e94d4523d3baf225c292a9d849de367 | [
"MIT"
] | permissive | TippyFlitsUK/FarmXero | 1bb3496d164d66c940bd3012e36e1763990ff30d | 881b1e6648e927631b276e66a4c5287e4de2cbc1 | refs/heads/main | 2023-07-05T14:49:57.186130 | 2021-08-19T19:33:48 | 2021-08-19T19:33:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,766 | py | # coding: utf-8
"""
Accounting API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from xero_python.models import BaseModel
class ContactGroups(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {"contact_groups": "list[ContactGroup]"}
attribute_map = {"contact_groups": "ContactGroups"}
def __init__(self, contact_groups=None): # noqa: E501
"""ContactGroups - a model defined in OpenAPI""" # noqa: E501
self._contact_groups = None
self.discriminator = None
if contact_groups is not None:
self.contact_groups = contact_groups
@property
def contact_groups(self):
"""Gets the contact_groups of this ContactGroups. # noqa: E501
:return: The contact_groups of this ContactGroups. # noqa: E501
:rtype: list[ContactGroup]
"""
return self._contact_groups
@contact_groups.setter
def contact_groups(self, contact_groups):
"""Sets the contact_groups of this ContactGroups.
:param contact_groups: The contact_groups of this ContactGroups. # noqa: E501
:type: list[ContactGroup]
"""
self._contact_groups = contact_groups
| [
"[email protected]"
] | |
d1a3312fd06cdd1c33319651970db66ccf6feaff | 844501294ca37f1859b9aa0a258e6dd6b1bf2349 | /snipe/__init__.py | ed31be10c2f86531161797372795b4dd3a2ba4bb | [
"MIT",
"BSD-2-Clause"
] | permissive | 1ts-org/snipe | 2ac1719bc8f6b3b158c04536464f866c34051253 | ad84a629e9084f161e0fcf811dc86ba54aaf9e2b | refs/heads/master | 2021-06-04T22:32:36.038607 | 2020-03-27T05:18:36 | 2020-04-05T21:50:42 | 18,642,653 | 6 | 3 | NOASSERTION | 2019-10-08T02:02:50 | 2014-04-10T16:01:32 | Python | UTF-8 | Python | false | false | 1,377 | py | # -*- encoding: utf-8 -*-
# Copyright © 2014 the Snipe contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
| [
"[email protected]"
] | |
7b47974d7c6dff9d2d526ea771620b522c940bca | 5f4da925312f9ad4b4de36e7d1861031d3f03731 | /app.py | 964943b9a931d3e43f46b67109b0c953a4cb9dad | [] | no_license | geofferyj/PROJECT1 | 1b1c0cad5c3766589af8291b0c2635d15cfd599d | 89cdfe42e27c3176dbdce79654d1161013e041cf | refs/heads/master | 2021-01-01T12:28:51.167516 | 2020-03-02T15:59:41 | 2020-03-02T15:59:41 | 239,279,791 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,679 | py | import os, requests
from functools import wraps
from flask import Flask, session, redirect, render_template, url_for, request, flash, jsonify, make_response, abort
from flask_session import Session
from sqlalchemy import create_engine, exc
from sqlalchemy.orm import scoped_session, sessionmaker
app = Flask(__name__)
dbstring = "postgres://fabeidpsjarnlm:080cd5f8a5a7ce8dd8d6c71863c76924e7a26ebcab39588e6dc637a1741bf496@ec2-3-234-109-123.compute-1.amazonaws.com:5432/de693jkmt9rih3"
# Configure session to use filesystem
app.config['SECRET_KEY'] = "efd432e0aca715610c505c533037b95d6fb22f5692a0d33820ab7b19ef06f513"
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# Set up database
engine = create_engine(dbstring)
db = scoped_session(sessionmaker(bind=engine))
db.execute("""CREATE TABLE IF NOT EXISTS users(uid SERIAL PRIMARY KEY,
name VARCHAR NOT NULL,
username VARCHAR NOT NULL UNIQUE,
email VARCHAR NOT NULL UNIQUE,
password VARCHAR NOT NULL)""")
db.execute("""CREATE TABLE IF NOT EXISTS books(isbn VARCHAR PRIMARY KEY,
title VARCHAR NOT NULL,
author VARCHAR NOT NULL,
year INTEGER NOT NULL)""")
db.execute("""CREATE TABLE IF NOT EXISTS reviews(id SERIAL PRIMARY KEY,
uid INTEGER NOT NULL REFERENCES users(uid),
isbn VARCHAR NOT NULL REFERENCES books(isbn),
review VARCHAR NOT NULL,
rating INTEGER CHECK(rating > 0 AND rating <= 5) NOT NULL,
review_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT unique_uid_isbn UNIQUE(uid,isbn)
)""")
db.commit()
def login_required(func):
@wraps(func)
def wrapper(*args, **kwargs):
if 'uid' not in session:
return redirect(url_for('login', next=request.url))
return func(*args, **kwargs)
return wrapper
@app.route("/", methods = ["POST", "GET"])
@login_required
def index():
if request.method == "POST":
search = request.form.get("search")
data = db.execute("SELECT * FROM books WHERE title ILIKE :search OR author ILIKE :search OR isbn ILIKE :search", {"search": '%' + search + '%'}).fetchall()
if data:
return render_template('index.html', data=data)
else:
flash("Sorry No match was found for your search")
return render_template('index.html', data=data)
return render_template('index.html')
@app.route("/login/", methods = ["POST", "GET"])
def login():
if request.method == "POST":
form = request.form
email = form["email"]
password = form["password"]
next_url = form["next"]
user = db.execute("SELECT uid FROM users WHERE email = :email", {"email": email}).fetchone()
if user:
session["uid"] = user.uid
if next_url:
flash("Login successful")
return redirect(next_url)
return redirect(url_for("index"))
else:
flash("user not found")
return redirect(url_for("login"))
return render_template("login.html")
@app.route("/logout/")
def logout():
session.pop("uid", None)
return redirect(url_for("login"))
@app.route("/signup/", methods = ["POST", "GET"])
def signup():
if request.method == "POST":
form = request.form
username = form["username"]
name = form["name"]
email = form["email"]
password = form["password"]
try:
db.execute("INSERT INTO users(name, username, email, password) VALUES(:name, :username, :email, :password)", {
"name": name, "username": username, "email": email, "password": password})
db.commit()
return redirect(url_for('login'))
except exc.IntegrityError:
flash('Username Already exists')
return redirect(url_for('signup'))
return render_template('signup.html')
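# Book page: shows stored details plus Goodreads stats and accepts one review per user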
@app.route("/book/<isbn>/", methods = ["GET", "POST"])
@login_required
def book_details(isbn):
if request.method == "POST":
review = request.form.get("review")
rating = request.form.get("rating")
uid = session["uid"]
try:
db.execute("INSERT INTO reviews (uid, isbn, review, rating) VALUES(:uid, :isbn, :review, :rating)", {"uid": uid, "isbn": isbn, "review": review, "rating": rating})
db.commit()
except exc.IntegrityError:
            flash('You have already reviewed this book')
return redirect(url_for('book_details', isbn=isbn))
reviews = db.execute("SELECT name, review, rating FROM users, reviews WHERE users.uid = reviews.uid AND reviews.isbn = :isbn ORDER BY reviews.review_date", {"isbn":isbn})
details = db.execute("SELECT * FROM books WHERE isbn = :isbn", {"isbn":isbn}).fetchone()
res = requests.get("https://www.goodreads.com/book/review_counts.json", params={"key": "e9hh8mpJf995M7SzMfst5A", "isbns": isbn}).json()
for i in res['books']:
gr_data = i
return render_template("book_details.html", details=details, reviews=reviews, gr_data=gr_data)
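# JSON API: returns title, author, year, review count and average rating for an ISBN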
@app.route("/api/<isbn>/", methods=['GET'])
def api(isbn):
if request.method == 'GET':
book = db.execute('SELECT * FROM books WHERE isbn = :isbn', {'isbn': isbn}).fetchone()
if book:
rating = db.execute("SELECT ROUND( AVG(rating), 2) FROM reviews WHERE isbn = :isbn", {'isbn':isbn}).fetchone()
review = db.execute("SELECT COUNT(review) FROM reviews WHERE isbn = :isbn", {'isbn':isbn}).fetchone()
for i in rating:
if i:
avg_rating = float(i)
else:
avg_rating = 0
for i in review:
if i:
review_count = int(i)
else:
review_count = 0
return make_response(jsonify({
"title": book.title,
"author": book.author,
"year": book.year,
"isbn": book.isbn,
"review_count": review_count,
"average_score": avg_rating,
}))
else:
return abort(404)
@app.shell_context_processor
def make_shell_context():
return {'db': db}
if __name__ == "__main__":
app.debug = True
app.run() | [
"[email protected]"
] | |
38c7b8ae0a1fe7c519e2cb5f2fca8b9894080414 | bcc00e164c3d20b3c0ac1099741a71491af0e302 | /.history/neotropical_datasetAPI_20191014144558.py | 7ff867bf62e30070273816b13537d6b29785d50f | [] | no_license | manasa151/Toshokan | cff2af75c480bd629b49ce39c17857b316102e45 | 192c7eaf8523e38fa5821affdec91eb60ae5b7ce | refs/heads/master | 2020-08-05T14:56:10.285024 | 2019-10-15T17:07:09 | 2019-10-15T17:07:09 | 212,586,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,422 | py | import csv
from os import makedirs
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import NoSuchElementException
import time
from io import BytesIO
from PIL import _imaging
from PIL import Image
import requests
import datetime
import os
from os.path import getsize, join
import imghdr
def search_bySpecies():
species = 'Semioptera wallacii'
with open('request_species.csv', 'r') as csv_file:
csv_reader = csv.reader(csv_file)
next(csv_reader)
for row in csv_reader:
if species == row[2]:
# makedirs(f'testing/{row[1]}', exist_ok=True)
makedirs(f'{row[0]}/{row[1]}/{row[2]}', exist_ok=True)
# download_species_byOrder(row[0], row[1], row[2])
print(row)
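# Walks request_species.csv, creates family/order/species folders and downloads each listed species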
def NEW_download_from_CSV():
with open('request_species.csv', 'r') as csv_file:
csv_reader = csv.reader(csv_file)
next(csv_reader)
for row in csv_reader:
makedirs(f'{row[0]}/{row[1]}/{row[2]}', exist_ok=True)
download_species_byOrder(row[0], row[1], row[2], row[3])
time.sleep(10)
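# Scrapes the eBird media page for one species and saves every photo it finds locally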
def download_species_byOrder(bird_family, bird_order, bird_species, tax_code):
# initate web driver
ebird_url = f'https://ebird.org/species/{tax_code}'
chromeDriver = 'C:\\Users\\jmentore\\Documents\\Selenium Chrome Driver\\chromedriver.exe'
driver = webdriver.Chrome(executable_path=chromeDriver)
driver.get(ebird_url)
driver.maximize_window()
time.sleep(3)
# Clicks the view all link
view_all = driver.find_element(
By.XPATH, '/html/body/div/div[7]/div/div/div[2]/div[1]/a')
time.sleep(5)
view_all.click()
ids = driver.find_elements_by_tag_name('img')
sci_name = bird_species
family = bird_family
order = bird_order
ebird_counter = 0
file_ext = '.jpg'
show_more = driver.find_element_by_id('show_more')
while show_more.is_displayed():
try:
for ii in ids:
download_link = ii.get_attribute('src')
r = requests.get(download_link)
img = Image.open(BytesIO(r.content))
ebird_counter = ebird_counter + 1
img.save(
f'{family}/{order}/{sci_name}/{sci_name}-{ebird_counter}{file_ext}')
time.sleep(5)
print(download_link)
time.sleep(5)
driver.find_element_by_xpath('//*[@id="show_more"]').click()
except Exception as e:
            print(e)
time.sleep(1)
if not show_more.is_displayed():
print(f'Total url extracted: {ebird_counter}')
driver.quit()
def post_safe(url, params):
done = False
tries_left = 3
messages = []
while tries_left and not done:
tries_left -= 1
try:
response = requests.post(url, data=params)
done = True
except Exception as e:
messages.append(e)
time.sleep(1)
if not done:
        output = "%s\n" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M'),)
output += "requests() failed 3 times:\n"
for m in messages:
output += m+"\n"
print(output)
return done
def test(tax_code):
ebird_url = f'https://ebird.org/species/{tax_code}'
chromeDriver = 'C:\\Users\\jmentore\\Documents\\Selenium Chrome Driver\\chromedriver.exe'
driver = webdriver.Chrome(executable_path=chromeDriver)
driver.get(ebird_url)
driver.maximize_window()
time.sleep(3)
# Clicks the view all link
view_all = driver.find_element(
By.XPATH, '/html/body/div/div[7]/div/div/div[2]/div[1]/a')
time.sleep(5)
view_all.click()
NEW_download_from_CSV()
# search_bySpecies()
test('walsta2')
# search_byTaxcode('zimant1')
| [
"[email protected]"
] | |
fc9c235e3d4f8607eaf02246e0cb7385120abb75 | 17c280ade4159d4d8d5a48d16ba3989470eb3f46 | /18/mc/ExoDiBosonResonances/EDBRTreeMaker/test/crab3_analysisM4500_R_0-7.py | ae645a54658b7cd536c87077e75805da8681f2d2 | [] | no_license | chengchen1993/run2_ntuple | 798ff18489ff5185dadf3d1456a4462e1dbff429 | c16c2b203c05a3eb77c769f63a0bcdf8b583708d | refs/heads/master | 2021-06-25T18:27:08.534795 | 2021-03-15T06:08:01 | 2021-03-15T06:08:01 | 212,079,804 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,201 | py | from WMCore.Configuration import Configuration
name = 'WWW'
steam_dir = 'xulyu'
config = Configuration()
config.section_("General")
config.General.requestName = 'M4500_R0-7_off'
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.inputFiles = ['Autumn18_V19_MC_L1FastJet_AK4PFchs.txt','Autumn18_V19_MC_L2Relative_AK4PFchs.txt','Autumn18_V19_MC_L3Absolute_AK4PFchs.txt','Autumn18_V19_MC_L1FastJet_AK8PFchs.txt','Autumn18_V19_MC_L2Relative_AK8PFchs.txt','Autumn18_V19_MC_L3Absolute_AK8PFchs.txt','Autumn18_V19_MC_L1FastJet_AK8PFPuppi.txt','Autumn18_V19_MC_L2Relative_AK8PFPuppi.txt','Autumn18_V19_MC_L3Absolute_AK8PFPuppi.txt','Autumn18_V19_MC_L1FastJet_AK4PFPuppi.txt','Autumn18_V19_MC_L2Relative_AK4PFPuppi.txt','Autumn18_V19_MC_L3Absolute_AK4PFPuppi.txt' ]
#config.JobType.inputFiles = ['PHYS14_25_V2_All_L1FastJet_AK4PFchs.txt','PHYS14_25_V2_All_L2Relative_AK4PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK4PFchs.txt','PHYS14_25_V2_All_L1FastJet_AK8PFchs.txt','PHYS14_25_V2_All_L2Relative_AK8PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK8PFchs.txt']
# Name of the CMSSW configuration file
#config.JobType.psetName = 'bkg_ana.py'
config.JobType.psetName = 'analysis.py'
#config.JobType.allowUndistributedCMSSW = True
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
#config.Data.inputDataset = '/WJetsToLNu_13TeV-madgraph-pythia8-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM'
config.Data.inputDataset = '/WkkToWRadionToWWW_M4500-R0-7_TuneCP5_13TeV-madgraph/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM'
#config.Data.inputDBS = 'global'
config.Data.inputDBS = 'global'
config.Data.splitting = 'LumiBased'
config.Data.unitsPerJob =5
config.Data.totalUnits = -1
config.Data.publication = False
config.Data.outLFNDirBase='/store/group/phys_b2g/chench/cc/'#chench/'# = '/store/group/dpg_trigger/comm_trigger/TriggerStudiesGroup/STEAM/' + steam_dir + '/' + name + '/'
# This string is used to construct the output dataset name
config.Data.outputDatasetTag = 'M4500_R0-7_off'
config.section_("Site")
# Where the output files will be transmitted to
config.Site.storageSite = 'T2_CH_CERN'
| [
"[email protected]"
] | |
10d919ed0109a6401f4dd3ac01502930a7d4097e | 80383bd5f39fd7eacff50f4b0fcc3c5e7c8329e0 | /reddwarf/tests/api/instances_delete.py | 9bef213d56cfc68c4ac1598aaffd3bb0d1ab7020 | [] | no_license | imsplitbit/reddwarf | 646409a2365459515b37f70445c0acb22610898d | 2f50d9a12a390c6016aad6a612a14bd6c34b66fd | refs/heads/master | 2020-05-19T15:45:26.733102 | 2013-01-08T21:37:10 | 2013-01-08T21:37:10 | 2,270,590 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,257 | py | import time
from proboscis import after_class
from proboscis import before_class
from proboscis import test
from proboscis.asserts import *
from proboscis.decorators import time_out
from reddwarfclient import exceptions
from reddwarf.tests.util import create_dbaas_client
from reddwarf.tests.util import poll_until
from reddwarf.tests.util import test_config
from reddwarf.tests.util.users import Requirements
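# Shared helpers for creating, polling and deleting test instances through the dbaas client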
class TestBase(object):
def set_up(self):
reqs = Requirements(is_admin=True)
self.user = test_config.users.find_user(reqs)
self.dbaas = create_dbaas_client(self.user)
def create_instance(self, name, size=1):
result = self.dbaas.instances.create(name, 1, {'size': size}, [], [])
return result.id
def wait_for_instance_status(self, instance_id, status="ACTIVE"):
poll_until(lambda: self.dbaas.instances.get(instance_id),
lambda instance: instance.status == status,
time_out=10)
def wait_for_instance_task_status(self, instance_id, description):
poll_until(lambda: self.dbaas.management.show(instance_id),
lambda instance: instance.task_description == description,
time_out=10)
def is_instance_deleted(self, instance_id):
while True:
try:
instance = self.dbaas.instances.get(instance_id)
except exceptions.NotFound:
return True
time.sleep(.5)
def get_task_info(self, instance_id):
instance = self.dbaas.management.show(instance_id)
return instance.status, instance.task_description
def delete_instance(self, instance_id, assert_deleted=True):
instance = self.dbaas.instances.get(instance_id)
instance.delete()
if assert_deleted:
assert_true(self.is_instance_deleted(instance_id))
def delete_errored_instance(self, instance_id):
self.wait_for_instance_status(instance_id, 'ERROR')
status, desc = self.get_task_info(instance_id)
assert_equal(status, "ERROR")
self.delete_instance(instance_id)
@test(runs_after_groups=["services.initialize"],
groups=['dbaas.api.instances.delete'])
class ErroredInstanceDelete(TestBase):
"""
Test that an instance in an ERROR state is actually deleted when delete
is called.
"""
@before_class
def set_up(self):
"""Create some flawed instances."""
super(ErroredInstanceDelete, self).set_up()
# Create an instance that fails during server prov.
self.server_error = self.create_instance('test_SERVER_ERROR')
# Create an instance that fails during volume prov.
self.volume_error = self.create_instance('test_VOLUME_ERROR', size=9)
# Create an instance that fails during DNS prov.
#self.dns_error = self.create_instance('test_DNS_ERROR')
        # Create an instance that fails while it is being deleted the first time.
self.delete_error = self.create_instance('test_ERROR_ON_DELETE')
@test
@time_out(20)
def delete_server_error(self):
self.delete_errored_instance(self.server_error)
@test
@time_out(20)
def delete_volume_error(self):
self.delete_errored_instance(self.volume_error)
@test(enabled=False)
@time_out(20)
def delete_dns_error(self):
self.delete_errored_instance(self.dns_error)
@test
@time_out(20)
def delete_error_on_delete_instance(self):
id = self.delete_error
self.wait_for_instance_status(id, 'ACTIVE')
self.wait_for_instance_task_status(id, 'No tasks for the instance.')
instance = self.dbaas.management.show(id)
assert_equal(instance.status, "ACTIVE")
assert_equal(instance.task_description, 'No tasks for the instance.')
# Try to delete the instance. This fails the first time due to how
# the test fake is setup.
self.delete_instance(id, assert_deleted=False)
instance = self.dbaas.management.show(id)
assert_equal(instance.status, "SHUTDOWN")
assert_equal(instance.task_description, "Deleting the instance.")
# Try a second time. This will succeed.
self.delete_instance(id)
| [
"[email protected]"
] | |
2fbb5fdd65ad8826645ab7e7e699ebefa0f72824 | 22cd0e79f4dd999e40a1d6ff829f8ef4f8d9df9a | /samples/kuka_maze_planner/devel/lib/python2.7/dist-packages/rll_msgs/msg/_DefaultMoveIfaceActionResult.py | 8cdca9ad71548b62110f5d3b6c4fddf5b2278554 | [] | no_license | anupamkaul/robond | b3f9cefbf0205d3c1db14b9982a95f61644a49b1 | f5b1e4323c0bf0ccd7d78dbef8c9e0ddf1a85d17 | refs/heads/master | 2021-06-21T14:06:09.368041 | 2020-11-30T19:51:36 | 2020-11-30T19:51:36 | 146,023,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,018 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from rll_msgs/DefaultMoveIfaceActionResult.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import rll_msgs.msg
import genpy
import actionlib_msgs.msg
import std_msgs.msg
class DefaultMoveIfaceActionResult(genpy.Message):
_md5sum = "1eb06eeff08fa7ea874431638cb52332"
_type = "rll_msgs/DefaultMoveIfaceActionResult"
_has_header = True #flag to mark the presence of a Header object
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
Header header
actionlib_msgs/GoalStatus status
DefaultMoveIfaceResult result
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: actionlib_msgs/GoalStatus
GoalID goal_id
uint8 status
uint8 PENDING = 0 # The goal has yet to be processed by the action server
uint8 ACTIVE = 1 # The goal is currently being processed by the action server
uint8 PREEMPTED = 2 # The goal received a cancel request after it started executing
# and has since completed its execution (Terminal State)
uint8 SUCCEEDED = 3 # The goal was achieved successfully by the action server (Terminal State)
uint8 ABORTED = 4 # The goal was aborted during execution by the action server due
# to some failure (Terminal State)
uint8 REJECTED = 5 # The goal was rejected by the action server without being processed,
# because the goal was unattainable or invalid (Terminal State)
uint8 PREEMPTING = 6 # The goal received a cancel request after it started executing
# and has not yet completed execution
uint8 RECALLING = 7 # The goal received a cancel request before it started executing,
# but the action server has not yet confirmed that the goal is canceled
uint8 RECALLED = 8 # The goal received a cancel request before it started executing
# and was successfully cancelled (Terminal State)
uint8 LOST = 9 # An action client can determine that a goal is LOST. This should not be
# sent over the wire by an action server
#Allow for the user to associate a string with GoalStatus for debugging
string text
================================================================================
MSG: actionlib_msgs/GoalID
# The stamp should store the time at which this goal was requested.
# It is used by an action server when it tries to preempt all
# goals that were requested before a certain time
time stamp
# The id provides a way to associate feedback and
# result message with specific goal requests. The id
# specified must be unique.
string id
================================================================================
MSG: rll_msgs/DefaultMoveIfaceResult
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
"""
__slots__ = ['header','status','result']
_slot_types = ['std_msgs/Header','actionlib_msgs/GoalStatus','rll_msgs/DefaultMoveIfaceResult']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,status,result
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(DefaultMoveIfaceActionResult, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.status is None:
self.status = actionlib_msgs.msg.GoalStatus()
if self.result is None:
self.result = rll_msgs.msg.DefaultMoveIfaceResult()
else:
self.header = std_msgs.msg.Header()
self.status = actionlib_msgs.msg.GoalStatus()
self.result = rll_msgs.msg.DefaultMoveIfaceResult()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs))
_x = self.status.goal_id.id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_B().pack(self.status.status))
_x = self.status.text
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.status is None:
self.status = actionlib_msgs.msg.GoalStatus()
if self.result is None:
self.result = rll_msgs.msg.DefaultMoveIfaceResult()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.goal_id.id = str[start:end].decode('utf-8')
else:
self.status.goal_id.id = str[start:end]
start = end
end += 1
(self.status.status,) = _get_struct_B().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.text = str[start:end].decode('utf-8')
else:
self.status.text = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs))
_x = self.status.goal_id.id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_B().pack(self.status.status))
_x = self.status.text
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.status is None:
self.status = actionlib_msgs.msg.GoalStatus()
if self.result is None:
self.result = rll_msgs.msg.DefaultMoveIfaceResult()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.goal_id.id = str[start:end].decode('utf-8')
else:
self.status.goal_id.id = str[start:end]
start = end
end += 1
(self.status.status,) = _get_struct_B().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.text = str[start:end].decode('utf-8')
else:
self.status.text = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
_struct_2I = None
def _get_struct_2I():
global _struct_2I
if _struct_2I is None:
_struct_2I = struct.Struct("<2I")
return _struct_2I
| [
"[email protected]"
] | |
ea67b257d486b0630025a0e3b1ae137a45ba25a4 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-3533.py | 9879255d0fdf486ca055ce49640b6af70af43a61 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,754 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
        for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
| [
"[email protected]"
] | |
d6f9aae369f645e06dd5a81e0da92deb03d22e25 | 350d6b7246d6ef8161bdfccfb565b8671cc4d701 | /Insert Interval.py | fdec8d8c9a46850b59d3f652b43f6c85e069796d | [] | no_license | YihaoGuo2018/leetcode_python_2 | 145d5fbe7711c51752b2ab47a057b37071d2fbf7 | 2065355198fd882ab90bac6041c1d92d1aff5c65 | refs/heads/main | 2023-02-14T14:25:58.457991 | 2021-01-14T15:57:10 | 2021-01-14T15:57:10 | 329,661,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,191 | py | class Solution:
def insert(self, intervals: 'List[Interval]', newInterval: 'Interval') -> 'List[Interval]':
# init data
new_start, new_end = newInterval
idx, n = 0, len(intervals)
output = []
# add all intervals starting before newInterval
while idx < n and new_start > intervals[idx][0]:
output.append(intervals[idx])
idx += 1
# add newInterval
# if there is no overlap, just add the interval
if not output or output[-1][1] < new_start:
output.append(newInterval)
# if there is an overlap, merge with the last interval
else:
output[-1][1] = max(output[-1][1], new_end)
# add next intervals, merge with newInterval if needed
while idx < n:
interval = intervals[idx]
start, end = interval
idx += 1
# if there is no overlap, just add an interval
if output[-1][1] < start:
output.append(interval)
# if there is an overlap, merge with the last interval
else:
output[-1][1] = max(output[-1][1], end)
return output | [
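# Runs in O(n) time: every existing interval is visited at most once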
"[email protected]"
] | |
139a215386fd73c93520824d0d8e1a4d7e908698 | 7dbbde919349fdc3651eff1a7be744aed25eea30 | /scripts/multiprocessing_example.py | 13f78ecf32de52baab2847baa7991b1bf9d173e0 | [] | no_license | adrn/scicoder-notebooks | 06ca10a12c4f89a5c2e4062c70b6e4eb3bc0b1b0 | 7c8a5850200c3fb78aca1c336af7ed47ad52c52a | refs/heads/master | 2021-03-12T21:37:39.597404 | 2013-07-08T04:37:23 | 2013-07-08T04:37:23 | 11,226,565 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | # coding: utf-8
""" Demonstration of the built-in multiprocessing package """
from __future__ import division, print_function
__author__ = "adrn <[email protected]>"
# Standard library
import multiprocessing
# Define a 'task' or 'worker' function -- something function that you
# need to call over and over and over
def task(x):
return x**2
if __name__ == "__main__":
# a pool is like a magical box that knows how to execute things on
# multiple CPUs
pool = multiprocessing.Pool(processes=4)
# this will run the function task() on all values in range(10000)
result = pool.map(task, range(10000))
print(result) | [
"[email protected]"
] | |
5a41960a55928dd63bb70c8a7008554e17a3496e | fa04e703556632fb6f513181070a496294b4f0dd | /patchnotifyer.py | e9804345e8e3f1936beb087a831e11a4efd27754 | [] | no_license | mhagander/patchnotifyer | a377d741c3837cbe6e5c8026ceced9a0a4b4c056 | 14c9b1d14780460645807227176db01aeef18267 | refs/heads/master | 2021-01-11T15:01:35.217795 | 2017-01-31T10:03:56 | 2017-01-31T10:03:56 | 80,282,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,583 | py | #!/usr/bin/env python3
import argparse
from io import StringIO
import socket
import smtplib
from email.mime.text import MIMEText
import apt_pkg
class _DevNullProgress(object):
# Need this class to make the apt output not go to the console
def update(self, percent = None):
pass
def done(self, item = None):
pass
def stop(self):
pass
def pulse(self, owner = None):
pass
def update_status(self, a, b, c, d):
pass
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Patch status notifyer")
parser.add_argument('--fromaddr', type=str, help='From email address')
parser.add_argument('--toaddr', type=str, help='To email address')
parser.add_argument('--subject', type=str, help='Subject', default="Patch status on {0}".format(socket.gethostname()))
parser.add_argument('--ignorepkg', type=str, nargs='+', default='Ignore packages by exact name')
args = parser.parse_args()
if args.fromaddr and not args.toaddr:
parser.error("Can't specify from without to")
if args.toaddr and not args.fromaddr:
parser.error("Can't specify to without from")
status = StringIO()
apt_pkg.init()
# Turn off cache to avoid concurrency issues
apt_pkg.config.set("Dir::Cache::pkgcache","")
# "apt-get update"
sl = apt_pkg.SourceList()
sl.read_main_list()
tmpcache = apt_pkg.Cache(_DevNullProgress())
tmpcache.update(_DevNullProgress(), sl)
# Now do the actual check
cache = apt_pkg.Cache(_DevNullProgress())
depcache = apt_pkg.DepCache(cache)
depcache.read_pinfile()
depcache.init()
if depcache.broken_count > 0:
status.write("Depcache broken count is {0}\n\n".format(depcache.broken_count))
depcache.upgrade(True)
if depcache.del_count > 0:
status.write("Dist-upgrade generated {0} pending package removals!\n\n".format(depcache.del_count))
for pkg in cache.packages:
if depcache.marked_install(pkg) or depcache.marked_upgrade(pkg):
if pkg.name in args.ignorepkg:
continue
status.write("Package {0} requires an update\n".format(pkg.name))
if status.tell() > 0:
if args.fromaddr:
# Send email!
msg = MIMEText(status.getvalue())
msg['Subject'] = args.subject
msg['From'] = args.fromaddr
msg['To'] = args.toaddr
s = smtplib.SMTP('localhost')
s.send_message(msg)
s.quit()
else:
print(status.getvalue())
| [
"[email protected]"
] | |
8b51c987b63b3177ed110cb9eba833dc3e9b1891 | c1cd6a7a446934c428bc4fbf988f8d6680460488 | /dist/restclient.app/Contents/Resources/py2app/bootstrap/path_inject.py | ace081845b5061c6e400919901db030b61234c9f | [] | no_license | devvmh/restclient-py2app | ed016d1763ee99779388c8700dfb9c129cf8ce1a | 6826f6cb81c08a36b30878683a58e4f7a18f5041 | refs/heads/master | 2021-01-10T12:01:31.411373 | 2016-01-18T03:34:02 | 2016-01-18T03:34:02 | 49,850,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | /Users/devin/git/restclient/venv/lib/python2.7/site-packages/py2app/bootstrap/path_inject.py | [
"[email protected]"
] | |
eb86c5d2bcdc85721b23e67fb5747812f0c969e5 | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/providerhub/v20201120/get_skus_nested_resource_type_first.py | 48f43b29f82376eb8e0e141895212a4bf923acac | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,018 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetSkusNestedResourceTypeFirstResult',
'AwaitableGetSkusNestedResourceTypeFirstResult',
'get_skus_nested_resource_type_first',
]
@pulumi.output_type
class GetSkusNestedResourceTypeFirstResult:
def __init__(__self__, id=None, name=None, properties=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.SkuResourceResponseProperties':
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetSkusNestedResourceTypeFirstResult(GetSkusNestedResourceTypeFirstResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSkusNestedResourceTypeFirstResult(
id=self.id,
name=self.name,
properties=self.properties,
type=self.type)
def get_skus_nested_resource_type_first(nested_resource_type_first: Optional[str] = None,
provider_namespace: Optional[str] = None,
resource_type: Optional[str] = None,
sku: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSkusNestedResourceTypeFirstResult:
"""
Use this data source to access information about an existing resource.
:param str nested_resource_type_first: The first child resource type.
:param str provider_namespace: The name of the resource provider hosted within ProviderHub.
:param str resource_type: The resource type.
:param str sku: The SKU.
"""
__args__ = dict()
__args__['nestedResourceTypeFirst'] = nested_resource_type_first
__args__['providerNamespace'] = provider_namespace
__args__['resourceType'] = resource_type
__args__['sku'] = sku
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:providerhub/v20201120:getSkusNestedResourceTypeFirst', __args__, opts=opts, typ=GetSkusNestedResourceTypeFirstResult).value
return AwaitableGetSkusNestedResourceTypeFirstResult(
id=__ret__.id,
name=__ret__.name,
properties=__ret__.properties,
type=__ret__.type)
| [
"[email protected]"
] | |
147eabb86c23fd8281b4ba09190388f7a3989371 | 549afd4c4c5c9b401a2643210d6a4d75b7aaa308 | /src/optlang_operations.py | 17e0c0f49974128cd21393d536cc7587fb94db62 | [] | no_license | OGalOz/FBA_Learn_Python | ff6c4ab5335b8f0cbfead5dc8da7392429503235 | 2df9b6fd128db8af1f97f6d12e9ab34ec5268a49 | refs/heads/master | 2023-05-27T03:06:27.671342 | 2019-10-07T18:19:34 | 2019-10-07T18:19:34 | 210,938,075 | 0 | 2 | null | 2019-10-03T21:48:04 | 2019-09-25T20:49:44 | Python | UTF-8 | Python | false | false | 2,749 | py | # In this file we make use of optlang
# More info here: https://optlang.readthedocs.io/en/latest/
from optlang import Model, Variable, Constraint, Objective
# You can declare the symbolic variables here with upper and lower bounds:
'''
x1 = Variable('x1', lb=0, ub = 100)
'''
# S is the stoichiometric matrix ("stoichiomatrix") as passed in by numpy
# The objective function is the last variable (v)
# The upper bound needs to be an int
# There is one constraint per compound
# flux_bounds has one entry per reaction; it is a 2-D list:
# flux_bounds = [[lower bounds],[upper bounds]]
def stoichiomatrix_solution(S, flux_bounds, objective_index, objective_direction):
#We make a variable 'v-(index)' for each reaction (column) in the matrix:
variables = make_variables(S, flux_bounds)
constraints = make_constraints(S, variables)
obj = make_objective(objective_index, objective_direction, variables)
model= Model(name='Stoichiomatrix')
model.objective = obj
model.add(constraints)
status = model.optimize()
return [status, model]
# This function makes the variables
def make_variables(S, flux_bounds):
variables = []
row_1 = S[0]
for i in range(len(row_1)):
v = Variable('v-' + str(i+1), lb = flux_bounds[0][i], ub = flux_bounds[1][i])
variables.append(v)
print(variables)
return variables
def make_constraints(S, variables):
#Creating the constraints, one per compound:
constraints = []
for row in S:
constraint_sum = 0
for i in range(len(row)):
constraint_sum += row[i]*variables[i]
c = Constraint(constraint_sum, lb=0, ub =0)
constraints.append(c)
return constraints
def make_objective(objective_index, objective_direction, variables):
#The objective is just to either Maximize or Minimize a Variable.
obj_var = variables[objective_index]
print("Objective variable name: " + obj_var.name)
obj = Objective(variables[objective_index], direction = objective_direction)
return obj
def model_print(model):
print("status:", model.status)
#print("objective variable name: " + model.objective.name)
print("objective value:", model.objective.value)
print("----------")
print(model.variables.items())
for var_name, var in model.variables.items():
print(var_name, "=", var.primal)
def make_fluxes(model):
#fluxes holds the names and their values, then we sort by that and make the fluxes array
fluxes = []
for var_name, var in model.variables.items():
fluxes.append([int(var_name[2:]),var.primal])
fluxes.sort(key = lambda fluxes: fluxes[0])
flux_array = []
for flux in fluxes:
flux_array.append(flux[1])
return flux_array
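# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original module).
# The toy matrix, bounds and objective index are made-up values; plain nested
# lists are enough here because the helpers above only index into S.
if __name__ == '__main__':
    S_toy = [[1, -1, 0],          # 2 compounds (rows) x 3 reactions (columns)
             [0, 1, -1]]
    toy_bounds = [[0, 0, 0], [10, 10, 10]]   # [lower bounds], [upper bounds]
    status, model = stoichiomatrix_solution(S_toy, toy_bounds,
                                            objective_index=2,
                                            objective_direction='max')
    model_print(model)
    print(make_fluxes(model))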
| [
"[email protected]"
] | |
1f72205dad514935455fd3be194807f4ebba7730 | 7a10bf8748c7ce9c24c5461c21b5ebf420f18109 | /ml_training/PythonCode/P4_Pandas_Basics.py | 76770dbb25d18994fa84bd5163e320d499c538b4 | [] | no_license | VishalChak/machine_learning | aced4b4bf65bbbd08c966a2f028f217a918186d5 | c6e29abe0509a43713f35ebf53da29cd1f0314c1 | refs/heads/master | 2021-06-15T07:13:56.583097 | 2019-10-05T06:01:58 | 2019-10-05T06:01:58 | 133,164,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,051 | py |
# Import Library
import pandas as pd
# Read data from a url
url = "https://vincentarelbundock.github.io/Rdatasets/csv/datasets/HairEyeColor.csv"
df = pd.read_csv(url)
# Type of the df object
type(df)
# Column names
list(df)
# Show first few rows
df.head()
# Show last few rows
df.tail()
# Data type of each column
df.dtypes
# Return number of columns and rows of dataframe
df.shape
# Number of rows
len(df.index)
# Number of columns
len(df.columns)
# Basic statistics
df.describe()
# Extract first three rows
df[0:3]
# or
#df.iloc[:3]
# Filter for black hair
#df[df['Hair']=="Black"]
# or
df.query("Hair =='Black'")
# Filter for males who have black hair
#df[(df['Hair']=="Black") & (df["Sex"]=="Male")]
# or
df.query("Hair == 'Black' & Sex =='Male'")
#WAP (write a program) to filter for those who have brown eyes or black hair
#Ans:
z = df[(df['Hair']=="Black") | (df["Eye"]=="Brown")]
# or
z = df.query("Hair == 'Black' | Eye =='Brown'")
z.head(6)
# Filter for eye color of blue, hazel and green
df[df.Eye.isin(['Blue','Hazel','Green'])].head()
# Select one column
df[["Eye"]].head()
# or
df.Eye.head()
# Select two columns
df[["Eye","Sex"]].head()
# Unique Eye colors
df["Eye"].unique()
# Maximum of the "Freq" column
df.Freq.max()
# Call functions on multiple columns
import numpy as np
pd.DataFrame({'Max_freq': [df.Freq.max()], 'Min_freq': [df.Freq.min()], 'Std_freq': [np.std(df.Freq)]})
# Maximum Frequency by Sex
df.groupby("Sex").agg({"Freq":"max"})
#Display max Freq by color
df.groupby("Eye").agg({"Freq":"max"})
# Count by Eye color and Sex
df.groupby(["Eye","Sex"]).agg({"Freq":"count"}).rename(columns={"Freq":"Count"})
# Call functions for grouping
df.assign(Gt50 = (df.Freq > 50)).groupby("Gt50").agg({"Gt50":"count"}).rename(columns ={"Gt50":"Count"})
# Do the analysis on selected rows only
pd.DataFrame({'Max_freq': [df[0:10].Freq.max()], 'Min_freq': [df[0:10].Freq.min()], 'Std_freq': [np.std(df[0:10].Freq)]})
# Remove a column
df.drop('Unnamed: 0', 1).head()
# Return the first occurance
df.query("Eye == 'Blue'")[:1]
# Return the last occurance
df.query("Eye == 'Blue'")[-1:]
# Return a count
df[df.Eye.isin(['Blue','Hazel']) & (df.Sex=="Male")].shape[0]
# Count for each group
df[df.Eye.isin(['Blue','Hazel']) & (df.Sex=="Male")].groupby(["Eye","Sex"]).agg({"Freq":"count"}).rename(columns={"Freq":"Count"})
# Order in ascending order
df.sort_values(by='Freq').tail(6)
# Order in descending order
df.sort_values(by='Freq', ascending = False).tail(6)
# "Freq" in descending and "Eye" in ascending
df.sort_values(by=['Freq','Eye'], ascending = [False,True]).tail(6)
# Rename columns
df.rename(columns = {"Freq":"Frequency","Eye":"Eye_Color"}).tail()
# Unique rows
df[["Eye","Sex"]].drop_duplicates()
# Create new column
df.assign(Eye_Hair =df.Eye + df.Hair)[["Eye","Hair","Eye_Hair"]].head()
| [
"[email protected]"
] | |
d332b3dd09b91c5e952ba6af93587d2050fea535 | f20d9ff8aafb8ef2d3e4a14b1d055be7c1a1e0db | /create_database.py | 1c4848520b8d61043baad8f24786a792f0988323 | [] | no_license | HopeCheung/menu_api | 25fee2d807e86245bc547c753a8bc156d99b9962 | bfb410bfe5cd686e237f937f64bac198e178c75e | refs/heads/master | 2020-05-09T20:59:49.467719 | 2019-04-15T17:42:24 | 2019-04-15T17:42:24 | 181,426,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | import os
import sqlite3
conn = sqlite3.connect("menu.db")
conn.execute('create table menu (id int, name varchar(20), item varchar(20))')
cur = conn.cursor()
cur.execute('insert into menu values(1, "Lunch Specials", "Chicken")')
cur.execute('insert into menu values(2, "Dinner Specials", "Pork")')
cur.execute('insert into menu values(3, "Specials of the day", "Salad")')
cur.execute('insert into menu values(1, "Lunch Specials", "Beef")')
cur.execute('insert into menu values(2, "Dinner Specials", "Sheep")')
cur.execute('insert into menu values(3, "Specials of the day", "Vegetables")')
conn.commit()
cur = conn.cursor()
cur.execute("select * from menu")
print(cur.fetchall())
| [
"[email protected]"
] | |
8543373d98c1f04b791fbc898524b98731cd31c2 | 490fad8eb8856c16b3d1d2e1ac3d00f5bd1280ba | /langsea/managers/category_manager.py | 5e1600baac8d6797c904a7ef17ec7107403b641f | [
"MIT"
] | permissive | blancheta/langsea | ebd12b16ff1b36d4292f527ec58f23b93deecbe7 | e268b43fb94e3234ac161f2e5d9600d51360e4b3 | refs/heads/master | 2020-12-25T14:14:49.029568 | 2016-08-20T16:31:00 | 2016-08-20T16:31:00 | 66,143,438 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 591 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from langsea.models.category import Category
class CategoryManager:
categories_api_url = 'http://www.langsea.org/api/categories/'
def all(self):
response = requests.get(self.categories_api_url)
if response.ok:
categories = []
for category_json in response.json():
categories.append(Category(*category_json))
return categories
def get(self, name):
response = requests.get(self.categories_api_url + name)
category = None
if response.ok:
category = Category(*response.json())
return category
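# Illustrative usage sketch (added for clarity; not part of the original file).
# It assumes the langsea API above is reachable; all()/get() implicitly return
# None when the HTTP request does not succeed.
if __name__ == '__main__':
    manager = CategoryManager()
    for category in manager.all() or []:
        print(category)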
| [
"[email protected]"
] | |
f8fb78b34913903cdd4e7dbecf2b63afad70b866 | b19a1baf69d1f7ba05a02ace7dfcba15c8d47cfb | /my_random.py | 1a36aadaabd3e30ae66d3858d940a5fa861897f8 | [] | no_license | MarkHofstetter/20191018-wifi-python | 20ed5de1cf28996902cecf7cd681d054e0d06739 | 7427b896783059a77c541e95df851a492ef5ebb9 | refs/heads/master | 2020-08-15T03:43:42.964992 | 2019-10-28T14:39:17 | 2019-10-28T14:39:17 | 215,275,139 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 991 | py | # kopfrechen
# The user gets 2 different random numbers, each in the range 1 - 10
# The user has to multiply them
# and the program checks whether the answer is correct
# Extension:
# ask 10 different questions and keep track of
# how many were answered right and wrong
import random
from util import user_input_positive_number
# import util
wrong = 0
right = 0
user_input = user_input_positive_number(question = 'Wieviele Runden')
for i in range(0, user_input):
print(i)
m1 = random.randint(1, 10)
m2 = random.randint(1, 10)
print(str(m1) + ' mal ' + str(m2) + ' ergibt?')
product = m1 * m2
user_input = user_input_positive_number('Bitte eine Lösung eingeben: ')
if product == user_input:
print("Richtig!")
right += 1
else:
print("Falsch!")
wrong += 1
print('Richtig: ' + str(right) )
print('Falsch: ' + str(wrong))
print('Korrekt {:0.2f} %'.format(right/(i+1)*100))
| [
"[email protected]"
] | |
4befe135006f88eaa43f75a4a79d805a6d066eaa | 6188f8ef474da80c9e407e8040de877273f6ce20 | /examples/docs_snippets/docs_snippets/guides/dagster/asset_tutorial/non_argument_deps.py | 9d15421d4ee2978a194a43bb4d650ad0f3abb1eb | [
"Apache-2.0"
] | permissive | iKintosh/dagster | 99f2a1211de1f3b52f8bcf895dafaf832b999de2 | 932a5ba35263deb7d223750f211c2ddfa71e6f48 | refs/heads/master | 2023-01-24T15:58:28.497042 | 2023-01-20T21:51:35 | 2023-01-20T21:51:35 | 276,410,978 | 1 | 0 | Apache-2.0 | 2020-07-01T15:19:47 | 2020-07-01T15:13:56 | null | UTF-8 | Python | false | false | 2,104 | py | """isort:skip_file"""
import csv
import requests
from dagster import asset
@asset
def cereals():
response = requests.get("https://docs.dagster.io/assets/cereal.csv")
lines = response.text.split("\n")
return [row for row in csv.DictReader(lines)]
@asset
def nabisco_cereals(cereals):
"""Cereals manufactured by Nabisco"""
return [row for row in cereals if row["mfr"] == "N"]
@asset
def cereal_protein_fractions(cereals):
"""
For each cereal, records its protein content as a fraction of its total mass.
"""
result = {}
for cereal in cereals:
total_grams = float(cereal["weight"]) * 28.35
result[cereal["name"]] = float(cereal["protein"]) / total_grams
return result
@asset
def highest_protein_nabisco_cereal(nabisco_cereals, cereal_protein_fractions):
"""
The name of the nabisco cereal that has the highest protein content.
"""
sorted_by_protein = sorted(
nabisco_cereals, key=lambda cereal: cereal_protein_fractions[cereal["name"]]
)
return sorted_by_protein[-1]["name"]
# cereal_ratings_zip_start
import urllib.request
@asset
def cereal_ratings_zip() -> None:
urllib.request.urlretrieve(
"https://dagster-git-tutorial-nothing-elementl.vercel.app/assets/cereal-ratings.csv.zip",
"cereal-ratings.csv.zip",
)
# cereal_ratings_zip_end
# cereal_ratings_csv_start
import zipfile
@asset(non_argument_deps={"cereal_ratings_zip"})
def cereal_ratings_csv() -> None:
with zipfile.ZipFile("cereal-ratings.csv.zip", "r") as zip_ref:
zip_ref.extractall(".")
# cereal_ratings_csv_end
# nabisco_cereal_ratings_start
@asset(non_argument_deps={"cereal_ratings_csv"})
def nabisco_cereal_ratings(nabisco_cereals):
with open("cereal-ratings.csv", "r") as f:
cereal_ratings = {
row["name"]: row["rating"] for row in csv.DictReader(f.readlines())
}
result = {}
for nabisco_cereal in nabisco_cereals:
name = nabisco_cereal["name"]
result[name] = cereal_ratings[name]
return result
# nabisco_cereal_ratings_end
| [
"[email protected]"
] | |
36d309841dbe245ef49c789e87285f004a3dd0c7 | 169e75df163bb311198562d286d37aad14677101 | /tensorflow/python/keras/_impl/keras/layers/gru_test.py | 48e7e14f5ab73b534ab0d1c765ad2572b2930b2b | [
"Apache-2.0"
] | permissive | zylo117/tensorflow-gpu-macosx | e553d17b769c67dfda0440df8ac1314405e4a10a | 181bc2b37aa8a3eeb11a942d8f330b04abc804b3 | refs/heads/master | 2022-10-19T21:35:18.148271 | 2020-10-15T02:33:20 | 2020-10-15T02:33:20 | 134,240,831 | 116 | 26 | Apache-2.0 | 2022-10-04T23:36:22 | 2018-05-21T08:29:12 | C++ | UTF-8 | Python | false | false | 7,280 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for GRU layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras._impl import keras
from tensorflow.python.keras._impl.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.training.rmsprop import RMSPropOptimizer
class GRULayerTest(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes()
def test_return_sequences_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
keras.layers.GRU,
kwargs={'units': units,
'return_sequences': True},
input_shape=(num_samples, timesteps, embedding_dim))
@tf_test_util.run_in_graph_and_eager_modes()
def test_dynamic_behavior_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer = keras.layers.GRU(units, input_shape=(None, embedding_dim))
model = keras.models.Sequential()
model.add(layer)
model.compile(RMSPropOptimizer(0.01), 'mse')
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
@tf_test_util.run_in_graph_and_eager_modes()
def test_dropout_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
testing_utils.layer_test(
keras.layers.GRU,
kwargs={'units': units,
'dropout': 0.1,
'recurrent_dropout': 0.1},
input_shape=(num_samples, timesteps, embedding_dim))
@tf_test_util.run_in_graph_and_eager_modes()
def test_implementation_mode_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
for mode in [0, 1, 2]:
testing_utils.layer_test(
keras.layers.GRU,
kwargs={'units': units,
'implementation': mode},
input_shape=(num_samples, timesteps, embedding_dim))
def test_statefulness_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer_class = keras.layers.GRU
with self.test_session():
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
4,
embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps)))
layer = layer_class(
units, return_sequences=False, stateful=True, weights=None)
model.add(layer)
model.compile(optimizer='sgd', loss='mse')
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
# train once so that the states change
model.train_on_batch(
np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out2.max(), out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
np.testing.assert_allclose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
# Check masking
layer.reset_states()
left_padded_input = np.ones((num_samples, timesteps))
left_padded_input[0, :1] = 0
left_padded_input[1, :2] = 0
out6 = model.predict(left_padded_input)
layer.reset_states()
right_padded_input = np.ones((num_samples, timesteps))
right_padded_input[0, -1:] = 0
right_padded_input[1, -2:] = 0
out7 = model.predict(right_padded_input)
np.testing.assert_allclose(out7, out6, atol=1e-5)
def test_regularizers_GRU(self):
embedding_dim = 4
layer_class = keras.layers.GRU
with self.test_session():
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_regularizer=keras.regularizers.l1(0.01),
recurrent_regularizer=keras.regularizers.l1(0.01),
bias_regularizer='l2',
activity_regularizer='l1')
layer.build((None, None, 2))
self.assertEqual(len(layer.losses), 3)
x = keras.backend.variable(np.ones((2, 3, 2)))
layer(x)
self.assertEqual(len(layer.get_losses_for(x)), 1)
def test_constraints_GRU(self):
embedding_dim = 4
layer_class = keras.layers.GRU
with self.test_session():
k_constraint = keras.constraints.max_norm(0.01)
r_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_constraint=k_constraint,
recurrent_constraint=r_constraint,
bias_constraint=b_constraint)
layer.build((None, None, embedding_dim))
self.assertEqual(layer.cell.kernel.constraint, k_constraint)
self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)
self.assertEqual(layer.cell.bias.constraint, b_constraint)
def test_with_masking_layer_GRU(self):
layer_class = keras.layers.GRU
with self.test_session():
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(layer_class(units=5, return_sequences=True, unroll=False))
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_from_config_GRU(self):
layer_class = keras.layers.GRU
for stateful in (False, True):
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
if __name__ == '__main__':
test.main()
| [
"[email protected]"
] | |
cd0aa0709e33dd26bd91d1e55facc99254e15216 | d0b6940acef2dd8ee8e37dc1034cb7e6799889f8 | /build/ur_e_description/catkin_generated/pkg.installspace.context.pc.py | eae8692e9c7a38050d6c96c32a004725305a6ce3 | [] | no_license | robwoidi/ws_MoveIt | 5477202f01e7ddd9c4bdf21187c755fe80bf6c4d | 9b004cd792193337f9f3e628ded7a63e97f85295 | refs/heads/master | 2023-07-06T01:54:24.311735 | 2021-08-08T11:57:34 | 2021-08-08T11:57:34 | 393,943,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "ur_e_description"
PROJECT_SPACE_DIR = "/home/stefan/ws_moveit/install"
PROJECT_VERSION = "1.2.7"
| [
"[email protected]"
] | |
fe6afb0a5ceacf91383ce734fe45b592f58f00f9 | d05a59feee839a4af352b7ed2fd6cf10a288a3cb | /xlsxwriter/test/comparison/test_chart_axis30.py | 9e4ec252d029acea26ddf0d4218712e6c3c78c56 | [
"BSD-2-Clause-Views"
] | permissive | elessarelfstone/XlsxWriter | 0d958afd593643f990373bd4d8a32bafc0966534 | bb7b7881c7a93c89d6eaac25f12dda08d58d3046 | refs/heads/master | 2020-09-24T06:17:20.840848 | 2019-11-24T23:43:01 | 2019-11-24T23:43:01 | 225,685,272 | 1 | 0 | NOASSERTION | 2019-12-03T18:09:06 | 2019-12-03T18:09:05 | null | UTF-8 | Python | false | false | 1,350 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_axis30.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'line'})
chart.axis_ids = [69200896, 69215360]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
chart.set_x_axis({'position_axis': 'on_tick'})
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| [
"[email protected]"
] | |
17155a2faf01fd4d1b8ef2bd64c48e450adac8c7 | 8aa04db29bae5e0391543349eb2c0f778c56ffae | /tensorflow/python/trackable/asset.py | c218f7240e4f29d6e95140050581981776c3b287 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] | permissive | mansnils/tensorflow | ec1a840f8fca6742d6e54dcf7b00eae0180f4023 | b0164f014fd4f1b5af2c7b578aa7687198c5d92e | refs/heads/master | 2023-01-30T00:13:07.772844 | 2023-01-09T09:45:45 | 2023-01-09T09:49:49 | 226,075,754 | 1 | 0 | Apache-2.0 | 2019-12-05T10:27:38 | 2019-12-05T10:27:37 | null | UTF-8 | Python | false | false | 4,278 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Asset-type Trackable object."""
import os
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.saved_model import path_helpers
from tensorflow.python.trackable import base
from tensorflow.python.util.tf_export import tf_export
@tf_export("saved_model.Asset")
class Asset(base.Trackable):
"""Represents a file asset to hermetically include in a SavedModel.
A SavedModel can include arbitrary files, called assets, that are needed
for its use. For example a vocabulary file used initialize a lookup table.
When a trackable object is exported via `tf.saved_model.save()`, all the
`Asset`s reachable from it are copied into the SavedModel assets directory.
Upon loading, the assets and the serialized functions that depend on them
will refer to the correct filepaths inside the SavedModel directory.
Example:
```
filename = tf.saved_model.Asset("file.txt")
@tf.function(input_signature=[])
def func():
return tf.io.read_file(filename)
trackable_obj = tf.train.Checkpoint()
trackable_obj.func = func
trackable_obj.filename = filename
tf.saved_model.save(trackable_obj, "/tmp/saved_model")
# The created SavedModel is hermetic, it does not depend on
# the original file and can be moved to another path.
tf.io.gfile.remove("file.txt")
tf.io.gfile.rename("/tmp/saved_model", "/tmp/new_location")
reloaded_obj = tf.saved_model.load("/tmp/new_location")
print(reloaded_obj.func())
```
Attributes:
asset_path: A path, or a 0-D `tf.string` tensor with path to the asset.
"""
def __init__(self, path):
"""Record the full path to the asset."""
if isinstance(path, os.PathLike):
path = os.fspath(path)
# The init_scope prevents functions from capturing `path` in an
# initialization graph, since it is transient and should not end up in a
# serialized function body.
with ops.init_scope(), ops.device("CPU"):
self._path = ops.convert_to_tensor(
path, dtype=dtypes.string, name="asset_path")
@property
def asset_path(self):
"""Fetch the current asset path."""
return self._path
@classmethod
def _deserialize_from_proto(cls, object_proto, export_dir, asset_file_def,
**unused_kwargs):
proto = object_proto.asset
filename = file_io.join(
path_helpers.get_assets_dir(export_dir),
asset_file_def[proto.asset_file_def_index].filename)
asset = cls(filename)
if not context.executing_eagerly():
ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, asset.asset_path)
return asset
def _add_trackable_child(self, name, value):
setattr(self, name, value)
def _export_to_saved_model_graph(self, tensor_map, **unused_kwargs):
# TODO(b/205008097): Instead of mapping 1-1 between trackable asset
# and asset in the graph def consider deduping the assets that
# point to the same file.
asset_path_initializer = array_ops.placeholder(
shape=self.asset_path.shape,
dtype=dtypes.string,
name="asset_path_initializer")
asset_variable = resource_variable_ops.ResourceVariable(
asset_path_initializer)
tensor_map[self.asset_path] = asset_variable
return [self.asset_path]
ops.register_tensor_conversion_function(
Asset, lambda asset, **kw: ops.convert_to_tensor(asset.asset_path, **kw))
| [
"[email protected]"
] | |
e86d67f32b9eade3829748ae16ebc5608042241f | f791462fb1286607d16459c1602d133f8d8c8b59 | /test/test_distributions_mixture.py | a1ab093e65ea5c73f333d6fcd898c35cb3340e73 | [
"Apache-2.0"
] | permissive | pyro-ppl/numpyro | b071ed2bd93be41bafc3da8764c9f5617f996d92 | ca96eca8e8e1531e71ba559ef7a8ad3b4b68cbc2 | refs/heads/master | 2023-09-03T15:56:13.252692 | 2023-08-28T14:32:25 | 2023-08-28T14:32:25 | 170,580,540 | 1,941 | 219 | Apache-2.0 | 2023-09-04T11:26:11 | 2019-02-13T21:13:59 | Python | UTF-8 | Python | false | false | 5,161 | py | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import pytest
import jax
import jax.numpy as jnp
import numpyro.distributions as dist
rng_key = jax.random.PRNGKey(42)
def get_normal(batch_shape):
"""Get parameterized Normal with given batch shape."""
loc = jnp.zeros(batch_shape)
scale = jnp.ones(batch_shape)
normal = dist.Normal(loc=loc, scale=scale)
return normal
def get_mvn(batch_shape):
"""Get parameterized MultivariateNormal with given batch shape."""
dimensions = 2
loc = jnp.zeros((*batch_shape, dimensions))
cov_matrix = jnp.eye(dimensions, dimensions)
for i, s in enumerate(batch_shape):
loc = jnp.repeat(jnp.expand_dims(loc, i), s, axis=i)
cov_matrix = jnp.repeat(jnp.expand_dims(cov_matrix, i), s, axis=i)
mvn = dist.MultivariateNormal(loc=loc, covariance_matrix=cov_matrix)
return mvn
@pytest.mark.parametrize("jax_dist_getter", [get_normal, get_mvn])
@pytest.mark.parametrize("nb_mixtures", [1, 3])
@pytest.mark.parametrize("batch_shape", [(), (1,), (7,), (2, 5)])
@pytest.mark.parametrize("same_family", [True, False])
def test_mixture_same_batch_shape(
jax_dist_getter, nb_mixtures, batch_shape, same_family
):
mixing_probabilities = jnp.ones(nb_mixtures) / nb_mixtures
for i, s in enumerate(batch_shape):
mixing_probabilities = jnp.repeat(
jnp.expand_dims(mixing_probabilities, i), s, axis=i
)
assert jnp.allclose(mixing_probabilities.sum(axis=-1), 1.0)
mixing_distribution = dist.Categorical(probs=mixing_probabilities)
if same_family:
component_distribution = jax_dist_getter((*batch_shape, nb_mixtures))
else:
component_distribution = [
jax_dist_getter(batch_shape) for _ in range(nb_mixtures)
]
_test_mixture(mixing_distribution, component_distribution)
@pytest.mark.parametrize("jax_dist_getter", [get_normal, get_mvn])
@pytest.mark.parametrize("nb_mixtures", [3])
@pytest.mark.parametrize("mixing_batch_shape, component_batch_shape", [[(2,), (7, 2)]])
@pytest.mark.parametrize("same_family", [True, False])
def test_mixture_broadcast_batch_shape(
jax_dist_getter, nb_mixtures, mixing_batch_shape, component_batch_shape, same_family
):
# Create mixture
mixing_probabilities = jnp.ones(nb_mixtures) / nb_mixtures
for i, s in enumerate(mixing_batch_shape):
mixing_probabilities = jnp.repeat(
jnp.expand_dims(mixing_probabilities, i), s, axis=i
)
assert jnp.allclose(mixing_probabilities.sum(axis=-1), 1.0)
mixing_distribution = dist.Categorical(probs=mixing_probabilities)
if same_family:
component_distribution = jax_dist_getter((*component_batch_shape, nb_mixtures))
else:
component_distribution = [
jax_dist_getter(component_batch_shape) for _ in range(nb_mixtures)
]
_test_mixture(mixing_distribution, component_distribution)
def _test_mixture(mixing_distribution, component_distribution):
# Create mixture
mixture = dist.Mixture(
mixing_distribution=mixing_distribution,
component_distributions=component_distribution,
)
assert (
mixture.mixture_size == mixing_distribution.probs.shape[-1]
), "Mixture size needs to be the size of the probability vector"
if isinstance(component_distribution, dist.Distribution):
assert (
mixture.batch_shape == component_distribution.batch_shape[:-1]
), "Mixture batch shape needs to be the component batch shape without the mixture dimension."
else:
assert (
mixture.batch_shape == component_distribution[0].batch_shape
), "Mixture batch shape needs to be the component batch shape."
# Test samples
sample_shape = (11,)
# Samples from component distribution(s)
component_samples = mixture.component_sample(rng_key, sample_shape)
assert component_samples.shape == (
*sample_shape,
*mixture.batch_shape,
mixture.mixture_size,
*mixture.event_shape,
)
# Samples from mixture
samples = mixture.sample(rng_key, sample_shape=sample_shape)
assert samples.shape == (*sample_shape, *mixture.batch_shape, *mixture.event_shape)
# Check log_prob
lp = mixture.log_prob(samples)
nb_value_dims = len(samples.shape) - mixture.event_dim
expected_shape = samples.shape[:nb_value_dims]
assert lp.shape == expected_shape
# Samples with indices
samples_, [indices] = mixture.sample_with_intermediates(
rng_key, sample_shape=sample_shape
)
assert samples_.shape == samples.shape
assert indices.shape == (*sample_shape, *mixture.batch_shape)
assert jnp.issubdtype(indices.dtype, jnp.integer)
assert (indices >= 0).all() and (indices < mixture.mixture_size).all()
# Check mean
mean = mixture.mean
assert mean.shape == mixture.shape()
# Check variance
var = mixture.variance
assert var.shape == mixture.shape()
# Check cdf
if mixture.event_shape == ():
cdf = mixture.cdf(samples)
assert cdf.shape == (*sample_shape, *mixture.shape())
| [
"[email protected]"
] | |
815c29c7ac315b39685f4cb97cfe0129b2f4b029 | b2c0517a0421c32f6782d76e4df842875d6ffce5 | /Algorithms/Dynamic Programming/121. Best Time to Buy and Sell Stock.py | ebf0f3692c9e7ba74f468b736b65e900ba63d3d1 | [] | no_license | SuYuxi/yuxi | e875b1536dc4b363194d0bef7f9a5aecb5d6199a | 45ad23a47592172101072a80a90de17772491e04 | refs/heads/master | 2022-10-04T21:29:42.017462 | 2022-09-30T04:00:48 | 2022-09-30T04:00:48 | 66,703,247 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,053 | py | #Forward
class Solution(object):
def maxProfit(self, prices):
if(not prices):
return 0
minP = prices[0]
maxPro = 0
for i in prices:
if(i <= minP):
minP = i
else:
maxPro = max(maxPro, i-minP)
return maxPro
#Backward
class Solution(object):
def maxProfit(self, prices):
maxP = 0
maxPro = 0
for i in prices[::-1]:
if(i > maxP):
maxP = i
else:
maxPro = max(maxPro, maxP - i)
return maxPro
#Kadane's Algorithm
#Max sum Contiguous subarray search
class Solution(object):
def maxProfit(self, prices):
L = []
for i in range(1, len(prices)):
L.append(prices[i] - prices[i-1])
maxCur = 0
maxSofar = 0
for i in L:
maxCur = max(0, maxCur + i)
maxSofar = max(maxSofar, maxCur)
return maxSofar
#Lite version
class Solution(object):
def maxProfit(self, prices):
maxCur = 0
maxSofar = 0
for i in range(1, len(prices)):
maxCur = max(0, maxCur + prices[i] - prices[i-1])
maxSofar = max(maxSofar, maxCur)
return maxSofar
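# Illustrative sanity check (added for clarity; not part of the original solution file).
# Every variant above should return 5 for the classic example [7, 1, 5, 3, 6, 4]
# (buy at 1, sell at 6); only the last-defined Solution class survives the redefinitions.
if __name__ == '__main__':
    print(Solution().maxProfit([7, 1, 5, 3, 6, 4]))  # expected output: 5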
| [
"[email protected]"
] | |
c174eeaece6b1b311b305f2b8e6aae548566a5fb | b314518eb3e33c872f880c4f80a0f3d0856cf9ee | /12_marks.py | bd31d77188a83adcb12a63ab0e53a8fd0675250c | [] | no_license | namntran/2021_python_principles | 0ba48d2cb6ff32a4fefd0b13ae24d2376e17740e | bf33210f9b0e02dfefe7a9a008936e8f47d25149 | refs/heads/main | 2023-03-10T15:47:48.930202 | 2021-02-25T07:27:53 | 2021-02-25T07:27:53 | 330,814,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 964 | py | # 12_marks.py
# Prompt for and read marks for a test until a negative number is entered.
# Print the number of marks entered and the average (arithmetic mean) of marks
# Print the highest and lowest marks
# use indefinite loop - while loop
n = 0
total = 0.0
mark = float(input("Enter a mark: ")) #initialise variables to the first value it should have, don't make up numbers
highest = mark #initialise variables to the first value it should have, don't make up numbers
lowest = mark #initialise variables to the first value it should have, don't make up numbers
while mark >= 0.0:
n += 1
total += mark
if mark > highest:
highest = mark
if mark < lowest:
lowest = mark
mark = float(input("Enter a mark: "))
print("The number of marks: ", n)
if n > 0: # only print average if n > 0
print("The average mark is: ", total/ n)
print("The lowest mark is: ", lowest)
print("The highest mark is: ", highest)
| [
"[email protected]"
] | |
25a56b9668be160cc2d3f1113f3f44564b46c9fe | 356151747d2a6c65429e48592385166ab48c334c | /backend/manager/threads/manage_chef/th_remove_chef_query.py | ea0d3f08d872c0edeeb0b8a88499869306d0296d | [] | no_license | therealrahulsahu/se_project | c82b2d9d467decd30a24388f66427c7805c23252 | c9f9fd5594191ab7dce0504ca0ab3025aa26a0c1 | refs/heads/master | 2020-06-25T02:51:30.355677 | 2020-04-20T13:01:36 | 2020-04-20T13:01:36 | 199,175,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,371 | py | from PyQt5.QtCore import QThread, pyqtSignal
class ThreadRemoveChefQuery(QThread):
signal = pyqtSignal('PyQt_PyObject')
def __init__(self, parent_class):
super().__init__()
self.output_list = []
self.parent_class = parent_class
def run(self):
in_name = r'(?i){}'.format(self.parent_class.curr_wid.le_rm_chef.text().strip())
self.output_list = []
self.output_itmes = []
from errors import ChefNotFoundError
from pymongo.errors import AutoReconnect
try:
myc = self.parent_class.MW.DB.chef
data_list = list(myc.find({'name': {'$regex': in_name}},
{'password': 0, 'phone': 0}).limit(10))
if data_list:
self.output_itmes = data_list
self.output_list = [x['_id'] for x in data_list]
self.parent_class.MW.mess('List Fetched')
self.signal.emit(True)
else:
self.parent_class.curr_wid.bt_rm_confirm.setEnabled(False)
raise ChefNotFoundError
except ChefNotFoundError as ob:
self.parent_class.MW.mess(str(ob))
except AutoReconnect:
self.parent_class.MW.mess('-->> Network Error <<--')
finally:
self.parent_class.curr_wid.bt_get_rm_chef.setEnabled(True)
| [
"[email protected]"
] | |
c273e5c4afb916b2bfe8fda1dff478b84e299c6e | 27aaadf435779c29012233cb1dacf27bd9dd0d0f | /cdn-20141111/alibabacloud_cdn20141111/client.py | ef23e019ac3566d3e731afcffe814a639e52c28f | [
"Apache-2.0"
] | permissive | aliyun/alibabacloud-python-sdk | afadedb09db5ba6c2bc6b046732b2a6dc215f004 | e02f34e07a7f05e898a492c212598a348d903739 | refs/heads/master | 2023-08-22T20:26:44.695288 | 2023-08-22T12:27:39 | 2023-08-22T12:27:39 | 288,972,087 | 43 | 29 | null | 2022-09-26T09:21:19 | 2020-08-20T10:08:11 | Python | UTF-8 | Python | false | false | 99,229 | py | # -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from typing import Dict
from Tea.core import TeaCore
from alibabacloud_tea_openapi.client import Client as OpenApiClient
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util.client import Client as UtilClient
from alibabacloud_endpoint_util.client import Client as EndpointUtilClient
from alibabacloud_cdn20141111 import models as cdn_20141111_models
from alibabacloud_tea_util import models as util_models
from alibabacloud_openapi_util.client import Client as OpenApiUtilClient
class Client(OpenApiClient):
"""
*\
"""
def __init__(
self,
config: open_api_models.Config,
):
super().__init__(config)
self._endpoint_rule = 'central'
self._endpoint_map = {
'ap-northeast-1': 'cdn.ap-southeast-1.aliyuncs.com',
'ap-south-1': 'cdn.ap-southeast-1.aliyuncs.com',
'ap-southeast-1': 'cdn.ap-southeast-1.aliyuncs.com',
'ap-southeast-2': 'cdn.ap-southeast-1.aliyuncs.com',
'ap-southeast-3': 'cdn.ap-southeast-1.aliyuncs.com',
'ap-southeast-5': 'cdn.ap-southeast-1.aliyuncs.com',
'eu-central-1': 'cdn.ap-southeast-1.aliyuncs.com',
'eu-west-1': 'cdn.ap-southeast-1.aliyuncs.com',
'me-east-1': 'cdn.ap-southeast-1.aliyuncs.com',
'us-east-1': 'cdn.ap-southeast-1.aliyuncs.com',
'us-west-1': 'cdn.ap-southeast-1.aliyuncs.com'
}
self.check_config(config)
self._endpoint = self.get_endpoint('cdn', self._region_id, self._endpoint_rule, self._network, self._suffix, self._endpoint_map, self._endpoint)
def get_endpoint(
self,
product_id: str,
region_id: str,
endpoint_rule: str,
network: str,
suffix: str,
endpoint_map: Dict[str, str],
endpoint: str,
) -> str:
if not UtilClient.empty(endpoint):
return endpoint
if not UtilClient.is_unset(endpoint_map) and not UtilClient.empty(endpoint_map.get(region_id)):
return endpoint_map.get(region_id)
return EndpointUtilClient.get_endpoint_rules(product_id, region_id, endpoint_rule, network, suffix)
def add_cdn_domain_with_options(
self,
request: cdn_20141111_models.AddCdnDomainRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.AddCdnDomainResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.cdn_type):
query['CdnType'] = request.cdn_type
if not UtilClient.is_unset(request.check_url):
query['CheckUrl'] = request.check_url
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.owner_account):
query['OwnerAccount'] = request.owner_account
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.priorities):
query['Priorities'] = request.priorities
if not UtilClient.is_unset(request.region):
query['Region'] = request.region
if not UtilClient.is_unset(request.resource_group_id):
query['ResourceGroupId'] = request.resource_group_id
if not UtilClient.is_unset(request.scope):
query['Scope'] = request.scope
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
if not UtilClient.is_unset(request.source_port):
query['SourcePort'] = request.source_port
if not UtilClient.is_unset(request.source_type):
query['SourceType'] = request.source_type
if not UtilClient.is_unset(request.sources):
query['Sources'] = request.sources
if not UtilClient.is_unset(request.top_level_domain):
query['TopLevelDomain'] = request.top_level_domain
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='AddCdnDomain',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.AddCdnDomainResponse(),
self.call_api(params, req, runtime)
)
async def add_cdn_domain_with_options_async(
self,
request: cdn_20141111_models.AddCdnDomainRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.AddCdnDomainResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.cdn_type):
query['CdnType'] = request.cdn_type
if not UtilClient.is_unset(request.check_url):
query['CheckUrl'] = request.check_url
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.owner_account):
query['OwnerAccount'] = request.owner_account
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.priorities):
query['Priorities'] = request.priorities
if not UtilClient.is_unset(request.region):
query['Region'] = request.region
if not UtilClient.is_unset(request.resource_group_id):
query['ResourceGroupId'] = request.resource_group_id
if not UtilClient.is_unset(request.scope):
query['Scope'] = request.scope
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
if not UtilClient.is_unset(request.source_port):
query['SourcePort'] = request.source_port
if not UtilClient.is_unset(request.source_type):
query['SourceType'] = request.source_type
if not UtilClient.is_unset(request.sources):
query['Sources'] = request.sources
if not UtilClient.is_unset(request.top_level_domain):
query['TopLevelDomain'] = request.top_level_domain
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='AddCdnDomain',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.AddCdnDomainResponse(),
await self.call_api_async(params, req, runtime)
)
def add_cdn_domain(
self,
request: cdn_20141111_models.AddCdnDomainRequest,
) -> cdn_20141111_models.AddCdnDomainResponse:
runtime = util_models.RuntimeOptions()
return self.add_cdn_domain_with_options(request, runtime)
async def add_cdn_domain_async(
self,
request: cdn_20141111_models.AddCdnDomainRequest,
) -> cdn_20141111_models.AddCdnDomainResponse:
runtime = util_models.RuntimeOptions()
return await self.add_cdn_domain_with_options_async(request, runtime)
def describe_cdn_domain_detail_with_options(
self,
request: cdn_20141111_models.DescribeCdnDomainDetailRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeCdnDomainDetailResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeCdnDomainDetail',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeCdnDomainDetailResponse(),
self.call_api(params, req, runtime)
)
async def describe_cdn_domain_detail_with_options_async(
self,
request: cdn_20141111_models.DescribeCdnDomainDetailRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeCdnDomainDetailResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeCdnDomainDetail',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeCdnDomainDetailResponse(),
await self.call_api_async(params, req, runtime)
)
def describe_cdn_domain_detail(
self,
request: cdn_20141111_models.DescribeCdnDomainDetailRequest,
) -> cdn_20141111_models.DescribeCdnDomainDetailResponse:
runtime = util_models.RuntimeOptions()
return self.describe_cdn_domain_detail_with_options(request, runtime)
async def describe_cdn_domain_detail_async(
self,
request: cdn_20141111_models.DescribeCdnDomainDetailRequest,
) -> cdn_20141111_models.DescribeCdnDomainDetailResponse:
runtime = util_models.RuntimeOptions()
return await self.describe_cdn_domain_detail_with_options_async(request, runtime)
def describe_cdn_domain_logs_with_options(
self,
request: cdn_20141111_models.DescribeCdnDomainLogsRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeCdnDomainLogsResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.log_day):
query['LogDay'] = request.log_day
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.page_number):
query['PageNumber'] = request.page_number
if not UtilClient.is_unset(request.page_size):
query['PageSize'] = request.page_size
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeCdnDomainLogs',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeCdnDomainLogsResponse(),
self.call_api(params, req, runtime)
)
async def describe_cdn_domain_logs_with_options_async(
self,
request: cdn_20141111_models.DescribeCdnDomainLogsRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeCdnDomainLogsResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.log_day):
query['LogDay'] = request.log_day
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.page_number):
query['PageNumber'] = request.page_number
if not UtilClient.is_unset(request.page_size):
query['PageSize'] = request.page_size
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeCdnDomainLogs',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeCdnDomainLogsResponse(),
await self.call_api_async(params, req, runtime)
)
def describe_cdn_domain_logs(
self,
request: cdn_20141111_models.DescribeCdnDomainLogsRequest,
) -> cdn_20141111_models.DescribeCdnDomainLogsResponse:
runtime = util_models.RuntimeOptions()
return self.describe_cdn_domain_logs_with_options(request, runtime)
async def describe_cdn_domain_logs_async(
self,
request: cdn_20141111_models.DescribeCdnDomainLogsRequest,
) -> cdn_20141111_models.DescribeCdnDomainLogsResponse:
runtime = util_models.RuntimeOptions()
return await self.describe_cdn_domain_logs_with_options_async(request, runtime)
def describe_cdn_service_with_options(
self,
request: cdn_20141111_models.DescribeCdnServiceRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeCdnServiceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeCdnService',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeCdnServiceResponse(),
self.call_api(params, req, runtime)
)
async def describe_cdn_service_with_options_async(
self,
request: cdn_20141111_models.DescribeCdnServiceRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeCdnServiceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeCdnService',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeCdnServiceResponse(),
await self.call_api_async(params, req, runtime)
)
def describe_cdn_service(
self,
request: cdn_20141111_models.DescribeCdnServiceRequest,
) -> cdn_20141111_models.DescribeCdnServiceResponse:
runtime = util_models.RuntimeOptions()
return self.describe_cdn_service_with_options(request, runtime)
async def describe_cdn_service_async(
self,
request: cdn_20141111_models.DescribeCdnServiceRequest,
) -> cdn_20141111_models.DescribeCdnServiceResponse:
runtime = util_models.RuntimeOptions()
return await self.describe_cdn_service_with_options_async(request, runtime)
def describe_domain_bps_data_with_options(
self,
request: cdn_20141111_models.DescribeDomainBpsDataRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainBpsDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.domain_type):
query['DomainType'] = request.domain_type
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.interval):
query['Interval'] = request.interval
if not UtilClient.is_unset(request.isp_name_en):
query['IspNameEn'] = request.isp_name_en
if not UtilClient.is_unset(request.location_name_en):
query['LocationNameEn'] = request.location_name_en
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
if not UtilClient.is_unset(request.time_merge):
query['TimeMerge'] = request.time_merge
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainBpsData',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainBpsDataResponse(),
self.call_api(params, req, runtime)
)
async def describe_domain_bps_data_with_options_async(
self,
request: cdn_20141111_models.DescribeDomainBpsDataRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainBpsDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.domain_type):
query['DomainType'] = request.domain_type
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.interval):
query['Interval'] = request.interval
if not UtilClient.is_unset(request.isp_name_en):
query['IspNameEn'] = request.isp_name_en
if not UtilClient.is_unset(request.location_name_en):
query['LocationNameEn'] = request.location_name_en
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
if not UtilClient.is_unset(request.time_merge):
query['TimeMerge'] = request.time_merge
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainBpsData',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainBpsDataResponse(),
await self.call_api_async(params, req, runtime)
)
def describe_domain_bps_data(
self,
request: cdn_20141111_models.DescribeDomainBpsDataRequest,
) -> cdn_20141111_models.DescribeDomainBpsDataResponse:
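        """
        Queries bandwidth (bps) monitoring data for accelerated domains over a time range,
        optionally filtered by ISP and region (DescribeDomainBpsData action; summary inferred
        from the action name and request fields).
        """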
runtime = util_models.RuntimeOptions()
return self.describe_domain_bps_data_with_options(request, runtime)
async def describe_domain_bps_data_async(
self,
request: cdn_20141111_models.DescribeDomainBpsDataRequest,
) -> cdn_20141111_models.DescribeDomainBpsDataResponse:
runtime = util_models.RuntimeOptions()
return await self.describe_domain_bps_data_with_options_async(request, runtime)
def describe_domain_bps_data_by_time_stamp_with_options(
self,
request: cdn_20141111_models.DescribeDomainBpsDataByTimeStampRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainBpsDataByTimeStampResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.isp_names):
query['IspNames'] = request.isp_names
if not UtilClient.is_unset(request.location_names):
query['LocationNames'] = request.location_names
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.time_point):
query['TimePoint'] = request.time_point
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainBpsDataByTimeStamp',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainBpsDataByTimeStampResponse(),
self.call_api(params, req, runtime)
)
async def describe_domain_bps_data_by_time_stamp_with_options_async(
self,
request: cdn_20141111_models.DescribeDomainBpsDataByTimeStampRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainBpsDataByTimeStampResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.isp_names):
query['IspNames'] = request.isp_names
if not UtilClient.is_unset(request.location_names):
query['LocationNames'] = request.location_names
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.time_point):
query['TimePoint'] = request.time_point
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainBpsDataByTimeStamp',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainBpsDataByTimeStampResponse(),
await self.call_api_async(params, req, runtime)
)
def describe_domain_bps_data_by_time_stamp(
self,
request: cdn_20141111_models.DescribeDomainBpsDataByTimeStampRequest,
) -> cdn_20141111_models.DescribeDomainBpsDataByTimeStampResponse:
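        """
        Queries bandwidth data for a domain at a single point in time (TimePoint), broken down
        by the given ISP and location names (DescribeDomainBpsDataByTimeStamp action; summary
        inferred from the action name and request fields).
        """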
runtime = util_models.RuntimeOptions()
return self.describe_domain_bps_data_by_time_stamp_with_options(request, runtime)
async def describe_domain_bps_data_by_time_stamp_async(
self,
request: cdn_20141111_models.DescribeDomainBpsDataByTimeStampRequest,
) -> cdn_20141111_models.DescribeDomainBpsDataByTimeStampResponse:
runtime = util_models.RuntimeOptions()
return await self.describe_domain_bps_data_by_time_stamp_with_options_async(request, runtime)
def describe_domain_file_size_proportion_data_with_options(
self,
request: cdn_20141111_models.DescribeDomainFileSizeProportionDataRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainFileSizeProportionDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainFileSizeProportionData',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainFileSizeProportionDataResponse(),
self.call_api(params, req, runtime)
)
async def describe_domain_file_size_proportion_data_with_options_async(
self,
request: cdn_20141111_models.DescribeDomainFileSizeProportionDataRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainFileSizeProportionDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainFileSizeProportionData',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainFileSizeProportionDataResponse(),
await self.call_api_async(params, req, runtime)
)
def describe_domain_file_size_proportion_data(
self,
request: cdn_20141111_models.DescribeDomainFileSizeProportionDataRequest,
) -> cdn_20141111_models.DescribeDomainFileSizeProportionDataResponse:
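        """
        Queries the proportion of requests grouped by response file size for a domain over a
        time range (DescribeDomainFileSizeProportionData action; summary inferred from the
        action name).
        """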
runtime = util_models.RuntimeOptions()
return self.describe_domain_file_size_proportion_data_with_options(request, runtime)
async def describe_domain_file_size_proportion_data_async(
self,
request: cdn_20141111_models.DescribeDomainFileSizeProportionDataRequest,
) -> cdn_20141111_models.DescribeDomainFileSizeProportionDataResponse:
runtime = util_models.RuntimeOptions()
return await self.describe_domain_file_size_proportion_data_with_options_async(request, runtime)
def describe_domain_flow_data_with_options(
self,
request: cdn_20141111_models.DescribeDomainFlowDataRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainFlowDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.domain_type):
query['DomainType'] = request.domain_type
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.interval):
query['Interval'] = request.interval
if not UtilClient.is_unset(request.isp_name_en):
query['IspNameEn'] = request.isp_name_en
if not UtilClient.is_unset(request.location_name_en):
query['LocationNameEn'] = request.location_name_en
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
if not UtilClient.is_unset(request.time_merge):
query['TimeMerge'] = request.time_merge
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainFlowData',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainFlowDataResponse(),
self.call_api(params, req, runtime)
)
async def describe_domain_flow_data_with_options_async(
self,
request: cdn_20141111_models.DescribeDomainFlowDataRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainFlowDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.domain_type):
query['DomainType'] = request.domain_type
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.interval):
query['Interval'] = request.interval
if not UtilClient.is_unset(request.isp_name_en):
query['IspNameEn'] = request.isp_name_en
if not UtilClient.is_unset(request.location_name_en):
query['LocationNameEn'] = request.location_name_en
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
if not UtilClient.is_unset(request.time_merge):
query['TimeMerge'] = request.time_merge
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainFlowData',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainFlowDataResponse(),
await self.call_api_async(params, req, runtime)
)
def describe_domain_flow_data(
self,
request: cdn_20141111_models.DescribeDomainFlowDataRequest,
) -> cdn_20141111_models.DescribeDomainFlowDataResponse:
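        """
        Queries traffic (flow) monitoring data for accelerated domains over a time range,
        optionally filtered by ISP and region (DescribeDomainFlowData action; summary inferred
        from the action name and request fields).
        """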
runtime = util_models.RuntimeOptions()
return self.describe_domain_flow_data_with_options(request, runtime)
async def describe_domain_flow_data_async(
self,
request: cdn_20141111_models.DescribeDomainFlowDataRequest,
) -> cdn_20141111_models.DescribeDomainFlowDataResponse:
runtime = util_models.RuntimeOptions()
return await self.describe_domain_flow_data_with_options_async(request, runtime)
def describe_domain_hit_rate_data_with_options(
self,
request: cdn_20141111_models.DescribeDomainHitRateDataRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainHitRateDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.interval):
query['Interval'] = request.interval
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainHitRateData',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainHitRateDataResponse(),
self.call_api(params, req, runtime)
)
async def describe_domain_hit_rate_data_with_options_async(
self,
request: cdn_20141111_models.DescribeDomainHitRateDataRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainHitRateDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.interval):
query['Interval'] = request.interval
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainHitRateData',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainHitRateDataResponse(),
await self.call_api_async(params, req, runtime)
)
def describe_domain_hit_rate_data(
self,
request: cdn_20141111_models.DescribeDomainHitRateDataRequest,
) -> cdn_20141111_models.DescribeDomainHitRateDataResponse:
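        """
        Queries cache hit-rate monitoring data for a domain over a time range
        (DescribeDomainHitRateData action; summary inferred from the action name).
        """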
runtime = util_models.RuntimeOptions()
return self.describe_domain_hit_rate_data_with_options(request, runtime)
async def describe_domain_hit_rate_data_async(
self,
request: cdn_20141111_models.DescribeDomainHitRateDataRequest,
) -> cdn_20141111_models.DescribeDomainHitRateDataResponse:
runtime = util_models.RuntimeOptions()
return await self.describe_domain_hit_rate_data_with_options_async(request, runtime)
def describe_domain_http_code_data_with_options(
self,
request: cdn_20141111_models.DescribeDomainHttpCodeDataRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainHttpCodeDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.interval):
query['Interval'] = request.interval
if not UtilClient.is_unset(request.isp_name_en):
query['IspNameEn'] = request.isp_name_en
if not UtilClient.is_unset(request.location_name_en):
query['LocationNameEn'] = request.location_name_en
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
if not UtilClient.is_unset(request.time_merge):
query['TimeMerge'] = request.time_merge
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainHttpCodeData',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainHttpCodeDataResponse(),
self.call_api(params, req, runtime)
)
async def describe_domain_http_code_data_with_options_async(
self,
request: cdn_20141111_models.DescribeDomainHttpCodeDataRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainHttpCodeDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.interval):
query['Interval'] = request.interval
if not UtilClient.is_unset(request.isp_name_en):
query['IspNameEn'] = request.isp_name_en
if not UtilClient.is_unset(request.location_name_en):
query['LocationNameEn'] = request.location_name_en
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
if not UtilClient.is_unset(request.time_merge):
query['TimeMerge'] = request.time_merge
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainHttpCodeData',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainHttpCodeDataResponse(),
await self.call_api_async(params, req, runtime)
)
def describe_domain_http_code_data(
self,
request: cdn_20141111_models.DescribeDomainHttpCodeDataRequest,
) -> cdn_20141111_models.DescribeDomainHttpCodeDataResponse:
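        """
        Queries the distribution of HTTP status codes returned for a domain over a time range
        (DescribeDomainHttpCodeData action; summary inferred from the action name and request
        fields).
        """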
runtime = util_models.RuntimeOptions()
return self.describe_domain_http_code_data_with_options(request, runtime)
async def describe_domain_http_code_data_async(
self,
request: cdn_20141111_models.DescribeDomainHttpCodeDataRequest,
) -> cdn_20141111_models.DescribeDomainHttpCodeDataResponse:
runtime = util_models.RuntimeOptions()
return await self.describe_domain_http_code_data_with_options_async(request, runtime)
def describe_domain_ispdata_with_options(
self,
request: cdn_20141111_models.DescribeDomainISPDataRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainISPDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainISPData',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainISPDataResponse(),
self.call_api(params, req, runtime)
)
async def describe_domain_ispdata_with_options_async(
self,
request: cdn_20141111_models.DescribeDomainISPDataRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainISPDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainISPData',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainISPDataResponse(),
await self.call_api_async(params, req, runtime)
)
def describe_domain_ispdata(
self,
request: cdn_20141111_models.DescribeDomainISPDataRequest,
) -> cdn_20141111_models.DescribeDomainISPDataResponse:
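        """
        Queries the ISP (carrier) distribution of traffic for a domain over a time range
        (DescribeDomainISPData action; summary inferred from the action name).
        """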
runtime = util_models.RuntimeOptions()
return self.describe_domain_ispdata_with_options(request, runtime)
async def describe_domain_ispdata_async(
self,
request: cdn_20141111_models.DescribeDomainISPDataRequest,
) -> cdn_20141111_models.DescribeDomainISPDataResponse:
runtime = util_models.RuntimeOptions()
return await self.describe_domain_ispdata_with_options_async(request, runtime)
def describe_domain_qps_data_with_options(
self,
request: cdn_20141111_models.DescribeDomainQpsDataRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainQpsDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.domain_type):
query['DomainType'] = request.domain_type
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.interval):
query['Interval'] = request.interval
if not UtilClient.is_unset(request.isp_name_en):
query['IspNameEn'] = request.isp_name_en
if not UtilClient.is_unset(request.location_name_en):
query['LocationNameEn'] = request.location_name_en
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
if not UtilClient.is_unset(request.time_merge):
query['TimeMerge'] = request.time_merge
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainQpsData',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainQpsDataResponse(),
self.call_api(params, req, runtime)
)
async def describe_domain_qps_data_with_options_async(
self,
request: cdn_20141111_models.DescribeDomainQpsDataRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainQpsDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.domain_type):
query['DomainType'] = request.domain_type
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.interval):
query['Interval'] = request.interval
if not UtilClient.is_unset(request.isp_name_en):
query['IspNameEn'] = request.isp_name_en
if not UtilClient.is_unset(request.location_name_en):
query['LocationNameEn'] = request.location_name_en
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
if not UtilClient.is_unset(request.time_merge):
query['TimeMerge'] = request.time_merge
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainQpsData',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainQpsDataResponse(),
await self.call_api_async(params, req, runtime)
)
def describe_domain_qps_data(
self,
request: cdn_20141111_models.DescribeDomainQpsDataRequest,
) -> cdn_20141111_models.DescribeDomainQpsDataResponse:
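        """
        Queries queries-per-second (QPS) monitoring data for accelerated domains over a time
        range, optionally filtered by ISP and region (DescribeDomainQpsData action; summary
        inferred from the action name and request fields).
        """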
runtime = util_models.RuntimeOptions()
return self.describe_domain_qps_data_with_options(request, runtime)
async def describe_domain_qps_data_async(
self,
request: cdn_20141111_models.DescribeDomainQpsDataRequest,
) -> cdn_20141111_models.DescribeDomainQpsDataResponse:
runtime = util_models.RuntimeOptions()
return await self.describe_domain_qps_data_with_options_async(request, runtime)
def describe_domain_region_data_with_options(
self,
request: cdn_20141111_models.DescribeDomainRegionDataRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainRegionDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainRegionData',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainRegionDataResponse(),
self.call_api(params, req, runtime)
)
async def describe_domain_region_data_with_options_async(
self,
request: cdn_20141111_models.DescribeDomainRegionDataRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainRegionDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainRegionData',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainRegionDataResponse(),
await self.call_api_async(params, req, runtime)
)
def describe_domain_region_data(
self,
request: cdn_20141111_models.DescribeDomainRegionDataRequest,
) -> cdn_20141111_models.DescribeDomainRegionDataResponse:
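        """
        Queries the geographic (region) distribution of visitors and traffic for a domain over
        a time range (DescribeDomainRegionData action; summary inferred from the action name).
        """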
runtime = util_models.RuntimeOptions()
return self.describe_domain_region_data_with_options(request, runtime)
async def describe_domain_region_data_async(
self,
request: cdn_20141111_models.DescribeDomainRegionDataRequest,
) -> cdn_20141111_models.DescribeDomainRegionDataResponse:
runtime = util_models.RuntimeOptions()
return await self.describe_domain_region_data_with_options_async(request, runtime)
def describe_domain_req_hit_rate_data_with_options(
self,
request: cdn_20141111_models.DescribeDomainReqHitRateDataRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainReqHitRateDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.interval):
query['Interval'] = request.interval
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainReqHitRateData',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainReqHitRateDataResponse(),
self.call_api(params, req, runtime)
)
async def describe_domain_req_hit_rate_data_with_options_async(
self,
request: cdn_20141111_models.DescribeDomainReqHitRateDataRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainReqHitRateDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.interval):
query['Interval'] = request.interval
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainReqHitRateData',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainReqHitRateDataResponse(),
await self.call_api_async(params, req, runtime)
)
def describe_domain_req_hit_rate_data(
self,
request: cdn_20141111_models.DescribeDomainReqHitRateDataRequest,
) -> cdn_20141111_models.DescribeDomainReqHitRateDataResponse:
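        """
        Queries request hit-rate monitoring data for a domain over a time range
        (DescribeDomainReqHitRateData action; summary inferred from the action name).
        """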
runtime = util_models.RuntimeOptions()
return self.describe_domain_req_hit_rate_data_with_options(request, runtime)
async def describe_domain_req_hit_rate_data_async(
self,
request: cdn_20141111_models.DescribeDomainReqHitRateDataRequest,
) -> cdn_20141111_models.DescribeDomainReqHitRateDataResponse:
runtime = util_models.RuntimeOptions()
return await self.describe_domain_req_hit_rate_data_with_options_async(request, runtime)
def describe_domain_src_bps_data_with_options(
self,
request: cdn_20141111_models.DescribeDomainSrcBpsDataRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainSrcBpsDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.fix_time_gap):
query['FixTimeGap'] = request.fix_time_gap
if not UtilClient.is_unset(request.interval):
query['Interval'] = request.interval
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
if not UtilClient.is_unset(request.time_merge):
query['TimeMerge'] = request.time_merge
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainSrcBpsData',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainSrcBpsDataResponse(),
self.call_api(params, req, runtime)
)
async def describe_domain_src_bps_data_with_options_async(
self,
request: cdn_20141111_models.DescribeDomainSrcBpsDataRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainSrcBpsDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.fix_time_gap):
query['FixTimeGap'] = request.fix_time_gap
if not UtilClient.is_unset(request.interval):
query['Interval'] = request.interval
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
if not UtilClient.is_unset(request.time_merge):
query['TimeMerge'] = request.time_merge
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainSrcBpsData',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainSrcBpsDataResponse(),
await self.call_api_async(params, req, runtime)
)
def describe_domain_src_bps_data(
self,
request: cdn_20141111_models.DescribeDomainSrcBpsDataRequest,
) -> cdn_20141111_models.DescribeDomainSrcBpsDataResponse:
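        """
        Queries back-to-origin bandwidth (bps) data for a domain over a time range
        (DescribeDomainSrcBpsData action; summary inferred from the action name and request
        fields).
        """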
runtime = util_models.RuntimeOptions()
return self.describe_domain_src_bps_data_with_options(request, runtime)
async def describe_domain_src_bps_data_async(
self,
request: cdn_20141111_models.DescribeDomainSrcBpsDataRequest,
) -> cdn_20141111_models.DescribeDomainSrcBpsDataResponse:
runtime = util_models.RuntimeOptions()
return await self.describe_domain_src_bps_data_with_options_async(request, runtime)
def describe_domain_src_flow_data_with_options(
self,
request: cdn_20141111_models.DescribeDomainSrcFlowDataRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainSrcFlowDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.fix_time_gap):
query['FixTimeGap'] = request.fix_time_gap
if not UtilClient.is_unset(request.interval):
query['Interval'] = request.interval
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
if not UtilClient.is_unset(request.time_merge):
query['TimeMerge'] = request.time_merge
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainSrcFlowData',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainSrcFlowDataResponse(),
self.call_api(params, req, runtime)
)
async def describe_domain_src_flow_data_with_options_async(
self,
request: cdn_20141111_models.DescribeDomainSrcFlowDataRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainSrcFlowDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.fix_time_gap):
query['FixTimeGap'] = request.fix_time_gap
if not UtilClient.is_unset(request.interval):
query['Interval'] = request.interval
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
if not UtilClient.is_unset(request.time_merge):
query['TimeMerge'] = request.time_merge
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainSrcFlowData',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainSrcFlowDataResponse(),
await self.call_api_async(params, req, runtime)
)
def describe_domain_src_flow_data(
self,
request: cdn_20141111_models.DescribeDomainSrcFlowDataRequest,
) -> cdn_20141111_models.DescribeDomainSrcFlowDataResponse:
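        """
        Queries back-to-origin traffic (flow) data for a domain over a time range
        (DescribeDomainSrcFlowData action; summary inferred from the action name and request
        fields).
        """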
runtime = util_models.RuntimeOptions()
return self.describe_domain_src_flow_data_with_options(request, runtime)
async def describe_domain_src_flow_data_async(
self,
request: cdn_20141111_models.DescribeDomainSrcFlowDataRequest,
) -> cdn_20141111_models.DescribeDomainSrcFlowDataResponse:
runtime = util_models.RuntimeOptions()
return await self.describe_domain_src_flow_data_with_options_async(request, runtime)
def describe_domain_uv_data_with_options(
self,
request: cdn_20141111_models.DescribeDomainUvDataRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainUvDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainUvData',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainUvDataResponse(),
self.call_api(params, req, runtime)
)
async def describe_domain_uv_data_with_options_async(
self,
request: cdn_20141111_models.DescribeDomainUvDataRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainUvDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainUvData',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainUvDataResponse(),
await self.call_api_async(params, req, runtime)
)
def describe_domain_uv_data(
self,
request: cdn_20141111_models.DescribeDomainUvDataRequest,
) -> cdn_20141111_models.DescribeDomainUvDataResponse:
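        """
        Queries unique-visitor (UV) statistics for a domain over a time range
        (DescribeDomainUvData action; summary inferred from the action name).
        """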
runtime = util_models.RuntimeOptions()
return self.describe_domain_uv_data_with_options(request, runtime)
async def describe_domain_uv_data_async(
self,
request: cdn_20141111_models.DescribeDomainUvDataRequest,
) -> cdn_20141111_models.DescribeDomainUvDataResponse:
runtime = util_models.RuntimeOptions()
return await self.describe_domain_uv_data_with_options_async(request, runtime)
def describe_domains_by_source_with_options(
self,
request: cdn_20141111_models.DescribeDomainsBySourceRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainsBySourceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
if not UtilClient.is_unset(request.sources):
query['Sources'] = request.sources
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainsBySource',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainsBySourceResponse(),
self.call_api(params, req, runtime)
)
async def describe_domains_by_source_with_options_async(
self,
request: cdn_20141111_models.DescribeDomainsBySourceRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainsBySourceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
if not UtilClient.is_unset(request.sources):
query['Sources'] = request.sources
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainsBySource',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainsBySourceResponse(),
await self.call_api_async(params, req, runtime)
)
def describe_domains_by_source(
self,
request: cdn_20141111_models.DescribeDomainsBySourceRequest,
) -> cdn_20141111_models.DescribeDomainsBySourceResponse:
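        """
        Queries which accelerated domains are configured with the given origin sources
        (DescribeDomainsBySource action; summary inferred from the action name and the
        Sources request field).
        """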
runtime = util_models.RuntimeOptions()
return self.describe_domains_by_source_with_options(request, runtime)
async def describe_domains_by_source_async(
self,
request: cdn_20141111_models.DescribeDomainsBySourceRequest,
) -> cdn_20141111_models.DescribeDomainsBySourceResponse:
runtime = util_models.RuntimeOptions()
return await self.describe_domains_by_source_with_options_async(request, runtime)
def describe_domains_usage_by_day_with_options(
self,
request: cdn_20141111_models.DescribeDomainsUsageByDayRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainsUsageByDayResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainsUsageByDay',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainsUsageByDayResponse(),
self.call_api(params, req, runtime)
)
async def describe_domains_usage_by_day_with_options_async(
self,
request: cdn_20141111_models.DescribeDomainsUsageByDayRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeDomainsUsageByDayResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeDomainsUsageByDay',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeDomainsUsageByDayResponse(),
await self.call_api_async(params, req, runtime)
)
def describe_domains_usage_by_day(
self,
request: cdn_20141111_models.DescribeDomainsUsageByDayRequest,
) -> cdn_20141111_models.DescribeDomainsUsageByDayResponse:
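        """
        Queries per-day usage statistics for a domain within a time range
        (DescribeDomainsUsageByDay action; summary inferred from the action name).
        """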
runtime = util_models.RuntimeOptions()
return self.describe_domains_usage_by_day_with_options(request, runtime)
async def describe_domains_usage_by_day_async(
self,
request: cdn_20141111_models.DescribeDomainsUsageByDayRequest,
) -> cdn_20141111_models.DescribeDomainsUsageByDayResponse:
runtime = util_models.RuntimeOptions()
return await self.describe_domains_usage_by_day_with_options_async(request, runtime)
def describe_refresh_quota_with_options(
self,
request: cdn_20141111_models.DescribeRefreshQuotaRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeRefreshQuotaResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeRefreshQuota',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeRefreshQuotaResponse(),
self.call_api(params, req, runtime)
)
async def describe_refresh_quota_with_options_async(
self,
request: cdn_20141111_models.DescribeRefreshQuotaRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeRefreshQuotaResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeRefreshQuota',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeRefreshQuotaResponse(),
await self.call_api_async(params, req, runtime)
)
def describe_refresh_quota(
self,
request: cdn_20141111_models.DescribeRefreshQuotaRequest,
) -> cdn_20141111_models.DescribeRefreshQuotaResponse:
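        """
        Queries the remaining daily quota for cache refresh (purge) and prefetch operations
        (DescribeRefreshQuota action; summary inferred from the action name).
        """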
runtime = util_models.RuntimeOptions()
return self.describe_refresh_quota_with_options(request, runtime)
async def describe_refresh_quota_async(
self,
request: cdn_20141111_models.DescribeRefreshQuotaRequest,
) -> cdn_20141111_models.DescribeRefreshQuotaResponse:
runtime = util_models.RuntimeOptions()
return await self.describe_refresh_quota_with_options_async(request, runtime)
def describe_top_domains_by_flow_with_options(
self,
request: cdn_20141111_models.DescribeTopDomainsByFlowRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeTopDomainsByFlowResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.limit):
query['Limit'] = request.limit
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.product):
query['Product'] = request.product
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeTopDomainsByFlow',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeTopDomainsByFlowResponse(),
self.call_api(params, req, runtime)
)
async def describe_top_domains_by_flow_with_options_async(
self,
request: cdn_20141111_models.DescribeTopDomainsByFlowRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeTopDomainsByFlowResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.limit):
query['Limit'] = request.limit
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.product):
query['Product'] = request.product
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeTopDomainsByFlow',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeTopDomainsByFlowResponse(),
await self.call_api_async(params, req, runtime)
)
def describe_top_domains_by_flow(
self,
request: cdn_20141111_models.DescribeTopDomainsByFlowRequest,
) -> cdn_20141111_models.DescribeTopDomainsByFlowResponse:
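        """
        Queries the top accelerated domains ranked by traffic within a time range, capped at
        the Limit request field (DescribeTopDomainsByFlow action; summary inferred from the
        action name and request fields).
        """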
runtime = util_models.RuntimeOptions()
return self.describe_top_domains_by_flow_with_options(request, runtime)
async def describe_top_domains_by_flow_async(
self,
request: cdn_20141111_models.DescribeTopDomainsByFlowRequest,
) -> cdn_20141111_models.DescribeTopDomainsByFlowResponse:
runtime = util_models.RuntimeOptions()
return await self.describe_top_domains_by_flow_with_options_async(request, runtime)
def describe_user_domains_with_options(
self,
request: cdn_20141111_models.DescribeUserDomainsRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeUserDomainsResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.cdn_type):
query['CdnType'] = request.cdn_type
if not UtilClient.is_unset(request.check_domain_show):
query['CheckDomainShow'] = request.check_domain_show
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.domain_search_type):
query['DomainSearchType'] = request.domain_search_type
if not UtilClient.is_unset(request.domain_status):
query['DomainStatus'] = request.domain_status
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.page_number):
query['PageNumber'] = request.page_number
if not UtilClient.is_unset(request.page_size):
query['PageSize'] = request.page_size
if not UtilClient.is_unset(request.resource_group_id):
query['ResourceGroupId'] = request.resource_group_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
if not UtilClient.is_unset(request.sources):
query['Sources'] = request.sources
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeUserDomains',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeUserDomainsResponse(),
self.call_api(params, req, runtime)
)
async def describe_user_domains_with_options_async(
self,
request: cdn_20141111_models.DescribeUserDomainsRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.DescribeUserDomainsResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.cdn_type):
query['CdnType'] = request.cdn_type
if not UtilClient.is_unset(request.check_domain_show):
query['CheckDomainShow'] = request.check_domain_show
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.domain_search_type):
query['DomainSearchType'] = request.domain_search_type
if not UtilClient.is_unset(request.domain_status):
query['DomainStatus'] = request.domain_status
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.page_number):
query['PageNumber'] = request.page_number
if not UtilClient.is_unset(request.page_size):
query['PageSize'] = request.page_size
if not UtilClient.is_unset(request.resource_group_id):
query['ResourceGroupId'] = request.resource_group_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
if not UtilClient.is_unset(request.sources):
query['Sources'] = request.sources
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='DescribeUserDomains',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.DescribeUserDomainsResponse(),
await self.call_api_async(params, req, runtime)
)
def describe_user_domains(
self,
request: cdn_20141111_models.DescribeUserDomainsRequest,
) -> cdn_20141111_models.DescribeUserDomainsResponse:
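        """
        Lists the accelerated domains under the current account, with optional filtering by
        name, status, CDN type, resource group and origin source, plus paging
        (DescribeUserDomains action; summary inferred from the action name and request fields).
        """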
runtime = util_models.RuntimeOptions()
return self.describe_user_domains_with_options(request, runtime)
async def describe_user_domains_async(
self,
request: cdn_20141111_models.DescribeUserDomainsRequest,
) -> cdn_20141111_models.DescribeUserDomainsResponse:
runtime = util_models.RuntimeOptions()
return await self.describe_user_domains_with_options_async(request, runtime)
def open_cdn_service_with_options(
self,
request: cdn_20141111_models.OpenCdnServiceRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.OpenCdnServiceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.internet_charge_type):
query['InternetChargeType'] = request.internet_charge_type
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OpenCdnService',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.OpenCdnServiceResponse(),
self.call_api(params, req, runtime)
)
async def open_cdn_service_with_options_async(
self,
request: cdn_20141111_models.OpenCdnServiceRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.OpenCdnServiceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.internet_charge_type):
query['InternetChargeType'] = request.internet_charge_type
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='OpenCdnService',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.OpenCdnServiceResponse(),
await self.call_api_async(params, req, runtime)
)
def open_cdn_service(
self,
request: cdn_20141111_models.OpenCdnServiceRequest,
) -> cdn_20141111_models.OpenCdnServiceResponse:
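        """
        Activates the CDN service for the current account with the specified Internet charge
        type (OpenCdnService action; summary inferred from the action name and request fields).
        """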
runtime = util_models.RuntimeOptions()
return self.open_cdn_service_with_options(request, runtime)
async def open_cdn_service_async(
self,
request: cdn_20141111_models.OpenCdnServiceRequest,
) -> cdn_20141111_models.OpenCdnServiceResponse:
runtime = util_models.RuntimeOptions()
return await self.open_cdn_service_with_options_async(request, runtime)
def push_object_cache_with_options(
self,
request: cdn_20141111_models.PushObjectCacheRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.PushObjectCacheResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.area):
query['Area'] = request.area
if not UtilClient.is_unset(request.object_path):
query['ObjectPath'] = request.object_path
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='PushObjectCache',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.PushObjectCacheResponse(),
self.call_api(params, req, runtime)
)
async def push_object_cache_with_options_async(
self,
request: cdn_20141111_models.PushObjectCacheRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.PushObjectCacheResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.area):
query['Area'] = request.area
if not UtilClient.is_unset(request.object_path):
query['ObjectPath'] = request.object_path
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='PushObjectCache',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.PushObjectCacheResponse(),
await self.call_api_async(params, req, runtime)
)
def push_object_cache(
self,
request: cdn_20141111_models.PushObjectCacheRequest,
) -> cdn_20141111_models.PushObjectCacheResponse:
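        """
        Prefetches the specified object paths from the origin server to CDN edge nodes
        (PushObjectCache action; summary inferred from the action name and request fields).
        """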
runtime = util_models.RuntimeOptions()
return self.push_object_cache_with_options(request, runtime)
async def push_object_cache_async(
self,
request: cdn_20141111_models.PushObjectCacheRequest,
) -> cdn_20141111_models.PushObjectCacheResponse:
runtime = util_models.RuntimeOptions()
return await self.push_object_cache_with_options_async(request, runtime)
def refresh_object_caches_with_options(
self,
request: cdn_20141111_models.RefreshObjectCachesRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.RefreshObjectCachesResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.object_path):
query['ObjectPath'] = request.object_path
if not UtilClient.is_unset(request.object_type):
query['ObjectType'] = request.object_type
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='RefreshObjectCaches',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.RefreshObjectCachesResponse(),
self.call_api(params, req, runtime)
)
async def refresh_object_caches_with_options_async(
self,
request: cdn_20141111_models.RefreshObjectCachesRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.RefreshObjectCachesResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.object_path):
query['ObjectPath'] = request.object_path
if not UtilClient.is_unset(request.object_type):
query['ObjectType'] = request.object_type
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='RefreshObjectCaches',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.RefreshObjectCachesResponse(),
await self.call_api_async(params, req, runtime)
)
def refresh_object_caches(
self,
request: cdn_20141111_models.RefreshObjectCachesRequest,
) -> cdn_20141111_models.RefreshObjectCachesResponse:
runtime = util_models.RuntimeOptions()
return self.refresh_object_caches_with_options(request, runtime)
async def refresh_object_caches_async(
self,
request: cdn_20141111_models.RefreshObjectCachesRequest,
) -> cdn_20141111_models.RefreshObjectCachesResponse:
runtime = util_models.RuntimeOptions()
return await self.refresh_object_caches_with_options_async(request, runtime)
def test_describe_domain_bps_data_with_options(
self,
request: cdn_20141111_models.TestDescribeDomainBpsDataRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.TestDescribeDomainBpsDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.domain_type):
query['DomainType'] = request.domain_type
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.interval):
query['Interval'] = request.interval
if not UtilClient.is_unset(request.isp_name_en):
query['IspNameEn'] = request.isp_name_en
if not UtilClient.is_unset(request.location_name_en):
query['LocationNameEn'] = request.location_name_en
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
if not UtilClient.is_unset(request.time_merge):
query['TimeMerge'] = request.time_merge
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='TestDescribeDomainBpsData',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.TestDescribeDomainBpsDataResponse(),
self.call_api(params, req, runtime)
)
async def test_describe_domain_bps_data_with_options_async(
self,
request: cdn_20141111_models.TestDescribeDomainBpsDataRequest,
runtime: util_models.RuntimeOptions,
) -> cdn_20141111_models.TestDescribeDomainBpsDataResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.domain_name):
query['DomainName'] = request.domain_name
if not UtilClient.is_unset(request.domain_type):
query['DomainType'] = request.domain_type
if not UtilClient.is_unset(request.end_time):
query['EndTime'] = request.end_time
if not UtilClient.is_unset(request.interval):
query['Interval'] = request.interval
if not UtilClient.is_unset(request.isp_name_en):
query['IspNameEn'] = request.isp_name_en
if not UtilClient.is_unset(request.location_name_en):
query['LocationNameEn'] = request.location_name_en
if not UtilClient.is_unset(request.start_time):
query['StartTime'] = request.start_time
if not UtilClient.is_unset(request.time_merge):
query['TimeMerge'] = request.time_merge
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='TestDescribeDomainBpsData',
version='2014-11-11',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
cdn_20141111_models.TestDescribeDomainBpsDataResponse(),
await self.call_api_async(params, req, runtime)
)
def test_describe_domain_bps_data(
self,
request: cdn_20141111_models.TestDescribeDomainBpsDataRequest,
) -> cdn_20141111_models.TestDescribeDomainBpsDataResponse:
runtime = util_models.RuntimeOptions()
return self.test_describe_domain_bps_data_with_options(request, runtime)
async def test_describe_domain_bps_data_async(
self,
request: cdn_20141111_models.TestDescribeDomainBpsDataRequest,
) -> cdn_20141111_models.TestDescribeDomainBpsDataResponse:
runtime = util_models.RuntimeOptions()
return await self.test_describe_domain_bps_data_with_options_async(request, runtime)
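    # Illustrative call sequence (a sketch only; the enclosing client class is defined
    # earlier in this module and its construction is not shown in this excerpt, and the
    # object path / type values below are made up):
    #
    #   request = cdn_20141111_models.RefreshObjectCachesRequest(
    #       object_path='http://example.com/image/logo.png',
    #       object_type='File',
    #   )
    #   response = client.refresh_object_caches(request)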
| [
"[email protected]"
] | |
858a53123632c2341a8d43156ec562807a7a9d52 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_205/ch139_2020_04_01_19_45_57_332549.py | 850ba88067b364259c8a558e3cceaab56293684b | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py |
def arcotangente(x,n):
    # Taylor series arctan(x) ~ x - x**3/3 + x**5/5 - ..., summing terms of degree < n
    m = 3
i = 3
u = -1
z = x
while (m<n):
z += u*(x**m/i)
u*=-1
m+=2
i+=2
return z | [
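# Quick check of the series above (illustrative, not part of the original exercise):
# arctan(0.5) = 0.4636476..., and the series converges quickly for |x| < 1, so
# arcotangente(0.5, 20) should match to about seven decimal places.
if __name__ == '__main__':
    print(arcotangente(0.5, 20))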
"[email protected]"
] | |
3216fe50f659f9555182cd6e9010327a99bc736c | 50c2bf03543eff23ec2e88f086e33848b50b5c4f | /docs/links.py | 7fb1ce92193eab8aaee889f6876ac192227aa78d | [] | no_license | CiscoTestAutomation/geniefiletransferutilslib | d06967476d78eafe1984a9991a57def25523ade7 | 9c32f121816d7d8f4a1fc4fc1b7c2fe0cf4e9449 | refs/heads/master | 2021-06-03T21:04:24.922438 | 2020-01-20T19:36:53 | 2020-01-20T19:36:53 | 131,624,514 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py |
internal_links = {'pyats': ('%s://wwwin-pyats.cisco.com', 'pyATS'),
'devnet': ('%s://developer.cisco.com/', 'Cisco DevNet'),
'multiprotocolfileutilities': ('%s://wwwin-pyats.cisco.com/documentation/html/utilities/file_transfer_utilities.html', 'Multiprotocol File Transfer'),
'mailto': ('mailto:asg-genie-support@%s','mailto'),
'communityforum': ('%s://piestack.cisco.com', 'community forum'),
}
external_links = {'pyats': ('%ss://developer.cisco.com/site/pyats/', 'pyATS'),
'devnet': ('%ss://developer.cisco.com/', 'Cisco DevNet'),
'multiprotocolfileutilities': ('%ss://pubhub.devnetcloud.com/media/pyats/docs/utilities/file_transfer_utilities.html', 'Multiprotocol File Transfer'),
'mailto': ('mailto:pyats-support-ext@%s','mailto'),
'communityforum': ('%ss://communities.cisco.com/community/developer/pyats', 'community forum'),
}
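# Both tables map a link name to a (URL template, label) pair; the template is
# presumably completed with the URL scheme by the Sphinx configuration, e.g.
# internal_links['pyats'][0] % 'http' -> 'http://wwwin-pyats.cisco.com'.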
| [
"[email protected]"
] | |
221b36b5f091132763c293d1bd0373aa8ab7f2c8 | 7f80ea25908ce2eba6f6a72689f88c142319fe56 | /backtracking/baekjoon/2580.py | 6451fe1e80ba3457eba728de64bdc892abf909aa | [] | no_license | JUNGEEYOU/Algorithm-Problems | 1b242ae3aec3005d4e449f8b6170a63d1acac60b | 5e4a8a37254120c7c572b545d99006ebb512e151 | refs/heads/main | 2023-04-06T11:45:47.867171 | 2021-04-22T13:49:36 | 2021-04-22T13:49:36 | 353,240,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | import sys
zero_list = []
arr = []
for i in range(9):
x = list(map(int, sys.stdin.readline().split()))
zero_list.extend([(i, j) for j in range(len(x)) if x[j] == 0])
arr.append(x)
zero = len(zero_list)
def candidates(x, y):
    # digits that do not yet appear in row x, column y, or the 3x3 box containing (x, y)
    used = set(arr[x])
    used.update(arr[i][y] for i in range(9))
    bx, by = 3 * (x // 3), 3 * (y // 3)
    for i in range(bx, bx + 3):
        for j in range(by, by + 3):
            used.add(arr[i][j])
    return [d for d in range(1, 10) if d not in used]
def dfs(idx):
    # every blank filled: print the first completed board and stop
    if idx == zero:
        for row in arr:
            print(' '.join(map(str, row)))
        sys.exit(0)
    x, y = zero_list[idx]
    for d in candidates(x, y):
        arr[x][y] = d
        dfs(idx + 1)
        arr[x][y] = 0  # undo the choice and try the next digit (backtracking)
dfs(0)
"[email protected]"
] | |
6b3e19b3c633b7ce0aa72c220770ab72ab12a828 | 6a0589aa1a5f9071cbcee3f84452c880bf96c12d | /tests/conftest.py | 1b5dcb8b25e52d3f3937e03f61d604e1bf155437 | [
"MIT"
] | permissive | UWPCE-PythonCert/py220_extras | d3203e2fd44ee840d008fac9597a5b0c165e8cc7 | 57336429fb782c4901e7709c0275242e6af4264a | refs/heads/master | 2020-12-01T23:42:58.660565 | 2020-03-11T02:44:18 | 2020-03-11T02:44:18 | 230,816,756 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | # -*- coding: utf-8 -*-
"""
Dummy conftest.py for uw_py220_extras.
If you don't know what this is for, just leave it empty.
Read more about conftest.py under:
https://pytest.org/latest/plugins.html
"""
# import pytest
| [
"[email protected]"
] | |
72f1f59bbbd15bb91ff2e22139d375f363c4fe26 | e228abda54dc7ab992ba634997b0d21b7200d091 | /runtests.py | 9efa69f957de2f6c612397fa92b4ccd7e605a565 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | AltSchool/dynamic-rest | 3fd2456d72fdf84e75556bc0fea7303e496b7ec7 | ed69e5af4ddf153e6eb304b7db80cc6adbf4d654 | refs/heads/master | 2023-09-06T01:57:47.555537 | 2023-03-27T16:15:22 | 2023-03-27T16:15:22 | 31,736,312 | 812 | 131 | MIT | 2023-05-28T09:15:45 | 2015-03-05T21:05:17 | Python | UTF-8 | Python | false | false | 3,117 | py | #! /usr/bin/env python
# Adopted from Django REST Framework:
# https://github.com/tomchristie/django-rest-framework/blob/master/runtests.py
from __future__ import print_function
import os
import subprocess
import sys
import pytest
APP_NAME = 'dynamic_rest'
TESTS = 'tests'
BENCHMARKS = 'benchmarks'
PYTEST_ARGS = {
'default': [
TESTS, '--tb=short', '-s', '-rw'
],
'fast': [
TESTS, '--tb=short', '-q', '-s', '-rw'
],
}
FLAKE8_ARGS = [APP_NAME, TESTS]
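# Typical invocations, based on the argument handling below:
#   ./runtests.py                     # run the test suite, then flake8
#   ./runtests.py --fast              # terser pytest output, skip flake8
#   ./runtests.py --nolint            # tests only
#   ./runtests.py --lintonly          # flake8 only
#   ./runtests.py --benchmarks        # run the benchmark suite instead of the tests
#   ./runtests.py SomeTestCase.test_name --coverage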
sys.path.append(os.path.dirname(__file__))
def exit_on_failure(ret, message=None):
if ret:
sys.exit(ret)
def flake8_main(args):
print('Running flake8 code linting')
ret = subprocess.call(['flake8'] + args)
print('flake8 failed' if ret else 'flake8 passed')
return ret
def split_class_and_function(string):
class_string, function_string = string.split('.', 1)
return "%s and %s" % (class_string, function_string)
def is_function(string):
# `True` if it looks like a test function is included in the string.
return string.startswith('test_') or '.test_' in string
def is_class(string):
# `True` if first character is uppercase - assume it's a class name.
return string[0] == string[0].upper()
if __name__ == "__main__":
try:
sys.argv.remove('--nolint')
except ValueError:
run_flake8 = True
else:
run_flake8 = False
try:
sys.argv.remove('--lintonly')
except ValueError:
run_tests = True
else:
run_tests = False
try:
sys.argv.remove('--benchmarks')
except ValueError:
run_benchmarks = False
else:
run_benchmarks = True
try:
sys.argv.remove('--fast')
except ValueError:
style = 'default'
else:
style = 'fast'
run_flake8 = False
if len(sys.argv) > 1:
pytest_args = sys.argv[1:]
first_arg = pytest_args[0]
try:
pytest_args.remove('--coverage')
except ValueError:
pass
else:
pytest_args = [
'--cov-report',
'xml',
'--cov',
APP_NAME
] + pytest_args
if first_arg.startswith('-'):
# `runtests.py [flags]`
pytest_args = [TESTS] + pytest_args
elif is_class(first_arg) and is_function(first_arg):
# `runtests.py TestCase.test_function [flags]`
expression = split_class_and_function(first_arg)
pytest_args = [TESTS, '-k', expression] + pytest_args[1:]
elif is_class(first_arg) or is_function(first_arg):
# `runtests.py TestCase [flags]`
# `runtests.py test_function [flags]`
pytest_args = [TESTS, '-k', pytest_args[0]] + pytest_args[1:]
else:
pytest_args = PYTEST_ARGS[style]
if run_benchmarks:
pytest_args[0] = BENCHMARKS
pytest_args.append('--ds=%s.settings' % BENCHMARKS)
if run_tests:
exit_on_failure(pytest.main(pytest_args))
if run_flake8:
exit_on_failure(flake8_main(FLAKE8_ARGS))
| [
"[email protected]"
] | |
b8a648e695ffd41107411a2a06894c584e2e6f86 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/securityinsights/v20210301preview/get_dynamics365_data_connector.py | aab4cce733e30b9d124ff6383db6269c8390a7b0 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 5,892 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetDynamics365DataConnectorResult',
'AwaitableGetDynamics365DataConnectorResult',
'get_dynamics365_data_connector',
]
@pulumi.output_type
class GetDynamics365DataConnectorResult:
"""
Represents Dynamics365 data connector.
"""
def __init__(__self__, data_types=None, etag=None, id=None, kind=None, name=None, system_data=None, tenant_id=None, type=None):
if data_types and not isinstance(data_types, dict):
raise TypeError("Expected argument 'data_types' to be a dict")
pulumi.set(__self__, "data_types", data_types)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tenant_id and not isinstance(tenant_id, str):
raise TypeError("Expected argument 'tenant_id' to be a str")
pulumi.set(__self__, "tenant_id", tenant_id)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="dataTypes")
def data_types(self) -> 'outputs.Dynamics365DataConnectorDataTypesResponse':
"""
The available data types for the connector.
"""
return pulumi.get(self, "data_types")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Etag of the azure resource
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
Azure resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> str:
"""
The kind of the data connector
Expected value is 'Dynamics365'.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Azure resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> str:
"""
The tenant id to connect to, and get the data from.
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def type(self) -> str:
"""
Azure resource type
"""
return pulumi.get(self, "type")
class AwaitableGetDynamics365DataConnectorResult(GetDynamics365DataConnectorResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDynamics365DataConnectorResult(
data_types=self.data_types,
etag=self.etag,
id=self.id,
kind=self.kind,
name=self.name,
system_data=self.system_data,
tenant_id=self.tenant_id,
type=self.type)
def get_dynamics365_data_connector(data_connector_id: Optional[str] = None,
operational_insights_resource_provider: Optional[str] = None,
resource_group_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDynamics365DataConnectorResult:
"""
Represents Dynamics365 data connector.
:param str data_connector_id: Connector ID
:param str operational_insights_resource_provider: The namespace of workspaces resource provider- Microsoft.OperationalInsights.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: The name of the workspace.
"""
__args__ = dict()
__args__['dataConnectorId'] = data_connector_id
__args__['operationalInsightsResourceProvider'] = operational_insights_resource_provider
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:securityinsights/v20210301preview:getDynamics365DataConnector', __args__, opts=opts, typ=GetDynamics365DataConnectorResult).value
return AwaitableGetDynamics365DataConnectorResult(
data_types=__ret__.data_types,
etag=__ret__.etag,
id=__ret__.id,
kind=__ret__.kind,
name=__ret__.name,
system_data=__ret__.system_data,
tenant_id=__ret__.tenant_id,
type=__ret__.type)
| [
"[email protected]"
] | |
286036647230c1f20766d06e3e4a66ddc5f011b7 | 2a1f4c4900693c093b2fcf4f84efa60650ef1424 | /py/probe/functions/usb.py | 01655565f4c286d2a11fe60aa67c5066b1325d29 | [
"BSD-3-Clause"
] | permissive | bridder/factory | b925f494303728fa95017d1ba3ff40ac5cf6a2fd | a1b0fccd68987d8cd9c89710adc3c04b868347ec | refs/heads/master | 2023-08-10T18:51:08.988858 | 2021-09-21T03:25:28 | 2021-09-21T03:25:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,148 | py | # Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
from cros.factory.probe.functions import sysfs
from cros.factory.probe.lib import cached_probe_function
REQUIRED_FIELDS = ['idVendor', 'idProduct']
OPTIONAL_FIELDS = ['manufacturer', 'product', 'bcdDevice']
def ReadUSBSysfs(dir_path):
result = sysfs.ReadSysfs(
dir_path, REQUIRED_FIELDS, optional_keys=OPTIONAL_FIELDS)
if result:
result['bus_type'] = 'usb'
return result
class USBFunction(cached_probe_function.GlobPathCachedProbeFunction):
"""Probes all usb devices listed in the sysfs ``/sys/bus/usb/devices/``.
Description
-----------
This function goes through ``/sys/bus/usb/devices/`` to read attributes of
each usb device (also includes usb root hub) listed there. Each result
should contain these fields:
- ``device_path``: Pathname of the sysfs directory.
- ``idVendor``
- ``idProduct``
The result might also contain these optional fields if they are exported in
the sysfs entry:
- ``manufacturer``
- ``product``
- ``bcdDevice``
Examples
--------
Let's say the Chromebook has two usb devices. One of which
(at ``/sys/bus/usb/devices/1-1``) has the attributes:
- ``idVendor=0x0123``
- ``idProduct=0x4567``
- ``manufacturer=Google``
- ``product=Google Fancy Camera``
- ``bcdDevice=0x8901``
And the other one (at ``/sys/bus/usb/devices/1-2``) has the attributes:
- ``idVendor=0x0246``
- ``idProduct=0x1357``
- ``product=Goofy Bluetooth``
Then the probe statement::
{
"eval": "usb"
}
will have the corresponding probed result::
[
{
"bus_type": "usb",
"idVendor": "0123",
"idProduct": "4567",
"manufacturer": "Google",
"product": "Google Fancy Camera",
"bcdDevice": "8901"
},
{
"bus_type": "usb",
"idVendor": "0246",
"idProduct": "1357",
"product": "Goofy Bluetooth"
}
]
To verify if the Chromebook has Google Fancy Camera or not, you can write
a probe statement like::
{
"eval": "usb",
"expect": {
"idVendor": "0123",
"idProduct": "4567"
}
}
and verify if the ``camera`` field of the probed result dict contains
elements or not.
You can also specify ``dir_path`` argument directly to ask the function
to probe that sysfs USB entry. For example, the probe statement ::
{
"eval": "usb:/sys/bus/usb/devices/1-1"
}
will have the corresponding probed results::
[
{
"bus_type": "usb",
"idVendor": "0123",
...
}
]
"""
GLOB_PATH = '/sys/bus/usb/devices/*'
@classmethod
def ProbeDevice(cls, dir_path):
# A valid usb device name is <roothub_num>-<addr>[.<addr2>[.<addr3>...]] or
# usb[0-9]+ for usb root hub.
name = os.path.basename(dir_path)
if (not re.match(r'^[0-9]+-[0-9]+(\.[0-9]+)*$', name) and
not re.match(r'^usb[0-9]+$', name)):
return None
return ReadUSBSysfs(dir_path)
| [
"[email protected]"
] | |
202a88655b5c4915d28f86f89d310486eed37aa5 | 5667b69eee4b384e09625c1c65799a9785336b5b | /ivi/tektronix/tektronixMDO4104.py | 4255a76b58b35d946e378bc805e59ee55b55848d | [
"MIT"
] | permissive | Diti24/python-ivi | ffae0aa38e7340fa142929541ded2148f41e8a9a | 4bf570eeb370789404d5bae8a439b6bbdb57647e | refs/heads/master | 2020-04-08T04:07:06.326253 | 2019-08-05T16:52:00 | 2019-08-05T16:52:00 | 60,081,649 | 0 | 1 | null | 2016-05-31T13:40:19 | 2016-05-31T10:51:47 | Python | UTF-8 | Python | false | false | 1,640 | py | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .tektronixMDO4000 import *
class tektronixMDO4104(tektronixMDO4000):
"Tektronix MDO4104 IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'MDO4104')
super(tektronixMDO4104, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
self._digital_channel_count = 16
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 1e9
self._init_channels()
| [
"[email protected]"
] | |
dfeb29a64581f84d9e2ab512576acb3bf5fbf769 | 51aa2894c317f60726fe9a778999eb7851b6be3e | /140_gui/pyqt_pyside/examples/PyQt_PySide_book/002_Processing_of_signals_and_events/+21_Handling signal and slot/21_9_Using class QTimer.py | d3b27afda8a2731f5c7749a149ae85dd10462344 | [] | no_license | pranaymate/Python_Topics | dd7b288ab0f5bbee71d57080179d6481aae17304 | 33d29e0a5bf4cde104f9c7f0693cf9897f3f2101 | refs/heads/master | 2022-04-25T19:04:31.337737 | 2020-04-26T00:36:03 | 2020-04-26T00:36:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,608 | py | # -*- coding: utf-8 -*-
from PyQt4 import QtCore, QtGui
import time
class MyWindow(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
        self.setWindowTitle("Using the QTimer class")
self.resize(200, 100)
self.label = QtGui.QLabel("")
self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.button1 = QtGui.QPushButton("Start")
        self.button2 = QtGui.QPushButton("Stop")
self.button2.setEnabled(False)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.label)
vbox.addWidget(self.button1)
vbox.addWidget(self.button2)
self.setLayout(vbox)
self.connect(self.button1, QtCore.SIGNAL("clicked()"),
self.on_clicked_button1)
self.connect(self.button2, QtCore.SIGNAL("clicked()"),
self.on_clicked_button2)
self.timer = QtCore.QTimer()
        self.connect(self.timer, QtCore.SIGNAL("timeout()"),
                     self.on_timeout)
def on_clicked_button1(self):
        self.timer.start(1000) # 1 second (interval in milliseconds)
self.button1.setEnabled(False)
self.button2.setEnabled(True)
def on_clicked_button2(self):
self.timer.stop()
self.button1.setEnabled(True)
self.button2.setEnabled(False)
def on_timeout(self):
self.label.setText(time.strftime("%H:%M:%S"))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
window = MyWindow()
window.show()
sys.exit(app.exec_()) | [
"[email protected]"
] | |
f020b777a70bdb831c3655afbe4fb727539df1c7 | 96d31b21fbc196fe83d22ee0fdeb63ba2e58ac4e | /hdf.py | cddbf2eb30743a6d615a5e500d608a8854ec5b2a | [] | no_license | Sandy4321/analysis | 7e0a392b9a9ac79fcefc5504e77303d4baa1b93a | ec2751eddbb5dd64c12d4386a86cda4515302419 | refs/heads/master | 2021-01-21T07:31:25.391005 | 2013-06-22T13:16:38 | 2013-06-22T13:16:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,266 | py | import h5py
import pandas
import os
def add_col(hdf, name, data, compression = 'lzf'):
parts = name.split('/')
dirpath = '/'.join(parts[:-1])
if len(dirpath) > 0 and dirpath not in hdf:
hdf.create_group(dirpath)
hdf.create_dataset(name,
data=data,
dtype=data.dtype,
compression=compression,
chunks=True)
def dict_to_hdf(data, path, header, feature_names = None):
try:
os.remove(path)
except OSError:
pass
hdf = h5py.File(path, 'w')
if feature_names is None:
print "[dict_to_hdf] No feature names given, using dict keys"
feature_names = data.keys()
hdf.attrs['features'] = feature_names
ccy1 = header['ccy'][0]
ccy2 = header['ccy'][1]
hdf.attrs['ccy1'] = ccy1.encode('ascii')
hdf.attrs['ccy2'] = ccy2.encode('ascii')
hdf.attrs['ccy'] = (ccy1 + "/" + ccy2).encode('ascii')
hdf.attrs['year'] = header['year']
hdf.attrs['month'] = header['month']
hdf.attrs['day'] = header['day']
hdf.attrs['venue'] = header['venue'].encode('ascii')
hdf.attrs['start_time'] = data['t'][0]
hdf.attrs['end_time'] = data['t'][-1]
for name, vec in data.items():
add_col(hdf, name, vec)
# if program quits before this flag is added, ok to overwrite
# file in the future
hdf.attrs['finished'] = True
hdf.close()
def header_from_hdf(f):
a = f.attrs
assert 'ccy1' in a
assert 'ccy2' in a
assert 'year' in a
assert 'month' in a
assert 'day' in a
assert 'venue' in a
assert 'start_time' in a
assert 'end_time' in a
assert 'features' in a
header = {
'ccy': (a['ccy1'], a['ccy2']),
'year' : a['year'],
'month' : a['month'],
'day' : a['day'],
'venue' : a['venue'],
'start_time' : a['start_time'],
'end_time' : a['end_time'],
'features': a['features'],
}
return header
def header_from_hdf_filename(filename):
f = h5py.File(filename)
header = header_from_hdf(f)
f.close()
return header
def same_features(f1, f2):
s1 = set(f1)
s2 = set(f2)
same = s1 == s2
if not same:
print "Different features:", \
s1.symmetric_difference(s2)
return same
# file exists and 'finished' flag is true
def complete_hdf_exists(filename, feature_names):
if not os.path.exists(filename):
print "Doesn't exist"
return False
try:
f = h5py.File(filename, 'r')
attrs = f.attrs
finished = 'finished' in attrs and attrs['finished']
has_ccy = 'ccy1' in attrs and 'ccy2' in attrs
has_date = 'year' in attrs and 'month' in attrs and 'day' in f.attrs
has_venue = 'venue' in attrs
has_features = 'features' in attrs
if has_features:
have_same_features = same_features(attrs['features'], feature_names)
else:
have_same_features = False
f.close()
return finished and has_ccy and has_date and has_venue and \
has_features and have_same_features
except:
import sys
print sys.exc_info()
return False
def dataframe_from_hdf(f):
cols = dict([(k,np.array(v[:])) for k, v in f.items()])
return pandas.DataFrame(data=cols, index=f['t'], dtype='float')
def dataframe_from_hdf_filename(path):
f = h5py.File(path)
df = dataframe_from_hdf(f)
f.close()
return df
| [
"[email protected]"
] | |
febab4b2955536ed556e53abca2fbc70e4387f08 | f0604a3a32177e6baa0fad2c01766c3e99df3fe6 | /courator/config.py | 91678558386039d8d473ee9a055c685c75b9b02c | [
"MIT"
] | permissive | Courator/courator-backend | 354390902ae6bc8faa17e47ef2c3596162423f52 | 726845a06c1be7693fd107bdf571ea40b7d398ec | refs/heads/master | 2021-04-22T02:07:09.563157 | 2020-05-07T08:09:30 | 2020-05-07T08:09:30 | 249,842,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | import logging
from databases import DatabaseURL
from starlette.config import Config
from starlette.datastructures import Secret
from .logging import setup_logging
config = Config(".env")
DEBUG = config("DEBUG", cast=bool, default=False)
DATABASE_URL: DatabaseURL = config("DB_CONNECTION", cast=DatabaseURL)
SECRET_KEY: Secret = config("SECRET_KEY", cast=Secret)
TOKEN_EXPIRATION_DAYS: float = config("TOKEN_EXPIRATION_DAYS", cast=float, default=60.0)
TOKEN_ALGORITHM = "HS256"
setup_logging(
("uvicorn.asgi", "uvicorn.access"),
logging.DEBUG if DEBUG else logging.INFO
)
| [
"[email protected]"
] | |
3a196c69b9f2abbd039544758aa0e5f4ffeb1fc0 | 7a1b88d06ea18772b065b43d775cec6dd2acdf80 | /1620.py | af8268a37b2479d7b8eb091095dcf56bd0c39388 | [] | no_license | skaurl/baekjoon-online-judge | 28144cca45168e79b1ae0baa9a351f498f8d19ab | 1620d298c2f429e03c5f9387d8aca13763f5c731 | refs/heads/master | 2023-07-26T10:07:29.724066 | 2021-09-07T09:21:02 | 2021-09-07T09:21:02 | 299,019,978 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | import sys
a,b = map(int,sys.stdin.readline().strip().split())
dict_1 = {}
dict_2 = {}
for i in range(a):
name = sys.stdin.readline().strip()
dict_1[name] = i+1
dict_2[i+1] = name
for i in range(b):
    x = sys.stdin.readline().strip()
    try:
        # numeric query: look the name up by its number
        print(dict_2[int(x)])
    except ValueError:
        # otherwise the query is a name: look its number up
        print(dict_1[x])
"[email protected]"
] | |
06df8306b20a7459428d2bec87c1891edbec67bc | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_95/1593.py | e3d0777f40977183a794b3eb8007eeb8c64e1509 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | #!/usr/bin/env python
mm = {'a': 'y', 'c': 'e', 'b': 'h', 'e': 'o', 'd': 's', 'g': 'v', 'f': 'c', 'i': 'd', 'h': 'x', 'k': 'i', 'j': 'u', 'm': 'l', 'l': 'g', 'o': 'k', 'n': 'b', 'p': 'r', 's': 'n', 'r': 't', 'u': 'j', 't': 'w', 'w': 'f', 'v': 'p', 'y': 'a', 'x': 'm', 'q': 'z','z':'q',' ': ' ','\n': ''}
def str_tran(inp):
string = ''
for i in inp:
string += mm[i]
return string
if __name__ == '__main__':
f = open('input')
a = int( f.readline() )
for case in range(a):
line = f.readline()
result = str_tran(line)
print "Case #"+str(case+1)+":", result
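# Sanity check: with the mapping above, str_tran('ejp mysljylc kd kxveddknmc re jsicpdrysi')
# returns 'our language is impossible to understand'.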
| [
"[email protected]"
] | |
f7b756861161b2a1d93f5522f0606c0fa0e8c1a9 | 09cd370cdae12eb45090033a00e9aae45ee26638 | /STUDY/Graph Theory/18-43 어두운 길.py | ec19f71096e072352c9493057e1ec9be94080fd2 | [] | no_license | KWONILCHEOL/Python | ee340f6328945651eb29d2b23c425a92c84a4adb | 1ea5f5f74894a5929e0e894c5c12f049b8eb9fb4 | refs/heads/main | 2023-04-11T09:36:54.874638 | 2021-04-24T04:29:12 | 2021-04-24T04:29:12 | 328,658,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | import sys
input = sys.stdin.readline
def find_parent(parent, x):
    # iterative find with path halving: point x at its grandparent while climbing
    while parent[x] != x:
        parent[x], x = parent[parent[x]], parent[x]
    return x
def union_parent(parent, a, b):
    # merge the two sets, keeping the smaller root index as the representative
    a = find_parent(parent, a)
    b = find_parent(parent, b)
    if a < b:
        parent[b] = a
    else:
        parent[a] = b
n, m = map(int, input().split())
parent = [i for i in range(n)]
edges = []
total = 0
for _ in range(m):
a, b, c = map(int, input().split())
edges.append((c,a,b))
total += c
edges.sort()
# Kruskal's algorithm: the cheapest edges that connect new components form the MST
# (the streets that stay lit); every other street can be switched off, so the answer
# is the total cost minus the MST cost.
for c, a, b in edges:
    if find_parent(parent, a) != find_parent(parent, b):
        union_parent(parent, a, b)
        total -= c
print(total)
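# Sample check: the commented input below (7 nodes, 11 edges, total cost 90) yields an
# MST of cost 39, so the program prints the saving 51.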
# 7 11
# 0 1 7
# 0 3 5
# 1 2 8
# 1 3 9
# 1 4 7
# 2 4 5
# 3 4 15
# 3 5 6
# 4 5 8
# 4 6 9
# 5 6 11 | [
"[email protected]"
] | |
17e37a8ad51613ef444d2626b00890e753fa8ae1 | 1bdbf12e6fa1091beeb4ce0d923664e779cda509 | /NumPy/Getting_Started.py | 6af75b8b95932f5dc297a0092c0db418cb5b32b1 | [] | no_license | balajisaikumar2000/Python-Snippets | d61176f3a3752861c5a355c09b27c418727b90fd | b36a642b99cb2517438f773d21654ae4fef2cc05 | refs/heads/master | 2023-01-24T05:57:03.654796 | 2020-12-05T13:38:58 | 2020-12-05T13:38:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | import numpy as np
arr = np.array([1,2,3,4,5])
print(arr)
#version:
print(np.__version__) | [
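# Expected output: the array prints as "[1 2 3 4 5]"; the version line depends on the
# installed NumPy release (e.g. "1.24.3").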
"[email protected]"
] |