ext (stringclasses, 9 values) | sha (stringlengths, 40–40) | content (stringlengths, 3–1.04M)
---|---|---
py | 1a3cec500a0ca6e12402a8268e9328f5079dc0fd | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Harmonic calculations for frequency representations'''
import numpy as np
import scipy.interpolate
import scipy.signal
from ..util.exceptions import ParameterError
__all__ = ['salience', 'interp_harmonics']
def salience(S, freqs, h_range, weights=None, aggregate=None,
filter_peaks=True, fill_value=np.nan, kind='linear', axis=0):
"""Harmonic salience function.
Parameters
----------
S : np.ndarray [shape=(d, n)]
input time frequency magnitude representation (stft, ifgram, etc).
Must be real-valued and non-negative.
freqs : np.ndarray, shape=(S.shape[axis])
The frequency values corresponding to S's elements along the
chosen axis.
h_range : list-like, non-negative
Harmonics to include in salience computation. The first harmonic (1)
corresponds to `S` itself. Values less than one (e.g., 1/2) correspond
to sub-harmonics.
weights : list-like
The weight to apply to each harmonic in the summation. (default:
uniform weights). Must be the same length as `h_range`.
aggregate : function
aggregation function (default: `np.average`)
If `aggregate=np.average`, then a weighted average is
computed per-harmonic according to the specified weights.
For all other aggregation functions, all harmonics
are treated equally.
filter_peaks : bool
If true, returns harmonic summation only on frequencies of peak
magnitude. Otherwise returns harmonic summation over the full spectrum.
Defaults to True.
fill_value : float
The value to fill non-peaks in the output representation. (default:
np.nan) Only used if `filter_peaks == True`.
kind : str
Interpolation type for harmonic estimation.
See `scipy.interpolate.interp1d`.
axis : int
The axis along which to compute harmonics
Returns
-------
S_sal : np.ndarray, shape=S.shape
`S_sal` will have the same shape as `S`, and measures
the overall harmonic energy at each frequency.
See Also
--------
interp_harmonics
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... duration=15, offset=30)
>>> S = np.abs(librosa.stft(y))
>>> freqs = librosa.core.fft_frequencies(sr)
>>> harms = [1, 2, 3, 4]
>>> weights = [1.0, 0.5, 0.33, 0.25]
>>> S_sal = librosa.salience(S, freqs, harms, weights, fill_value=0)
>>> print(S_sal.shape)
(1025, 646)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(librosa.amplitude_to_db(S_sal,
... ref=np.max),
... sr=sr, y_axis='log', x_axis='time')
>>> plt.colorbar()
>>> plt.title('Salience spectrogram')
>>> plt.tight_layout()
"""
if aggregate is None:
aggregate = np.average
if weights is None:
weights = np.ones((len(h_range), ))
else:
weights = np.array(weights, dtype=float)
S_harm = interp_harmonics(S, freqs, h_range, kind=kind, axis=axis)
if aggregate is np.average:
S_sal = aggregate(S_harm, axis=0, weights=weights)
else:
S_sal = aggregate(S_harm, axis=0)
if filter_peaks:
S_peaks = scipy.signal.argrelmax(S, axis=0)
S_out = np.empty(S.shape)
S_out.fill(fill_value)
S_out[S_peaks[0], S_peaks[1]] = S_sal[S_peaks[0], S_peaks[1]]
S_sal = S_out
return S_sal
def interp_harmonics(x, freqs, h_range, kind='linear', fill_value=0, axis=0):
'''Compute the energy at harmonics of time-frequency representation.
Given a frequency-based energy representation such as a spectrogram
or tempogram, this function computes the energy at the chosen harmonics
of the frequency axis. (See examples below.)
The resulting harmonic array can then be used as input to a salience
computation.
Parameters
----------
x : np.ndarray
The input energy
freqs : np.ndarray, shape=(x.shape[axis])
The frequency values corresponding to x's elements along the
chosen axis.
h_range : list-like, non-negative
Harmonics to compute. The first harmonic (1) corresponds to `x`
itself.
Values less than one (e.g., 1/2) correspond to sub-harmonics.
kind : str
Interpolation type. See `scipy.interpolate.interp1d`.
fill_value : float
The value to fill when extrapolating beyond the observed
frequency range.
axis : int
The axis along which to compute harmonics
Returns
-------
x_harm : np.ndarray, shape=(len(h_range), [x.shape])
`x_harm[i]` will have the same shape as `x`, and measure
the energy at the `h_range[i]` harmonic of each frequency.
See Also
--------
scipy.interpolate.interp1d
Examples
--------
Estimate the harmonics of a time-averaged tempogram
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... duration=15, offset=30)
>>> # Compute the time-varying tempogram and average over time
>>> tempi = np.mean(librosa.feature.tempogram(y=y, sr=sr), axis=1)
>>> # We'll measure the first five harmonics
>>> h_range = [1, 2, 3, 4, 5]
>>> f_tempo = librosa.tempo_frequencies(len(tempi), sr=sr)
>>> # Build the harmonic tensor
>>> t_harmonics = librosa.interp_harmonics(tempi, f_tempo, h_range)
>>> print(t_harmonics.shape)
(5, 384)
>>> # And plot the results
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(t_harmonics, x_axis='tempo', sr=sr)
>>> plt.yticks(0.5 + np.arange(len(h_range)),
... ['{:.3g}'.format(_) for _ in h_range])
>>> plt.ylabel('Harmonic')
>>> plt.xlabel('Tempo (BPM)')
>>> plt.tight_layout()
We can also compute frequency harmonics for spectrograms.
To calculate sub-harmonic energy, use values < 1.
>>> h_range = [1./3, 1./2, 1, 2, 3, 4]
>>> S = np.abs(librosa.stft(y))
>>> fft_freqs = librosa.fft_frequencies(sr=sr)
>>> S_harm = librosa.interp_harmonics(S, fft_freqs, h_range, axis=0)
>>> print(S_harm.shape)
(6, 1025, 646)
>>> plt.figure()
>>> for i, _sh in enumerate(S_harm, 1):
... plt.subplot(3, 2, i)
... librosa.display.specshow(librosa.amplitude_to_db(_sh,
... ref=S.max()),
... sr=sr, y_axis='log')
... plt.title('h={:.3g}'.format(h_range[i-1]))
... plt.yticks([])
>>> plt.tight_layout()
'''
# X_out will be the same shape as X, plus a leading
# axis that has length = len(h_range)
out_shape = [len(h_range)]
out_shape.extend(x.shape)
x_out = np.zeros(out_shape, dtype=x.dtype)
if freqs.ndim == 1 and len(freqs) == x.shape[axis]:
harmonics_1d(x_out, x, freqs, h_range,
kind=kind, fill_value=fill_value,
axis=axis)
elif freqs.ndim == 2 and freqs.shape == x.shape:
harmonics_2d(x_out, x, freqs, h_range,
kind=kind, fill_value=fill_value,
axis=axis)
else:
raise ParameterError('freqs.shape={} does not match '
'input shape={}'.format(freqs.shape, x.shape))
return x_out
def harmonics_1d(harmonic_out, x, freqs, h_range, kind='linear',
fill_value=0, axis=0):
'''Populate a harmonic tensor from a time-frequency representation.
Parameters
----------
harmonic_out : np.ndarray, shape=(len(h_range), x.shape)
The output array to store harmonics
x : np.ndarray
The input energy
freqs : np.ndarray, shape=(x.shape[axis])
The frequency values corresponding to x's elements along the
chosen axis.
h_range : list-like, non-negative
Harmonics to compute. The first harmonic (1) corresponds to `x`
itself.
Values less than one (e.g., 1/2) correspond to sub-harmonics.
kind : str
Interpolation type. See `scipy.interpolate.interp1d`.
fill_value : float
The value to fill when extrapolating beyond the observed
frequency range.
axis : int
The axis along which to compute harmonics
See Also
--------
interp_harmonics
scipy.interpolate.interp1d
Examples
--------
Estimate the harmonics of a time-averaged tempogram
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... duration=15, offset=30)
>>> # Compute the time-varying tempogram and average over time
>>> tempi = np.mean(librosa.feature.tempogram(y=y, sr=sr), axis=1)
>>> # We'll measure the first five harmonics
>>> h_range = [1, 2, 3, 4, 5]
>>> f_tempo = librosa.tempo_frequencies(len(tempi), sr=sr)
>>> # Build the harmonic tensor
>>> t_harmonics = librosa.interp_harmonics(tempi, f_tempo, h_range)
>>> print(t_harmonics.shape)
(5, 384)
>>> # And plot the results
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(t_harmonics, x_axis='tempo', sr=sr)
>>> plt.yticks(0.5 + np.arange(len(h_range)),
... ['{:.3g}'.format(_) for _ in h_range])
>>> plt.ylabel('Harmonic')
>>> plt.xlabel('Tempo (BPM)')
>>> plt.tight_layout()
We can also compute frequency harmonics for spectrograms.
To calculate subharmonic energy, use values < 1.
>>> h_range = [1./3, 1./2, 1, 2, 3, 4]
>>> S = np.abs(librosa.stft(y))
>>> fft_freqs = librosa.fft_frequencies(sr=sr)
>>> S_harm = librosa.interp_harmonics(S, fft_freqs, h_range, axis=0)
>>> print(S_harm.shape)
(6, 1025, 646)
>>> plt.figure()
>>> for i, _sh in enumerate(S_harm, 1):
... plt.subplot(3,2,i)
... librosa.display.specshow(librosa.amplitude_to_db(_sh,
... ref=S.max()),
... sr=sr, y_axis='log')
... plt.title('h={:.3g}'.format(h_range[i-1]))
... plt.yticks([])
>>> plt.tight_layout()
'''
# Note: this only works for fixed-grid, 1d interpolation
f_interp = scipy.interpolate.interp1d(freqs, x,
kind=kind,
axis=axis,
copy=False,
bounds_error=False,
fill_value=fill_value)
idx_out = [slice(None)] * harmonic_out.ndim
# Compute the output index of the interpolated values
interp_axis = 1 + (axis % x.ndim)
# Iterate over the harmonics range
for h_index, harmonic in enumerate(h_range):
idx_out[0] = h_index
# Iterate over frequencies
for f_index, frequency in enumerate(freqs):
# Offset the output axis by 1 to account for the harmonic index
idx_out[interp_axis] = f_index
# Estimate the harmonic energy at this frequency across time
harmonic_out[tuple(idx_out)] = f_interp(harmonic * frequency)
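# Note: since `f_interp` accepts an array of query points, the inner
# frequency loop above can be collapsed into a single vectorized call.
# A minimal sketch of the equivalent (same fixed-grid, 1-d assumptions):
#
#     for h_index, harmonic in enumerate(h_range):
#         harmonic_out[h_index] = f_interp(harmonic * freqs)
#
# interp1d replaces the interpolation axis with the shape of the query
# points, so each slice has the same shape as `x`.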
def harmonics_2d(harmonic_out, x, freqs, h_range, kind='linear', fill_value=0,
axis=0):
'''Populate a harmonic tensor from a time-frequency representation with
time-varying frequencies.
Parameters
----------
harmonic_out : np.ndarray
The output array to store harmonics
x : np.ndarray
The input energy
freqs : np.ndarray, shape=x.shape
The frequency values corresponding to each element of `x`
h_range : list-like, non-negative
Harmonics to compute. The first harmonic (1) corresponds to `x`
itself. Values less than one (e.g., 1/2) correspond to
sub-harmonics.
kind : str
Interpolation type. See `scipy.interpolate.interp1d`.
fill_value : float
The value to fill when extrapolating beyond the observed
frequency range.
axis : int
The axis along which to compute harmonics
See Also
--------
interp_harmonics
harmonics_1d
'''
idx_in = [slice(None)] * x.ndim
idx_freq = [slice(None)] * x.ndim
idx_out = [slice(None)] * harmonic_out.ndim
# This is the non-interpolation axis
ni_axis = (1 + axis) % x.ndim
# For each value in the non-interpolated axis, compute its harmonics
for i in range(x.shape[ni_axis]):
idx_in[ni_axis] = slice(i, i + 1)
idx_freq[ni_axis] = i
idx_out[1 + ni_axis] = idx_in[ni_axis]
harmonics_1d(harmonic_out[tuple(idx_out)], x[tuple(idx_in)], freqs[tuple(idx_freq)],
h_range, kind=kind, fill_value=fill_value,
axis=axis)
|
py | 1a3cec8fd4bbfbbabac0a1c44f62159820567fd9 | # -*- coding: utf-8 -*-
"""
Common tools for preparing Plumed inputs for calculations
"""
class BasePlumedInputGenerator(object):
"""
Baseclass for the standard Plumed inputs.
"""
_INPUT_FILE_NAME = 'plumed.in'
_OUTPUT_FILE_NAME = 'plumed.out'
_COLVAR_FILE_NAME = 'COLVAR.dat'
def _generate_plumed_inputfile(self, parameters):
"""
Prepare the input file for Plumed
:param parameters: dictionary mapping each PLUMED action to a list of its arguments
:returns: the PLUMED input file contents as a single string
"""
# Empty string to hold the content of the input file
input_text = ""
# Run over the primary keys of the parameters dictionary.
# Each key corresponds to a PLUMED action: write the keyword to a
# stanza, run over the arguments to that keyword and append them,
# then add the completed stanza to the input file string.
for action, args in parameters.items():
stanza = action + ' '
for item in args:
stanza += item + ' '
input_text += stanza + '\n'
return input_text
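# A minimal usage sketch (the subclass name and the parameter values are
# hypothetical; keys are PLUMED actions, values are lists of their arguments):
#
#     class PlumedInputGenerator(BasePlumedInputGenerator):
#         pass
#
#     params = {'UNITS': ['LENGTH=A', 'TIME=fs'],
#               'd1: DISTANCE': ['ATOMS=1,2'],
#               'PRINT': ['ARG=d1', 'STRIDE=100', 'FILE=COLVAR.dat']}
#     text = PlumedInputGenerator()._generate_plumed_inputfile(params)
#     # `text` now holds one "ACTION ARG1 ARG2 ..." stanza per line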
|
py | 1a3ced288d88a858814d84c8279566e72a58b898 | import logging
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_selection import SelectKBest, chi2
import numpy as np
from sklearn.feature_selection.univariate_selection import _clean_nans
from discoutils.thesaurus_loader import Vectors
from eval.utils.misc import calculate_log_odds, update_dict_according_to_mask
__author__ = 'mmb28'
class VectorBackedSelectKBest(SelectKBest):
"""
An extension of sklearn's SelectKBest, which also contains a VectorStore. Feature selection is done
in two optional steps:
1: Remove all features that are not contained in the vector store
2: Remove any remaining low-scoring features to ensure a maximum of k features are left
Additionally, this class stores a vocabulary (like a vectorizer), which maps features to their corresponding columns
in the feature vector matrix. This is so that a FeatureVectorsCsvDumper can be placed after this object in a
pipeline.
Also, this object assumes its input is not a matrix X (as in SelectKBest), but a tuple (X, vocabulary). The
vocabulary is provided by ThesaurusVectorizer, which comes before this object in a pipeline and represents the
mapping of features to columns in X before any feature selection is done.
"""
def __init__(self, score_func=chi2, k='all', must_be_in_thesaurus=False, min_log_odds_score=0, **kwargs):
"""
:param min_log_odds_score: any feature with a log odds score between -min_log_odds_score and
min_log_odds_score will be removed. Assumes the classification problem is binary.
"""
if not score_func:
score_func = chi2
self.k = k
self.must_be_in_thesaurus = must_be_in_thesaurus
self.min_log_odds_score = min_log_odds_score
self.vocabulary_ = None
super(VectorBackedSelectKBest, self).__init__(score_func=score_func, k=k)
def fit(self, X, y, vector_source=None, clusters=None, **kwargs):
if vector_source is None and clusters is None and self.must_be_in_thesaurus:
logging.error('You requested feature selection based on vector presence '
'but did not provide a vector source.')
raise ValueError('vector source (vectors or clusters) required with must_be_in_thesaurus')
if self.must_be_in_thesaurus:
self.vector_source = vector_source if vector_source else set(clusters.index)
# Vectorizer also returns its vocabulary, store it and work with the rest
X, self.vocabulary_ = X
if self.k == 'all' or int(self.k) >= X.shape[1]:
# do not bother calculating feature informativeness if all features will be used anyway
self.scores_ = np.ones((X.shape[1],))
else:
super(VectorBackedSelectKBest, self).fit(X, y)
self.vectors_mask = self._zero_score_of_oot_feats() \
if self.must_be_in_thesaurus else np.ones(X.shape[1], dtype=bool)
self.log_odds_mask = self._zero_score_of_low_log_odds_features(X, y) \
if self.min_log_odds_score > 0 else np.ones(X.shape[1], dtype=bool)
return self
def transform(self, X):
# Vectorizer also returns its vocabulary, remove it
if self.vocabulary_:
return super(VectorBackedSelectKBest, self).transform(X[0]), self.vocabulary_
else:
# Sometimes the training set contains no features. We don't want this to break the experiment,
# so let it slide
logging.error('Empty vocabulary')
return X[0], self.vocabulary_
def _zero_score_of_oot_feats(self):
mask = np.ones(self.scores_.shape, dtype=bool)
for feature, index in self.vocabulary_.items():
if feature not in self.vector_source:
mask[index] = False
if np.count_nonzero(mask) == 0:
logging.error('Feature selector removed all features')
raise ValueError('Empty vocabulary')
return mask
def _zero_score_of_low_log_odds_features(self, X, y):
if self.min_log_odds_score <= 0:
# we don't want to use log odds score, return an all-true mask
return np.ones(X.shape[1], dtype=bool)
if len(set(y)) != 2:
raise ValueError('Calculating a log odds score requires a binary classification task')
log_odds = calculate_log_odds(X, y)
return (log_odds > self.min_log_odds_score) | (log_odds < -self.min_log_odds_score)
def _get_support_mask(self):
k = self.k
chi2_scores = self.scores_
chi2_mask = np.ones(chi2_scores.shape, dtype=bool)
if k != 'all' and k < len(chi2_scores):
# we don't want all features to be kept, and the number we want is less than the number available
chi2_scores = _clean_nans(chi2_scores)
selected_indices = np.argsort(chi2_scores)[:k]
chi2_mask[selected_indices] = False
mask = chi2_mask & self.vectors_mask & self.log_odds_mask
logging.info('%d/%d features survived feature selection', np.count_nonzero(mask), len(mask))
# Only keep the scores of the features that survived. This array is used to check the
# input data shape at train and decode time matches. However, because the post-feature-selections
# vocabulary is passed back into the vectorizer, at decode time the input will likely be smaller. This is
# like doing feature selection in the vectorizer.
self.scores_ = self.scores_[mask]
self.log_odds_mask = self.log_odds_mask[mask]
self.vectors_mask = self.vectors_mask[mask]
self.vocabulary_ = update_dict_according_to_mask(self.vocabulary_, mask)
return mask
class MetadataStripper(BaseEstimator, TransformerMixin):
"""
The current implementation of ThesaurusVectorizer's fit() returns not just a data matrix, but also some
metadata (its vocabulary). This class is meant to sit in a pipeline behind the vectorizer to remove that
metadata, so that it doesn't break other items in the pipeline.
Currently several other pipeline elements can make use of this data ( VectorBackedSelectKBest and
FeatureVectorsCsvDumper). This class must come after these in a pipeline as they do not have any
defensive checks
"""
def fit(self, X, y, vector_source=None, strategy='linear', k=None, **kwargs):
matrix, self.voc = X # store voc, may be handy for debugging
self.vector_source = vector_source
if isinstance(self.vector_source, Vectors):
# the vector source can be either a Thesaurus or Vectors. Both can provide nearest neighbours,
# but the latter needs this method to be called first
if not k:
k = 10
self.vector_source.init_sims([str(foo) for foo in self.voc.keys()],
strategy=strategy, n_neighbors=k)
return self
def transform(self, X, **kwargs):
# if X is a tuple, strip metadata, otherwise let it be
return X[0] if isinstance(X, tuple) else X
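# A minimal pipeline sketch (hypothetical data and vectorizer; assumes a
# ThesaurusVectorizer-like first step whose output is the (X, vocabulary)
# tuple described above, with fit parameters routed via sklearn's
# 'stepname__param' convention):
#
#     from sklearn.pipeline import Pipeline
#     from sklearn.linear_model import LogisticRegression
#
#     pipe = Pipeline([('vect', ThesaurusVectorizer()),
#                      ('fs', VectorBackedSelectKBest(k=500)),
#                      ('strip', MetadataStripper()),
#                      ('clf', LogisticRegression())])
#     pipe.fit(documents, labels,
#              fs__vector_source=vectors, strip__vector_source=vectors)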
|
py | 1a3ced7dd892ee5374e1c4e27b52d18d3c2a5c1c | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
import datetime
import hashlib
import ipaddress
import warnings
from enum import Enum
from asn1crypto.keys import PublicKeyInfo
import six
from cryptography import utils
from cryptography.hazmat.primitives import constant_time, serialization
from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurvePublicKey
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicKey
from cryptography.x509.general_name import GeneralName, IPAddress, OtherName
from cryptography.x509.name import Name, RelativeDistinguishedName
from cryptography.x509.oid import (
CRLEntryExtensionOID, ExtensionOID, ObjectIdentifier
)
def _key_identifier_from_public_key(public_key):
if isinstance(public_key, RSAPublicKey):
data = public_key.public_bytes(
serialization.Encoding.DER,
serialization.PublicFormat.PKCS1,
)
elif isinstance(public_key, EllipticCurvePublicKey):
data = public_key.public_numbers().encode_point()
else:
# This is a very slow way to do this.
serialized = public_key.public_bytes(
serialization.Encoding.DER,
serialization.PublicFormat.SubjectPublicKeyInfo
)
data = six.binary_type(PublicKeyInfo.load(serialized)['public_key'])
return hashlib.sha1(data).digest()
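# A minimal usage sketch (hypothetical key; the extension classes defined
# below wrap this helper, which computes the SHA-1 key identifier of the
# public key bytes in the style of RFC 5280):
#
#     from cryptography.hazmat.backends import default_backend
#     from cryptography.hazmat.primitives.asymmetric import rsa
#
#     pub = rsa.generate_private_key(65537, 2048, default_backend()).public_key()
#     ski = SubjectKeyIdentifier.from_public_key(pub)
#     aki = AuthorityKeyIdentifier.from_issuer_public_key(pub)
#     assert ski.digest == aki.key_identifier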
class DuplicateExtension(Exception):
def __init__(self, msg, oid):
super(DuplicateExtension, self).__init__(msg)
self.oid = oid
class UnsupportedExtension(Exception):
def __init__(self, msg, oid):
super(UnsupportedExtension, self).__init__(msg)
self.oid = oid
class ExtensionNotFound(Exception):
def __init__(self, msg, oid):
super(ExtensionNotFound, self).__init__(msg)
self.oid = oid
@six.add_metaclass(abc.ABCMeta)
class ExtensionType(object):
@abc.abstractproperty
def oid(self):
"""
Returns the oid associated with the given extension type.
"""
class Extensions(object):
def __init__(self, extensions):
self._extensions = extensions
def get_extension_for_oid(self, oid):
for ext in self:
if ext.oid == oid:
return ext
raise ExtensionNotFound("No {0} extension was found".format(oid), oid)
def get_extension_for_class(self, extclass):
if extclass is UnrecognizedExtension:
raise TypeError(
"UnrecognizedExtension can't be used with "
"get_extension_for_class because more than one instance of the"
" class may be present."
)
for ext in self:
if isinstance(ext.value, extclass):
return ext
raise ExtensionNotFound(
"No {0} extension was found".format(extclass), extclass.oid
)
def __iter__(self):
return iter(self._extensions)
def __len__(self):
return len(self._extensions)
def __getitem__(self, idx):
return self._extensions[idx]
def __repr__(self):
return (
"<Extensions({0})>".format(self._extensions)
)
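# A minimal lookup sketch (hypothetical `cert` loaded elsewhere, e.g. via
# cryptography.x509.load_pem_x509_certificate):
#
#     ext = cert.extensions.get_extension_for_class(BasicConstraints)
#     print(ext.critical, ext.value.ca, ext.value.path_length)
#
# get_extension_for_oid(ExtensionOID.BASIC_CONSTRAINTS) is the OID-based
# equivalent; both raise ExtensionNotFound when the extension is absent.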
@utils.register_interface(ExtensionType)
class CRLNumber(object):
oid = ExtensionOID.CRL_NUMBER
def __init__(self, crl_number):
if not isinstance(crl_number, six.integer_types):
raise TypeError("crl_number must be an integer")
self._crl_number = crl_number
def __eq__(self, other):
if not isinstance(other, CRLNumber):
return NotImplemented
return self.crl_number == other.crl_number
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.crl_number)
def __repr__(self):
return "<CRLNumber({0})>".format(self.crl_number)
crl_number = utils.read_only_property("_crl_number")
@utils.register_interface(ExtensionType)
class AuthorityKeyIdentifier(object):
oid = ExtensionOID.AUTHORITY_KEY_IDENTIFIER
def __init__(self, key_identifier, authority_cert_issuer,
authority_cert_serial_number):
if (authority_cert_issuer is None) != (
authority_cert_serial_number is None
):
raise ValueError(
"authority_cert_issuer and authority_cert_serial_number "
"must both be present or both None"
)
if authority_cert_issuer is not None:
authority_cert_issuer = list(authority_cert_issuer)
if not all(
isinstance(x, GeneralName) for x in authority_cert_issuer
):
raise TypeError(
"authority_cert_issuer must be a list of GeneralName "
"objects"
)
if authority_cert_serial_number is not None and not isinstance(
authority_cert_serial_number, six.integer_types
):
raise TypeError(
"authority_cert_serial_number must be an integer"
)
self._key_identifier = key_identifier
self._authority_cert_issuer = authority_cert_issuer
self._authority_cert_serial_number = authority_cert_serial_number
@classmethod
def from_issuer_public_key(cls, public_key):
digest = _key_identifier_from_public_key(public_key)
return cls(
key_identifier=digest,
authority_cert_issuer=None,
authority_cert_serial_number=None
)
@classmethod
def from_issuer_subject_key_identifier(cls, ski):
return cls(
key_identifier=ski.value.digest,
authority_cert_issuer=None,
authority_cert_serial_number=None
)
def __repr__(self):
return (
"<AuthorityKeyIdentifier(key_identifier={0.key_identifier!r}, "
"authority_cert_issuer={0.authority_cert_issuer}, "
"authority_cert_serial_number={0.authority_cert_serial_number}"
")>".format(self)
)
def __eq__(self, other):
if not isinstance(other, AuthorityKeyIdentifier):
return NotImplemented
return (
self.key_identifier == other.key_identifier and
self.authority_cert_issuer == other.authority_cert_issuer and
self.authority_cert_serial_number ==
other.authority_cert_serial_number
)
def __ne__(self, other):
return not self == other
key_identifier = utils.read_only_property("_key_identifier")
authority_cert_issuer = utils.read_only_property("_authority_cert_issuer")
authority_cert_serial_number = utils.read_only_property(
"_authority_cert_serial_number"
)
@utils.register_interface(ExtensionType)
class SubjectKeyIdentifier(object):
oid = ExtensionOID.SUBJECT_KEY_IDENTIFIER
def __init__(self, digest):
self._digest = digest
@classmethod
def from_public_key(cls, public_key):
return cls(_key_identifier_from_public_key(public_key))
digest = utils.read_only_property("_digest")
def __repr__(self):
return "<SubjectKeyIdentifier(digest={0!r})>".format(self.digest)
def __eq__(self, other):
if not isinstance(other, SubjectKeyIdentifier):
return NotImplemented
return constant_time.bytes_eq(self.digest, other.digest)
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.digest)
@utils.register_interface(ExtensionType)
class AuthorityInformationAccess(object):
oid = ExtensionOID.AUTHORITY_INFORMATION_ACCESS
def __init__(self, descriptions):
descriptions = list(descriptions)
if not all(isinstance(x, AccessDescription) for x in descriptions):
raise TypeError(
"Every item in the descriptions list must be an "
"AccessDescription"
)
self._descriptions = descriptions
def __iter__(self):
return iter(self._descriptions)
def __len__(self):
return len(self._descriptions)
def __repr__(self):
return "<AuthorityInformationAccess({0})>".format(self._descriptions)
def __eq__(self, other):
if not isinstance(other, AuthorityInformationAccess):
return NotImplemented
return self._descriptions == other._descriptions
def __ne__(self, other):
return not self == other
def __getitem__(self, idx):
return self._descriptions[idx]
class AccessDescription(object):
def __init__(self, access_method, access_location):
if not isinstance(access_method, ObjectIdentifier):
raise TypeError("access_method must be an ObjectIdentifier")
if not isinstance(access_location, GeneralName):
raise TypeError("access_location must be a GeneralName")
self._access_method = access_method
self._access_location = access_location
def __repr__(self):
return (
"<AccessDescription(access_method={0.access_method}, access_locati"
"on={0.access_location})>".format(self)
)
def __eq__(self, other):
if not isinstance(other, AccessDescription):
return NotImplemented
return (
self.access_method == other.access_method and
self.access_location == other.access_location
)
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.access_method, self.access_location))
access_method = utils.read_only_property("_access_method")
access_location = utils.read_only_property("_access_location")
@utils.register_interface(ExtensionType)
class BasicConstraints(object):
oid = ExtensionOID.BASIC_CONSTRAINTS
def __init__(self, ca, path_length):
if not isinstance(ca, bool):
raise TypeError("ca must be a boolean value")
if path_length is not None and not ca:
raise ValueError("path_length must be None when ca is False")
if (
path_length is not None and
(not isinstance(path_length, six.integer_types) or path_length < 0)
):
raise TypeError(
"path_length must be a non-negative integer or None"
)
self._ca = ca
self._path_length = path_length
ca = utils.read_only_property("_ca")
path_length = utils.read_only_property("_path_length")
def __repr__(self):
return ("<BasicConstraints(ca={0.ca}, "
"path_length={0.path_length})>").format(self)
def __eq__(self, other):
if not isinstance(other, BasicConstraints):
return NotImplemented
return self.ca == other.ca and self.path_length == other.path_length
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.ca, self.path_length))
@utils.register_interface(ExtensionType)
class CRLDistributionPoints(object):
oid = ExtensionOID.CRL_DISTRIBUTION_POINTS
def __init__(self, distribution_points):
distribution_points = list(distribution_points)
if not all(
isinstance(x, DistributionPoint) for x in distribution_points
):
raise TypeError(
"distribution_points must be a list of DistributionPoint "
"objects"
)
self._distribution_points = distribution_points
def __iter__(self):
return iter(self._distribution_points)
def __len__(self):
return len(self._distribution_points)
def __repr__(self):
return "<CRLDistributionPoints({0})>".format(self._distribution_points)
def __eq__(self, other):
if not isinstance(other, CRLDistributionPoints):
return NotImplemented
return self._distribution_points == other._distribution_points
def __ne__(self, other):
return not self == other
def __getitem__(self, idx):
return self._distribution_points[idx]
class DistributionPoint(object):
def __init__(self, full_name, relative_name, reasons, crl_issuer):
if full_name and relative_name:
raise ValueError(
"You cannot provide both full_name and relative_name, at "
"least one must be None."
)
if full_name:
full_name = list(full_name)
if not all(isinstance(x, GeneralName) for x in full_name):
raise TypeError(
"full_name must be a list of GeneralName objects"
)
if relative_name:
if isinstance(relative_name, Name):
warnings.warn(
"relative_name=<Name> is deprecated and will "
"be removed in a future version; use "
"<RelativeDistinguishedName> instead.",
utils.DeprecatedIn16,
stacklevel=2
)
relative_name = RelativeDistinguishedName(relative_name)
elif not isinstance(relative_name, RelativeDistinguishedName):
raise TypeError(
"relative_name must be a RelativeDistinguishedName"
)
if crl_issuer:
crl_issuer = list(crl_issuer)
if not all(isinstance(x, GeneralName) for x in crl_issuer):
raise TypeError(
"crl_issuer must be None or a list of general names"
)
if reasons and (not isinstance(reasons, frozenset) or not all(
isinstance(x, ReasonFlags) for x in reasons
)):
raise TypeError("reasons must be None or frozenset of ReasonFlags")
if reasons and (
ReasonFlags.unspecified in reasons or
ReasonFlags.remove_from_crl in reasons
):
raise ValueError(
"unspecified and remove_from_crl are not valid reasons in a "
"DistributionPoint"
)
if reasons and not crl_issuer and not (full_name or relative_name):
raise ValueError(
"You must supply crl_issuer, full_name, or relative_name when "
"reasons is not None"
)
self._full_name = full_name
self._relative_name = relative_name
self._reasons = reasons
self._crl_issuer = crl_issuer
def __repr__(self):
return (
"<DistributionPoint(full_name={0.full_name}, relative_name={0.rela"
"tive_name}, reasons={0.reasons}, crl_issuer={0.crl_is"
"suer})>".format(self)
)
def __eq__(self, other):
if not isinstance(other, DistributionPoint):
return NotImplemented
return (
self.full_name == other.full_name and
self.relative_name == other.relative_name and
self.reasons == other.reasons and
self.crl_issuer == other.crl_issuer
)
def __ne__(self, other):
return not self == other
full_name = utils.read_only_property("_full_name")
relative_name = utils.read_only_property("_relative_name")
reasons = utils.read_only_property("_reasons")
crl_issuer = utils.read_only_property("_crl_issuer")
class ReasonFlags(Enum):
unspecified = "unspecified"
key_compromise = "keyCompromise"
ca_compromise = "cACompromise"
affiliation_changed = "affiliationChanged"
superseded = "superseded"
cessation_of_operation = "cessationOfOperation"
certificate_hold = "certificateHold"
privilege_withdrawn = "privilegeWithdrawn"
aa_compromise = "aACompromise"
remove_from_crl = "removeFromCRL"
@utils.register_interface(ExtensionType)
class PolicyConstraints(object):
oid = ExtensionOID.POLICY_CONSTRAINTS
def __init__(self, require_explicit_policy, inhibit_policy_mapping):
if require_explicit_policy is not None and not isinstance(
require_explicit_policy, six.integer_types
):
raise TypeError(
"require_explicit_policy must be a non-negative integer or "
"None"
)
if inhibit_policy_mapping is not None and not isinstance(
inhibit_policy_mapping, six.integer_types
):
raise TypeError(
"inhibit_policy_mapping must be a non-negative integer or None"
)
if inhibit_policy_mapping is None and require_explicit_policy is None:
raise ValueError(
"At least one of require_explicit_policy and "
"inhibit_policy_mapping must not be None"
)
self._require_explicit_policy = require_explicit_policy
self._inhibit_policy_mapping = inhibit_policy_mapping
def __repr__(self):
return (
u"<PolicyConstraints(require_explicit_policy={0.require_explicit"
u"_policy}, inhibit_policy_mapping={0.inhibit_policy_"
u"mapping})>".format(self)
)
def __eq__(self, other):
if not isinstance(other, PolicyConstraints):
return NotImplemented
return (
self.require_explicit_policy == other.require_explicit_policy and
self.inhibit_policy_mapping == other.inhibit_policy_mapping
)
def __ne__(self, other):
return not self == other
require_explicit_policy = utils.read_only_property(
"_require_explicit_policy"
)
inhibit_policy_mapping = utils.read_only_property(
"_inhibit_policy_mapping"
)
@utils.register_interface(ExtensionType)
class CertificatePolicies(object):
oid = ExtensionOID.CERTIFICATE_POLICIES
def __init__(self, policies):
policies = list(policies)
if not all(isinstance(x, PolicyInformation) for x in policies):
raise TypeError(
"Every item in the policies list must be a "
"PolicyInformation"
)
self._policies = policies
def __iter__(self):
return iter(self._policies)
def __len__(self):
return len(self._policies)
def __repr__(self):
return "<CertificatePolicies({0})>".format(self._policies)
def __eq__(self, other):
if not isinstance(other, CertificatePolicies):
return NotImplemented
return self._policies == other._policies
def __ne__(self, other):
return not self == other
def __getitem__(self, idx):
return self._policies[idx]
class PolicyInformation(object):
def __init__(self, policy_identifier, policy_qualifiers):
if not isinstance(policy_identifier, ObjectIdentifier):
raise TypeError("policy_identifier must be an ObjectIdentifier")
self._policy_identifier = policy_identifier
if policy_qualifiers:
policy_qualifiers = list(policy_qualifiers)
if not all(
isinstance(x, (six.text_type, UserNotice))
for x in policy_qualifiers
):
raise TypeError(
"policy_qualifiers must be a list of strings and/or "
"UserNotice objects or None"
)
self._policy_qualifiers = policy_qualifiers
def __repr__(self):
return (
"<PolicyInformation(policy_identifier={0.policy_identifier}, polic"
"y_qualifiers={0.policy_qualifiers})>".format(self)
)
def __eq__(self, other):
if not isinstance(other, PolicyInformation):
return NotImplemented
return (
self.policy_identifier == other.policy_identifier and
self.policy_qualifiers == other.policy_qualifiers
)
def __ne__(self, other):
return not self == other
policy_identifier = utils.read_only_property("_policy_identifier")
policy_qualifiers = utils.read_only_property("_policy_qualifiers")
class UserNotice(object):
def __init__(self, notice_reference, explicit_text):
if notice_reference and not isinstance(
notice_reference, NoticeReference
):
raise TypeError(
"notice_reference must be None or a NoticeReference"
)
self._notice_reference = notice_reference
self._explicit_text = explicit_text
def __repr__(self):
return (
"<UserNotice(notice_reference={0.notice_reference}, explicit_text="
"{0.explicit_text!r})>".format(self)
)
def __eq__(self, other):
if not isinstance(other, UserNotice):
return NotImplemented
return (
self.notice_reference == other.notice_reference and
self.explicit_text == other.explicit_text
)
def __ne__(self, other):
return not self == other
notice_reference = utils.read_only_property("_notice_reference")
explicit_text = utils.read_only_property("_explicit_text")
class NoticeReference(object):
def __init__(self, organization, notice_numbers):
self._organization = organization
notice_numbers = list(notice_numbers)
if not all(isinstance(x, int) for x in notice_numbers):
raise TypeError(
"notice_numbers must be a list of integers"
)
self._notice_numbers = notice_numbers
def __repr__(self):
return (
"<NoticeReference(organization={0.organization!r}, notice_numbers="
"{0.notice_numbers})>".format(self)
)
def __eq__(self, other):
if not isinstance(other, NoticeReference):
return NotImplemented
return (
self.organization == other.organization and
self.notice_numbers == other.notice_numbers
)
def __ne__(self, other):
return not self == other
organization = utils.read_only_property("_organization")
notice_numbers = utils.read_only_property("_notice_numbers")
@utils.register_interface(ExtensionType)
class ExtendedKeyUsage(object):
oid = ExtensionOID.EXTENDED_KEY_USAGE
def __init__(self, usages):
usages = list(usages)
if not all(isinstance(x, ObjectIdentifier) for x in usages):
raise TypeError(
"Every item in the usages list must be an ObjectIdentifier"
)
self._usages = usages
def __iter__(self):
return iter(self._usages)
def __len__(self):
return len(self._usages)
def __repr__(self):
return "<ExtendedKeyUsage({0})>".format(self._usages)
def __eq__(self, other):
if not isinstance(other, ExtendedKeyUsage):
return NotImplemented
return self._usages == other._usages
def __ne__(self, other):
return not self == other
@utils.register_interface(ExtensionType)
class OCSPNoCheck(object):
oid = ExtensionOID.OCSP_NO_CHECK
@utils.register_interface(ExtensionType)
class InhibitAnyPolicy(object):
oid = ExtensionOID.INHIBIT_ANY_POLICY
def __init__(self, skip_certs):
if not isinstance(skip_certs, six.integer_types):
raise TypeError("skip_certs must be an integer")
if skip_certs < 0:
raise ValueError("skip_certs must be a non-negative integer")
self._skip_certs = skip_certs
def __repr__(self):
return "<InhibitAnyPolicy(skip_certs={0.skip_certs})>".format(self)
def __eq__(self, other):
if not isinstance(other, InhibitAnyPolicy):
return NotImplemented
return self.skip_certs == other.skip_certs
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.skip_certs)
skip_certs = utils.read_only_property("_skip_certs")
@utils.register_interface(ExtensionType)
class KeyUsage(object):
oid = ExtensionOID.KEY_USAGE
def __init__(self, digital_signature, content_commitment, key_encipherment,
data_encipherment, key_agreement, key_cert_sign, crl_sign,
encipher_only, decipher_only):
if not key_agreement and (encipher_only or decipher_only):
raise ValueError(
"encipher_only and decipher_only can only be true when "
"key_agreement is true"
)
self._digital_signature = digital_signature
self._content_commitment = content_commitment
self._key_encipherment = key_encipherment
self._data_encipherment = data_encipherment
self._key_agreement = key_agreement
self._key_cert_sign = key_cert_sign
self._crl_sign = crl_sign
self._encipher_only = encipher_only
self._decipher_only = decipher_only
digital_signature = utils.read_only_property("_digital_signature")
content_commitment = utils.read_only_property("_content_commitment")
key_encipherment = utils.read_only_property("_key_encipherment")
data_encipherment = utils.read_only_property("_data_encipherment")
key_agreement = utils.read_only_property("_key_agreement")
key_cert_sign = utils.read_only_property("_key_cert_sign")
crl_sign = utils.read_only_property("_crl_sign")
@property
def encipher_only(self):
if not self.key_agreement:
raise ValueError(
"encipher_only is undefined unless key_agreement is true"
)
else:
return self._encipher_only
@property
def decipher_only(self):
if not self.key_agreement:
raise ValueError(
"decipher_only is undefined unless key_agreement is true"
)
else:
return self._decipher_only
def __repr__(self):
try:
encipher_only = self.encipher_only
decipher_only = self.decipher_only
except ValueError:
encipher_only = None
decipher_only = None
return ("<KeyUsage(digital_signature={0.digital_signature}, "
"content_commitment={0.content_commitment}, "
"key_encipherment={0.key_encipherment}, "
"data_encipherment={0.data_encipherment}, "
"key_agreement={0.key_agreement}, "
"key_cert_sign={0.key_cert_sign}, crl_sign={0.crl_sign}, "
"encipher_only={1}, decipher_only={2})>").format(
self, encipher_only, decipher_only)
def __eq__(self, other):
if not isinstance(other, KeyUsage):
return NotImplemented
return (
self.digital_signature == other.digital_signature and
self.content_commitment == other.content_commitment and
self.key_encipherment == other.key_encipherment and
self.data_encipherment == other.data_encipherment and
self.key_agreement == other.key_agreement and
self.key_cert_sign == other.key_cert_sign and
self.crl_sign == other.crl_sign and
self._encipher_only == other._encipher_only and
self._decipher_only == other._decipher_only
)
def __ne__(self, other):
return not self == other
@utils.register_interface(ExtensionType)
class NameConstraints(object):
oid = ExtensionOID.NAME_CONSTRAINTS
def __init__(self, permitted_subtrees, excluded_subtrees):
if permitted_subtrees is not None:
permitted_subtrees = list(permitted_subtrees)
if not all(
isinstance(x, GeneralName) for x in permitted_subtrees
):
raise TypeError(
"permitted_subtrees must be a list of GeneralName objects "
"or None"
)
self._validate_ip_name(permitted_subtrees)
if excluded_subtrees is not None:
excluded_subtrees = list(excluded_subtrees)
if not all(
isinstance(x, GeneralName) for x in excluded_subtrees
):
raise TypeError(
"excluded_subtrees must be a list of GeneralName objects "
"or None"
)
self._validate_ip_name(excluded_subtrees)
if permitted_subtrees is None and excluded_subtrees is None:
raise ValueError(
"At least one of permitted_subtrees and excluded_subtrees "
"must not be None"
)
self._permitted_subtrees = permitted_subtrees
self._excluded_subtrees = excluded_subtrees
def __eq__(self, other):
if not isinstance(other, NameConstraints):
return NotImplemented
return (
self.excluded_subtrees == other.excluded_subtrees and
self.permitted_subtrees == other.permitted_subtrees
)
def __ne__(self, other):
return not self == other
def _validate_ip_name(self, tree):
if any(isinstance(name, IPAddress) and not isinstance(
name.value, (ipaddress.IPv4Network, ipaddress.IPv6Network)
) for name in tree):
raise TypeError(
"IPAddress name constraints must be an IPv4Network or"
" IPv6Network object"
)
def __repr__(self):
return (
u"<NameConstraints(permitted_subtrees={0.permitted_subtrees}, "
u"excluded_subtrees={0.excluded_subtrees})>".format(self)
)
permitted_subtrees = utils.read_only_property("_permitted_subtrees")
excluded_subtrees = utils.read_only_property("_excluded_subtrees")
class Extension(object):
def __init__(self, oid, critical, value):
if not isinstance(oid, ObjectIdentifier):
raise TypeError(
"oid argument must be an ObjectIdentifier instance."
)
if not isinstance(critical, bool):
raise TypeError("critical must be a boolean value")
self._oid = oid
self._critical = critical
self._value = value
oid = utils.read_only_property("_oid")
critical = utils.read_only_property("_critical")
value = utils.read_only_property("_value")
def __repr__(self):
return ("<Extension(oid={0.oid}, critical={0.critical}, "
"value={0.value})>").format(self)
def __eq__(self, other):
if not isinstance(other, Extension):
return NotImplemented
return (
self.oid == other.oid and
self.critical == other.critical and
self.value == other.value
)
def __ne__(self, other):
return not self == other
class GeneralNames(object):
def __init__(self, general_names):
general_names = list(general_names)
if not all(isinstance(x, GeneralName) for x in general_names):
raise TypeError(
"Every item in the general_names list must be an "
"object conforming to the GeneralName interface"
)
self._general_names = general_names
def __iter__(self):
return iter(self._general_names)
def __len__(self):
return len(self._general_names)
def get_values_for_type(self, type):
# Return the value of each GeneralName, except for OtherName instances
# which we return directly because they have two important properties,
# not just one value.
objs = (i for i in self if isinstance(i, type))
if type != OtherName:
objs = (i.value for i in objs)
return list(objs)
def __repr__(self):
return "<GeneralNames({0})>".format(self._general_names)
def __eq__(self, other):
if not isinstance(other, GeneralNames):
return NotImplemented
return self._general_names == other._general_names
def __ne__(self, other):
return not self == other
def __getitem__(self, idx):
return self._general_names[idx]
@utils.register_interface(ExtensionType)
class SubjectAlternativeName(object):
oid = ExtensionOID.SUBJECT_ALTERNATIVE_NAME
def __init__(self, general_names):
self._general_names = GeneralNames(general_names)
def __iter__(self):
return iter(self._general_names)
def __len__(self):
return len(self._general_names)
def get_values_for_type(self, type):
return self._general_names.get_values_for_type(type)
def __repr__(self):
return "<SubjectAlternativeName({0})>".format(self._general_names)
def __eq__(self, other):
if not isinstance(other, SubjectAlternativeName):
return NotImplemented
return self._general_names == other._general_names
def __getitem__(self, idx):
return self._general_names[idx]
def __ne__(self, other):
return not self == other
@utils.register_interface(ExtensionType)
class IssuerAlternativeName(object):
oid = ExtensionOID.ISSUER_ALTERNATIVE_NAME
def __init__(self, general_names):
self._general_names = GeneralNames(general_names)
def __iter__(self):
return iter(self._general_names)
def __len__(self):
return len(self._general_names)
def get_values_for_type(self, type):
return self._general_names.get_values_for_type(type)
def __repr__(self):
return "<IssuerAlternativeName({0})>".format(self._general_names)
def __eq__(self, other):
if not isinstance(other, IssuerAlternativeName):
return NotImplemented
return self._general_names == other._general_names
def __ne__(self, other):
return not self == other
def __getitem__(self, idx):
return self._general_names[idx]
@utils.register_interface(ExtensionType)
class CertificateIssuer(object):
oid = CRLEntryExtensionOID.CERTIFICATE_ISSUER
def __init__(self, general_names):
self._general_names = GeneralNames(general_names)
def __iter__(self):
return iter(self._general_names)
def __len__(self):
return len(self._general_names)
def get_values_for_type(self, type):
return self._general_names.get_values_for_type(type)
def __repr__(self):
return "<CertificateIssuer({0})>".format(self._general_names)
def __eq__(self, other):
if not isinstance(other, CertificateIssuer):
return NotImplemented
return self._general_names == other._general_names
def __ne__(self, other):
return not self == other
def __getitem__(self, idx):
return self._general_names[idx]
@utils.register_interface(ExtensionType)
class CRLReason(object):
oid = CRLEntryExtensionOID.CRL_REASON
def __init__(self, reason):
if not isinstance(reason, ReasonFlags):
raise TypeError("reason must be an element from ReasonFlags")
self._reason = reason
def __repr__(self):
return "<CRLReason(reason={0})>".format(self._reason)
def __eq__(self, other):
if not isinstance(other, CRLReason):
return NotImplemented
return self.reason == other.reason
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.reason)
reason = utils.read_only_property("_reason")
@utils.register_interface(ExtensionType)
class InvalidityDate(object):
oid = CRLEntryExtensionOID.INVALIDITY_DATE
def __init__(self, invalidity_date):
if not isinstance(invalidity_date, datetime.datetime):
raise TypeError("invalidity_date must be a datetime.datetime")
self._invalidity_date = invalidity_date
def __repr__(self):
return "<InvalidityDate(invalidity_date={0})>".format(
self._invalidity_date
)
def __eq__(self, other):
if not isinstance(other, InvalidityDate):
return NotImplemented
return self.invalidity_date == other.invalidity_date
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.invalidity_date)
invalidity_date = utils.read_only_property("_invalidity_date")
@utils.register_interface(ExtensionType)
class UnrecognizedExtension(object):
def __init__(self, oid, value):
if not isinstance(oid, ObjectIdentifier):
raise TypeError("oid must be an ObjectIdentifier")
self._oid = oid
self._value = value
oid = utils.read_only_property("_oid")
value = utils.read_only_property("_value")
def __repr__(self):
return (
"<UnrecognizedExtension(oid={0.oid}, value={0.value!r})>".format(
self
)
)
def __eq__(self, other):
if not isinstance(other, UnrecognizedExtension):
return NotImplemented
return self.oid == other.oid and self.value == other.value
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.oid, self.value))
|
py | 1a3ced9935c2f9e7d7287f7a3ee47bd74fa5ffaa | import sys,tweepy,csv,re
from textblob import TextBlob
import matplotlib.pyplot as plt
#alteration
class SentimentAnalysis:
def __init__(self):
self.tweets = []
self.tweetText = []
def DownloadData(self):
# authenticating
consumerKey = 'qBIngtySLGxbyw6eo4Ihqxz2K'
consumerSecret = '6Eu8Ax7QqjLR7uTEMLavaj8KfXCTqzy7W6Ap8JZQQu8HMyu3LZ'
accessToken = '2482155314-176f8Yno4FQiMRTM8YFlXYDQ4m7SsCw0DojEgAy'
accessTokenSecret = 'yofigXgWTivDeQxa6AKtdL7cGUR7Sblp0jBAX7f9xvXVg'
auth = tweepy.OAuthHandler(consumerKey, consumerSecret)
auth.set_access_token(accessToken, accessTokenSecret)
api = tweepy.API(auth)
# input for term to be searched and how many tweets to search
searchTerm = input("Enter Keyword/Tag to search about: ")
NoOfTerms = int(input("Enter how many tweets to search: "))
# searching for tweets
self.tweets = tweepy.Cursor(api.search, q=searchTerm, lang = "en").items(NoOfTerms)
# Open/create a file to append data to
csvFile = open('result.csv', 'a')
# Use csv writer
csvWriter = csv.writer(csvFile)
# creating some variables to store info
polarity = 0
positive = 0
wpositive = 0
spositive = 0
negative = 0
wnegative = 0
snegative = 0
neutral = 0
# iterating through tweets fetched
for tweet in self.tweets:
#Append to temp so that we can store in csv later. I use encode UTF-8
self.tweetText.append(self.cleanTweet(tweet.text).encode('utf-8'))
# print (tweet.text.translate(non_bmp_map)) #print tweet's text
analysis = TextBlob(tweet.text)
# print(analysis.sentiment) # print tweet's polarity
polarity += analysis.sentiment.polarity # adding up polarities to find the average later
if (analysis.sentiment.polarity == 0): # adding reaction of how people are reacting to find average later
neutral += 1
elif (analysis.sentiment.polarity > 0 and analysis.sentiment.polarity <= 0.3):
wpositive += 1
elif (analysis.sentiment.polarity > 0.3 and analysis.sentiment.polarity <= 0.6):
positive += 1
elif (analysis.sentiment.polarity > 0.6 and analysis.sentiment.polarity <= 1):
spositive += 1
elif (analysis.sentiment.polarity > -0.3 and analysis.sentiment.polarity <= 0):
wnegative += 1
elif (analysis.sentiment.polarity > -0.6 and analysis.sentiment.polarity <= -0.3):
negative += 1
elif (analysis.sentiment.polarity > -1 and analysis.sentiment.polarity <= -0.6):
snegative += 1
# Write to csv and close csv file
csvWriter.writerow(self.tweetText)
csvFile.close()
# finding average of how people are reacting
positive = self.percentage(positive, NoOfTerms)
wpositive = self.percentage(wpositive, NoOfTerms)
spositive = self.percentage(spositive, NoOfTerms)
negative = self.percentage(negative, NoOfTerms)
wnegative = self.percentage(wnegative, NoOfTerms)
snegative = self.percentage(snegative, NoOfTerms)
neutral = self.percentage(neutral, NoOfTerms)
# finding average reaction
polarity = polarity / NoOfTerms
# printing out data
print("How people are reacting on " + searchTerm + " by analyzing " + str(NoOfTerms) + " tweets.")
print()
print("General Report: ")
if (polarity == 0):
print("Neutral")
elif (polarity > 0 and polarity <= 0.3):
print("Weakly Positive")
elif (polarity > 0.3 and polarity <= 0.6):
print("Positive")
elif (polarity > 0.6 and polarity <= 1):
print("Strongly Positive")
elif (polarity > -0.3 and polarity <= 0):
print("Weakly Negative")
elif (polarity > -0.6 and polarity <= -0.3):
print("Negative")
elif (polarity > -1 and polarity <= -0.6):
print("Strongly Negative")
print()
print("Detailed Report: ")
print(str(positive) + "% people thought it was positive")
print(str(wpositive) + "% people thought it was weakly positive")
print(str(spositive) + "% people thought it was strongly positive")
print(str(negative) + "% people thought it was negative")
print(str(wnegative) + "% people thought it was weakly negative")
print(str(snegative) + "% people thought it was strongly negative")
print(str(neutral) + "% people thought it was neutral")
self.plotPieChart(positive, wpositive, spositive, negative, wnegative, snegative, neutral, searchTerm, NoOfTerms)
def cleanTweet(self, tweet):
# Remove Links, Special Characters etc from tweet
return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split())
# function to calculate percentage
def percentage(self, part, whole):
temp = 100 * float(part) / float(whole)
return format(temp, '.2f')
def plotPieChart(self, positive, wpositive, spositive, negative, wnegative, snegative, neutral, searchTerm, noOfSearchTerms):
labels = ['Positive [' + str(positive) + '%]', 'Weakly Positive [' + str(wpositive) + '%]','Strongly Positive [' + str(spositive) + '%]', 'Neutral [' + str(neutral) + '%]',
'Negative [' + str(negative) + '%]', 'Weakly Negative [' + str(wnegative) + '%]', 'Strongly Negative [' + str(snegative) + '%]']
sizes = [positive, wpositive, spositive, neutral, negative, wnegative, snegative]
colors = ['yellowgreen','lightgreen','darkgreen', 'gold', 'red','lightsalmon','darkred']
patches, texts = plt.pie(sizes, colors=colors, startangle=90)
plt.legend(patches, labels, loc="best")
plt.title('How people are reacting on ' + searchTerm + ' by analyzing ' + str(noOfSearchTerms) + ' Tweets.')
plt.axis('equal')
plt.tight_layout()
plt.show()
if __name__== "__main__":
sa = SentimentAnalysis()
sa.DownloadData()
|
py | 1a3cee5b051cbeaa0e6326a2d09cd48b27b229da | import numpy as np
import torch
import torch.nn as nn
# from dataset import attack_eps, attack_range
def attack_eps(rho):
mean = [0.4914, 0.4824, 0.4467]
std = [0.2471, 0.2435, 0.2616]
channels = [rho/255./s for s in std]
return channels
def attack_range():
mean = [0.4914, 0.4824, 0.4467]
std = [0.2471, 0.2435, 0.2616]
channels = []
for i in range(len(std)):
channels.append(
[-mean[i]/std[i], (1-mean[i])/std[i]]
)
return channels
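# Worked example (values assume the CIFAR-10-style statistics hard-coded
# above): an 8/255 pixel-space budget maps to per-channel budgets
# eps_c = (8/255) / std_c in normalized space, and attack_range() gives the
# per-channel [min, max] of a normalized image.
#
#     eps = attack_eps(8)         # ~[0.1270, 0.1288, 0.1199]
#     lo, hi = attack_range()[0]  # red channel: ~[-1.989, 2.058]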
def cross_entropy(input, target, label_smoothing=0.0, size_average=True):
""" Cross entropy that accepts soft targets
Args:
input: predictions from the network (logits)
target: targets, can be soft
label_smoothing: clamp soft targets into [label_smoothing/9, 1 - label_smoothing]
size_average: if false, sum is returned instead of mean
Examples::
input = torch.FloatTensor([[1.1, 2.8, 1.3], [1.1, 2.1, 4.8]])
input = torch.autograd.Variable(input, requires_grad=True)
target = torch.FloatTensor([[0.05, 0.9, 0.05], [0.05, 0.05, 0.9]])
target = torch.autograd.Variable(target)
loss = cross_entropy(input, target)
loss.backward()
"""
if label_smoothing > 0:
target = torch.clamp(target, max=1-label_smoothing, min=label_smoothing/9.0)
logsoftmax = nn.LogSoftmax(dim=1)
if size_average:
return torch.mean(torch.sum(-target * logsoftmax(input), dim=1))
else:
return torch.sum(torch.sum(-target * logsoftmax(input), dim=1))
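# Sanity-check sketch (assumes PyTorch's functional API): with hard one-hot
# targets and no label smoothing this reduces to the standard cross entropy.
#
#     import torch.nn.functional as F
#     logits = torch.randn(4, 10)
#     labels = torch.randint(0, 10, (4,))
#     one_hot = F.one_hot(labels, 10).float()
#     assert torch.allclose(cross_entropy(logits, one_hot),
#                           F.cross_entropy(logits, labels))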
def tensor_clamp(t, min, max, in_place=True):
if not in_place:
res = t.clone()
else:
res = t
idx = res.data < min
res.data[idx] = min[idx]
idx = res.data > max
res.data[idx] = max[idx]
return res
def l2ball_proj(center, radius, t, in_place=True):
if not in_place:
res = t.clone()
else:
res = t
direction = t - center
dist = direction.view(direction.size(0), -1).norm(p=2, dim=1, keepdim=True)
direction.view(direction.size(0), -1).div_(dist)
dist[dist > radius] = radius
direction.view(direction.size(0), -1).mul_(dist)
res.data.copy_(center + direction)
return res
def linfball_proj(center, radius, t, in_place=True):
return tensor_clamp(t, min=center - radius, max=center + radius, in_place=in_place)
def edgecut(data, min, max, in_place=True):
if not in_place:
res = data.clone()
else:
res = data
idx = res.data < min
res.data[idx] = min
idx = res.data > max
res.data[idx] = max
return res
# return tensor_clamp(data, min=min, max=max, in_place=in_place)
_extra_args = {'alpha', 'steps', 'randinit', 'gamma', 'iscuda'}
def fgsm_gt(x, y, criterion, rho=None, model=None, **kwargs):
if len(kwargs) > 0:
assert set(kwargs.keys()).issubset(_extra_args)
if rho is None:
rho = 3
eps = attack_eps(rho)
rgb = attack_range()
# Compute loss
x_adv = x.clone()
x_adv.requires_grad = True
loss_adv0 = criterion(model(x_adv), y, reduction='sum')
grad0 = torch.autograd.grad(loss_adv0, x_adv, only_inputs=True)[0]
for i in range(len(eps)):
alpha = eps[i]
# print(alpha)
x_adv[:,i,:,:].data.add_(alpha * torch.sign(grad0.data[:,i,:,:]))
tmp = linfball_proj(center=x[:,i,:,:], radius=alpha, t=x_adv[:,i,:,:])
x_adv[:,i,:,:].data.copy_(tmp.data)
edgecut(x_adv[:,i,:,:], min=rgb[i][0], max=rgb[i][1])
# x_adv[:,i,:,:] = torch.clamp(x_adv[:,i,:,:], min=rgb[i][0], max=rgb[i][1])
# x_adv.data.add_(eps * torch.sign(grad0.data))
return x_adv.data
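# A minimal call sketch (hypothetical `net`, `images`, `labels`): `criterion`
# must be a functional loss accepting a `reduction` keyword, e.g.
# torch.nn.functional.cross_entropy, since it is invoked above as
# criterion(model(x_adv), y, reduction='sum').
#
#     import torch.nn.functional as F
#     x_adv = fgsm_gt(images, labels, F.cross_entropy, rho=8, model=net)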
def rfgsm_gt(x, y, criterion, rho=None, model=None, randinit=True, iscuda=False, **kwargs):
if len(kwargs) > 0:
assert set(kwargs.keys()).issubset(_extra_args)
if rho is None:
rho = 3
eps = attack_eps(rho)
eps_torch = torch.tensor(eps).view(1, len(eps), 1, 1)
if iscuda:
eps_torch = eps_torch.cuda()
# print("update eps to cuda")
rgb = attack_range()
# compute loss
x_adv = x.clone()
x_adv.requires_grad = True
if randinit:
# pertub = torch.sign( torch.randn_like(x_adv) )
x_adv.data.add_( (2.0 * torch.rand_like(x_adv) - 1.0) * eps_torch )
for i in range(len(eps)):
alpha = eps[i]
# x_adv[:,i,:,:].data.add_(alpha * pertub[:,i,:,:])
linfball_proj(center=x[:,i,:,:], radius=alpha, t=x_adv[:,i,:,:])
edgecut(x_adv[:,i,:,:], min=rgb[i][0], max=rgb[i][1])
loss_adv0 = criterion(model(x_adv), y, reduction="sum")
grad0 = torch.autograd.grad(loss_adv0, x_adv, only_inputs=True)[0]
for i in range(len(eps)):
alpha = eps[i] / 2.
x_adv[:,i,:,:].data.add_(alpha * torch.sign(grad0.data[:,i,:,:]))
linfball_proj(center=x[:,i,:,:], radius=eps[i], t=x_adv[:,i,:,:])
edgecut(x_adv[:,i,:,:], min=rgb[i][0], max=rgb[i][1])
# x_adv[:,i,:,:] = torch.clamp(x_adv[:,i,:,:], min=rgb[i][0], max=rgb[i][1])
return x_adv.data
def ifgsm_gt(x, y, criterion, rho=None, model=None, steps=3, randinit=False, iscuda=False, **kwargs):
if len(kwargs) > 0:
assert set(kwargs.keys()).issubset(_extra_args)
if rho is None:
rho = 3
eps = attack_eps(rho)
eps_torch = torch.tensor(eps).view(1, len(eps), 1, 1)
if iscuda:
eps_torch = eps_torch.cuda()
# print("update eps to cuda")
rgb = attack_range()
# compute loss
x_adv = x.clone()
x_adv.requires_grad = True
if randinit:
# pertub = torch.sign( torch.randn_like(x_adv) )
        x_adv.data.add_((2.0 * torch.rand_like(x_adv) - 1.0) * eps_torch)
for i in range(len(eps)):
alpha = eps[i]
# x_adv[:,i,:,:].data.add_(alpha * pertub[:,i,:,:])
linfball_proj(center=x[:,i,:,:], radius=alpha, t=x_adv[:,i,:,:])
edgecut(x_adv[:,i,:,:], min=rgb[i][0], max=rgb[i][1])
for _ in range(steps):
loss_adv = criterion(model(x_adv), y, reduction="sum")
grad = torch.autograd.grad(loss_adv, x_adv, only_inputs=True)[0]
with torch.no_grad():
for i in range(len(eps)):
alpha = (eps[i] * 1.25) / steps
x_adv[:,i,:,:].data.add_(alpha * torch.sign(grad.data[:,i,:,:]))
# print(eps[i])
linfball_proj(center=x[:,i,:,:], radius=eps[i], t=x_adv[:,i,:,:])
# x_adv[:,i,:,:].data.copy_(tmp.data)
edgecut(x_adv[:,i,:,:], min=rgb[i][0], max=rgb[i][1])
# tmp = torch.max((x - x_adv).view(x.size(0), -1).abs(), dim=-1)[0]
# tmp = x_adv.max()
# print(x_adv.min(), x_adv.max())
# x_adv[:,i,:,:].data.fill_(torch.clamp(x_adv[:,i,:,:], min=rgb[i][0], max=rgb[i][1]))
return x_adv.data
def pgd_gt(x, y, criterion, rho=None, model=None, steps=3, randinit=True, iscuda=False, **kwargs):
if len(kwargs) > 0:
assert set(kwargs.keys()).issubset(_extra_args)
if rho is None:
rho = 3
eps = attack_eps(rho)
eps_torch = torch.tensor(eps).view(1, len(eps), 1, 1)
if iscuda:
eps_torch = eps_torch.cuda()
# print("update eps to cuda")
rgb = attack_range()
# compute loss
x_adv = x.clone()
x_adv.requires_grad = True
if randinit:
# pertub = torch.sign( torch.randn_like(x_adv) )
        x_adv.data.add_((2.0 * torch.rand_like(x_adv) - 1.0) * eps_torch)
for i in range(len(eps)):
alpha = eps[i]
# x_adv[:,i,:,:].data.add_(alpha * pertub[:,i,:,:])
linfball_proj(center=x[:,i,:,:], radius=alpha, t=x_adv[:,i,:,:])
edgecut(x_adv[:,i,:,:], min=rgb[i][0], max=rgb[i][1])
for _ in range(steps):
loss_adv = criterion(model(x_adv), y, reduction="sum")
grad = torch.autograd.grad(loss_adv, x_adv, only_inputs=True)[0]
with torch.no_grad():
for i in range(len(eps)):
alpha = (eps[i] * 1.25) / steps
x_adv[:,i,:,:].data.add_(alpha * torch.sign(grad.data[:,i,:,:]))
linfball_proj(center=x[:,i,:,:], radius=eps[i], t=x_adv[:,i,:,:])
edgecut(x_adv[:,i,:,:], min=rgb[i][0], max=rgb[i][1])
# x_adv[:,i,:,:].data.fill_(torch.clamp(x_adv[:,i,:,:], min=rgb[i][0], max=rgb[i][1]))
return x_adv.data
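# Hedged usage sketch for pgd_gt (illustrative only): a multi-step projected
# attack with random initialisation; the same assumptions as for the fgsm_gt
# sketch above apply (nn.Module model, normalised batch x, hard labels y).
def _demo_pgd_usage(model, x, y):
    import torch.nn.functional as F
    return pgd_gt(x, y, F.cross_entropy, rho=4, model=model,
                  steps=7, randinit=True, iscuda=x.is_cuda)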
def grad_gt(x, y, criterion, rho=None, model=None, steps=3, randinit=False, iscuda=False, **kwargs):
if len(kwargs) > 0:
assert set(kwargs.keys()).issubset(_extra_args)
if rho is None:
rho = 3
eps = attack_eps(rho)
eps_torch = torch.tensor(eps).view(1, len(eps), 1, 1)
if iscuda:
eps_torch = eps_torch.cuda()
# print("update eps to cuda")
rgb = attack_range()
# compute loss
x_adv = x.clone()
x_adv.requires_grad = True
if randinit:
# pertub = torch.sign( torch.randn_like(x_adv) )
        x_adv.data.add_((2.0 * torch.rand_like(x_adv) - 1.0) * eps_torch)
for i in range(len(eps)):
alpha = eps[i]
# x_adv[:,i,:,:].data.add_(alpha * pertub[:,i,:,:])
linfball_proj(center=x[:,i,:,:], radius=alpha, t=x_adv[:,i,:,:])
edgecut(x_adv[:,i,:,:], min=rgb[i][0], max=rgb[i][1])
for _ in range(steps):
loss_adv = criterion(model(x_adv), y, reduction="sum")
grad = torch.autograd.grad(loss_adv, x_adv, only_inputs=True)[0]
with torch.no_grad():
for i in range(len(eps)):
alpha = (eps[i] * 1.25) / steps / grad.data[:, i, :, :].abs().mean()
x_adv[:,i,:,:].data.add_(alpha * grad.data[:,i,:,:])
linfball_proj(center=x[:,i,:,:], radius=eps[i], t=x_adv[:,i,:,:])
# edgecut(x_adv[:,i,:,:], min=rgb[i][0], max=rgb[i][1])
# x_adv[:,i,:,:].data.fill_(torch.clamp(x_adv[:,i,:,:], min=rgb[i][0], max=rgb[i][1]))
return x_adv.data
def wrm_gt(x, y, criterion, rho=None, model=None, steps=3, randinit=False, gamma=None, iscuda=False, **kwargs):
if len(kwargs) > 0:
assert set(kwargs.keys()).issubset(_extra_args)
if gamma is None:
gamma = 1.3
eps = attack_eps(rho)
eps_torch = torch.tensor(eps).view(1, len(eps), 1, 1)
if iscuda:
eps_torch = eps_torch.cuda()
# print("update eps to cuda")
rgb = attack_range()
# compute loss
x_adv = x.clone()
x_adv.requires_grad = True
if randinit:
# pertub = torch.sign( torch.randn_like(x_adv) )
        x_adv.data.add_((2.0 * torch.rand_like(x_adv) - 1.0) * eps_torch)
for i in range(len(eps)):
alpha = eps[i]
# x_adv[:,i,:,:].data.add_(alpha * pertub[:,i,:,:])
linfball_proj(center=x[:,i,:,:], radius=alpha, t=x_adv[:,i,:,:])
edgecut(x_adv[:,i,:,:], min=rgb[i][0], max=rgb[i][1])
x_adv.requires_grad = True
ord = 2
for t in range(steps):
loss_adv = gamma * criterion(model(x_adv), y, reduction="sum") - \
0.5 * torch.sum(torch.norm((x_adv - x.data).view(x_adv.size(0), -1), p=ord, dim=1) ** 2)
grad = torch.autograd.grad(loss_adv, x_adv, only_inputs=True)[0]
scale = float(1./np.sqrt(t + 1))
x_adv.data.add_(scale * grad.data)
for i in range(len(eps)):
            linfball_proj(center=x[:,i,:,:], radius=eps[i], t=x_adv[:,i,:,:])
edgecut(x_adv[:,i,:,:], min=rgb[i][0], max=rgb[i][1])
return x_adv.data
def wrm(x, preds, loss_fn, y=None, gamma=None, model=None, steps=3, randinit=False, eps=0.062, **kwargs):
if len(kwargs) > 0:
assert set(kwargs.keys()).issubset(_extra_args)
if gamma is None:
gamma = 1.3
if y is None:
# Using model predictions as ground truth to avoid label leaking
        preds_max = preds.data.max(1, keepdim=True)[0]
        y = torch.eq(preds, preds_max).float()
# Compute loss
x_adv = x.clone()
if randinit:
# x_adv += torch.randn_like(x_adv).clamp_(min=-1.0, max=1.0) * eps
x_adv += (2.0 * torch.rand_like(x_adv) - 1.0) * eps
x_adv.requires_grad = True
ord = 2
for t in range(steps):
loss_adv0 = gamma * loss_fn(model(x_adv), y, reduction="sum") - \
0.5 * torch.sum(torch.norm((x_adv - x.data).view(x_adv.size(0), -1), p=ord, dim=1) ** 2)
# loss_adv0.backward()
# grad0 = x_adv.grad
grad0 = torch.autograd.grad(loss_adv0, x_adv, only_inputs=True)[0]
scale = float(1./np.sqrt(t+1))
x_adv.data.add_(scale * grad0.data)
# x_adv.grad.data.zero_()
# print("intermedia_grad0:", torch.norm(grad0))
linfball_proj(x, eps, x_adv, in_place=True)
return x_adv
def fgm(x, preds, loss_fn, y=None, eps=None, model=None, **kwargs):
if len(kwargs) > 0:
assert set(kwargs.keys()).issubset(_extra_args)
if eps is None:
# eps = 0.07972772183418274
# eps = 0.45474205
eps = 0.062
if y is None:
# Using model predictions as ground truth to avoid label leaking
        preds_max = preds.data.max(1, keepdim=True)[0]
        y = torch.eq(preds, preds_max).float()
# Compute loss
x_adv = x.clone()
x_adv.requires_grad = True
# print("right")
loss_adv0 = loss_fn(model(x_adv), y, reduction='sum')
grad0 = torch.autograd.grad(loss_adv0, x_adv, only_inputs=True)[0]
x_adv.data.add_(eps * torch.sign(grad0.data))
return x_adv
def input_reg(x, preds, loss_fn, y=None, eps=None, model=None, label_smoothing=0.0):
if eps is None:
eps = 1e-4
if y is None:
        preds_max = preds.data.max(1, keepdim=True)[0]
        y = torch.eq(preds, preds_max).float()
# compute loss
x_adv = x.clone()
x_adv.requires_grad = True
loss_adv0 = loss_fn(model(x_adv), y, reduction='sum')
grad0 = torch.autograd.grad(loss_adv0, x_adv, only_inputs=True)[0]
x_adv.requires_grad = False
return eps * torch.sum(grad0.data **2)
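# Hedged usage sketch for input_reg (illustrative only): it returns
# eps * ||d loss / d x||^2 for the current batch, detached from the graph,
# so it is best read as a diagnostic value rather than a differentiable
# penalty. `model`, `x`, `labels` are placeholders.
def _demo_input_reg_usage(model, x, labels):
    import torch.nn.functional as F
    preds = model(x)
    return input_reg(x, preds, F.cross_entropy, y=labels, eps=1e-4, model=model)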
def ifgm(x, preds, loss_fn, y=None, eps=None, model=None, steps=3, alpha=None, randinit=False, **kwargs):
if len(kwargs) > 0:
assert set(kwargs.keys()).issubset(_extra_args)
if eps is None:
# eps = 0.07972772183418274
# eps = 0.45474205
eps = 0.062
if alpha is None:
alpha = (eps * 1.25) / steps
if y is None:
# Using model predictions as ground truth to avoid label leaking
        preds_max = preds.data.max(1, keepdim=True)[0]
        y = torch.eq(preds, preds_max).float()
# Compute loss
x_adv = x.clone()
if randinit:
# x_adv += torch.randn_like(x_adv).clamp_(min=-1.0, max=1.0) * eps
x_adv += (2.0 * torch.rand_like(x_adv) - 1.0) * eps
x_adv.requires_grad = True
for _ in range(steps):
loss_adv0 = loss_fn(model(x_adv), y, reduction='sum')
grad0 = torch.autograd.grad(loss_adv0, x_adv, only_inputs=True)[0]
x_adv.data.add_(alpha * torch.sign(grad0.data))
linfball_proj(x, eps, x_adv, in_place=True)
return x_adv
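# Hedged usage sketch for ifgm (illustrative only): the fgm/ifgm/wrm family
# takes the current predictions as the second positional argument plus a loss
# function accepting reduction= (e.g. F.cross_entropy); when y is None the
# model's own predictions are used as pseudo labels to avoid label leaking.
def _demo_ifgm_usage(model, x, labels):
    import torch.nn.functional as F
    preds = model(x)
    return ifgm(x, preds, F.cross_entropy, y=labels, eps=0.062, model=model, steps=7)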
def clamp(x, min_range, max_range):
N, C, H, W = x.shape
xadv = x.data.clone()
for i in range(C):
xadv[:,i,:,:] = torch.clamp(x[:,i,:,:], max=max_range[i], min=min_range[i])
return xadv
def ifgm_attack(x, preds, loss_fn, y=None, eps=None, model=None, steps=3, alpha=None, randinit=False, **kwargs):
if len(kwargs) > 0:
assert set(kwargs.keys()).issubset(_extra_args)
if eps is None:
# eps = 0.07972772183418274
# eps = 0.45474205
eps = 0.062
if alpha is None:
alpha = (eps * 1.25) / steps
if y is None:
# Using model predictions as ground truth to avoid label leaking
        preds_max = preds.data.max(1, keepdim=True)[0]
        y = torch.eq(preds, preds_max).float()
# Compute loss
x_adv = x.clone()
if randinit:
# x_adv += torch.randn_like(x_adv).clamp_(min=-1.0, max=1.0) * eps
x_adv += torch.sign(2.0 * torch.rand_like(x_adv) - 1.0) * eps
x_adv.requires_grad = True
for _ in range(steps):
loss_adv0 = loss_fn(model(x_adv), y, reduction='sum')
grad0 = torch.autograd.grad(loss_adv0, x_adv, only_inputs=True)[0]
x_adv.data.add_(alpha * torch.sign(grad0.data))
linfball_proj(x, eps, x_adv, in_place=True)
mean = [0.4914, 0.4822, 0.4465]
std = [0.2023, 0.1994, 0.2010]
min_range = []
max_range = []
for i in range(3):
max_range.append((1.0 - mean[i])/std[i])
min_range.append((0.0 - mean[i])/std[i])
x_adv = clamp(x_adv, min_range, max_range)
return x_adv
def pgm(x, preds, loss_fn, y=None, eps=None, model=None, steps=16, **kwargs):
raise DeprecationWarning
if eps is None:
# eps = 0.33910248303413393
eps = 3.27090588
if y is None:
# Using model predictions as ground truth to avoid label leaking
        preds_max = preds.data.max(1, keepdim=True)[0]
        y = torch.eq(preds, preds_max).float()
# Compute loss
x_adv = x.clone()
x_adv.requires_grad = True
for t in range(steps):
loss_adv0 = loss_fn(model(x_adv), y, reduction='sum')
grad0 = torch.autograd.grad(loss_adv0, x_adv, only_inputs=True)[0]
scale = float(1./np.sqrt(t+1)) * 3.0
x_adv.data.add_(scale * grad0.data)
l2ball_proj(x, eps, x_adv, in_place=True)
return x_adv
def eval_adv_model(model, data_loader, attack_algo=pgm, attack_eps=None, cuda=True):
model.eval()
test_loss = 0
adv_loss = 0
correct = 0
correct_adv = 0
adv_l2dist = 0.0
adv_linfdist = 0.0
for data, target in data_loader:
indx_target = target.clone()
target_ = torch.unsqueeze(target, 1)
one_hot = torch.FloatTensor(target.size()[0], 10).zero_()
one_hot.scatter_(1, target_, 1)
if cuda:
data, target = data.cuda(), one_hot.cuda()
else:
target = one_hot
with torch.no_grad():
output = model(data)
data_adv = attack_algo(data, output, y=target, eps=attack_eps, model=model, label_smoothing=0.0).data
adv_l2dist += torch.norm((data - data_adv).view(data.size(0), -1), p=2, dim=-1).sum().item()
adv_linfdist += torch.max((data - data_adv).view(data.size(0), -1).abs(), dim=-1)[0].sum().item()
with torch.no_grad():
output_adv = model(data_adv)
adv_loss += cross_entropy(output_adv, target, 0.0, size_average=False).data.item()
pred_adv = output_adv.data.max(1)[1]
correct_adv += pred_adv.cpu().eq(indx_target).sum()
test_loss += cross_entropy(output, target, 0.0, size_average=False).data.item()
pred = output.data.max(1)[1] # get the index of the max log-probability
correct += pred.cpu().eq(indx_target).sum()
test_loss /= len(data_loader.dataset) # average over number of mini-batch
acc = float(100. * correct) / len(data_loader.dataset)
print('\tTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
test_loss, correct, len(data_loader.dataset), acc))
adv_loss /= len(data_loader.dataset)
acc_adv = float(100. * correct_adv) / len(data_loader.dataset)
print('\tAdv set: Average loss: {:.4f}, Accuracy : {}/{} ({:.0f}%)'.format(
adv_loss, correct_adv, len(data_loader.dataset), acc_adv
))
adv_l2dist /= len(data_loader.dataset)
adv_linfdist /= len(data_loader.dataset)
print('\tAdv dist: L2: {:.4f}, Linf: {:.4f}'.format(adv_l2dist, adv_linfdist))
return {'test_loss': test_loss, 'test_acc': acc, 'adv_loss': adv_loss, 'adv_acc': acc_adv, 'adv_l2dist': adv_l2dist,
'adv_linfdist': adv_linfdist} |
pyw | 1a3ceeb7459127116f12c069d228f5eb91391dd1 | # manage.py
import os
import unittest
import coverage
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
COV = coverage.coverage(
branch=True,
include='project/*',
omit=[
'project/tests/*',
'project/server/config.py',
'project/server/*/__init__.py'
]
)
COV.start()
from project.server import app, db, models
migrate = Migrate(app, db)
manager = Manager(app)
# migrations
manager.add_command('db', MigrateCommand)
@manager.command
def test():
"""Runs the unit tests without test coverage."""
tests = unittest.TestLoader().discover('project/tests', pattern='test*.py')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
return 1
@manager.command
def cov():
"""Runs the unit tests with coverage."""
tests = unittest.TestLoader().discover('project/tests')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
COV.stop()
COV.save()
print('Coverage Summary:')
COV.report()
basedir = os.path.abspath(os.path.dirname(__file__))
covdir = os.path.join(basedir, 'tmp/coverage')
COV.html_report(directory=covdir)
print('HTML version: file://%s/index.html' % covdir)
COV.erase()
return 0
return 1
@manager.command
def create_db():
"""Creates the db tables."""
db.create_all()
@manager.command
def drop_db():
"""Drops the db tables."""
db.drop_all()
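# Typical invocations (illustrative; assumes this script is saved as manage.py
# at the project root):
#   python manage.py test        # run the unit tests
#   python manage.py cov         # run the tests with a coverage report
#   python manage.py create_db   # create the database tables
#   python manage.py db migrate  # Flask-Migrate commands registered above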
if __name__ == '__main__':
manager.run() |
py | 1a3cefbc8f93a4e0a3a62ebbc513d11cf87db77f | # pylint: disable=unused-import
"""
UBX Protocol Input payload definitions
THESE ARE THE PAYLOAD DEFINITIONS FOR _SET_ MESSAGES _TO_ THE RECEIVER
(e.g. configuration and calibration commands; AssistNow payloads)
Created on 27 Sep 2020
Information sourced from u-blox Interface Specifications © 2013-2021, u-blox AG
:author: semuadmin
"""
# pylint: disable=too-many-lines, line-too-long, duplicate-code
from pyubx2.ubxtypes_core import (
C2,
C6,
C32,
I1,
I2,
I4,
R4,
R8,
U1,
U2,
U3,
U4,
U5,
U6,
U7,
U8,
U9,
U12,
U22,
U40,
U64,
X1,
X2,
X4,
)
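# Note on the conventions used in the definitions below (as understood from
# their use elsewhere in pyubx2): a plain "name": TYPE entry is a scalar
# attribute; a tuple of (Xn, {...}) expands a bitfield into named flag
# sub-fields; and a tuple whose first element is the name of a preceding count
# attribute (or "None" for "repeat to the end of the payload", or a fixed
# integer) defines a repeating group, e.g.
#   "group": ("numFences", {"lat": I4, "lon": I4, "radius": U4})
# repeats once per configured geofence in CFG-GEOFENCE.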
UBX_PAYLOADS_SET = {
# AssistNow Aiding Messages: i.e. Ephemeris, Almanac, other A-GPS data input.
# Messages in the AID class are used to send GPS aiding data to the receiver
# AID messages are deprecated in favour of MGA messages in >=Gen8
"AID-ALM": {"svid": U4, "week": U4, "optBlock": ("None", {"dwrd": U4})},
"AID-AOP": {"gnssId": U1, "svId": U1, "reserved1": U2, "data": U64},
"AID-EPH": {
"svid": U4,
"how": U4,
"optBlock": (
"None",
{
"sf1d1": U4,
"sf1d2": U4,
"sf1d3": U4,
"sf1d4": U4,
"sf1d5": U4,
"sf1d6": U4,
"sf1d7": U4,
"sf1d8": U4,
"sf2d1": U4,
"sf2d2": U4,
"sf2d3": U4,
"sf2d4": U4,
"sf2d5": U4,
"sf2d6": U4,
"sf2d7": U4,
"sf2d8": U4,
"sf3d1": U4,
"sf3d2": U4,
"sf3d3": U4,
"sf3d4": U4,
"sf3d5": U4,
"sf3d6": U4,
"sf3d7": U4,
"sf3d8": U4,
},
),
},
"AID-HUI": {
"health": X4,
"utcA0": R8,
"utcA1": R8,
"utcTOW": I4,
"utcWNT": I2,
"utcLS": I2,
"utcWNF": I2,
"utcDNs": I2,
"utcLSF": I2,
"utcSpare": I2,
"klobA0": R4,
"klobA1": R4,
"klobA2": R4,
"klobA3": R4,
"klobB0": R4,
"klobB1": R4,
"klobB2": R4,
"klobB3": R4,
"flags": X4,
},
"AID-INI": {
"ecefXOrLat": I4,
"ecefYOrLon": I4,
"ecefZOrAlt": I4,
"posAcc": U4,
"tmCfg": X2,
"wn": U2,
"tow": U4,
"towNs": I4,
"tAccMs": U4,
"tAccNs": U4,
"clkDOrFreq": I4,
"clkDAccOrFreqAcc": U4,
"flags": X4,
},
# ********************************************************************
# Configuration Input Messages: i.e. Set Dynamic Model, Set DOP Mask, Set Baud Rate, etc..
# Messages in the CFG class are used to configure the receiver and read out current configuration values. Any
# messages in the CFG class sent to the receiver are either acknowledged (with message UBX-ACK-ACK) if
# processed successfully or rejected (with message UBX-ACK-NAK) if processing unsuccessfully.
"CFG-ANT": {
"flags": (
X2,
{
"svcs": U1,
"scd": U1,
"ocd": U1,
"pdwnOnSCD": U1,
"recovery": U1,
},
),
"pins": (
X2,
{
"pinSwitch": U5,
"pinSCD": U5,
"pinOCD": U5,
"reconfig": U1,
},
),
},
"CFG-BATCH": {
"version": U1,
"flags": (
X1,
{
"enable": U1,
"reserved1": U1,
"extraPvt": U1,
"extraOdo": U1,
"reserved2": U1,
"pioEnable": U1,
"pioActiveLow": U1,
},
),
"bufSize": U2,
"notifThrs": U2,
"pioId": U1,
"reserved0": U1,
},
"CFG-CFG": {
"clearMask": X4,
"saveMask": X4,
"loadMask": X4,
"deviceMask": (
X1,
{
"devBBR": U1,
"devFlash": U1,
"devEEPROM": U1,
"reserved1": U1,
"devSpiFlash": U1,
},
),
},
"CFG-DAT": {
"datumNum": U2,
"datumName": C6,
"majA": R8,
"flat": R8,
"dX": R4,
"dY": R4,
"dZ": R4,
"rotX": R4,
"rotY": R4,
"rotZ": R4,
"scale": R4,
},
"CFG-DGNSS": {
"dgnssMode": U1,
"reserved0": U3,
},
"CFG-DOSC": {
"version": U1,
"numOsc": U1,
"reserved1": U2,
"group": (
"numOsc",
{ # repeating group * numOsc
"oscId": U1,
"reserved2": U1,
"flags": (
X2,
{
"isCalibrated": U1,
"controlIf": U4,
},
),
"freq": U4,
"phaseOffset": I4,
"withTemp": U4,
"withAge": U4,
"timeToTemp": U2,
"reserved3": U2,
"gainVco": I4,
"gainUncertainty": U1,
"reserved4": U3,
},
),
},
"CFG-DYNSEED": {"version": U1, "reserved1": U3, "seedHi": U4, "seedLo": U4},
"CFG-ESFALG": {
"bitfield": U4,
"yaw": U4,
"pitch": I2,
"roll": I2,
},
"CFG-ESFA": {
"version": U1,
"reserved1": U9,
"accelRmsThdl": U1,
"frequency": U1,
"latency": U2,
"accuracy": U2,
"reserved2": U4,
},
"CFG-ESFG": {
"version": U1,
"reserved1": U7,
"tcTableSaveRate": U2,
"gyroRmsThdl": U1,
"frequency": U1,
"latency": U2,
"accuracy": U2,
"reserved2": U4,
},
"CFG-ESFWT": {
"version": U1,
"flags1": (
X1,
{
"combineTicks": U1,
"reserved3": U3,
"useWtSpeed": U1,
"dirPinPol": U1,
"useWtPin": U1,
},
),
"flags2": (
X1,
{
"autoWtCountMaxOff": U1,
"autoDirPinPolOff": U1,
"autoSoftwareWtOff": U1,
"autoUseWtSpeedOff": U1,
},
),
"reserved1": U1,
"wtFactor": U4,
"wtQuantError": U4,
"wtCountMax": U4,
"wtLatency": U2,
"wtFrequency": U1,
"flags3": (
X1,
{
"reserved3": U4,
"cntBothEdges": U1,
},
),
"speedDeadBand": U2,
"reserved2": U1,
},
"CFG-ESRC": {
"version": U1,
"numSources": U1,
"reserved1": U2,
"group": (
"numSources",
{ # repeating group * numSources
"extInt": U1,
"flags": (
X2,
{
"polarity": U1,
"gnssUtc": U1,
},
),
"freq": U4,
"reserved2": U4,
"withTemp": U4,
"withAge": U4,
"timeToTemp": U2,
"maxDevLifeTim": U2,
"offset": I4,
"offsetUncertainty": U4,
"jitter": U4,
},
),
},
"CFG-FIXSEED": {
"version": U1,
"length": U1,
"reserved1": U2,
"seedHi": U4,
"seedLo": U4,
"group": ("length", {"classId": U1, "msgId": U1}), # repeating group * length
},
"CFG-GEOFENCE": {
"version": U1,
"numFences": U1,
"confLvl": U1,
"reserved1": U1,
"pioEnabled": U1,
"pinPolarity": U1,
"pin": U1,
"reserved2": U1,
"group": (
"numFences",
{"lat": I4, "lon": I4, "radius": U4}, # repeating group * numFences
),
},
"CFG-GNSS": {
"msgVer": U1,
"numTrkChHw": U1,
"numTrkChUse": U1,
"numConfigBlocks": U1,
"group": (
"numConfigBlocks",
{ # repeating group * numConfigBlocks
"gnssId": U1,
"resTrkCh": U1,
"maxTrkCh": U1,
"reserved0": U1,
"flags": (
X4,
{
"enable": U1,
"reserved1": U8,
"reserved2": U7,
"sigCfMask": U8,
"reserved3": U8,
},
),
},
),
},
"CFG-HNR": {
"highNavRate": U1,
"reserved1": U3,
},
"CFG-INF": {
"protocolID": U1,
"reserved0": U3,
"infMaskGroup": (
6,
{
"infMsgMask": (
X1,
{
"enableError": U1,
"enableWarning": U1,
"enableNotice": U1,
"enableTest": U1,
"enableDebug": U1,
},
),
},
),
},
"CFG-ITFM": {
"config": (
X4,
{
"bbThreshold": U4,
"cwThreshold": U5,
"algorithmBits": U22,
"enable": U1,
},
),
"config2": (
X4,
{
"generalBits": U12,
"antSetting": U2,
"enable2": U1,
},
),
},
"CFG-LOGFILTER": {
"version": U1,
"flags": (
X1,
{
"recordEnabled": U1,
"psmOncePerWakupEnabled": U1,
"applyAllFilterSettings": U1,
},
),
"minInterval": U2,
"timeThreshold": U2,
"speedThreshold": U2,
"positionThreshold": U4,
},
"CFG-MSG": {
"msgClass": U1,
"msgID": U1,
"rateDDC": U1,
"rateUART1": U1,
"rateUART2": U1,
"rateUSB": U1,
"rateSPI": U1,
"reserved": U1,
},
"CFG-NAV5": {
"mask": (
X2,
{
"dyn": U1,
"minEl": U1,
"posFixMode": U1,
"drLim": U1,
"posMask": U1,
"timeMask": U1,
"staticHoldMask": U1,
"dgpsMask": U1,
"cnoThreshold": U1,
"reserved0": U1,
"utc": U1,
},
),
"dynModel": U1,
"fixMode": U1,
"fixedAlt": I4,
"fixedAltVar": U4,
"minElev": I1,
"drLimit": U1,
"pDop": U2,
"tDop": U2,
"pAcc": U2,
"tAcc": U2,
"staticHoldThresh": U1,
"dgpsTimeOut": U1,
"reserved2": U4,
"reserved3": U4,
"reserved4": U4,
},
"CFG-NAVX5": {
"version": U2,
"mask1": (
X2,
{
"reserved9": U2,
"minMax": U1,
"minCno": U1,
"reserved10": U2,
"initial3dfix": U1,
"reserved11": U2,
"wknRoll": U1,
"ackAid": U1,
"reserved12": U2,
"ppp": U1,
"aop": U1,
},
),
"mask2": (
X4,
{
"reserved13": U6,
"useAdr": U1,
"sigAttenComp": U1,
},
),
"reserved0": U2,
"minSVs": U1,
"maxSVs": U1,
"minCNO": U1,
"reserved1": U1,
"iniFix3D": U1,
"reserved2": U2,
"ackAiding": U1,
"wknRollover": U2,
"sigAttenCompMode": U1,
"reserved3": U1,
"reserved4": U2,
"reserved5": U2,
"usePPP": U1,
"aopCfg": U1,
"reserved6": U2,
"aopOrbMaxErr": U2,
"reserved7": U4,
"reserved8": U3,
"useAdr": U1,
},
"CFG-NMEAvX": { # deprecated length 4
"filter": (
X1,
{
"posFilt": U1,
"mskPosFilt": U1,
"timeFilt": U1,
"dateFilt": U1,
"gpsOnlyFilter": U1,
"trackFilt": U1,
},
),
"nmeaVersion": U1,
"numSV": U1,
"flags": (
X1,
{
"compat": U1,
"consider": U1,
"limit82": U1,
"highPrec": U1,
},
),
},
"CFG-NMEAv0": { # v0 deprecated length 12
"filter": (
X1,
{
"posFilt": U1,
"mskPosFilt": U1,
"timeFilt": U1,
"dateFilt": U1,
"gpsOnlyFilter": U1,
"trackFilt": U1,
},
),
"nmeaVersion": U1,
"numSV": U1,
"flags": (
X1,
{
"compat": U1,
"consider": U1,
"limit82": U1,
"highPrec": U1,
},
),
"gnssToFilter": (
X4,
{
"disableGps": U1,
"disableSbas": U1,
"disableGalileo": U1,
"reserved2": U1,
"disableQzss": U1,
"disableGlonass": U1,
"disableBeidou": U1,
},
),
"svNumbering": U1,
"mainTalkerId": U1,
"gsvTalkerId": U1,
"version": U1,
},
"CFG-NMEA": { # preferred version length 20
"filter": (
X1,
{
"posFilt": U1,
"mskPosFilt": U1,
"timeFilt": U1,
"dateFilt": U1,
"gpsOnlyFilter": U1,
"trackFilt": U1,
},
),
"nmeaVersion": U1,
"numSV": U1,
"flags": (
X1,
{
"compat": U1,
"consider": U1,
"limit82": U1,
"highPrec": U1,
},
),
"gnssToFilter": (
X4,
{
"disableGps": U1,
"disableSbas": U1,
"disableGalileo": U1,
"reserved2": U1,
"disableQzss": U1,
"disableGlonass": U1,
"disableBeidou": U1,
},
),
"svNumbering": U1,
"mainTalkerId": U1,
"gsvTalkerId": U1,
"version": U1,
"bdsTalkerId": C2,
"reserved1": U6,
},
"CFG-ODO": {
"version": U1,
"reserved0": U3,
"flags": (
X1,
{
"useODO": U1,
"useCOG": U1,
"outLPVel": U1,
"outLPCog": U1,
},
),
"odoCfg": (
X1,
{
"profile": U3,
},
),
"reserved1": U6,
"cogMaxSpeed": U1,
"cogMaxPosAcc": U1,
"reserved2": U2,
"velLpGain": U1,
"cogLpGain": U1,
"reserved3": U2,
},
"CFG-PM2": {
"version": U1,
"reserved0": U1,
"maxStartupStateDur": U1,
"reserved1": U1,
"flags": (
X4,
{
"optTarget": U3,
"extintSel": U1,
"extintWake": U1,
"extintBackup": U1,
"extintInactive": U1,
"limitPeakCurr": U2,
"waitTimeFix": U1,
"updateRTC": U1,
"updateEPH": U1,
"reserved12": U3,
"doNotEnterOff": U1,
"operationMode": U2,
},
),
"updatePeriod": U4,
"searchPeriod": U4,
"gridOffset": U4,
"onTime": U2,
"minAcqTime": U2,
"reserved4": U2,
"reserved5": U2,
"reserved6": U4,
"reserved7": U4,
"reserved8": U1,
"reserved9": U1,
"reserved10": U2,
"reserved11": U4,
},
"CFG-PMS": {
"version": U1,
"powerSetupValue": U1,
"period": U2,
"onTime": U2,
"reserved1": U2,
},
"CFG-PRT": {
"portID": U1,
"reserved0": U1,
"txReady": (
X2,
{
"enable": U1,
"pol": U1,
"pin": U5,
"thres": U9,
},
),
"UARTmode": (
X4,
{
"reserved6": U6,
"charLen": U2,
"reserved7": U1,
"parity": U3,
"nStopBits": U2,
},
),
"baudRate": U4,
"inProtoMask": (
X2,
{
"inUBX": U1,
"inNMEA": U1,
"inRTCM": U1,
"reserved8": U2,
"inRTCM3": U1,
},
),
"outProtoMask": (
X2,
{
"outUBX": U1,
"outNMEA": U1,
"reserved9": U3,
"outRTCM3": U1,
},
),
"flags": (
X2,
{
"reserved10": U1,
"extendedTxTimeout": U1,
},
),
"reserved5": U2,
},
"CFG-PWR": {"version": U1, "reserved1": U3, "state": U4},
"CFG-RATE": {"measRate": U2, "navRate": U2, "timeRef": U2},
"CFG-RINV": {
"flags": (
X1,
{
"dump": U1,
"binary": U1,
},
),
"group": ("None", {"data": U1}),
}, # repeating group
"CFG-RST": {
"navBbrMask": (
X2,
{
"eph": U1,
"alm": U1,
"health": U1,
"klob": U1,
"pos": U1,
"clkd": U1,
"osc": U1,
"utc": U1,
"rtc": U1,
"reserved2": U6,
"aop": U1,
},
),
"resetMode": U1,
"reserved1": U1,
},
"CFG-RXM": {"reserved0": U1, "lpMode": U1},
"CFG-SBAS": {
"mode": (
X1,
{
"enabled": U1,
"test": U1,
},
),
"usage": (
X1,
{
"range": U1,
"diffCorr": U1,
"integrity": U1,
},
),
"maxSBAS": U1,
"scanmode2": (
X1,
{
"PRN152": U1,
"PRN153": U1,
"PRN154": U1,
"PRN155": U1,
"PRN156": U1,
"PRN157": U1,
"PRN158": U1,
},
),
"scanmode1": (
X4,
{
"PRN120": U1,
"PRN121": U1,
"PRN122": U1,
"PRN123": U1,
"PRN124": U1,
"PRN125": U1,
"PRN126": U1,
"PRN127": U1,
"PRN128": U1,
"PRN129": U1,
"PRN130": U1,
"PRN131": U1,
"PRN132": U1,
"PRN133": U1,
"PRN134": U1,
"PRN135": U1,
"PRN136": U1,
"PRN137": U1,
"PRN138": U1,
"PRN139": U1,
"PRN140": U1,
"PRN141": U1,
"PRN142": U1,
"PRN143": U1,
"PRN144": U1,
"PRN145": U1,
"PRN146": U1,
"PRN147": U1,
"PRN148": U1,
"PRN149": U1,
"PRN150": U1,
"PRN151": U1,
},
),
},
"CFG-SENIF": {
"type": U1,
"version": U1,
"flags": (
X2,
{
"senConn": U1,
},
),
"pioConf": X2,
},
"CFG-SLAS": {
"mode": (
X1,
{
"enabled": U1,
"test": U1,
"raim": U1,
},
),
"reserved1": U3,
},
"CFG-SMGR": {
"minGNSSFix": U1,
"maxFreqChange": U2,
"maxPhaseCorrRate": U2,
"reserved1": U2,
"freqTolerance": U2,
"timeTolerance": U2,
"messageCfg": (
X2,
{
"measInternal": U1,
"measGNSS": U1,
"measEXTINT0": U1,
"measEXTINT1": U1,
},
),
"maxSlewRate": U2,
"flags": (
X4,
{
"disableInternal": U1,
"disableExternal": U1,
"preferenceMode": U1,
"enableGNSS": U1,
"enableEXTINT0": U1,
"enableEXTINT1": U1,
"enableHostMeasInt": U1,
"enableHostMeasExt": U1,
"reserved1": U2,
"useAnyFix": U1,
"disableMaxSlewRate": U1,
"issueFreqWarning": U1,
"issueTimeWarning": U1,
"TPCoherent": U2,
"disableOffset": U1,
},
),
},
"CFG-SPT": {
"version": U1,
"reserved0": U1,
"sensorId": U2,
"reserved1": U8,
},
"CFG-TMODE2": {
"timeMode": U1,
"reserved1": U1,
"flags": (
X2,
{
"lla": U1,
"altInv": U1,
},
),
"ecefXOrLat": I4,
"ecefYOrLon": I4,
"ecefZOrAlt": I4,
"fixedPosAcc": U4,
"svinMinDur": U4,
"svinAccLimit": U4,
},
"CFG-TMODE3": {
"version": U1,
"reserved0": U1,
"flags": (
X2,
{
"rcvrMode": U8,
"lla": U1,
},
),
"ecefXOrLat": I4,
"ecefYOrLon": I4,
"ecefZOrAlt": I4,
"ecefXOrLatHP": I1,
"ecefYOrLonHP": I1,
"ecefZOrAltHP": I1,
"reserved1": U1,
"fixedPosAcc": U4,
"svinMinDur": U4,
"svinAccLimit": U4,
"reserved2": U8,
},
"CFG-TP5": {
"tpIdx": U1,
"reserved0": U1,
"reserved1": U2,
"antCableDelay": I2,
"rfGroupDelay": I2,
"freqPeriod": U4,
"freqPeriodLock": U4,
"pulseLenRatio": U4,
"pulseLenRatioLock": U4,
"userConfigDelay": I4,
"flags": (
X4,
{
"active": U1,
"lockGnssFreq": U1,
"lockedOtherSet": U1,
"isFreq": U1,
"isLength": U1,
"alignToTow": U1,
"polarity": U1,
"gridUtcGnss": U4,
"syncMode": U3,
},
),
},
"CFG-TXSLOT": {
"version": U1,
"enable": (
X1,
{
"enableDDC": U1,
"enableUART1": U1,
"enableUART2": U1,
"enableUSB": U1,
"enableSPI": U1,
},
),
"refTp": U1,
"reserved1": U1,
"end1": U4,
"end2": U4,
"end3": U4,
},
"CFG-USB": {
"vendorID": U2,
"productID": U2,
"reserved1": U2,
"reserved2": U2,
"powerConsumpt": U2,
"flags": (
X2,
{
"reEnum": U1,
"powerMode": U1,
},
),
"vendorString": C32,
"productString": C32,
"serialNumber": C32,
},
"CFG-VALDEL": {
"version": U1, # = 0 no transaction, 1 with transaction
"layers": X1,
"transaction": X1, # if version = 1, else reserved
"reserved0": U1,
"group": ("None", {"keys": U4}), # repeating group
},
"CFG-VALSET": {
"version": U1, # = 0 no transaction, 1 with transaction
"layers": X1,
"transaction": U1, # if version = 1, else reserved
"reserved0": U1,
"group": ("None", {"cfgData": U1}), # repeating group
},
# ********************************************************************
# External Sensor Fusion Messages: i.e. External Sensor Measurements and Status Information.
# Messages in the ESF class are used to output external sensor fusion information from the receiver.
"ESF-MEAS": { # this version used when bit 3 of flags = 0
"timeTag": U4,
"flags": X2,
"id": U2,
"group": (
"None",
{ # repeating group * numMeas, which is bits 11..15 in flags
"data": X4,
},
),
},
"ESF-MEAS-CT": { # this version used when bit 3 of flags = 1
"timeTag": U4,
"flags": X2,
"id": U2,
"group": (
"ESF-MEAS-CT",
{ # repeating group * numMeas, which is bits 11..15 of flags
"data": X4,
},
),
"calibTtag": U4,
},
# ********************************************************************
# Logging Messages: i.e. Log creation, deletion, info and retrieval.
# Messages in the LOG class are used to configure and report status information of the logging feature.
"LOG-CREATE": {
"version": U1,
"logCfg": X1,
"reserved1": U1,
"logSize": U1,
"userDefinedSize": U4,
},
"LOG-ERASE": {},
"LOG-FINDTIME": {
"version": U1,
"type": U1,
"year": U2,
"month": U1,
"day": U1,
"hour": U1,
"minute": U1,
"second": U1,
"reserved1": U1,
},
"LOG-RETRIEVE": {
"startNumber": U4,
"entryCount": U4,
"version": U1,
"reserved": U3,
},
"LOG-RETRIEVEBATCH": {
"version": U1,
"flags": X1,
"reserved0": U2,
},
"LOG-STRING": {"group": ("None", {"bytes": U1})}, # repeating group
# ********************************************************************
# Multiple GNSS Assistance Messages: i.e. Assistance data for various GNSS.
# Messages in the MGA class are used for GNSS aiding information from and to the receiver.
"MGA-ANO": {
"type": U1,
"version": U1,
"svId": U1,
"gnssId": U1,
"year": U1,
"month": U1,
"day": U1,
"reserved1": U1,
"data": U64,
"reserved2": U4,
},
"MGA-BDS-EPH": {
"type": U1,
"version": U1,
"svId": U1,
"reserved1": U1,
"SatH1": U1,
"IODC": U1,
"a2": I2,
"a1": I4,
"a0": I4,
"toc": U4,
"TGD1": I2,
"URAI": U1,
"IODE": U1,
"toe": U4,
"sqrtA": U4,
"e": U4,
"omega": I4,
"Deltan": I2,
"IDOT": I2,
"M0": I4,
"Omega0": I4,
"OmegaDot": I4,
"i0": I4,
"Cuc": I4,
"Cus": I4,
"Crc": I4,
"Crs": I4,
"Cic": I4,
"Cis": I4,
"reserved2": U4,
},
"MGA-BDS-ALM": {
"type": U1,
"version": U1,
"svId": U1,
"reserved1": U1,
"Wna": U1,
"toa": U1,
"deltaI": I2,
"sqrtA": U4,
"e": U4,
"omega": I4,
"M0": I4,
"Omega0": I4,
"omegaDot": I4,
"a0": I2,
"a1": I2,
"reserved2": U4,
},
"MGA-BDS-HEALTH": {
"type": U1, # 0x04
"version": U1,
"reserved0": U2,
"grouphealthcode": (
30,
{
"healthCode": U2,
},
), # repeating group * 30
"reserved1": U4,
},
"MGA-BDS-UTC": {
"type": U1, # 0x05
"version": U1,
"reserved1": U2,
"a0UTC": I4,
"a1UTC": I4,
"dtLS": I1,
"reserved2": U1,
"wnRec": U1,
"wnLSF": U1,
"dN": U1,
"dtLSF": I1,
"reserved3": U2,
},
"MGA-BDS-IONO": {
"type": U1, # 0x06
"version": U1,
"reserved1": U2,
"alpha0": I1,
"alpha1": I1,
"alpha2": I1,
"alpha3": I1,
"beta0": I1,
"beta1": I1,
"beta2": I1,
"beta3": I1,
"reserved2": U4,
},
"MGA-FLASH-DATA": {
"type": U1,
"version": U1,
"sequence": U2,
"size": U2,
"group": ("size", {"data": U1}), # repeating group * size
},
"MGA-FLASH-STOP": {"type": U1, "version": U1},
"MGA-GAL-EPH": {
"type": U1,
"version": U1,
"svId": U1,
"reserved1": U1,
"iodNav": U2,
"deltaN": I2,
"m0": I4,
"e": U4,
"sqrtA": U4,
"omega0": I4,
"i0": I4,
"omega": I4,
"omegaDot": I4,
"iDot": I2,
"cuc": I2,
"cus": I2,
"crc": I2,
"crs": I2,
"cic": I2,
"cis": I2,
"toe": U2,
"af0": I4,
"af1": I4,
"af2": I1,
"sisaIndexE1E5b": U1,
"toc": U2,
"bgdE1E5b": I2,
"reserved2": U2,
"healthE1B": U1,
"dataValidityE1B": U1,
"healthE5b": U1,
"dataValidityE5b": U1,
"reserved3": U4,
},
"MGA-GAL-ALM": {
"type": U1,
"version": U1,
"svId": U1,
"reserved1": U1,
"ioda": U1,
"almWNa": U1,
"toa": U2,
"deltaSqrtA": I2,
"e": U2,
"deltaI": I2,
"omega0": I2,
"omegaDot": I2,
"omega": I2,
"m0": I2,
"af0": I2,
"af1": I2,
"healthE1B": U1,
"healthE5b": U1,
"reserved2": U4,
},
"MGA-GAL-TIMEOFFSET": {
"type": U1,
"version": U1,
"reserved1": U2,
"a0G": I2,
"a1G": I2,
"t0G": U1,
"wn0G": U1,
"reserved2": U2,
},
"MGA-GAL-UTC": {
"type": U1,
"version": U1,
"reserved1": U2,
"a0": I4,
"a1": I4,
"dtLS": I1,
"tot": U1,
"wnt": U1,
"wnLSF": U1,
"dN": U1,
"dTLSF": I1,
"reserved2": U2,
},
"MGA-GLO-EPH": {
"type": U1,
"version": U1,
"svId": U1,
"reserved1": U1,
"FT": U1,
"B": U1,
"M": U1,
"H": I1,
"x": I4,
"y": I4,
"z": I4,
"dx": I4,
"dy": I4,
"dz": I4,
"ddx": I1,
"ddy": I1,
"ddz": I1,
"tb": U1,
"gamma": I2,
"E": U1,
"deltaTau": I1,
"tau": I4,
"reserved2": U4,
},
"MGA-GLO-ALM": {
"type": U1,
"version": U1,
"svId": U1,
"reserved1": U1,
"N": U2,
"M": U1,
"C": U1,
"tau": I2,
"epsilon": U2,
"lambda": I4,
"deltaI": I4,
"tLambda": U4,
"deltaT": I4,
"deltaDT": I1,
"H": I1,
"omega": I2,
"reserved2": U4,
},
"MGA-GLO-TIMEOFFSET": {
"type": U1,
"version": U1,
"N": U2,
"tauC": I4,
"tauGps": I4,
"B1": I2,
"B2": I2,
"reserved1": U4,
},
"MGA-GPS-EPH": {
"type": U1,
"version": U1,
"svId": U1,
"reserved1": U1,
"fitInterval": U1,
"uraIndex": U1,
"svHealth": U1,
"tgd": I1,
"iodc": U2,
"toc": U2,
"reserved2": U1,
"af2": I1,
"af1": I2,
"af0": I4,
"crs": I2,
"deltaN": I2,
"m0": I4,
"cuc": I2,
"cus": I2,
"e": U4,
"sqrtA": U4,
"toe": U2,
"cic": I2,
"omega0": I4,
"cis": I2,
"crc": I2,
"i0": I4,
"omega": I4,
"omegaDot": I4,
"idot": I2,
"reserved3": U4,
},
"MGA-GPS-ALM": {
"type": U1,
"version": U1,
"svId": U1,
"svHealth": U1,
"e": U2,
"almWNa": U1,
"toa": U1,
"deltaI": I2,
"omegaDot": I2,
"sqrtA": U4,
"omega0": I4,
"omega": I4,
"m0": I4,
"af0": I2,
"af1": I2,
"reserved1": U4,
},
"MGA-GPS-HEALTH": {
"type": U1,
"version": U1,
"reserved0": U2,
"grouphealthcode": (
32,
{
"healthCode": U1,
},
), # repeating group * 32
"reserved1": U4,
},
"MGA-GPS-UTC": {
"type": U1,
"version": U1,
"reserved1": U2,
"utcA0": I4,
"utcA1": I4,
"utcDtLS": I1,
"utcTot": U1,
"utcWNt": U1,
"utcWNlsf": U1,
"utcDn": U1,
"utcDtLSF": I1,
"reserved2": U2,
},
"MGA-GPS-IONO": {
"type": U1,
"version": U1,
"reserved1": U2,
"ionoAlpha0": I1,
"ionoAlpha1": I1,
"ionoAlpha2": I1,
"ionoAlpha3": I1,
"ionoBeta0": I1,
"ionoBeta1": I1,
"ionoBeta2": I1,
"ionoBeta3": I1,
"reserved2": U4,
},
"MGA-INI-POS_XYZ": {
"type": U1,
"version": U1,
"reserved1": U2,
"ecefX": I4,
"ecefY": I4,
"ecefZ": I4,
"posAcc": U4,
},
"MGA-INI-POS_LLH": {
"type": U1,
"version": U1,
"reserved1": U2,
"lat": I4,
"lon": I4,
"alt": I4,
"posAcc": U4,
},
"MGA-INI-TIME_UTC": {
"type": U1,
"version": U1,
"ref": X1,
"leapSecs": I1,
"year": U2,
"month": U1,
"day": U1,
"hour": U1,
"minute": U1,
"second": U1,
"reserved1": U1,
"ns": U4,
"tAccS": U2,
"reserved2": U2,
"tAccNs": U4,
},
"MGA-INI-TIME_GNSS": {
"type": U1,
"version": U1,
"ref": X1,
"gnssId": U1,
"reserved1": U2,
"week": U2,
"tow": U4,
"ns": U4,
"tAccS": U2,
"reserved2": U2,
"tAccNs": U4,
},
"MGA-INI-CLKD": {
"type": U1,
"version": U1,
"reserved1": U2,
"clkD": I4,
"clkDAcc": U4,
},
"MGA-INI-FREQ": {
"type": U1,
"version": U1,
"reserved1": U1,
"flags": X1,
"freq": I4,
"freqAcc": U4,
},
"MGA-INI-EOP": {
"type": U1,
"version": U1,
"reserved1": U2,
"d2kRef": U2,
"d2kMax": U2,
"xpP0": I4,
"xpP1": I4,
"ypP0": I4,
"ypP1": I4,
"dUT1": I4,
"ddUT1": I4,
"reserved2": U40,
},
"MGA-QZSS-EPH": {
"type": U1,
"version": U1,
"svId": U1,
"reserved1": U1,
"fitInterval": U1,
"uraIndex": U1,
"svHealth": U1,
"tgd": I1,
"iodc": U2,
"toc": U2,
"reserved2": U1,
"af2": I1,
"af1": I2,
"af0": I4,
"crs": I2,
"deltaN": I2,
"m0": I4,
"cuc": I2,
"cus": I2,
"e": U4,
"sqrtA": U4,
"toe": U2,
"cic": I2,
"omega0": I4,
"cis": I2,
"crc": I2,
"i0": I4,
"omega": I4,
"omegaDot": I4,
"idot": I2,
"reserved3": U2,
},
"MGA-QZSS-ALM": {
"type": U1,
"version": U1,
"svId": U1,
"svHealth": U1,
"e": U2,
"almWNa": U1,
"toa": U1,
"deltaI": I2,
"omegaDot": I2,
"sqrtA": U4,
"omega0": I4,
"omega": I4,
"m0": I4,
"af0": I2,
"af1": I2,
"reserved1": U4,
},
"MGA-QZSS-HEALTH": {
"type": U1,
"version": U1,
"reserved0": U2,
"grouphealthcode": (
5,
{
"healthCode": U1,
},
), # repeating group * 5
"reserved1": U3,
},
# ********************************************************************
# Navigation Results Messages: i.e. Position, Speed, Time, Acceleration, Heading, DOP, SVs used.
# Messages in the NAV class are used to output navigation data such as position, altitude and velocity in a
# number of formats. Additionally, status flags and accuracy figures are output. The messages are generated with
# the configured navigation/measurement rate.
"NAV-RESETODO": {},
# ********************************************************************
# Receiver Manager Messages: i.e. Satellite Status, RTC Status.
# Messages in the RXM class are used to output status and result data from the Receiver Manager. The output
# rate is not bound to the navigation/measurement rate and messages can also be generated on events.
"RXM-PMREQ-S": {
"duration": U4,
"flags": X4,
}, # this appears to be a deprecated version
"RXM-PMREQ": {
"version": U1, # 0x00
"reserved0": U3,
"duration": U4,
"flags": X4,
"wakeupSources": X4,
},
# ********************************************************************
# Timing Messages: i.e. Time Pulse Output, Time Mark Results.
# Messages in the TIM class are used to output timing information from the receiver, like Time Pulse and Time
# Mark measurements.
"TIM-HOC": {"version": U1, "oscId": U1, "flags": U1, "reserved1": U1, "value": I4},
"TIM-VCOCAL": {
"type": U1,
"version": U1,
"oscId": U1,
"reserved1": U3,
"gainUncertainty": U2,
"gainVco": I4,
},
# ********************************************************************
# Firmware Update Messages: i.e. Memory/Flash erase/write, Reboot, Flash identification, etc..
# Messages in the UPD class are used to update the firmware and identify any attached flash device.
"UPD-SOS": {"cmd": U1, "reserved1": U3}, # Create or clear backup in flash
}
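# Hedged usage sketch (illustrative, not part of the payload table): a SET
# message whose payload is defined above can typically be built and serialised
# with pyubx2's UBXMessage, using keyword names that match the keys of the
# corresponding definition, e.g. for UBX_PAYLOADS_SET["CFG-MSG"]:
#
#   from pyubx2 import UBXMessage, SET
#   msg = UBXMessage("CFG", "CFG-MSG", SET, msgClass=0x01, msgID=0x07, rateUART1=1)
#   raw = msg.serialize()
#
# (Kept as a comment to avoid importing UBXMessage from within this
# definitions module.)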
|
py | 1a3cf033267701cc869a38757537f19c4d2e446c | # Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import random
import uuid
import numpy as np
from mars.errors import StorageFull
from mars.graph import DAG
from mars.utils import get_next_port, serialize_graph
from mars.scheduler import ChunkMetaActor
from mars.scheduler.utils import SchedulerClusterInfoActor
from mars.tests.core import patch_method
from mars.worker import WorkerDaemonActor, DispatchActor, StorageManagerActor, \
CpuCalcActor, IORunnerActor, PlasmaKeyMapActor, SharedHolderActor, \
InProcHolderActor, QuotaActor, MemQuotaActor, StatusActor
from mars.worker.storage import DataStorageDevice
from mars.worker.storage.sharedstore import PlasmaSharedStore
from mars.worker.tests.base import WorkerCase
from mars.worker.utils import build_quota_key, WorkerClusterInfoActor
class Test(WorkerCase):
@contextlib.contextmanager
def _start_calc_pool(self):
mock_addr = f'127.0.0.1:{get_next_port()}'
with self.create_pool(n_process=1, backend='gevent', address=mock_addr) as pool:
pool.create_actor(SchedulerClusterInfoActor, [mock_addr],
uid=SchedulerClusterInfoActor.default_uid())
pool.create_actor(WorkerClusterInfoActor, [mock_addr],
uid=WorkerClusterInfoActor.default_uid())
pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_uid())
pool.create_actor(StatusActor, mock_addr, uid=StatusActor.default_uid())
pool.create_actor(PlasmaKeyMapActor, uid=PlasmaKeyMapActor.default_uid())
pool.create_actor(WorkerDaemonActor, uid=WorkerDaemonActor.default_uid())
pool.create_actor(DispatchActor, uid=DispatchActor.default_uid())
pool.create_actor(StorageManagerActor, uid=StorageManagerActor.default_uid())
pool.create_actor(IORunnerActor)
pool.create_actor(QuotaActor, 1024 ** 2, uid=MemQuotaActor.default_uid())
shared_holder_ref = pool.create_actor(
SharedHolderActor, uid=SharedHolderActor.default_uid())
pool.create_actor(InProcHolderActor)
pool.create_actor(CpuCalcActor, uid=CpuCalcActor.default_uid())
with self.run_actor_test(pool) as test_actor:
try:
yield pool, test_actor
finally:
shared_holder_ref.destroy()
@staticmethod
def _build_test_graph(data_list):
from mars.tensor.fetch import TensorFetch
from mars.tensor.arithmetic import TensorTreeAdd
inputs = []
for idx, d in enumerate(data_list):
chunk_key = f'chunk-{random.randint(0, 999)}-{idx}'
fetch_chunk = TensorFetch(to_fetch_key=chunk_key, dtype=d.dtype) \
.new_chunk([], shape=d.shape, _key=chunk_key)
inputs.append(fetch_chunk)
add_chunk = TensorTreeAdd(data_list[0].dtype).new_chunk(inputs, shape=data_list[0].shape)
exec_graph = DAG()
exec_graph.add_node(add_chunk)
for input_chunk in inputs:
exec_graph.add_node(input_chunk)
exec_graph.add_edge(input_chunk, add_chunk)
return exec_graph, inputs, add_chunk
def testCpuCalcSingleFetches(self):
import gc
with self._start_calc_pool() as (_pool, test_actor):
quota_ref = test_actor.promise_ref(MemQuotaActor.default_uid())
calc_ref = test_actor.promise_ref(CpuCalcActor.default_uid())
session_id = str(uuid.uuid4())
data_list = [np.random.random((10, 10)) for _ in range(3)]
exec_graph, fetch_chunks, add_chunk = self._build_test_graph(data_list)
storage_client = test_actor.storage_client
for fetch_chunk, d in zip(fetch_chunks, data_list):
self.waitp(
storage_client.put_objects(
session_id, [fetch_chunk.key], [d], [DataStorageDevice.SHARED_MEMORY]),
)
self.assertEqual(list(storage_client.get_data_locations(session_id, [fetch_chunks[0].key])[0]),
[(0, DataStorageDevice.SHARED_MEMORY)])
quota_batch = {
build_quota_key(session_id, add_chunk.key, add_chunk.op.key): data_list[0].nbytes,
}
for idx in [1, 2]:
quota_batch[build_quota_key(session_id, fetch_chunks[idx].key, add_chunk.op.key)] \
= data_list[idx].nbytes
self.waitp(
storage_client.copy_to(session_id, [fetch_chunks[idx].key], [DataStorageDevice.DISK])
.then(lambda *_: storage_client.delete(
session_id, [fetch_chunks[idx].key], [DataStorageDevice.SHARED_MEMORY]))
)
self.assertEqual(
list(storage_client.get_data_locations(session_id, [fetch_chunks[idx].key])[0]),
[(0, DataStorageDevice.DISK)])
self.waitp(
quota_ref.request_batch_quota(quota_batch, _promise=True),
)
o_create = PlasmaSharedStore.create
def _mock_plasma_create(store, session_id, data_key, size):
if data_key == fetch_chunks[2].key:
raise StorageFull
return o_create(store, session_id, data_key, size)
id_type_set = set()
def _extract_value_ref(*_):
inproc_handler = storage_client.get_storage_handler((0, DataStorageDevice.PROC_MEMORY))
obj = inproc_handler.get_objects(session_id, [add_chunk.key])[0]
id_type_set.add((id(obj), type(obj)))
del obj
with patch_method(PlasmaSharedStore.create, _mock_plasma_create):
self.waitp(
calc_ref.calc(session_id, add_chunk.op.key, serialize_graph(exec_graph),
[add_chunk.key], _promise=True)
.then(_extract_value_ref)
.then(lambda *_: calc_ref.store_results(
session_id, add_chunk.op.key, [add_chunk.key], None, _promise=True))
)
self.assertTrue(all((id(obj), type(obj)) not in id_type_set
for obj in gc.get_objects()))
self.assertEqual(sorted(storage_client.get_data_locations(session_id, [fetch_chunks[0].key])[0]),
[(0, DataStorageDevice.SHARED_MEMORY)])
self.assertEqual(sorted(storage_client.get_data_locations(session_id, [fetch_chunks[1].key])[0]),
[(0, DataStorageDevice.DISK)])
self.assertEqual(sorted(storage_client.get_data_locations(session_id, [fetch_chunks[2].key])[0]),
[(0, DataStorageDevice.DISK)])
self.assertEqual(sorted(storage_client.get_data_locations(session_id, [add_chunk.key])[0]),
[(0, DataStorageDevice.SHARED_MEMORY)])
def testCpuCalcErrorInRunning(self):
with self._start_calc_pool() as (_pool, test_actor):
calc_ref = test_actor.promise_ref(CpuCalcActor.default_uid())
session_id = str(uuid.uuid4())
data_list = [np.random.random((10, 10)) for _ in range(2)]
exec_graph, fetch_chunks, add_chunk = self._build_test_graph(data_list)
storage_client = test_actor.storage_client
for fetch_chunk, d in zip(fetch_chunks, data_list):
self.waitp(
storage_client.put_objects(
session_id, [fetch_chunk.key], [d], [DataStorageDevice.SHARED_MEMORY]),
)
def _mock_calc_results_error(*_, **__):
raise ValueError
with patch_method(CpuCalcActor._calc_results, _mock_calc_results_error), \
self.assertRaises(ValueError):
self.waitp(
calc_ref.calc(session_id, add_chunk.op.key, serialize_graph(exec_graph),
[add_chunk.key], _promise=True)
.then(lambda *_: calc_ref.store_results(
session_id, add_chunk.op.key, [add_chunk.key], None, _promise=True))
)
def testDestroyCalcActor(self):
import gevent.event
with self._start_calc_pool() as (_pool, test_actor):
calc_ref = _pool.actor_ref(CpuCalcActor.default_uid())
calc_ref.mark_destroy()
gevent.sleep(0.8)
self.assertFalse(_pool.has_actor(calc_ref))
with self._start_calc_pool() as (_pool, test_actor):
calc_ref = test_actor.promise_ref(CpuCalcActor.default_uid())
session_id = str(uuid.uuid4())
data_list = [np.random.random((10, 10)) for _ in range(2)]
exec_graph, fetch_chunks, add_chunk = self._build_test_graph(data_list)
exec_graph2, fetch_chunks2, add_chunk2 = self._build_test_graph(data_list[::-1])
storage_client = test_actor.storage_client
for fetch_chunk, d in zip(fetch_chunks, data_list):
self.waitp(
storage_client.put_objects(
session_id, [fetch_chunk.key], [d], [DataStorageDevice.SHARED_MEMORY]),
)
for fetch_chunk2, d in zip(fetch_chunks2, data_list[::-1]):
self.waitp(
storage_client.put_objects(
session_id, [fetch_chunk2.key], [d], [DataStorageDevice.SHARED_MEMORY]),
)
orig_calc_results = CpuCalcActor._calc_results
start_event = gevent.event.Event()
def _mock_calc_delayed(actor_obj, *args, **kwargs):
start_event.set()
gevent.sleep(1)
return orig_calc_results(actor_obj, *args, **kwargs)
with patch_method(CpuCalcActor._calc_results, _mock_calc_delayed):
p = calc_ref.calc(session_id, add_chunk.op.key, serialize_graph(exec_graph),
[add_chunk.key], _promise=True) \
.then(lambda *_: calc_ref.store_results(
session_id, add_chunk.op.key, [add_chunk.key], None, _promise=True))
start_event.wait()
calc_ref.mark_destroy()
p2 = calc_ref.calc(session_id, add_chunk2.op.key, serialize_graph(exec_graph2),
[add_chunk2.key], _promise=True) \
.then(lambda *_: calc_ref.store_results(
session_id, add_chunk2.op.key, [add_chunk2.key], None, _promise=True))
self.assertTrue(_pool.has_actor(calc_ref._ref))
self.waitp(p)
self.waitp(p2)
gevent.sleep(0.8)
self.assertFalse(_pool.has_actor(calc_ref._ref))
|
py | 1a3cf05868b50d0e9897df7d27f2d5c6e182a31c | """distutils.cygwinccompiler
Provides the CygwinCCompiler class, a subclass of UnixCCompiler that
handles the Cygwin port of the GNU C compiler to Windows. It also contains
the Mingw32CCompiler class which handles the mingw32 port of GCC (same as
cygwin in no-cygwin mode).
"""
# problems:
#
# * if you use a msvc compiled python version (1.5.2)
# 1. you have to insert a __GNUC__ section in its config.h
# 2. you have to generate an import library for its dll
# - create a def-file for python??.dll
# - create a import library using
# dlltool --dllname python15.dll --def python15.def \
# --output-lib libpython15.a
#
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
#
# * We put export_symbols in a def-file, and don't use
# --export-all-symbols because it didn't work reliably in some
# tested configurations. And because other windows compilers also
# need their symbols specified, this is no serious problem.
#
# tested configurations:
#
# * cygwin gcc 2.91.57/ld 2.9.4/dllwrap 0.2.4 works
# (after patching python's config.h and for C++ some other include files)
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
# * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works
# (ld doesn't support -shared, so we use dllwrap)
# * cygwin gcc 2.95.2/ld 2.10.90/dllwrap 2.10.90 works now
# - its dllwrap doesn't work, there is a bug in binutils 2.10.90
# see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html
# - using gcc -mdll instead of dllwrap doesn't work without -static because
# it tries to link against dlls instead of their import libraries. (If
# it finds the dll first.)
# By specifying -static we force ld to link against the import libraries;
# this is the Windows standard, and the dlls normally do not contain the
# necessary symbols anyway.
# *** only the version of June 2000 shows these problems
# * cygwin gcc 3.2/ld 2.13.90 works
# (ld supports -shared)
# * mingw gcc 3.2/ld 2.13 works
# (ld supports -shared)
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: cygwinccompiler.py 73349 2009-06-11 09:17:19Z tarek.ziade $"
import os,sys,copy
from distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.unixccompiler import UnixCCompiler
from distutils.file_util import write_file
from distutils.errors import DistutilsExecError, CompileError, UnknownFileError
from distutils import log
def get_msvcr():
"""Include the appropriate MSVC runtime library if Python was built
with MSVC 7.0 or later.
"""
msc_pos = sys.version.find('MSC v.')
if msc_pos != -1:
msc_ver = sys.version[msc_pos+6:msc_pos+10]
if msc_ver == '1300':
# MSVC 7.0
return ['msvcr70']
elif msc_ver == '1310':
# MSVC 7.1
return ['msvcr71']
elif msc_ver == '1400':
# VS2005 / MSVC 8.0
return ['msvcr80']
elif msc_ver == '1500':
# VS2008 / MSVC 9.0
return ['msvcr90']
else:
raise ValueError("Unknown MS Compiler version %s " % msc_ver)
class CygwinCCompiler (UnixCCompiler):
compiler_type = 'cygwin'
obj_extension = ".o"
static_lib_extension = ".a"
shared_lib_extension = ".dll"
static_lib_format = "lib%s%s"
shared_lib_format = "%s%s"
exe_extension = ".exe"
def __init__ (self, verbose=0, dry_run=0, force=0):
UnixCCompiler.__init__ (self, verbose, dry_run, force)
(status, details) = check_config_h()
self.debug_print("Python's GCC status: %s (details: %s)" %
(status, details))
if status is not CONFIG_H_OK:
self.warn(
"Python's pyconfig.h doesn't seem to support your compiler. "
"Reason: %s. "
"Compiling may fail because of undefined preprocessor macros."
% details)
self.gcc_version, self.ld_version, self.dllwrap_version = \
get_versions()
self.debug_print(self.compiler_type + ": gcc %s, ld %s, dllwrap %s\n" %
(self.gcc_version,
self.ld_version,
self.dllwrap_version) )
# ld_version >= "2.10.90" and < "2.13" should also be able to use
# gcc -mdll instead of dllwrap
        # Older dllwraps had their own version numbers, newer ones use the
# same as the rest of binutils ( also ld )
# dllwrap 2.10.90 is buggy
if self.ld_version >= "2.10.90":
self.linker_dll = "gcc"
else:
self.linker_dll = "dllwrap"
# ld_version >= "2.13" support -shared so use it instead of
# -mdll -static
if self.ld_version >= "2.13":
shared_option = "-shared"
else:
shared_option = "-mdll -static"
# Hard-code GCC because that's what this is all about.
# XXX optimization, warnings etc. should be customizable.
self.set_executables(compiler='gcc -mcygwin -O -Wall',
compiler_so='gcc -mcygwin -mdll -O -Wall',
compiler_cxx='g++ -mcygwin -O -Wall',
linker_exe='gcc -mcygwin',
linker_so=('%s -mcygwin %s' %
(self.linker_dll, shared_option)))
# cygwin and mingw32 need different sets of libraries
if self.gcc_version == "2.91.57":
            # cygwin shouldn't need msvcrt, but without it the dlls will crash
# (gcc version 2.91.57) -- perhaps something about initialization
self.dll_libraries=["msvcrt"]
self.warn(
"Consider upgrading to a newer version of gcc")
else:
# Include the appropriate MSVC runtime library if Python was built
# with MSVC 7.0 or later.
self.dll_libraries = get_msvcr()
# __init__ ()
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
if ext == '.rc' or ext == '.res':
# gcc needs '.res' and '.rc' compiled to object files !!!
try:
self.spawn(["windres", "-i", src, "-o", obj])
except DistutilsExecError, msg:
raise CompileError, msg
else: # for other files use the C-compiler
try:
self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
extra_postargs)
except DistutilsExecError, msg:
raise CompileError, msg
def link (self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
# use separate copies, so we can modify the lists
extra_preargs = copy.copy(extra_preargs or [])
libraries = copy.copy(libraries or [])
objects = copy.copy(objects or [])
# Additional libraries
libraries.extend(self.dll_libraries)
# handle export symbols by creating a def-file
# with executables this only works with gcc/ld as linker
if ((export_symbols is not None) and
(target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
# (The linker doesn't do anything if output is up-to-date.
            # So it would probably be better to check if we really need this,
# but for this we had to insert some unchanged parts of
# UnixCCompiler, and this is not what we want.)
# we want to put some files in the same directory as the
# object files are, build_temp doesn't help much
# where are the object files
temp_dir = os.path.dirname(objects[0])
# name of dll to give the helper files the same base name
(dll_name, dll_extension) = os.path.splitext(
os.path.basename(output_filename))
# generate the filenames for these files
def_file = os.path.join(temp_dir, dll_name + ".def")
lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a")
# Generate .def file
contents = [
"LIBRARY %s" % os.path.basename(output_filename),
"EXPORTS"]
for sym in export_symbols:
contents.append(sym)
self.execute(write_file, (def_file, contents),
"writing %s" % def_file)
# next add options for def-file and to creating import libraries
# dllwrap uses different options than gcc/ld
if self.linker_dll == "dllwrap":
extra_preargs.extend(["--output-lib", lib_file])
# for dllwrap we have to use a special option
extra_preargs.extend(["--def", def_file])
# we use gcc/ld here and can be sure ld is >= 2.9.10
else:
# doesn't work: bfd_close build\...\libfoo.a: Invalid operation
#extra_preargs.extend(["-Wl,--out-implib,%s" % lib_file])
# for gcc/ld the def-file is specified as any object files
objects.append(def_file)
#end: if ((export_symbols is not None) and
# (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
# who wants symbols and a many times larger output file
# should explicitly switch the debug mode on
# otherwise we let dllwrap/ld strip the output file
# (On my machine: 10KB < stripped_file < ??100KB
# unstripped_file = stripped_file + XXX KB
# ( XXX=254 for a typical python extension))
if not debug:
extra_preargs.append("-s")
UnixCCompiler.link(self,
target_desc,
objects,
output_filename,
output_dir,
libraries,
library_dirs,
runtime_library_dirs,
None, # export_symbols, we do this in our def-file
debug,
extra_preargs,
extra_postargs,
build_temp,
target_lang)
# link ()
# -- Miscellaneous methods -----------------------------------------
# overwrite the one from CCompiler to support rc and res-files
def object_filenames (self,
source_filenames,
strip_dir=0,
output_dir=''):
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
# use normcase to make sure '.rc' is really '.rc' and not '.RC'
(base, ext) = os.path.splitext (os.path.normcase(src_name))
if ext not in (self.src_extensions + ['.rc','.res']):
raise UnknownFileError, \
"unknown file type '%s' (from '%s')" % \
(ext, src_name)
if strip_dir:
base = os.path.basename (base)
if ext == '.res' or ext == '.rc':
# these need to be compiled to object files
obj_names.append (os.path.join (output_dir,
base + ext + self.obj_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
# object_filenames ()
# class CygwinCCompiler
# the same as cygwin plus some additional parameters
class Mingw32CCompiler (CygwinCCompiler):
compiler_type = 'mingw32'
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
CygwinCCompiler.__init__ (self, verbose, dry_run, force)
# ld_version >= "2.13" support -shared so use it instead of
# -mdll -static
if self.ld_version >= "2.13":
shared_option = "-shared"
else:
shared_option = "-mdll -static"
# A real mingw32 doesn't need to specify a different entry point,
# but cygwin 2.91.57 in no-cygwin-mode needs it.
if self.gcc_version <= "2.91.57":
entry_point = '--entry _DllMain@12'
else:
entry_point = ''
self.set_executables(compiler='gcc -mno-cygwin -O -Wall',
compiler_so='gcc -mno-cygwin -mdll -O -Wall',
compiler_cxx='g++ -mno-cygwin -O -Wall',
linker_exe='gcc -mno-cygwin',
linker_so='%s -mno-cygwin %s %s'
% (self.linker_dll, shared_option,
entry_point))
# Maybe we should also append -mthreads, but then the finished
# dlls need another dll (mingwm10.dll see Mingw32 docs)
# (-mthreads: Support thread-safe exception handling on `Mingw32')
# no additional libraries needed
self.dll_libraries=[]
# Include the appropriate MSVC runtime library if Python was built
# with MSVC 7.0 or later.
self.dll_libraries = get_msvcr()
# __init__ ()
# class Mingw32CCompiler
# Because these compilers aren't configured in Python's pyconfig.h file by
# default, we should at least warn the user if he is using an unmodified
# version.
CONFIG_H_OK = "ok"
CONFIG_H_NOTOK = "not ok"
CONFIG_H_UNCERTAIN = "uncertain"
def check_config_h():
"""Check if the current Python installation (specifically, pyconfig.h)
appears amenable to building extensions with GCC. Returns a tuple
(status, details), where 'status' is one of the following constants:
CONFIG_H_OK
all is well, go ahead and compile
CONFIG_H_NOTOK
doesn't look good
CONFIG_H_UNCERTAIN
not sure -- unable to read pyconfig.h
'details' is a human-readable string explaining the situation.
Note there are two ways to conclude "OK": either 'sys.version' contains
the string "GCC" (implying that this Python was built with GCC), or the
installed "pyconfig.h" contains the string "__GNUC__".
"""
# XXX since this function also checks sys.version, it's not strictly a
# "pyconfig.h" check -- should probably be renamed...
from distutils import sysconfig
import string
# if sys.version contains GCC then python was compiled with
# GCC, and the pyconfig.h file should be OK
if string.find(sys.version,"GCC") >= 0:
return (CONFIG_H_OK, "sys.version mentions 'GCC'")
fn = sysconfig.get_config_h_filename()
try:
# It would probably be better to read single lines to search.
# But we do this only once, and it is fast enough
f = open(fn)
s = f.read()
f.close()
except IOError, exc:
# if we can't read this file, we cannot say it is wrong
# the compiler will complain later about this file as missing
return (CONFIG_H_UNCERTAIN,
"couldn't read '%s': %s" % (fn, exc.strerror))
else:
# "pyconfig.h" contains an "#ifdef __GNUC__" or something similar
if string.find(s,"__GNUC__") >= 0:
return (CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn)
else:
return (CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn)
def get_versions():
""" Try to find out the versions of gcc, ld and dllwrap.
If not possible it returns None for it.
"""
from distutils.version import LooseVersion
from distutils.spawn import find_executable
import re
gcc_exe = find_executable('gcc')
if gcc_exe:
out = os.popen(gcc_exe + ' -dumpversion','r')
out_string = out.read()
out.close()
result = re.search('(\d+\.\d+(\.\d+)*)',out_string)
if result:
gcc_version = LooseVersion(result.group(1))
else:
gcc_version = None
else:
gcc_version = None
ld_exe = find_executable('ld')
if ld_exe:
out = os.popen(ld_exe + ' -v','r')
out_string = out.read()
out.close()
result = re.search('(\d+\.\d+(\.\d+)*)',out_string)
if result:
ld_version = LooseVersion(result.group(1))
else:
ld_version = None
else:
ld_version = None
dllwrap_exe = find_executable('dllwrap')
if dllwrap_exe:
out = os.popen(dllwrap_exe + ' --version','r')
out_string = out.read()
out.close()
result = re.search(' (\d+\.\d+(\.\d+)*)',out_string)
if result:
dllwrap_version = LooseVersion(result.group(1))
else:
dllwrap_version = None
else:
dllwrap_version = None
return (gcc_version, ld_version, dllwrap_version)
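# Illustrative usage sketch (added comment, not part of the original module):
# the helpers above might be combined as follows to decide whether a GCC-based
# build looks viable; the variable names are hypothetical.
#
#   status, details = check_config_h()
#   if status != CONFIG_H_OK:
#       print "warning: %s" % details
#   gcc_v, ld_v, dllwrap_v = get_versions()
#   if ld_v is not None and ld_v >= "2.13":
#       shared_option = "-shared"
#   else:
#       shared_option = "-mdll -static"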
|
py | 1a3cf0628da1f2406422bd41d15c68e654342255 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.core.db import db
from indico.core.db.sqlalchemy.util.models import get_simple_column_attrs
from indico.core.db.sqlalchemy.util.session import no_autoflush
from indico.modules.events.cloning import EventCloner
from indico.modules.events.editing.models.file_types import EditingFileType
from indico.modules.events.editing.models.review_conditions import EditingReviewCondition
from indico.modules.events.editing.models.tags import EditingTag
from indico.modules.events.models.events import EventType
from indico.util.i18n import _
class EditingSettingsCloner(EventCloner):
name = 'editing_settings'
friendly_name = _('Editing (configured tags, file types, review conditions)')
new_event_only = True
@property
def is_visible(self):
return self.old_event.type_ == EventType.conference
@no_autoflush
def run(self, new_event, cloners, shared_data, event_exists=False):
self._filetype_map = {}
self._clone_tags(new_event)
self._clone_filetypes(new_event)
self._clone_review_conditions(new_event)
db.session.flush()
def _clone_tags(self, new_event):
attrs = get_simple_column_attrs(EditingTag)
for old_tag in self.old_event.editing_tags:
tag = EditingTag()
tag.populate_from_attrs(old_tag, attrs)
new_event.editing_tags.append(tag)
def _clone_filetypes(self, new_event):
attrs = get_simple_column_attrs(EditingFileType)
del new_event.editing_file_types[:]
db.session.flush()
for old_filetype in self.old_event.editing_file_types:
filetype = EditingFileType()
filetype.populate_from_attrs(old_filetype, attrs)
new_event.editing_file_types.append(filetype)
db.session.flush()
self._filetype_map[old_filetype] = filetype
def _clone_review_conditions(self, new_event):
old_conditions = EditingReviewCondition.query.with_parent(self.old_event).all()
for condition in old_conditions:
new_filetypes = {self._filetype_map[ft] for ft in condition.file_types}
new_condition = EditingReviewCondition(type=condition.type, file_types=new_filetypes)
new_event.editing_review_conditions.append(new_condition)
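# Note (added comment): review conditions reference EditingFileType objects, so
# the cloner first rebuilds the file types and records an old-to-new mapping in
# self._filetype_map; _clone_review_conditions then uses that mapping so every
# cloned EditingReviewCondition points at the file types of the new event rather
# than those of the old one.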
|
py | 1a3cf13755601ec7d0e5a018cc4a90ba33532737 | import sys
prons = set(["he", "they", "she", "we", "i"])
def get_wrd_label(toke):
"""
necessary b/c some words have /'s in them
"""
pieces = toke.split('/')
if len(pieces) == 2:
return pieces
else:
labe = pieces[-1]
wrd = "".join(pieces[:-1])
return wrd, labe
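# Illustrative example (added comment, not in the original script): a token such
# as "he/PRP" splits cleanly into the word "he" and the label "PRP", while a
# token with an extra slash, e.g. "w/o/IN", is handled by taking the last piece
# as the label and joining the rest, giving ("wo", "IN") -- note that the inner
# slash is not preserved by the join as written.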
def stupid_label(tokens):
"""
assumes tokens have NER tags
"""
labels = []
last_start, last_name = 0, None
prev_was_close = False # sometimes closes are used to start???
try:
first_open = tokens.index("``/O")
except ValueError:
first_open = len(tokens)
try:
first_close = tokens.index("''/O")
except ValueError:
first_close = len(tokens)
in_quote = first_close < first_open
for i, toke in enumerate(tokens):
if toke == "''/O" and not prev_was_close: # end quote
if tokens[i-1] != "./O":
# find closest name (w/in ten tokens, let's say)
name = None
for jj in xrange(i+1, min(i+1+10, len(tokens))):
wrd, labe = get_wrd_label(tokens[jj])
if labe == "PERSON" or wrd in prons:
name = wrd
break
else:
name = last_name
if name is not None:
labels.append((last_start, i, name))
last_name = name
in_quote = False
prev_was_close = True
elif toke == "``/O" or toke == "''/O" and prev_was_close: # start quote
in_quote = True
last_start = i
prev_was_close = False # hmmm
elif not in_quote and ("PERSON" in toke or toke.split('/')[0] in prons):
last_name = toke.split('/')[0]
if in_quote and last_name is not None:
labels.append((last_start, len(tokens)-1, last_name))
tokencopy = [toke for toke in tokens]
for (start, end, name) in labels:
for k in xrange(start+1, end):
if "|||" in tokens[k]:
print
print
print " ".join(tokencopy)
print labels
print k, tokens[k]
assert "|||" not in tokens[k]
assert "|||" not in name
tokens[k] = tokens[k] + "|||" + name
return tokens
for line in sys.stdin:
tokes = line.strip().split()
labeled = stupid_label(tokes)
print " ".join(labeled)
|
py | 1a3cf23dcf59c8c874f8b4aaa0196f6b3a60357e | # (c) Copyright IBM Corp. 2010, 2020. All Rights Reserved.
# Create a new associated incident
emailmessage.createAssociatedIncident("My new incident", "[email protected]")
# Incident can have further changes made to it.
incident.city = "Galway"
# You cannot call createAssociatedIncident twice
try:
emailmessage.createAssociatedIncident(
"My new incident 2", "[email protected]")
except Exception as e:
log.info(
"Scripting server caught the error as expected. Message {}".format(str(e)))
else:
helper.fail("Associating with Incident called twice did not throw an error.")
|
py | 1a3cf2ff4025272b334a0145b5bafd7b0c1fcf8d | #!/usr/bin/env python3
import argparse
import glob
import json
import logging
import os
import shlex
import subprocess
from pathlib import Path
def runBazelBuildForCompilationDatabase(bazel_options, bazel_targets):
query_targets = ' union '.join(bazel_targets)
query = ' union '.join(
q.format(query_targets) for q in [
'attr(include_prefix, ".+", kind(cc_library, deps({})))',
'attr(strip_include_prefix, ".+", kind(cc_library, deps({})))',
'attr(generator_function, ".*proto_library", kind(cc_.*, deps({})))',
])
build_targets = subprocess.check_output(["bazel", "query", "--notool_deps",
query]).decode().splitlines()
subprocess.check_call(["bazel", "build"] + bazel_options + build_targets)
# This method is equivalent to https://github.com/grailbio/bazel-compilation-database/blob/master/generate.sh
def generateCompilationDatabase(args):
# We need to download all remote outputs for generated source code. This option lives here to override those
# specified in bazelrc.
bazel_options = shlex.split(os.environ.get("BAZEL_BUILD_OPTIONS", "")) + [
"--config=compdb",
"--remote_download_outputs=all",
]
if args.keep_going:
bazel_options.append("-k")
if args.run_bazel_build:
try:
runBazelBuildForCompilationDatabase(bazel_options, args.bazel_targets)
except subprocess.CalledProcessError as e:
if not args.keep_going:
raise
else:
logging.warning("bazel build failed {}: {}".format(e.returncode, e.cmd))
subprocess.check_call(["bazel", "build"] + bazel_options + [
"--aspects=@bazel_compdb//:aspects.bzl%compilation_database_aspect",
"--output_groups=compdb_files"
] + args.bazel_targets)
execroot = subprocess.check_output(["bazel", "info", "execution_root"] +
bazel_options).decode().strip()
compdb = []
for compdb_file in Path(execroot).glob("**/*.compile_commands.json"):
compdb.extend(json.loads("[" + compdb_file.read_text().replace("__EXEC_ROOT__", execroot) +
"]"))
return compdb
def isHeader(filename):
for ext in (".h", ".hh", ".hpp", ".hxx"):
if filename.endswith(ext):
return True
return False
def isCompileTarget(target, args):
filename = target["file"]
if not args.include_headers and isHeader(filename):
return False
if not args.include_genfiles:
if filename.startswith("bazel-out/"):
return False
if not args.include_external:
if filename.startswith("external/"):
return False
return True
def modifyCompileCommand(target, args):
cc, options = target["command"].split(" ", 1)
# Workaround for Bazel-added C++11 options; they don't affect the build itself, but
# clang-tidy will misinterpret them.
options = options.replace("-std=c++0x ", "")
options = options.replace("-std=c++11 ", "")
if args.vscode:
# Visual Studio Code doesn't seem to like "-iquote". Replace it with
# old-style "-I".
options = options.replace("-iquote ", "-I ")
if isHeader(target["file"]):
options += " -Wno-pragma-once-outside-header -Wno-unused-const-variable"
options += " -Wno-unused-function"
target["command"] = " ".join([cc, options])
return target
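# Illustrative sketch (added comment; the file name and flags are hypothetical):
# for an entry whose command is "clang -std=c++11 -iquote include -c foo.cc",
# modifyCompileCommand with --vscode set rewrites the options to
# "-I include -c foo.cc"; if the entry's file is a header, the -Wno-* flags
# above are appended as well.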
def fixCompilationDatabase(args, db):
db = [modifyCompileCommand(target, args) for target in db if isCompileTarget(target, args)]
with open("compile_commands.json", "w") as db_file:
json.dump(db, db_file, indent=2)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Generate JSON compilation database')
parser.add_argument('--run_bazel_build', action='store_true')
parser.add_argument('-k', '--keep_going', action='store_true')
parser.add_argument('--include_external', action='store_true')
parser.add_argument('--include_genfiles', action='store_true')
parser.add_argument('--include_headers', action='store_true')
parser.add_argument('--vscode', action='store_true')
parser.add_argument('bazel_targets',
nargs='*',
default=["//source/...", "//test/...", "//tools/..."])
args = parser.parse_args()
fixCompilationDatabase(args, generateCompilationDatabase(args))
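# Example invocation (added comment, illustrative only; the script path below is
# an assumption, not taken from this file):
#
#   ./gen_compilation_database.py --run_bazel_build --vscode //source/...
#
# This builds the requested targets, collects the *.compile_commands.json
# fragments produced by the bazel-compdb aspect from the execution root, and
# writes a merged compile_commands.json into the current directory.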
|
py | 1a3cf30aac351ad8c39ddb4399ad4f31c81bdfb2 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 14 14:34:24 2022
@author: Manuel Huber
"""
import os.path
import multiprocessing
from multiprocessing import Process, Manager
import ee
import geemap
import numpy as np
Map = geemap.Map()
import matplotlib.pyplot as plt
from colour import Color
#from osgeo import gdal
import pandas as pd
import time
import os, glob
import progressbar
from osgeo import gdal
#########################################################################
def get_geotiff_gee(dataset,world,name, path, scale_x, name_save, tile_size):
sel_name = 'wld_rgn'  # or 'country_na' for country-level selection
conti = world.filter(ee.Filter.eq(sel_name, name)) # Select the right continent boundaries of the input name
sel_name = 'country_na'
features_country = np.unique(conti.aggregate_array(sel_name).getInfo()) # All countries in the selected continents/area
bar = progressbar.ProgressBar(maxval=len(features_country), \
widgets=[progressbar.Bar('=', '[', ']'), ' ', '{}'.format(name), progressbar.Percentage()])
bar.start()
# Looping through all countries individually as there are limitations on the "coveringGrid" function, which needs to put into a list:
for j in range(len(features_country)):
bar.update(j+1)
geometry = world.filter(ee.Filter.eq(sel_name, features_country[j]))
ROI = geometry.geometry()
data_pro = dataset.projection()
features = ROI.coveringGrid(data_pro,tile_size) # Set the size of the tiling, which will depend on the initial resolution set!
geometries_new = features.toList(5000)
for k in range(len(geometries_new.getInfo())):
roi =ee.Feature(geometries_new.getInfo()[k]).geometry()
##########!!!!!!!!!!!!!!! Depending on dataset!!!!!!!!!!!!!!!!!!!!############
dataset_2 = dataset.select('wat')
data = dataset_2.updateMask(dataset_2.eq(1)).clip(roi)
##########!!!!!!!!!!!!!!! Depending on dataset!!!!!!!!!!!!!!!!!!!!############
data_pro = data.projection() # Select projection of the image
# Force the next reprojection to aggregate instead of resampling.
new_area_count = data.reduceResolution(**{'reducer': ee.Reducer.count(),'bestEffort': True, 'maxPixels':65536}).reproject(data_pro,None, scale_x)
new_area_count_all = data.unmask().reduceResolution(**{'reducer': ee.Reducer.count(),'bestEffort': True, 'maxPixels':65536}).reproject(data_pro, None ,scale_x)
scaled_pixels =new_area_count.divide(new_area_count_all.divide(100)) # ((Sum of selected pixels)/Total_Count_Pixels)*100 To get percent
rio_pixels = scaled_pixels.clip(roi)
# Possibility to mask certain values etc.:
#imgUnmasked = rio_pixels.gt(0) #.select('b1')
#umasked_data = rio_pixels.updateMask(imgUnmasked)
if os.path.exists('{}/Image_Exported_{}_{}_{}_{}.tif'.format(path,scale_x,name_save,j,k)) == False:
geemap.ee_export_image(rio_pixels , filename='{}/Image_Exported_{}_{}_{}_{}.tif'.format(path,scale_x,name_save,j,k), scale= scale_x, region = ROI)
#print(name_save, features_country[j], k)
#else:
# print('This file already exists: ',name_save,k,features_country[j])
if os.path.exists('{}/Image_Exported_{}_{}_{}_{}.tif'.format(path,scale_x,name_save,j,k)) == False:
file_object = open('{}Missing_Files.txt'.format(path), 'a')
file_object.write('{}, {}, {}, '.format(name_save, features_country[j], k))
file_object.write("\n")
# Close the file
file_object.close()
print(name_save, features_country[j], k, 'Is still missing - Download process failed - Will be downloaded in smaller patches')
# Backup download in case there is a downloading issue with the set tile size
if os.path.exists('{}/Image_Exported_{}_{}_{}_{}.tif'.format(path,scale_x,name_save,j,k)) == False:
features_2 = roi.coveringGrid(data_pro, 200000)
geometries_new_2 = features_2.toList(5000)#.map(func_yhi)
for p in range(len(geometries_new_2.getInfo())):
roi_2 =ee.Feature(geometries_new_2.getInfo()[p]).geometry()
rio_pixels_2 = rio_pixels.clip(roi_2)
geemap.ee_export_image(rio_pixels_2 , filename='{}/Image_Exported_Failed_Down_{}_{}_{}_{}_{}.tif'.format(path,scale_x,name_save,j,k,p), scale= scale_x, region = roi_2)
bar.finish()
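# Worked example of the density computation above (added comment, illustrative
# numbers only): for one coarse output cell at scale_x = 25000 m, if
# reduceResolution counts 130 masked water pixels (new_area_count) out of 650
# native pixels covering the cell (new_area_count_all), the stored value is
# 130 / (650 / 100) = 20.0, i.e. 20 percent of the cell is surface water.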
##################### Start the mining process in Google Earth Engine ##############################
if __name__ == "__main__":
##### Input - user dependent ##########################
name = 'MERIT_Surface_Water' # Select the name under which the data will be stored
dataset = ee.Image("MERIT/Hydro/v1_0_1")
# Set path where the data should be saved
path_save = '/data/River_Density/New_River_Composition_Different_Res/'
# Name the folder:
folder_name = 'Test_Folder'
if os.path.exists('{}{}'.format(path_save,folder_name)) == False:
os.mkdir('{}{}'.format(path_save,folder_name))
path ='{}{}/'.format(path_save,folder_name)
# Set scale of the density map
scale_x = 25000  # In m ==> 25 km
# If downloading issues occur due to high-resolution maps, decrease the tile size
tile_size = 500000
# Set number of processors for the multiprocessing:
number_of_processors = 4
######################################################
world = ee.FeatureCollection("USDOS/LSIB_SIMPLE/2017") # Feature collection which gives boundaries for countries and continents
sel_name = 'wld_rgn' # if interested in individual countries, select 'country_na'
europe = world  # There is also the option to select individual countries or continents, e.g. filter(ee.Filter.eq('wld_rgn', 'Europe'))
features_cont = np.array(['North America','Africa' , 'Australia', 'Caribbean' ,'Central America',
'Central Asia' ,'E Asia', 'Europe' ,'Indian Ocean', 'N Asia' ,
'Oceania', 'S Asia', 'S Atlantic' ,'SE Asia', 'SW Asia', 'South America'])
# To avoid spaces, an additional list of names has been created:
features_cont_name = np.array(['North_America','Africa' , 'Australia', 'Caribbean' ,'Central_America',
'Central_Asia' ,'E_Asia', 'Europe' ,'Indian_Ocean', 'N_Asia' ,
'Oceania', 'S_Asia', 'S_Atlantic' ,'SE_Asia', 'SW_Asia', 'South_America'])
# Creating a list to split the work across the provided cores (number_of_processors processes run in parallel)
x = np.arange(len(features_cont))
split = np.array_split(x, number_of_processors) # Here the number of processors can be selected
print(split, len(split))
for s in range(len(split)):
#for s in range(1):
print('Split', s+1, 'out of ', len(split))
area_sel = features_cont[split[s]]
area_sel_name = features_cont_name[split[s]]
manager = multiprocessing.Manager()
print('entering the processing')
df_all = manager.list()
processes = []
for j in range(len(area_sel)):
name_save = area_sel_name[j]
name_inp = area_sel[j]
print(name_inp, 'is in the making')
p = Process(target=get_geotiff_gee, args=(dataset,world,name_inp, path, scale_x, name_save,tile_size,)) # Passing the list
p.start()
processes.append(p)
for p in processes:
p.join()
print("Finished first part. Now it's time to look for the date line issue.")
####################### Downloading the areas along the date line separately to avoid feature crossover at -180/180!
geometry_miss_1 = ee.Geometry.Polygon(
[[[158.84159346653087, 73.96789885519699],
[158.84159346653087, 52.15339248067615],
[179.84745284153087, 52.15339248067615],
[179.84745284153087, 73.96789885519699]]])
geometry_miss_2 = ee.Geometry.Polygon(
[[[-165.56270340846913, 73.72336873420824],
[-165.56270340846913, 44.519635837378665],
[-139.01973465846913, 44.519635837378665],
[-139.01973465846913, 73.72336873420824]]])
geometry_miss_all = [geometry_miss_1, geometry_miss_2]
data_pro = dataset.projection()
for i in range(len(geometry_miss_all)):
ROI = ee.Feature(geometry_miss_all[i]).geometry()
features = ROI.coveringGrid(data_pro, 1000000)
geometries_new = features.toList(5000)#.map(func_yhi)
list_images = []
for k in range(len(geometries_new.getInfo())):
roi =ee.Feature(geometries_new.getInfo()[k]).geometry()
##########!!!!!!!!!!!!!!! Depending on dataset!!!!!!!!!!!!!!!!!!!!############
dataset_2 = dataset.select('wat')
data = dataset_2.updateMask(dataset_2.eq(1)).clip(roi)
##########!!!!!!!!!!!!!!! Depending on dataset!!!!!!!!!!!!!!!!!!!!############
data_pro = data.projection() # Select projection of the image
# Force the next reprojection to aggregate instead of resampling.
new_area_count = data.reduceResolution(**{'reducer': ee.Reducer.count(),'bestEffort': True, 'maxPixels':65536}).reproject(data_pro,None, scale_x)
new_area_count_all = data.unmask().reduceResolution(**{'reducer': ee.Reducer.count(),'bestEffort': True, 'maxPixels':65536}).reproject(data_pro, None ,scale_x)
scaled_pixels =new_area_count.divide(new_area_count_all.divide(100)) # ((Sum of selected pixels)/Total_Count_Pixels)*100 To get percent
rio_pixels = scaled_pixels.clip(roi)
if os.path.exists('{}Image_Date_Line_Missing_{}_{}_{}_{}.tif'.format(path,scale_x,i,k,len(geometries_new.getInfo()))) == False:
geemap.ee_export_image(rio_pixels, filename='{}Image_Date_Line_Missing_{}_{}_{}_{}.tif'.format(path,scale_x,i,k,len(geometries_new.getInfo()) ), scale= scale_x, region = roi)
print("All data is downloaded, it's time to start creating some maps.")
######################### Merging and Reprojecting the data ###########################
folder_name_2 = 'Reprojected_Files'
if os.path.exists('{}{}'.format(path,folder_name_2)) == False:
os.mkdir('{}{}'.format(path,folder_name_2))
path_repro ='{}{}/'.format(path,folder_name_2)
folder_name_3 = 'Final_Files'
if os.path.exists('{}{}'.format(path,folder_name_3)) == False:
os.mkdir('{}{}'.format(path,folder_name_3))
path_final ='{}{}/'.format(path,folder_name_3)
files_to_mosaic = glob.glob('{}/*.tif'.format(path))
print(len(files_to_mosaic))
files_string = " ".join(files_to_mosaic)
for i in range(len(files_to_mosaic)):
# Possibility to set projection
command ='gdalwarp {} {}Out_{}.tif -overwrite -t_srs "+proj=longlat +ellps=WGS84"'.format(files_to_mosaic[i], path_repro,i)
print(os.popen(command).read())
files_to_mosaic = np.array(glob.glob('{}*.tif'.format(path_repro)))
long = np.array_split(range(len(files_to_mosaic)), 5) # This needs to be done because gdal can only process a limited number of GeoTIFF files at the same time
for f in range(len(long)):
files_ib = files_to_mosaic[long[f].astype(int)]
print(len(files_to_mosaic))
files_string = " ".join(files_ib)
command = "gdal_merge.py -o {}inbetween_{}.tif -of gtiff -n 0 ".format(path_repro,f) + files_string
print(os.popen(command).read())
# Merging the inbetween files together
files_to_mosaic = glob.glob('{}inbetween*.tif'.format(path_repro))
files_string = " ".join(files_to_mosaic)
command = "gdal_merge.py -o {}{}_{}.tif -of gtiff -n 0 ".format(path_final,scale_x,name) + files_string
print(os.popen(command).read())
command = "gdal_translate -scale -of KMLSUPEROVERLAY {}{}_{}.tif {}{}_{}.kmz".format(path_final,scale_x,name,path_final,scale_x,name)
print(os.popen(command).read())
|
py | 1a3cf3176f25d1b753eaf20570215e8a4e72d804 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from copy import deepcopy
from batchgenerators.augmentations.utils import resize_segmentation
from nnunet.configuration import default_num_threads, RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD
from nnunet.preprocessing.cropping import get_case_identifier_from_npz, ImageCropper
from skimage.transform import resize
from scipy.ndimage.interpolation import map_coordinates
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import *
from multiprocessing.pool import Pool
def get_do_separate_z(spacing, anisotropy_threshold=RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD):
do_separate_z = (np.max(spacing) / np.min(spacing)) > anisotropy_threshold
return do_separate_z
def get_lowres_axis(new_spacing):
axis = np.where(max(new_spacing) / np.array(new_spacing) == 1)[0] # find which axis is anisotropic
return axis
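# Illustrative example (added comment, assuming an anisotropy threshold of 3):
# for a typical anisotropic spacing of (3.0, 0.5, 0.5) mm, get_do_separate_z
# returns True because max/min = 6 > 3, and get_lowres_axis((3.0, 0.5, 0.5))
# returns array([0]), i.e. the first axis is the low-resolution one that is
# resampled separately.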
def resample_patient(data, seg, original_spacing, target_spacing, order_data=3, order_seg=0, force_separate_z=False,
cval_data=0, cval_seg=-1, order_z_data=0, order_z_seg=0,
separate_z_anisotropy_threshold=RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD):
"""
:param cval_seg:
:param cval_data:
:param data:
:param seg:
:param original_spacing:
:param target_spacing:
:param order_data:
:param order_seg:
:param force_separate_z: if None then we dynamically decide how to resample along z, if True/False then always
/never resample along z separately
:param order_z_seg: only applies if do_separate_z is True
:param order_z_data: only applies if do_separate_z is True
:param separate_z_anisotropy_threshold: if max_spacing > separate_z_anisotropy_threshold * min_spacing (per axis)
then resample along lowres axis with order_z_data/order_z_seg instead of order_data/order_seg
:return:
"""
assert not ((data is None) and (seg is None))
if data is not None:
assert len(data.shape) == 4, "data must be c x y z"
if seg is not None:
assert len(seg.shape) == 4, "seg must be c x y z"
if data is not None:
shape = np.array(data[0].shape)
else:
shape = np.array(seg[0].shape)
new_shape = np.round(((np.array(original_spacing) / np.array(target_spacing)).astype(float) * shape)).astype(int)
if force_separate_z is not None:
do_separate_z = force_separate_z
if force_separate_z:
axis = get_lowres_axis(original_spacing)
else:
axis = None
else:
if get_do_separate_z(original_spacing, separate_z_anisotropy_threshold):
do_separate_z = True
axis = get_lowres_axis(original_spacing)
elif get_do_separate_z(target_spacing, separate_z_anisotropy_threshold):
do_separate_z = True
axis = get_lowres_axis(target_spacing)
else:
do_separate_z = False
axis = None
if axis is not None:
if len(axis) == 3:
# every axis has the spacing, this should never happen, why is this code here?
do_separate_z = False
elif len(axis) == 2:
# this happens for spacings like (0.24, 1.25, 1.25) for example. In that case we do not want to resample
# separately in the out of plane axis
do_separate_z = False
else:
pass
if data is not None:
data_reshaped = resample_data_or_seg(data, new_shape, False, axis, order_data, do_separate_z, cval=cval_data,
order_z=order_z_data)
else:
data_reshaped = None
if seg is not None:
seg_reshaped = resample_data_or_seg(seg, new_shape, True, axis, order_seg, do_separate_z, cval=cval_seg,
order_z=order_z_seg)
else:
seg_reshaped = None
return data_reshaped, seg_reshaped
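# Minimal usage sketch (added comment, illustrative values): resampling one
# modality and its segmentation from 1.5 x 0.8 x 0.8 mm to 1.0 mm isotropic
# spacing could look like
#
#   data = np.random.rand(1, 96, 160, 160).astype(np.float32)
#   seg = np.random.randint(0, 3, size=(1, 96, 160, 160))
#   data_r, seg_r = resample_patient(data, seg, np.array([1.5, 0.8, 0.8]),
#                                    [1.0, 1.0, 1.0], order_data=3, order_seg=1)
#
# which yields arrays of shape (1, 144, 128, 128).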
def resample_data_or_seg(data, new_shape, is_seg, axis=None, order=3, do_separate_z=False, cval=0, order_z=0):
"""
separate_z=True will resample with order 0 along z
:param data:
:param new_shape:
:param is_seg:
:param axis:
:param order:
:param do_separate_z:
:param cval:
:param order_z: only applies if do_separate_z is True
:return:
"""
assert len(data.shape) == 4, "data must be (c, x, y, z)"
if is_seg:
resize_fn = resize_segmentation
kwargs = OrderedDict()
else:
resize_fn = resize
kwargs = {'mode': 'edge', 'anti_aliasing': False}
dtype_data = data.dtype
data = data.astype(float)
shape = np.array(data[0].shape)
new_shape = np.array(new_shape)
if np.any(shape != new_shape):
if do_separate_z:
print("separate z, order in z is", order_z, "order inplane is", order)
assert len(axis) == 1, "only one anisotropic axis supported"
axis = axis[0]
if axis == 0:
new_shape_2d = new_shape[1:]
elif axis == 1:
new_shape_2d = new_shape[[0, 2]]
else:
new_shape_2d = new_shape[:-1]
reshaped_final_data = []
for c in range(data.shape[0]):
reshaped_data = []
for slice_id in range(shape[axis]):
if axis == 0:
reshaped_data.append(resize_fn(data[c, slice_id], new_shape_2d, order, cval=cval, **kwargs))
elif axis == 1:
reshaped_data.append(resize_fn(data[c, :, slice_id], new_shape_2d, order, cval=cval, **kwargs))
else:
reshaped_data.append(resize_fn(data[c, :, :, slice_id], new_shape_2d, order, cval=cval,
**kwargs))
reshaped_data = np.stack(reshaped_data, axis)
if shape[axis] != new_shape[axis]:
# The following few lines are blatantly copied and modified from sklearn's resize()
rows, cols, dim = new_shape[0], new_shape[1], new_shape[2]
orig_rows, orig_cols, orig_dim = reshaped_data.shape
row_scale = float(orig_rows) / rows
col_scale = float(orig_cols) / cols
dim_scale = float(orig_dim) / dim
map_rows, map_cols, map_dims = np.mgrid[:rows, :cols, :dim]
map_rows = row_scale * (map_rows + 0.5) - 0.5
map_cols = col_scale * (map_cols + 0.5) - 0.5
map_dims = dim_scale * (map_dims + 0.5) - 0.5
coord_map = np.array([map_rows, map_cols, map_dims])
if not is_seg or order_z == 0:
reshaped_final_data.append(map_coordinates(reshaped_data, coord_map, order=order_z, cval=cval,
mode='nearest')[None])
else:
unique_labels = np.unique(reshaped_data)
reshaped = np.zeros(new_shape, dtype=dtype_data)
for i, cl in enumerate(unique_labels):
reshaped_multihot = np.round(
map_coordinates((reshaped_data == cl).astype(float), coord_map, order=order_z,
cval=cval, mode='nearest'))
reshaped[reshaped_multihot > 0.5] = cl
reshaped_final_data.append(reshaped[None])
else:
reshaped_final_data.append(reshaped_data[None])
reshaped_final_data = np.vstack(reshaped_final_data)
else:
print("no separate z, order", order)
reshaped = []
for c in range(data.shape[0]):
reshaped.append(resize_fn(data[c], new_shape, order, cval=cval, **kwargs)[None])
reshaped_final_data = np.vstack(reshaped)
return reshaped_final_data.astype(dtype_data)
else:
print("no resampling necessary")
return data
class GenericPreprocessor(object):
def __init__(self, normalization_scheme_per_modality, use_nonzero_mask, transpose_forward: (tuple, list),
intensityproperties=None):
"""
:param normalization_scheme_per_modality: dict {0:'nonCT'}
:param use_nonzero_mask: {0:False}
:param intensityproperties:
"""
self.transpose_forward = transpose_forward
self.intensityproperties = intensityproperties
self.normalization_scheme_per_modality = normalization_scheme_per_modality
self.use_nonzero_mask = use_nonzero_mask
self.resample_separate_z_anisotropy_threshold = RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD
@staticmethod
def load_cropped(cropped_output_dir, case_identifier):
all_data = np.load(os.path.join(cropped_output_dir, "%s.npz" % case_identifier))['data']
data = all_data[:-1].astype(np.float32)
seg = all_data[-1:]
with open(os.path.join(cropped_output_dir, "%s.pkl" % case_identifier), 'rb') as f:
properties = pickle.load(f)
return data, seg, properties
@staticmethod
def my_load_cropped(cropped_output_dir, case_identifier):
all_data = np.load(os.path.join(cropped_output_dir, "%s.npz" % case_identifier))['data']
data = all_data[:-4].astype(np.float32)
seg = all_data[-4:]
with open(os.path.join(cropped_output_dir, "%s.pkl" % case_identifier), 'rb') as f:
properties = pickle.load(f)
return data, seg, properties
def resample_and_normalize(self, data, target_spacing, properties, seg=None, force_separate_z=None):
"""
data and seg must already have been transposed by transpose_forward. properties are the un-transposed values
(spacing etc)
:param data:
:param target_spacing:
:param properties:
:param seg:
:param force_separate_z:
:return:
"""
# target_spacing is already transposed, properties["original_spacing"] is not so we need to transpose it!
# data, seg are already transposed. Double check this using the properties
original_spacing_transposed = np.array(properties["original_spacing"])[self.transpose_forward]
before = {
'spacing': properties["original_spacing"],
'spacing_transposed': original_spacing_transposed,
'data.shape (data is transposed)': data.shape
}
# remove nans
data[np.isnan(data)] = 0
data, seg = resample_patient(data, seg, np.array(original_spacing_transposed), target_spacing, 3, 1,
force_separate_z=force_separate_z, order_z_data=0, order_z_seg=0,
separate_z_anisotropy_threshold=self.resample_separate_z_anisotropy_threshold)
after = {
'spacing': target_spacing,
'data.shape (data is resampled)': data.shape
}
print("before:", before, "\nafter: ", after, "\n")
if seg is not None: # hippocampus 243 has one voxel with -2 as label. wtf?
seg[seg < -1] = 0
properties["size_after_resampling"] = data[0].shape
properties["spacing_after_resampling"] = target_spacing
use_nonzero_mask = self.use_nonzero_mask
assert len(self.normalization_scheme_per_modality) == len(data), "self.normalization_scheme_per_modality " \
"must have as many entries as data has " \
"modalities"
assert len(self.use_nonzero_mask) == len(data), "self.use_nonzero_mask must have as many entries as data" \
" has modalities"
for c in range(len(data)):
scheme = self.normalization_scheme_per_modality[c]
if scheme == "CT":
# clip to lb and ub from train data foreground and use foreground mn and sd from training data
assert self.intensityproperties is not None, "ERROR: if there is a CT then we need intensity properties"
mean_intensity = self.intensityproperties[c]['mean']
std_intensity = self.intensityproperties[c]['sd']
lower_bound = self.intensityproperties[c]['percentile_00_5']
upper_bound = self.intensityproperties[c]['percentile_99_5']
data[c] = np.clip(data[c], lower_bound, upper_bound)
data[c] = (data[c] - mean_intensity) / std_intensity
if use_nonzero_mask[c]:
data[c][seg[-1] < 0] = 0
elif scheme == "CT2":
# clip to lb and ub from train data foreground, use mn and sd form each case for normalization
assert self.intensityproperties is not None, "ERROR: if there is a CT then we need intensity properties"
lower_bound = self.intensityproperties[c]['percentile_00_5']
upper_bound = self.intensityproperties[c]['percentile_99_5']
mask = (data[c] > lower_bound) & (data[c] < upper_bound)
data[c] = np.clip(data[c], lower_bound, upper_bound)
mn = data[c][mask].mean()
sd = data[c][mask].std()
data[c] = (data[c] - mn) / sd
if use_nonzero_mask[c]:
data[c][seg[-1] < 0] = 0
else:
if use_nonzero_mask[c]:
mask = seg[-1] >= 0
else:
mask = np.ones(seg.shape[1:], dtype=bool)
data[c][mask] = (data[c][mask] - data[c][mask].mean()) / (data[c][mask].std() + 1e-8)
data[c][mask == 0] = 0
return data, seg, properties
def my_resample_and_normalize(self, data, target_spacing, properties, seg=None, force_separate_z=None):
"""
data and seg must already have been transposed by transpose_forward. properties are the un-transposed values
(spacing etc)
:param data:
:param target_spacing:
:param properties:
:param seg:
:param force_separate_z:
:return:
"""
# target_spacing is already transposed, properties["original_spacing"] is not so we need to transpose it!
# data, seg are already transposed. Double check this using the properties
original_spacing_transposed = np.array(properties["original_spacing"])[self.transpose_forward]
before = {
'spacing': properties["original_spacing"],
'spacing_transposed': original_spacing_transposed,
'data.shape (data is transposed)': data.shape
}
# remove nans
data[np.isnan(data)] = 0
data, seg1 = resample_patient(data, seg, np.array(original_spacing_transposed), target_spacing, 3, 1,
force_separate_z=force_separate_z, order_z_data=0, order_z_seg=0,
separate_z_anisotropy_threshold=self.resample_separate_z_anisotropy_threshold)
after = {
'spacing': target_spacing,
'data.shape (data is resampled)': data.shape
}
print("before:", before, "\nafter: ", after, "\n")
print(seg1.shape, "@@@@@@@@@@@@@@@@@@@@@@@@@@")
seg = seg1[0:1]
print(seg.shape, "##########################")
print(self.use_nonzero_mask, "**************")
if seg1 is not None: # hippocampus 243 has one voxel with -2 as label. wtf?
seg1[seg1 < -1] = 0
properties["size_after_resampling"] = data[0].shape
properties["spacing_after_resampling"] = target_spacing
use_nonzero_mask = self.use_nonzero_mask
assert len(self.normalization_scheme_per_modality) == len(data), "self.normalization_scheme_per_modality " \
"must have as many entries as data has " \
"modalities"
assert len(self.use_nonzero_mask) == len(data), "self.use_nonzero_mask must have as many entries as data" \
" has modalities"
for c in range(len(data)):
scheme = self.normalization_scheme_per_modality[c]
if scheme == "CT":
# clip to lb and ub from train data foreground and use foreground mn and sd from training data
assert self.intensityproperties is not None, "ERROR: if there is a CT then we need intensity properties"
mean_intensity = self.intensityproperties[c]['mean']
std_intensity = self.intensityproperties[c]['sd']
lower_bound = self.intensityproperties[c]['percentile_00_5']
upper_bound = self.intensityproperties[c]['percentile_99_5']
data[c] = np.clip(data[c], lower_bound, upper_bound)
data[c] = (data[c] - mean_intensity) / std_intensity
if use_nonzero_mask[c]:
data[c][seg[-1] < 0] = 0
elif scheme == "CT2":
# clip to lb and ub from train data foreground, use mn and sd form each case for normalization
assert self.intensityproperties is not None, "ERROR: if there is a CT then we need intensity properties"
lower_bound = self.intensityproperties[c]['percentile_00_5']
upper_bound = self.intensityproperties[c]['percentile_99_5']
mask = (data[c] > lower_bound) & (data[c] < upper_bound)
data[c] = np.clip(data[c], lower_bound, upper_bound)
mn = data[c][mask].mean()
sd = data[c][mask].std()
data[c] = (data[c] - mn) / sd
if use_nonzero_mask[c]:
data[c][seg[-1] < 0] = 0
else:
if use_nonzero_mask[c]:
mask = seg[-1] >= 0
else:
mask = np.ones(seg.shape[1:], dtype=bool)
data[c][mask] = (data[c][mask] - data[c][mask].mean()) / (data[c][mask].std() + 1e-8)
data[c][mask == 0] = 0
return data, seg1, properties
def preprocess_test_case(self, data_files, target_spacing, seg_file=None, force_separate_z=None):
data, seg, properties = ImageCropper.crop_from_list_of_files(data_files, seg_file)
data = data.transpose((0, *[i + 1 for i in self.transpose_forward]))
seg = seg.transpose((0, *[i + 1 for i in self.transpose_forward]))
data, seg, properties = self.resample_and_normalize(data, target_spacing, properties, seg,
force_separate_z=force_separate_z)
return data.astype(np.float32), seg, properties
def _run_internal(self, target_spacing, case_identifier, output_folder_stage, cropped_output_dir, force_separate_z,
all_classes):
data, seg, properties = self.load_cropped(cropped_output_dir, case_identifier)
data = data.transpose((0, *[i + 1 for i in self.transpose_forward]))
seg = seg.transpose((0, *[i + 1 for i in self.transpose_forward]))
data, seg, properties = self.resample_and_normalize(data, target_spacing,
properties, seg, force_separate_z)
all_data = np.vstack((data, seg)).astype(np.float32)
# we need to find out where the classes are and sample some random locations
# let's do 10.000 samples per class
# seed this for reproducibility!
num_samples = 10000
min_percent_coverage = 0.01 # at least 1% of the class voxels need to be selected, otherwise it may be too sparse
rndst = np.random.RandomState(1234)
class_locs = {}
for c in all_classes:
all_locs = np.argwhere(all_data[-1] == c)
if len(all_locs) == 0:
class_locs[c] = []
continue
target_num_samples = min(num_samples, len(all_locs))
target_num_samples = max(target_num_samples, int(np.ceil(len(all_locs) * min_percent_coverage)))
selected = all_locs[rndst.choice(len(all_locs), target_num_samples, replace=False)]
class_locs[c] = selected
print(c, target_num_samples)
properties['class_locations'] = class_locs
print("saving: ", os.path.join(output_folder_stage, "%s.npz" % case_identifier))
np.savez_compressed(os.path.join(output_folder_stage, "%s.npz" % case_identifier),
data=all_data.astype(np.float32))
with open(os.path.join(output_folder_stage, "%s.pkl" % case_identifier), 'wb') as f:
pickle.dump(properties, f)
def my_run_internal(self, target_spacing, case_identifier, output_folder_stage, cropped_output_dir,
force_separate_z, all_classes):
data, seg, properties = self.my_load_cropped(cropped_output_dir, case_identifier)
data = data.transpose((0, *[i + 1 for i in self.transpose_forward]))
seg = seg.transpose((0, *[i + 1 for i in self.transpose_forward]))
data, seg, properties = self.my_resample_and_normalize(data, target_spacing,
properties, seg, force_separate_z)
all_data = np.vstack((data, seg)).astype(np.float32)
# we need to find out where the classes are and sample some random locations
# let's do 10.000 samples per class
# seed this for reproducibility!
num_samples = 10000
min_percent_coverage = 0.01 # at least 1% of the class voxels need to be selected, otherwise it may be too sparse
rndst = np.random.RandomState(1234)
class_locs = {}
for c in all_classes:
all_locs = np.argwhere(all_data[-4] == c)
if len(all_locs) == 0:
class_locs[c] = []
continue
target_num_samples = min(num_samples, len(all_locs))
target_num_samples = max(target_num_samples, int(np.ceil(len(all_locs) * min_percent_coverage)))
selected = all_locs[rndst.choice(len(all_locs), target_num_samples, replace=False)]
class_locs[c] = selected
print(c, target_num_samples)
properties['class_locations'] = class_locs
print("saving: ", os.path.join(output_folder_stage, "%s.npz" % case_identifier))
np.savez_compressed(os.path.join(output_folder_stage, "%s.npz" % case_identifier),
data=all_data.astype(np.float32))
with open(os.path.join(output_folder_stage, "%s.pkl" % case_identifier), 'wb') as f:
pickle.dump(properties, f)
def run(self, target_spacings, input_folder_with_cropped_npz, output_folder, data_identifier,
num_threads=default_num_threads, force_separate_z=None):
"""
:param target_spacings: list of lists [[1.25, 1.25, 5]]
:param input_folder_with_cropped_npz: dim: c, x, y, z | npz_file['data'] np.savez_compressed(fname.npz, data=arr)
:param output_folder:
:param num_threads:
:param force_separate_z: None
:return:
"""
print("Initializing to run preprocessing")
print("npz folder:", input_folder_with_cropped_npz)
print("output_folder:", output_folder)
list_of_cropped_npz_files = subfiles(input_folder_with_cropped_npz, True, None, ".npz", True)
maybe_mkdir_p(output_folder)
num_stages = len(target_spacings)
if not isinstance(num_threads, (list, tuple, np.ndarray)):
num_threads = [num_threads] * num_stages
assert len(num_threads) == num_stages
# we need to know which classes are present in this dataset so that we can precompute where these classes are
# located. This is needed for oversampling foreground
all_classes = load_pickle(join(input_folder_with_cropped_npz, 'dataset_properties.pkl'))['all_classes']
for i in range(num_stages):
all_args = []
output_folder_stage = os.path.join(output_folder, data_identifier + "_stage%d" % i)
maybe_mkdir_p(output_folder_stage)
spacing = target_spacings[i]
for j, case in enumerate(list_of_cropped_npz_files):
case_identifier = get_case_identifier_from_npz(case)
args = spacing, case_identifier, output_folder_stage, input_folder_with_cropped_npz, force_separate_z, all_classes
all_args.append(args)
p = Pool(num_threads[i])
p.starmap(self._run_internal, all_args)
p.close()
p.join()
def my_run(self, target_spacings, input_folder_with_cropped_npz, output_folder, data_identifier,
num_threads=default_num_threads, force_separate_z=None):
"""
:param target_spacings: list of lists [[1.25, 1.25, 5]]
:param input_folder_with_cropped_npz: dim: c, x, y, z | npz_file['data'] np.savez_compressed(fname.npz, data=arr)
:param output_folder:
:param num_threads:
:param force_separate_z: None
:return:
"""
print("Initializing to run preprocessing")
print("npz folder:", input_folder_with_cropped_npz)
print("output_folder:", output_folder)
list_of_cropped_npz_files = subfiles(input_folder_with_cropped_npz, True, None, ".npz", True)
maybe_mkdir_p(output_folder)
num_stages = len(target_spacings)
if not isinstance(num_threads, (list, tuple, np.ndarray)):
num_threads = [num_threads] * num_stages
assert len(num_threads) == num_stages
# we need to know which classes are present in this dataset so that we can precompute where these classes are
# located. This is needed for oversampling foreground
all_classes = load_pickle(join(input_folder_with_cropped_npz, 'dataset_properties.pkl'))['all_classes']
for i in range(num_stages):
all_args = []
output_folder_stage = os.path.join(output_folder, data_identifier + "_stage%d" % i)
maybe_mkdir_p(output_folder_stage)
spacing = target_spacings[i]
for j, case in enumerate(list_of_cropped_npz_files):
case_identifier = get_case_identifier_from_npz(case)
args = spacing, case_identifier, output_folder_stage, input_folder_with_cropped_npz, force_separate_z, all_classes
all_args.append(args)
p = Pool(num_threads[i])
p.starmap(self.my_run_internal, all_args)
p.close()
p.join()
class Preprocessor3DDifferentResampling(GenericPreprocessor):
def resample_and_normalize(self, data, target_spacing, properties, seg=None, force_separate_z=None):
"""
data and seg must already have been transposed by transpose_forward. properties are the un-transposed values
(spacing etc)
:param data:
:param target_spacing:
:param properties:
:param seg:
:param force_separate_z:
:return:
"""
# target_spacing is already transposed, properties["original_spacing"] is not so we need to transpose it!
# data, seg are already transposed. Double check this using the properties
original_spacing_transposed = np.array(properties["original_spacing"])[self.transpose_forward]
before = {
'spacing': properties["original_spacing"],
'spacing_transposed': original_spacing_transposed,
'data.shape (data is transposed)': data.shape
}
# remove nans
data[np.isnan(data)] = 0
data, seg = resample_patient(data, seg, np.array(original_spacing_transposed), target_spacing, 3, 1,
force_separate_z=force_separate_z, order_z_data=3, order_z_seg=1,
separate_z_anisotropy_threshold=self.resample_separate_z_anisotropy_threshold)
after = {
'spacing': target_spacing,
'data.shape (data is resampled)': data.shape
}
print("before:", before, "\nafter: ", after, "\n")
if seg is not None: # hippocampus 243 has one voxel with -2 as label. wtf?
seg[seg < -1] = 0
properties["size_after_resampling"] = data[0].shape
properties["spacing_after_resampling"] = target_spacing
use_nonzero_mask = self.use_nonzero_mask
assert len(self.normalization_scheme_per_modality) == len(data), "self.normalization_scheme_per_modality " \
"must have as many entries as data has " \
"modalities"
assert len(self.use_nonzero_mask) == len(data), "self.use_nonzero_mask must have as many entries as data" \
" has modalities"
for c in range(len(data)):
scheme = self.normalization_scheme_per_modality[c]
if scheme == "CT":
# clip to lb and ub from train data foreground and use foreground mn and sd from training data
assert self.intensityproperties is not None, "ERROR: if there is a CT then we need intensity properties"
mean_intensity = self.intensityproperties[c]['mean']
std_intensity = self.intensityproperties[c]['sd']
lower_bound = self.intensityproperties[c]['percentile_00_5']
upper_bound = self.intensityproperties[c]['percentile_99_5']
data[c] = np.clip(data[c], lower_bound, upper_bound)
data[c] = (data[c] - mean_intensity) / std_intensity
if use_nonzero_mask[c]:
data[c][seg[-1] < 0] = 0
elif scheme == "CT2":
# clip to lb and ub from train data foreground, use mn and sd form each case for normalization
assert self.intensityproperties is not None, "ERROR: if there is a CT then we need intensity properties"
lower_bound = self.intensityproperties[c]['percentile_00_5']
upper_bound = self.intensityproperties[c]['percentile_99_5']
mask = (data[c] > lower_bound) & (data[c] < upper_bound)
data[c] = np.clip(data[c], lower_bound, upper_bound)
mn = data[c][mask].mean()
sd = data[c][mask].std()
data[c] = (data[c] - mn) / sd
if use_nonzero_mask[c]:
data[c][seg[-1] < 0] = 0
else:
if use_nonzero_mask[c]:
mask = seg[-1] >= 0
else:
mask = np.ones(seg.shape[1:], dtype=bool)
data[c][mask] = (data[c][mask] - data[c][mask].mean()) / (data[c][mask].std() + 1e-8)
data[c][mask == 0] = 0
return data, seg, properties
class Preprocessor3DBetterResampling(GenericPreprocessor):
"""
This preprocessor always uses force_separate_z=False. It does resampling to the target spacing with third
order spline for data (just like GenericPreprocessor) and seg (unlike GenericPreprocessor). It never does separate
resampling in z.
"""
def resample_and_normalize(self, data, target_spacing, properties, seg=None, force_separate_z=False):
"""
data and seg must already have been transposed by transpose_forward. properties are the un-transposed values
(spacing etc)
:param data:
:param target_spacing:
:param properties:
:param seg:
:param force_separate_z:
:return:
"""
if force_separate_z is not False:
print("WARNING: Preprocessor3DBetterResampling always uses force_separate_z=False. "
"You specified %s. Your choice is overwritten" % str(force_separate_z))
force_separate_z = False
# be safe
assert force_separate_z is False
# target_spacing is already transposed, properties["original_spacing"] is not so we need to transpose it!
# data, seg are already transposed. Double check this using the properties
original_spacing_transposed = np.array(properties["original_spacing"])[self.transpose_forward]
before = {
'spacing': properties["original_spacing"],
'spacing_transposed': original_spacing_transposed,
'data.shape (data is transposed)': data.shape
}
# remove nans
data[np.isnan(data)] = 0
data, seg = resample_patient(data, seg, np.array(original_spacing_transposed), target_spacing, 3, 3,
force_separate_z=force_separate_z, order_z_data=99999, order_z_seg=99999,
separate_z_anisotropy_threshold=self.resample_separate_z_anisotropy_threshold)
after = {
'spacing': target_spacing,
'data.shape (data is resampled)': data.shape
}
print("before:", before, "\nafter: ", after, "\n")
if seg is not None: # hippocampus 243 has one voxel with -2 as label. wtf?
seg[seg < -1] = 0
properties["size_after_resampling"] = data[0].shape
properties["spacing_after_resampling"] = target_spacing
use_nonzero_mask = self.use_nonzero_mask
assert len(self.normalization_scheme_per_modality) == len(data), "self.normalization_scheme_per_modality " \
"must have as many entries as data has " \
"modalities"
assert len(self.use_nonzero_mask) == len(data), "self.use_nonzero_mask must have as many entries as data" \
" has modalities"
for c in range(len(data)):
scheme = self.normalization_scheme_per_modality[c]
if scheme == "CT":
# clip to lb and ub from train data foreground and use foreground mn and sd from training data
assert self.intensityproperties is not None, "ERROR: if there is a CT then we need intensity properties"
mean_intensity = self.intensityproperties[c]['mean']
std_intensity = self.intensityproperties[c]['sd']
lower_bound = self.intensityproperties[c]['percentile_00_5']
upper_bound = self.intensityproperties[c]['percentile_99_5']
data[c] = np.clip(data[c], lower_bound, upper_bound)
data[c] = (data[c] - mean_intensity) / std_intensity
if use_nonzero_mask[c]:
data[c][seg[-1] < 0] = 0
elif scheme == "CT2":
# clip to lb and ub from train data foreground, use mn and sd form each case for normalization
assert self.intensityproperties is not None, "ERROR: if there is a CT then we need intensity properties"
lower_bound = self.intensityproperties[c]['percentile_00_5']
upper_bound = self.intensityproperties[c]['percentile_99_5']
mask = (data[c] > lower_bound) & (data[c] < upper_bound)
data[c] = np.clip(data[c], lower_bound, upper_bound)
mn = data[c][mask].mean()
sd = data[c][mask].std()
data[c] = (data[c] - mn) / sd
if use_nonzero_mask[c]:
data[c][seg[-1] < 0] = 0
else:
if use_nonzero_mask[c]:
mask = seg[-1] >= 0
else:
mask = np.ones(seg.shape[1:], dtype=bool)
data[c][mask] = (data[c][mask] - data[c][mask].mean()) / (data[c][mask].std() + 1e-8)
data[c][mask == 0] = 0
return data, seg, properties
class PreprocessorFor2D(GenericPreprocessor):
def __init__(self, normalization_scheme_per_modality, use_nonzero_mask, transpose_forward: (tuple, list),
intensityproperties=None):
super(PreprocessorFor2D, self).__init__(normalization_scheme_per_modality, use_nonzero_mask,
transpose_forward, intensityproperties)
def run(self, target_spacings, input_folder_with_cropped_npz, output_folder, data_identifier,
num_threads=default_num_threads, force_separate_z=None):
print("Initializing to run preprocessing")
print("npz folder:", input_folder_with_cropped_npz)
print("output_folder:", output_folder)
list_of_cropped_npz_files = subfiles(input_folder_with_cropped_npz, True, None, ".npz", True)
assert len(list_of_cropped_npz_files) != 0, "set list of files first"
maybe_mkdir_p(output_folder)
all_args = []
num_stages = len(target_spacings)
# we need to know which classes are present in this dataset so that we can precompute where these classes are
# located. This is needed for oversampling foreground
all_classes = load_pickle(join(input_folder_with_cropped_npz, 'dataset_properties.pkl'))['all_classes']
for i in range(num_stages):
output_folder_stage = os.path.join(output_folder, data_identifier + "_stage%d" % i)
maybe_mkdir_p(output_folder_stage)
spacing = target_spacings[i]
for j, case in enumerate(list_of_cropped_npz_files):
case_identifier = get_case_identifier_from_npz(case)
args = spacing, case_identifier, output_folder_stage, input_folder_with_cropped_npz, force_separate_z, all_classes
all_args.append(args)
p = Pool(num_threads)
p.starmap(self._run_internal, all_args)
p.close()
p.join()
def resample_and_normalize(self, data, target_spacing, properties, seg=None, force_separate_z=None):
original_spacing_transposed = np.array(properties["original_spacing"])[self.transpose_forward]
before = {
'spacing': properties["original_spacing"],
'spacing_transposed': original_spacing_transposed,
'data.shape (data is transposed)': data.shape
}
target_spacing[0] = original_spacing_transposed[0]
data, seg = resample_patient(data, seg, np.array(original_spacing_transposed), target_spacing, 3, 1,
force_separate_z=force_separate_z, order_z_data=0, order_z_seg=0,
separate_z_anisotropy_threshold=self.resample_separate_z_anisotropy_threshold)
after = {
'spacing': target_spacing,
'data.shape (data is resampled)': data.shape
}
print("before:", before, "\nafter: ", after, "\n")
if seg is not None: # hippocampus 243 has one voxel with -2 as label. wtf?
seg[seg < -1] = 0
properties["size_after_resampling"] = data[0].shape
properties["spacing_after_resampling"] = target_spacing
use_nonzero_mask = self.use_nonzero_mask
assert len(self.normalization_scheme_per_modality) == len(data), "self.normalization_scheme_per_modality " \
"must have as many entries as data has " \
"modalities"
assert len(self.use_nonzero_mask) == len(data), "self.use_nonzero_mask must have as many entries as data" \
" has modalities"
print("normalization...")
for c in range(len(data)):
scheme = self.normalization_scheme_per_modality[c]
if scheme == "CT":
# clip to lb and ub from train data foreground and use foreground mn and sd from training data
assert self.intensityproperties is not None, "ERROR: if there is a CT then we need intensity properties"
mean_intensity = self.intensityproperties[c]['mean']
std_intensity = self.intensityproperties[c]['sd']
lower_bound = self.intensityproperties[c]['percentile_00_5']
upper_bound = self.intensityproperties[c]['percentile_99_5']
data[c] = np.clip(data[c], lower_bound, upper_bound)
data[c] = (data[c] - mean_intensity) / std_intensity
if use_nonzero_mask[c]:
data[c][seg[-1] < 0] = 0
elif scheme == "CT2":
# clip to lb and ub from train data foreground, use mn and sd form each case for normalization
assert self.intensityproperties is not None, "ERROR: if there is a CT then we need intensity properties"
lower_bound = self.intensityproperties[c]['percentile_00_5']
upper_bound = self.intensityproperties[c]['percentile_99_5']
mask = (data[c] > lower_bound) & (data[c] < upper_bound)
data[c] = np.clip(data[c], lower_bound, upper_bound)
mn = data[c][mask].mean()
sd = data[c][mask].std()
data[c] = (data[c] - mn) / sd
if use_nonzero_mask[c]:
data[c][seg[-1] < 0] = 0
else:
if use_nonzero_mask[c]:
mask = seg[-1] >= 0
else:
mask = np.ones(seg.shape[1:], dtype=bool)
data[c][mask] = (data[c][mask] - data[c][mask].mean()) / (data[c][mask].std() + 1e-8)
data[c][mask == 0] = 0
print("normalization done")
return data, seg, properties
class PreprocessorFor3D_NoResampling(GenericPreprocessor):
def resample_and_normalize(self, data, target_spacing, properties, seg=None, force_separate_z=None):
"""
if target_spacing[0] is None or nan we use original_spacing_transposed[0] (no resampling along z)
:param data:
:param target_spacing:
:param properties:
:param seg:
:param force_separate_z:
:return:
"""
original_spacing_transposed = np.array(properties["original_spacing"])[self.transpose_forward]
before = {
'spacing': properties["original_spacing"],
'spacing_transposed': original_spacing_transposed,
'data.shape (data is transposed)': data.shape
}
# remove nans
data[np.isnan(data)] = 0
target_spacing = deepcopy(original_spacing_transposed)
# print(target_spacing, original_spacing_transposed)
data, seg = resample_patient(data, seg, np.array(original_spacing_transposed), target_spacing, 3, 1,
force_separate_z=force_separate_z, order_z_data=0, order_z_seg=0,
separate_z_anisotropy_threshold=self.resample_separate_z_anisotropy_threshold)
after = {
'spacing': target_spacing,
'data.shape (data is resampled)': data.shape
}
st = "before:" + str(before) + '\nafter' + str(after) + "\n"
print(st)
if seg is not None: # hippocampus 243 has one voxel with -2 as label. wtf?
seg[seg < -1] = 0
properties["size_after_resampling"] = data[0].shape
properties["spacing_after_resampling"] = target_spacing
use_nonzero_mask = self.use_nonzero_mask
assert len(self.normalization_scheme_per_modality) == len(data), "self.normalization_scheme_per_modality " \
"must have as many entries as data has " \
"modalities"
assert len(self.use_nonzero_mask) == len(data), "self.use_nonzero_mask must have as many entries as data" \
" has modalities"
for c in range(len(data)):
scheme = self.normalization_scheme_per_modality[c]
if scheme == "CT":
# clip to lb and ub from train data foreground and use foreground mn and sd from training data
assert self.intensityproperties is not None, "ERROR: if there is a CT then we need intensity properties"
mean_intensity = self.intensityproperties[c]['mean']
std_intensity = self.intensityproperties[c]['sd']
lower_bound = self.intensityproperties[c]['percentile_00_5']
upper_bound = self.intensityproperties[c]['percentile_99_5']
data[c] = np.clip(data[c], lower_bound, upper_bound)
data[c] = (data[c] - mean_intensity) / std_intensity
if use_nonzero_mask[c]:
data[c][seg[-1] < 0] = 0
elif scheme == "CT2":
# clip to lb and ub from train data foreground, use mn and sd from each case for normalization
assert self.intensityproperties is not None, "ERROR: if there is a CT then we need intensity properties"
lower_bound = self.intensityproperties[c]['percentile_00_5']
upper_bound = self.intensityproperties[c]['percentile_99_5']
mask = (data[c] > lower_bound) & (data[c] < upper_bound)
data[c] = np.clip(data[c], lower_bound, upper_bound)
mn = data[c][mask].mean()
sd = data[c][mask].std()
data[c] = (data[c] - mn) / sd
if use_nonzero_mask[c]:
data[c][seg[-1] < 0] = 0
else:
if use_nonzero_mask[c]:
mask = seg[-1] >= 0
else:
mask = np.ones(seg.shape[1:], dtype=bool)
data[c][mask] = (data[c][mask] - data[c][mask].mean()) / (data[c][mask].std() + 1e-8)
data[c][mask == 0] = 0
return data, seg, properties
class PreprocessorFor2D_noNormalization(GenericPreprocessor):
def resample_and_normalize(self, data, target_spacing, properties, seg=None, force_separate_z=None):
original_spacing_transposed = np.array(properties["original_spacing"])[self.transpose_forward]
before = {
'spacing': properties["original_spacing"],
'spacing_transposed': original_spacing_transposed,
'data.shape (data is transposed)': data.shape
}
target_spacing[0] = original_spacing_transposed[0]
data, seg = resample_patient(data, seg, np.array(original_spacing_transposed), target_spacing, 3, 1,
force_separate_z=force_separate_z, order_z_data=0, order_z_seg=0,
separate_z_anisotropy_threshold=self.resample_separate_z_anisotropy_threshold)
after = {
'spacing': target_spacing,
'data.shape (data is resampled)': data.shape
}
print("before:", before, "\nafter: ", after, "\n")
if seg is not None: # hippocampus 243 has one voxel with -2 as label. wtf?
seg[seg < -1] = 0
properties["size_after_resampling"] = data[0].shape
properties["spacing_after_resampling"] = target_spacing
use_nonzero_mask = self.use_nonzero_mask
assert len(self.normalization_scheme_per_modality) == len(data), "self.normalization_scheme_per_modality " \
"must have as many entries as data has " \
"modalities"
assert len(self.use_nonzero_mask) == len(data), "self.use_nonzero_mask must have as many entries as data" \
" has modalities"
return data, seg, properties
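# Illustrative sketch (not part of the original class hierarchy): the "CT"
# normalization scheme used by the methods above, isolated as a plain helper.
# The intensity_props keys mirror self.intensityproperties; the helper name and
# everything else here are assumptions for demonstration only (relies on the
# module's existing `import numpy as np`).
def _ct_normalize_sketch(channel, intensity_props):
    # clip to the training-set foreground 0.5 / 99.5 percentiles, then z-score
    # with the foreground mean / sd collected from the training data
    clipped = np.clip(channel, intensity_props['percentile_00_5'],
                      intensity_props['percentile_99_5'])
    return (clipped - intensity_props['mean']) / intensity_props['sd']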
|
py | 1a3cf3b83200af6a4b466b47674bb8d2de2e5741 | """Templates for the policy_sentry YML files.
These can be used for generating policies
"""
ACTIONS_TEMPLATE = """mode: actions
name: ''
actions:
- ''
"""
CRUD_TEMPLATE = """mode: crud
name: ''
# Specify resource ARNs
read:
- ''
write:
- ''
list:
- ''
tagging:
- ''
permissions-management:
- ''
# Skip resource constraint requirements by listing actions here.
skip-resource-constraints:
- ''
# Actions that do not support resource constraints
wildcard-only:
single-actions: # standalone actions
- ''
# Service-wide - like 's3' or 'ec2'
service-read:
- ''
service-write:
- ''
service-list:
- ''
service-tagging:
- ''
service-permissions-management:
- ''
"""
CRUD_TEMPLATE_DICT = {
"mode": "crud",
"name": "",
"read": [],
"write": [],
"list": [],
"tagging": [],
"permissions-management": [],
"skip-resource-constraints": [],
"wildcard-only": {
"single-actions": [],
"service-read": [],
"service-write": [],
"service-list": [],
"service-tagging": [],
"service-permissions-management": [],
},
}
ACTIONS_TEMPLATE_DICT = {"mode": "actions", "name": "", "actions": []}
def create_crud_template():
"""Generate the CRUD YML Template """
return CRUD_TEMPLATE
def create_actions_template():
"""Generate the Actions YML template"""
return ACTIONS_TEMPLATE
def get_crud_template_dict():
"""Generate the CRUD template in dict format"""
return CRUD_TEMPLATE_DICT
def get_actions_template_dict():
"""Get the Actions template in dict format."""
return ACTIONS_TEMPLATE_DICT
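# Illustrative usage (not part of the original module): the templates above are
# plain YAML strings, so they can be parsed, filled in, and dumped back out.
# PyYAML and the example bucket ARN are assumptions for demonstration only.
if __name__ == "__main__":
    import yaml

    template = yaml.safe_load(create_crud_template())
    template["name"] = "example-policy"
    template["read"] = ["arn:aws:s3:::example-bucket"]  # hypothetical ARN
    print(yaml.dump(template, default_flow_style=False))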
|
py | 1a3cf426e9a5590509600b31b96f19f3ffb331bb | from textstyle.en.stylometry.style_features import get_basic_style_features
def test_get_basic_style_features():
text_corpus = [
"I like to eat broccoli and bananas.",
"I ate a banana and spinach smoothie for breakfast.",
"Chinchillas and kittens are cute.",
"My sister adopted a kitten yesterday.",
"Look at this cute hamster munching on a piece of broccoli."
]
words_count = [7, 9, 5, 6, 11]
chars_count = [35, 50, 33, 37, 58]
capital_chars_count = [1, 1, 1, 1, 1]
lower_chars_count = [27, 40, 27, 30, 46]
punc_count = [1, 1, 1, 1, 1]
stopwords_count = [2, 3, 2, 1, 5]
nouns_count = [2, 4, 2, 3, 5]
verbs_count = [2, 1, 1, 1, 1]
pascal_case_count = [1, 1, 1, 1, 1]
all_capital_case_count = [1, 1, 0, 0, 0]
inter_count = [0, 0, 0, 0, 0]
results = get_basic_style_features(text_corpus)
assert len(results) == 12
assert results['words_count'] == words_count
assert results['chars_count'] == chars_count
assert results['capital_chars_count'] == capital_chars_count
assert results['lower_chars_count'] == lower_chars_count
assert results['punc_count'] == punc_count
assert results['stopwords_count'] == stopwords_count
assert results['nouns_count'] == nouns_count
assert results['verbs_count'] == verbs_count
assert results['pascal_case_count'] == pascal_case_count
assert results['all_capital_case_count'] == all_capital_case_count
assert results['interruptions_count'] == inter_count
|
py | 1a3cf484e1f725e7ee446261d391911261093824 | #!/usr/bin/env python
import pygame
import rospy
def sonido(x,y):
# build the matrices as nested lists (range objects are not assignable in Python 3)
matriz = [[None] * 200 for i in range(200)]
matrizexhortador = [[None] * 67 for i in range(67)]
matrizextricto = [[None] * 67 for i in range(67)]
matrizorientador = [[None] * 66 for i in range(67)]
matrizinsatisfecho = [[None] * 67 for i in range(67)]
matrizneutro = [[None] * 67 for i in range(67)]
matrizcomplacido = [[None] * 66 for i in range(67)]
matrizdudoso = [[None] * 67 for i in range(66)]
matrizinteresado = [[None] * 67 for i in range(66)]
matrizalentador = [[None] * 66 for i in range(66)]
for i in range(len(matrizexhortador)):
for j in range(len(matrizexhortador[1])):
if i<16 and j<16:
matrizexhortador[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Exhortador/Exhortador4.wav"
matriz[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Exhortador/Exhortador4.wav"
elif i<32 and j<32:
matrizexhortador[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Exhortador/Exhortador3.wav"
matriz[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Exhortador/Exhortador3.wav"
elif i<49 and j<49:
matrizexhortador[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Exhortador/Exhortador2.wav"
matriz[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Exhortador/Exhortador2.wav"
elif i<67 and j<67:
matrizexhortador[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Exhortador/Exhortador1.wav"
matriz[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Exhortador/Exhortador1.wav"
for i in range(len(matrizextricto)):
for j in range(len(matrizextricto[1])):
if i<16:
matrizextricto[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Extricto/Extricto4.wav"
matriz[i][j+67]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Extricto/Extricto4.wav"
elif i<32:
matrizextricto[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Extricto/Extricto3.wav"
matriz[i][j+67]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Extricto/Extricto3.wav"
elif i<49:
matrizextricto[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Extricto/Extricto2.wav"
matriz[i][j+67]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Extricto/Extricto2.wav"
elif i<67:
matrizextricto[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Extricto/Extricto1.wav"
matriz[i][j+67]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Extricto/Extricto1.wav"
for i in range(len(matrizorientador)):
for j in range(len(matrizorientador[1])):
if j>49 and i<16:
matrizorientador[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Orientador/Orientador4.wav"
matriz[i][j+134]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Orientador/Orientador4.wav"
elif j>32 and i<32:
matrizorientador[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Orientador/Orientador3.wav"
matriz[i][j+134]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Orientador/Orientador3.wav"
elif j>16 and i<49:
matrizorientador[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Orientador/Orientador2.wav"
matriz[i][j+134]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Orientador/Orientador2.wav"
elif j>=0 and i<67:
matrizorientador[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Orientador/Orientador1.wav"
matriz[i][j+134]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Orientador/Orientador1.wav"
for i in range(len(matrizinsatisfecho)):
for j in range(len(matrizinsatisfecho[1])):
if j<16:
matrizinsatisfecho[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Insatisfecho/Insatisfecho4.wav"
matriz[i+67][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Insatisfecho/Insatisfecho4.wav"
elif j<32:
matrizinsatisfecho[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Insatisfecho/Insatisfecho3.wav"
matriz[i+67][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Insatisfecho/Insatisfecho3.wav"
elif j<49:
matrizinsatisfecho[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Insatisfecho/Insatisfecho2.wav"
matriz[i+67][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Insatisfecho/Insatisfecho2.wav"
elif j<66:
matrizinsatisfecho[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Insatisfecho/Insatisfecho1.wav"
matriz[i+67][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Insatisfecho/Insatisfecho1.wav"
for i in range(len(matrizneutro)):
for j in range(len(matrizneutro[1])):
if j<33:
matrizneutro[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Neutro/Neutro1.wav"
matriz[i+67][j+67]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Neutro/Neutro1.wav"
elif j<67:
matrizneutro[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Neutro/Neutro2.wav"
matriz[i+67][j+67]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Neutro/Neutro2.wav"
for i in range(len(matrizcomplacido)):
for j in range(len(matrizcomplacido[1])):
if j<16:
matrizcomplacido[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Complacido/Complacido1.wav"
matriz[i+67][j+134]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Complacido/Complacido1.wav"
elif j<32:
matrizcomplacido[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Complacido/Complacido2.wav"
matriz[i+67][j+134]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Complacido/Complacido2.wav"
elif j<49:
matrizcomplacido[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Complacido/Complacido3.wav"
matriz[i+67][j+134]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Complacido/Complacido3.wav"
elif j<66:
matrizcomplacido[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Complacido/Complacido4.wav"
matriz[i+67][j+134]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Complacido/Complacido4.wav"
for i in range(len(matrizdudoso)):
for j in range(len(matrizdudoso[1])):
if j<16 and i>49:
matrizdudoso[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Dudoso/Dudoso4.wav"
matriz[i+134][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Dudoso/Dudoso4.wav"
elif j<32 and i>32:
matrizdudoso[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Dudoso/Dudoso3.wav"
matriz[i+134][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Dudoso/Dudoso3.wav"
elif j<49 and i>16:
matrizdudoso[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Dudoso/Dudoso2.wav"
matriz[i+134][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Dudoso/Dudoso2.wav"
elif j<67 and i>=0:
matrizdudoso[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Dudoso/Dudoso1.wav"
matriz[i+134][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Dudoso/Dudoso1.wav"
for i in range(len(matrizinteresado)):
for j in range(len(matrizinteresado[1])):
if i<16:
matrizinteresado[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Interesado/Interesado1.wav"
matriz[i+134][j+67]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Interesado/Interesado1.wav"
elif i<32:
matrizinteresado[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Interesado/Interesado2.wav"
matriz[i+134][j+67]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Interesado/Interesado2.wav"
elif i<49:
matrizinteresado[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Interesado/Interesado3.wav"
matriz[i+134][j+67]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Interesado/Interesado3.wav"
elif i<66:
matrizinteresado[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Interesado/Interesado4.wav"
matriz[i+134][j+67]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Interesado/Interesado4.wav"
for i in range(len(matrizalentador)):
for j in range(len(matrizalentador[1])):
if j>49 and i>49:
matrizalentador[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Alentador/Alentador4.wav"
matriz[i+134][j+134]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Alentador/Alentador4.wav"
elif j>32 and i>32:
matrizalentador[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Alentador/Alentador3.wav"
matriz[i+134][j+134]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Alentador/Alentador3.wav"
elif j>16 and i>16:
matrizalentador[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Alentador/Alentador2.wav"
matriz[i+134][j+134]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Alentador/Alentador2.wav"
elif j>=0 and i>=0:
matrizalentador[i][j]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Alentador/Alentador1.wav"
matriz[i+134][j+134]="/home/z420/ros_ws/src/jp_baxtertry1/share/Sounds/Alentador/Alentador1.wav"
x=int(x/2)
y=int(y/2)
print(matriz[y][x])
return matriz,y,x
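# Illustrative usage (not part of the original script): look up the .wav path
# for a screen coordinate and play it with pygame's mixer. The coordinates and
# the 2-second wait are arbitrary example values.
def reproducir_ejemplo(x=250, y=180):
    matriz, fila, columna = sonido(x, y)
    pygame.mixer.init()
    pygame.mixer.Sound(matriz[fila][columna]).play()
    pygame.time.wait(2000)  # give the clip time to finish before returning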
|
py | 1a3cf56eefebde4aa8a16b9a9817beab9467dad4 | # Generated by Django 2.1.15 on 2021-02-12 15:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0004_auto_20210212_1158'),
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
py | 1a3cf75090cf6d3056dd17039c1103ab5971ad1e | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetCustomerGatewayResult:
"""
A collection of values returned by getCustomerGateway.
"""
def __init__(__self__, bgp_asn=None, filters=None, id=None, ip_address=None, tags=None, type=None):
if bgp_asn and not isinstance(bgp_asn, float):
raise TypeError("Expected argument 'bgp_asn' to be a float")
__self__.bgp_asn = bgp_asn
"""
(Optional) The gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN).
"""
if filters and not isinstance(filters, list):
raise TypeError("Expected argument 'filters' to be a list")
__self__.filters = filters
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
if ip_address and not isinstance(ip_address, str):
raise TypeError("Expected argument 'ip_address' to be a str")
__self__.ip_address = ip_address
"""
(Optional) The IP address of the gateway's Internet-routable external interface.
"""
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
__self__.tags = tags
"""
Map of key-value pairs assigned to the gateway.
"""
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
__self__.type = type
"""
(Optional) The type of customer gateway. The only type AWS supports at this time is "ipsec.1".
"""
class AwaitableGetCustomerGatewayResult(GetCustomerGatewayResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetCustomerGatewayResult(
bgp_asn=self.bgp_asn,
filters=self.filters,
id=self.id,
ip_address=self.ip_address,
tags=self.tags,
type=self.type)
def get_customer_gateway(filters=None,id=None,tags=None,opts=None):
"""
Get an existing AWS Customer Gateway.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
foo = aws.ec2.get_customer_gateway(filters=[{
"name": "tag:Name",
"values": ["foo-prod"],
}])
main = aws.ec2.VpnGateway("main",
amazon_side_asn=7224,
vpc_id=aws_vpc["main"]["id"])
transit = aws.ec2.VpnConnection("transit",
customer_gateway_id=foo.id,
static_routes_only=False,
type=foo.type,
vpn_gateway_id=main.id)
```
:param list filters: One or more [name-value pairs][dcg-filters] to filter by.
:param str id: The ID of the gateway.
:param dict tags: Map of key-value pairs assigned to the gateway.
The **filters** object supports the following:
* `name` (`str`)
* `values` (`list`)
"""
__args__ = dict()
__args__['filters'] = filters
__args__['id'] = id
__args__['tags'] = tags
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:ec2/getCustomerGateway:getCustomerGateway', __args__, opts=opts).value
return AwaitableGetCustomerGatewayResult(
bgp_asn=__ret__.get('bgpAsn'),
filters=__ret__.get('filters'),
id=__ret__.get('id'),
ip_address=__ret__.get('ipAddress'),
tags=__ret__.get('tags'),
type=__ret__.get('type'))
|
py | 1a3cf8a24e2f5eeb8a905e46277e23917dbf1b52 | from pyxlsxfunctions.date_time.core import NOW, TODAY, DAYS, EOMONTH, EDATE
# Unit tests for NOW function
def test_NOW():
assert type(NOW()) == type('known string')
# Unit tests for TODAY function
def test_TODAY():
assert type(TODAY()) == type('known string')
# Unit tests for DAYS function
def test_DAYS_1():
assert DAYS('2016-01-01', '2016-01-02') == 1
def test_DAYS_2():
assert DAYS('2016-01-01', '2016-01-05') == 4
def test_DAYS_3():
assert DAYS('20160101', '2016-01-02') == None
def test_DAYS_4():
assert DAYS(0, '2016-01-02') == None
# Unit tests for EOMONTH function
def test_EOMONTH_1():
assert EOMONTH('2016-01-01', 0) == '2016-01-31'
def test_EOMONTH_2():
assert EOMONTH('2017-01-03', -1) == '2016-12-31'
def test_EOMONTH_3():
assert EOMONTH('2017-01-03', 3) == '2017-04-30'
def test_EOMONTH_4():
assert EOMONTH('hi', 1) == None
def test_EOMONTH_5():
assert EOMONTH(['2017-03-01', '2020-09-29'], 0) == ['2017-03-31', '2020-09-30']
def test_EOMONTH_6():
assert EOMONTH(1, 1) == None
def test_EOMONTH_7():
assert EOMONTH(['2017-03-01', 'hi'], 0) == None
# Unit tests for EDATE function
def test_EDATE_1():
assert EDATE('2016-01-01', 0) == '2016-01-01'
def test_EDATE_2():
assert EDATE('2017-01-03', -1) == '2016-12-03'
def test_EDATE_3():
assert EDATE('2017-01-03', 3) == '2017-04-03'
def test_EDATE_4():
assert EDATE('hi', 1) == None
def test_EDATE_5():
assert EDATE(['2017-03-01', '2020-09-29'], 0) == ['2017-03-01', '2020-09-29']
def test_EDATE_6():
assert EDATE(1, 1) == None
def test_EDATE_7():
assert EDATE(['2017-03-01', 'hi'], 0) == None
def test_EDATE_8():
assert EDATE('2017-03-31', -1) == '2017-02-28'
def test_EDATE_9():
assert EDATE(['2017-03-31', '2018-04-28'], -1) == ['2017-02-28', '2018-03-28']
|
py | 1a3cf9f37147877fcc1f91be73f88428bb91d8f8 | import torch
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from matplotlib import pyplot as plt
from DataLoaders import MNIST_Loaders
from Network import Net
import utils
import AttackTools
import optimizers
import copy
import os #to make dir
def opt_setup(Otype, param):
if 'r' not in param:
param['r'] = 1.
if 'budget' not in param:
param['budget'] = 10000
if 'target_fval' not in param:
param['target_fval'] = None
if 'constrained' not in param:
param['constrained'] = False
if 'metric' not in param:
param['metric'] = 'inf'
if Otype.upper() == 'RING':
if 'useFIM' not in param:
param['useFIM'] = False
if 'm' not in param:
param['m'] = 8
if 'num_blocks' not in param:
param['num_blocks'] = 49*2
if 'BweightOption' not in param:
param['BweightOption'] = None
param['useFIM'] = True
opt = optimizers.RING(param, y0 = None,f = None)
elif Otype.upper() == 'SHIPS':
param['useFIM'] = False
opt = optimizers.RING(param, y0 = None,f = None) # param['useFIM'] = False
elif Otype.upper() == 'CARS' or Otype.upper() == 'ZOO':
opt = optimizers.CARS(param, y0 = None,f = None)
elif Otype.upper() == 'SQUARE':
opt = optimizers.SquareATK(param, y0 = None, f = None)
return opt
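# Illustrative helper (not part of the original file): opt_setup only fills in
# missing keys, so a minimal param dict is enough. The numbers below are
# arbitrary example values, not recommended settings.
def _opt_setup_example():
    example_param = {'budget': 5000, 'metric': 'inf', 'constrained': True}
    # 'r', 'target_fval', etc. are filled in with their defaults inside opt_setup
    return opt_setup('CARS', example_param)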
def vec2Img(vec):
ImageSize = [28, 28] #image size for MNIST
return vec.reshape(ImageSize)
class result:
def __init__(self):
self.name = None # optimizer's name
self.isol = None # initial sol
self.ilbl = None # initial label
self.tsol = None # terminal sol
self.tlbl = None # terminal label
self.fevals = None # function evaluations
self.niter = None # number of iterations
self.status = None # status after test
self.performance_log = None # performance log
self.CARScounter = None # if available
def saveResult(self, name, opt, performance_log = None):
self.name = name
self.isol = opt.Atk.data
self.ilbl = opt.Atk.label
self.tsol = opt.ximg
self.tlbl = opt.Atk.curr_lbl(self.tsol)
self.fevals = opt.function_evals
self.niter = opt.t
self.status = opt.status
self.performance_log = performance_log
if hasattr(opt, 'CARScounter'):
self.CARScounter = opt.CARScounter
def showBriefResult(self):
if hasattr(self, 'CARScounter'):
print(f'opt = {self.name}, niter = {self.niter}, CARScounter = {self.CARScounter}')
print(f'\tfunction evaluations = {self.fevals}, status = {self.status}')
print(f'\tori lbl = {self.ilbl}, curr lbl = {self.tlbl}')
def plotResult(self, cmap = None):
# assume a proper subplot is already set
if cmap == None:
plt.imshow(vec2Img(self.tsol))
else:
plt.imshow(vec2Img(self.tsol), cmap = cmap)
plt.title(f'{self.tlbl}')
# plt.title(f'lbl ({self.name}) = {self.tlbl}')
def save2file(self, f, tid, delim = '\t'):
'''
saves only
opt name, testset_id, orig label, final label, num_evals
file name:
(opt_name).csv
content:
each row consists of:
[testset_id, orig_label, final_label, num_evals, status]
here status = 'B'(budget reached), 'T'(target reached), or 'S'(attack succeeded)
@param
f ..... file object (open, append mode)
tid ... testset_id
'''
f.write(f'{tid}{delim}{self.ilbl}{delim}{self.tlbl}{delim}{self.fevals}{delim}{self.status}\n')
class Tester:
'''
A test suite, containing the set of test data and set of optimizers
Usage:
1. Set data
2. Set optimizers
3. Run .run_single_test(label, target_label)
'''
def __init__(self, atk_test_set, atk, opts = None, options = None, tid = 0):
"""
(inputs)
atk_test_set .. dict
(*this)[label] = image data in a vector, whose label is "label"
atk ........... AttackTools object
opts .......... dict
a dict of optimizers to test
key = optimizer's name (possibly with options)
options ....... dict
['normalize'] --> images will be normalized to have pixel values btw 0 and 1
(attributes)
res ........... dict of dict of result class
res[optname][lbl] = Attack Results containing
- initial / terminal solutions
- initial / terminal labels
- function_evals
- number of iterations
- status
- CARScounter (if available)
- performance log
"""
self.atk = atk
self.atk_test_set = atk_test_set
if opts != None:
self.setopts(opts)
self.tid = tid
if 'normalize' in options:
if options['normalize']:
self.normalize_data()
if 'metric' in options:
self.metric = options['metric']
if 'constrained' in options:
self.constrained = options['constrained']
def setopts(self, opts):
self.opts = opts
self.res = {} # will be a dict(key: opt) of dict(key: labels) of status
for optname in opts:
self.res[optname] = {}
def addopts(self, added):
self.opts = {**self.opts, **added} # merge two dictionaries
for optname in added:
self.res[optname] = {}
def normalize_data(self):
# re-normalize the images to have pixel values in [0, 1]
max_pixel_val = max([ np.max(self.atk_test_set[lbl]) for lbl in self.atk_test_set])
min_pixel_val = min([ np.min(self.atk_test_set[lbl]) for lbl in self.atk_test_set])
for lbl in self.atk_test_set:
self.atk_test_set[lbl] = (self.atk_test_set[lbl]-min_pixel_val)/(max_pixel_val-min_pixel_val)
def run_single_test(self, label, selected_opts = None, target_lbl = None, verbal = 2):
# verbal option: 0 --> no output
# 1 --> print when the attack starts/finishes
# 2 --> 1 + CARS/distortion summary (3 --> 2 + per-iteration function values)
if label not in self.atk_test_set:
print(f'{label} is not a valid label in this attak test set.')
self.atk.setdata(self.atk_test_set[label])
self.atk.setlabel(label)
self.atk.target_lbl = target_lbl
self.atk.metric = self.metric
self.atk.constrained = self.constrained
# self.atk.settargetlabel(target_lbl)
if selected_opts == None:
opts = self.opts
else:
opts = selected_opts
for oname in opts:
self.res[oname][label] = result()
# setup
opt = copy.deepcopy(self.opts[oname]) # to reset everytime
# otherwise the shallow copied opt may alter the original opt object
opt.setAtkAll( Atk = self.atk,
y0 = self.atk_test_set[label],
f = lambda x:self.atk(x) )
performance_log = []
status = None
# verbal
if verbal > 0:
if target_lbl != None:
print(f'start a targeted atk ({oname}) on lbl = {label}, target lbl = {self.atk.target_lbl}')
else:
print(f'\t[{label}]', end='\t')#start an untargeted atk ({oname}) on lbl = {label}')
# actual attack starts here
while status == None:
# one iteration
evals, _xfinal, status = opt.step()
# logging
performance_log.append( [evals, opt.fval])
# print
if verbal > 2:
if opt.t % int(10*np.log(opt.t+10)) == 0:
opt.report('f(x): %f F-evals: %d\n' %
(opt.fval, evals) )
# logging
self.res[oname][label].saveResult(oname, opt, performance_log)
if verbal>1:
if opt.t>0:
if opt.Otype != 'SQUARE':
print(f"CARS: {opt.CARScounter},\tCVX = {opt.cvx_counter/opt.t*100:.1f} %", end='\t')
# print(f"CVX counter: {opt.cvx_counter}")
# print(f"Final distortion: {opt.Atk.dist_from_orig(opt.x)}")
l2_dist = np.sum((_xfinal - opt.xinit) ** 2)  # squared L2 distortion, computed once
print(f"distortion (L2) = {l2_dist}", end='\t')
print(f"(Linf) = {np.amax(_xfinal-opt.xinit):.2f}", end='\t')
if verbal > 0:
print( f"#iter = {opt.t} (#eval = {evals}),\t final status = {status}")
def display_single_res(self, label, opts_names_to_disp = None, cmap = None, title = None,
onlyImg = False, onlyLoss = False, save = None, show = False, logplot = True, savedir = None):
if title == None:
title = 'RING for MNIST ATK'
if onlyImg == False and onlyLoss == False:
plt.subplot(2,1,1)
plt.cla()
legends = []
if opts_names_to_disp == None: # default: display all
opts_names_to_disp = self.opts # only need the names (keys of the dict)
for oname in opts_names_to_disp:
if logplot:
plt.plot(np.array(self.res[oname][label].performance_log)[:,0],
np.log10(np.array(self.res[oname][label].performance_log)[:,1]), linewidth=1, label = oname)
else:
plt.plot(np.array(self.res[oname][label].performance_log)[:,0],
(np.array(self.res[oname][label].performance_log)[:,1]), linewidth=1, label = oname)
legends.append(oname)
plt.title(title)
plt.xlabel('function evaluations')
plt.ylabel('$log($ f $)$')
plt.legend(legends)
nopts = len(opts_names_to_disp)
plotnum = 1
# show original image
plt.subplot(2,nopts+1,nopts+2)
if cmap == None:
plt.imshow(vec2Img(self.atk_test_set[label]))
else:
plt.imshow(vec2Img(self.atk_test_set[label]), cmap = cmap)
plt.title(f'original label = {label}')
# show attacked
for oname in opts_names_to_disp:
plt.subplot(2, nopts+1, nopts+2 + plotnum)
self.res[oname][label].showBriefResult()
self.res[oname][label].plotResult(cmap)
plotnum += 1
plt.tight_layout(pad = 1.0)
if show:
plt.show()
elif onlyImg == True: # plot only images
nopts = len(opts_names_to_disp)
# show original image
# set number of rows
if nopts < 8:
nr = 2
elif nopts < 12:
nr = 3
else:
nr = 4
nc = int(np.ceil((nopts+1)/nr))
plt.subplot(nr,nc,1)
if cmap == None:
plt.imshow(vec2Img(self.atk_test_set[label]))
else:
plt.imshow(vec2Img(self.atk_test_set[label]), cmap = cmap)
plt.title(f'original label = {label}')
# show attacked
plotnum = 2
for oname in opts_names_to_disp:
plt.subplot(nr, nc, plotnum)
self.res[oname][label].showBriefResult()
self.res[oname][label].plotResult(cmap)
plotnum += 1
plt.tight_layout(pad = 1.0)
if show:
plt.show()
elif onlyLoss == True: # plot only loss
legends = []
if opts_names_to_disp == None: # default: display all
opts_names_to_disp = self.opts # only need the names (keys of the dict)
for oname in opts_names_to_disp:
plt.plot(np.array(self.res[oname][label].performance_log)[:,0],
np.log10(np.array(self.res[oname][label].performance_log)[:,1]), linewidth=1, label = oname)
legends.append(oname)
plt.title(title)
plt.xlabel('function evaluations')
plt.ylabel('$log($ f $)$')
plt.legend(legends)
if show:
plt.show()
if save != None:
if savedir != None:
if not os.path.exists(savedir):
os.makedirs(savedir)
save = savedir+save # add directory to the file name
plt.savefig(save)
def save_res_simple(self, testset_id, label, opts_names_to_save = None, subdir = 'Res'):
'''
results are saved in the subdir folder (the folder will be created if not exsits)
1. saves the brief result
file name: (opt_name).csv
content: each row consists of:
[testset_id, orig_label, final_label, num_evals]
2. also saves the original/attacked images as 28*28 numpy array
original img name: (label)_(testset_id).npy
attacked img name: (label)_(testset_id)_(opt_name)_(final_label).npy
'''
if opts_names_to_save == None: # default: save all
opts_names_to_save = self.opts # only need the names (keys of the dict)
save_orig = True  # save the original image only once per (label, testset_id)
for oname in opts_names_to_save:
res = self.res[oname][label]
if not os.path.exists(subdir):
os.makedirs(subdir)
fname = subdir + f'/{oname}.csv'
f = open(fname, 'a')
res.save2file(f=f, tid = testset_id, delim='\t')
f.close()
if save_orig:
subdir_img = 'img_'+ subdir
if not os.path.exists(subdir_img):
os.makedirs(subdir_img)
fname = subdir_img + f'/{label}_{testset_id}.npy'
np.save( fname, vec2Img(res.isol))
save_orig = False
fname = subdir_img + f'/{label}_{testset_id}_{oname}_{res.tlbl}.npy'
np.save( fname, vec2Img(res.tsol))
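# Illustrative driver (not part of the original file). The AttackTools object
# and the test-set layout are taken as given here; based on how Tester uses
# them above, atk_test_set maps a label to a flattened image vector. The budget
# and option values are arbitrary examples.
def _run_example_attack(atk, atk_test_set):
    opts = {
        'CARS': opt_setup('CARS', {'budget': 5000}),
        'SQUARE': opt_setup('SQUARE', {'budget': 5000}),
    }
    tester = Tester(atk_test_set, atk, opts=opts,
                    options={'normalize': True, 'metric': 'inf', 'constrained': False})
    for lbl in atk_test_set:
        tester.run_single_test(lbl, verbal=1)
        tester.save_res_simple(testset_id=tester.tid, label=lbl)
    return tester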
|
py | 1a3cfafacd9fc9cc0034381b677861642aa9891e | from argparse import ArgumentParser
def parse_args():
parser = ArgumentParser(
description='Kube-Hunter - hunts for security '
'weaknesses in Kubernetes clusters')
parser.add_argument(
'--list',
action="store_true",
help="Displays all tests in kubehunter "
"(add --active flag to see active tests)")
parser.add_argument(
'--interface',
action="store_true",
help="Set hunting on all network interfaces")
parser.add_argument(
'--pod',
action="store_true",
help="Set hunter as an insider pod")
parser.add_argument(
'--quick',
action="store_true",
help="Prefer quick scan (subnet 24)")
parser.add_argument(
'--include-patched-versions',
action="store_true",
help="Don't skip patched versions when scanning")
parser.add_argument(
'--cidr',
type=str,
help="Set an ip range to scan, example: 192.168.0.0/16")
parser.add_argument(
'--mapping',
action="store_true",
help="Outputs only a mapping of the cluster's nodes")
parser.add_argument(
'--remote',
nargs='+',
metavar="HOST",
default=list(),
help="One or more remote ip/dns to hunt")
parser.add_argument(
'--active',
action="store_true",
help="Enables active hunting")
parser.add_argument(
'--log',
type=str,
metavar="LOGLEVEL",
default='INFO',
help="Set log level, options are: debug, info, warn, none")
parser.add_argument(
'--report',
type=str,
default='plain',
help="Set report type, options are: plain, yaml, json")
parser.add_argument(
'--dispatch',
type=str,
default='stdout',
help="Where to send the report to, options are: "
"stdout, http (set KUBEHUNTER_HTTP_DISPATCH_URL and "
"KUBEHUNTER_HTTP_DISPATCH_METHOD environment variables to configure)")
parser.add_argument(
'--statistics',
action="store_true",
help="Show hunting statistics")
parser.add_argument(
'--network-timeout',
type=float,
default=5.0,
help="network operations timeout")
return parser.parse_args()
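# Illustrative usage (not part of the original module): wire the parsed --log
# value into the standard logging module. The level mapping below is an
# assumption; the real CLI may treat "none" and unknown levels differently.
def _configure_logging_example():
    import logging
    args = parse_args()
    if args.log.lower() != "none":
        logging.basicConfig(level=getattr(logging, args.log.upper(), logging.INFO))
    return args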
|
py | 1a3cfb5a64629b0c528b0776b9106c5848576ecc |
"""
=========================================================================
BlockingCacheFL.py
=========================================================================
A function level cache model which only passes cache requests and
responses to the memory
Author: Eric Tang (et396), Xiaoyu Yan (xy97)
Date: 23 December 2019
"""
import math
from pymtl3 import *
from mem_ifcs.MemMsg import MemMsgType
# Assumes 32 bit address and 32 bit data
#-------------------------------------------------------------------------
# Make messages
#-------------------------------------------------------------------------
def req( CacheReqType, type_, opaque, addr, len, data ):
# type_ as string
if type_ == 'rd': type_ = MemMsgType.READ
elif type_ == 'wr': type_ = MemMsgType.WRITE
elif type_ == 'in': type_ = MemMsgType.WRITE_INIT
elif type_ == 'ad': type_ = MemMsgType.AMO_ADD
elif type_ == 'an': type_ = MemMsgType.AMO_AND
elif type_ == 'or': type_ = MemMsgType.AMO_OR
elif type_ == 'sw': type_ = MemMsgType.AMO_SWAP
elif type_ == 'mi': type_ = MemMsgType.AMO_MIN
elif type_ == 'mu': type_ = MemMsgType.AMO_MINU
elif type_ == 'mx': type_ = MemMsgType.AMO_MAX
elif type_ == 'xu': type_ = MemMsgType.AMO_MAXU
elif type_ == 'xo': type_ = MemMsgType.AMO_XOR
elif type_ == 'inv': type_ = MemMsgType.INV
elif type_ == 'fl': type_ = MemMsgType.FLUSH
return CacheReqType( type_, opaque, addr, len, data )
def resp( CacheRespType, type_, opaque, test, len, data ):
if type_ == 'rd': type_ = MemMsgType.READ
elif type_ == 'wr': type_ = MemMsgType.WRITE
elif type_ == 'in': type_ = MemMsgType.WRITE_INIT
elif type_ == 'ad': type_ = MemMsgType.AMO_ADD
elif type_ == 'an': type_ = MemMsgType.AMO_AND
elif type_ == 'or': type_ = MemMsgType.AMO_OR
elif type_ == 'sw': type_ = MemMsgType.AMO_SWAP
elif type_ == 'mi': type_ = MemMsgType.AMO_MIN
elif type_ == 'mu': type_ = MemMsgType.AMO_MINU
elif type_ == 'mx': type_ = MemMsgType.AMO_MAX
elif type_ == 'xu': type_ = MemMsgType.AMO_MAXU
elif type_ == 'xo': type_ = MemMsgType.AMO_XOR
elif type_ == 'inv': type_ = MemMsgType.INV
elif type_ == 'fl': type_ = MemMsgType.FLUSH
return CacheRespType( type_, opaque, test, len, data )
#-------------------------------------------------------------------------
# Define AMO functions
#-------------------------------------------------------------------------
AMO_FUNS = { MemMsgType.AMO_ADD : lambda m,a : m+a,
MemMsgType.AMO_AND : lambda m,a : m&a,
MemMsgType.AMO_OR : lambda m,a : m|a,
MemMsgType.AMO_SWAP : lambda m,a : a,
MemMsgType.AMO_MIN : lambda m,a : m if m.int() < a.int() else a,
MemMsgType.AMO_MINU : min,
MemMsgType.AMO_MAX : lambda m,a : m if m.int() > a.int() else a,
MemMsgType.AMO_MAXU : max,
MemMsgType.AMO_XOR : lambda m,a : m^a,
}
#----------------------------------------------------------------------
# Enhanced random tests
#----------------------------------------------------------------------
# This set of random tests uses a cache model that properly tracks
# hits and misses, and should completely accurately model eviction
# behavior. The model is split up into a hit/miss tracker, and a
# transaction generator, so that the hit/miss tracker can be reused
# in an FL model
class HitMissTracker:
def __init__(self, size, nways, nbanks, linesize):
# Compute various sizes
self.nways = nways
self.linesize = linesize
self.nlines = int(size // linesize)
self.nsets = int(self.nlines // self.nways)
self.nbanks = nbanks
# Compute how the address is sliced
self.offset_start = 0
self.offset_end = self.offset_start + int(math.log(linesize//8, 2))
self.bank_start = self.offset_end
if nbanks > 0:
self.bank_end = self.bank_start + int(math.log(nbanks, 2))
else:
self.bank_end = self.bank_start
self.idx_start = self.bank_end
self.idx_end = self.idx_start + int(math.log(self.nsets, 2))
self.tag_start = self.idx_end
self.tag_end = 32
# Initialize the tag and valid array
# Both arrays are of the form line[idx][way]
# Note that line[idx] is a one-element array for a direct-mapped cache
self.line = []
self.valid = []
for n in range(self.nlines):
self.line.insert(n, [Bits(32, 0) for x in range(nways)])
self.valid.insert(n, [False for x in range(nways)])
# Initialize the LRU array
# Implemented as an array for each set index
# lru[idx][0] is the most recently used
# lru[idx][-1] is the least recently used
self.lru = []
for n in range(self.nsets):
self.lru.insert(n, [x for x in range(nways)])
# Generate the components of an address
# Ignores the bank bits, since they don't affect the behavior
# (and may not even exist)
def split_address(self, addr):
addr = Bits(32, addr)
offset = addr[self.offset_start:self.offset_end]
idx = addr[self.idx_start:self.idx_end]
tag = addr[self.tag_start:self.tag_end]
return (tag, idx, offset)
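# Worked example (illustration only, values chosen for demonstration): with a
# 4 KB cache passed in as size = 32768 bits, nways = 2 and 128-bit lines,
# nlines = 256 and nsets = 128, giving log2(16) = 4 offset bits and
# log2(128) = 7 index bits. An address then splits as addr[0:4] = offset,
# addr[4:11] = idx and addr[11:32] = tag (no bank bits when nbanks == 0).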
# Update the LRU status, given that a hit just occurred
def lru_hit(self, idx, way):
self.lru[idx].remove(way)
self.lru[idx].insert(0, way)
# Get the least recently used way for an index
# The LRU is always the last element in the list
def lru_get(self, idx):
return self.lru[idx][-1]
# Perform a tag check, and update lru if a hit occurs
def tag_check(self, tag, idx):
for way in range(self.nways):
if self.valid[idx][way] and self.line[idx][way] == tag:
# Whenever tag check hits, update the set's lru array
self.lru_hit(idx, way)
return True
return False
# Update the tag array due to a value getting fetched from memory
def refill(self, tag, idx):
victim = self.lru_get(idx)
self.line[idx][victim] = tag
self.valid[idx][victim] = True
self.lru_hit(idx, victim)
# Simulate accessing an address. Returns True if a hit occurred,
# False on miss
def access_address(self, addr):
(tag, idx, offset) = self.split_address(addr)
hit = self.tag_check(tag, idx)
if not hit:
self.refill(tag, idx)
return hit
def lru_set(self, idx, way):
self.lru[idx].remove(way)
self.lru[idx].append(way)
def amo_req(self, addr):
(tag, idx, offset) = self.split_address(addr)
for way in range(self.nways):
if self.valid[idx][way] and self.line[idx][way] == tag:
self.valid[idx][way] = False
self.lru_set( idx, way )
break
def invalidate(self):
# invalidates all the cachelines
for way in range(self.nways):
for idx in range(self.nsets):
self.valid[idx][way] = False
class ModelCache:
def __init__(self, size, nways, nbanks, CacheReqType, CacheRespType, MemReqType, MemRespType, mem=None):
# The hit/miss tracker
self.mem_bitwidth_data = MemReqType.get_field_type("data").nbits
self.cache_bitwidth_data = CacheReqType.get_field_type("data").nbits
self.BitsData = mk_bits(self.cache_bitwidth_data)
size = size*8
self.tracker = HitMissTracker(size, nways, nbanks, self.mem_bitwidth_data)
# The transactions list contains the requests and responses for
# the stream of read/write calls on this model
self.transactions = []
self.opaque = 0
self.CacheReqType = CacheReqType
self.CacheRespType = CacheRespType
self.MemReqType = MemReqType
self.MemRespType = MemRespType
self.nlines = int(size // self.mem_bitwidth_data)
self.nsets = int(self.nlines // nways)
# Compute how the address is sliced
self.offset_start = 0
self.offset_end = self.offset_start + int(math.log(self.mem_bitwidth_data//8, 2))
self.idx_start = self.offset_end
self.idx_end = self.idx_start + int(math.log(self.nsets, 2))
self.tag_start = self.idx_end
self.tag_end = 32
# Unpack any initial values of memory into a dict (has easier lookup)
#
# zip is used here to convert the mem array into an array of
# (addr, value) pairs (which it really should be in the first
# place)
self.mem = {}
if mem:
for addr, value in zip(mem[::2], mem[1::2]):
offset = int(Bits32(addr)[ self.offset_start : self.offset_end ])
addr = int(Bits32(addr)[ self.offset_end : 32 ])
if addr not in self.mem:
self.mem[addr] = Bits(self.mem_bitwidth_data, 0)
# assume word mem declarations
self.mem[addr][ offset*8 : (offset+4)*8 ] = value
def check_hit(self, addr):
# Tracker returns boolean, need to convert to 1 or 0 to use
# in the "test" field of the response
if self.tracker.access_address(addr):
return 1
else:
return 0
def read(self, addr, opaque, len_):
hit = self.check_hit(addr)
new_addr = int(addr[self.offset_end:32])
offset = int(addr[self.offset_start:self.offset_end])
if new_addr not in self.mem:
self.mem[new_addr] = Bits(self.mem_bitwidth_data, 0)
if len_ == 0:
value = self.mem[new_addr][offset*8 : (offset+self.cache_bitwidth_data//8)*8]
else:
value = self.mem[new_addr][offset*8 : (offset + int(len_))*8 ]
value = zext(value, self.cache_bitwidth_data)
self.transactions.append(req (self.CacheReqType, 'rd', opaque, addr, len_, 0))
self.transactions.append(resp(self.CacheRespType,'rd', opaque, hit, len_, value))
self.opaque += 1
def write(self, addr, value, opaque, len_):
hit = self.check_hit(addr)
new_addr = int(addr[self.offset_end:32])
offset = int(addr[self.offset_start:self.offset_end])
value = Bits(self.cache_bitwidth_data, value)
if new_addr not in self.mem:
self.mem[new_addr] = Bits(self.mem_bitwidth_data, 0)
if len_ == 0:
self.mem[new_addr][offset*8 : (offset+self.cache_bitwidth_data//8)*8] = value[0 : self.cache_bitwidth_data ]
else:
self.mem[new_addr][offset*8 : (offset + int(len_))*8] = value[0 : int(len_)*8 ]
self.transactions.append(req (self.CacheReqType, 'wr', opaque, addr, len_, value))
self.transactions.append(resp(self.CacheRespType,'wr', opaque, hit, len_, 0))
self.opaque += 1
def init(self, addr, value, opaque, len_):
hit = self.check_hit(addr)
new_addr = int(addr[self.offset_end:32])
offset = int(addr[self.offset_start:self.offset_end])
value = Bits(self.cache_bitwidth_data, value)
if new_addr not in self.mem:
self.mem[new_addr] = Bits(self.mem_bitwidth_data, 0)
if len_ == 0:
self.mem[new_addr][offset*8 : (offset+self.cache_bitwidth_data//8)*8] = value
else:
self.mem[new_addr][offset*8 : (offset + int(len_))*8 ] = value
self.transactions.append(req(self.CacheReqType,'in', opaque, addr, len_, value))
self.transactions.append(resp(self.CacheRespType,'in', opaque, 0, len_, 0))
self.opaque += 1
def amo(self, addr, value, opaque, len_, func):
# AMO operations are on the word level only
self.tracker.amo_req(addr)
new_addr = int(addr[self.offset_end:32])  # keep dict keys as plain ints, matching read()/write()
offset = int(addr[self.offset_start:self.offset_end])
if new_addr not in self.mem:
self.mem[new_addr] = Bits(self.mem_bitwidth_data, 0)
ret = self.mem[new_addr][offset * 8 : (offset + 4) * 8]
value = trunc(value, 32)
amo_out = AMO_FUNS[ int(func) ]( ret, value )
ret = zext( ret, self.cache_bitwidth_data )
value = zext( value, self.cache_bitwidth_data )
self.mem[new_addr][offset * 8 : (offset + 4) * 8] = amo_out
self.transactions.append(req (self.CacheReqType, func, opaque, addr, len_, value))
self.transactions.append(resp(self.CacheRespType,func, opaque, 0, len_, ret))
self.opaque += 1
def invalidate(self, opaque):
self.tracker.invalidate()
self.transactions.append(req (self.CacheReqType, 'inv', opaque, 0, 0, 0))
self.transactions.append(resp(self.CacheRespType, 'inv', opaque, 0, 0, 0))
self.opaque += 1
def flush(self, opaque):
self.transactions.append(req (self.CacheReqType, 'fl', opaque, 0, 0, 0))
self.transactions.append(resp(self.CacheRespType, 'fl', opaque, 0, 0, 0))
self.opaque += 1
def get_transactions(self):
return self.transactions
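# Illustrative driver (not part of the original file). The concrete request /
# response / memory message types come from this project's mem_ifcs factories,
# so they are taken as parameters here rather than guessed at; the size,
# associativity, addresses and data values are arbitrary examples.
def _model_cache_example(CacheReqType, CacheRespType, MemReqType, MemRespType):
    model = ModelCache(4096, 2, 0, CacheReqType, CacheRespType,
                       MemReqType, MemRespType,
                       mem=[0x1000, 0xdeadbeef])  # flat [addr, value, ...] list
    model.write(Bits32(0x1000), 0xcafecafe, 0, 0)  # len_ = 0 means full access width
    model.read(Bits32(0x1000), 1, 0)
    return model.get_transactions()  # req/resp pairs ready to drive a test harness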
|
py | 1a3cfb837413b4f1a93d93479a84378a64143655 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
skeleton code for k-means clustering mini-project
"""
import pickle
import numpy
import matplotlib.pyplot as plt
import sys
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
def Draw(pred, features, poi, mark_poi=False, name="image.png", f1_name="feature 1", f2_name="feature 2"):
""" some plotting code designed to help you visualize your clusters """
### plot each cluster with a different color--add more colors for
### drawing more than 4 clusters
colors = ["b", "c", "k", "m", "g"]
for ii, pp in enumerate(pred):
plt.scatter(features[ii][0], features[ii][1], color = colors[pred[ii]])
### if you like, place red stars over points that are POIs (just for funsies)
if mark_poi:
for ii, pp in enumerate(pred):
if poi[ii]:
plt.scatter(features[ii][0], features[ii][1], color="r", marker="*")
plt.xlabel(f1_name)
plt.ylabel(f2_name)
plt.savefig(name)
plt.show()
### load in the dict of dicts containing all the data on each person in the dataset
data_dict = pickle.load( open("../final_project/final_project_dataset.pkl", "r") )
### there's an outlier--remove it!
data_dict.pop("TOTAL", 0)
temp = [data_dict[key]['salary'] for key in data_dict.keys()]
salaries = [x for x in temp if x!='NaN']
print('max salary:',max(salaries))
print('min salary:',min(salaries))
temp = [data_dict[key]['exercised_stock_options'] for key in data_dict.keys()]
stock_options = [x for x in temp if x!='NaN']
print('max stock_options:',max(stock_options))
print('min stock_options:',min(stock_options))
### the input features we want to use
### can be any key in the person-level dictionary (salary, director_fees, etc.)
feature_1 = "salary"
feature_2 = "exercised_stock_options"
feature_3 = "total_payments"
poi = "poi"
features_list = [poi, feature_1, feature_2,feature_3]
data = featureFormat(data_dict, features_list )
poi, finance_features = targetFeatureSplit( data )
### in the "clustering with 3 features" part of the mini-project,
### you'll want to change this line to
### for f1, f2, _ in finance_features:
### (the loop below has already been updated to unpack the 3 features)
for f1, f2, _ in finance_features:
plt.scatter( f1, f2 )
plt.show()
from sklearn.cluster import KMeans
features_list = ["poi", feature_1, feature_2,feature_3]
data2 = featureFormat(data_dict, features_list )
poi, finance_features = targetFeatureSplit( data2 )
clf = KMeans(n_clusters=2)
pred = clf.fit_predict( finance_features )
Draw(pred, finance_features, poi, name="clusters_before_scaling.pdf", f1_name=feature_1, f2_name=feature_2)
### cluster here; create predictions of the cluster labels
### for the data and store them to a list called pred
try:
Draw(pred, finance_features, poi, mark_poi=False, name="clusters.pdf", f1_name=feature_1, f2_name=feature_2)
except NameError:
print("no predictions object named pred found, no clusters to plot")
|
py | 1a3cfbc98389f2a9253757a97bc38e904ab52e9d | with open('puzzle_input') as puzzle_input:
jump_instructions = puzzle_input.readlines()
steps = 0
position = 0
while 0 <= position < len(jump_instructions):
# read instruction
instruction = int(jump_instructions[position])
# save old instruction index
old_position = position
# move the appropriate amount
position += instruction
# increase steps by 1
steps += 1
if instruction >= 3:
jump_instructions[old_position] = int(jump_instructions[old_position]) - 1
else:
jump_instructions[old_position] = int(jump_instructions[old_position]) + 1
if __name__ == '__main__':
print(steps)
|
py | 1a3cfc1b3cc09469f559f9c949a5fa62f7bea26e | import logging
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.db import transaction
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.urls import reverse, reverse_lazy
from django.utils.translation import ugettext_lazy as _
from mayan.apps.acls.models import AccessControlList
from mayan.apps.documents.events import event_document_type_edited
from mayan.apps.documents.models import Document, DocumentType
from mayan.apps.documents.permissions import (
permission_document_type_edit, permission_document_view
)
from mayan.apps.documents.views.document_views import DocumentListView
from mayan.apps.views.generics import (
AddRemoveView, SingleObjectCreateView, SingleObjectDeleteView,
SingleObjectEditView, SingleObjectListView
)
from mayan.apps.views.mixins import ExternalObjectMixin
from .events import event_smart_link_edited
from .forms import SmartLinkConditionForm, SmartLinkForm
from .icons import icon_smart_link_setup, icon_smart_link_condition
from .links import link_smart_link_create, link_smart_link_condition_create
from .models import ResolvedSmartLink, SmartLink, SmartLinkCondition
from .permissions import (
permission_smart_link_create, permission_smart_link_delete,
permission_smart_link_edit, permission_smart_link_view
)
logger = logging.getLogger(name=__name__)
class DocumentTypeSmartLinksView(AddRemoveView):
main_object_method_add = 'smart_link_add'
main_object_method_remove = 'smart_link_remove'
main_object_permission = permission_document_type_edit
main_object_model = DocumentType
main_object_pk_url_kwarg = 'document_type_id'
secondary_object_model = SmartLink
secondary_object_permission = permission_smart_link_edit
list_available_title = _('Available smart links')
list_added_title = _('Smart links enabled')
related_field = 'smart_links'
def action_add(self, queryset, _user):
with transaction.atomic():
event_document_type_edited.commit(
actor=_user, target=self.main_object
)
for obj in queryset:
self.main_object.smart_links.add(obj)
event_smart_link_edited.commit(
actor=_user, action_object=self.main_object, target=obj
)
def action_remove(self, queryset, _user):
with transaction.atomic():
event_document_type_edited.commit(
actor=_user, target=self.main_object
)
for obj in queryset:
self.main_object.smart_links.remove(obj)
event_smart_link_edited.commit(
actor=_user, action_object=self.main_object, target=obj
)
def get_actions_extra_kwargs(self):
return {'_user': self.request.user}
def get_extra_context(self):
return {
'object': self.main_object,
'title': _(
'Smart links to enable for document type: %s'
) % self.main_object,
}
class ResolvedSmartLinkView(ExternalObjectMixin, DocumentListView):
external_object_class = Document
external_object_permission = permission_document_view
external_object_pk_url_kwarg = 'document_id'
def dispatch(self, request, *args, **kwargs):
self.smart_link = self.get_smart_link()
return super(
ResolvedSmartLinkView, self
).dispatch(request, *args, **kwargs)
def get_document_queryset(self):
try:
queryset = self.smart_link.get_linked_document_for(
document=self.external_object
)
except Exception as exception:
queryset = Document.objects.none()
# Check if the user has the smart link edit permission before
# showing the exception text.
try:
AccessControlList.objects.check_access(
obj=self.smart_link,
permissions=(permission_smart_link_edit,),
user=self.request.user
)
except PermissionDenied:
"""User doesn't have the required permission."""
else:
messages.error(
message=_('Smart link query error: %s' % exception),
request=self.request
)
return queryset
def get_extra_context(self):
dynamic_label = self.smart_link.get_dynamic_label(
document=self.external_object
)
if dynamic_label:
title = _('Documents in smart link: %s') % dynamic_label
else:
title = _(
'Documents in smart link "%(smart_link)s" as related to '
'"%(document)s"'
) % {
'document': self.external_object,
'smart_link': self.smart_link.label,
}
context = super(ResolvedSmartLinkView, self).get_extra_context()
context.update(
{
'object': self.external_object,
'title': title,
}
)
return context
def get_smart_link(self):
queryset = AccessControlList.objects.restrict_queryset(
permission=permission_smart_link_view,
queryset=SmartLink.objects.filter(enabled=True),
user=self.request.user
)
return get_object_or_404(
klass=queryset, pk=self.kwargs['smart_link_id']
)
class SmartLinkDocumentTypesView(AddRemoveView):
main_object_method_add = 'document_types_add'
main_object_method_remove = 'document_types_remove'
main_object_permission = permission_smart_link_edit
main_object_model = SmartLink
main_object_pk_url_kwarg = 'smart_link_id'
secondary_object_model = DocumentType
secondary_object_permission = permission_document_type_edit
list_available_title = _('Available document types')
list_added_title = _('Document types enabled')
related_field = 'document_types'
def get_actions_extra_kwargs(self):
return {'_user': self.request.user}
def get_extra_context(self):
return {
'object': self.main_object,
'title': _(
'Document type for which to enable smart link: %s'
) % self.main_object,
}
class SmartLinkListView(SingleObjectListView):
model = SmartLink
object_permission = permission_smart_link_view
def get_extra_context(self):
return {
'hide_link': True,
'hide_object': True,
'no_results_icon': icon_smart_link_setup,
'no_results_main_link': link_smart_link_create.resolve(
context=RequestContext(request=self.request)
),
'no_results_text': _(
'Indexes group documents into units, usually with similar '
'properties and of equal or similar types. Smart links '
'allow defining relationships between documents even '
'if they are in different indexes and are of different '
'types.'
),
'no_results_title': _(
'There are no smart links'
),
'title': _('Smart links'),
}
class DocumentSmartLinkListView(ExternalObjectMixin, SmartLinkListView):
external_object_class = Document
external_object_permission = permission_document_view
external_object_pk_url_kwarg = 'document_id'
def get_extra_context(self):
return {
'document': self.external_object,
'hide_link': True,
'hide_object': True,
'no_results_icon': icon_smart_link_setup,
'no_results_text': _(
'Smart links allow defining relationships between '
'documents even if they are in different indexes and '
'are of different types.'
),
'no_results_title': _(
'There are no smart links for this document'
),
'object': self.external_object,
'title': _('Smart links for document: %s') % self.external_object,
}
def get_source_queryset(self):
# Override SingleObjectListView source queryset from SmartLink to
# ResolvedSmartLink.
return ResolvedSmartLink.objects.get_for(
document=self.external_object
)
class SmartLinkCreateView(SingleObjectCreateView):
extra_context = {'title': _('Create new smart link')}
form_class = SmartLinkForm
post_action_redirect = reverse_lazy(
viewname='linking:smart_link_list'
)
view_permission = permission_smart_link_create
def get_save_extra_data(self):
return {'_user': self.request.user}
class SmartLinkDeleteView(SingleObjectDeleteView):
model = SmartLink
object_permission = permission_smart_link_delete
pk_url_kwarg = 'smart_link_id'
post_action_redirect = reverse_lazy(
viewname='linking:smart_link_list'
)
def get_extra_context(self):
return {
'object': self.object,
'title': _('Delete smart link: %s') % self.object
}
class SmartLinkEditView(SingleObjectEditView):
form_class = SmartLinkForm
model = SmartLink
object_permission = permission_smart_link_edit
pk_url_kwarg = 'smart_link_id'
post_action_redirect = reverse_lazy(
viewname='linking:smart_link_list'
)
def get_extra_context(self):
return {
'object': self.object,
'title': _('Edit smart link: %s') % self.object
}
def get_save_extra_data(self):
return {'_user': self.request.user}
class SmartLinkConditionListView(ExternalObjectMixin, SingleObjectListView):
external_object_class = SmartLink
external_object_permission = permission_smart_link_edit
external_object_pk_url_kwarg = 'smart_link_id'
def get_extra_context(self):
return {
'hide_link': True,
'hide_object': True,
'no_results_icon': icon_smart_link_condition,
'no_results_main_link': link_smart_link_condition_create.resolve(
context=RequestContext(
request=self.request, dict_={
'object': self.external_object
}
)
),
'no_results_text': _(
'Conditions are small logic units that when combined '
'define how the smart link will behave.'
),
'no_results_title': _(
'There are no conditions for this smart link'
),
'object': self.external_object,
'title': _(
'Conditions for smart link: %s'
) % self.external_object,
}
def get_source_queryset(self):
return self.external_object.conditions.all()
class SmartLinkConditionCreateView(
ExternalObjectMixin, SingleObjectCreateView
):
external_object_class = SmartLink
external_object_permission = permission_smart_link_edit
external_object_pk_url_kwarg = 'smart_link_id'
form_class = SmartLinkConditionForm
def get_extra_context(self):
return {
'title': _(
'Add new conditions to smart link: "%s"'
) % self.external_object,
'object': self.external_object,
}
def get_instance_extra_data(self):
return {'smart_link': self.external_object}
def get_post_action_redirect(self):
return reverse(
viewname='linking:smart_link_condition_list', kwargs={
'smart_link_id': self.external_object.pk
}
)
def get_queryset(self):
return self.external_object.conditions.all()
class SmartLinkConditionDeleteView(SingleObjectDeleteView):
model = SmartLinkCondition
object_permission = permission_smart_link_edit
pk_url_kwarg = 'smart_link_condition_id'
def get_extra_context(self):
return {
'condition': self.object,
'navigation_object_list': ('object', 'condition'),
'object': self.object.smart_link,
'title': _(
'Delete smart link condition: "%s"?'
) % self.object,
}
def get_post_action_redirect(self):
return reverse(
viewname='linking:smart_link_condition_list', kwargs={
'smart_link_id': self.object.smart_link.pk
}
)
class SmartLinkConditionEditView(SingleObjectEditView):
form_class = SmartLinkConditionForm
model = SmartLinkCondition
object_permission = permission_smart_link_edit
pk_url_kwarg = 'smart_link_condition_id'
def get_extra_context(self):
return {
'condition': self.object,
'navigation_object_list': ('object', 'condition'),
'object': self.object.smart_link,
'title': _('Edit smart link condition'),
}
def get_post_action_redirect(self):
return reverse(
viewname='linking:smart_link_condition_list', kwargs={
'smart_link_id': self.object.smart_link.pk
}
)
|
py | 1a3cfc534532ba2aa28f1a1e2f9e41434836d1b5 | import face_detection.video_receiver as video_receiver
import face_detection.face_detector as face_detector
import configuration.general_settings as settings
from model.vgg_adapted_model import FaceAnalyserModel
def main():
# Initialize model
model = FaceAnalyserModel(settings.model_weights_path)
# Initialize video capture
capture = video_receiver.initializeVideoCapture()
    # Check if the user wants to quit
while(video_receiver.checkVideo()):
# Capture frame-by-frame
ret, frame = video_receiver.captureFrame(capture)
# Display the resulting frame
video_receiver.displayFrame(frame)
# Change image scale color
frame_gray = face_detector.changeColorScale(frame)
# Detect face
face_coordinates = face_detector.getFaceCoordinates(frame_gray)
# Check if face has been detected
if face_coordinates is not None:
# Preprocess image
preprocessed_frame = face_detector.preprocess(frame_gray, face_coordinates)
emotion_prob, emotion_index = model.detectEmotion(preprocessed_frame)
#print ("The worker is " + settings.detected_emotions[emotion_index])
print("ex:EmotionDetected rdf:type ewe-emodet:EmotionDetected. \
\n ex:EmotionDetected ewe-emodet:hasDetected onyx:Emotion. \
\n onyx:Emotion onyx:hasEmotionCategory wn-affect:" + settings.detected_emotions[emotion_index] + " . \
\n onyx:Emotion onyx:hasEmotionIntensity " + str(emotion_prob) + ".\n\n")
# Draw face rectangle
video_receiver.drawFace(frame, face_coordinates, settings.detected_emotions[emotion_index])
    # When everything is done, release the capture
capture.release()
video_receiver.stopVideoCapture()
if __name__ == '__main__':
main()
|
py | 1a3cfc9b2cf21c953192f8689649a0e09d61a824 | from core.advbase import *
from slot.a import *
from slot.d import *
def module():
return Summer_Ranzal
class Summer_Ranzal(Adv):
a1 = ('lo',0.4)
a3 = ('primed_defense', 0.08)
conf = {}
conf['slots.a'] = Resounding_Rendition() + Breakfast_at_Valerios()
conf['slots.frostbite.a'] = Primal_Crisis() + His_Clever_Brother()
conf['slots.d'] = Leviathan()
conf['acl'] = """
`dragon
`s3
`s4
`s2
"""
coab = ['Xander', 'Dagger', 'Tiki']
conf['afflict_res.bog'] = 100
share = ['Gala_Elisanne', 'Ranzal']
def init(self):
self.a3_iscding = 0
self.buff_class = Teambuff if self.condition('buff all team') else Selfbuff
@staticmethod
def prerun_skillshare(adv, dst):
adv.buff_class = Teambuff if adv.condition('buff all team') else Selfbuff
def s1_proc(self, e):
self.dmg_make(e.name,2.16)
self.afflics.bog.on(e.name, 100)
self.dmg_make(e.name,6.48)
def s2_proc(self, e):
self.buff_class(e.name,0.10,15).on()
if __name__ == '__main__':
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv)
|
py | 1a3cfcb7047318be4e5807577241157d3d7552ae | from numpy import ones, pi, array
from os.path import join
import matplotlib.pyplot as plt
from Tests import save_validation_path as save_path
from pyleecan.Classes.Simu1 import Simu1
from pyleecan.Classes.InputCurrent import InputCurrent
from pyleecan.Classes.InputFlux import InputFlux
from pyleecan.Classes.ImportGenVectLin import ImportGenVectLin
from pyleecan.Classes.ImportMatrixVal import ImportMatrixVal
from pyleecan.Classes.ImportMatlab import ImportMatlab
from pyleecan.Classes.ImportData import ImportData
from pyleecan.Classes.ImportVectorField import ImportVectorField
from pyleecan.Classes.MagFEMM import MagFEMM
from pyleecan.Classes.Output import Output
from Tests import TEST_DATA_DIR
import pytest
from pyleecan.Functions.load import load
from pyleecan.definitions import DATA_DIR
@pytest.mark.long
@pytest.mark.validation
@pytest.mark.FEMM
def test_Magnetic_FEMM_sym():
"""Validation of a polar SIPMSM with inset magnet
Armature load (magnet field canceled by is_mmfr=False)
from publication
A. Rahideh and T. Korakianitis,
“Analytical Magnetic Field Calculation of Slotted Brushless Permanent-Magnet Machines With Surface Inset Magnets,”
vol. 48, no. 10, pp. 2633–2649, 2012.
    Test computes the flux in FEMM, with and without symmetry
and with MANATEE semi-analytical subdomain model
"""
SIPMSM_001 = load(join(DATA_DIR, "Machine", "SIPMSM_001.json"))
simu = Simu1(name="EM_SIPMSM_AL_001", machine=SIPMSM_001)
# Definition of the enforced output of the electrical module
N0 = 150
Is = ImportMatrixVal(
value=array([[14.1421, -7.0711, -7.0711], [-14.1421, 7.0711, 7.0711]])
)
time = ImportGenVectLin(start=0, stop=0.1, num=2, endpoint=True)
Na_tot = 1024
Ar = ImportMatrixVal(value=array([2.5219, 0.9511]) + pi / 6)
simu.input = InputCurrent(
Is=Is,
Ir=None, # No winding on the rotor
N0=N0,
angle_rotor=Ar, # Will be computed
time=time,
Na_tot=Na_tot,
angle_rotor_initial=0,
)
# Definition of the magnetic simulation (is_mmfr=False => no flux from the magnets)
assert SIPMSM_001.comp_periodicity() == (1, False, 2, True)
simu.mag = MagFEMM(
type_BH_stator=2,
type_BH_rotor=2,
is_periodicity_a=False,
is_periodicity_t=False,
is_mmfr=False,
angle_stator_shift=-pi / 6,
nb_worker=2,
)
simu.force = None
simu.struct = None
# Just load the Output and ends (we could also have directly filled the Output object)
simu_load = Simu1(init_dict=simu.as_dict())
simu_load.mag = None
mat_file = join(TEST_DATA_DIR, "EM_SIPMSM_AL_001_MANATEE_SDM.mat")
Br = ImportMatlab(file_path=mat_file, var_name="XBr")
Bt = ImportMatlab(file_path=mat_file, var_name="XBt")
simu_load.input = InputFlux(
time=time, Na_tot=Na_tot, B_dict={"Br": Br, "Bt": Bt}, OP=simu.input.copy()
)
out = Output(simu=simu)
simu.run()
out3 = Output(simu=simu_load)
simu_load.run()
# Plot the result by comparing the two simulation (no sym / MANATEE SDM)
plt.close("all")
out.plot_2D_Data(
"mag.B",
"angle",
data_list=[out3.mag.B],
legend_list=["No symmetry", "MANATEE SDM"],
save_path=join(save_path, "test_EM_SIPMSM_AL_001_SDM.png"),
is_show_fig=False,
)
|
py | 1a3cfd1bf20dd87586c9b1119fd7bcd5e8e1e8b2 | import os
from setuptools import find_packages, setup
from pufsim import version
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
# load readme file
with open('README.rst') as readme:
README = readme.read()
# stamp the package prior to installation
version.stamp_directory(os.path.join(os.getcwd(), 'pufsim'))
setup(
name='django-pufsim',
version=version.get_version(),
packages=['pufsim'],
include_package_data=True,
license='MIT License',
description='Front-end app for puflib',
long_description=README,
url='https://github.com/gregschmit/django-pufsim',
author='Gregory N. Schmit',
author_email='[email protected]',
install_requires=['Django>=2', 'numpy', 'matplotlib', 'puflib',],
package_data={'pufsim': ['VERSION_STAMP']},
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 2.0',
'Intended Audience :: Science/Research',
'Intended Audience :: Education',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
],
)
|
py | 1a3cfda94b98c9514208433dfcf5947caea8537c | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ["MetaOptimizerBase"]
class MetaOptimizerBase(object):
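    # Base class for fleet meta optimizers: a meta optimizer wraps the user-defined
    # optimizer and reports through _can_apply() whether its strategy can be used.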
def __init__(self, optimizer):
pass
def _set_basic_info(self, loss, role_maker, user_defined_optimizer,
user_defined_strategy):
self.loss = loss
self.role_maker = role_maker
self.user_defined_optimizer = user_defined_optimizer
self.user_defined_strategy = user_defined_strategy
    def _update_inner_optimizer(self, optimizer):
self.inner_opt = optimizer
def _can_apply(self):
return False
def _is_graph_out(self):
return False
def _can_update(self, optimizer):
if str(optimizer.__class__.__name__) in self.meta_optimizers_white_list:
return True
def _disable_strategy(self, dist_strategy):
raise NotImplementedError("you should implement disable strategy")
def minimize_impl(self,
loss,
startup_program=None,
parameter_list=None,
no_grad_set=None):
raise NotImplementedError("meta optimizer not implemented")
def minimize(self,
loss,
startup_program=None,
parameter_list=None,
no_grad_set=None):
optimize_ops, params_grads = self.minimize_impl(
loss, startup_program, parameter_list, no_grad_set)
return optimize_ops, params_grads
|
py | 1a3cfddeaf13d6e4fe5176b4000104130b82f411 | import pickle, datetime
import json
#Save data as a pickle file and flat text file
# expNumber must be set first using the openExperiment method
# data can be appended to the file by calling the process method; the datagram is saved as a string and is time stamped
class DataHandler(object):
    def __init__(self):
        self.expNumber = None
    #public methods to be called for opening, closing, and processing data
    def openExperiment(self, expNumber):
        self.expNumber = expNumber
    def closeExperiment(self, expNumber):
        self.expNumber = "CLOSED"
    def process(self, datagram):
        if datagram == {}: return
        if self.expNumber is None: return
        try:
            identity = datagram.get("identity")
            dataFile = datetime.datetime.today().strftime("%b_%d").lower()+self.expNumber+'_'+str(identity)
            # pickle.dumps returns bytes, so the pickle log is appended in binary mode
            with open(dataFile, 'ab') as of:
                of.write(pickle.dumps(datagram)+b'\n')
            with open(dataFile+'_flat.txt', 'a') as of:
                of.write(json.dumps(datagram)+'\n')
        except Exception as e:
            print(e) |
py | 1a3cfe811cb723bdd6f28c685c86005f8b6feb00 | from osbenchmark.builder.downloaders.repositories.repository_url_provider import RepositoryUrlProvider
class PluginDistributionRepositoryProvider:
def __init__(self, plugin, executor):
self.plugin = plugin
self.repository_url_provider = RepositoryUrlProvider(executor)
def get_download_url(self, host):
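        # Resolve the plugin's distribution repository and render the
        # "plugin.<name>.<repository>.url" key into a concrete download URL.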
distribution_repository = self.plugin.variables["distribution"]["repository"]
default_key = "plugin.{}.{}.url".format(self.plugin.name, distribution_repository)
return self.repository_url_provider.render_url_for_key(host, self.plugin.variables, default_key, mandatory=False)
|
py | 1a3cff0a8b2d517140ec193066266b5358d4c422 | from __future__ import unicode_literals
import warnings
from .helpers import use_appropriate_encoding
class Device(object):
def __init__(self, account, device_info):
self._account = account
self.device_iden = device_info.get("iden")
for attr in ("push_token", "app_version", "fingerprint", "created", "modified",
"active", "nickname", "generated_nickname", "manufacturer", "icon",
"model", "has_sms", "key_fingerprint"):
setattr(self, attr, device_info.get(attr))
def push_note(self, title, body):
data = {"type": "note", "title": title, "body": body}
return self._push(data)
def push_address(self, name, address):
warnings.warn("Address push type is removed. This push will be sent as note.")
return self.push_note(name, address)
def push_list(self, title, items):
warnings.warn("List push type is removed. This push will be sent as note.")
return self.push_note(title, ",".join(items))
def push_link(self, title, url, body=None):
data = {"type": "link", "title": title, "url": url, "body": body}
return self._push(data)
def push_file(self, file_name, file_url, file_type, body=None, title=None):
return self._account.push_file(file_name, file_url, file_type, body=body, title=title, device=self)
def _push(self, data):
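        # Target this specific device by attaching its iden, then delegate to the account's push.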
data["device_iden"] = self.device_iden
return self._account._push(data)
@use_appropriate_encoding
def __str__(self):
return "Device('{0}')".format(self.nickname or "nameless (iden: {})".format(self.device_iden))
def __repr__(self):
return self.__str__()
|
py | 1a3d00177632744bcd1d71c4f8296e7ca4b96773 | import dash_html_components as html
import dash_bootstrap_components as dbc
import dash_core_components as dcc
# local imports
from load_data import (
income_distribution_dropdown_values,
median_income_dropdown_values
)
# we use the Row and Col components to construct the sidebar header
# it consists of a title, and a toggle, the latter is hidden on large screens
sidebar_header = dbc.Row(
[
dbc.Col(html.H2("Page Menu", className="display-4")),
dbc.Col(
html.Button(
# use the Bootstrap navbar-toggler classes to style the toggle
html.Span(className="navbar-toggler-icon"),
className="navbar-toggler",
# the navbar-toggler classes don't set color, so we do it here
style={
"color": "rgba(0,0,0,.5)",
"border-color": "rgba(0,0,0,.1)",
},
id="toggle",
),
# the column containing the toggle will be only as wide as the
# toggle, resulting in the toggle being right aligned
width="auto",
# vertically align the toggle in the center
align="center",
),
]
)
sidebar = html.Div(
[
sidebar_header,
# we wrap the horizontal rule and short blurb in a div that can be
# hidden on a small screen
html.Div(
[
html.Hr(),
],
id="blurb",
),
# use the Collapse component to animate hiding / revealing links
dbc.Collapse(
dbc.Nav(
[
dbc.NavLink("About", href="/page-1", id="page-1-link"),
dbc.NavLink("Income distribution", href="/page-2", id="page-2-link"),
dbc.NavLink("Median income", href="/page-3", id="page-3-link"),
dbc.NavLink("References", href="/page-4", id="page-4-link"),
],
vertical=True,
pills=True,
),
id="collapse",
),
],
id="sidebar",
)
layout1 = html.Div([
html.H3("About"),
html.P("This aim of this site is to help make some of the personal \
income statistics from Statistics Canada more accessible to \
Canadians."),
html.H5("Income distribution"),
dcc.Markdown("The *Income distribution* page is based on 'total income' from \
income tax returns and includes [(ref)](https://www150.statcan.gc.ca/n1/en/catalogue/72-212-X):"),
dcc.Markdown("\
- employment income (salaries, commission), \n \
- self employment income, \n \
- pension income \
(OAS, CPP/QPP, registered pension plans, RRIFs), \n \
- investment income, \n \
- social benefit payments (EI, workers' compensation, \
social assistance), and \n \
- other income."),
dcc.Markdown("There is an important caveat about the definition of 'total income' \
that is relevant when interpreting these statistics. Let's consider two \
people with different income and pension benefits:"),
dcc.Markdown("\
- Person 1: $70,000 total income, no pension, contributes $10,000 of their \
income to RRSPs, \n \
- Person 2: $60,000 total income, receives a defined benefit \
pension worth $10,000"),
dcc.Markdown("At the end of the day these individuals have the same disposal \
income, and presumably similar potential future income from their \
pensions. Based on StatCans 'total income' statistics, Person 1 has higher \
'total income'. The employment Person 1 uses \
to make RRSP and RPP contributions counts as 'total income' \
in the year it is earned, but also again when it \
is withdrawn as pension income (plus any appreciation from capital gains, \
dividends, and interest). For Person 2, the defined benefit pension \
is not counted as 'total income'. Defined benefit pensions are \
promises made by employers to pay employees in the future. \
Defined benefits pensions (and also employee contributions to \
workplace pensions) show up on T4's as a 'pension adjustment'. \
"),
html.H5("Median income"),
dcc.Markdown("The *Median income* page is based on statistics from the \
[Canadian Income Survey]\
(https://www23.statcan.gc.ca/imdb/p2SV.pl?Function=getSurvey&Id=1275662).\
For the 2018 CIS, the sample size was around \
56,000 households.")
])
################
# Layout 2: income distribution
################
def get_dropdown_options(items):
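    # Convert a flat list of values into the label/value dicts expected by dcc.Dropdown.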
return [{'label': value, 'value': value} for value in items]
layout2_dropdown_headers = dbc.Row([
dbc.Col(html.Div("Select year")),
dbc.Col(html.Div("Select location")),
dbc.Col(html.Div("Select age group")),
])
layout2_dropdown = dbc.Row([
dbc.Col(
dcc.Dropdown(
id='page2-year',
placeholder="Select year",
options=get_dropdown_options(
income_distribution_dropdown_values["year_values"]),
value=2018,
),
),
dbc.Col(
dcc.Dropdown(
id='page2-geo',
placeholder="Select location",
options=get_dropdown_options(
income_distribution_dropdown_values["geo_values"]),
value="Canada"
),
),
dbc.Col(
dcc.Dropdown(
id='page2-age',
placeholder="Select age group",
options=get_dropdown_options(
income_distribution_dropdown_values["age_values"]),
value='35 to 44 years',
),
),
])
layout2 = html.Div([
html.H3("Income distribution"),
layout2_dropdown_headers,
layout2_dropdown,
html.Div(
dcc.Loading(dcc.Graph(id="income-distribution"), type='circle'),
style={'width':'90%'}
),
html.Div(
dcc.Loading(dcc.Graph(id="cumulative-distribution"), type='circle'),
style={'width':'90%'}
)
])
################
# Layout 3: median income
################
page_3_dropdown_header = dbc.Row([
dbc.Col(html.Div("Select age group")),
dbc.Col(html.Div("Select sex")),
dbc.Col()
])
page3_dropdown_header_2 = dbc.Row([
dbc.Col(html.Div("Select region (hold ctrl for multiple selections)"))])
age_sex_dropdown = dbc.Row([
dbc.Col(
dcc.Dropdown(
id='page3-age',
placeholder="Select age group",
options=get_dropdown_options(median_income_dropdown_values["Age group"]),
value='35 to 44 years',
)
),
dbc.Col(
dcc.Dropdown(
id='page3-sex',
placeholder="Select sex",
options=get_dropdown_options(median_income_dropdown_values["Sex"]),
value=["Males", "Females"],
multi=True
)
),
dbc.Col()
])
region_dropdown = dbc.Row([
dbc.Col(
dcc.Dropdown(
id='page3-geo',
placeholder="Select location",
options=get_dropdown_options(
median_income_dropdown_values["GEO"]),
value=[
'Ottawa-Gatineau, Ontario/Quebec',
'Vancouver, British Columbia'],
multi=True
)
)
])
layout3 = html.Div([
html.H3("Median income"),
page_3_dropdown_header,
age_sex_dropdown,
page3_dropdown_header_2,
region_dropdown,
html.Div(
dcc.Loading(dcc.Graph(id="median-income"), type='circle'),
style={'width':'100%'})
])
layout4 = html.Div([
html.H3("References"),
html.Div(dcc.Markdown("\
- Statistics Canada \n\
- [Tax filers and dependants with income by total income, sex and age](https://doi.org/10.25318/1110000801-eng),\n\
- [Income of individuals by age group, sex and income source, Canada, provinces and selected census metropolitan areas](https://doi.org/10.25318/1110023901-eng)\n\
- Source code for this webpage is hosted on [Github](https://github.com/BlaneG/CAN-income-stats)\n\
"
))
]) |
py | 1a3d007e83d858aba6a2254a45c4f8bca8f60349 | from beebole.interfaces.responses.custom_field import CustomFieldListResponse
from beebole.interfaces.responses.simple import IdResponse, SimpleResponse
from beebole.interfaces.responses.jobs import JobInfoResponse
from beebole.interfaces.responses.group import GroupResponse, GroupListResponse
from beebole.interfaces.responses.absence import AbsenceResponse, AbsenceListResponse |
py | 1a3d00a30ef628d2e7c8bf63dedc78be1b3bba8e | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Imports
import tensorflow as tf
import time
# Deopout rate
RATE_DROPOUT = 0.5
def small_cnn(x, phase_train):
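    # Dense head applied to a 4x4x64 feature patch: one hidden layer, dropout, 10-way logits.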
# Dense Layer
pool2_flat = tf.reshape(x, [-1, 4 * 4 * 64])
dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
dropout = tf.layers.dropout(inputs=dense, rate=RATE_DROPOUT, training=phase_train)
# Logits Layer
logits = tf.layers.dense(inputs=dropout, units=10)
logits = tf.layers.dropout(inputs=logits, rate=RATE_DROPOUT, training=phase_train)
return logits
def wrap(x, m, n, stride, shape):
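    # Extract m*n overlapping crops of `x` (offset by `stride` pixels) and stack
    # them along the batch dimension with the target `shape`.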
slicing = tf.TensorArray('float32', m * n)
for j in range(m):
for k in range(n):
slicing = slicing.write(
j * n + k, tf.slice(x, [0, j * stride, k * stride, 0],
shape))
sliced = tf.reshape(slicing.concat(), shape)
slicing.close().mark_used()
return sliced
def model(x):
phase_train = tf.placeholder(tf.bool)
m = 5
n = 5
stride = 3
x = tf.reshape(x, [-1, 28, 28, 1])
# Convolutional Layer #1
conv1 = tf.layers.conv2d(
inputs=x,
filters=32,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
conv1_dropout = tf.layers.dropout(inputs=conv1, rate=RATE_DROPOUT, training=phase_train)
# Convolutional Layer #2
conv2 = tf.layers.conv2d(
inputs=conv1_dropout,
filters=32,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
conv2_dropout = tf.layers.dropout(inputs=conv2, rate=RATE_DROPOUT, training=phase_train)
# Pooling Layer #1
pool1 = tf.layers.average_pooling2d(inputs=conv2_dropout, pool_size=[2, 2], strides=2)
# Convolutional Layer #3
conv3 = tf.layers.conv2d(
inputs=pool1,
filters=64,
kernel_size=[3, 3],
padding="same",
activation=tf.nn.relu)
conv3_dropout = tf.layers.dropout(inputs=conv3, rate=RATE_DROPOUT, training=phase_train)
# Pooling Layer #2
pool2 = tf.layers.average_pooling2d(inputs=conv3_dropout, pool_size=[2, 2], strides=2)
padding = tf.pad(pool2, [[0, 0], [1, 1], [1, 1], [0, 0]])
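    # Score 36 overlapping 4x4 crops of the padded feature map with the dense head
    # and average their logits across crops.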
logits = small_cnn(wrap(padding, 6, 6, 1, [-1, 4, 4, 64]), phase_train)
logits = tf.reduce_mean(tf.reshape(logits, [36, -1, 10]), 0)
return logits, phase_train
def main(unused_argv):
mnist = tf.contrib.learn.datasets.load_dataset("mnist")
input_data = tf.placeholder(tf.float32, [None, 784])
output_data = tf.placeholder(tf.int64, [None])
y_model, phase_train= model(input_data)
#Loss
cross_entropy = tf.losses.sparse_softmax_cross_entropy(
labels=output_data, logits=y_model)
cross_entropy = tf.reduce_mean(cross_entropy)
#Optimizer
rate = tf.placeholder(tf.float32)
train_step = tf.train.AdamOptimizer(rate).minimize(cross_entropy)
#Accuracy
correct_prediction = tf.equal(tf.argmax(y_model, 1), output_data)
correct_prediction = tf.cast(correct_prediction, tf.float32)
accuracy = tf.reduce_mean(correct_prediction)
#Congifg
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
t0 = time.clock()
rt = 1e-3
for i in range(60001):
# Get the data of next batch
batch = mnist.train.next_batch(100)
if (i % 600 == 0) and (i != 0):
if i == 30000:
rt = 3e-4
if i == 42000:
rt = 1e-4
if i == 48000:
rt = 3e-5
if i == 54000:
rt = 1e-5
# Print the accuracy
test_accuracy = 0
test_accuracy_once = 0
for index in range(200):
accuracy_batch = mnist.test.next_batch(50)
test_accuracy_once = sess.run(accuracy, feed_dict={
input_data: accuracy_batch[0], output_data: accuracy_batch[1],
phase_train: False})
test_accuracy += test_accuracy_once
test_accuracy_once = 0
print('%g, %g, %g' %
(i / 600, test_accuracy / 200, (time.clock() - t0)))
t0 = time.clock()
# Train
_ = sess.run(
train_step,
feed_dict={input_data: batch[0],
output_data: batch[1],
phase_train: True,
rate: rt})
if __name__ == "__main__":
tf.app.run() |
py | 1a3d01003927e0de5e4e6d9a0f84d0a37a430bb7 | # $Id: fact_base.py 081917d30609 2010-03-05 mtnyogi $
# coding=utf-8
#
# Copyright © 2007-2008 Bruce Frederiksen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
A fact_base is one of the kinds of knowledge_bases (see also, rule_base
and special).
>>> from pyke import knowledge_engine
>>> engine = knowledge_engine.engine()
>>> fb = fact_base(engine, 'fb_name')
>>> fb
<fact_base fb_name>
>>> fb.dump_universal_facts()
>>> fb.dump_specific_facts()
A fact_base is nothing more than a list of facts. Each fact has a name
and a tuple of arguments. These arguments are python data (not
patterns).
Fact_bases support two kinds of facts: universal facts (universally
true) and case specific facts (only true in a specific situation).
>>> fb.add_universal_fact('some_universal_fact', ('a', 2))
>>> fb.add_case_specific_fact('some_specific_fact', ('b', ('hi', 32)))
>>> fb.dump_universal_facts()
some_universal_fact('a', 2)
>>> fb.dump_specific_facts()
some_specific_fact('b', ('hi', 32))
The 'reset' method deletes all case specific facts, but leaves the
universal facts.
>>> fb.reset()
>>> fb.dump_universal_facts()
some_universal_fact('a', 2)
>>> fb.dump_specific_facts()
Normally, universal facts are established once at program
initialization time and case specific facts are established both just
prior to each invocation of the expert system as well as by assertions
in forward chaining rules.
>>> fb.assert_('some_fact', ('a', 2, ('hi', 'mom')))
>>> fb.dump_universal_facts()
some_universal_fact('a', 2)
>>> fb.dump_specific_facts()
some_fact('a', 2, ('hi', 'mom'))
>>> fb.assert_('some_fact', ('a', 3, ('hi', 'mom')))
>>> fb.dump_specific_facts()
some_fact('a', 2, ('hi', 'mom'))
some_fact('a', 3, ('hi', 'mom'))
>>> fb.assert_('some_other_fact', ())
>>> fb.dump_specific_facts()
some_fact('a', 2, ('hi', 'mom'))
some_fact('a', 3, ('hi', 'mom'))
some_other_fact()
Duplicate facts are not allowed and trying to assert a duplicate fact is
silently ignored.
>>> fb.assert_('some_fact', ('a', 2, ('hi', 'mom')))
>>> fb.dump_specific_facts()
some_fact('a', 2, ('hi', 'mom'))
some_fact('a', 3, ('hi', 'mom'))
some_other_fact()
'''
import itertools
import contextlib
from pyke import knowledge_base, contexts
class fact_base(knowledge_base.knowledge_base):
''' Not much to fact_bases. The real work is done in fact_list! '''
def __init__(self, engine, name, register = True):
super(fact_base, self).__init__(engine, name, fact_list, register)
def dump_universal_facts(self):
for fl_name in sorted(self.entity_lists.keys()):
self.entity_lists[fl_name].dump_universal_facts()
def dump_specific_facts(self):
for fl_name in sorted(self.entity_lists.keys()):
self.entity_lists[fl_name].dump_specific_facts()
def add_universal_fact(self, fact_name, args):
self.get_entity_list(fact_name).add_universal_fact(args)
def add_case_specific_fact(self, fact_name, args):
self.get_entity_list(fact_name).add_case_specific_fact(args)
def assert_(self, fact_name, args):
self.add_case_specific_fact(fact_name, args)
def get_stats(self):
num_fact_lists = num_universal = num_case_specific = 0
for fact_list in self.entity_lists.values():
universal, case_specific = fact_list.get_stats()
num_universal += universal
num_case_specific += case_specific
num_fact_lists += 1
return num_fact_lists, num_universal, num_case_specific
def print_stats(self, f):
num_fact_lists, num_universal, num_case_specific = self.get_stats()
f.write("%s: %d fact names, %d universal facts, "
"%d case_specific facts\n" %
(self.name, num_fact_lists, num_universal, num_case_specific))
class fact_list(knowledge_base.knowledge_entity_list):
def __init__(self, name):
super(fact_list, self).__init__(name)
self.universal_facts = [] # [(arg...)...]
self.case_specific_facts = [] # [(arg...)...]
self.hashes = {} # (len, (index...)): (other_indices,
# {(arg...): [other_args_from_factn...]})
self.fc_rule_refs = [] # (fc_rule, foreach_index)
def reset(self):
self.case_specific_facts = []
self.hashes.clear()
self.fc_rule_refs = []
def dump_universal_facts(self):
for args in self.universal_facts:
print('%s%s' % (self.name, args))
def dump_specific_facts(self):
for args in self.case_specific_facts:
print('%s%s' % (self.name, args))
def add_fc_rule_ref(self, fc_rule, foreach_index):
self.fc_rule_refs.append((fc_rule, foreach_index))
def get_affected_fc_rules(self):
return (fc_rule for fc_rule, foreach_index in self.fc_rule_refs)
def lookup(self, bindings, pat_context, patterns):
""" Returns a context manager for a generator that binds patterns to
successive facts, yielding None for each successful match.
Undoes bindings upon continuation, so that no bindings remain at
StopIteration.
"""
indices = tuple(enum for enum in enumerate(patterns)
if enum[1].is_data(pat_context))
other_indices, other_arg_lists = \
self._get_hashed(len(patterns),
tuple(index[0] for index in indices),
tuple(index[1].as_data(pat_context)
for index in indices))
def gen():
if other_arg_lists:
for args in other_arg_lists:
mark = bindings.mark(True)
end_done = False
try:
if all(map(
lambda i, arg:
patterns[i].match_data(bindings,
pat_context,
arg),
other_indices,
args)):
bindings.end_save_all_undo()
end_done = True
yield
finally:
if not end_done: bindings.end_save_all_undo()
bindings.undo_to_mark(mark)
return contextlib.closing(gen())
def _get_hashed(self, len, indices, args):
ans = self.hashes.get((len, indices))
if ans is None: ans = self._hash(len, indices)
other_indices, arg_map = ans
return other_indices, arg_map.get(args, ())
def _hash(self, length, indices):
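        # Build an index of all known facts of this arity, keyed by the values at the
        # bound argument positions, so repeated lookups with the same pattern are fast.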
args_hash = {}
new_entry = (tuple(i for i in range(length) if i not in indices),
args_hash)
self.hashes[length, indices] = new_entry
for args in itertools.chain(self.universal_facts,
self.case_specific_facts):
if len(args) == length:
selected_args = tuple(arg for i, arg in enumerate(args)
if i in indices)
args_hash.setdefault(selected_args, []) \
.append(tuple(arg for i, arg in enumerate(args)
if i not in indices))
return new_entry
def add_universal_fact(self, args):
assert args not in self.case_specific_facts, \
"add_universal_fact: fact already present as specific fact"
if args not in self.universal_facts:
self.universal_facts.append(args)
self.add_args(args)
def add_case_specific_fact(self, args):
if args not in self.universal_facts and \
args not in self.case_specific_facts:
self.case_specific_facts.append(args)
self.add_args(args)
for fc_rule, foreach_index in self.fc_rule_refs:
fc_rule.new_fact(args, foreach_index)
def add_args(self, args):
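        # Keep every previously built hash index in sync with the newly added fact.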
for (length, indices), (other_indices, arg_map) \
in self.hashes.items():
if length == len(args):
selected_args = tuple(arg for i, arg in enumerate(args)
if i in indices)
arg_map.setdefault(selected_args, []) \
.append(tuple(arg for i, arg in enumerate(args)
if i not in indices))
def get_stats(self):
return len(self.universal_facts), len(self.case_specific_facts)
|
py | 1a3d0180e3d47d48d39de391d4b5334038d59162 | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 30 00:06:41 2021
@author: qizhe
"""
class Solution:
def computeArea(self, ax1: int, ay1: int, ax2: int, ay2: int, bx1: int, by1: int, bx2: int, by2: int) -> int:
"""
        Same as the reference solution; the clever part is using max/min so that
        every overlap case is handled. Took about 10 minutes.
"""
Area1 = (ax2-ax1)*(ay2-ay1)
Area2 = (bx2-bx1)*(by2-by1)
xCross = max(min(ax2,bx2) - max(ax1,bx1), 0)
yCross = max(min(ay2,by2) - max(ay1,by1), 0)
AreaCross = xCross*yCross
# print(Area1,Area2,AreaCross)
return Area1 + Area2 - AreaCross
if __name__ == '__main__':
solu = Solution()
ax1 = -3
ay1 = 0
ax2 = 3
ay2 = 4
bx1 = 0
by1 = -1
bx2 = 9
by2 = 2
# ax1 = -2
# ay1 = -2
# ax2 = 2
# ay2 = 2
# bx1 = -2
# by1 = -2
# bx2 = 2
# by2 = 2
result = solu.computeArea(ax1,ay1,ax2,ay2,bx1,by1,bx2,by2)
# while result:
# print(result.val)
# result = result.next
output_Str = 'result = ' + str(result)
print(output_Str) |
bzl | 1a3d01b0e66078c6f2d97e092fc01d160a2b8cf5 | load("@io_bazel_rules_go//go:def.bzl", "GoLibrary")
load("@io_bazel_rules_go//go/private:mode.bzl", "get_mode")
go_filetype = ["*.go"]
def _compute_genrule_variables(resolved_srcs, resolved_outs):
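  # Mirror the make variables that native genrule provides: $(SRCS), $(OUTS), plus
  # $< and $@ when there is exactly one input or output.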
variables = {"SRCS": cmd_helper.join_paths(" ", resolved_srcs),
"OUTS": cmd_helper.join_paths(" ", resolved_outs)}
if len(resolved_srcs) == 1:
variables["<"] = list(resolved_srcs)[0].path
if len(resolved_outs) == 1:
variables["@"] = list(resolved_outs)[0].path
return variables
def _compute_genrule_command(ctx, go_stdlib):
workspace_root = '$$(pwd)'
if ctx.build_file_path.startswith('external/'):
# We want GO_WORKSPACE to point at the root directory of the Bazel
# workspace containing this go_genrule's BUILD file. If it's being
# included in a different workspace as an external dependency, the
# link target must point to the external subtree instead of the main
# workspace (which contains code we don't care about).
#
# Given a build file path like "external/foo/bar/BUILD", the following
# slash split+join sets external_dep_prefix to "external/foo" and the
# effective workspace root to "$PWD/external/foo/".
external_dep_prefix = '/'.join(ctx.build_file_path.split('/')[:2])
workspace_root = '$$(pwd)/' + external_dep_prefix
cmd = [
'set -e',
'export GOROOT=$$(pwd)/' + go_stdlib.root_file.dirname,
'export GOOS=' + go_stdlib.goos,
'export GOARCH=' + go_stdlib.goarch,
# setup main GOPATH
'GENRULE_TMPDIR=$$(mktemp -d $${TMPDIR:-/tmp}/bazel_%s_XXXXXXXX)' % ctx.attr.name,
'export GOPATH=$${GENRULE_TMPDIR}/gopath',
'export GO_WORKSPACE=$${GOPATH}/src/' + ctx.attr.go_prefix.go_prefix,
'mkdir -p $${GO_WORKSPACE%/*}',
'ln -s %s/ $${GO_WORKSPACE}' % (workspace_root,),
'if [[ ! -e $${GO_WORKSPACE}/external ]]; then ln -s $$(pwd)/external/ $${GO_WORKSPACE}/; fi',
'if [[ ! -e $${GO_WORKSPACE}/bazel-out ]]; then ln -s $$(pwd)/bazel-out/ $${GO_WORKSPACE}/; fi',
# setup genfile GOPATH
'export GENGOPATH=$${GENRULE_TMPDIR}/gengopath',
'export GENGO_WORKSPACE=$${GENGOPATH}/src/' + ctx.attr.go_prefix.go_prefix,
'mkdir -p $${GENGO_WORKSPACE%/*}',
'ln -s $$(pwd)/$(GENDIR) $${GENGO_WORKSPACE}',
# drop into WORKSPACE
'export GOPATH=$${GOPATH}:$${GENGOPATH}',
'cd $${GO_WORKSPACE}',
# execute user command
ctx.attr.cmd.strip(' \t\n\r'),
]
return '\n'.join(cmd)
def _go_genrule_impl(ctx):
go_toolchain = ctx.toolchains["@io_bazel_rules_go//go:toolchain"]
mode = get_mode(ctx, ctx.attr._go_toolchain_flags)
go_stdlib = go_toolchain.stdlib.get(ctx, go_toolchain, mode)
all_srcs = depset(go_stdlib.files)
label_dict = {}
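  # Collect the Go stdlib plus all transitive sources of go_deps so the genrule
  # sandbox has a GOPATH usable by tools that import those packages.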
for dep in ctx.attr.go_deps:
lib = dep[GoLibrary]
all_srcs += lib.package.srcs
for transitive_lib in lib.transitive:
all_srcs += transitive_lib.srcs
for dep in ctx.attr.srcs:
all_srcs += dep.files
label_dict[dep.label] = dep.files
cmd = _compute_genrule_command(ctx, go_stdlib)
resolved_inputs, argv, runfiles_manifests = ctx.resolve_command(
command=cmd,
attribute="cmd",
expand_locations=True,
make_variables=_compute_genrule_variables(all_srcs, depset(ctx.outputs.outs)),
tools=ctx.attr.tools,
label_dict=label_dict
)
ctx.action(
inputs = list(all_srcs) + resolved_inputs,
outputs = ctx.outputs.outs,
env = ctx.configuration.default_shell_env,
command = argv,
progress_message = "%s %s" % (ctx.attr.message, ctx),
mnemonic = "GoGenrule",
)
# We have codegen procedures that depend on the "go/*" stdlib packages
# and thus depend on executing with a valid GOROOT and GOPATH containing
# some amount of the transitive go sources of dependencies. This go_genrule enables
# the creation of these sandboxes.
go_genrule = rule(
attrs = {
"srcs": attr.label_list(allow_files = True),
"tools": attr.label_list(
cfg = "host",
allow_files = True,
),
"outs": attr.output_list(mandatory = True),
"cmd": attr.string(mandatory = True),
"go_deps": attr.label_list(),
"message": attr.string(),
"executable": attr.bool(default = False),
"_go_toolchain_flags": attr.label(default = Label("@io_bazel_rules_go//go/private:go_toolchain_flags")),
# Next rule copied from bazelbuild/rules_go@a9df110cf04e167b33f10473c7e904d780d921e6
# and then modified a bit.
# I'm not sure if this is correct anymore.
# Also, go_prefix is deprecated, so this is probably going to break in the near future.
"go_prefix": attr.label(
providers = ["go_prefix"],
default = Label(
"//:go_prefix",
relative_to_caller_repository = True,
),
allow_files = False,
cfg = "host",
),
},
output_to_genfiles = True,
toolchains = ["@io_bazel_rules_go//go:toolchain"],
implementation = _go_genrule_impl,
)
|
py | 1a3d02269bb665d7d8c2ad2f67082d4767afc52b | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import functools
import glob
import json
import logging
import multiprocessing as mp
import numpy as np
import os
from itertools import chain
import pycocotools.mask as mask_util
from PIL import Image
from detectron2.structures import BoxMode
from detectron2.utils.logger import setup_logger
from detectron2.utils.comm import get_world_size
from fvcore.common.file_io import PathManager
try:
import cv2 # noqa
except ImportError:
# OpenCV is an optional dependency at the moment
pass
def load_cityscapes_instances(image_dir, gt_dir, from_json=True, to_polygons=True):
"""
Args:
image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
from_json (bool): whether to read annotations from the raw json file or the png files.
to_polygons (bool): whether to represent the segmentation as polygons
(COCO's format) instead of masks (cityscapes's format).
Returns:
list[dict]: a list of dicts in Detectron2 standard format. (See
`Using Custom Datasets </tutorials/datasets.html>`_ )
"""
if from_json:
assert to_polygons, (
"Cityscapes's json annotations are in polygon format. "
"Converting to mask format is not supported now."
)
files = []
for image_file in glob.glob(os.path.join(image_dir, "**/*.png")):
suffix = "leftImg8bit.png"
assert image_file.endswith(suffix)
prefix = image_dir
instance_file = gt_dir + image_file[len(prefix) : -len(suffix)] + "gtFine_instanceIds.png"
assert os.path.isfile(instance_file), instance_file
label_file = gt_dir + image_file[len(prefix) : -len(suffix)] + "gtFine_labelIds.png"
assert os.path.isfile(label_file), label_file
json_file = gt_dir + image_file[len(prefix) : -len(suffix)] + "gtFine_polygons.json"
files.append((image_file, instance_file, label_file, json_file))
assert len(files), "No images found in {}".format(image_dir)
logger = logging.getLogger(__name__)
logger.info("Preprocessing cityscapes annotations ...")
# This is still not fast: all workers will execute duplicate works and will
# take up to 10m on a 8GPU server.
pool = mp.Pool(processes=max(mp.cpu_count() // get_world_size() // 2, 4))
ret = pool.map(
functools.partial(cityscapes_files_to_dict, from_json=from_json, to_polygons=to_polygons),
files,
)
logger.info("Loaded {} images from {}".format(len(ret), image_dir))
# Map cityscape ids to contiguous ids
    from cityscapesscripts.helpers.labels import labels
labels = [l for l in labels if l.hasInstances and not l.ignoreInEval]
dataset_id_to_contiguous_id = {l.id: idx for idx, l in enumerate(labels)}
for dict_per_image in ret:
for anno in dict_per_image["annotations"]:
anno["category_id"] = dataset_id_to_contiguous_id[anno["category_id"]]
return ret
def load_cityscapes_semantic(image_dir, gt_dir):
"""
Args:
image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
Returns:
list[dict]: a list of dict, each has "file_name" and
"sem_seg_file_name".
"""
ret = []
for image_file in glob.glob(os.path.join(image_dir, "**/*.png")):
suffix = "leftImg8bit.png"
assert image_file.endswith(suffix)
prefix = image_dir
label_file = gt_dir + image_file[len(prefix) : -len(suffix)] + "gtFine_labelTrainIds.png"
assert os.path.isfile(
label_file
), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py" # noqa
json_file = gt_dir + image_file[len(prefix) : -len(suffix)] + "gtFine_polygons.json"
with PathManager.open(json_file, "r") as f:
jsonobj = json.load(f)
ret.append(
{
"file_name": image_file,
"sem_seg_file_name": label_file,
"height": jsonobj["imgHeight"],
"width": jsonobj["imgWidth"],
}
)
return ret
def cityscapes_files_to_dict(files, from_json, to_polygons):
"""
Parse cityscapes annotation files to a dict.
Args:
files (tuple): consists of (image_file, instance_id_file, label_id_file, json_file)
from_json (bool): whether to read annotations from the raw json file or the png files.
to_polygons (bool): whether to represent the segmentation as polygons
(COCO's format) instead of masks (cityscapes's format).
Returns:
A dict in Detectron2 Dataset format.
"""
    from cityscapesscripts.helpers.labels import id2label, name2label
image_file, instance_id_file, _, json_file = files
annos = []
if from_json:
from shapely.geometry import MultiPolygon, Polygon
with PathManager.open(json_file, "r") as f:
jsonobj = json.load(f)
ret = {
"file_name": image_file,
"image_id": os.path.basename(image_file),
"height": jsonobj["imgHeight"],
"width": jsonobj["imgWidth"],
}
# `polygons_union` contains the union of all valid polygons.
polygons_union = Polygon()
# CityscapesScripts draw the polygons in sequential order
# and each polygon *overwrites* existing ones. See
# (https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/json2instanceImg.py) # noqa
# We use reverse order, and each polygon *avoids* early ones.
        # This will resolve the polygon overlaps in the same way as CityscapesScripts.
for obj in jsonobj["objects"][::-1]:
if "deleted" in obj: # cityscapes data format specific
continue
label_name = obj["label"]
try:
label = name2label[label_name]
except KeyError:
if label_name.endswith("group"): # crowd area
label = name2label[label_name[: -len("group")]]
else:
raise
if label.id < 0: # cityscapes data format
continue
# Cityscapes's raw annotations uses integer coordinates
# Therefore +0.5 here
poly_coord = np.asarray(obj["polygon"], dtype="f4") + 0.5
# CityscapesScript uses PIL.ImageDraw.polygon to rasterize
# polygons for evaluation. This function operates in integer space
# and draws each pixel whose center falls into the polygon.
# Therefore it draws a polygon which is 0.5 "fatter" in expectation.
# We therefore dilate the input polygon by 0.5 as our input.
poly = Polygon(poly_coord).buffer(0.5, resolution=4)
if not label.hasInstances or label.ignoreInEval:
# even if we won't store the polygon it still contributes to overlaps resolution
polygons_union = polygons_union.union(poly)
continue
# Take non-overlapping part of the polygon
poly_wo_overlaps = poly.difference(polygons_union)
if poly_wo_overlaps.is_empty:
continue
polygons_union = polygons_union.union(poly)
anno = {}
anno["iscrowd"] = label_name.endswith("group")
anno["category_id"] = label.id
if isinstance(poly_wo_overlaps, Polygon):
poly_list = [poly_wo_overlaps]
elif isinstance(poly_wo_overlaps, MultiPolygon):
poly_list = poly_wo_overlaps.geoms
else:
raise NotImplementedError("Unknown geometric structure {}".format(poly_wo_overlaps))
poly_coord = []
for poly_el in poly_list:
# COCO API can work only with exterior boundaries now, hence we store only them.
# TODO: store both exterior and interior boundaries once other parts of the
# codebase support holes in polygons.
poly_coord.append(list(chain(*poly_el.exterior.coords)))
anno["segmentation"] = poly_coord
(xmin, ymin, xmax, ymax) = poly_wo_overlaps.bounds
anno["bbox"] = (xmin, ymin, xmax, ymax)
anno["bbox_mode"] = BoxMode.XYXY_ABS
annos.append(anno)
else:
# See also the official annotation parsing scripts at
# https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/instances2dict.py # noqa
with PathManager.open(instance_id_file, "rb") as f:
inst_image = np.asarray(Image.open(f), order="F")
# ids < 24 are stuff labels (filtering them first is about 5% faster)
flattened_ids = np.unique(inst_image[inst_image >= 24])
ret = {
"file_name": image_file,
"image_id": os.path.basename(image_file),
"height": inst_image.shape[0],
"width": inst_image.shape[1],
}
for instance_id in flattened_ids:
# For non-crowd annotations, instance_id // 1000 is the label_id
# Crowd annotations have <1000 instance ids
label_id = instance_id // 1000 if instance_id >= 1000 else instance_id
label = id2label[label_id]
if not label.hasInstances or label.ignoreInEval:
continue
anno = {}
anno["iscrowd"] = instance_id < 1000
anno["category_id"] = label.id
mask = np.asarray(inst_image == instance_id, dtype=np.uint8, order="F")
inds = np.nonzero(mask)
ymin, ymax = inds[0].min(), inds[0].max()
xmin, xmax = inds[1].min(), inds[1].max()
anno["bbox"] = (xmin, ymin, xmax, ymax)
if xmax <= xmin or ymax <= ymin:
continue
anno["bbox_mode"] = BoxMode.XYXY_ABS
if to_polygons:
# This conversion comes from D4809743 and D5171122,
# when Mask-RCNN was first developed.
contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[
-2
]
polygons = [c.reshape(-1).tolist() for c in contours if len(c) >= 3]
                # opencv can produce invalid polygons
if len(polygons) == 0:
continue
anno["segmentation"] = polygons
else:
anno["segmentation"] = mask_util.encode(mask[:, :, None])[0]
annos.append(anno)
ret["annotations"] = annos
return ret
if __name__ == "__main__":
"""
Test the cityscapes dataset loader.
Usage:
python -m detectron2.data.datasets.cityscapes \
cityscapes/leftImg8bit/train cityscapes/gtFine/train
"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("image_dir")
parser.add_argument("gt_dir")
parser.add_argument("--type", choices=["instance", "semantic"], default="instance")
args = parser.parse_args()
from detectron2.data.catalog import Metadata
from detectron2.utils.visualizer import Visualizer
from cityscapesscripts.helpers.labels import labels
logger = setup_logger(name=__name__)
dirname = "cityscapes-data-vis"
os.makedirs(dirname, exist_ok=True)
if args.type == "instance":
dicts = load_cityscapes_instances(
args.image_dir, args.gt_dir, from_json=True, to_polygons=True
)
logger.info("Done loading {} samples.".format(len(dicts)))
thing_classes = [k.name for k in labels if k.hasInstances and not k.ignoreInEval]
meta = Metadata().set(thing_classes=thing_classes)
else:
dicts = load_cityscapes_semantic(args.image_dir, args.gt_dir)
logger.info("Done loading {} samples.".format(len(dicts)))
stuff_names = [k.name for k in labels if k.trainId != 255]
stuff_colors = [k.color for k in labels if k.trainId != 255]
meta = Metadata().set(stuff_names=stuff_names, stuff_colors=stuff_colors)
for d in dicts:
img = np.array(Image.open(d["file_name"]))
visualizer = Visualizer(img, metadata=meta)
vis = visualizer.draw_dataset_dict(d)
# cv2.imshow("a", vis.get_image()[:, :, ::-1])
# cv2.waitKey()
fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
vis.save(fpath)
|
py | 1a3d02876bbec01230f97a29bf800f291a9af0d9 | # qubit number=3
# total number=12
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
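    # QAOA-style layer: a cost unitary of controlled-phase/phase rotations over the
    # graph edges E, followed by an RX mixer on every vertex qubit.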
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.cx(input_qubit[1],input_qubit[0]) # number=5
prog.h(input_qubit[0]) # number=7
prog.cz(input_qubit[1],input_qubit[0]) # number=8
prog.h(input_qubit[0]) # number=9
prog.x(input_qubit[2]) # number=10
prog.x(input_qubit[2]) # number=11
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5200
writefile = open("../data/startQiskit_noisy187.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
py | 1a3d030a21fa62f55f5aa38df2a31db571b2bb63 | import pytest
from cache import REDIS
def init_cache():
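    # Placeholder: repopulate the cache here after the Redis database has been flushed.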
pass
@pytest.fixture(autouse=True)
def preinit():
"""
    Autouse pytest fixture that runs preprocessing before each test case,
    e.g. resetting the database and cache.
"""
REDIS.flushdb()
init_cache()
|
py | 1a3d031dda33b575ac0190d0a9cdfdf8e5100f0c | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tf_pose.runner import infer, Estimator, get_estimator
|
py | 1a3d0333b0fe8ac6191903ade63d88e6694fa20f | # Echo server program
import socket
from time import ctime
import os
def psend(conn, prompt, data):
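    # Send the decoded data back to the client, prefixed with '[prompt] '.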
conn.sendall(('[%s] %s' %
(prompt, data.decode())
).encode())
HOST = '' # Symbolic name meaning all available interfaces
PORT = 50007 # Arbitrary non-previleged port
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((HOST, PORT))
s.listen(1)
conn, addr = s.accept()
with conn:
print('Connected by', addr)
while True:
data = conn.recv(1024)
if not data: break
datas = data.decode().split(' ')
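            # Commands: 'date' (server time), 'os' (server OS name), 'ls [path]'
            # (directory listing); the result is sent as a bracketed prefix to the
            # echoed data, and anything else is simply echoed back.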
if len(datas) > 0:
if datas[0] == 'date':
                    psend(conn, ctime(), data)
elif datas[0] == 'os':
psend(conn, os.name, data)
elif datas[0] == 'ls':
if len(datas) > 1:
psend(conn, os.listdir(datas[1]), data)
else:
psend(conn, os.listdir(os.curdir), data)
else:
conn.sendall(data)
else:
conn.sendall(data) |
py | 1a3d03dea558f39758a573bb6ced13b9be52475c | """DjPra1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include,re_path
from django.contrib.auth import views as auth_views
from django.views.generic.base import TemplateView
from . import views
app_name='newGroup'
#from django.conf.urls import url,include
urlpatterns = [
re_path(r'^search',views.search,name='search'),
re_path(r'^newGroup',views.newGroup,name='newGroup'),
#test
    # start: practice section content
re_path(r'^base',views.testbase,name='testbase'),
re_path(r'^guide',views.testGuide,name='testGuide'),
re_path(r'^home',views.homePage,name='homePage'),
re_path(r'^showimage',views.showImage,name='showImage'),
re_path(r'^create',views.newGroupCreate,name='NewGroupCreate'),
#re_path(r'^articles/(?P<year>[0-9]{4})/$', views.year_archive),
path('choose/<int:groupsId>/',views.newGroupChoose,name='newGroupChoose'),
path('column/<int:imageGroups>/<int:groupsId>/',views.newGroupColumn,name='newGroupColumn'),
re_path(r'more',views.morefunction,name='morefunction'),
re_path(r'',views.morefunction,name='morefunction'),
]
|
py | 1a3d0514eb3441d8f9f01322263ebd20c4fa7434 | #!/usr/bin/env python3
import sys
from model import ModelW2W
sys.path.extend(['..'])
import tensorflow as tf
from tensorflow.python.ops.rnn_cell import LSTMCell
from tfx.bricks import embedding, rnn, rnn_decoder, dense_to_one_hot, brnn
class Model(ModelW2W):
def __init__(self, data, FLAGS):
super(Model, self).__init__(data, FLAGS)
encoder_embedding_size = 16 * 2
encoder_lstm_size = 16
encoder_vocabulary_length = len(data.idx2word_history)
history_length = data.train_set['histories'].shape[1]
encoder_sequence_length = data.train_set['histories'].shape[2]
decoder_lstm_size = 16
decoder_embedding_size = 16
decoder_sequence_length = data.batch_actions.shape[2]
decoder_vocabulary_length = len(data.idx2word_action)
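        # Hierarchical encoder-decoder: a bidirectional RNN encodes the words of each
        # utterance, a second RNN encodes the resulting utterance states into a single
        # history state, and an RNN decoder generates the system action word by word.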
with tf.name_scope('data'):
batch_histories = tf.Variable(data.batch_histories, name='histories', trainable=False)
batch_actions = tf.Variable(data.batch_actions, name='actions', trainable=False)
histories = tf.gather(batch_histories, self.batch_idx)
actions = tf.gather(batch_actions, self.batch_idx)
# inference model
with tf.name_scope('model'):
batch_size = tf.shape(histories)[0]
encoder_embedding = embedding(
input=histories,
length=encoder_vocabulary_length,
size=encoder_embedding_size,
name='encoder_embedding'
)
with tf.name_scope("UtterancesEncoder"):
with tf.name_scope("RNNForwardUtteranceEncoderCell_1"):
cell_fw_1 = LSTMCell(
num_units=encoder_lstm_size,
input_size=encoder_embedding_size,
use_peepholes=True
)
initial_state_fw_1 = cell_fw_1.zero_state(batch_size, tf.float32)
with tf.name_scope("RNNBackwardUtteranceEncoderCell_1"):
cell_bw_1 = LSTMCell(
num_units=encoder_lstm_size,
input_size=encoder_embedding_size,
use_peepholes=True
)
initial_state_bw_1 = cell_bw_1.zero_state(batch_size, tf.float32)
with tf.name_scope("RNNForwardUtteranceEncoderCell_2"):
cell_fw_2 = LSTMCell(
num_units=encoder_lstm_size,
input_size=cell_fw_1.output_size + cell_bw_1.output_size,
use_peepholes=True
)
initial_state_fw_2 = cell_fw_2.zero_state(batch_size, tf.float32)
                # the input data has these dimensions
# [
# #batch,
# #utterance in a history (a dialogue),
# #word in an utterance (a sentence),
# embedding dimension
# ]
# encode all utterances along the word axis
encoder_states_2d = []
for utterance in range(history_length):
encoder_outputs, _ = brnn(
cell_fw=cell_fw_1,
cell_bw=cell_bw_1,
inputs=[encoder_embedding[:, utterance, word, :] for word in
range(encoder_sequence_length)],
initial_state_fw=initial_state_fw_1,
initial_state_bw=initial_state_bw_1,
name='RNNUtteranceBidirectionalLayer',
reuse=True if utterance > 0 else None
)
_, encoder_states = rnn(
cell=cell_fw_2,
inputs=encoder_outputs,
initial_state=initial_state_fw_2,
name='RNNUtteranceForwardEncoder',
reuse=True if utterance > 0 else None
)
# print(encoder_states[-1])
encoder_states = tf.concat(1, tf.expand_dims(encoder_states[-1], 1))
# print(encoder_states)
encoder_states_2d.append(encoder_states)
encoder_states_2d = tf.concat(1, encoder_states_2d)
# print('encoder_states_2d', encoder_states_2d)
with tf.name_scope("HistoryEncoder"):
# encode all histories along the utterance axis
with tf.name_scope("RNNForwardHistoryEncoderCell_1"):
cell_fw_1 = LSTMCell(
num_units=encoder_lstm_size,
input_size=cell_fw_2.state_size,
use_peepholes=True
)
initial_state_fw_1 = cell_fw_1.zero_state(batch_size, tf.float32)
with tf.name_scope("RNNBackwardHistoryEncoderCell_1"):
cell_bw_1 = LSTMCell(
num_units=encoder_lstm_size,
input_size=cell_fw_2.state_size,
use_peepholes=True
)
initial_state_bw_1 = cell_bw_1.zero_state(batch_size, tf.float32)
with tf.name_scope("RNNForwardHistoryEncoderCell_2"):
cell_fw_2 = LSTMCell(
num_units=encoder_lstm_size,
input_size=cell_fw_1.output_size + cell_bw_1.output_size,
use_peepholes=True
)
initial_state_fw_2 = cell_fw_2.zero_state(batch_size, tf.float32)
encoder_outputs, _ = brnn(
cell_fw=cell_fw_1,
cell_bw=cell_bw_1,
inputs=[encoder_states_2d[:, utterance, :] for utterance in range(history_length)],
initial_state_fw=initial_state_fw_1,
initial_state_bw=initial_state_bw_1,
name='RNNHistoryBidirectionalLayer',
reuse=None
)
_, encoder_states = rnn(
cell=cell_fw_2,
inputs=encoder_outputs,
initial_state=initial_state_fw_2,
name='RNNHistoryForwardEncoder',
reuse=None
)
with tf.name_scope("Decoder"):
with tf.name_scope("RNNDecoderCell"):
cell = LSTMCell(
num_units=decoder_lstm_size,
input_size=decoder_embedding_size+cell_fw_2.state_size,
use_peepholes=True,
)
initial_state = cell.zero_state(batch_size, tf.float32)
# decode all histories along the utterance axis
final_encoder_state = encoder_states[-1]
decoder_states, decoder_outputs, decoder_outputs_softmax = rnn_decoder(
cell=cell,
inputs=[actions[:, word] for word in range(decoder_sequence_length)],
static_input=final_encoder_state,
initial_state=initial_state, #final_encoder_state,
embedding_size=decoder_embedding_size,
embedding_length=decoder_vocabulary_length,
sequence_length=decoder_sequence_length,
name='RNNDecoder',
reuse=False,
use_inputs_prob=self.use_inputs_prob
)
self.predictions = tf.concat(1, decoder_outputs_softmax)
# print(p_o_i)
if FLAGS.print_variables:
for v in tf.trainable_variables():
print(v.name)
with tf.name_scope('loss'):
one_hot_labels = dense_to_one_hot(actions, decoder_vocabulary_length)
self.loss = tf.reduce_mean(- one_hot_labels * tf.log(tf.clip_by_value(self.predictions, 1e-10, 1.0)), name='loss')
tf.scalar_summary('loss', self.loss)
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(tf.argmax(one_hot_labels, 2), tf.argmax(self.predictions, 2))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
tf.scalar_summary('accuracy', self.accuracy)
|
py | 1a3d0597c21d0065e26f18dea34b1d1cdb1894be | from typing import Any, Dict, List, Optional, Tuple, Type, Union
import gym
import numpy as np
import torch as th
from torch.nn import functional as F
from stable_baselines3.common import logger
from stable_baselines3.common.noise import ActionNoise
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import polyak_update
from stable_baselines3.td3.policies import TD3Policy
class TD3(OffPolicyAlgorithm):
"""
Twin Delayed DDPG (TD3)
Addressing Function Approximation Error in Actor-Critic Methods.
Original implementation: https://github.com/sfujim/TD3
Paper: https://arxiv.org/abs/1802.09477
Introduction to TD3: https://spinningup.openai.com/en/latest/algorithms/td3.html
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: learning rate for adam optimizer,
the same learning rate will be used for all networks (Q-Values, Actor and Value function)
it can be a function of the current progress remaining (from 1 to 0)
:param buffer_size: size of the replay buffer
:param learning_starts: how many steps of the model to collect transitions for before learning starts
:param batch_size: Minibatch size for each gradient update
:param tau: the soft update coefficient ("Polyak update", between 0 and 1)
:param gamma: the discount factor
:param train_freq: Update the model every ``train_freq`` steps. Set to `-1` to disable.
:param gradient_steps: How many gradient steps to do after each rollout
(see ``train_freq`` and ``n_episodes_rollout``)
Set to ``-1`` means to do as many gradient steps as steps done in the environment
during the rollout.
:param n_episodes_rollout: Update the model every ``n_episodes_rollout`` episodes.
Note that this cannot be used at the same time as ``train_freq``. Set to `-1` to disable.
:param action_noise: the action noise type (None by default), this can help
for hard exploration problem. Cf common.noise for the different action noise type.
:param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
at a cost of more complexity.
See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
:param policy_delay: Policy and target networks will only be updated once every policy_delay steps
per training steps. The Q values will be updated policy_delay more often (update every training step).
:param target_policy_noise: Standard deviation of Gaussian noise added to target policy
(smoothing noise)
:param target_noise_clip: Limit for absolute value of target policy smoothing noise.
:param create_eval_env: Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
"""
def __init__(
self,
policy: Union[str, Type[TD3Policy]],
env: Union[GymEnv, str],
learning_rate: Union[float, Schedule] = 1e-3,
buffer_size: int = int(1e6),
learning_starts: int = 100,
batch_size: int = 100,
tau: float = 0.005,
gamma: float = 0.99,
train_freq: int = -1,
gradient_steps: int = -1,
n_episodes_rollout: int = 1,
action_noise: Optional[ActionNoise] = None,
optimize_memory_usage: bool = False,
policy_delay: int = 2,
target_policy_noise: float = 0.2,
target_noise_clip: float = 0.5,
tensorboard_log: Optional[str] = None,
create_eval_env: bool = False,
policy_kwargs: Dict[str, Any] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
_init_setup_model: bool = True,
):
super(TD3, self).__init__(
policy,
env,
TD3Policy,
learning_rate,
buffer_size,
learning_starts,
batch_size,
tau,
gamma,
train_freq,
gradient_steps,
n_episodes_rollout,
action_noise=action_noise,
policy_kwargs=policy_kwargs,
tensorboard_log=tensorboard_log,
verbose=verbose,
device=device,
create_eval_env=create_eval_env,
seed=seed,
sde_support=False,
optimize_memory_usage=optimize_memory_usage,
supported_action_spaces=(gym.spaces.Box),
)
self.policy_delay = policy_delay
self.target_noise_clip = target_noise_clip
self.target_policy_noise = target_policy_noise
if _init_setup_model:
self._setup_model()
def _setup_model(self) -> None:
super(TD3, self)._setup_model()
self._create_aliases()
def _create_aliases(self) -> None:
self.actor = self.policy.actor
self.actor_target = self.policy.actor_target
self.critic = self.policy.critic
self.critic_target = self.policy.critic_target
def train(self, gradient_steps: int, batch_size: int = 100) -> None:
# Update learning rate according to lr schedule
self._update_learning_rate([self.actor.optimizer, self.critic.optimizer])
actor_losses, critic_losses = [], []
for gradient_step in range(gradient_steps):
# Sample replay buffer
replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)
with th.no_grad():
# Select action according to policy and add clipped noise
noise = replay_data.actions.clone().data.normal_(0, self.target_policy_noise)
noise = noise.clamp(-self.target_noise_clip, self.target_noise_clip)
next_actions = (self.actor_target(replay_data.next_observations) + noise).clamp(-1, 1)
# Compute the next Q-values: min over all critics targets
next_q_values = th.cat(self.critic_target(replay_data.next_observations, next_actions), dim=1)
next_q_values, _ = th.min(next_q_values, dim=1, keepdim=True)
target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values
# Get current Q-values estimates for each critic network
current_q_values = self.critic(replay_data.observations, replay_data.actions)
# Compute critic loss
critic_loss = sum([F.mse_loss(current_q, target_q_values) for current_q in current_q_values])
critic_losses.append(critic_loss.item())
# Optimize the critics
self.critic.optimizer.zero_grad()
critic_loss.backward()
self.critic.optimizer.step()
# Delayed policy updates
if gradient_step % self.policy_delay == 0:
# Compute actor loss
actor_loss = -self.critic.q1_forward(replay_data.observations, self.actor(replay_data.observations)).mean()
actor_losses.append(actor_loss.item())
# Optimize the actor
self.actor.optimizer.zero_grad()
actor_loss.backward()
self.actor.optimizer.step()
polyak_update(self.critic.parameters(), self.critic_target.parameters(), self.tau)
polyak_update(self.actor.parameters(), self.actor_target.parameters(), self.tau)
self._n_updates += gradient_steps
logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
logger.record("train/actor_loss", np.mean(actor_losses))
logger.record("train/critic_loss", np.mean(critic_losses))
def learn(
self,
total_timesteps: int,
callback: MaybeCallback = None,
log_interval: int = 4,
eval_env: Optional[GymEnv] = None,
eval_freq: int = -1,
n_eval_episodes: int = 5,
tb_log_name: str = "TD3",
eval_log_path: Optional[str] = None,
reset_num_timesteps: bool = True,
) -> OffPolicyAlgorithm:
return super(TD3, self).learn(
total_timesteps=total_timesteps,
callback=callback,
log_interval=log_interval,
eval_env=eval_env,
eval_freq=eval_freq,
n_eval_episodes=n_eval_episodes,
tb_log_name=tb_log_name,
eval_log_path=eval_log_path,
reset_num_timesteps=reset_num_timesteps,
)
def _excluded_save_params(self) -> List[str]:
return super(TD3, self)._excluded_save_params() + ["actor", "critic", "actor_target", "critic_target"]
def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
state_dicts = ["policy", "actor.optimizer", "critic.optimizer"]
return state_dicts, []
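# --- Usage sketch (not part of the upstream module) ---
# A minimal, hedged example of driving TD3; it assumes a continuous-action
# Gym environment id such as "Pendulum-v1" is available and simply mirrors
# the constructor documented above. Guarded so importing stays side-effect
# free.
if __name__ == "__main__":  # pragma: no cover
    from stable_baselines3.common.noise import NormalActionNoise

    n_actions = 1  # Pendulum has a single action dimension
    action_noise = NormalActionNoise(mean=np.zeros(n_actions), sigma=0.1 * np.ones(n_actions))

    model = TD3("MlpPolicy", "Pendulum-v1", action_noise=action_noise, verbose=1)
    model.learn(total_timesteps=10_000, log_interval=10)
    model.save("td3_pendulum")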
|
py | 1a3d06abda0a21519447a1b64cdfb39e6e048eb4 | #!/usr/bin/python
#
# Copyright (c) 2018 Juniper Networks, Inc. All rights reserved.
#
"""This file contains code to support the hitless image upgrade feature."""
import argparse
from builtins import object
from builtins import str
import copy
from datetime import timedelta
import re
import sys
import traceback
sys.path.append("/opt/contrail/fabric_ansible_playbooks/module_utils")
# unit test
sys.path.append("../fabric-ansible/ansible-playbooks/module_utils")
from filter_utils import _task_error_log, FilterLog
from job_manager.job_utils import JobAnnotations, JobVncApi
ordered_role_groups = [
["leaf"],
["spine"],
["default"]
]
IMAGE_UPGRADE_DURATION = 30 # minutes
class FilterModule(object):
critical_routing_bridging_roles = {
"CRB-MCAST-Gateway",
"DC-Gateway",
"DCI-Gateway",
}
@staticmethod
def _validate_job_ctx(job_ctx):
if not job_ctx.get('fabric_fqname'):
raise ValueError('Invalid job_ctx: missing fabric_fqname')
job_input = job_ctx.get('job_input')
if not job_input:
raise ValueError('Invalid job_ctx: missing job_input')
if not job_input.get('fabric_uuid'):
raise ValueError('Invalid job_ctx: missing fabric_uuid')
return job_input
# end _validate_job_ctx
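# A minimal job_ctx accepted by the validator above (illustrative values
# only; real contexts carry more fields, e.g. 'job_template_fqname', which
# other helpers in this class expect):
#
#   job_ctx = {
#       'fabric_fqname': 'default-global-system-config:fab01',
#       'job_template_fqname': ['default-global-system-config',
#                               'hitless_upgrade_template'],
#       'job_input': {'fabric_uuid': '<fabric-uuid>'},
#   }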
def filters(self):
return {
'hitless_upgrade_plan': self.get_hitless_upgrade_plan,
'hitless_next_batch': self.get_next_batch,
'hitless_all_devices': self.get_all_devices,
'hitless_device_info': self.get_device_info,
'hitless_validate': self.validate_critical_roles
}
# end filters
# Wrapper to call main routine
def get_hitless_upgrade_plan(self, job_ctx, image_upgrade_list):
try:
FilterLog.instance("HitlessUpgradeFilter")
self.job_input = FilterModule._validate_job_ctx(job_ctx)
self.fabric_uuid = self.job_input['fabric_uuid']
self.vncapi = JobVncApi.vnc_init(job_ctx)
self.job_ctx = job_ctx
self.ja = JobAnnotations(self.vncapi)
self.advanced_parameters = self._get_advanced_params()
self._cache_job_input()
self.batch_limit = self.advanced_parameters.get(
'bulk_device_upgrade_count')
self.image_upgrade_list = image_upgrade_list
upgrade_plan = self._get_hitless_upgrade_plan()
return upgrade_plan
except Exception as ex:
errmsg = "Unexpected error: %s\n%s" % (
str(ex), traceback.format_exc()
)
_task_error_log(errmsg)
return {
'status': 'failure',
'error_msg': errmsg,
}
# end get_hitless_upgrade_plan
# Get any advanced parameters from job input to override defaults
def _get_advanced_params(self):
job_template_fqname = self.job_ctx.get('job_template_fqname')
def_json = self.ja.generate_default_json(job_template_fqname)
adv_params = def_json.get("advanced_parameters")
job_input_adv_params = self.job_input.get('advanced_parameters', {})
adv_params = self.ja.dict_update(adv_params, job_input_adv_params)
return adv_params
# end _get_advanced_params
# Store the job input on the fabric object for UI to retrieve later
def _cache_job_input(self):
job_input = copy.deepcopy(self.job_input)
job_input.update({"advanced_parameters": self.advanced_parameters})
self.ja.cache_job_input(self.fabric_uuid,
self.job_ctx.get('job_template_fqname')[-1],
job_input)
# end _cache_job_input
# Read from Node Profile to determine whether the upgrade is hitless
def _is_hitless_upgrade(self, device_obj):
node_profile_refs = device_obj.get_node_profile_refs()
if node_profile_refs:
np_uuid = node_profile_refs[0].get('uuid')
node_profile_obj = self.vncapi.node_profile_read(id=np_uuid)
is_hitless = node_profile_obj.get_node_profile_hitless_upgrade()
return is_hitless
return True
# end _is_hitless_upgrade
# Main routine to generate an upgrade plan
def _get_hitless_upgrade_plan(self):
self.device_table, self.skipped_device_table = \
self._generate_device_table()
self.role_device_groups = self._generate_role_device_groups()
self.vpg_table = self._generate_vpg_table()
self._generate_buddy_lists()
self.batches = self._generate_batches()
self.report = self._generate_report()
self.results = self._generate_results()
upgrade_plan = {
'image_upgrade_list': self.image_upgrade_list,
'advanced_parameters': self.advanced_parameters,
'device_table': self.device_table,
'device_count': len(self.device_table),
'skipped_device_table': self.skipped_device_table,
'role_device_groups': self.role_device_groups,
'vpg_table': self.vpg_table,
'batches': self.batches,
'report': self.report,
'results': self.results,
'status': "success"
}
return upgrade_plan
# end _get_hitless_upgrade_plan
# generate a table of device information
def _generate_device_table(self):
device_table = {}
skipped_device_table = {}
for image_entry in self.image_upgrade_list:
image_uuid = image_entry.get('image_uuid')
image_obj = self.vncapi.device_image_read(id=image_uuid)
device_list = image_entry.get('device_list')
for device_uuid in device_list:
device_obj = self.vncapi.physical_router_read(id=device_uuid)
routing_bridging_roles = device_obj.routing_bridging_roles
if not routing_bridging_roles:
raise ValueError("Cannot find routing-bridging roles")
rb_roles = routing_bridging_roles.get_rb_roles()
is_hitless_upgrade = self._is_hitless_upgrade(device_obj)
device_info = {
"basic": {
"device_fqname": device_obj.fq_name,
"device_vendor":
device_obj.physical_router_vendor_name,
"device_family":
device_obj.physical_router_device_family,
"device_product":
device_obj.physical_router_product_name,
"device_serial_number":
device_obj.physical_router_serial_number,
"device_management_ip":
device_obj.physical_router_management_ip,
"device_username":
device_obj.physical_router_user_credentials.
username,
"device_password": self._get_password(device_obj),
"device_image_uuid": image_uuid,
"device_hitless_upgrade": is_hitless_upgrade
},
'image_family': image_obj.device_image_device_family,
'image_version': image_obj.device_image_os_version,
'current_image_version':
device_obj.physical_router_os_version,
'name': device_obj.fq_name[-1],
'uuid': device_uuid,
'physical_role': device_obj.physical_router_role,
'rb_roles': rb_roles,
'role': self._determine_role(
device_obj.physical_router_role, rb_roles),
'err_msgs': [],
'vpg_info': {"vpg_list": [], "buddies": []},
'target_multihomed_interface': []
}
skip, reason = self._check_skip_device_upgrade(device_info)
if skip:
if reason:
device_info['skip_reason'] = reason
skipped_device_table[device_uuid] = device_info
else:
device_table[device_uuid] = device_info
return device_table, skipped_device_table
# end _generate_device_table
# generate a simple table of roles with their corresponding devices
def _generate_role_device_groups(self):
# Group devices based on role. Use dict keyed by role name
role_device_groups = {}
for device_uuid, device_info in list(self.device_table.items()):
role = device_info['role']
if role not in role_device_groups:
role_device_groups[role] = []
role_device_groups[role].append(device_uuid)
# Sort lists
for role, group in list(role_device_groups.items()):
group.sort()
return role_device_groups
# end _generate_role_device_groups
# generate a table keyed by virtual port group uuid containing member
# devices and their physical interfaces
def _generate_vpg_table(self):
vpg_table = {}
vpg_refs = self.vncapi.virtual_port_groups_list(
parent_id=self.fabric_uuid).get(
'virtual-port-groups', [])
for vpg_ref in vpg_refs:
vpg_uuid = vpg_ref.get('uuid')
vpg_table[vpg_uuid] = {"device_table": {}}
vpg_dev_table = vpg_table[vpg_uuid]['device_table']
vpg_obj = self.vncapi.virtual_port_group_read(id=vpg_uuid)
vpg_table[vpg_uuid]['name'] = vpg_obj.fq_name[2]
pi_refs = vpg_obj.get_physical_interface_refs() or []
for pi_ref in pi_refs:
pi_uuid = pi_ref.get('uuid')
pi_obj = self.vncapi.physical_interface_read(id=pi_uuid)
device_uuid = pi_obj.parent_uuid
if device_uuid not in vpg_dev_table:
vpg_dev_table[device_uuid] = []
# If this is one of the devices to upgrade, append this
# vpg to the vpg_list for use later
if device_uuid in self.device_table:
device_info = self.device_table[device_uuid]
device_info['vpg_info']['vpg_list'].append(vpg_uuid)
pi_entry = {"fq_name": pi_obj.fq_name, "uuid": pi_obj.uuid}
vpg_dev_table[device_uuid].append(pi_entry)
# Add interface name to multihomed list
if device_uuid in self.device_table:
device_info = self.device_table[device_uuid]
if_name = pi_obj.fq_name[2]
if if_name not in \
device_info['target_multihomed_interface']:
device_info['target_multihomed_interface'].\
append(if_name)
return vpg_table
# end _generate_vpg_table
# For each device, generate a list of devices which cannot be upgraded at
# the same time because they are multi-homed to the same BMS
def _generate_buddy_lists(self):
for device_uuid, device_info in list(self.device_table.items()):
vpg_info = self.device_table[device_uuid]['vpg_info']
for vpg_uuid in vpg_info['vpg_list']:
vpg_entry = self.vpg_table[vpg_uuid]
vpg_dev_table = vpg_entry['device_table']
for vpg_dev_uuid, pi_list in list(vpg_dev_table.items()):
if vpg_dev_uuid not in vpg_info['buddies'] and \
vpg_dev_uuid != device_uuid:
buddy_entry = self._get_buddy_entry(vpg_dev_uuid,
pi_list)
vpg_info['buddies'].append(buddy_entry)
# end _generate_buddy_lists
# Create entry for peer, including ip_addr, username, password
def _get_buddy_entry(self, device_uuid, pi_list):
if device_uuid in self.device_table or \
device_uuid in self.skipped_device_table:
if device_uuid in self.device_table:
device_info = self.device_table[device_uuid]
else:
device_info = self.skipped_device_table[device_uuid]
fq_name = device_info['basic']['device_fqname']
mgmt_ip = device_info['basic']['device_management_ip']
username = device_info['basic']['device_username']
password = device_info['basic']['device_password']
vendor = device_info['basic']['device_vendor']
multihomed_interface_list = \
device_info['target_multihomed_interface']
else:
device_obj = self.vncapi.physical_router_read(id=device_uuid)
fq_name = device_obj.fq_name
mgmt_ip = device_obj.physical_router_management_ip
username = device_obj.physical_router_user_credentials.username
password = self._get_password(device_obj)
vendor = device_obj.physical_router_vendor_name
multihomed_interface_list = \
self._get_multihomed_interface_list(pi_list)
return {
"uuid": device_uuid,
"fq_name": fq_name,
"name": fq_name[-1],
"mgmt_ip": mgmt_ip,
"username": username,
"password": password,
"vendor": vendor,
"multihomed_interface_list": multihomed_interface_list
}
# end _get_buddy_entry
# Get list of multihomed interface names
def _get_multihomed_interface_list(self, pi_list):
if_list = []
for pi_entry in pi_list:
if_name = pi_entry['fq_name'][-1]
if if_name not in if_list:
if_list.append(if_name)
return if_list
# end _get_multihomed_interface_list
def _device_value_based_on_number_of_critical_roles(self, device_uuid):
rb_roles = self.device_table[device_uuid].get('rb_roles')
how_many_critical_roles = 0
for rb_role in rb_roles:
if rb_role in FilterModule.critical_routing_bridging_roles:
how_many_critical_roles += 1
return -how_many_critical_roles
# Creates a dict: name of critical routing bridging role -> number of
# occurrences in all devices.
def _calculate_devices_with_critical_routing_bridging_roles(self):
self.critical_routing_bridging_roles_count = {}
for critical_routing_bridging_role in\
FilterModule.critical_routing_bridging_roles:
self.critical_routing_bridging_roles_count[
critical_routing_bridging_role] = 0
for device_uuid, device_info in list(self.device_table.items()):
for routing_bridging_role in device_info.get('rb_roles'):
if routing_bridging_role in\
FilterModule.critical_routing_bridging_roles:
self.critical_routing_bridging_roles_count[
routing_bridging_role] += 1
# Assumes that critical_routing_bridging_roles_count has been initialized.
def _calc_max_number_of_repr_of_critical_rb_roles_per_batch(self):
self.max_number_of_repr_of_critical_rb_roles_per_batch = {}
for role_name, number_of_occurences \
in list(self.critical_routing_bridging_roles_count.items()):
self.max_number_of_repr_of_critical_rb_roles_per_batch[role_name] \
= number_of_occurences // 2 + number_of_occurences % 2
def _calculate_max_number_of_spines_updated_in_batch(self):
number_of_spines = 0
for device_uuid, device_info in list(self.device_table.items()):
if device_info.get('physical_role') == 'spine':
number_of_spines += 1
self.max_number_of_spines_updated_in_batch = \
number_of_spines // 2 + number_of_spines % 2
def _calc_number_of_repr_of_critical_rb_roles_in_batch(self, batch):
critical_routing_bridging_roles_count = {}
for critical_routing_bridging_role in\
FilterModule.critical_routing_bridging_roles:
critical_routing_bridging_roles_count[
critical_routing_bridging_role] = 0
for device_uuid in batch['device_list']:
rb_roles = self.device_table[device_uuid].get('rb_roles')
for rb_role in rb_roles:
if rb_role in FilterModule.critical_routing_bridging_roles:
critical_routing_bridging_roles_count[rb_role] += 1
return critical_routing_bridging_roles_count
# If correct batch extended with device_uuid is still correct in regards
# to vpg buddies, return True. Otherwise return False.
def _check_vpg_buddies_in_batch(self, device_uuid, batch):
# If this device shares a multi-homed vpg interface
# with another device in this batch, return False.
buddies = self._get_vpg_buddies(device_uuid)
for buddy in buddies:
if buddy['uuid'] in batch['device_list']:
return False
return True
# If correct batch extended with device_uuid is still correct in regards
# to number of spines in batch, return True. Otherwise return False.
def _check_number_of_spines_in_batch(self, device_uuid, batch):
device_info = self.device_table[device_uuid]
physical_role = device_info.get('physical_role')
if "spine" in physical_role:
spines_in_batch = 0
for device in batch['device_list']:
device_role = self.device_table[device].get('physical_role')
if "spine" in device_role:
spines_in_batch += 1
if (spines_in_batch + 1 >
self.max_number_of_spines_updated_in_batch):
return False
return True
# If correct batch extended with device_uuid is still correct in regards
# to number of critical roles, return True. Otherwise return False.
def _check_number_of_critical_rb_roles_in_batch(self, device_uuid, batch):
device_info = self.device_table[device_uuid]
rb_roles = device_info.get('rb_roles')
critical_rb_roles_in_device = list(
FilterModule.critical_routing_bridging_roles & set(rb_roles))
if critical_rb_roles_in_device:
critical_rb_roles_in_batch_count = self.\
_calc_number_of_repr_of_critical_rb_roles_in_batch(batch)
for rb_role in critical_rb_roles_in_device:
if critical_rb_roles_in_batch_count[rb_role] + 1 > self.\
max_number_of_repr_of_critical_rb_roles_per_batch[
rb_role]:
return False
return True
# It assumes that batch is correct and is not empty.
def _check_if_device_can_be_added_to_the_batch(self, device_uuid, batch):
return \
self._check_vpg_buddies_in_batch(device_uuid, batch) and \
self._check_number_of_spines_in_batch(device_uuid, batch) and \
self._check_number_of_critical_rb_roles_in_batch(
device_uuid, batch)
def _add_batch_index_to_device_info(self, batches):
for batch in batches:
for device_uuid in batch['device_list']:
self.device_table[device_uuid]['batch_index'] = batches.index(
batch)
def _add_device_to_the_batch(self, device_uuid, batch_load_list, batches):
batch = {}
loaded = False
batch_full = False
device_name = self.device_table[device_uuid].get('name')
# Try to add device into an existing batch
for batch in batch_load_list:
safe = self._check_if_device_can_be_added_to_the_batch(
device_uuid, batch)
if safe:
batch['device_list'].append(device_uuid)
batch['device_names'].append(device_name)
loaded = True
# if the batch is full, move it to the master list
if len(batch['device_list']) >= self.batch_limit:
batch_full = True
break
# if not loaded into a batch, generate a new batch
if not loaded:
idx = len(batch_load_list) + len(batches) + 1
batch = {
'name': "Batch " + str(idx),
'device_list': [device_uuid],
'device_names': [device_name]
}
batch_load_list.append(batch)
# if the batch is full, move it to the master list
if len(batch['device_list']) >= self.batch_limit:
batch_full = True
# if batch full, move from load list to master list
if batch_full:
batch_load_list.remove(batch)
batches.append(batch)
def _assign_devices_to_batches(self):
batches = []
for role_group in ordered_role_groups:
# Batching is per-role-group (constraint 1).
# TODO: Each role group contains just one role. So why do we need
# role groups?
batch_load_list = []
for role in role_group:
device_list = self.role_device_groups.get(role, [])
for device_uuid in device_list:
self._add_device_to_the_batch(
device_uuid, batch_load_list, batches)
# move remaining batches from the load list to the master list
for batch in batch_load_list:
batches.append(batch)
return batches
# Generate batches of devices that can be updated at once.
#
# Constraints:
# 1. Two devices with different physical_router_role values cannot be in
# the same batch.
# 2. No more than half of the spines (rounded up for an odd count) can be
# in the same batch.
# 3. For each routing_bridging_role in {"CRB-MCAST-Gateway",
# "DC-Gateway", "DCI-Gateway"}, no more than half of the devices with that
# role (rounded up for an odd count) can be in the same batch.
# 4. Two devices that share a VPG cannot be in the same batch.
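# Illustrative example (assumed topology, not taken from a real fabric):
# with 3 spines and bulk_device_upgrade_count = 4, constraint 2 allows at
# most ceil(3 / 2) = 2 spines per batch, so the spines are split across at
# least two batches even though a single batch could hold all of them.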
def _generate_batches(self):
self._calculate_devices_with_critical_routing_bridging_roles()
self._calc_max_number_of_repr_of_critical_rb_roles_per_batch()
self._calculate_max_number_of_spines_updated_in_batch()
batches = self._assign_devices_to_batches()
self._add_batch_index_to_device_info(batches)
return batches
def _spill_device_details(self, device_name, device_info):
details = ""
basic = device_info['basic']
vpg_info = device_info['vpg_info']
batch_index = device_info.get('batch_index')
batch_name = self.batches[batch_index]['name'] \
if batch_index is not None else "N/A"
details += "\n - {}\n".format(device_name)
details += \
" uuid : {}\n"\
" vendor : {}\n"\
" family : {}\n"\
" product : {}\n"\
" serial number : {}\n"\
" management ip : {}\n"\
" username : {}\n"\
" password : {}\n"\
" new image version: {}\n"\
" current image version: {}\n"\
" image family : {}\n"\
" physical role : {}\n"\
" routing bridging roles: {}\n"\
" role : {}\n"\
" vpg list : {}\n"\
" vpg peers : {}\n"\
" batch : {}\n"\
" is hitless? : {}\n"\
.format(
device_info.get('uuid'),
basic.get('device_vendor'),
basic.get('device_family'),
basic.get('device_product'),
basic.get('device_serial_number'),
basic.get('device_management_ip'),
basic.get('device_username'),
"** hidden **", # basic.get('device_password'),
device_info.get('image_version'),
device_info.get('current_image_version'),
device_info.get('image_family'),
device_info.get('physical_role'),
device_info.get('rb_roles'),
device_info.get('role'),
vpg_info.get('vpg_list'),
[buddy['uuid'] for buddy in vpg_info.get('buddies')],
batch_name,
basic.get('device_hitless_upgrade'),
)
return details
def _generate_report(self):
report = ""
# generate devices dict with key of device name
devices = {}
for device_uuid, device_info in list(self.device_table.items()):
device_name = self.device_table[device_uuid]['name']
devices[device_name] = self.device_table[device_uuid]
# generate skipped devices dict with key of device name
sdevices = {}
for device_uuid, device_info in \
list(self.skipped_device_table.items()):
device_name = self.skipped_device_table[device_uuid]['name']
sdevices[device_name] = self.skipped_device_table[device_uuid]
# First dump summary
report += "\n********** Summary *************\n"
# Dump summary of batches
total_time = str(
timedelta(minutes=IMAGE_UPGRADE_DURATION * len(self.batches)))
if len(self.batches) > 0:
report += "\nTotal estimated " \
"duration is {}.\n".format(total_time)
report += "\nNote that this time " \
"estimate may vary depending on " \
"network speeds and system capabilities.\n"
report += "The following batches " \
"of devices will be upgraded in the order listed:\n"
for batch in self.batches:
report += "\n{}:\n".format(batch.get('name'))
for device_name in batch.get('device_names', []):
device_info = devices[device_name]
current_version = \
device_info['current_image_version'] or ""
new_version = device_info['image_version']
hitless_upgrade = \
device_info['basic']['device_hitless_upgrade']
is_hitless = "" if hitless_upgrade else "(not hitless)"
workflow_info = self._check_for_downgrade(device_info)
report += " {} {} --> {} {}{}\n".format(
device_name, current_version, new_version,
is_hitless, workflow_info)
else:
report += "\n NO DEVICES TO UPGRADE!"
report += "\n"
# Dump summary of skipped devices
if len(sdevices) > 0:
report += "\nThe following devices will not be upgraded " \
"for the reasons listed:\n"
for device_name, device_info in sorted(sdevices.items()):
report += "\n {} ({})".format(device_name,
device_info.get
('skip_reason',
"unknown reason"))
report += "\n NOTE: \n Incompatible device-image platform with " \
"the same versions could also lead to a device being " \
"skipped for image upgrade. " \
"Please recheck the platform compatibility " \
"for the above skipped devices."
# Now dump the details
report += "\n******** Details ************\n"
# Dump device info
if len(devices) > 0:
report += "\nDetailed information for the " \
"devices to be upgraded is listed below:\n"
# Spill out sorted list
for device_name, device_info in sorted(devices.items()):
details = self._spill_device_details(device_name, device_info)
report += details
# Dump skipped device info
if len(sdevices) > 0:
report += "\nDetailed information for " \
"the devices to be skipped is listed below:\n"
# Spill out sorted list
for device_name, device_info in sorted(sdevices.items()):
details = self._spill_device_details(device_name, device_info)
report += details
return report
def _generate_results(self):
return self.report
# Get the current and next batch off the batch list and return
def get_next_batch(self, job_ctx, upgrade_plan, device_uuid):
try:
return self._get_next_batch(upgrade_plan, device_uuid)
except Exception as ex:
errmsg = "Unexpected error attempting to " \
"get next batch: %s\n%s" %\
(str(ex), traceback.format_exc())
_task_error_log(errmsg)
return {
'status': 'failure',
'error_msg': errmsg,
}
# end get_next_batch
# Get the current and next batch off the batch list and return
def _get_next_batch(self, upgrade_plan, device_uuid):
c_idx, n_idx = None, None
current_batch, next_batch = {}, {}
batch_info = {
'current': {
'batch_name': None, 'batch_index': None, 'batch_devices': {}
},
'next': {
'batch_name': None, 'batch_index': None, 'batch_devices': {}
},
'status': "success"
}
if device_uuid:
device_info = upgrade_plan['device_table'].get(device_uuid)
if device_info:
c_idx = device_info['batch_index']
n_idx = c_idx + 1
else:
return batch_info
else:
n_idx = 0
if c_idx is not None:
batch = upgrade_plan['batches'][c_idx]
for device_uuid in batch['device_list']:
current_batch[device_uuid] = \
upgrade_plan['device_table'][device_uuid]['basic']
batch_info['current'] = {
'batch_name': batch['name'],
'batch_index': c_idx,
'batch_devices': current_batch}
if n_idx < len(upgrade_plan['batches']):
batch = upgrade_plan['batches'][n_idx]
for device_uuid in batch['device_list']:
next_batch[device_uuid] = \
upgrade_plan['device_table'][device_uuid]['basic']
batch_info['next'] = {
'batch_name': batch['name'],
'batch_index': n_idx,
'batch_devices': next_batch}
return batch_info
# end _get_next_batch
# Get list of all devices for use in test_run
def get_all_devices(self, job_ctx, upgrade_plan):
try:
return self._get_all_devices(upgrade_plan)
except Exception as ex:
errmsg = "Unexpected error attempting " \
"to get all devices: %s\n%s" % \
(str(ex), traceback.format_exc())
_task_error_log(errmsg)
return {
'status': 'failure',
'error_msg': errmsg,
}
# end get_all_devices
# Get list of all devices for use in test_run
def _get_all_devices(self, upgrade_plan):
all_devices = {}
device_table = upgrade_plan['device_table']
batch_info = {
'current': {
'batch_name': None, 'batch_index': None, 'batch_devices': {}
},
'next': {
'batch_name': 'all', 'batch_index': 0, 'batch_devices': {}
},
'status': "success"
}
for device_uuid, device_info in list(device_table.items()):
all_devices[device_uuid] = device_table[device_uuid]['basic']
batch_info['next']['batch_devices'] = all_devices
return batch_info
# end _get_all_devices
# Get info for a single device
def get_device_info(self, job_ctx, device_uuid):
try:
FilterLog.instance("HitlessUpgradeFilter")
self.job_input = FilterModule._validate_job_ctx(job_ctx)
self.fabric_uuid = self.job_input['fabric_uuid']
self.vncapi = JobVncApi.vnc_init(job_ctx)
self.job_ctx = job_ctx
self.ja = JobAnnotations(self.vncapi)
self.advanced_parameters = self._get_advanced_params()
self._cache_job_input()
self.device_uuid = device_uuid
device_info = self._get_device_info()
return device_info
except Exception as ex:
errmsg = "Unexpected error getting device info: %s\n%s" % (
str(ex), traceback.format_exc()
)
_task_error_log(errmsg)
return {
'status': 'failure',
'error_msg': errmsg,
}
# end get_device_info
# Get device info used for maintenance mode activate
def _get_device_info(self):
self.device_table = self._generate_device_entry()
self.skipped_device_table = {}
self.vpg_table = self._generate_vpg_table()
self._generate_buddy_lists()
device_info = {
'advanced_parameters': self.advanced_parameters,
'device_table': self.device_table,
'vpg_table': self.vpg_table,
'status': "success"
}
return device_info
# end _get_device_info
# Validate whether fabric will be hitless when the given list of
# devices go into maintenance mode
def validate_critical_roles(self, job_ctx, device_uuid_list):
try:
FilterLog.instance("HitlessUpgradeFilter")
self.job_input = FilterModule._validate_job_ctx(job_ctx)
self.fabric_uuid = self.job_input['fabric_uuid']
self.vncapi = JobVncApi.vnc_init(job_ctx)
self.job_ctx = job_ctx
self.ja = JobAnnotations(self.vncapi)
self.advanced_parameters = self._get_advanced_params()
self._cache_job_input()
self.device_uuid_list = device_uuid_list
results = self._validate_critical_roles()
return results
except Exception as ex:
errmsg = "Unexpected error validating: %s\n%s" % (
str(ex), traceback.format_exc()
)
_task_error_log(errmsg)
return {
'status': 'failure',
'error_msg': errmsg,
}
# end validate_critical_roles
# Validate whether the fabric remains hitless when the given devices go
# into maintenance mode
def _validate_critical_roles(self):
error_msg = ''
critical_dev_list = []
mm_dev_list = []
dev_list = self.vncapi.physical_routers_list(
fields=['fabric_refs', 'physical_role_refs',
'routing_bridging_roles', 'physical_router_managed_state'
]).get('physical-routers', [])
# Search through all devices in fabric and create a critical device
# list of devices which are active and performing critical roles
for dev in dev_list:
if dev['uuid'] in self.device_uuid_list:
mm_dev_list.append(dev)
continue
fabric_refs = dev.get('fabric_refs')
if not fabric_refs:
continue
fabric_uuid = fabric_refs[0]['uuid']
if fabric_uuid != self.fabric_uuid:
continue
managed_state = dev.get('physical_router_managed_state')
if managed_state and managed_state != 'active':
continue
physical_role_refs = dev.get('physical_role_refs')
if not physical_role_refs:
continue
physical_role = physical_role_refs[0]['to'][-1]
if physical_role == 'spine':
critical_dev_list.append(dev)
continue
routing_bridging_roles = dev.get('routing_bridging_roles')
if routing_bridging_roles:
rb_roles = routing_bridging_roles['rb_roles']
else:
rb_roles = []
for rb_role in rb_roles:
if rb_role in FilterModule.critical_routing_bridging_roles:
critical_dev_list.append(dev)
break
# Make sure critical roles are present in critical devices
missing_roles = set()
for mm_dev in mm_dev_list:
# check critical physical roles
physical_role_refs = mm_dev.get('physical_role_refs')
if not physical_role_refs:
continue
physical_role = physical_role_refs[0]['to'][-1]
if physical_role == 'spine':
found = self._find_critical_phy_role(
physical_role, critical_dev_list)
if not found:
missing_roles.add(physical_role)
# check critical routing-bridging roles
routing_bridging_roles = mm_dev.get('routing_bridging_roles')
if routing_bridging_roles:
rb_roles = routing_bridging_roles['rb_roles']
else:
rb_roles = []
for rb_role in rb_roles:
if rb_role in FilterModule.critical_routing_bridging_roles:
found = self._find_critical_rb_role(
rb_role, critical_dev_list)
if not found:
missing_roles.add(rb_role)
if missing_roles:
error_msg = 'Fabric will not be hitless because these '\
'roles will no longer be deployed: '\
'{}'.format(list(missing_roles))
if error_msg:
results = {
'error_msg': error_msg,
'status': "failure"
}
else:
results = {
'error_msg': "Fabric is hitless",
'status': "success"
}
return results
# end _validate_critical_roles
# Find a particular critical physical role in a list of devices
def _find_critical_phy_role(self, crit_phy_role, dev_list):
for dev in dev_list:
physical_role_refs = dev.get('physical_role_refs')
if not physical_role_refs:
continue
physical_role = physical_role_refs[0]['to'][-1]
if physical_role == crit_phy_role:
return True
return False
# end _find_critical_phy_role
# Find a particular critical routing-bridging role in a list of devices
def _find_critical_rb_role(self, crit_rb_role, dev_list):
for dev in dev_list:
routing_bridging_roles = dev.get('routing_bridging_roles')
if routing_bridging_roles:
rb_roles = routing_bridging_roles['rb_roles']
else:
rb_roles = []
for rb_role in rb_roles:
if crit_rb_role == rb_role:
return True
return False
# end _find_critical_rb_role
# generate a single entry of device information
def _generate_device_entry(self):
device_table = {}
device_obj = self.vncapi.physical_router_read(id=self.device_uuid)
routing_bridging_roles = device_obj.routing_bridging_roles
if not routing_bridging_roles:
raise ValueError("Cannot find routing-bridging roles")
rb_roles = routing_bridging_roles.get_rb_roles()
is_hitless_upgrade = self._is_hitless_upgrade(device_obj)
device_info = {
"basic": {
"device_fqname": device_obj.fq_name,
"device_vendor":
device_obj.physical_router_vendor_name,
"device_family":
device_obj.physical_router_device_family,
"device_product":
device_obj.physical_router_product_name,
"device_serial_number":
device_obj.physical_router_serial_number,
"device_management_ip":
device_obj.physical_router_management_ip,
"device_username":
device_obj.physical_router_user_credentials.username,
"device_password":
self._get_password(device_obj),
"device_hitless_upgrade": is_hitless_upgrade
},
'name': device_obj.fq_name[-1],
'uuid': self.device_uuid,
'physical_role': device_obj.physical_router_role,
'rb_roles': rb_roles,
'role': self._determine_role(
device_obj.physical_router_role, rb_roles),
'err_msgs': [],
'vpg_info': {"vpg_list": [], "buddies": []},
'target_multihomed_interface': []
}
device_table[self.device_uuid] = device_info
return device_table
# end _generate_device_entry
# Get a list of all devices that share vpg groups with this device
def _get_vpg_buddies(self, device_uuid):
device_info = self.device_table[device_uuid]
vpg_info = device_info['vpg_info']
return vpg_info.get('buddies', [])
# end _get_vpg_buddies
# Get a single role for this device to be used in determining upgrade
# ordering
def _determine_role(self, physical_role, rb_roles):
# Use physical role for now. If not in ordered table, use default
for role_group in ordered_role_groups:
for role in role_group:
if physical_role == role:
return physical_role
return "default"
# end _determine_role
# If old and new image versions match, don't upgrade
def _check_skip_device_upgrade(self, device_info):
if device_info['image_version'] == \
device_info['current_image_version']:
return True, "Upgrade image version matches current image version"
return False, ""
# end _check_skip_device_upgrade
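# _check_for_downgrade compares versions numerically after stripping all
# non-digits, e.g. "19.4R3" -> 1943 and "20.2R1" -> 2021, so 2021 > 1943
# counts as an upgrade and anything else is flagged "(Image downgrade)".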
def _check_for_downgrade(self, device_info):
new_image_int = int(re.sub(r"\D", "", device_info['image_version']))
current_image_int = int(
re.sub(
r"\D",
"",
device_info['current_image_version']))
if new_image_int > current_image_int:
return ""
else:
return "(Image downgrade)"
# Get device password
def _get_password(self, device_obj):
return JobVncApi.decrypt_password(
encrypted_password=device_obj.physical_router_user_credentials.
get_password(),
pwd_key=device_obj.uuid)
def _parse_args():
arg_parser = argparse.ArgumentParser(description='fabric filters tests')
arg_parser.add_argument('-p', '--generate_plan',
action='store_true', help='Generate Upgrade Plan')
arg_parser.add_argument('-b', '--next_batch',
action='store_true', help='Get Next Batch')
arg_parser.add_argument('-a', '--all_devices',
action='store_true', help='Get All Devices')
arg_parser.add_argument('-d', '--device_info',
action='store_true', help='Get Device Info')
return arg_parser.parse_args()
|
py | 1a3d06bfdfaf516e0cab0fd13cefd4cc26c81332 |
class BaseTokenizer:
def __init__(self, name=None):
self.name = name
def __call__(self, text):
raise NotImplementedError
|
py | 1a3d06cba9e6a6b4d23decebb5f705a28e715613 | # qubit number=4
# total number=38
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
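# Sketch of what the loop below builds (illustrative): for each input
# pattern rep with f(rep) == "1", X gates are placed on every control
# qubit where rep has a "0", a multi-controlled Toffoli flips the target,
# and the X gates are undone, so the target is flipped exactly when the
# controls encode rep.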
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=31
prog.cz(input_qubit[0],input_qubit[3]) # number=32
prog.h(input_qubit[3]) # number=33
prog.h(input_qubit[3]) # number=30
prog.cx(input_qubit[0],input_qubit[3]) # number=35
prog.x(input_qubit[3]) # number=36
prog.cx(input_qubit[0],input_qubit[3]) # number=37
prog.h(input_qubit[3]) # number=13
prog.cz(input_qubit[0],input_qubit[3]) # number=14
prog.h(input_qubit[1]) # number=18
prog.cz(input_qubit[3],input_qubit[1]) # number=19
prog.z(input_qubit[3]) # number=25
prog.h(input_qubit[1]) # number=20
prog.rx(-3.141592653589793,input_qubit[3]) # number=26
prog.h(input_qubit[3]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[2]) # number=17
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.h(input_qubit[0]) # number=27
prog.cz(input_qubit[1],input_qubit[0]) # number=28
prog.h(input_qubit[0]) # number=29
prog.cx(input_qubit[1],input_qubit[0]) # number=22
prog.cx(input_qubit[2],input_qubit[1]) # number=34
prog.x(input_qubit[1]) # number=23
prog.x(input_qubit[1]) # number=24
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = FakeVigo()
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy2727.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
py | 1a3d06f7d2a727e779c382d87cf71b95617cd9e9 | import pathlib
from unittest import mock
from unittest.mock import MagicMock
import pytest
from aiohttp import web
from aiohttp.web_urldispatcher import SystemRoute
@pytest.mark.parametrize(
"show_index,status,prefix,data",
[pytest.param(False, 403, '/', None, id="index_forbidden"),
pytest.param(True, 200, '/',
b'<html>\n<head>\n<title>Index of /.</title>\n'
b'</head>\n<body>\n<h1>Index of /.</h1>\n<ul>\n'
b'<li><a href="/my_dir">my_dir/</a></li>\n'
b'<li><a href="/my_file">my_file</a></li>\n'
b'</ul>\n</body>\n</html>',
id="index_root"),
pytest.param(True, 200, '/static',
b'<html>\n<head>\n<title>Index of /.</title>\n'
b'</head>\n<body>\n<h1>Index of /.</h1>\n<ul>\n'
b'<li><a href="/static/my_dir">my_dir/</a></li>\n'
b'<li><a href="/static/my_file">my_file</a></li>\n'
b'</ul>\n</body>\n</html>',
id="index_static")])
async def test_access_root_of_static_handler(tmp_path,
aiohttp_client,
show_index,
status,
prefix,
data) -> None:
"""
Tests the operation of static file server.
Try to access the root of the static file server and make sure that
the correct HTTP status is returned depending on whether the directory
index should be shown or not.
"""
my_file = tmp_path / 'my_file'
my_dir = tmp_path / 'my_dir'
my_dir.mkdir()
my_file_in_dir = my_dir / 'my_file_in_dir'
with my_file.open('w') as fw:
fw.write('hello')
with my_file_in_dir.open('w') as fw:
fw.write('world')
app = web.Application()
# Register global static route:
app.router.add_static(prefix, str(tmp_path), show_index=show_index)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get(prefix)
assert r.status == status
if data:
assert r.headers['Content-Type'] == "text/html; charset=utf-8"
read_ = (await r.read())
assert read_ == data
async def test_follow_symlink(tmp_path, aiohttp_client) -> None:
"""
Tests access to a symlink inside the static folder.
"""
data = 'hello world'
my_dir_path = tmp_path / 'my_dir'
my_dir_path.mkdir()
my_file_path = my_dir_path / 'my_file_in_dir'
with my_file_path.open('w') as fw:
fw.write(data)
my_symlink_path = tmp_path / 'my_symlink'
pathlib.Path(str(my_symlink_path)).symlink_to(str(my_dir_path), True)
app = web.Application()
# Register global static route:
app.router.add_static('/', str(tmp_path), follow_symlinks=True)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get('/my_symlink/my_file_in_dir')
assert r.status == 200
assert (await r.text()) == data
@pytest.mark.parametrize('dir_name,filename,data', [
('', 'test file.txt', 'test text'),
('test dir name', 'test dir file .txt', 'test text file folder')
])
async def test_access_to_the_file_with_spaces(tmp_path, aiohttp_client,
dir_name, filename, data):
"""
Checks that static files and directories whose names contain spaces are served correctly.
"""
my_dir_path = tmp_path / dir_name
if my_dir_path != tmp_path:
my_dir_path.mkdir()
my_file_path = my_dir_path / filename
with my_file_path.open('w') as fw:
fw.write(data)
app = web.Application()
url = '/' + str(pathlib.Path(dir_name, filename))
app.router.add_static('/', str(tmp_path))
client = await aiohttp_client(app)
r = await client.get(url)
assert r.status == 200
assert (await r.text()) == data
async def test_access_non_existing_resource(tmp_path,
aiohttp_client) -> None:
"""
Tests accessing a non-existing resource.
Try to access a non-existing resource and make sure that a 404 HTTP
status is returned.
"""
app = web.Application()
# Register global static route:
app.router.add_static('/', str(tmp_path), show_index=True)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get('/non_existing_resource')
assert r.status == 404
@pytest.mark.parametrize('registered_path,request_url', [
('/a:b', '/a:b'),
('/a@b', '/a@b'),
('/a:b', '/a%3Ab'),
])
async def test_url_escaping(aiohttp_client,
registered_path,
request_url) -> None:
"""
Tests that resources registered with special characters in their paths
are reachable via both literal and percent-encoded URLs.
"""
app = web.Application()
async def handler(request):
return web.Response()
app.router.add_get(registered_path, handler)
client = await aiohttp_client(app)
r = await client.get(request_url)
assert r.status == 200
async def test_handler_metadata_persistence() -> None:
# Tests accessing metadata of a handler after registering it on the app
# router.
app = web.Application()
async def async_handler(request):
"""Doc"""
return web.Response()
app.router.add_get('/async', async_handler)
for resource in app.router.resources():
for route in resource:
assert route.handler.__doc__ == 'Doc'
async def test_unauthorized_folder_access(tmp_path,
aiohttp_client) -> None:
"""
Tests unauthorized access to a folder of the static file server.
Try to list the contents of a folder when the server does not have
permission to read it.
"""
my_dir = tmp_path / 'my_dir'
my_dir.mkdir()
app = web.Application()
with mock.patch('pathlib.Path.__new__') as path_constructor:
path = MagicMock()
path.joinpath.return_value = path
path.resolve.return_value = path
path.iterdir.return_value.__iter__.side_effect = PermissionError()
path_constructor.return_value = path
# Register global static route:
app.router.add_static('/', str(tmp_path), show_index=True)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get('/' + my_dir.name)
assert r.status == 403
async def test_access_symlink_loop(tmp_path, aiohttp_client) -> None:
"""
Tests access to a looped symlink, which cannot be resolved.
"""
my_dir_path = tmp_path / 'my_symlink'
pathlib.Path(str(my_dir_path)).symlink_to(str(my_dir_path), True)
app = web.Application()
# Register global static route:
app.router.add_static('/', str(tmp_path), show_index=True)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get('/' + my_dir_path.name)
assert r.status == 404
async def test_access_special_resource(tmp_path, aiohttp_client) -> None:
"""
Tests the access to a resource that is neither a file nor a directory.
Checks that if a special resource is accessed (e.g. a named pipe or UNIX
domain socket) then a 403 HTTP status is returned.
"""
app = web.Application()
with mock.patch('pathlib.Path.__new__') as path_constructor:
special = MagicMock()
special.is_dir.return_value = False
special.is_file.return_value = False
path = MagicMock()
path.joinpath.side_effect = lambda p: (special if p == 'special'
else path)
path.resolve.return_value = path
special.resolve.return_value = special
path_constructor.return_value = path
# Register global static route:
app.router.add_static('/', str(tmp_path), show_index=True)
client = await aiohttp_client(app)
# Request the root of the static directory.
r = await client.get('/special')
assert r.status == 403
def test_system_route() -> None:
route = SystemRoute(web.HTTPCreated(reason='test'))
with pytest.raises(RuntimeError):
route.url_for()
assert route.name is None
assert route.resource is None
assert "<SystemRoute 201: test>" == repr(route)
assert 201 == route.status
assert 'test' == route.reason
async def test_allow_head(aiohttp_client) -> None:
"""
Test allow_head on routes.
"""
app = web.Application()
async def handler(_):
return web.Response()
app.router.add_get('/a', handler, name='a')
app.router.add_get('/b', handler, allow_head=False, name='b')
client = await aiohttp_client(app)
r = await client.get('/a')
assert r.status == 200
await r.release()
r = await client.head('/a')
assert r.status == 200
await r.release()
r = await client.get('/b')
assert r.status == 200
await r.release()
r = await client.head('/b')
assert r.status == 405
await r.release()
@pytest.mark.parametrize("path", [
'/a',
'/{a}',
])
def test_reuse_last_added_resource(path) -> None:
"""
Test that adding a route with the same name and path of the last added
resource doesn't create a new resource.
"""
app = web.Application()
async def handler(request):
return web.Response()
app.router.add_get(path, handler, name="a")
app.router.add_post(path, handler, name="a")
assert len(app.router.resources()) == 1
def test_resource_raw_match() -> None:
app = web.Application()
async def handler(request):
return web.Response()
route = app.router.add_get("/a", handler, name="a")
assert route.resource.raw_match("/a")
route = app.router.add_get("/{b}", handler, name="b")
assert route.resource.raw_match("/{b}")
resource = app.router.add_static("/static", ".")
assert not resource.raw_match("/static")
async def test_add_view(aiohttp_client) -> None:
app = web.Application()
class MyView(web.View):
async def get(self):
return web.Response()
async def post(self):
return web.Response()
app.router.add_view("/a", MyView)
client = await aiohttp_client(app)
r = await client.get("/a")
assert r.status == 200
await r.release()
r = await client.post("/a")
assert r.status == 200
await r.release()
r = await client.put("/a")
assert r.status == 405
await r.release()
async def test_decorate_view(aiohttp_client) -> None:
routes = web.RouteTableDef()
@routes.view("/a")
class MyView(web.View):
async def get(self):
return web.Response()
async def post(self):
return web.Response()
app = web.Application()
app.router.add_routes(routes)
client = await aiohttp_client(app)
r = await client.get("/a")
assert r.status == 200
await r.release()
r = await client.post("/a")
assert r.status == 200
await r.release()
r = await client.put("/a")
assert r.status == 405
await r.release()
async def test_web_view(aiohttp_client) -> None:
app = web.Application()
class MyView(web.View):
async def get(self):
return web.Response()
async def post(self):
return web.Response()
app.router.add_routes([
web.view("/a", MyView)
])
client = await aiohttp_client(app)
r = await client.get("/a")
assert r.status == 200
await r.release()
r = await client.post("/a")
assert r.status == 200
await r.release()
r = await client.put("/a")
assert r.status == 405
await r.release()
async def test_static_absolute_url(aiohttp_client, tmp_path) -> None:
# requested url is an absolute name like
# /static/\\machine_name\c$ or /static/D:\path
# where the static dir is totally different
app = web.Application()
file_path = tmp_path / 'file.txt'
file_path.write_text('sample text', 'ascii')
here = pathlib.Path(__file__).parent
app.router.add_static('/static', here)
client = await aiohttp_client(app)
resp = await client.get('/static/' + str(file_path.resolve()))
assert resp.status == 403
|
py | 1a3d0801725c60e5e17c948516993b4971d17c7b | import unittest
from pyvirtualdisplay import Display
from selenium import webdriver
class TestFirefox(unittest.TestCase):
"""
Basic test case that uses firefox as a driver.
"""
def setUp(self):
self.display = Display(visible=0, size=(1024, 768))
self.display.start()
self.driver = webdriver.Firefox()
def test_title(self):
self.driver.get("http://www.tomwaits.com")
self.assertEquals("Tom Waits", self.driver.title)
def test_url(self):
self.driver.get("http://tomwaits.com")
        self.assertEqual(self.driver.current_url, "http://www.tomwaits.com/")
def tearDown(self):
self.driver.quit()
self.display.stop()
|
py | 1a3d08bcbaf299c23ffa703995e0e90e1b374996 | # -*- coding: utf-8 -*-
"""Module scanning for the ROBOT vulnerability
Refer to CVE-2017-13099, etc.
Padding oracle for RSA-based key transport, refer to https://robotattack.org
"""
# import basic stuff
import math
# import own stuff
import tlsmate.msg as msg
import tlsmate.plugin as plg
import tlsmate.tls as tls
import tlsmate.utils as utils
# import other stuff
def _rsa_encrypt(msg, e, n, mod_bytes):
return int(pow(msg, e, n)).to_bytes(mod_bytes, byteorder="big")
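# Minimal usage sketch for _rsa_encrypt with toy textbook-RSA parameters (not
# taken from this module): with p=61, q=53 -> n=3233 and e=17, the message 65
# encrypts to pow(65, 17, 3233) == 2790, so
# _rsa_encrypt(65, 17, 3233, 2) == (2790).to_bytes(2, "big") == b"\x0a\xe6".
# The scan below uses the server certificate's real public numbers instead.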
class ScanRobot(plg.Worker):
name = "robot"
descr = "scan for ROBOT vulnerability"
prio = 41
def _get_oracle_results(self, with_ccs):
def cke_pre_serialization(message):
message.rsa_encrypted_pms = self.enc_pms
results = []
for self.enc_pms in self._rsa_encrypted_pms:
with self.client.create_connection() as conn:
conn.send(msg.ClientHello)
conn.wait(msg.ServerHello)
conn.wait(msg.Certificate)
conn.wait(msg.CertificateRequest, optional=True)
conn.wait(msg.ServerHelloDone)
conn.send(
msg.ClientKeyExchange, pre_serialization=cke_pre_serialization
)
self.premaster_secret = self.rnd_pms
if with_ccs:
conn.send(msg.ChangeCipherSpec)
conn.send(msg.Finished)
try:
rec_msg, rec_bytes = conn.wait_msg_bytes(msg.Any, timeout=1000)
results.append(hash(bytes(rec_bytes)))
except Exception as exc:
results.append(hash(str(exc)))
return results
def _determine_status(self):
for send_ccs_finished in [True, False]:
results = self._get_oracle_results(send_ccs_finished)
if len(set(results)) == 1:
continue
results2 = self._get_oracle_results(send_ccs_finished)
for res1, res2 in zip(results, results2):
if res1 != res2:
return tls.RobotVulnerability.INCONSITENT_RESULTS
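            # results[0] is the correctly padded PMS; results[1:] come from the
            # four malformed variants built in run(). If the three structurally
            # broken paddings (indices 1-3) all produce the same server response,
            # only the wrong-version probe is distinguishable (weak oracle);
            # otherwise the oracle leaks more and is classified as strong.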
if results[1] == results[2] == results[3]:
return tls.RobotVulnerability.WEAK_ORACLE
return tls.RobotVulnerability.STRONG_ORACLE
return tls.RobotVulnerability.NOT_VULNERABLE
def run(self):
values = self.server_profile.get_profile_values(
[tls.Version.TLS10, tls.Version.TLS11, tls.Version.TLS12], full_hs=True
)
rsa_ciphers = utils.filter_cipher_suites(
values.cipher_suites, key_algo=[tls.KeyExchangeAlgorithm.RSA]
)
if rsa_ciphers:
self.client.init_profile(profile_values=values)
self.client.profile.cipher_suites = rsa_ciphers
with self.client.create_connection() as conn:
conn.handshake()
if not conn.handshake_completed:
status = tls.RobotVulnerability.UNDETERMINED
else:
cert = conn.msg.server_certificate.chain.certificates[0]
pub_nbrs = cert.parsed.public_key().public_numbers()
modulus_bits = int(math.ceil(math.log(pub_nbrs.n, 2)))
modulus_bytes = (modulus_bits + 7) // 8
pad_len = (modulus_bytes - 48 - 3) * 2
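                # PKCS#1 v1.5 block layout: 0x00 0x02 | non-zero padding | 0x00 | 48-byte PMS.
                # 3 framing bytes plus 48 PMS bytes leave (modulus_bytes - 51) padding bytes;
                # pad_len doubles that count because rnd_pad is written as hex digits.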
rnd_pad = ("abcd" * (pad_len // 2 + 1))[:pad_len]
self.rnd_pms = (
"aa11223344556677889911223344556677889911223344"
"5566778899112233445566778899112233445566778899"
)
pms_good_in = int("0002" + rnd_pad + "00" + "0303" + self.rnd_pms, 16)
# wrong first two bytes
pms_bad_in1 = int("4117" + rnd_pad + "00" + "0303" + self.rnd_pms, 16)
# 0x00 on a wrong position, also trigger older JSSE bug
pms_bad_in2 = int("0002" + rnd_pad + "11" + self.rnd_pms + "0011", 16)
# no 0x00 in the middle
pms_bad_in3 = int("0002" + rnd_pad + "11" + "1111" + self.rnd_pms, 16)
# wrong version number (according to Klima / Pokorny / Rosa paper)
pms_bad_in4 = int("0002" + rnd_pad + "00" + "0202" + self.rnd_pms, 16)
self._rsa_encrypted_pms = [
_rsa_encrypt(pms, pub_nbrs.e, pub_nbrs.n, modulus_bytes)
for pms in [
pms_good_in,
pms_bad_in1,
pms_bad_in2,
pms_bad_in3,
pms_bad_in4,
]
]
status = self._determine_status()
else:
status = tls.RobotVulnerability.NOT_APPLICABLE
self.server_profile.allocate_vulnerabilities()
self.server_profile.vulnerabilities.robot = status
|
py | 1a3d0900e21d35c61e92e5f52426077cf1c1d363 | from discord.ext import commands
from lxml import html
import aiohttp
import asyncio
import discord
class google:
""" Google search """
def __init__(self,bot):
self.bot = bot
@commands.command()
async def g(self,ctx,*,qstr:str):
""" Perform a google search """
p = {"q":qstr,"safe":"on"}
h = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64)'}
cs = self.bot.session
async with cs.get('https://www.google.com/search',
params=p, headers=h) as resp:
if resp.status != 200:
err = f"🚫 Google responded with status code {resp.status}"
return await ctx.send(err)
tree = html.fromstring(await resp.text())
# Generate Base Embed
e = discord.Embed(colour=0xdb3236)
th = "http://i.imgur.com/2Ielpqo.png"
e.set_author(name="Google Search",icon_url=th,url=resp.url)
# Scrape Google Cards
card = tree.xpath(".//*[contains(@id,'topstuff')]")
if card:
card = card[0]
# Calculator
x = ".//table/tr/td/span[@class='nobr']/h2[@class='r']/text()"
calc = card.xpath(x)
if calc:
e.title = "Calculator"
e.description = calc[0]
# Unit Conversion
uc = tree.xpath(".//ol//div[@class='_Tsb']")
if uc:
uc = uc[0]
e.title = '🔄 Unit Conversion'
e.description = "".join(uc.xpath(".//text()"))
# Currency
curr = tree.xpath(".//ol/table[@class='std _tLi']/tr/td/h2")
if curr:
curr = curr[0]
e.title = '💷 Currency Conversion'
e.description = "".join(curr.xpath(".//text()"))
# Definition
x = ".//ol/div[@class='g']/div[h3[@class='r']/div]"
defin = tree.xpath(x)
if defin:
e.title = '📖 Definition'
defnode = defin[0]
texts = defnode.xpath(".//text()")
e.description = f"**{texts[0]}**\n{texts[1]}"
deftype = defnode.xpath(".//td/div/text()")[0]
deflist = defnode.xpath(".//ol/li/text()")
e.add_field(name=deftype,value="\n".join(deflist))
# Date
release = tree.xpath(".//div[@id='_vBb']")
if release:
release = release[0]
fields = release.xpath(".//text()")
e.title = f'🗓️ {"".join(fields[1:])}'
e.description = fields[0]
# Time in Card
timein = tree.xpath(".//ol//div[@class='_Tsb _HOb _Qeb']")
if timein:
timein = timein[0]
e.title = f"🕛 {timein.xpath('.//text()')[4].strip()}"
e.description = "".join(timein.xpath(".//text()")[0:4])
# Weather
weather = tree.xpath(".//ol//div[@class='e']")
if weather:
weather = weather[0]
items = weather.xpath('.//text()')
e.description = items[10]
e.title = "".join(items[0:3])
we = {
"Rain":"🌧️",
"Cloudy":"☁️️",
"Clear with periodic clouds":"🌤️",
"Clear":"🌞","Snow Showers":"🌨️",
"Mostly Cloudy":"☁️️",
"Mostly Sunny":"🌤",
"Partly Cloudy":"🌤️",
"Sunny":"🌞"
}
try:
e.description = f"{we[e.description]} {e.description}"
except KeyError:
await ctx.send(f"Emoji not found for {e.description}")
e.add_field(name="Temperature",value=items[3])
e.add_field(name="Humidity",value=items[13][9:])
e.add_field(name="Wind",value=items[12])
# Translate
x = (".//ol/div[@class='g'][1]//table[@class='ts']"
"//h3[@class='r'][1]//text()")
translate = tree.xpath(x)
if translate:
e.title = "Translation"
e.description = "".join(translate)
# Time Conversion
timecard = tree.xpath("..//div[@class='_NId']")
if timecard:
e.title = '≡ Time Conversion'
e.description = "".join(timecard.xpath(".//text()"))
# Write to file for debugging.
# with open('google.html', 'w', encoding='utf-8') as f:
# f.write(html.tostring(tree).decode('utf-8'))
# Search
resultnodes = tree.xpath(".//div[@class='g']")
res = []
for i in resultnodes:
link = i.xpath(".//h3[@class = 'r']/a/@href")
# if not a proper result node, go to next item.
if not link or "/search?q=" in link[0]:
continue
link = link[0]
            # Strip Google's "/url?q=" redirect wrapper from the link.
            if "/url?q=" in link:
                link = link.split("/url?q=")[1]
if "&sa" in link:
link = link.rsplit("&sa")[0]
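                # e.g. "/url?q=https://example.org/page&sa=U&ved=..." is reduced to
                # "https://example.org/page" by the two steps above (illustrative
                # URL only, not taken from a real response).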
link = link.replace(')',"%29")
title = i.xpath("string(.//h3[@class = 'r']/a)")
desc = i.xpath("string(.//span[contains(@class,'st')])")
res.append((link,title,desc))
if not res:
await ctx.send("🚫 No results found.")
return
if e.description == e.Empty:
e.title = res[0][1]
e.url = res[0][0]
e.description = res[0][2]
more = f"[{res[1][1]}]({res[1][0]})\n[{res[2][1]}]({res[2][0]})"
else:
more = (f"[{res[0][1]}]({res[0][0]})\n"
f"[{res[1][1]}]({res[1][0]})\n"
f"[{res[2][1]}]({res[2][0]})")
e.add_field(name="More Results",value=more)
await ctx.send(embed=e)
def setup(bot):
bot.add_cog(google(bot)) |
py | 1a3d09039dd0883546bc6476b9cb21a1d00197ed | import clr
import sys
sys.path.append(r'C:\Program Files (x86)\IronPython 2.7\Lib')
import os
import math
clr.AddReference('acmgd')
clr.AddReference('acdbmgd')
clr.AddReference('accoremgd')
# Import references from AutoCAD
from Autodesk.AutoCAD.Runtime import *
from Autodesk.AutoCAD.ApplicationServices import *
from Autodesk.AutoCAD.EditorInput import *
from Autodesk.AutoCAD.DatabaseServices import *
from Autodesk.AutoCAD.Geometry import *
doc = Application.DocumentManager.MdiActiveDocument
ed = doc.Editor
db = doc.Database
# Code here:
objects = []
with doc.LockDocument():
with doc.Database as db:
with db.TransactionManager.StartTransaction() as t:
acblkbl = t.GetObject(db.BlockTableId,OpenMode.ForRead)
print(type(acblkbl))
acblktblrec = t.GetObject(acblkbl[BlockTableRecord.ModelSpace],OpenMode.ForWrite)
print(type(acblktblrec))
sel = doc.Editor.GetSelection()
if(sel.Status== PromptStatus.OK):
results = sel.Value
for i in range(len(results)):
if(results[i] != None) : objects.append(i)
else : pass
print("Count Object Exploded:",len(objects)) |
py | 1a3d0a59df6de534f3506a0e6a21951ff64054a8 | # coding=utf-8
import numpy as np
from pyhsmm.models import _HMMGibbsSampling, _HMMEM, _HMMMeanField
from pyhsmm.internals.initial_state import UniformInitialState
from autoregressive.models import _ARMixin
from autoregressive.util import AR_striding
from pyslds.models import _SLDSGibbsMixin, _SLDSVBEMMixin, _SLDSMeanFieldMixin
from rslds.states import InputHMMStates, PGRecurrentSLDSStates, SoftmaxRecurrentSLDSStates
import rslds.transitions as transitions
### Input-driven HMMs
class _InputHMMMixin(object):
# Subclasses must specify the type of transition model
_trans_class = None
    # Custom __init__ so the input-driven transition class is built with the covariate dimension.
def __init__(self,
obs_distns,
D_in=0,
trans_distn=None, trans_params={},
init_state_distn=None, init_state_concentration=None, pi_0=None,
):
self.obs_distns = obs_distns
self.states_list = []
self.D_in = D_in
# our trans class
if trans_distn is None:
self.trans_distn = self._trans_class(num_states=len(obs_distns),
covariate_dim=D_in,
**trans_params)
else:
self.trans_distn = trans_distn
if init_state_distn is not None:
if init_state_distn == 'uniform':
self.init_state_distn = UniformInitialState(model=self)
else:
self.init_state_distn = init_state_distn
else:
self.init_state_distn = self._init_state_class(
model=self,
init_state_concentration=init_state_concentration,
pi_0=pi_0)
self._clear_caches()
# custom add_data - includes a covariates arg
def add_data(self, data, covariates=None, **kwargs):
# NOTE! Our convention is that covariates[t] drives the
# NOTE! transition matrix going into time t. However, for
# NOTE! implementation purposes, it is easier if these inputs
# NOTE! are lagged so that covariates[t] drives the input to
# NOTE! z_{t+1}. Then, we only have T-1 inputs for the T-1
# NOTE! transition matrices in the heterogeneous model.
# Offset the covariates by one so that
# the inputs at time {t-1} determine the transition matrix
# from z_{t-1} to z_{t}.
offset_covariates = covariates[1:]
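        # Worked example of the convention (illustrative only): with T = 4
        # observations and covariates u_0..u_3, offset_covariates is
        # [u_1, u_2, u_3], and u_t parameterizes the transition z_{t-1} -> z_t,
        # giving exactly T - 1 = 3 transition matrices.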
self.states_list.append(
self._states_class(
model=self, data=data,
covariates=offset_covariates, **kwargs))
def generate(self, T=100, covariates=None, keep=True):
if covariates is None:
covariates = np.zeros((T, self.D_in))
else:
assert covariates.ndim == 2 and \
covariates.shape[0] == T
s = self._states_class(model=self, covariates=covariates[1:], T=T, initialize_from_prior=True)
data = self._generate_obs(s)
if keep:
self.states_list.append(s)
return (data, covariates), s.stateseq
def resample_trans_distn(self):
self.trans_distn.resample(
stateseqs=[s.stateseq for s in self.states_list],
covseqs=[s.covariates for s in self.states_list],
)
self._clear_caches()
class PGInputHMM(_InputHMMMixin, _HMMGibbsSampling):
_trans_class = transitions.InputHMMTransitions
_states_class = InputHMMStates
class PGInputOnlyHMM(PGInputHMM):
_trans_class = transitions.InputOnlyHMMTransitions
class PGStickyInputOnlyHMM(PGInputHMM):
_trans_class = transitions.StickyInputOnlyHMMTransitions
class SoftmaxInputHMM(_InputHMMMixin, _HMMGibbsSampling, _HMMEM):
_trans_class = transitions.SoftmaxInputHMMTransitions
_states_class = InputHMMStates
## EM
def _M_step_trans_distn(self):
zs = [s.expected_states.argmax(1).astype(np.int32) for s in self.states_list]
xs = [s.covariates for s in self.states_list]
xs = [np.row_stack([x, np.zeros(x.shape[1])]) for x in xs]
self.trans_distn.initialize_with_logistic_regression(zs, xs)
class SoftmaxInputOnlyHMM(SoftmaxInputHMM):
_trans_class = transitions.SoftmaxInputOnlyHMMTransitions
### ARHMM's
class _InputARHMMMixin(_InputHMMMixin, _ARMixin):
def add_data(self, data, covariates=None, strided=False, **kwargs):
if covariates is None:
covariates = np.zeros((data.shape[0], 0))
strided_data = AR_striding(data,self.nlags) if not strided else data
lagged_covariates = covariates[self.nlags:]
assert strided_data.shape[0] == lagged_covariates.shape[0]
# Pass to InputHMM
super(_InputARHMMMixin, self).add_data(data=strided_data,
covariates=lagged_covariates,
**kwargs)
class PGInputARHMM(_InputARHMMMixin, _HMMGibbsSampling):
_trans_class = transitions.InputHMMTransitions
_states_class = InputHMMStates
class PGInputOnlyARHMM(PGInputARHMM):
_trans_class = transitions.InputOnlyHMMTransitions
class PGStickyInputOnlyARHMM(PGInputARHMM):
_trans_class = transitions.StickyInputOnlyHMMTransitions
class SoftmaxInputARHMM(_InputARHMMMixin, _HMMGibbsSampling, _HMMEM):
_trans_class = transitions.SoftmaxInputHMMTransitions
_states_class = InputHMMStates
class SoftmaxInputOnlyARHMM(SoftmaxInputARHMM):
_trans_class = transitions.SoftmaxInputOnlyHMMTransitions
### Recurrent ARHMM's
class _RecurrentARHMMMixin(_InputARHMMMixin):
"""
In the "recurrent" version, the data also serve as covariates.
"""
def add_data(self, data, covariates=None, strided=False, **kwargs):
# Remember that the covariates[t] drives the transition probabilities p(z[t] | ...)
# under our convention for add_data.
T = data.shape[0]
if covariates is None:
covariates = np.zeros((T, 0))
else:
assert covariates.shape[0] == T
# Combine the lagged data and the given covariates
covariates = np.column_stack((
np.row_stack((np.zeros(self.D), data[:-1])),
covariates))
super(_RecurrentARHMMMixin, self).add_data(data, covariates=covariates, **kwargs)
def generate(self, T=100, keep=True, init_data=None, covariates=None, with_noise=True):
from pybasicbayes.util.stats import sample_discrete
# Generate from the prior and raise exception if unstable
K, n = self.num_states, self.D
# Prepare the covariates
if covariates is None:
covariates = np.zeros((T, 0))
else:
assert covariates.shape[0] == T
# Initialize discrete state sequence
pi_0 = self.init_state_distn.pi_0
dss = np.empty(T, dtype=np.int32)
dss[0] = sample_discrete(pi_0.ravel())
data = np.empty((T, n), dtype='double')
if init_data is None:
data[0] = np.random.randn(n)
else:
data[0] = init_data
for t in range(1, T):
# Sample discrete state given previous continuous state and covariates
cov_t = np.column_stack((data[t-1:t], covariates[t]))
A = self.trans_distn.get_trans_matrices(cov_t)[0]
dss[t] = sample_discrete(A[dss[t-1], :])
# Sample continuous state given current discrete state
if with_noise:
data[t] = self.obs_distns[dss[t]].rvs(cov_t, return_xy=False)
else:
data[t] = self.obs_distns[dss[t]].predict(cov_t)
assert np.all(np.isfinite(data[t])), "RARHMM appears to be unstable!"
# TODO:
# if keep:
# ...
return data, dss
class PGRecurrentARHMM(_RecurrentARHMMMixin, _HMMGibbsSampling):
_trans_class = transitions.InputHMMTransitions
_states_class = InputHMMStates
class PGRecurrentOnlyARHMM(PGRecurrentARHMM):
_trans_class = transitions.InputOnlyHMMTransitions
class PGStickyRecurrentOnlyARHMM(PGRecurrentARHMM):
_trans_class = transitions.StickyInputOnlyHMMTransitions
class SoftmaxRecurrentARHMM(_RecurrentARHMMMixin, _HMMGibbsSampling, _HMMEM):
_trans_class = transitions.SoftmaxInputHMMTransitions
_states_class = InputHMMStates
class SoftmaxRecurrentOnlyARHMM(SoftmaxRecurrentARHMM):
_trans_class = transitions.SoftmaxInputOnlyHMMTransitions
### Stick-breaking transition models with Pólya-gamma augmentation
class _RecurrentSLDSBase(object):
def __init__(self, dynamics_distns, emission_distns, init_dynamics_distns,
fixed_emission=False, **kwargs):
self.fixed_emission = fixed_emission
# This class must always be used in conjunction with an SLDS class
super(_RecurrentSLDSBase, self).__init__(
dynamics_distns, emission_distns, init_dynamics_distns,
D_in=dynamics_distns[0].D_out, **kwargs)
def add_data(self, data, **kwargs):
self.states_list.append(
self._states_class(model=self, data=data, **kwargs))
class PGRecurrentSLDS(_RecurrentSLDSBase, _SLDSGibbsMixin, PGInputHMM):
_states_class = PGRecurrentSLDSStates
_trans_class = transitions.InputHMMTransitions
def resample_trans_distn(self):
# Include the auxiliary variables used for state resampling
self.trans_distn.resample(
stateseqs=[s.stateseq for s in self.states_list],
covseqs=[s.covariates for s in self.states_list],
omegas=[s.trans_omegas for s in self.states_list]
)
self._clear_caches()
def resample_emission_distns(self):
if self.fixed_emission:
return
super(PGRecurrentSLDS, self).resample_emission_distns()
class StickyPGRecurrentSLDS(PGRecurrentSLDS):
_trans_class = transitions.StickyInputHMMTransitions
class PGRecurrentOnlySLDS(PGRecurrentSLDS):
_trans_class = transitions.InputOnlyHMMTransitions
class StickyPGRecurrentOnlySLDS(PGRecurrentSLDS):
_trans_class = transitions.StickyInputOnlyHMMTransitions
### Softmax transition models with variational inference
class SoftmaxRecurrentSLDS(_RecurrentSLDSBase, _SLDSMeanFieldMixin, _SLDSVBEMMixin, SoftmaxInputHMM):
_states_class = SoftmaxRecurrentSLDSStates
_trans_class = transitions.SoftmaxInputHMMTransitions
def _M_step_trans_distn(self):
stack_tuples = lambda lst: list(map(lambda xs: np.concatenate(xs, axis=0), zip(*lst)))
self.trans_distn.max_likelihood(
stats=stack_tuples([s.E_trans_stats for s in self.states_list]))
def meanfield_update_trans_distn(self):
# Include the auxiliary variables of the lower bound
stack_tuples = lambda lst: list(map(lambda xs: np.concatenate(xs, axis=0), zip(*lst)))
self.trans_distn.meanfieldupdate(
stats=stack_tuples([s.E_trans_stats for s in self.states_list]))
def _init_mf_from_gibbs(self):
self.trans_distn._initialize_mean_field()
super(SoftmaxRecurrentSLDS, self)._init_mf_from_gibbs()
def initialize_transitions_from_gibbs(self):
self.trans_distn.initialize_with_logistic_regression(
[s.stateseq for s in self.states_list],
[s.gaussian_states for s in self.states_list])
def meanfield_update_parameters(self):
self.meanfield_update_init_dynamics_distns()
self.meanfield_update_dynamics_distns()
self.meanfield_update_emission_distns()
super(SoftmaxRecurrentSLDS, self).meanfield_update_parameters()
class SoftmaxRecurrentOnlySLDS(SoftmaxRecurrentSLDS):
_trans_class = transitions.SoftmaxInputOnlyHMMTransitions
|
py | 1a3d0b1dbdff7147196be35c64a9b41ab182752b | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import sys
sys.path.append("..")
from op_test import OpTest
import paddle
import paddle.fluid as fluid
paddle.enable_static()
SEED = 2021
class TestSGD(OpTest):
def setUp(self):
self.set_npu()
self.place = paddle.NPUPlace(0)
self.op_type = "sgd"
self.conf()
w = np.random.random((self.h, self.w)).astype("float32")
g = np.random.random((self.h, self.w)).astype("float32")
lr = np.array([0.1]).astype("float32")
self.inputs = {'Param': w, 'Grad': g, 'LearningRate': lr}
self.outputs = {'ParamOut': w - lr * g}
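        # Reference output of a single vanilla SGD step: param_out = param - lr * grad,
        # which is what the NPU 'sgd' op is checked against.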
def set_npu(self):
self.__class__.use_npu = True
def init_dtype(self):
self.dtype = np.float32
def conf(self):
self.h = 12
self.w = 15
def test_check_output(self):
self.check_output_with_place(self.place)
class TestNet(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = SEED
startup_prog.random_seed = SEED
np.random.seed(SEED)
a_np = np.random.random(size=(32, 32)).astype('float32')
b_np = np.random.random(size=(32, 32)).astype('float32')
label_np = np.random.randint(2, size=(32, 1)).astype('int64')
with paddle.static.program_guard(main_prog, startup_prog):
a = paddle.static.data(name="a", shape=[32, 32], dtype='float32')
b = paddle.static.data(name="b", shape=[32, 32], dtype='float32')
label = paddle.static.data(name="label",
shape=[32, 1],
dtype='int64')
sum = paddle.add(a, b)
z = paddle.pow(sum, 2.0)
fc_1 = fluid.layers.fc(input=z, size=128)
prediction = fluid.layers.fc(input=fc_1, size=2, act='softmax')
cost = fluid.layers.cross_entropy(input=prediction, label=label)
loss = fluid.layers.reduce_mean(cost)
sgd = fluid.optimizer.SGD(learning_rate=0.01)
sgd.minimize(loss)
if run_npu:
place = paddle.NPUPlace(0)
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
print("Start run on {}".format(place))
for epoch in range(100):
pred_res, loss_res = exe.run(main_prog,
feed={
"a": a_np,
"b": b_np,
"label": label_np
},
fetch_list=[prediction, loss])
if epoch % 10 == 0:
print("Epoch {} | Prediction[0]: {}, Loss: {}".format(
epoch, pred_res[0], loss_res))
return pred_res, loss_res
def test_npu(self):
cpu_pred, cpu_loss = self._test(False)
npu_pred, npu_loss = self._test(True)
self.assertTrue(np.allclose(npu_pred, cpu_pred))
self.assertTrue(np.allclose(npu_loss, cpu_loss))
if __name__ == '__main__':
unittest.main()
|
py | 1a3d0c52f59ea87e512dfa376d62243854cf8b2e | """ Unit tests cases.
Copyright (c) 2003 Colin Stewart (http://www.owlfish.com/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
If you make any bug fixes or feature enhancements please let me know!
"""
try:
import logging
except ImportError:
from simpletal import DummyLogger as logging
from pubtal import SiteUtils
import updateSite
import unittest, copy, os.path
root = logging.getLogger()
root.setLevel (logging.WARN)
TEMPLATE1 = '<html><body><h1 tal:content="page/headers/title"></h1> <div tal:content="structure page/content"></div></body></html>'
CONTENT1 = """title: Test1
<p>This is the <b>first</b> test.
With a newline
Or two</p>
<p>And a paragraph.</p>
<p>
1
2
3</p>
<p>So there, jimmy lad!</p>"""
CONFIG1 = """<Content>
content-type Raw
</Content>"""
RESULT1 = {'index.html': """<html><body><h1>Test1</h1> <div><p>This is the <b>first</b> test.
With a newline
Or two</p>
<p>And a paragraph.</p>
<p>
1
2
3</p>
<p>So there, jimmy lad!</p></div></body></html>"""}
class RawContentTestCases (unittest.TestCase):
def setUp (self):
self.site = SiteUtils.SiteBuilder()
self.site.buildDirs()
def tearDown (self):
self.site.destroySite()
pass
def _runTest_ (self, expectedResult, configFile=None):
if (configFile is None):
conf = os.path.join (self.site.getSiteDir(), "test.config")
else:
conf = configFile
update = updateSite.UpdateSite (conf, None, ui=SiteUtils.SilentUI())
update.buildSite()
comp = SiteUtils.DirCompare()
res = comp.compare (self.site.getDestDir(), expectedResult)
self.failUnless (res is None, res)
def testRawContent (self):
self.site.createTemplate ('template.html', TEMPLATE1)
self.site.createContent ('index.txt', CONTENT1)
self.site.createConfigFile ('test.config', CONFIG1)
self._runTest_ (RESULT1)
if __name__ == '__main__':
unittest.main()
|
py | 1a3d0c73595f386106696ecac5f3897fdd501abb | # -*- coding: utf-8 -*-
# import abstract task solver
from .task_solver import TaskSolver
# import QP task solver
from .qp_task_solver import QPTaskSolver
# import Non-linear task solver
from .nlp_task_solver import NLPTaskSolver
|
py | 1a3d0d4ac6e08f371b9cb6ba7010cee1064998e8 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import onnx
from onnx import helper, TensorProto, mapping, numpy_helper
import torch
import torchvision
import pytest
import tvm.topi.testing
import tvm
from tvm import relay
from tvm.contrib import graph_executor
import scipy
import tvm.testing
def get_input_data_shape_dict(graph_def, input_data):
if isinstance(input_data, list):
input_names = {}
shape_dict = {}
for i, _ in enumerate(input_data):
input_names[i] = graph_def.graph.input[i].name
shape_dict[input_names[i]] = input_data[i].shape
else:
input_names = graph_def.graph.input[0].name
shape_dict = {input_names: input_data.shape}
return input_names, shape_dict
def get_tvm_output_with_vm(
graph_def, input_data, target, device, opset=None, freeze_params=False, convert_to_static=False
):
""" Generic function to execute and get tvm output with vm executor"""
if not isinstance(input_data, list):
input_data = [input_data]
_, shape_dict = get_input_data_shape_dict(graph_def, input_data)
mod, params = relay.frontend.from_onnx(
graph_def, shape_dict, opset=opset, freeze_params=freeze_params
)
if convert_to_static:
mod = relay.transform.DynamicToStatic()(mod)
ex = relay.create_executor("vm", mod=mod, device=device, target=target)
result = ex.evaluate()(*input_data, **params)
if isinstance(result, tvm.runtime.NDArray):
return result.asnumpy()
return [r.asnumpy() for r in result]
def get_tvm_output(
graph_def, input_data, target, device, output_shape=None, output_dtype="float32", opset=None
):
""" Generic function to execute and get tvm output"""
# TODO: Resolve the issues and remove the following lines
target = "llvm"
device = tvm.cpu(0)
input_names, shape_dict = get_input_data_shape_dict(graph_def, input_data)
mod, params = relay.frontend.from_onnx(graph_def, shape_dict, opset=opset)
with tvm.transform.PassContext(opt_level=1):
graph, lib, params = relay.build(mod, target, params=params)
m = graph_executor.create(graph, lib, device)
# set inputs
if isinstance(input_data, list):
for i, e in enumerate(input_names):
            # It's possible for some onnx inputs to not be needed in the tvm
            # module, so confirm it's present before setting.
try:
m.set_input(input_names[i], tvm.nd.array(input_data[i].astype(input_data[i].dtype)))
except:
continue
else:
m.set_input(input_names, tvm.nd.array(input_data.astype(input_data.dtype)))
m.set_input(**params)
# execute
m.run()
# get outputs
if isinstance(output_shape, list):
tvm_output_list = []
for i, _ in enumerate(output_shape):
tvm_output = m.get_output(i)
tvm_output_list.append(tvm_output.asnumpy())
return tvm_output_list
else:
tvm_output = m.get_output(0)
return tvm_output.asnumpy()
def get_onnxruntime_output(model, inputs):
import onnxruntime.backend
rep = onnxruntime.backend.prepare(model, "CPU")
if isinstance(inputs, list) and len(inputs) == 1:
inp = inputs[0]
else:
inp = inputs
output = rep.run(inp)
# Unpack output if there's only a single value.
if len(output) == 1:
output = output[0]
return output
def verify_with_ort_with_inputs(
model,
inputs,
out_shape=None,
targets=None,
use_vm=False,
opset=None,
freeze_params=False,
convert_to_static=False,
dtype="float32",
rtol=1e-5,
atol=1e-5,
apply_softmax=False,
):
if opset is not None:
model.opset_import[0].version = opset
ort_out = get_onnxruntime_output(model, inputs)
if targets is None:
targets = [tgt for (tgt, _) in tvm.testing.enabled_targets()]
for target in targets:
dev = tvm.device(target, 0)
if use_vm:
tvm_out = get_tvm_output_with_vm(
model,
inputs,
target,
dev,
opset=opset,
freeze_params=freeze_params,
convert_to_static=convert_to_static,
)
else:
tvm_out = get_tvm_output(model, inputs, target, dev, out_shape, dtype, opset=opset)
if not isinstance(tvm_out, list):
tvm_out = [tvm_out]
if not isinstance(ort_out, list):
ort_out = [ort_out]
for tvm_val, ort_val in zip(tvm_out, ort_out):
if apply_softmax:
ort_val = scipy.special.softmax(ort_val)
tvm_val = scipy.special.softmax(tvm_val)
tvm.testing.assert_allclose(ort_val, tvm_val, rtol=rtol, atol=atol)
assert ort_val.dtype == tvm_val.dtype
def verify_with_ort(
model,
input_shapes,
out_shape=None,
targets=None,
use_vm=False,
opset=None,
freeze_params=False,
convert_to_static=False,
dtype="float32",
rtol=1e-5,
atol=1e-5,
):
inputs = [np.random.uniform(size=ishape).astype(dtype) for ishape in input_shapes]
verify_with_ort_with_inputs(
model,
inputs,
out_shape=out_shape,
targets=targets,
use_vm=use_vm,
opset=opset,
freeze_params=freeze_params,
convert_to_static=convert_to_static,
dtype=dtype,
rtol=rtol,
atol=atol,
)
def make_constant_node(name, data_type, dims, vals):
return helper.make_node(
"Constant",
inputs=[],
outputs=[name],
value=helper.make_tensor(name=name, data_type=data_type, dims=dims, vals=vals),
)
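# Minimal usage sketch of the helpers above (hypothetical example, not part of
# the original test suite): build a single-node ONNX model and compare TVM
# against onnxruntime via verify_with_ort. Wrapped in a function so it does not
# execute at import time.
def _example_identity_roundtrip():
    node = helper.make_node("Identity", ["x"], ["y"])
    graph = helper.make_graph(
        [node],
        "identity_example",
        inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, [2, 3])],
        outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, [2, 3])],
    )
    model = helper.make_model(graph, producer_name="identity_example")
    verify_with_ort(model, [(2, 3)], out_shape=[(2, 3)])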
@tvm.testing.uses_gpu
def test_reshape():
in_shape = (4, 3, 3, 4)
ref_shape = (6, 2, 4, 3)
ref_array = np.array(ref_shape)
ref_node = onnx.helper.make_node(
"Constant",
inputs=[],
outputs=["ref_in"],
value=onnx.helper.make_tensor(
name="const_tensor",
data_type=onnx.TensorProto.INT32,
dims=ref_array.shape,
vals=ref_array.flatten().astype(int),
),
)
reshape_node = helper.make_node("Reshape", ["in", "ref_in"], ["out"])
graph = helper.make_graph(
[ref_node, reshape_node],
"reshape_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(ref_shape))],
)
model = helper.make_model(graph, producer_name="reshape_test")
for target, dev in tvm.testing.enabled_targets():
x = np.random.uniform(size=in_shape).astype("int32")
tvm_out = get_tvm_output(model, x, target, dev, ref_shape, "float32")
tvm.testing.assert_allclose(ref_shape, tvm_out.shape)
@tvm.testing.uses_gpu
def test_double_reshape():
in_shape = (4, 3, 3, 4)
ref_shape = (6, 2, 4, 3)
ref_array = np.array(ref_shape)
ref_node = onnx.helper.make_node(
"Constant",
inputs=[],
outputs=["ref_in"],
value=onnx.helper.make_tensor(
name="const_tensor",
data_type=onnx.TensorProto.INT32,
dims=ref_array.shape,
vals=ref_array.flatten().astype(int),
),
)
reshape_node1 = helper.make_node("Reshape", ["in", "ref_in"], ["out1"])
reshape_node2 = helper.make_node("Reshape", ["in", "ref_in"], ["out2"])
add_node = helper.make_node("Add", ["out1", "out2"], ["out"])
graph = helper.make_graph(
[ref_node, reshape_node1, reshape_node2, add_node],
"reshape_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(ref_shape))],
)
model = helper.make_model(graph, producer_name="reshape_test")
for target, dev in tvm.testing.enabled_targets():
x = np.random.uniform(size=in_shape).astype("int32")
tvm_out = get_tvm_output(model, x, target, dev, ref_shape, "float32")
tvm.testing.assert_allclose(ref_shape, tvm_out.shape)
@tvm.testing.uses_gpu
def test_expand():
def _test_expand(name, data, shape, ref_data, dtype="int32"):
shape_array = np.array(shape)
if dtype == "int32":
shape_node = onnx.helper.make_node(
"Constant",
inputs=[],
outputs=["shape"],
value=onnx.helper.make_tensor(
name="const_tensor",
data_type=onnx.TensorProto.INT32,
dims=shape_array.shape,
vals=shape_array.flatten().astype("int32"),
),
)
elif dtype == "int64":
shape_node = onnx.helper.make_node(
"Constant",
inputs=[],
outputs=["shape"],
value=onnx.helper.make_tensor(
name="const_tensor",
data_type=onnx.TensorProto.INT64,
dims=shape_array.shape,
vals=shape_array.flatten().astype("int64"),
),
)
else:
raise "Invalid dtype"
expand_node = helper.make_node("Expand", ["in", "shape"], ["out"])
graph = helper.make_graph(
[shape_node, expand_node],
"expand_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(data.shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(ref_data.shape))],
)
model = helper.make_model(graph, producer_name=name)
for target, dev in tvm.testing.enabled_targets():
tvm_out = get_tvm_output_with_vm(model, data, target, dev, freeze_params=True)
tvm.testing.assert_allclose(ref_data, tvm_out)
in_shape = (3, 1)
shape = (3, 4)
data = np.random.uniform(size=in_shape).astype(np.float32)
ref_data = np.tile(data, 4)
_test_expand("expand_with_dim_unchanged_test", data, shape, ref_data, "int32")
_test_expand("expand_with_dim_unchanged_test", data, shape, ref_data, "int64")
in_shape = (3, 1)
shape = (2, 1, 6)
data = np.random.uniform(size=in_shape).astype(np.float32)
ref_data = data * np.ones(shape, dtype=np.float32)
_test_expand("expand_with_dim_changed_test", data, shape, ref_data, "int32")
_test_expand("expand_with_dim_changed_test", data, shape, ref_data, "int64")
def verify_depth_to_space(inshape, outshape, mode, blockSize):
node = onnx.helper.make_node("DepthToSpace", inputs=["x"], outputs=["y"], blocksize=blockSize)
graph = helper.make_graph(
[node],
"depth_to_space_test",
inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(inshape))],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(outshape))],
)
model = helper.make_model(graph, producer_name="depth_to_space_test")
verify_with_ort(model, [inshape], [outshape])
@tvm.testing.uses_gpu
def test_depth_to_space():
    # current onnx.checker uses the OpSet-1 version of DepthToSpace, which doesn't have a mode argument.
    # TODO: we can add a mode argument to test CRD mode and DCR mode
# in the future when we update to a newer onnx version.
verify_depth_to_space((1, 8, 2, 3), (1, 2, 4, 6), mode="CRD", blockSize=2)
def verify_space_to_depth(inshape, outshape, blockSize):
node = onnx.helper.make_node("SpaceToDepth", inputs=["x"], outputs=["y"], blocksize=blockSize)
graph = helper.make_graph(
[node],
"space_to_depth_test",
inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(inshape))],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(outshape))],
)
model = helper.make_model(graph, producer_name="space_to_depth_test")
verify_with_ort(model, [inshape], [outshape])
@tvm.testing.uses_gpu
def test_space_to_depth():
verify_space_to_depth((1, 1, 4, 6), (1, 4, 2, 3), 2)
@tvm.testing.uses_gpu
def test_shape():
in_shape = (4, 3, 3, 4)
ref_shape = (6, 2, 4, 3)
ref_array = np.array(ref_shape)
ref_node = onnx.helper.make_node(
"Constant",
inputs=[],
outputs=["ref_in"],
value=onnx.helper.make_tensor(
name="const_tensor",
data_type=onnx.TensorProto.INT32,
dims=ref_array.shape,
vals=ref_array.flatten().astype(int),
),
)
reshape_node = helper.make_node("Reshape", ["in", "ref_in"], ["out"])
shape_node = helper.make_node("Shape", ["out"], ["final_out"])
graph = helper.make_graph(
[ref_node, reshape_node, shape_node],
"shape_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("final_out", TensorProto.FLOAT, list(ref_shape))],
)
model = helper.make_model(graph, producer_name="shape_test")
for target, dev in tvm.testing.enabled_targets():
x = np.random.uniform(size=in_shape).astype("int32")
tvm_out = get_tvm_output(model, x, target, dev, ref_shape, "int32")
tvm.testing.assert_allclose(ref_shape, tvm_out)
def _test_power_iteration(x_shape, y_shape):
if isinstance(y_shape, int):
y_shape = [y_shape]
x = np.random.uniform(size=x_shape).astype(np.float32)
y = np.random.uniform(size=y_shape).astype(np.float32)
np_res = np.power(x, y).astype(np.float32)
res = helper.make_node("Pow", ["x", "y"], ["out"])
graph = helper.make_graph(
[res],
"power_test",
inputs=[
helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape)),
helper.make_tensor_value_info("y", TensorProto.FLOAT, list(y_shape)),
],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(np_res.shape))],
)
model = helper.make_model(graph, producer_name="power_test")
for target, dev in tvm.testing.enabled_targets():
tvm_out = get_tvm_output(model, [x, y], target, dev, np_res.shape)
tvm.testing.assert_allclose(np_res, tvm_out, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_power():
_test_power_iteration((1, 3), (1))
_test_power_iteration((2, 3), (2, 3))
_test_power_iteration((2, 3), (1, 3))
def verify_range(start, limit, delta, dtype):
dtype_map = {
"float32": TensorProto.FLOAT,
"int32": TensorProto.INT32,
"int64": TensorProto.INT64,
}
dtype_onnx = dtype_map[dtype]
y = helper.make_node("Range", ["start", "limit", "delta"], ["output"])
graph = helper.make_graph(
[y],
"range_test",
inputs=[
helper.make_tensor_value_info("start", dtype_onnx, []),
helper.make_tensor_value_info("limit", dtype_onnx, []),
helper.make_tensor_value_info("delta", dtype_onnx, []),
],
outputs=[
helper.make_tensor_value_info(
"output", dtype_onnx, np.arange(start, limit, delta).shape
)
],
)
model = helper.make_model(graph, producer_name="range_test")
inputs = [np.array(x).astype(dtype) for x in [start, limit, delta]]
verify_with_ort_with_inputs(model, inputs, use_vm=True)
@tvm.testing.uses_gpu
def test_range():
for t in ["float32", "int32", "int64"]:
verify_range(0, 10, 1, t)
verify_range(2, 8, 2, t)
verify_range(-3, 6, 4, t)
verify_range(-2, -7, -1, t)
@tvm.testing.uses_gpu
def test_squeeze():
in_shape = (1, 3, 1, 3, 1, 1)
out_shape = (3, 3)
y = helper.make_node("Squeeze", ["in"], ["out"], axes=[0, 2, 4, 5])
graph = helper.make_graph(
[y],
"squeeze_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
)
model = helper.make_model(graph, producer_name="squeeze_test")
x = np.random.uniform(size=in_shape).astype("float32")
verify_with_ort_with_inputs(model, [x], [out_shape], opset=11)
@tvm.testing.uses_gpu
def test_flatten():
in_shape = (1, 3, 4, 4)
axis = 1
ref_shape = (1, 48)
flatten_node = helper.make_node("Flatten", ["in"], ["out"], axis=axis)
graph = helper.make_graph(
[flatten_node],
"flatten_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(ref_shape))],
)
model = helper.make_model(graph, producer_name="flatten_test")
verify_with_ort(model, [in_shape])
@tvm.testing.uses_gpu
def test_unsqueeze():
in_shape = (3, 3)
axis = (0, 3, 4)
out_shape = (1, 3, 3, 1, 1)
y = helper.make_node("Unsqueeze", ["in"], ["out"], axes=list(axis))
graph = helper.make_graph(
[y],
"squeeze_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
)
model = helper.make_model(graph, producer_name="squeeze_test")
verify_with_ort(model, [in_shape], opset=11)
def verify_gather(in_shape, indices, axis, dtype):
x = np.random.uniform(size=in_shape).astype(dtype)
indices = np.array(indices, dtype="int64")
out_np = np.take(x, indices, axis=axis)
y = helper.make_node("Gather", ["in", "indices"], ["out"], axis=axis)
graph = helper.make_graph(
[y],
"gather_test",
inputs=[
helper.make_tensor_value_info(
"in", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], list(in_shape)
),
helper.make_tensor_value_info("indices", TensorProto.INT64, list(indices.shape)),
],
outputs=[
helper.make_tensor_value_info(
"out", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], list(out_np.shape)
)
],
)
model = helper.make_model(graph, producer_name="gather_test")
verify_with_ort_with_inputs(model, [x, indices], dtype=dtype)
@tvm.testing.uses_gpu
def test_gather():
verify_gather((4,), [1], 0, "int32")
verify_gather((1, 4), [0], 0, "int32")
verify_gather((4,), [[[1, 0], [0, 1]]], 0, "float32")
verify_gather((2, 2), [[[1, 0], [0, 1]]], 1, "int32")
verify_gather((3, 3, 3), [[[1, 0]]], -1, "int32")
verify_gather((4, 3, 5, 6), [[2, 1, 0, 0]], 0, "float32")
@tvm.testing.uses_gpu
def test_dynamic_gather():
dtype = "float32"
in_shape = [2, 2]
indices = 1
axis = 1
x = np.random.uniform(size=in_shape).astype(dtype)
indices = np.array(indices, dtype="int64")
out_np = np.take(x, indices, axis=axis)
indices = helper.make_node(
"Constant",
inputs=[],
outputs=["indices"],
value=onnx.helper.make_tensor(
name="const_indices",
data_type=onnx.TensorProto.INT64,
dims=[],
vals=[1],
),
)
y = helper.make_node("Gather", ["in", "indices"], ["out"], axis=axis)
graph = helper.make_graph(
[indices, y],
"gather_test",
inputs=[
helper.make_tensor_value_info(
"in", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], ["?", "?"]
),
],
outputs=[
helper.make_tensor_value_info(
"out", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], ["?"] * len(out_np.shape)
)
],
)
model = helper.make_model(graph, producer_name="dynamic_gather_test")
mod, params = relay.frontend.from_onnx(model)
for target, device in tvm.testing.enabled_targets():
ex = relay.create_executor("vm", mod=mod, device=device, target=target)
result = ex.evaluate()(x, **params)
tvm.testing.assert_allclose(out_np, result.asnumpy(), rtol=1e-5, atol=1e-5)
def verify_gatherelements(in_shape, indices, axis):
x = np.random.uniform(size=in_shape).astype("float32")
indices = np.array(indices, dtype="int32")
y = helper.make_node("GatherElements", ["data", "indices"], ["output"], axis=axis)
graph = helper.make_graph(
[y],
"gather_elements_test",
inputs=[
helper.make_tensor_value_info("data", TensorProto.FLOAT, list(in_shape)),
helper.make_tensor_value_info("indices", TensorProto.INT32, list(indices.shape)),
],
outputs=[helper.make_tensor_value_info("output", TensorProto.FLOAT, list(in_shape))],
)
model = helper.make_model(graph, producer_name="gather_elements_test")
verify_with_ort_with_inputs(model, [x, indices])
@tvm.testing.uses_gpu
def test_gatherelements():
verify_gatherelements((4,), [3, 0, 2, 1], 0)
verify_gatherelements((2, 2), [[1, 0], [0, 1]], 0)
verify_gatherelements((2, 2), [[0, 0], [1, 0]], 1)
verify_gatherelements((2, 2), [[1, 0], [0, 1]], 1)
indices = [
[[1, 0, 0], [1, 0, 1], [0, 1, 1]],
[[1, 1, 1], [1, 2, 1], [1, 0, 1]],
[[1, 2, 1], [1, 2, 1], [1, 2, 1]],
]
verify_gatherelements((3, 3, 3), indices, 2)
def verify_scatter(in_shape, indices, axis):
x = np.random.uniform(size=in_shape).astype("float32")
indices = np.array(indices, dtype="int32")
updates = np.random.uniform(size=indices.shape).astype("float32")
y = helper.make_node("ScatterElements", ["data", "indices", "updates"], ["output"], axis=axis)
graph = helper.make_graph(
[y],
"scatter_test",
inputs=[
helper.make_tensor_value_info("data", TensorProto.FLOAT, list(in_shape)),
helper.make_tensor_value_info("indices", TensorProto.INT32, list(indices.shape)),
helper.make_tensor_value_info("updates", TensorProto.FLOAT, list(indices.shape)),
],
outputs=[helper.make_tensor_value_info("output", TensorProto.FLOAT, list(in_shape))],
)
model = helper.make_model(graph, producer_name="scatter_test")
verify_with_ort_with_inputs(model, [x, indices, updates])
@tvm.testing.uses_gpu
def test_scatter():
verify_scatter((4,), [1], 0)
verify_scatter((1, 4), [[0]], 0)
verify_scatter((4,), [2, 3], 0)
verify_scatter((2, 2), [[1, 0], [0, 1]], 1)
verify_scatter((3, 3, 3), [[[-1, -3]]], -1)
verify_scatter((4, 3, 5, 6), [[[[2, 1, 0, 0]]]], 0)
def _test_slice_iteration_v1(indata, outdata, starts, ends, axes=None):
if axes:
y = helper.make_node("Slice", ["in"], ["out"], axes=axes, starts=starts, ends=ends)
else:
y = helper.make_node("Slice", ["in"], ["out"], starts=starts, ends=ends)
graph = helper.make_graph(
[y],
"slice_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(indata.shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))],
)
model = helper.make_model(graph, producer_name="slice_test")
verify_with_ort_with_inputs(model, [indata], [outdata.shape], opset=1)
def _test_slice_iteration_v10(indata, outdata, **attrs):
starts = attrs["starts"]
ends = attrs["ends"]
axes = None if "axes" not in attrs else attrs["axes"]
steps = None if "steps" not in attrs else attrs["steps"]
starts = np.asarray(starts)
ends = np.asarray(ends)
inputs = [
helper.make_tensor_value_info("data", TensorProto.FLOAT, list(indata.shape)),
helper.make_tensor_value_info("starts", TensorProto.INT64, list(starts.shape)),
helper.make_tensor_value_info("ends", TensorProto.INT64, list(ends.shape)),
]
initializer = [
helper.make_tensor("starts", TensorProto.INT64, list(starts.shape), starts),
helper.make_tensor("ends", TensorProto.INT64, list(ends.shape), ends),
]
nodes = []
if "add_noop_to_input_attrs" in attrs:
def add_noop_to_input_attr(attr_name, attr):
output_name = attr_name + "_output"
ref_shape = list(np.array(attr).shape)
ref_shape.insert(0, 1)
ref_shape = tuple(ref_shape)
ref_array = np.array(ref_shape)
ref_node = onnx.helper.make_node(
"Constant",
inputs=[],
outputs=["ref_in_" + attr_name],
value=onnx.helper.make_tensor(
name="const_tensor__1_" + attr_name,
data_type=onnx.TensorProto.INT64,
dims=ref_array.shape,
vals=ref_array.flatten().astype(int),
),
)
in_shape = np.array(attr).shape
in_array = np.array(in_shape)
ref_node2 = onnx.helper.make_node(
"Constant",
inputs=[],
outputs=["input_shape_" + attr_name],
value=onnx.helper.make_tensor(
name="const_tensor__2_" + attr_name,
data_type=onnx.TensorProto.INT64,
dims=in_array.shape,
vals=in_array.flatten().astype(int),
),
)
reshape1_node = helper.make_node(
"Reshape", [attr_name, "ref_in_" + attr_name], ["reshape_" + attr_name]
)
reshape2_node = helper.make_node(
"Reshape", ["reshape_" + attr_name, "input_shape_" + attr_name], [output_name]
)
return [ref_node, ref_node2, reshape1_node, reshape2_node]
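        # The two Reshape nodes built above form a shape round-trip (add a
        # leading 1, then restore the original shape): a functional no-op whose
        # only purpose is to make the starts/ends/axes/steps values arrive as
        # computed tensors rather than graph constants, exercising the
        # dynamic-input handling of the Slice converter.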
slice_inputs = []
for attr_name in ["starts", "ends", "axes", "steps"]:
if attr_name not in attrs:
continue
if "add_noop_to_input_attrs" in attrs and attr_name in attrs["add_noop_to_input_attrs"]:
nodes.extend(add_noop_to_input_attr(attr_name, attrs[attr_name]))
slice_inputs.append(attr_name + "_output")
else:
slice_inputs.append(attr_name)
if axes:
axes = np.asarray(axes)
inputs.append(helper.make_tensor_value_info("axes", TensorProto.INT64, list(axes.shape)))
initializer.append(helper.make_tensor("axes", TensorProto.INT64, list(axes.shape), axes))
if steps:
assert axes is not None and len(axes) == len(steps)
steps = np.asarray(steps)
inputs.append(helper.make_tensor_value_info("steps", TensorProto.INT64, list(axes.shape)))
initializer.append(helper.make_tensor("steps", TensorProto.INT64, list(steps.shape), steps))
y = helper.make_node("Slice", ["data", *slice_inputs], ["out"])
nodes.append(y)
graph = helper.make_graph(
nodes,
"slice_test",
inputs=inputs,
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))],
initializer=initializer,
)
model = helper.make_model(graph, producer_name="slice_test")
verify_with_ort_with_inputs(model, [indata], opset=10, freeze_params=True, use_vm=True)
@tvm.testing.uses_gpu
def test_slice():
x = np.random.randn(20, 10, 5).astype(np.float32)
_test_slice_iteration_v1(x, x[0:3, 0:10], starts=(0, 0), ends=(3, 10), axes=(0, 1))
_test_slice_iteration_v1(x, x[:, :, 3:4], starts=(0, 0, 3), ends=(20, 10, 4))
_test_slice_iteration_v1(x, x[:, 1:1000], starts=(1,), ends=(1000,), axes=(1,))
_test_slice_iteration_v1(x, x[:, 0:-1], starts=(0,), ends=(-1,), axes=(1,))
_test_slice_iteration_v10(x, x[0:3, 0:10], starts=(0, 0), ends=(3, 10), axes=(0, 1))
_test_slice_iteration_v10(x, x[:, :, 3:4], starts=(0, 0, 3), ends=(20, 10, 4))
_test_slice_iteration_v10(x, x[:, 1:1000], starts=(1,), ends=(1000,), axes=(1,))
_test_slice_iteration_v10(x, x[:, 0:-1], starts=(0,), ends=(-1,), axes=(1,))
_test_slice_iteration_v10(
x,
x[0:3, 0:10],
starts=(0, 0),
ends=(3, 10),
axes=(0, 1),
add_noop_to_input_attrs=["starts"],
)
_test_slice_iteration_v10(
x, x[:, :, 3:4], starts=(0, 0, 3), ends=(20, 10, 4), add_noop_to_input_attrs=["ends"]
)
_test_slice_iteration_v10(
x, x[:, 1:1000], starts=(1,), ends=(1000,), axes=(1,), add_noop_to_input_attrs=["axes"]
)
_test_slice_iteration_v10(
x,
x[:, 0:-1],
starts=(0,),
ends=(-1,),
axes=(1,),
add_noop_to_input_attrs=["starts", "ends"],
)
_test_slice_iteration_v10(
x,
x[0:3, 0:10],
starts=(0, 0),
ends=(3, 10),
axes=(0, 1),
add_noop_to_input_attrs=["ends", "axes"],
)
_test_slice_iteration_v10(
x,
x[:, :, 3:4],
starts=(0, 0, 3),
ends=(20, 10, 4),
add_noop_to_input_attrs=["starts", "axes"],
)
_test_slice_iteration_v10(
x,
x[:, 1:1000],
starts=(1,),
ends=(1000,),
axes=(1,),
add_noop_to_input_attrs=["starts", "ends", "axes"],
)
x = np.random.randn(1, 1, 1, 128).astype(np.float32)
_test_slice_iteration_v10(
x, x, starts=(0, 0), ends=(9223372036854775807, 9223372036854775807), axes=(0, 3)
)
x = np.random.randn(4, 4).astype(np.float32)
_test_slice_iteration_v10(
x, x[:, 1::2], starts=(1,), ends=(9223372036854775807,), axes=(1,), steps=(2,)
)
_test_slice_iteration_v10(
x,
x[0::1, 1::2],
starts=(0, 1),
ends=(4, 4),
axes=(0, 1),
steps=(1, 2),
)
def _test_onnx_op_elementwise(inshape, outfunc, npargs, dtype, opname, kwargs, opset=None):
indata = np.random.uniform(-1, 1, size=inshape).astype(dtype)
outdata = outfunc(indata, **npargs)
y = helper.make_node(opname, ["in"], ["out"], **kwargs)
graph = helper.make_graph(
[y],
opname + "_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(indata.shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))],
)
model = helper.make_model(graph, producer_name=opname + "_test")
verify_with_ort_with_inputs(model, [indata], [outdata.shape], opset=opset, dtype=dtype)
@tvm.testing.uses_gpu
def test_floor():
_test_onnx_op_elementwise((2, 4, 5, 6), np.floor, {}, "float32", "Floor", {})
@tvm.testing.uses_gpu
def test_ceil():
_test_onnx_op_elementwise((2, 4, 5, 6), np.ceil, {}, "float32", "Ceil", {})
@tvm.testing.uses_gpu
def test_clip():
_test_onnx_op_elementwise(
(2, 4, 5, 6),
np.clip,
{"a_min": -1.0, "a_max": 1.0},
"float32",
"Clip",
{"min": -1.0, "max": 1.0},
opset=6,
)
_test_onnx_op_elementwise(
(2, 4, 5, 6),
np.clip,
{"a_min": -np.inf, "a_max": 1.0},
"float32",
"Clip",
{"max": 1.0},
opset=6,
)
_test_onnx_op_elementwise(
(2, 4, 5, 6),
np.clip,
{"a_min": -1.0, "a_max": np.inf},
"float32",
"Clip",
{"min": -1.0},
opset=6,
)
@tvm.testing.uses_gpu
def test_clip_min_max_as_inputs():
input_shape = (2, 4, 5, 6)
nodes = [
make_constant_node("min", onnx.TensorProto.FLOAT, (), [0.0]),
make_constant_node("max", onnx.TensorProto.FLOAT, (), [6.0]),
]
input_names = ["in", "min", "max"]
nodes.append(helper.make_node("Clip", inputs=input_names, outputs=["out"]))
graph = helper.make_graph(
nodes,
"clip_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(input_shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(input_shape))],
)
model = helper.make_model(graph, producer_name="clip_test")
verify_with_ort(model, [input_shape], out_shape=[input_shape])
@tvm.testing.uses_gpu
def test_round():
_test_onnx_op_elementwise((2, 4, 5, 6), np.round, {}, "float32", "Round", {})
def _test_finite_ops(inshape, outfunc, npargs, dtype, opname, kwargs):
indata = np.random.choice(a=[np.nan, np.inf, -np.inf, 0.5, 1.0, 0], size=inshape).astype(dtype)
outdata = outfunc(indata, **npargs)
y = helper.make_node(opname, ["in"], ["out"], **kwargs)
graph = helper.make_graph(
[y],
opname + "_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(indata.shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(outdata.shape))],
)
model = helper.make_model(graph, producer_name=opname + "_test")
verify_with_ort_with_inputs(model, [indata], [outdata.shape], dtype=dtype)
@tvm.testing.uses_gpu
def test_isinf():
_test_finite_ops((2, 4, 5, 6), np.isinf, {}, "float32", "IsInf", {})
@tvm.testing.uses_gpu
def test_isnan():
_test_finite_ops((2, 4, 5, 6), np.isnan, {}, "float32", "IsNaN", {})
def verify_gather_nd(in_shape, indices, out_shape, dtype="float32"):
x = np.random.uniform(size=in_shape).astype(dtype)
indices = np.array(indices, dtype="int64")
y = helper.make_node("GatherND", ["in", "indices"], ["out"])
graph = helper.make_graph(
[y],
"gather_test",
inputs=[
helper.make_tensor_value_info(
"in", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], list(in_shape)
),
helper.make_tensor_value_info("indices", TensorProto.INT64, list(indices.shape)),
],
outputs=[
helper.make_tensor_value_info(
"out", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], list(out_shape)
)
],
)
model = helper.make_model(graph, producer_name="gather_test")
verify_with_ort_with_inputs(model, [x, indices], [out_shape])
@tvm.testing.uses_gpu
def test_gather_nd():
verify_gather_nd([2, 2], [[0, 0], [1, 1]], [2], "int32")
verify_gather_nd([2, 2], [[1], [0]], [2, 2])
verify_gather_nd([2, 2, 2], [[0, 1], [1, 0]], [2, 2])
verify_gather_nd([2, 2, 2], [[[0, 1]], [[1, 0]]], [2, 1, 2])
@tvm.testing.uses_gpu
def test_onehot():
indices_shape = [10]
indices_array = np.random.randint(low=0, high=9, size=indices_shape, dtype="int32")
depth = 10
values = np.asarray([0, 1]).astype("int32")
out_np = np.eye(depth)[indices_array.reshape(-1)]
onehot_node = helper.make_node("OneHot", ["indices", "depth", "values"], ["out"])
graph = helper.make_graph(
[onehot_node],
"onehot_test",
inputs=[
helper.make_tensor_value_info("indices", TensorProto.INT32, indices_shape),
helper.make_tensor_value_info("depth", TensorProto.INT32, [1]),
helper.make_tensor_value_info("values", TensorProto.INT32, values.shape),
],
outputs=[helper.make_tensor_value_info("out", TensorProto.INT32, out_np.shape)],
)
model = helper.make_model(graph, producer_name="onehot_test")
# TODO(jwfromm): Replace test against np with test against onnxrt once we update versions.
for target, dev in tvm.testing.enabled_targets():
tvm_out = get_tvm_output_with_vm(
model, [indices_array, np.array([depth]).astype("int32"), values], target, dev
)
tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5)
def verify_gemm(a_shape, b_shape, c_shape=None, freeze_params=False):
out_shape = [a_shape[0], b_shape[1]]
a_array = np.random.uniform(size=a_shape).astype("float32")
b_array = np.random.uniform(size=b_shape).astype("float32")
input_names = ["a", "b"]
input_nodes = [
helper.make_tensor_value_info("a", TensorProto.FLOAT, list(a_shape)),
helper.make_tensor_value_info("b", TensorProto.FLOAT, list(b_shape)),
]
input_values = [a_array, b_array]
if c_shape is not None:
c_array = np.random.uniform(size=c_shape).astype("float32")
input_names.append("c")
input_nodes.append(helper.make_tensor_value_info("c", TensorProto.FLOAT, list(c_shape)))
input_values.append(c_array)
gemm_node = helper.make_node("Gemm", input_names, ["out"])
graph = helper.make_graph(
[gemm_node],
"gemm_test",
inputs=input_nodes,
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
)
model = helper.make_model(graph, producer_name="gemm_test")
verify_with_ort_with_inputs(model, input_values, freeze_params=freeze_params)
@tvm.testing.uses_gpu
def test_gemm():
verify_gemm(a_shape=(4, 3), b_shape=(3, 4))
verify_gemm(a_shape=(4, 3), b_shape=(3, 4), c_shape=(4,))
verify_gemm(a_shape=(4, 3), b_shape=(3, 4), c_shape=(4,), freeze_params=True)
@tvm.testing.uses_gpu
def test_matmul():
a_shape = (4, 3)
b_shape = (3, 4)
out_shape = [a_shape[0], b_shape[1]]
a_array = np.random.uniform(size=a_shape).astype("float32")
b_array = np.random.uniform(size=b_shape).astype("float32")
mul_node = helper.make_node("MatMul", ["a", "b"], ["out"])
graph = helper.make_graph(
[mul_node],
"matmul_test",
inputs=[
helper.make_tensor_value_info("a", TensorProto.FLOAT, list(a_shape)),
helper.make_tensor_value_info("b", TensorProto.FLOAT, list(b_shape)),
],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
)
model = helper.make_model(graph, producer_name="matmul_test")
verify_with_ort_with_inputs(model, [a_array, b_array])
def verify_batch_matmul(a_shape, b_shape, out_shape, target, dev):
a_array = np.random.uniform(size=a_shape).astype("float32")
b_array = np.random.uniform(size=b_shape).astype("float32")
mul_node = helper.make_node("MatMul", ["a", "b"], ["out"])
graph = helper.make_graph(
[mul_node],
"matmul_test",
inputs=[
helper.make_tensor_value_info("a", TensorProto.FLOAT, list(a_shape)),
helper.make_tensor_value_info("b", TensorProto.FLOAT, list(b_shape)),
],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, out_shape)],
)
model = helper.make_model(graph, producer_name="matmul_test")
verify_with_ort_with_inputs(model, [a_array, b_array], use_vm=True, targets=[target])
# TODO(mbrookhart, electriclilies): Add CUDA as a target once batch matmul is fixed
@tvm.testing.parametrize_targets("llvm")
def test_batch_matmul(target, dev):
verify_batch_matmul((2, 3, 4, 3), (2, 3, 3, 4), (2, 3, 4, 4), target, dev)
verify_batch_matmul((2, 4, 3), (3, 4), (2, 4, 4), target, dev)
verify_batch_matmul((2, 3, 4, 3), (3, 4), (2, 3, 4, 4), target, dev)
# Test implicit broadcasting.
verify_batch_matmul((4, 3), (2, 3, 4), (2, 4, 4), target, dev)
verify_batch_matmul((2, 4, 3), (1, 3, 4), (2, 4, 4), target, dev)
verify_batch_matmul((1, 4, 3), (2, 3, 4), (2, 4, 4), target, dev)
def verify_simple_dynamic_model(a_shape, b_shape, target, dev):
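    """Import a small MatMul + Relu model with fully dynamic shapes (relay.Any for
    every dimension) and run it through the VM executor at several input sizes to
    check that a single compiled module handles all of them.
    """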
def verify_model(ex, a_shape, b_shape):
a_array = np.random.uniform(size=a_shape).astype("float32")
b_array = np.random.uniform(size=b_shape).astype("float32")
# matmul
out_np = np.matmul(a_array, b_array)
# relu
out_np[out_np < 0] = 0
tvm_out = ex.evaluate()(a_array, b_array).asnumpy()
tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5)
mul_node = helper.make_node("MatMul", ["a", "b"], ["out"])
relu_node = helper.make_node("Relu", ["out"], ["relu"])
a_array = np.random.uniform(size=a_shape).astype("float32")
b_array = np.random.uniform(size=b_shape).astype("float32")
# matmul
out_np = np.matmul(a_array, b_array)
graph = helper.make_graph(
[mul_node, relu_node],
"matmul_test",
inputs=[
helper.make_tensor_value_info("a", TensorProto.FLOAT, list(a_shape)),
helper.make_tensor_value_info("b", TensorProto.FLOAT, list(b_shape)),
],
outputs=[helper.make_tensor_value_info("relu", TensorProto.FLOAT, list(out_np.shape))],
)
model = helper.make_model(graph, producer_name="matmul_test")
a_anys = [relay.Any()] * len(a_shape)
b_anys = [relay.Any()] * len(b_shape)
mod, params = relay.frontend.from_onnx(model, {"a": a_anys, "b": b_anys})
ex = relay.create_executor("vm", mod=mod, device=dev, target=target)
verify_model(ex, a_shape, b_shape)
verify_model(ex, [a * 2 for a in a_shape], [b * 2 for b in b_shape])
verify_model(ex, [a * 3 for a in a_shape], [b * 3 for b in b_shape])
# TODO(mbrookhart, electriclilies): Add CUDA as a target once batch matmul is fixed
@tvm.testing.parametrize_targets("llvm")
def test_batch_matmul_dynamic_model(target, dev):
verify_simple_dynamic_model((2, 3, 4, 3), (2, 3, 3, 4), target, dev)
verify_simple_dynamic_model((2, 4, 3), (3, 4), target, dev)
verify_simple_dynamic_model((2, 3, 4, 3), (3, 4), target, dev)
def verify_lrn(shape, nsize, dtype, alpha=None, beta=None, bias=None):
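    """Build and check an LRN graph. When alpha/beta/bias are all None the node is
    created without those attributes, so the ONNX defaults (alpha=0.0001, beta=0.75,
    bias=1.0) apply; otherwise they are set explicitly.
    """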
in_array = np.random.uniform(size=shape).astype(dtype)
    if alpha is None and beta is None and bias is None:
alpha = 0.0001
beta = 0.75
bias = 1.0
node = onnx.helper.make_node("LRN", inputs=["in"], outputs=["out"], size=nsize)
else:
node = onnx.helper.make_node(
"LRN", inputs=["in"], outputs=["out"], alpha=alpha, beta=beta, bias=bias, size=nsize
)
graph = helper.make_graph(
[node],
"lrn_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(shape))],
)
model = helper.make_model(graph, producer_name="lrn_test")
verify_with_ort_with_inputs(model, [in_array])
@tvm.testing.uses_gpu
def test_lrn():
verify_lrn((5, 5, 5, 5), 3, "float32")
verify_lrn((5, 5, 5, 5), 3, "float32", alpha=0.0002, beta=0.5, bias=2.0)
def verify_instance_norm(shape, axis=1):
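    """Build and check an InstanceNormalization graph:
    y = gamma * (x - mean) / sqrt(var + epsilon) + beta, where mean and var are
    computed per channel over the spatial dimensions of each instance.
    """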
x = np.random.randn(*shape).astype(np.float32)
gamma = np.random.randn(shape[1]).astype(np.float32)
beta = np.random.randn(shape[1]).astype(np.float32)
epsilon = 1e-5
node = onnx.helper.make_node(
"InstanceNormalization",
inputs=["x", "gamma", "beta"],
outputs=["y"],
epsilon=epsilon,
)
graph = helper.make_graph(
[node],
"instance_norm_test",
inputs=[
helper.make_tensor_value_info("x", TensorProto.FLOAT, list(shape)),
helper.make_tensor_value_info("gamma", TensorProto.FLOAT, (shape[1],)),
helper.make_tensor_value_info("beta", TensorProto.FLOAT, (shape[1],)),
],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(shape))],
)
model = helper.make_model(graph, producer_name="instance_norm_test")
verify_with_ort_with_inputs(model, [x, gamma, beta], out_shape=[shape])
@tvm.testing.uses_gpu
def test_instance_norm():
verify_instance_norm((2, 3, 4, 5))
verify_instance_norm((32, 64, 80, 64))
verify_instance_norm((8, 6, 5))
verify_instance_norm((8, 7, 6, 5, 4))
def verify_upsample_nearest():
scale = 2
in_shape = (1, 1, 3, 3)
out_shape = (1, 1, 3 * scale, 3 * scale)
y = helper.make_node("Upsample", ["in"], ["out"], mode="nearest", scales=[1.0, 1.0, 2.0, 2.0])
in_array = np.random.uniform(size=in_shape).astype(np.float32)
graph = helper.make_graph(
[y],
"upsample_nearest_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
)
model = helper.make_model(graph, producer_name="upsample_nearest_test")
verify_with_ort_with_inputs(model, [in_array], [out_shape], opset=7)
def verify_upsample3d_nearest():
scale = 2
in_shape = (1, 1, 3, 3, 3)
out_shape = (1, 1, 3 * scale, 3 * scale, 3 * scale)
y = helper.make_node(
"Upsample", ["in"], ["out"], mode="nearest", scales=[1.0, 1.0, 2.0, 2.0, 2.0]
)
in_array = np.random.uniform(size=in_shape).astype(np.float32)
graph = helper.make_graph(
[y],
"upsample_nearest_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
)
model = helper.make_model(graph, producer_name="upsample_nearest_test")
# Upsample is deprecated after opset 9
verify_with_ort_with_inputs(model, [in_array], [out_shape], opset=7)
def verify_upsample_bilinear():
scale = 2
in_shape = (1, 1, 3, 3)
out_shape = (1, 1, 3 * scale, 3 * scale)
y = helper.make_node("Upsample", ["in"], ["out"], mode="linear", scales=[1.0, 1.0, 2.0, 2.0])
in_array = np.random.uniform(size=in_shape).astype(np.float32)
graph = helper.make_graph(
[y],
"upsample_bilinear_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
)
model = helper.make_model(graph, producer_name="upsample_bilinear_test")
verify_with_ort_with_inputs(model, [in_array], [out_shape], opset=7)
def verify_upsample3d_trilinear():
scale = 2
in_shape = (1, 1, 3, 3, 3)
out_shape = (1, 1, 3 * scale, 3 * scale, 3 * scale)
y = helper.make_node("Upsample", ["in", "scales"], ["out"], mode="linear")
scales = [1.0, 1.0, 2.0, 2.0, 2.0]
in_array = np.random.uniform(size=in_shape).astype(np.float32)
out_array = tvm.topi.testing.trilinear_resize3d_python(
in_array,
(3 * scale, 3 * scale, 3 * scale),
"NCDHW",
coordinate_transformation_mode="half_pixel",
)
ref_array = np.array(scales)
ref_node = helper.make_node(
"Constant",
inputs=[],
outputs=["scales"],
value=onnx.helper.make_tensor(
name="const_tensor",
data_type=TensorProto.FLOAT,
dims=ref_array.shape,
vals=ref_array.flatten().astype(float),
),
)
graph = helper.make_graph(
[ref_node, y],
"upsample_trilinear_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
)
model = helper.make_model(graph, producer_name="upsample_trilinear_test")
# TODO(jwfromm): Trilinear upsampling not supported in 1.0.0 onnxruntime.
# Replace topi comparison with verify_with_ort once we update.
for target, dev in tvm.testing.enabled_targets():
tvm_out = get_tvm_output(model, in_array, target, dev, out_shape, "float32")
tvm.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_upsample():
verify_upsample_nearest()
verify_upsample_bilinear()
verify_upsample3d_nearest()
verify_upsample3d_trilinear()
def verify_softmax(inshape, axis):
opname = "Softmax"
indata = np.random.uniform(size=inshape).astype(np.float32)
outshape = inshape
y = helper.make_node(opname, ["in"], ["out"])
if axis is not None:
axis_attr = helper.make_attribute("axis", axis)
y.attribute.append(axis_attr)
graph = helper.make_graph(
[y],
opname + "_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(indata.shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outshape))],
)
model = helper.make_model(graph, producer_name=opname + "_test")
verify_with_ort_with_inputs(model, [indata])
@tvm.testing.uses_gpu
def test_softmax():
verify_softmax((1, 10), None)
verify_softmax((1, 10), 1)
def verify_min(input_dim):
dtype = "float32"
a_np1 = np.random.uniform(size=input_dim).astype(dtype)
a_np2 = np.random.uniform(size=input_dim).astype(dtype)
a_np3 = np.random.uniform(size=input_dim).astype(dtype)
min_node = helper.make_node("Min", ["a_np1", "a_np2", "a_np3"], ["out"])
graph = helper.make_graph(
[min_node],
"Min_test",
inputs=[
helper.make_tensor_value_info("a_np1", TensorProto.FLOAT, list(input_dim)),
helper.make_tensor_value_info("a_np2", TensorProto.FLOAT, list(input_dim)),
helper.make_tensor_value_info("a_np3", TensorProto.FLOAT, list(input_dim)),
],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(input_dim))],
)
model = helper.make_model(graph, producer_name="Min_test")
verify_with_ort_with_inputs(model, [a_np1, a_np2, a_np3])
@tvm.testing.uses_gpu
def test_forward_min():
verify_min((1, 3, 20, 20))
verify_min((20, 20))
def verify_max(input_dim):
dtype = "float32"
a_np1 = np.random.uniform(size=input_dim).astype(dtype)
a_np2 = np.random.uniform(size=input_dim).astype(dtype)
a_np3 = np.random.uniform(size=input_dim).astype(dtype)
max_node = helper.make_node("Max", ["a_np1", "a_np2", "a_np3"], ["out"])
graph = helper.make_graph(
[max_node],
"Max_test",
inputs=[
helper.make_tensor_value_info("a_np1", TensorProto.FLOAT, list(input_dim)),
helper.make_tensor_value_info("a_np2", TensorProto.FLOAT, list(input_dim)),
helper.make_tensor_value_info("a_np3", TensorProto.FLOAT, list(input_dim)),
],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(input_dim))],
)
model = helper.make_model(graph, producer_name="Max_test")
verify_with_ort_with_inputs(model, [a_np1, a_np2, a_np3])
@tvm.testing.uses_gpu
def test_forward_max():
verify_max((1, 3, 20, 20))
verify_max((20, 20))
def verify_mean(input_dim):
dtype = "float32"
a_np1 = np.random.uniform(size=input_dim).astype(dtype)
a_np2 = np.random.uniform(size=input_dim).astype(dtype)
a_np3 = np.random.uniform(size=input_dim).astype(dtype)
mean_node = helper.make_node("Mean", ["a_np1", "a_np2", "a_np3"], ["out"])
graph = helper.make_graph(
[mean_node],
"Mean_test",
inputs=[
helper.make_tensor_value_info("a_np1", TensorProto.FLOAT, list(input_dim)),
helper.make_tensor_value_info("a_np2", TensorProto.FLOAT, list(input_dim)),
helper.make_tensor_value_info("a_np3", TensorProto.FLOAT, list(input_dim)),
],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(input_dim))],
)
model = helper.make_model(graph, producer_name="Mean_test")
verify_with_ort_with_inputs(model, [a_np1, a_np2, a_np3])
@tvm.testing.uses_gpu
def test_forward_mean():
verify_mean((1, 3, 20, 20))
verify_mean((20, 20))
def verify_hardsigmoid(input_dim, alpha, beta):
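    """Build and check a HardSigmoid graph: y = max(0, min(1, alpha * x + beta))."""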
dtype = "float32"
a_np1 = np.random.uniform(size=input_dim).astype(dtype)
hardsigmoid_node = helper.make_node("HardSigmoid", ["a_np1"], ["out"], alpha=alpha, beta=beta)
graph = helper.make_graph(
[hardsigmoid_node],
"HardSigmoid_test",
inputs=[helper.make_tensor_value_info("a_np1", TensorProto.FLOAT, list(input_dim))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(input_dim))],
)
model = helper.make_model(graph, producer_name="HardSigmoid_test")
verify_with_ort_with_inputs(model, [a_np1])
@tvm.testing.uses_gpu
def test_forward_hardsigmoid():
verify_hardsigmoid((1, 3, 20, 20), 0.5, 0.6)
verify_hardsigmoid((20, 20), 0.3, 0.4)
def verify_argreduce(input_dim, op_name, axis=None, keepdims=None):
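    """Build and check an ArgMin/ArgMax graph. The expected output shape follows the
    ONNX defaults (axis 0, keepdims=1) unless overridden, with int64 indices.
    """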
a_np1 = np.random.uniform(-10, 10, input_dim).astype(np.int32)
out_shape = list(a_np1.shape)
def_axis = axis if axis is not None else 0
    if keepdims == 1 or keepdims is None:
out_shape[def_axis] = 1
else:
out_shape.pop(def_axis)
node = onnx.helper.make_node(op_name, inputs=["a_np1"], outputs=["out"])
if keepdims is not None:
keepdims_attr = helper.make_attribute("keepdims", keepdims)
node.attribute.append(keepdims_attr)
if axis is not None:
axis_attr = helper.make_attribute("axis", axis)
node.attribute.append(axis_attr)
graph = helper.make_graph(
[node],
"argreduce_test",
inputs=[helper.make_tensor_value_info("a_np1", TensorProto.INT32, list(a_np1.shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.INT64, list(out_shape))],
)
model = helper.make_model(graph, producer_name="argreduce_test")
verify_with_ort_with_inputs(model, [a_np1])
# TODO (mbrookhart, electriclilies) Fix argmin on GPU and enable this test
# @tvm.testing.uses_gpu
def test_forward_arg_min_max():
"""Verify argmin and argmax"""
verify_argreduce([3, 4, 4], "ArgMin")
verify_argreduce([3, 4, 4], "ArgMax")
verify_argreduce([3, 4, 4], "ArgMin", axis=1)
verify_argreduce([3, 4, 4], "ArgMax", axis=0)
verify_argreduce([3, 4, 4], "ArgMin", keepdims=0)
verify_argreduce([3, 4, 4], "ArgMax", keepdims=1)
for axis in [None, 0, 1, 2]:
for keepdims in [None, True, False]:
verify_argreduce([3, 4, 4], "ArgMin", axis, keepdims)
verify_argreduce([3, 4, 4], "ArgMax", axis, keepdims)
def verify_constantofshape(input_dim, value, dtype):
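    """Build and check a ConstantOfShape graph: the 1-D int64 input holds the target
    shape, and the output is a tensor of that shape filled with `value` in `dtype`.
    """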
fill_node = helper.make_node(
"ConstantOfShape",
["input"],
["output"],
value=helper.make_tensor(
"value", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], (1,), (value,)
),
)
inputs = [helper.make_tensor_value_info("input", TensorProto.INT64, [len(input_dim)])]
graph = helper.make_graph(
[fill_node],
"fill_test",
inputs,
outputs=[
helper.make_tensor_value_info(
"output", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], input_dim
)
],
)
model = helper.make_model(graph, producer_name="fill_test")
input_np = np.array(input_dim).astype("int64")
verify_with_ort_with_inputs(model, [input_np], use_vm=True)
@tvm.testing.uses_gpu
def test_constantofshape():
verify_constantofshape((2, 3, 4, 5), 10, "float32")
verify_constantofshape((3, 3), 0, "int32")
verify_constantofshape((1, 2, 3), -1, "float32")
def verify_pad(indata, pads, mode="constant", value=0.0):
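    """Build and check an opset-2 style Pad graph where pads/value are attributes.
    The ONNX `pads` layout is [x1_begin, x2_begin, ..., x1_end, x2_end, ...], which is
    converted to NumPy's per-axis (before, after) pairs for the reference result.
    """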
indata = np.array(indata).astype(np.float32)
    # numpy expected result
len_dim = len(pads) // 2
np_pads = [(pads[i], pads[i + len_dim]) for i in range(len_dim)]
# onnx graph
if mode in ["edge", "reflect"]:
outdata = np.pad(indata, pad_width=np_pads, mode=mode)
node = helper.make_node(
"Pad",
inputs=["input"],
outputs=["output"],
mode=mode,
pads=pads,
)
else:
outdata = np.pad(indata, pad_width=np_pads, mode="constant", constant_values=value)
node = helper.make_node(
"Pad", inputs=["input"], outputs=["output"], mode="constant", pads=pads, value=value
)
graph = helper.make_graph(
[node],
"pad_test",
inputs=[helper.make_tensor_value_info("input", TensorProto.FLOAT, list(indata.shape))],
outputs=[helper.make_tensor_value_info("output", TensorProto.FLOAT, list(outdata.shape))],
)
model = helper.make_model(graph, producer_name="pad_test")
verify_with_ort_with_inputs(model, [indata], [outdata.shape], dtype="float32", opset=2)
def verify_pad_v11(indata, pads, mode="constant", value=0.0):
indata = np.array(indata).astype(np.float32)
    # numpy expected result
len_dim = len(pads) // 2
np_pads = [(pads[i], pads[i + len_dim]) for i in range(len_dim)]
pads = np.array(pads)
# onnx graph
if mode in ["edge", "reflect"]:
inputs = [indata]
outdata = np.pad(indata, pad_width=np_pads, mode=mode)
node = helper.make_node("Pad", inputs=["input", "pads"], outputs=["output"], mode=mode)
graph = helper.make_graph(
[node],
"pad_test",
inputs=[
helper.make_tensor_value_info("input", TensorProto.FLOAT, list(indata.shape)),
helper.make_tensor_value_info("pads", TensorProto.INT64, (len(pads),)),
],
initializer=[helper.make_tensor("pads", TensorProto.INT64, (len(pads),), pads)],
outputs=[
helper.make_tensor_value_info("output", TensorProto.FLOAT, list(outdata.shape))
],
)
else:
inputs = [indata]
outdata = np.pad(indata, pad_width=np_pads, mode="constant", constant_values=value)
node = helper.make_node(
"Pad", inputs=["input", "pads", "constant_value"], outputs=["output"], mode="constant"
)
graph = helper.make_graph(
[node],
"pad_test",
inputs=[
helper.make_tensor_value_info("input", TensorProto.FLOAT, list(indata.shape)),
helper.make_tensor_value_info("pads", TensorProto.INT64, (len(pads),)),
helper.make_tensor_value_info("constant_value", TensorProto.FLOAT, (1,)),
],
initializer=[
helper.make_tensor("pads", TensorProto.INT64, (len(pads),), pads),
helper.make_tensor("constant_value", TensorProto.FLOAT, (1,), [value]),
],
outputs=[
helper.make_tensor_value_info("output", TensorProto.FLOAT, list(outdata.shape))
],
)
model = helper.make_model(graph, producer_name="pad_test")
verify_with_ort_with_inputs(model, inputs, opset=11, use_vm=True)
@tvm.testing.uses_gpu
def test_pad():
verify_pad(np.random.randn(2, 2).astype(np.float32), [0, 1, 0, 0], "constant", 0.0)
verify_pad(np.random.randn(2, 3).astype(np.float32), [1, 0, 0, 1], "constant", 0.0)
verify_pad(np.random.randn(3, 2).astype(np.float32), [0, 0, 1, 0], "constant", 5.0)
verify_pad(np.random.randn(1, 3, 4, 5).astype(np.float32), [0, 0, 1, 1, 0, 0, 1, 1], "edge")
verify_pad(np.random.randn(1, 3, 4, 5).astype(np.float32), [0, 0, 1, 1, 0, 0, 1, 1], "reflect")
verify_pad_v11(np.random.randn(2, 2).astype(np.float32), [0, 1, 0, 0], "constant", 0.0)
verify_pad_v11(np.random.randn(2, 3).astype(np.float32), [1, 0, 0, 1], "constant", 0.0)
verify_pad_v11(np.random.randn(3, 2).astype(np.float32), [0, 0, 1, 0], "constant", 5.0)
verify_pad_v11(np.random.randn(1, 3, 4, 5).astype(np.float32), [0, 0, 1, 1, 0, 0, 1, 1], "edge")
verify_pad_v11(
np.random.randn(1, 3, 4, 5).astype(np.float32), [0, 0, 1, 1, 0, 0, 1, 1], "reflect"
)
def verify_reduce_func(func, data, axis, keepdims):
inshape = data.shape
outshape = np.sum(data, axis=axis, keepdims=keepdims == 1).shape
if axis:
node = onnx.helper.make_node(
func, inputs=["x"], outputs=["y"], axes=axis, keepdims=keepdims
)
else:
node = onnx.helper.make_node(func, inputs=["x"], outputs=["y"], keepdims=keepdims)
graph = helper.make_graph(
[node],
"reduce_test",
inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(inshape))],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(outshape))],
)
model = helper.make_model(graph, producer_name="reduce_test")
verify_with_ort_with_inputs(model, [data], [outshape], opset=11)
@tvm.testing.uses_gpu
def test_all_reduce_funcs():
funcs = [
"ReduceMax",
"ReduceMean",
"ReduceMin",
"ReduceProd",
"ReduceSum",
"ReduceSumSquare",
"ReduceLogSum",
"ReduceLogSumExp",
"ReduceL1",
"ReduceL2",
]
for func in funcs:
for keepdims in [True, False]:
verify_reduce_func(
func, np.random.randn(3, 2, 2).astype(np.float32), axis=None, keepdims=keepdims
)
verify_reduce_func(
func, np.random.randn(3, 2, 3).astype(np.float32), axis=None, keepdims=keepdims
)
verify_reduce_func(
func, np.random.randn(3, 3, 3).astype(np.float32), axis=(1,), keepdims=keepdims
)
verify_reduce_func(
func, np.random.randn(3, 3, 3, 1).astype(np.float32), axis=(1, 2), keepdims=keepdims
)
verify_reduce_func(
func, np.random.randn(3, 3, 3, 1).astype(np.float32), axis=(1,), keepdims=keepdims
)
verify_reduce_func(
func, np.random.randn(1, 3, 4, 1).astype(np.float32), axis=(1,), keepdims=keepdims
)
def verify_split(indata, outdatas, split, axis=0, pass_split=True, opset=11):
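    """Build and check a Split graph. For opset >= 13 the split sizes are passed as a
    second int64 input (with an initializer); for older opsets they go in the `split`
    attribute. When `split` is falsy the op splits evenly into len(outdatas) parts.
    """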
indata = np.array(indata).astype(np.float32)
outdatas = [np.array(o).astype(np.float32) for o in outdatas]
inputs = [helper.make_tensor_value_info("input", TensorProto.FLOAT, list(indata.shape))]
input_names = ["input"]
initializer = []
if split:
split_index = range(len(split))
else:
split_index = range(len(outdatas))
if pass_split:
if opset >= 13:
input_names.append("split")
np_split = np.array(split).astype(np.int64)
inputs.append(
helper.make_tensor_value_info("split", TensorProto.INT64, list(np_split.shape))
)
indata = [indata, np_split]
initializer.append(
helper.make_tensor("split", TensorProto.INT64, list(np_split.shape), np_split)
)
node = helper.make_node(
"Split",
inputs=input_names,
outputs=["output_{}".format(i) for i in range(len(split_index))],
axis=axis,
)
if pass_split and opset < 13:
split_attr = helper.make_attribute("split", split)
node.attribute.append(split_attr)
graph = helper.make_graph(
[node],
"split_test",
inputs=inputs,
initializer=initializer,
outputs=[
helper.make_tensor_value_info(
"output_{}".format(i), TensorProto.FLOAT, list(outdatas[i].shape)
)
for i in range(len(split_index))
],
)
model = helper.make_model(graph, producer_name="split_test")
verify_with_ort_with_inputs(model, indata, out_shape=list(range(len(split_index))), opset=opset)
@tvm.testing.uses_gpu
def test_split():
# 1D
verify_split([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [2, 2, 2], 0)
verify_split(
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [2, 2, 2], 0, False
)
verify_split([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [[1.0, 2.0], [3.0], [4.0, 5.0, 6.0]], [2, 1, 3], 0)
# 2D
verify_split(
[[1.0, 2.0, 3.0, 4.0], [7.0, 8.0, 9.0, 10.0]],
[[[1.0, 2.0], [7.0, 8.0]], [[3.0, 4.0], [9.0, 10.0]]],
[2, 2],
1,
)
# Split evenly (unstack)
verify_split([1, 2, 3], [[1], [2], [3]], False, 0, False)
# Split a single value to a single value
verify_split([1], [[1]], [1], pass_split=True)
@tvm.testing.uses_gpu
def test_binary_ops():
in_shape = (1, 2, 3, 3)
dtype = "float32"
out_shape = in_shape
def verify_binary_ops(op, x, y, out_type="float32"):
z = helper.make_node(op, ["in1", "in2"], ["out"])
graph = helper.make_graph(
[z],
"_test",
inputs=[
helper.make_tensor_value_info("in1", TensorProto.FLOAT, x.shape),
helper.make_tensor_value_info("in2", TensorProto.FLOAT, y.shape),
],
outputs=[
helper.make_tensor_value_info(
"out", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(out_type)], list(out_shape)
)
],
)
model = helper.make_model(graph, producer_name="_test")
verify_with_ort_with_inputs(model, [x, y])
x = np.random.uniform(size=in_shape).astype(dtype)
y = np.random.uniform(size=in_shape).astype(dtype)
z = np.random.uniform(size=(3,)).astype(dtype)
verify_binary_ops("Add", x, y)
verify_binary_ops("Add", x, z)
verify_binary_ops("Sub", x, y)
verify_binary_ops("Sub", x, z)
verify_binary_ops("Mul", x, y)
verify_binary_ops("Mul", x, z)
verify_binary_ops("Div", x, y)
verify_binary_ops("Div", x, z)
verify_binary_ops("Sum", x, y)
verify_binary_ops("Sum", x, z)
verify_binary_ops("Greater", x, y, "bool")
verify_binary_ops("Greater", x, z, "bool")
verify_binary_ops("Less", x, y, "bool")
verify_binary_ops("Less", x, z, "bool")
verify_binary_ops("Equal", x, y, "bool")
verify_binary_ops("Equal", x, z, "bool")
@tvm.testing.uses_gpu
def test_unary_ops():
in_shape = (1, 2, 3, 3)
dtype = "float32"
out_shape = in_shape
def verify_unary_ops(op, x, rtol=1e-5, atol=1e-5, dtype="float32"):
x = x.astype(dtype)
ONNX_DTYPE = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)]
z = helper.make_node(op, ["in1"], ["out"])
graph = helper.make_graph(
[z],
"_test",
inputs=[
helper.make_tensor_value_info("in1", ONNX_DTYPE, list(in_shape)),
],
outputs=[helper.make_tensor_value_info("out", ONNX_DTYPE, list(out_shape))],
)
model = helper.make_model(graph, producer_name="_test")
verify_with_ort_with_inputs(model, [x], rtol=rtol, atol=atol)
x = np.random.uniform(size=in_shape)
verify_unary_ops("Neg", x)
verify_unary_ops("Abs", x)
verify_unary_ops("Reciprocal", x)
verify_unary_ops("Reciprocal", x, dtype="float16")
verify_unary_ops("Sqrt", x)
verify_unary_ops("Relu", x)
verify_unary_ops("Exp", x)
verify_unary_ops("Log", x)
verify_unary_ops("Log", x)
verify_unary_ops("Acos", x)
verify_unary_ops("Acosh", x)
verify_unary_ops("Asin", x)
verify_unary_ops("Asinh", x)
verify_unary_ops("Atan", x)
verify_unary_ops("Atanh", x)
verify_unary_ops("Cos", x)
verify_unary_ops("Cosh", x)
verify_unary_ops("Sin", x)
verify_unary_ops("Sinh", x)
verify_unary_ops("Tan", x)
verify_unary_ops("Tanh", x)
verify_unary_ops("Sigmoid", x)
verify_unary_ops("Softsign", x)
@tvm.testing.uses_gpu
def test_leaky_relu():
def leaky_relu_x(x, alpha):
return np.where(x >= 0, x, x * alpha)
_test_onnx_op_elementwise(
(2, 4, 5, 6), leaky_relu_x, {"alpha": 0.25}, "float32", "LeakyRelu", {"alpha": 0.25}
)
@tvm.testing.uses_gpu
def test_elu():
def elu_x(x, alpha):
return np.where(x > 0, x, alpha * (np.exp(x) - 1.0))
_test_onnx_op_elementwise(
(2, 4, 5, 6), elu_x, {"alpha": 0.25}, "float32", "Elu", {"alpha": 0.25}
)
@tvm.testing.uses_gpu
def test_selu():
def selu_x(x, alpha, gamma):
return gamma * np.where(x > 0, x, alpha * (np.exp(x) - 1.0))
_test_onnx_op_elementwise(
(2, 4, 5, 6),
selu_x,
{"alpha": 0.25, "gamma": 0.3},
"float32",
"Selu",
{"alpha": 0.25, "gamma": 0.3},
)
@tvm.testing.uses_gpu
def test_prelu():
def verify_prelu(x_shape, a_shape):
node = helper.make_node("PRelu", inputs=["X", "slope"], outputs=["Y"])
graph = helper.make_graph(
[node],
"prelu_test",
inputs=[
helper.make_tensor_value_info("X", TensorProto.FLOAT, list(x_shape)),
helper.make_tensor_value_info("slope", TensorProto.FLOAT, list(a_shape)),
],
outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(x_shape))],
)
model = helper.make_model(graph, producer_name="prelu_test")
verify_with_ort(
model,
[x_shape, a_shape],
out_shape=[list(x_shape)],
use_vm=True,
convert_to_static=True,
)
verify_prelu([3, 4, 5, 6], [1, 4, 1, 1])
verify_prelu([1, 8, 5, 6], [1, 8, 1, 1])
verify_prelu([2, 12, 16, 16], [1, 12, 1, 1])
verify_prelu([2, 12, 16, 16], [1]) # Test alpha broadcasting.
verify_prelu([3, 1], [3, 1]) # Test non NCHW workload.
@tvm.testing.uses_gpu
def test_ThresholdedRelu():
def ThresholdedRelu_x(x, alpha):
out_np = np.clip(x, alpha, np.inf)
out_np[out_np == alpha] = 0
return out_np
_test_onnx_op_elementwise(
(2, 4, 5, 6),
ThresholdedRelu_x,
{"alpha": 0.25},
"float32",
"ThresholdedRelu",
{"alpha": 0.25},
)
@tvm.testing.uses_gpu
def test_LogSoftmax():
_test_onnx_op_elementwise(
(1, 4), tvm.topi.testing.log_softmax_python, {}, "float32", "LogSoftmax", {"axis": 1}
)
def check_torch_conversion(model, input_size):
dummy_input = torch.randn(*input_size)
file_name = "{}.onnx".format(model.__name__)
# Set verbose=True for more output
torch.onnx.export(model(), dummy_input, file_name, export_params=True, verbose=False)
onnx_model = onnx.load(file_name)
input_data = np.random.uniform(size=input_size).astype("float32")
verify_with_ort_with_inputs(onnx_model, [input_data], apply_softmax=True)
@tvm.testing.uses_gpu
def test_resnet():
check_torch_conversion(torchvision.models.resnet18, (1, 3, 224, 224))
# check_torch_conversion(torchvision.models.resnet101, (1,3,224,224))
# def test_alexnet():
# Torch's ONNX export does not support the adaptive pooling used by AlexNet?
# check_torch_conversion(torchvision.models.alexnet, (1,3,224,224))
# Torch's ONNX export does not support the adaptive pooling used by vgg16?
# def test_vgg16():
# check_torch_conversion(torchvision.models.vgg16, (1,3,224,224))
# TODO(@jroesch): Update Torch + ONNX to support this import.
# def test_squeezenet():
# # Torch's ONNX export does not support the max pooling used by SqueezeNet
# check_torch_conversion(torchvision.models.squeezenet1_0, (1,3,224,224))
@tvm.testing.uses_gpu
def test_densenet():
check_torch_conversion(torchvision.models.densenet161, (1, 3, 224, 224))
@tvm.testing.uses_gpu
def test_inception():
check_torch_conversion(torchvision.models.inception_v3, (1, 3, 224, 224))
# TODO(@jroesch): Update Torch + ONNX to support this import.
# def test_googlenet():
# check_torch_conversion(torchvision.models.googlenet, (1,3,224,224))
# TODO(@jroesch): Update Torch + ONNX to support this import.
# def test_shufflenetv2():
# check_torch_conversion(torchvision.models.shufflenetv2, (1,3,224,224))
@tvm.testing.uses_gpu
def test_sign():
def Sign_x(x):
return np.sign(x)
_test_onnx_op_elementwise((3, 4, 5, 6), Sign_x, {}, "float32", "Sign", {})
def verify_not(indata, dtype):
x = indata.astype(dtype)
node = helper.make_node(
"Not",
inputs=["in"],
outputs=["out"],
)
graph = helper.make_graph(
[node],
"not_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.BOOL, list(x.shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(x.shape))],
)
model = helper.make_model(graph, producer_name="not_test")
verify_with_ort_with_inputs(model, [x])
@tvm.testing.uses_gpu
def test_not():
# 2d
verify_not(indata=(np.random.randn(3, 4) > 0), dtype=bool)
# 3d
verify_not(indata=(np.random.randn(3, 4, 5) > 0), dtype=bool)
# 4d
verify_not(indata=(np.random.randn(3, 4, 5, 6) > 0), dtype=bool)
def verify_and(indata, dtype):
x = indata[0].astype(dtype)
y = indata[1].astype(dtype)
outdata = np.logical_and(x, y)
node = helper.make_node(
"And",
inputs=["in1", "in2"],
outputs=["out"],
)
graph = helper.make_graph(
[node],
"and_test",
inputs=[
helper.make_tensor_value_info("in1", TensorProto.BOOL, list(x.shape)),
helper.make_tensor_value_info("in2", TensorProto.BOOL, list(y.shape)),
],
outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(outdata.shape))],
)
model = helper.make_model(graph, producer_name="and_test")
verify_with_ort_with_inputs(model, [x, y], [outdata.shape])
@tvm.testing.uses_gpu
def test_and():
# 2d
x = np.random.randn(3, 4) > 0
y = np.random.randn(3, 4) > 0
verify_and(indata=[x, y], dtype=bool)
# 3d
x = np.random.randn(3, 4, 5) > 0
y = np.random.randn(3, 4, 5) > 0
verify_and(indata=[x, y], dtype=bool)
# 4d
x = np.random.randn(3, 4, 5, 6) > 0
y = np.random.randn(3, 4, 5, 6) > 0
verify_and(indata=[x, y], dtype=bool)
# 3d vs 1d
x = np.random.randn(3, 4, 5) > 0
y = np.random.randn(5) > 0
verify_and(indata=[x, y], dtype=bool)
# 3d vs 2d
x = np.random.randn(3, 4, 5) > 0
y = np.random.randn(4, 5) > 0
verify_and(indata=[x, y], dtype=bool)
def verify_tile_v6(indata, repeats, outdata):
node = helper.make_node("Tile", inputs=["input", "repeats"], outputs=["out"])
graph = helper.make_graph(
[node],
"tile_test",
inputs=[
helper.make_tensor_value_info("input", TensorProto.FLOAT, list(indata.shape)),
helper.make_tensor_value_info("repeats", TensorProto.INT64, list(repeats.shape)),
],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))],
)
model = helper.make_model(graph, producer_name="tile_test")
verify_with_ort_with_inputs(model, [indata, repeats], use_vm=True, opset=6)
@tvm.testing.uses_gpu
def test_tile():
x = np.random.rand(2, 3, 4, 5).astype(np.float32)
repeats = np.random.randint(low=1, high=10, size=(np.ndim(x),)).astype(np.int64)
z = np.tile(x, repeats)
verify_tile_v6(x, repeats, z)
def verify_erf(indata, outdata):
node = helper.make_node("Erf", inputs=["in"], outputs=["out"])
graph = helper.make_graph(
[node],
"erf_test",
inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(indata.shape))],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))],
)
model = helper.make_model(graph, producer_name="erf_test")
verify_with_ort_with_inputs(model, [indata], [outdata.shape])
@tvm.testing.uses_gpu
def test_erf():
x = np.random.rand(2, 3, 4, 6).astype(np.float32)
z = scipy.special.erf(x)
verify_erf(x, z)
def verify_where(condition, x, y, dtype, outdata, dynamic=False):
node_list = []
where_inputs = ["condition", "x", "y"]
if dynamic:
shape_node = helper.make_node("Shape", ["x"], ["shape"])
reshape_node = helper.make_node("Reshape", ["x", "shape"], ["X"])
where_inputs[1] = "X"
node_list += [shape_node, reshape_node]
node = helper.make_node("Where", inputs=where_inputs, outputs=["out"])
node_list.append(node)
graph = helper.make_graph(
node_list,
"where_test",
inputs=[
helper.make_tensor_value_info("condition", TensorProto.BOOL, list(condition.shape)),
helper.make_tensor_value_info("x", dtype, list(x.shape)),
helper.make_tensor_value_info("y", dtype, list(y.shape)),
],
outputs=[helper.make_tensor_value_info("out", dtype, list(outdata.shape))],
)
model = helper.make_model(graph, producer_name="where_test")
verify_with_ort_with_inputs(model, [condition, x, y], [outdata.shape], use_vm=True)
@tvm.testing.uses_gpu
def test_where():
    condition = np.array([[1, 0], [1, 1]], dtype=bool)
x = np.array([[1, 2], [3, 4]], dtype=np.int64)
y = np.array([[9, 8], [7, 6]], dtype=np.int64)
outdata = np.where(condition, x, y)
verify_where(condition, x, y, TensorProto.INT64, outdata)
x = np.array([[1, 2], [3, 4]], dtype=np.float32)
y = np.array([[9, 8], [7, 6]], dtype=np.float32)
outdata = np.where(condition, x, y)
verify_where(condition, x, y, TensorProto.FLOAT, outdata)
x = np.array(1, dtype=np.float32)
y = np.array([2], dtype=np.float32)
outdata = np.where(condition, x, y)
verify_where(condition, x, y, TensorProto.FLOAT, outdata)
x = np.array([2], dtype=np.float32)
y = np.array(1, dtype=np.float32)
outdata = np.where(condition, x, y)
verify_where(condition, x, y, TensorProto.FLOAT, outdata)
    condition = np.array(1, dtype=bool)
x = np.array([[1, 2], [3, 4]], dtype=np.float32)
y = np.array([[5, 6], [7, 8]], dtype=np.float32)
outdata = np.where(condition, x, y)
verify_where(condition, x, y, TensorProto.FLOAT, outdata)
x = np.array([[1, 2], [3, 4]], dtype=np.float32)
y = np.array([[1], [7]], dtype=np.float32)
outdata = np.where(condition, x, y)
verify_where(condition, x, y, TensorProto.FLOAT, outdata)
verify_where(condition, x, y, TensorProto.FLOAT, outdata, dynamic=True)
def verify_or(indata, dtype):
x = indata[0].astype(dtype)
y = indata[1].astype(dtype)
outdata = np.logical_or(x, y)
node = helper.make_node(
"Or",
inputs=["in1", "in2"],
outputs=["out"],
)
graph = helper.make_graph(
[node],
"or_test",
inputs=[
helper.make_tensor_value_info("in1", TensorProto.BOOL, list(x.shape)),
helper.make_tensor_value_info("in2", TensorProto.BOOL, list(y.shape)),
],
outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(outdata.shape))],
)
model = helper.make_model(graph, producer_name="or_test")
verify_with_ort_with_inputs(model, [x, y], [outdata.shape])
@tvm.testing.uses_gpu
def test_or():
# 2d
x = np.random.randn(3, 4) > 0
y = np.random.randn(3, 4) > 0
verify_or(indata=[x, y], dtype=bool)
# 3d
x = np.random.randn(3, 4, 5) > 0
y = np.random.randn(3, 4, 5) > 0
verify_or(indata=[x, y], dtype=bool)
# 4d
x = np.random.randn(3, 4, 5, 6) > 0
y = np.random.randn(3, 4, 5, 6) > 0
verify_or(indata=[x, y], dtype=bool)
# 3d vs 1d
x = np.random.randn(3, 4, 5) > 0
y = np.random.randn(5) > 0
verify_or(indata=[x, y], dtype=bool)
# 3d vs 2d
x = np.random.randn(3, 4, 5) > 0
y = np.random.randn(4, 5) > 0
verify_or(indata=[x, y], dtype=bool)
@tvm.testing.uses_gpu
def test_batch_norm():
def verify_batch_norm(in_shape):
batchnorm = onnx.helper.make_node(
"BatchNormalization", inputs=["x", "scale", "B", "mean", "var"], outputs=["Y"]
)
graph = helper.make_graph(
[batchnorm],
"batchnorm_test",
inputs=[
helper.make_tensor_value_info("x", TensorProto.FLOAT, list(in_shape)),
helper.make_tensor_value_info("scale", TensorProto.FLOAT, [in_shape[1]]),
helper.make_tensor_value_info("B", TensorProto.FLOAT, [in_shape[1]]),
helper.make_tensor_value_info("mean", TensorProto.FLOAT, [in_shape[1]]),
helper.make_tensor_value_info("var", TensorProto.FLOAT, [in_shape[1]]),
],
outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(in_shape))],
)
model = helper.make_model(graph, producer_name="batchnorm_test")
# X, scale, b, mean, var
inshapes = [in_shape, in_shape[1], in_shape[1], in_shape[1], in_shape[1]]
verify_with_ort(model, inshapes, out_shape=[in_shape])
verify_batch_norm([1, 3, 224, 224])
verify_batch_norm([1, 3, 24, 24])
verify_batch_norm([16, 3, 24, 24])
verify_batch_norm([16, 16, 24, 24])
verify_batch_norm([16, 16, 10, 10])
@tvm.testing.uses_gpu
def test_batch_norm_dynamic_subgraph():
def verify_batch_norm_dynamic_subgraph(in_shape, o_shape):
batchnorm = onnx.helper.make_node(
"BatchNormalization", inputs=["x", "scale", "B", "mean", "var"], outputs=["Y"]
)
shape_node = helper.make_node("Shape", ["Y"], ["shape"])
reshape_node = helper.make_node("Reshape", ["in", "shape"], ["out"])
graph = helper.make_graph(
[batchnorm, shape_node, reshape_node],
"batchnorm_test",
inputs=[
helper.make_tensor_value_info("x", TensorProto.FLOAT, list(in_shape)),
helper.make_tensor_value_info("in", TensorProto.FLOAT, list(o_shape)),
helper.make_tensor_value_info("scale", TensorProto.FLOAT, [in_shape[1]]),
helper.make_tensor_value_info("B", TensorProto.FLOAT, [in_shape[1]]),
helper.make_tensor_value_info("mean", TensorProto.FLOAT, [in_shape[1]]),
helper.make_tensor_value_info("var", TensorProto.FLOAT, [in_shape[1]]),
],
outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(in_shape))],
)
model = helper.make_model(graph, producer_name="batchnorm_test")
# X, inp, scale, b, mean, var
inshapes = [in_shape, o_shape, in_shape[1], in_shape[1], in_shape[1], in_shape[1]]
verify_with_ort(model, inshapes, out_shape=[in_shape], use_vm=True)
verify_batch_norm_dynamic_subgraph([16, 16, 10, 10], [160, 160])
def verify_conv(
x_shape,
w_shape,
y_shape,
padding,
kernel_shape,
strides,
dilations,
auto_pad="NOTSET",
unset_pad=False,
):
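    """Build and check a Conv graph in one of three modes: explicit `pads`, automatic
    padding via `auto_pad` (when `padding` is None), or no padding attributes at all
    (`unset_pad`), relying on the ONNX defaults.
    """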
if unset_pad:
node = helper.make_node(
"Conv",
inputs=["x", "W"],
outputs=["y"],
kernel_shape=kernel_shape,
# Default values for other attributes:
strides=strides,
dilations=dilations,
# groups=1
)
elif padding is None:
        # Auto-padding with unset default attributes
kwargs = {}
if not all([s == 1 for s in strides]):
kwargs["strides"] = strides
if not all([d == 1 for d in dilations]):
kwargs["dilations"] = dilations
node = helper.make_node(
"Conv",
inputs=["x", "W"],
outputs=["y"],
# Default values for other attributes:
auto_pad=auto_pad,
**kwargs,
)
else:
node = helper.make_node(
"Conv",
inputs=["x", "W"],
outputs=["y"],
kernel_shape=kernel_shape,
# Default values for other attributes:
strides=strides,
dilations=dilations,
# groups=1
pads=padding,
)
graph = helper.make_graph(
[node],
"conv_test",
inputs=[
helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape)),
helper.make_tensor_value_info("W", TensorProto.FLOAT, list(w_shape)),
],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(y_shape))],
)
model = helper.make_model(graph, producer_name="conv_test")
verify_with_ort(model, [x_shape, w_shape], [y_shape], use_vm=True, convert_to_static=True)
@tvm.testing.uses_gpu
def test_conv():
def repeat(N, D):
return tuple([N for _ in range(D)])
for D in [1, 2, 3]:
# Convolution with padding
verify_conv(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(5, D),
2 * repeat(1, D),
repeat(3, D),
repeat(1, D),
repeat(1, D),
)
        # Convolution with asymmetric padding
verify_conv(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(4, D),
repeat(0, D) + repeat(1, D),
repeat(3, D),
repeat(1, D),
repeat(1, D),
)
# Convolution without padding
verify_conv(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(3, D),
2 * repeat(0, D),
repeat(3, D),
repeat(1, D),
repeat(1, D),
)
# Convolution with autopadding
verify_conv(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(5, D),
None,
repeat(3, D),
repeat(1, D),
repeat(1, D),
auto_pad="SAME_UPPER",
)
# Convolution with valid autopadding
verify_conv(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(3, D),
None,
repeat(3, D),
repeat(1, D),
repeat(1, D),
auto_pad="VALID",
)
# Convolution with unset padding
verify_conv(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(3, D),
2 * repeat(0, D),
repeat(3, D),
repeat(1, D),
repeat(1, D),
True,
)
        # Convolution with non-uniform stride
verify_conv(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(3, D),
None,
repeat(3, D),
repeat(2, D),
repeat(1, D),
auto_pad="SAME_UPPER",
)
# Convolution with dilation
verify_conv(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(5, D),
2 * repeat(2, D),
repeat(3, D),
repeat(1, D),
repeat(2, D),
)
def verify_convtranspose_with_padding(
x_shape,
w_shape,
y_shape,
padding,
kernel_shape,
strides,
dilations,
auto_pad="NOTSET",
unset_pad=False,
group=1,
):
node = helper.make_node(
"ConvTranspose",
inputs=["x", "W"],
outputs=["y"],
kernel_shape=kernel_shape,
# Default values for other attributes:
strides=strides,
dilations=dilations,
)
if not unset_pad:
if padding is None:
pad_attr = helper.make_attribute("auto_pad", auto_pad)
else:
pad_attr = helper.make_attribute("pads", padding)
node.attribute.append(pad_attr)
if group is not None:
group_attr = helper.make_attribute("group", group)
node.attribute.append(group_attr)
graph = helper.make_graph(
[node],
"convtranspose_test",
inputs=[
helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape)),
helper.make_tensor_value_info("W", TensorProto.FLOAT, list(w_shape)),
],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(y_shape))],
)
model = helper.make_model(graph, producer_name="convtranspose_pad_test")
verify_with_ort(model, [x_shape, w_shape], [y_shape], use_vm=True, convert_to_static=True)
def verify_convtranspose(x_shape, w_shape, y_shape, p, group=1):
node = onnx.helper.make_node(
"ConvTranspose",
inputs=["x", "W"],
outputs=["y"],
strides=[3, 2],
kernel_shape=[3, 3],
pads=p,
)
if group is not None:
group_attr = helper.make_attribute("group", group)
node.attribute.append(group_attr)
graph = helper.make_graph(
[node],
"verify_convtranspose_test",
inputs=[
helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape)),
helper.make_tensor_value_info("W", TensorProto.FLOAT, list(w_shape)),
],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(y_shape))],
)
model = helper.make_model(graph, producer_name="convtranspose_test")
verify_with_ort(model, [x_shape, w_shape], y_shape)
@tvm.testing.uses_gpu
def test_convtranspose():
# Convolution Transpose with padding
# (1, 1, 3, 3) input tensor
# (1, 2, 3, 3) tensor for convolution weights
# (1, 2, 7, 3) output tensor
# [1, 2, 1, 2] list for pads
verify_convtranspose((1, 1, 3, 3), (1, 2, 3, 3), (1, 2, 7, 3), [1, 2, 1, 2])
# Test undefined groups.
verify_convtranspose((1, 1, 3, 3), (1, 2, 3, 3), (1, 2, 7, 3), [1, 2, 1, 2], group=None)
def repeat(N, D):
return tuple([N for _ in range(D)])
# TODO(mbrookhart): onnxruntime in CI only supports 2D,
# find something else to test 1D and 3D against
for D in [2]:
# Convolution with padding
verify_convtranspose_with_padding(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(5, D),
2 * repeat(1, D),
repeat(3, D),
repeat(1, D),
repeat(1, D),
)
# Convolution without padding
verify_convtranspose_with_padding(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(7, D),
2 * repeat(0, D),
repeat(3, D),
repeat(1, D),
repeat(1, D),
)
# Convolution with autopadding
verify_convtranspose_with_padding(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(5, D),
None,
repeat(3, D),
repeat(1, D),
repeat(1, D),
auto_pad="SAME_UPPER",
)
# Convolution with valid autopadding
verify_convtranspose_with_padding(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(7, D),
None,
repeat(3, D),
repeat(1, D),
repeat(1, D),
auto_pad="VALID",
)
# Convolution with unset padding
verify_convtranspose_with_padding(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(7, D),
2 * repeat(0, D),
repeat(3, D),
repeat(1, D),
repeat(1, D),
True,
)
        # Convolution with non-uniform stride
verify_convtranspose_with_padding(
(1, 1) + repeat(5, D),
(1, 1) + repeat(3, D),
(1, 1) + repeat(9, D),
None,
repeat(3, D),
repeat(2, D),
repeat(1, D),
auto_pad="SAME_UPPER",
)
# Convolution with dilation
# TODO(mbrookhart): Relay doesn't currently support convtranspose with dilation
# verify_convtranspose_with_padding(
# (1, 1) + repeat(5, D),
# (1, 1) + repeat(3, D),
# (1, 1) + repeat(5, D),
# 2 * repeat(2, D),
# repeat(3, D),
# repeat(1, D),
# repeat(2, D),
# )
@tvm.testing.uses_gpu
def test_unsqueeze_constant():
from torch.nn import Linear, Sequential, Module
class Flatten(Module):
def forward(self, input):
return input.view(input.size(0), -1)
import tempfile
with tempfile.NamedTemporaryFile() as fp:
file_name = fp.name
input_size = (1, 16, 32, 32)
dummy_input = torch.randn(*input_size)
layer = Sequential(Flatten(), Linear(16 * 32 * 32, 64))
torch.onnx.export(layer, dummy_input, file_name, export_params=True)
onnx_model = onnx.load(file_name)
relay.frontend.from_onnx(onnx_model, {"0": input_size})
def verify_pooling(x_shape, kernel_shape, strides, pads, out_shape, mode, auto_pad="NOTSET"):
x_np = np.random.uniform(size=x_shape).astype("float32")
if mode == "max":
node_type = "MaxPool"
elif mode == "average":
node_type = "AveragePool"
else:
raise ValueError("Pool method {} is not supported.".format(mode))
pool_node = helper.make_node(
node_type, inputs=["x"], outputs=["y"], kernel_shape=kernel_shape, strides=strides
)
if pads is None:
pad_attr = helper.make_attribute("auto_pad", auto_pad)
else:
pad_attr = helper.make_attribute("pads", pads)
pool_node.attribute.append(pad_attr)
if mode == "max":
storage_attr = helper.make_attribute("storage_order", 0)
pool_node.attribute.append(storage_attr)
graph = helper.make_graph(
[pool_node],
"pooling_test",
inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape))],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(out_shape))],
)
model = helper.make_model(graph, producer_name="pooling_test")
verify_with_ort(model, [x_shape], [out_shape], use_vm=False, convert_to_static=True)
@tvm.testing.uses_gpu
def test_pooling():
for mode in ["max", "average"]:
# Pool1D
verify_pooling(
x_shape=[1, 1, 32],
kernel_shape=[3],
strides=[1],
pads=[1, 1],
out_shape=[1, 1, 32],
mode=mode,
)
# Pool2D
verify_pooling(
x_shape=[1, 1, 32, 32],
kernel_shape=[3, 3],
strides=[1, 1],
pads=[1, 1, 1, 1],
out_shape=[1, 1, 32, 32],
mode=mode,
)
# Pool1D with stride
verify_pooling(
x_shape=[1, 1, 32],
kernel_shape=[3],
strides=[2],
pads=[1, 1],
out_shape=[1, 1, 16],
mode=mode,
)
# Pool2D with stride
verify_pooling(
x_shape=[1, 1, 32, 32],
kernel_shape=[3, 3],
strides=[2, 2],
pads=[1, 1, 1, 1],
out_shape=[1, 1, 16, 16],
mode=mode,
)
# Pool1D with stride and autopadding
verify_pooling(
x_shape=[1, 1, 32],
kernel_shape=[3],
strides=[2],
pads=None,
out_shape=[1, 1, 16],
mode=mode,
auto_pad="SAME_UPPER",
)
# Pool2D with stride and autopadding
verify_pooling(
x_shape=[1, 1, 32, 32],
kernel_shape=[3, 3],
strides=[2, 2],
pads=None,
out_shape=[1, 1, 16, 16],
mode=mode,
auto_pad="SAME_UPPER",
)
# Pool3D with stride
verify_pooling(
x_shape=[1, 1, 32, 32, 32],
kernel_shape=[3, 3, 3],
strides=[2, 2, 2],
pads=[1, 1, 1, 1, 1, 1],
out_shape=[1, 1, 16, 16, 16],
mode=mode,
)
# Pool3D with stride and autopadding
verify_pooling(
x_shape=[1, 1, 32, 32, 32],
kernel_shape=[3, 3, 3],
strides=[2, 2, 2],
pads=None,
out_shape=[1, 1, 16, 16, 16],
mode=mode,
auto_pad="SAME_UPPER",
)
def verify_global_pooling(x_shape, mode):
out_shape = x_shape[:2] + [1] * (len(x_shape) - 2)
if mode == "max":
node_type = "GlobalMaxPool"
elif mode == "average":
node_type = "GlobalAveragePool"
else:
raise ValueError("Pool method {} is not supported.".format(mode))
pool_node = helper.make_node(node_type, inputs=["x"], outputs=["y"])
graph = helper.make_graph(
[pool_node],
"global_pooling_test",
inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape))],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(out_shape))],
)
model = helper.make_model(graph, producer_name="global_pooling_test")
verify_with_ort(model, [x_shape], [out_shape], use_vm=False, convert_to_static=True)
@tvm.testing.uses_gpu
def test_global_pooling():
# Test each pooling mode across all N-D inputs.
for mode in ["average", "max"]:
# 1D Pooling (NCW)
verify_global_pooling([1, 8, 8], mode)
verify_global_pooling([4, 1, 4], mode)
# 2D Pooling (NCHW)
verify_global_pooling([1, 8, 8, 8], mode)
verify_global_pooling([4, 1, 6, 4], mode)
# 3D Pooling (NCDHW)
verify_global_pooling([1, 8, 6, 8, 8], mode)
verify_global_pooling([4, 1, 2, 6, 4], mode)
def verify_mod(x_shape, y_shape, fmod, out_shape, dtype="float32"):
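    """Build and check a Mod graph. Per the ONNX spec, fmod=0 uses integer modulus
    semantics (result sign follows the divisor, like Python %), while fmod=1 behaves
    like C fmod / np.fmod (result sign follows the dividend).
    """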
x_np = np.random.uniform(-100.0, 100.0, x_shape).astype(dtype)
y_np = np.random.uniform(-100.0, 100.0, y_shape).astype(dtype)
y_np = np.where(y_np == 0, 1, y_np) # remove 0's to avoid division by zero error
mod_node = helper.make_node("Mod", inputs=["x", "y"], outputs=["z"], fmod=fmod)
onnx_dtype = TensorProto.FLOAT if dtype == "float32" else TensorProto.INT32
graph = helper.make_graph(
[mod_node],
"mod_test",
inputs=[
helper.make_tensor_value_info("x", onnx_dtype, list(x_shape)),
helper.make_tensor_value_info("y", onnx_dtype, list(y_shape)),
],
outputs=[helper.make_tensor_value_info("z", onnx_dtype, list(out_shape))],
)
model = helper.make_model(graph, producer_name="mod_test")
verify_with_ort_with_inputs(model, [x_np, y_np], [out_shape])
@tvm.testing.uses_gpu
def test_mod():
# Mod
verify_mod(
x_shape=[1, 32, 32], y_shape=[1, 1, 32], fmod=0, out_shape=(1, 32, 32), dtype="int32"
)
verify_mod(
x_shape=[1, 32, 32, 32],
y_shape=[1, 32, 32, 32],
fmod=0,
out_shape=(1, 32, 32, 32),
dtype="int32",
)
# fmod
verify_mod(
x_shape=[1, 32, 32], y_shape=[1, 32, 32], fmod=1, out_shape=(1, 32, 32), dtype="int32"
)
verify_mod(x_shape=[1, 1, 32, 32], y_shape=[1, 32, 32, 32], fmod=1, out_shape=(1, 32, 32, 32))
verify_mod(x_shape=[1, 32, 32, 32], y_shape=[1, 1, 32, 32], fmod=1, out_shape=(1, 32, 32, 32))
verify_mod(
x_shape=[1, 32, 32, 32],
y_shape=[1, 32, 32, 32],
fmod=1,
out_shape=(1, 32, 32, 32),
dtype="int32",
)
verify_mod(x_shape=[1, 32, 32, 32], y_shape=[1, 32, 32, 32], fmod=1, out_shape=(1, 32, 32, 32))
def verify_xor(x_shape, y_shape):
x_np = np.random.choice(a=[False, True], size=x_shape).astype("bool")
y_np = np.random.choice(a=[False, True], size=y_shape).astype("bool")
np_out = np.logical_xor(x_np, y_np)
out_shape = np_out.shape
xor_node = helper.make_node("Xor", inputs=["x", "y"], outputs=["z"])
onnx_dtype = TensorProto.BOOL
graph = helper.make_graph(
[xor_node],
"xor_test",
inputs=[
helper.make_tensor_value_info("x", onnx_dtype, list(x_shape)),
helper.make_tensor_value_info("y", onnx_dtype, list(y_shape)),
],
outputs=[helper.make_tensor_value_info("z", onnx_dtype, list(out_shape))],
)
model = helper.make_model(graph, producer_name="xor_test")
verify_with_ort_with_inputs(model, [x_np, y_np], [out_shape])
@tvm.testing.uses_gpu
def test_xor():
# XOR
verify_xor(x_shape=[1, 32, 32], y_shape=[1, 32, 32])
# Xor broadcast
verify_xor(x_shape=[1, 32, 32], y_shape=[1, 1, 32])
def verify_max_roi_pool(x_shape, rois_shape, pooled_shape, spatial_scale, out_shape):
if spatial_scale is None:
pool_node = helper.make_node(
"MaxRoiPool", inputs=["x", "rois"], outputs=["y"], pooled_shape=pooled_shape
)
else:
pool_node = helper.make_node(
"MaxRoiPool",
inputs=["x", "rois"],
outputs=["y"],
pooled_shape=pooled_shape,
spatial_scale=spatial_scale,
)
graph = helper.make_graph(
[pool_node],
"pool_test",
inputs=[
helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape)),
helper.make_tensor_value_info("rois", TensorProto.FLOAT, list(rois_shape)),
],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(out_shape))],
)
model = helper.make_model(graph, producer_name="pool_test")
verify_with_ort(model, [x_shape, rois_shape], [out_shape])
@tvm.testing.uses_gpu
def test_max_roi_pool():
verify_max_roi_pool(
x_shape=[1, 3, 6, 6],
rois_shape=[3, 5],
pooled_shape=[1, 1],
spatial_scale=None,
out_shape=[3, 3, 1, 1],
)
verify_max_roi_pool(
x_shape=[1, 3, 10, 10],
rois_shape=[4, 5],
pooled_shape=[2, 2],
spatial_scale=2.0,
out_shape=[4, 3, 2, 2],
)
def verify_lppool(x_shape, kernel_shape, p, strides, pads, out_shape, auto_pad="NOTSET"):
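    """Build and check an LpPool graph, which pools with the p-norm of each window,
    i.e. (sum_i |x_i| ** p) ** (1 / p); both explicit pads and auto_pad variants are
    covered by the callers below.
    """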
if pads is None:
pool_node = helper.make_node(
"LpPool",
inputs=["x"],
outputs=["y"],
kernel_shape=kernel_shape,
p=p,
auto_pad=auto_pad,
strides=strides,
)
else:
pool_node = helper.make_node(
"LpPool",
inputs=["x"],
outputs=["y"],
kernel_shape=kernel_shape,
p=p,
pads=pads,
strides=strides,
)
graph = helper.make_graph(
[pool_node],
"lppool_test",
inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape))],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(out_shape))],
)
model = helper.make_model(graph, producer_name="lppool_test")
verify_with_ort(model, [x_shape], [out_shape], use_vm=True, convert_to_static=True)
@tvm.testing.uses_gpu
def test_lppool():
# Pool1D
verify_lppool(
x_shape=[1, 1, 32], kernel_shape=[3], p=2, strides=[1], pads=[1, 1], out_shape=[1, 1, 32]
)
# Pool2D
verify_lppool(
x_shape=[1, 1, 32, 32],
kernel_shape=[3, 3],
p=2,
strides=[1, 1],
pads=[1, 1, 1, 1],
out_shape=[1, 1, 32, 32],
)
# Pool1D with stride
verify_lppool(
x_shape=[1, 1, 32], kernel_shape=[3], p=2, strides=[2], pads=[1, 1], out_shape=[1, 1, 16]
)
# Pool2D with stride
verify_lppool(
x_shape=[1, 1, 32, 32],
kernel_shape=[3, 3],
p=2,
strides=[2, 2],
pads=[1, 1, 1, 1],
out_shape=[1, 1, 16, 16],
)
# Pool1D with stride and autopadding
verify_lppool(
x_shape=[1, 1, 32],
kernel_shape=[3],
p=2,
strides=[2],
pads=None,
out_shape=[1, 1, 16],
auto_pad="SAME_UPPER",
)
# Pool2D with stride and autopadding
verify_lppool(
x_shape=[1, 1, 32, 32],
kernel_shape=[3, 3],
p=2,
strides=[2, 2],
pads=None,
out_shape=[1, 1, 16, 16],
auto_pad="SAME_UPPER",
)
# Pool3D with stride
verify_lppool(
x_shape=[1, 1, 32, 32, 32],
kernel_shape=[3, 3, 3],
p=2,
strides=[2, 2, 2],
pads=[1, 1, 1, 1, 1, 1],
out_shape=[1, 1, 16, 16, 16],
)
# Pool3D with stride and autopadding
verify_lppool(
x_shape=[1, 1, 32, 32, 32],
kernel_shape=[3, 3, 3],
p=2,
strides=[2, 2, 2],
pads=None,
out_shape=[1, 1, 16, 16, 16],
auto_pad="SAME_UPPER",
)
def verify_rnn(
seq_length,
batch_size,
input_size,
hidden_size,
rnn_type="LSTM",
use_bias=False,
activations=None,
alphas=None,
betas=None,
use_initial_state=False,
use_peep=False,
linear_before_reset=False,
):
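    """Build and check an LSTM/GRU graph. The gate multiplier is 4 for LSTM and 3 for
    GRU, so W is [1, multiplier*hidden, input], R is [1, multiplier*hidden, hidden],
    and B concatenates the input and recurrence biases as [1, 2*multiplier*hidden].
    Optional inputs (sequence_lens, initial_h/c, peepholes P) are appended in order.
    """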
if rnn_type == "LSTM":
multiplier = 4
elif rnn_type == "GRU":
multiplier = 3
else:
raise NotImplementedError("%s RNNs not yet supported." % rnn_type)
x_np = np.random.uniform(size=(seq_length, batch_size, input_size)).astype("float32")
w_np = np.random.uniform(size=(1, multiplier * hidden_size, input_size)).astype("float32")
r_np = np.random.uniform(size=(1, multiplier * hidden_size, hidden_size)).astype("float32")
input_names = ["X", "W", "R"]
input_tensors = [
helper.make_tensor_value_info("X", TensorProto.FLOAT, list(x_np.shape)),
helper.make_tensor_value_info("W", TensorProto.FLOAT, list(w_np.shape)),
helper.make_tensor_value_info("R", TensorProto.FLOAT, list(r_np.shape)),
]
input_values = [x_np, w_np, r_np]
if use_bias:
b_np = np.random.uniform(size=(1, multiplier * 2 * hidden_size)).astype("float32")
input_names.append("B")
input_tensors.append(
helper.make_tensor_value_info("B", TensorProto.FLOAT, [1, multiplier * 2 * hidden_size])
)
input_values.append(b_np)
if use_initial_state:
        assert use_bias, "Initial states must have bias specified."
sequence_np = np.repeat(seq_length, batch_size).astype("int32")
input_names.append("sequence_lens")
input_tensors.append(
helper.make_tensor_value_info("sequence_lens", TensorProto.INT32, [batch_size])
)
input_values.append(sequence_np)
initial_h_np = np.random.uniform(size=(1, batch_size, hidden_size)).astype("float32")
input_names.append("initial_h")
input_tensors.append(
helper.make_tensor_value_info(
"initial_h", TensorProto.FLOAT, [1, batch_size, hidden_size]
)
)
input_values.append(initial_h_np)
if rnn_type == "LSTM":
initial_c_np = np.random.uniform(size=(1, batch_size, hidden_size)).astype("float32")
input_names.append("initial_c")
input_tensors.append(
helper.make_tensor_value_info(
"initial_c", TensorProto.FLOAT, [1, batch_size, hidden_size]
)
)
input_values.append(initial_c_np)
if use_peep and rnn_type == "LSTM":
        assert use_initial_state, "Peepholes require initial state to be specified."
p_np = np.random.uniform(size=(1, 3 * hidden_size)).astype("float32")
input_names.append("P")
input_tensors.append(
helper.make_tensor_value_info("P", TensorProto.FLOAT, [1, 3 * hidden_size])
)
input_values.append(p_np)
Y_shape = [seq_length, 1, batch_size, hidden_size]
Y_h_shape = [1, batch_size, hidden_size]
outputs = ["Y", "Y_h"]
graph_outputs = [
helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(Y_shape)),
helper.make_tensor_value_info("Y_h", TensorProto.FLOAT, list(Y_h_shape)),
]
output_shapes = [Y_shape, Y_h_shape]
if rnn_type == "LSTM":
Y_c_shape = [1, batch_size, hidden_size]
outputs.append("Y_c")
graph_outputs.append(
helper.make_tensor_value_info("Y_c", TensorProto.FLOAT, list(Y_c_shape))
)
output_shapes.append(Y_c_shape)
rnn_node = helper.make_node(
rnn_type, inputs=input_names, outputs=outputs, hidden_size=hidden_size
)
if activations is not None:
activations_attr = helper.make_attribute("activations", activations)
rnn_node.attribute.append(activations_attr)
if alphas is not None:
alphas_attr = helper.make_attribute("activation_alpha", alphas)
rnn_node.attribute.append(alphas_attr)
if betas is not None:
betas_attr = helper.make_attribute("activation_beta", betas)
rnn_node.attribute.append(betas_attr)
if linear_before_reset and rnn_type == "GRU":
lbr_attr = helper.make_attribute("linear_before_reset", 1)
rnn_node.attribute.append(lbr_attr)
graph = helper.make_graph([rnn_node], "rnn_test", inputs=input_tensors, outputs=graph_outputs)
model = helper.make_model(graph, producer_name="rnn_test")
verify_with_ort_with_inputs(model, input_values, output_shapes, atol=1e-2, rtol=1e-2)
@tvm.testing.uses_gpu
def test_lstm():
# No bias.
verify_rnn(
seq_length=2, batch_size=1, input_size=16, hidden_size=32, use_bias=False, rnn_type="LSTM"
)
# large batch.
verify_rnn(
seq_length=4, batch_size=8, input_size=16, hidden_size=32, use_bias=True, rnn_type="LSTM"
)
# Non power of two.
verify_rnn(
seq_length=3, batch_size=3, input_size=16, hidden_size=40, use_bias=True, rnn_type="LSTM"
)
# Long sequence.
verify_rnn(
seq_length=8, batch_size=1, input_size=16, hidden_size=32, use_bias=True, rnn_type="LSTM"
)
# Large hidden.
verify_rnn(
seq_length=2, batch_size=1, input_size=16, hidden_size=128, use_bias=True, rnn_type="LSTM"
)
# Large input.
verify_rnn(
seq_length=2, batch_size=1, input_size=64, hidden_size=32, use_bias=True, rnn_type="LSTM"
)
# Different activation testing.
# Default value hardsigmoid.
verify_rnn(
seq_length=2,
batch_size=1,
input_size=16,
hidden_size=32,
use_bias=False,
activations=["HardSigmoid", "Tanh", "Tanh"],
rnn_type="LSTM",
)
# Multiple parameterized activations.
verify_rnn(
seq_length=2,
batch_size=1,
input_size=16,
hidden_size=32,
use_bias=False,
activations=["HardSigmoid", "LeakyRelu", "Tanh"],
alphas=[2.0, 0.5],
betas=[0.3],
rnn_type="LSTM",
)
# All parameterized with new Affine activation.
verify_rnn(
seq_length=2,
batch_size=1,
input_size=16,
hidden_size=32,
use_bias=False,
activations=["HardSigmoid", "LeakyRelu", "Affine"],
alphas=[2.0, 0.5, 0.8],
betas=[0.3, 0.1],
rnn_type="LSTM",
)
# Testing with initial state and peepholes
verify_rnn(
seq_length=2,
batch_size=1,
input_size=16,
hidden_size=32,
use_bias=True,
use_initial_state=True,
rnn_type="LSTM",
)
verify_rnn(
seq_length=2,
batch_size=1,
input_size=16,
hidden_size=32,
use_bias=True,
use_initial_state=True,
use_peep=True,
rnn_type="LSTM",
)
@tvm.testing.uses_gpu
def test_gru():
# No bias.
verify_rnn(
seq_length=2, batch_size=1, input_size=16, hidden_size=32, use_bias=False, rnn_type="GRU"
)
# large batch.
verify_rnn(
seq_length=4,
batch_size=8,
input_size=16,
hidden_size=32,
use_bias=True,
rnn_type="GRU",
linear_before_reset=True,
)
# Non power of two.
verify_rnn(
seq_length=3, batch_size=3, input_size=16, hidden_size=40, use_bias=True, rnn_type="GRU"
)
# Long sequence.
verify_rnn(
seq_length=8, batch_size=1, input_size=16, hidden_size=32, use_bias=True, rnn_type="GRU"
)
# Large hidden.
verify_rnn(
seq_length=2, batch_size=1, input_size=16, hidden_size=128, use_bias=True, rnn_type="GRU"
)
# Large input.
verify_rnn(
seq_length=2, batch_size=1, input_size=64, hidden_size=32, use_bias=True, rnn_type="GRU"
)
# Different activation testing.
# Default value hardsigmoid.
verify_rnn(
seq_length=2,
batch_size=1,
input_size=16,
hidden_size=32,
use_bias=False,
activations=["HardSigmoid", "Softsign"],
rnn_type="GRU",
)
# Multiple parameterized activations.
verify_rnn(
seq_length=2,
batch_size=1,
input_size=16,
hidden_size=32,
use_bias=False,
activations=["HardSigmoid", "LeakyRelu"],
alphas=[2.0, 0.5],
betas=[0.3],
rnn_type="GRU",
)
# All parameterized with new Affine activation.
verify_rnn(
seq_length=2,
batch_size=1,
input_size=16,
hidden_size=32,
use_bias=False,
activations=["HardSigmoid", "Affine"],
alphas=[2.0, 0.8],
betas=[0.3, 0.1],
rnn_type="GRU",
)
# Testing with initial state
verify_rnn(
seq_length=2,
batch_size=1,
input_size=16,
hidden_size=32,
use_bias=True,
use_initial_state=True,
rnn_type="GRU",
)
@tvm.testing.uses_gpu
def test_resize():
def verify(ishape, oshape, scales, mode, coord_trans):
nodes = [
make_constant_node("roi", onnx.TensorProto.FLOAT, (0,), []),
make_constant_node("scales", onnx.TensorProto.FLOAT, (len(scales),), scales),
]
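        # Opset-11 Resize takes inputs in the order (X, roi, scales[, sizes]);
        # when an explicit output shape is requested, scales is passed as an
        # empty tensor and a "sizes" constant is appended below.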
input_names = ["X", "roi", "scales"]
if oshape != []:
nodes.append(
make_constant_node("sizes", onnx.TensorProto.INT64, (len(oshape),), oshape)
)
input_names.append("sizes")
nodes.append(
helper.make_node(
"Resize",
inputs=input_names,
outputs=["Y"],
mode=mode,
coordinate_transformation_mode=coord_trans,
)
)
if oshape == []:
oshape = [round(dim * scale) for (dim, scale) in zip(ishape, scales)]
graph = helper.make_graph(
nodes,
"resize_test",
inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, ishape)],
outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, oshape)],
)
model = helper.make_model(graph, producer_name="resize_test")
verify_with_ort(model, [ishape], [oshape], use_vm=True, opset=11, freeze_params=True)
# upsampling
verify([1, 16, 32, 32], [1, 16, 64, 64], [], "nearest", "asymmetric")
verify([1, 16, 32, 32], [1, 16, 64, 64], [], "linear", "asymmetric")
verify([1, 16, 32, 32], [1, 16, 64, 64], [], "nearest", "align_corners")
verify([1, 16, 32, 32], [1, 16, 64, 64], [], "linear", "align_corners")
verify([1, 16, 32, 32], [1, 16, 64, 64], [], "nearest", "half_pixel")
verify([1, 16, 32, 32], [1, 16, 64, 64], [], "linear", "half_pixel")
# downsampling
verify([1, 16, 32, 32], [1, 16, 16, 16], [], "nearest", "asymmetric")
verify([1, 16, 32, 32], [1, 16, 16, 16], [], "linear", "asymmetric")
verify([1, 16, 32, 32], [1, 16, 16, 16], [], "nearest", "align_corners")
verify([1, 16, 32, 32], [1, 16, 16, 16], [], "linear", "align_corners")
verify([1, 16, 32, 32], [1, 16, 16, 16], [], "nearest", "half_pixel")
verify([1, 16, 32, 32], [1, 16, 16, 16], [], "linear", "half_pixel")
# scales are specified instead of sizes
verify([1, 16, 32, 32], [], [1, 1, 2, 2], "nearest", "asymmetric")
verify([1, 16, 32, 32], [], [1, 1, 2, 2], "linear", "asymmetric")
verify([1, 16, 32, 32], [], [1, 1, 2, 2], "nearest", "align_corners")
verify([1, 16, 32, 32], [], [1, 1, 2, 2], "linear", "align_corners")
verify([1, 16, 32, 32], [], [1, 1, 0.5, 0.5], "linear", "half_pixel")
verify([1, 16, 32, 32], [], [1, 1, 0.5, 0.5], "nearest", "half_pixel")
def verify_opset_10(ishape, scales, mode):
nodes = [
make_constant_node("scales", onnx.TensorProto.FLOAT, (len(scales),), scales),
]
input_names = ["X", "scales"]
nodes.append(
helper.make_node(
"Resize",
inputs=input_names,
outputs=["Y"],
mode=mode,
)
)
oshape = [round(dim * scale) for (dim, scale) in zip(ishape, scales)]
graph = helper.make_graph(
nodes,
"resize_test",
inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, ishape)],
outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, oshape)],
)
model = helper.make_model(graph, producer_name="resize_test")
verify_with_ort(model, [ishape], [oshape], use_vm=True, freeze_params=True, opset=10)
verify_opset_10([1, 16, 32, 32], [1, 1, 2, 2], "nearest")
verify_opset_10([1, 16, 32, 32], [1, 1, 0.5, 0.5], "linear")
@tvm.testing.uses_gpu
def test_nonzero():
def verify_nonzero(indata, outdata, dtype):
node = helper.make_node(
"NonZero",
inputs=["X"],
outputs=["Y"],
)
graph = helper.make_graph(
[node],
"nonzero_test",
inputs=[helper.make_tensor_value_info("X", TensorProto.INT64, list(indata.shape))],
outputs=[helper.make_tensor_value_info("Y", TensorProto.INT64, list(outdata.shape))],
)
model = helper.make_model(graph, producer_name="nonzero_test")
verify_with_ort_with_inputs(model, [indata], dtype="int64", use_vm=True, opset=9)
input_data = np.array([[1, 0], [1, 1]], dtype=np.int64)
result = np.array((np.nonzero(input_data))) # expected output [[0, 1, 1], [0, 0, 1]]
verify_nonzero(input_data, result, dtype=np.int64)
input_data = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]], dtype=np.int64)
result = np.array((np.nonzero(input_data))) # expected output [[0, 1, 2, 2], [0, 1, 0, 1]]
verify_nonzero(input_data, result, dtype=np.int64)
@tvm.testing.uses_gpu
def test_topk():
def verify_topk(input_dims, K, axis=-1):
output_dims = list(input_dims)
output_dims[axis] = K
node = helper.make_node(
"TopK", inputs=["X", "K"], outputs=["Values", "Indicies"], axis=axis
)
graph = helper.make_graph(
[node],
"topk_test",
inputs=[
helper.make_tensor_value_info("X", TensorProto.FLOAT, list(input_dims)),
helper.make_tensor_value_info(
"K",
TensorProto.INT64,
[
1,
],
),
],
outputs=[
helper.make_tensor_value_info("Values", TensorProto.FLOAT, output_dims),
helper.make_tensor_value_info("Indicies", TensorProto.INT64, output_dims),
],
)
model = helper.make_model(graph, producer_name="topk_test")
indata = np.random.uniform(-10, 10, input_dims).astype(np.float32)
verify_with_ort_with_inputs(model, [indata, np.array([K])], use_vm=True)
for n in [12, 32]:
for shape in [[n], [n, n], [n, n, n]]:
for k in [1, 5, 10]:
verify_topk(shape, k)
verify_topk([n, n, n], 5, 0)
verify_topk([n, n, n], 5, 1)
verify_topk([n, n, n], 5, 2)
@tvm.testing.uses_gpu
def test_roi_align():
def verify_roi_align(
input_dims,
num_roi,
output_height,
output_width,
sampling_ratio=0,
spatial_scale=1.0,
mode="avg",
):
output_dims = [num_roi, input_dims[1], output_height, output_width]
node = helper.make_node(
"RoiAlign",
inputs=["X", "rois", "batch_indicies"],
outputs=["Y"],
mode=mode,
output_height=output_height,
output_width=output_width,
sampling_ratio=sampling_ratio,
spatial_scale=spatial_scale,
)
graph = helper.make_graph(
[node],
"roialign_test",
inputs=[
helper.make_tensor_value_info("X", TensorProto.FLOAT, list(input_dims)),
helper.make_tensor_value_info("rois", TensorProto.FLOAT, [num_roi, 4]),
helper.make_tensor_value_info(
"batch_indicies",
TensorProto.INT64,
[
num_roi,
],
),
],
outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, output_dims)],
)
model = helper.make_model(graph, producer_name="roialign_test")
np_data = np.random.uniform(size=input_dims).astype("float32")
np_rois = np.random.uniform(size=[num_roi, 4]).astype("float32") * input_dims[2]
np_batch_indicies = np.random.randint(low=0, high=input_dims[0], size=num_roi)
verify_with_ort_with_inputs(
model, [np_data, np_rois, np_batch_indicies], out_shape=[output_dims]
)
verify_roi_align((1, 4, 16, 16), 32, 7, 7, sampling_ratio=0, spatial_scale=1.0)
verify_roi_align((4, 4, 16, 32), 32, 7, 7, sampling_ratio=0, spatial_scale=1.0)
verify_roi_align((1, 8, 16, 16), 32, 7, 7, sampling_ratio=0, spatial_scale=1.0)
verify_roi_align((1, 4, 8, 8), 32, 7, 7, sampling_ratio=0, spatial_scale=1.0)
verify_roi_align((1, 4, 16, 16), 16, 5, 7, sampling_ratio=0, spatial_scale=1.0)
verify_roi_align((1, 4, 16, 12), 8, 7, 3, sampling_ratio=0, spatial_scale=1.0)
verify_roi_align((1, 4, 16, 16), 32, 7, 7, sampling_ratio=0, spatial_scale=0.5)
verify_roi_align((3, 4, 12, 16), 32, 7, 7, sampling_ratio=0, spatial_scale=1.5)
verify_roi_align((5, 4, 16, 14), 32, 7, 7, sampling_ratio=1, spatial_scale=1.0)
verify_roi_align((1, 4, 16, 16), 32, 7, 7, sampling_ratio=2, spatial_scale=1.0)
# ONNX implementation of roi_align with max mode is incorrect, so we don't compare outputs here.
@tvm.testing.uses_gpu
def test_non_max_suppression():
def verify_nms(
boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, output_dims
):
input_names = ["boxes", "scores", "max_output_boxes_per_class", "iou_threshold"]
input_nodes = [
helper.make_tensor_value_info("boxes", TensorProto.FLOAT, boxes.shape),
helper.make_tensor_value_info("scores", TensorProto.FLOAT, scores.shape),
helper.make_tensor_value_info(
"max_output_boxes_per_class", TensorProto.INT64, max_output_boxes_per_class.shape
),
helper.make_tensor_value_info("iou_threshold", TensorProto.FLOAT, iou_threshold.shape),
]
inputs = [boxes, scores, max_output_boxes_per_class, iou_threshold]
if score_threshold is not None:
input_names.append("score_threshold")
input_nodes.append(
helper.make_tensor_value_info(
"score_threshold", TensorProto.FLOAT, score_threshold.shape
)
)
inputs.append(score_threshold)
node = helper.make_node(
"NonMaxSuppression",
inputs=input_names,
outputs=["Y"],
center_point_box=0,
)
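        # center_point_box=0 means boxes are [y1, x1, y2, x2] corners; the op
        # returns the selected indices as [batch_index, class_index, box_index]
        # triples, which is why output_dims is always [num_selected, 3].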
graph = helper.make_graph(
[node],
"nms_test",
inputs=input_nodes,
outputs=[helper.make_tensor_value_info("Y", TensorProto.INT64, output_dims)],
)
model = helper.make_model(graph, producer_name="nms_test")
verify_with_ort_with_inputs(model, inputs, use_vm=True)
boxes = np.array(
[
[
[0.0, 0.0, 0.3, 0.3],
[0.0, 0.0, 0.4, 0.4],
[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9],
[0.5, 0.5, 1.0, 1.0],
],
[
[0.0, 0.0, 0.3, 0.3],
[0.0, 0.0, 0.4, 0.4],
[0.5, 0.5, 0.95, 0.95],
[0.5, 0.5, 0.96, 0.96],
[0.5, 0.5, 1.0, 1.0],
],
]
).astype("float32")
scores = np.array(
[
[[0.1, 0.2, 0.6, 0.3, 0.9], [0.1, 0.2, 0.6, 0.3, 0.9]],
[[0.1, 0.2, 0.6, 0.3, 0.9], [0.1, 0.2, 0.6, 0.3, 0.9]],
]
).astype("float32")
max_output_boxes_per_class = np.array(2).astype("int64")
iou_threshold = np.array(0.8).astype("float32")
output_dims = [8, 3]
verify_nms(boxes, scores, max_output_boxes_per_class, iou_threshold, None, output_dims)
boxes = np.array(
[
[
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0],
]
]
).astype(np.float32)
scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.4]).astype(np.float32)
output_dims = [2, 3]
verify_nms(
boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, output_dims
)
def verify_cond_loop():
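    # An ONNX Loop body graph receives (iteration_num, condition, loop-carried
    # inputs) and must emit (condition, loop-carried outputs, scan outputs).
    # Here the body adds the iteration counter to y and keeps looping while
    # y < 5, so the loop exits on the condition rather than the trip count.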
y_in = helper.make_tensor_value_info("y_in", TensorProto.FLOAT, [1])
y_out = helper.make_tensor_value_info("y_out", TensorProto.FLOAT, [1])
scan_out = helper.make_tensor_value_info("scan_out", TensorProto.FLOAT, [1])
cond_in = helper.make_tensor_value_info("cond_in", TensorProto.BOOL, [])
cond_out = helper.make_tensor_value_info("cond_out", TensorProto.BOOL, [])
iter_count = helper.make_tensor_value_info("iter_count", TensorProto.INT64, [])
y = np.array([-2]).astype(np.float32)
five_const_node = helper.make_node(
"Constant",
inputs=[],
outputs=["five"],
value=helper.make_tensor(
name="const_tensor_five", data_type=TensorProto.FLOAT, dims=(), vals=[5]
),
)
iter_cast_node = helper.make_node(
"Cast", inputs=["iter_count"], outputs=["iter_cast"], to=onnx.TensorProto.FLOAT
)
y_add_node = helper.make_node("Add", inputs=["y_in", "iter_cast"], outputs=["y_out"])
less_node = helper.make_node("Less", inputs=["y_out", "five"], outputs=["cond_less"])
squeeze_node = helper.make_node("Squeeze", inputs=["cond_less"], outputs=["cond_squeeze"])
cond_cast_node = helper.make_node(
"Cast", inputs=["cond_squeeze"], outputs=["cond_out"], to=onnx.TensorProto.BOOL
)
scan_identity_node = helper.make_node("Identity", inputs=["y_out"], outputs=["scan_out"])
loop_body = helper.make_graph(
[
five_const_node,
iter_cast_node,
y_add_node,
less_node,
squeeze_node,
cond_cast_node,
scan_identity_node,
],
"loop_body",
[iter_count, cond_in, y_in],
[cond_out, y_out, scan_out],
)
loop_node = helper.make_node(
"Loop", inputs=["trip_count", "cond", "y"], outputs=["res_y", "res_scan"], body=loop_body
)
trip_count = np.array(5).astype(np.int64)
res_y = np.array([13]).astype(np.float32)
    cond = np.array(1).astype(bool)
loop_graph = onnx.helper.make_graph(
[loop_node],
"loop_outer",
inputs=[
onnx.helper.make_tensor_value_info("trip_count", onnx.TensorProto.INT64, []),
onnx.helper.make_tensor_value_info("cond", onnx.TensorProto.BOOL, []),
onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, [1]),
],
outputs=[
onnx.helper.make_tensor_value_info("res_y", onnx.TensorProto.FLOAT, [1]),
onnx.helper.make_tensor_value_info("res_scan", onnx.TensorProto.FLOAT, [5, 1]),
],
)
loop_model = onnx.helper.make_model(loop_graph)
# Set a high trip count so that condition trips first.
trip_count = np.array(40).astype(np.int64)
    cond = np.array(1).astype(bool)
input_vals = [trip_count, cond, y]
verify_with_ort_with_inputs(loop_model, input_vals, use_vm=True, freeze_params=True)
def verify_count_loop():
y_in = helper.make_tensor_value_info("y_in", TensorProto.FLOAT, [])
y_out = helper.make_tensor_value_info("y_out", TensorProto.FLOAT, [])
scan_out = helper.make_tensor_value_info("scan_out", TensorProto.FLOAT, [])
cond_in = helper.make_tensor_value_info("cond_in", TensorProto.BOOL, [])
cond_out = helper.make_tensor_value_info("cond_out", TensorProto.BOOL, [])
iter_count = helper.make_tensor_value_info("iter_count", TensorProto.INT64, [])
y = np.array(-2).astype(np.float32)
iter_cast_node = helper.make_node(
"Cast", inputs=["iter_count"], outputs=["iter_cast"], to=onnx.TensorProto.FLOAT
)
y_add_node = helper.make_node("Add", inputs=["y_in", "iter_cast"], outputs=["y_out"])
identity_node = helper.make_node("Identity", inputs=["cond_in"], outputs=["cond_out"])
scan_identity_node = helper.make_node("Identity", inputs=["y_out"], outputs=["scan_out"])
loop_body = helper.make_graph(
[identity_node, iter_cast_node, y_add_node, scan_identity_node],
"loop_body",
[iter_count, cond_in, y_in],
[cond_out, y_out, scan_out],
)
loop_node = helper.make_node(
"Loop", inputs=["trip_count", "cond", "y"], outputs=["res_y", "res_scan"], body=loop_body
)
trip_count = np.array(5).astype(np.int64)
res_y = np.array([13]).astype(np.float32)
    cond = np.array(1).astype(bool)
loop_graph = onnx.helper.make_graph(
[loop_node],
"loop_outer",
inputs=[
onnx.helper.make_tensor_value_info("trip_count", onnx.TensorProto.INT64, []),
onnx.helper.make_tensor_value_info("cond", onnx.TensorProto.BOOL, []),
onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, []),
],
outputs=[
onnx.helper.make_tensor_value_info("res_y", onnx.TensorProto.FLOAT, []),
onnx.helper.make_tensor_value_info("res_scan", onnx.TensorProto.FLOAT, [5]),
],
)
loop_model = onnx.helper.make_model(loop_graph)
trip_count = np.array(5).astype(np.int64)
    cond = np.array(1).astype(bool)
input_vals = [trip_count, cond, y]
verify_with_ort_with_inputs(loop_model, input_vals, use_vm=True, freeze_params=True)
def verify_tensor_loop():
y_in = helper.make_tensor_value_info("y_in", TensorProto.FLOAT, [3, 3, 3, 3])
y_out = helper.make_tensor_value_info("y_out", TensorProto.FLOAT, [3, 3, 3, 3])
scan_out = helper.make_tensor_value_info("scan_out", TensorProto.FLOAT, [3, 3, 3, 3])
cond_in = helper.make_tensor_value_info("cond_in", TensorProto.BOOL, [])
cond_out = helper.make_tensor_value_info("cond_out", TensorProto.BOOL, [])
iter_count = helper.make_tensor_value_info("iter_count", TensorProto.INT64, [])
y = np.random.normal(size=[3, 3, 3, 3]).astype(np.float32)
iter_cast_node = helper.make_node(
"Cast", inputs=["iter_count"], outputs=["iter_cast"], to=onnx.TensorProto.FLOAT
)
y_add_node = helper.make_node("Add", inputs=["y_in", "iter_cast"], outputs=["y_out"])
identity_node = helper.make_node("Identity", inputs=["cond_in"], outputs=["cond_out"])
scan_identity_node = helper.make_node("Identity", inputs=["y_out"], outputs=["scan_out"])
loop_body = helper.make_graph(
[identity_node, iter_cast_node, y_add_node, scan_identity_node],
"loop_body",
[iter_count, cond_in, y_in],
[cond_out, y_out, scan_out],
)
loop_node = helper.make_node(
"Loop", inputs=["trip_count", "cond", "y"], outputs=["res_y", "res_scan"], body=loop_body
)
trip_count = np.array(5).astype(np.int64)
    cond = np.array(1).astype(bool)
loop_graph = onnx.helper.make_graph(
[loop_node],
"loop_outer",
inputs=[
onnx.helper.make_tensor_value_info("trip_count", onnx.TensorProto.INT64, []),
onnx.helper.make_tensor_value_info("cond", onnx.TensorProto.BOOL, []),
onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, [3, 3, 3, 3]),
],
outputs=[
onnx.helper.make_tensor_value_info("res_y", onnx.TensorProto.FLOAT, [3, 3, 3, 3]),
onnx.helper.make_tensor_value_info("res_scan", onnx.TensorProto.FLOAT, [5, 3, 3, 3, 3]),
],
)
loop_model = onnx.helper.make_model(loop_graph)
trip_count = np.array(5).astype(np.int64)
    cond = np.array(1).astype(bool)
input_vals = [trip_count, cond, y]
verify_with_ort_with_inputs(
loop_model, input_vals, use_vm=True, freeze_params=True, convert_to_static=True
)
def test_loop():
# Test a loop that exits once a condition is met.
verify_cond_loop()
# Test a loop that exits after a fixed number of iterations with scalar outputs.
verify_count_loop()
# Test a loop that uses an array output.
verify_tensor_loop()
def verify_if(cond_array):
# Given a bool scalar input cond.
# return constant tensor x if cond is True, otherwise return constant tensor y.
then_out = onnx.helper.make_tensor_value_info("then_out", onnx.TensorProto.FLOAT, [5])
else_out = onnx.helper.make_tensor_value_info("else_out", onnx.TensorProto.FLOAT, [5])
x = np.array([1, 2, 3, 4, 5]).astype(np.float32)
y = np.array([5, 4, 3, 2, 1]).astype(np.float32)
then_const_node = onnx.helper.make_node(
"Constant", inputs=[], outputs=["then_out"], value=numpy_helper.from_array(x)
)
else_const_node = onnx.helper.make_node(
"Constant", inputs=[], outputs=["else_out"], value=numpy_helper.from_array(y)
)
then_body = onnx.helper.make_graph([then_const_node], "then_body", [], [then_out])
else_body = onnx.helper.make_graph([else_const_node], "else_body", [], [else_out])
if_node = onnx.helper.make_node(
"If", inputs=["cond"], outputs=["res"], then_branch=then_body, else_branch=else_body
)
if_graph = onnx.helper.make_graph(
[if_node],
"if_outer",
inputs=[
onnx.helper.make_tensor_value_info("cond", onnx.TensorProto.BOOL, []),
],
outputs=[
onnx.helper.make_tensor_value_info("res", onnx.TensorProto.FLOAT, [5]),
],
)
if_model = onnx.helper.make_model(if_graph)
if cond_array:
cond = np.array([1]).astype("bool")
else:
cond = np.array(1).astype("bool")
correct_out = x if cond else y
# TODO(jwfromm): Onnxruntime 1.0.0 is buggy with If statements. Replace this with
# verify_with_ort once we update versions.
for target, dev in tvm.testing.enabled_targets():
tvm_out = get_tvm_output_with_vm(if_model, [cond], target, dev, freeze_params=True)
for i in range(len(tvm_out)):
tvm.testing.assert_allclose(correct_out[i], tvm_out[i], rtol=1e-05, atol=1e-05)
@tvm.testing.uses_gpu
def test_if():
# Confirm that if works with cond as an array or scalar.
verify_if(cond_array=False)
verify_if(cond_array=True)
@tvm.testing.uses_gpu
def test_size():
def verify_size(indata):
node = helper.make_node(
"Size",
inputs=["X"],
outputs=["Y"],
)
graph = helper.make_graph(
[node],
"size_test",
inputs=[helper.make_tensor_value_info("X", TensorProto.INT64, list(indata.shape))],
outputs=[helper.make_tensor_value_info("Y", TensorProto.INT64, [])],
)
model = helper.make_model(graph, producer_name="size_test")
verify_with_ort_with_inputs(model, [indata], dtype="int64", use_vm=True, opset=11)
input_data = np.array([[1, 0], [1, 1]], dtype=np.int64)
verify_size(input_data)
input_data = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]], dtype=np.int64)
verify_size(input_data)
@tvm.testing.uses_gpu
def test_maxunpool():
def verify_maxunpool(data, indices, kernel_shape, strides, output_shape=None, pads=None):
input_names = ["xT", "xI"]
input_info = [
helper.make_tensor_value_info("xT", TensorProto.FLOAT, list(data.shape)),
helper.make_tensor_value_info("xI", TensorProto.INT64, list(indices.shape)),
]
input_values = [data, indices]
if output_shape is not None:
input_names.append("output_shape")
input_info.append(
helper.make_tensor_value_info(
"output_shape", TensorProto.INT64, list(output_shape.shape)
)
)
input_values.append(output_shape)
else:
# Compute expected output shape
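            # For the spatial dims this follows the ONNX MaxUnpool formula
            # strides[i] * (input[i] - 1) + kernel_shape[i] - pad_begin[i] - pad_end[i];
            # the leading [1, 1] / [0, 0] terms leave batch and channel unchanged.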
output_shape = np.asarray(([1, 1] + list(strides))) * np.asarray(list(data.shape))
output_shape += np.asarray(([0, 0] + list(kernel_shape))) - np.asarray(
([0, 0] + list(strides))
)
if pads is not None:
output_shape -= np.asarray(
[0, 0] + list(np.sum(np.reshape(list(pads), [-1, 2]), axis=-1))
)
output_shape = [int(i) for i in output_shape]
node = helper.make_node(
"MaxUnpool", inputs=input_names, outputs=["y"], kernel_shape=kernel_shape
)
if pads is not None:
pad_attr = helper.make_attribute("pads", pads)
node.attribute.append(pad_attr)
if strides is not None:
strides_attr = helper.make_attribute("strides", strides)
node.attribute.append(strides_attr)
graph = helper.make_graph(
[node],
"maxunpool_test",
inputs=input_info,
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, output_shape)],
)
        model = helper.make_model(graph, producer_name="maxunpool_test")
verify_with_ort_with_inputs(model, input_values, use_vm=True, opset=11)
# Basic test
xT = np.array([[[[5, 6], [7, 8]]]], dtype=np.float32)
xI = np.array([[[[0, 7], [13, 15]]]], dtype=np.int64)
verify_maxunpool(xT, xI, [2, 2], strides=[2, 2])
# Small stride
verify_maxunpool(xT, xI, [2, 2], strides=[1, 1])
# Big kernel
verify_maxunpool(xT, xI, [3, 3], strides=[2, 2])
# With output shape
output_shape = np.array((1, 1, 5, 5), dtype=np.int64)
verify_maxunpool(xT, xI, [2, 2], strides=[2, 2], output_shape=output_shape)
# With explicit reverse padding
pads = np.asarray([1, 1, 1, 1]).astype(np.int64)
verify_maxunpool(xT, xI, [2, 2], strides=[2, 2], pads=pads)
@tvm.testing.uses_gpu
def test_softplus():
def verify_softplus(indata):
node = helper.make_node(
"Softplus",
inputs=["X"],
outputs=["Y"],
)
graph = helper.make_graph(
[node],
"softplus_test",
inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, list(indata.shape))],
outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(indata.shape))],
)
model = helper.make_model(graph, producer_name="softplus_test")
verify_with_ort_with_inputs(model, [indata], dtype="float32", use_vm=True, opset=11)
# Simple case with all signs.
input_data = np.array([[-1, 0, 1]], dtype=np.float32)
verify_softplus(input_data)
# More fancy case.
input_data = np.random.randn(1, 32, 32, 3).astype("float32")
verify_softplus(input_data)
def test_cumsum():
def verify_cumsum(indata, axis, exclusive=0, reverse=0, type="float32"):
cumsum_node = onnx.helper.make_node(
"CumSum",
inputs=["X", "axis"],
outputs=["Y"],
)
if exclusive != 0:
exclusive_attr = helper.make_attribute("exclusive", exclusive)
cumsum_node.attribute.append(exclusive_attr)
if reverse != 0:
reverse_attr = helper.make_attribute("reverse", reverse)
cumsum_node.attribute.append(reverse_attr)
nodes = [
make_constant_node("axis", onnx.TensorProto.INT32, [1], [axis]),
cumsum_node,
]
if type == "float32":
tensor_type = TensorProto.FLOAT
else:
tensor_type = TensorProto.INT32
type = "int32"
graph = helper.make_graph(
nodes,
"cumsum_test",
inputs=[
helper.make_tensor_value_info("X", tensor_type, list(indata.shape)),
],
outputs=[helper.make_tensor_value_info("Y", tensor_type, list(indata.shape))],
)
model = helper.make_model(graph, producer_name="cumsum_test")
verify_with_ort_with_inputs(model, [indata], dtype=type, use_vm=True, opset=11)
data = (
np.array(
[
1.0,
2.0,
3.0,
4.0,
5.0,
6.0,
7.0,
8.0,
9.0,
10.0,
11.0,
12.0,
]
)
.astype(np.float32)
.reshape((3, 4))
)
verify_cumsum(data, 0)
verify_cumsum(data, 1)
verify_cumsum(data, 0, 1, 0)
verify_cumsum(data, 1, 1, 0)
verify_cumsum(data, 0, 0, 1)
verify_cumsum(data, 1, 0, 1)
verify_cumsum(data, 1, 1, 1)
data = np.random.randn(1, 32, 32, 3).astype("float32")
verify_cumsum(data, 1)
data = np.random.randn(1, 32, 32, 3).astype("int32")
verify_cumsum(data, 0, type="int32")
verify_cumsum(data, 1, type="int32")
verify_cumsum(data, 0, 1, 0, type="int32")
verify_cumsum(data, 1, 1, 0, type="int32")
verify_cumsum(data, 0, 0, 1, type="int32")
verify_cumsum(data, 1, 0, 1, type="int32")
verify_cumsum(data, 1, 1, 1, type="int32")
"""
The following parameterized tests load the tests that ONNX ships as
serialized ONNX files, inputs, and outputs. The goal of these tests
is to ensure the ONNX importer is in line with the ONNX specification.
To allow these tests to run in CI before all pass, a number of tests that
are not yet supported are skipped.
"""
from onnx import numpy_helper
f = onnx.__file__
import glob
onnx_test_folders = sorted(glob.glob("/".join(f.split("/")[0:-1]) + "/backend/test/data/node/*/"))
unsupported_onnx_tests = [
"test_basic_convinteger/",
"test_cast_DOUBLE_to_FLOAT16/",
"test_cast_FLOAT_to_STRING/",
"test_cast_STRING_to_FLOAT/",
"test_compress_0/",
"test_compress_1/",
"test_compress_default_axis/",
"test_compress_negative_axis/",
"test_convinteger_with_padding/",
"test_convtranspose_dilations/",
"test_convtranspose_output_shape/",
"test_cumsum_1d/",
"test_cumsum_1d_exclusive/",
"test_cumsum_1d_reverse/",
"test_cumsum_1d_reverse_exclusive/",
"test_cumsum_2d_axis_0/",
"test_cumsum_2d_axis_1/",
"test_cumsum_2d_negative_axis/",
"test_det_2d/",
"test_det_nd/",
"test_eyelike_populate_off_main_diagonal/",
"test_eyelike_with_dtype/",
"test_eyelike_without_dtype/",
"test_isinf_negative/",
"test_isinf_positive/",
"test_matmulinteger/",
"test_maxpool_2d_dilations/",
"test_maxpool_2d_same_lower/",
"test_maxpool_2d_same_upper/",
"test_maxpool_with_argmax_2d_precomputed_pads/",
"test_maxpool_with_argmax_2d_precomputed_strides/",
"test_maxunpool_export_with_output_shape/",
"test_mvn/",
"test_qlinearconv/",
"test_qlinearmatmul_2D/",
"test_qlinearmatmul_3D/",
"test_range_float_type_positive_delta_expanded/",
"test_range_int32_type_negative_delta_expanded/",
"test_resize_tf_crop_and_resize/",
## For these three tests, ONNX 1.6.0 has incorrect graphs, they pass with ONNX 1.7.0
"test_resize_upsample_sizes_nearest_ceil_half_pixel/",
"test_resize_upsample_sizes_nearest_floor_align_corners/",
"test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric/",
# ----
"test_reversesequence_batch/",
"test_reversesequence_time/",
"test_rnn_seq_length/",
"test_roialign/",
"test_round/",
"test_scan9_sum/",
"test_scan_sum/",
"test_scatternd/",
"test_simple_rnn_defaults/",
"test_simple_rnn_with_initial_bias/",
"test_strnormalizer_export_monday_casesensintive_lower/",
"test_strnormalizer_export_monday_casesensintive_nochangecase/",
"test_strnormalizer_export_monday_casesensintive_upper/",
"test_strnormalizer_export_monday_empty_output/",
"test_strnormalizer_export_monday_insensintive_upper_twodim/",
"test_strnormalizer_nostopwords_nochangecase/",
"test_tfidfvectorizer_tf_batch_onlybigrams_skip0/",
"test_tfidfvectorizer_tf_batch_onlybigrams_skip5/",
"test_tfidfvectorizer_tf_batch_uniandbigrams_skip5/",
"test_tfidfvectorizer_tf_only_bigrams_skip0/",
"test_tfidfvectorizer_tf_onlybigrams_levelempty/",
"test_tfidfvectorizer_tf_onlybigrams_skip5/",
"test_tfidfvectorizer_tf_uniandbigrams_skip5/",
"test_top_k_smallest/",
"test_unique_not_sorted_without_axis/",
"test_unique_sorted_with_axis/",
"test_unique_sorted_with_axis_3d/",
"test_unique_sorted_with_negative_axis/",
"test_unique_sorted_without_axis/",
"test_upsample_nearest/",
]
@pytest.mark.parametrize("test", onnx_test_folders)
def test_onnx_nodes(test):
for failure in unsupported_onnx_tests:
if failure in test:
pytest.skip()
break
onnx_model = onnx.load(test + "/model.onnx")
inputs = []
outputs = []
for dataset in glob.glob(test + "/*/"):
tensors = sorted(glob.glob(dataset + "/*.pb"))
for tensor in tensors:
new_tensor = onnx.TensorProto()
with open(tensor, "rb") as f:
new_tensor.ParseFromString(f.read())
if "input" in tensor.split("/")[-1]:
inputs.append(numpy_helper.to_array(new_tensor))
elif "output" in tensor.split("/")[-1]:
outputs.append(numpy_helper.to_array(new_tensor))
else:
                raise ImportError(str(tensor) + " not labeled as an input or an output")
tvm_val = get_tvm_output_with_vm(onnx_model, inputs, "llvm", tvm.cpu(0))
if len(outputs) == 1:
tvm.testing.assert_allclose(outputs[0], tvm_val, rtol=1e-5, atol=1e-5)
else:
for output, val in zip(outputs, tvm_val):
tvm.testing.assert_allclose(output, val, rtol=1e-5, atol=1e-5)
def test_wrong_input():
node = helper.make_node(
"Softplus",
inputs=["X"],
outputs=["Y"],
)
graph = helper.make_graph(
[node],
"softplus_test",
inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, list([5]))],
outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, list([5]))],
)
model = helper.make_model(graph, producer_name="softplus_test")
# Check that the graph can import correctly with proper shape definitions.
correct_shape_dict = {"X": [5]}
relay.frontend.from_onnx(model, shape=correct_shape_dict)
# Check that an assertion is triggered when an input not in the graph is provided.
wrong_shape_dict = {"Z": [5]}
with pytest.raises(AssertionError):
relay.frontend.from_onnx(model, shape=wrong_shape_dict)
def test_aten():
torch.set_grad_enabled(False)
def _convert_to_onnx(model, inputs):
file_name = "{}.onnx".format("aten_model")
torch.onnx.export(
model,
inputs,
file_name,
export_params=True,
verbose=False,
opset_version=10,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN,
)
onnx_model = onnx.load(file_name)
assert 's: "embedding_bag"' in str(onnx_model)
return onnx_model
def verify_embedding_bag(num_embedding, embedding_dim, data_shape, num_bags=None):
dummy_data = torch.randint(0, num_embedding - 1, data_shape)
tvm_inputs = [dummy_data.numpy()]
model = torch.nn.EmbeddingBag(num_embedding, embedding_dim)
onnx_model = _convert_to_onnx(model, dummy_data)
torch_out = model(dummy_data)
for target, ctx in tvm.testing.enabled_targets():
tvm_out = get_tvm_output_with_vm(
onnx_model, tvm_inputs, target, ctx, freeze_params=True, convert_to_static=True
)
tvm.testing.assert_allclose(torch_out.numpy(), tvm_out)
verify_embedding_bag(10, 3, [2, 10])
verify_embedding_bag(32, 2, [3, 3])
if __name__ == "__main__":
test_flatten()
test_reshape()
test_shape()
test_expand()
test_power()
test_squeeze()
test_unsqueeze()
test_slice()
test_floor()
test_ceil()
test_round()
test_isinf()
test_isnan()
test_clip()
test_clip_min_max_as_inputs()
test_onehot()
test_gemm()
test_matmul()
test_gather()
test_gatherelements()
test_gather_nd()
test_scatter()
test_lrn()
test_instance_norm()
test_upsample()
test_forward_min()
test_forward_max()
test_forward_mean()
test_forward_hardsigmoid()
test_forward_arg_min_max()
test_softmax()
test_constantofshape()
test_all_reduce_funcs()
test_pad()
test_split()
test_binary_ops()
test_unary_ops()
test_leaky_relu()
test_elu()
test_selu()
test_prelu()
test_ThresholdedRelu()
test_LogSoftmax()
test_resnet()
test_inception()
test_densenet()
test_sign()
test_not()
test_and()
test_tile()
test_erf()
test_where()
test_or()
test_depth_to_space()
test_space_to_depth()
test_batch_norm()
test_batch_norm_dynamic_subgraph()
test_conv()
test_convtranspose()
test_unsqueeze_constant()
test_pooling()
test_lppool()
test_lstm()
test_gru()
test_resize()
test_nonzero()
test_topk()
test_mod()
test_xor()
test_max_roi_pool()
test_roi_align()
test_range()
test_loop()
test_size()
test_maxunpool()
test_softplus()
test_cumsum()
test_wrong_input()
test_aten()
|
py | 1a3d0d5c5b852b51b8669a84d403554abf2f49fe | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: unversioned
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1ServiceAccountList(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, items=None, metadata=None):
"""
V1ServiceAccountList - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'items': 'list[V1ServiceAccount]',
'metadata': 'UnversionedListMeta'
}
self.attribute_map = {
'items': 'items',
'metadata': 'metadata'
}
self._items = items
self._metadata = metadata
@property
def items(self):
"""
Gets the items of this V1ServiceAccountList.
List of ServiceAccounts. More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md#service-accounts
:return: The items of this V1ServiceAccountList.
:rtype: list[V1ServiceAccount]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this V1ServiceAccountList.
List of ServiceAccounts. More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md#service-accounts
:param items: The items of this V1ServiceAccountList.
:type: list[V1ServiceAccount]
"""
if items is None:
raise ValueError("Invalid value for `items`, must not be `None`")
self._items = items
@property
def metadata(self):
"""
Gets the metadata of this V1ServiceAccountList.
Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
:return: The metadata of this V1ServiceAccountList.
:rtype: UnversionedListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1ServiceAccountList.
Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
:param metadata: The metadata of this V1ServiceAccountList.
:type: UnversionedListMeta
"""
self._metadata = metadata
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
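# Usage sketch (hypothetical objects; assumes a V1ServiceAccount instance `sa`
# and an UnversionedListMeta instance `meta` are already constructed):
#   sa_list = V1ServiceAccountList(items=[sa], metadata=meta)
#   print(sa_list.to_str())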
|
py | 1a3d0e1dfb25034682257ad8da9dc4a13720aebd | import argparse
from typing import Tuple
import albumentations as A
from albumentations.pytorch.transforms import ToTensorV2
import torchvision
from torch.utils.data import Dataset
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from deepext_with_lightning.callbacks import GenerateAttentionMap, CSVClassificationResult
from deepext_with_lightning.models.layers.backbone_key import BackBoneKey
from deepext_with_lightning.models.base import ClassificationModel
from deepext_with_lightning.models.classification import *
from deepext_with_lightning.transforms import AlbumentationsOnlyImageWrapperTransform
from deepext_with_lightning.dataset.functions import label_names_to_dict
from common import CLASSIFICATION_DATASET_INFO, build_data_loader, get_logger
VALID_MODEL_KEYS = ["efficientnet", "mobilenet", "abn", "customnet"]
# NOTE: Add new models and datasets here.
def build_model(args, n_classes) -> ClassificationModel:
if args.model == "efficientnet":
return EfficientNet(num_classes=n_classes, lr=args.lr, network=f"efficientnet-b{args.efficientnet_scale}")
if args.model == "mobilenet":
return MobileNetV3(num_classes=n_classes, lr=args.lr, pretrained=False)
if args.model == "abn":
return AttentionBranchNetwork(n_classes=n_classes, lr=args.lr, backbone=BackBoneKey.from_val(args.submodel))
if args.model == "customnet":
return CustomClassificationNetwork(n_classes=n_classes, lr=args.lr,
backbone=BackBoneKey.from_val(args.submodel))
raise RuntimeError(f"Invalid model name: {args.model}")
def build_transforms(args) -> Tuple[any, any]:
train_transforms = A.Compose([
A.HorizontalFlip(p=0.3),
A.RandomResizedCrop(width=args.image_size, height=args.image_size, scale=(0.7, 1.2)),
A.Rotate((-30, 30), p=0.3),
A.CoarseDropout(max_width=int(args.image_size / 8), max_height=int(args.image_size / 8), max_holes=3, p=0.3),
ToTensorV2(),
])
train_transforms = AlbumentationsOnlyImageWrapperTransform(train_transforms)
test_transforms = A.Compose([
A.Resize(width=args.image_size, height=args.image_size),
ToTensorV2(),
])
test_transforms = AlbumentationsOnlyImageWrapperTransform(test_transforms)
return train_transforms, test_transforms
def build_dataset(args, train_transforms, test_transforms) -> Tuple[Dataset, Dataset]:
if args.dataset == "stl10":
train_dataset = torchvision.datasets.STL10(root=args.dataset_root, download=True, split="train",
transform=train_transforms)
test_dataset = torchvision.datasets.STL10(root=args.dataset_root, download=True, split="test",
transform=test_transforms)
return train_dataset, test_dataset
if args.dataset == "cifar10":
train_dataset = torchvision.datasets.CIFAR10(root=args.dataset_root, download=True, train=True,
transform=train_transforms)
test_dataset = torchvision.datasets.CIFAR10(root=args.dataset_root, download=True, train=False,
transform=test_transforms)
return train_dataset, test_dataset
raise RuntimeError(f"Invalid dataset name: {args.dataset_root}")
parser = argparse.ArgumentParser(description='Pytorch Image classification training.')
parser.add_argument('--lr', type=float, default=1e-4, help='Learning rate')
parser.add_argument('--dataset', type=str, default="stl10",
help=f'Dataset type in {list(CLASSIFICATION_DATASET_INFO.keys())}')
parser.add_argument('--epoch', type=int, default=100, help='Number of epochs')
parser.add_argument('--batch_size', type=int, default=8, help='Batch size')
parser.add_argument('--dataset_root', type=str, required=True, help='Dataset folder path')
parser.add_argument('--progress_dir', type=str, default=None, help='Directory for saving progress')
parser.add_argument('--model', type=str, default="mobilenet", help=f"Model type in {VALID_MODEL_KEYS}")
parser.add_argument('--load_checkpoint_path', type=str, default=None, help="Saved checkpoint path")
parser.add_argument('--save_checkpoint_path', type=str, default="checkpoints", help="Saving checkpoint directory")
parser.add_argument('--efficientnet_scale', type=int, default=0, help="Number of scale of EfficientNet.")
parser.add_argument('--image_size', type=int, default=96, help="Image size.")
parser.add_argument('--submodel', type=str, default="resnet18", help=f'Type of submodel(resnet18, resnet34...).')
parser.add_argument('--val_every_n_epoch', type=int, default=5, help="Validate every n epoch.")
parser.add_argument('--log_type', type=str, default="mlflow", help="")
if __name__ == "__main__":
args = parser.parse_args()
# Fetch dataset.
dataset_info = CLASSIFICATION_DATASET_INFO.get(args.dataset)
if dataset_info is None:
raise ValueError(
f"Invalid dataset name - {args.dataset}. Required [{list(CLASSIFICATION_DATASET_INFO.keys())}]")
label_names = dataset_info["label_names"]
class_index_dict = label_names_to_dict(label_names)
# Fetch dataset.
train_transforms, test_transforms = build_transforms(args)
train_dataset, test_dataset = build_dataset(args, train_transforms, test_transforms)
train_data_loader, test_data_loader = build_data_loader(args, train_dataset, test_dataset)
# Fetch model and load weight.
model = build_model(args, dataset_info["n_classes"])
if args.load_checkpoint_path:
model = model.load_from_checkpoint(args.load_checkpoint_path)
# Training setting.
logger = get_logger("classification_demo", args, model)
callbacks = [ModelCheckpoint(period=args.val_every_n_epoch, filename=f"{model.generate_model_name()}",
dirpath=args.save_checkpoint_path, monitor='val_acc', verbose=True, mode="max"),
CSVClassificationResult(period=args.epoch, model=model, dataset=test_dataset,
label_names=label_names, out_filepath=f"{args.progress_dir}/result.csv"), ]
if args.progress_dir:
if isinstance(model, AttentionBranchNetwork):
callbacks.append(GenerateAttentionMap(model=model, output_dir=args.progress_dir, period=5,
dataset=test_dataset, label_names=label_names))
# Training.
Trainer(max_epochs=args.epoch, callbacks=callbacks, gpus=-1,
check_val_every_n_epoch=args.val_every_n_epoch, logger=logger) \
.fit(model, train_dataloader=train_data_loader, val_dataloaders=test_data_loader)
|
py | 1a3d0e67835a8103b4819dcb3fee94b510b3675e | import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="x", parent_name="scattergl.marker.colorbar", **kwargs
):
super(XValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
max=kwargs.pop("max", 3),
min=kwargs.pop("min", -2),
**kwargs
)
|
py | 1a3d0e952c6c7db62bade39467c5cfd157784479 | import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_hosts_file(host):
f = host.file('/etc/hosts')
assert f.exists
assert f.user == 'root'
assert f.group == 'root'
def test_sssd_ldap_user(host):
user = host.user('test')
assert user.uid == 5000
assert user.gid == 1
def test_sssd_ldap_user_filtered(host):
user = host.user('filtered-test')
assert not user.exists
def test_sssd_service_state(host):
assert host.service('sssd').is_enabled
assert host.service('sssd').is_running
def test_sshd_service_state(host):
ssh_service_name = {
'centos': 'sshd',
'ubuntu': 'ssh'
}
assert host.service(
ssh_service_name[host.system_info.distribution]).is_enabled
assert host.service(
ssh_service_name[host.system_info.distribution]).is_running
def test_ssh_access(host):
host.run_test(
'/usr/bin/ssh '
'-o StrictHostKeyChecking=no '
'-o BatchMode=yes '
'-T '
'-i /root/.ssh/id_rsa '
'-l test '
'localhost '
'exit'
)
def test_homedir_created(host):
assert host.file('/home/test').is_directory
|
py | 1a3d0eb16325e4841fb6f3c211bf26fcaa412dc7 | from ..sdoc import (
SLine,
SAnnotationPush,
SAnnotationPop,
)
from ..syntax import Token
from ..render import as_lines
from ..utils import rfind_idx
_COLOR_DEPS_INSTALLED = True
try:
from pygments import token
from pygments import styles
except ImportError:
_COLOR_DEPS_INSTALLED = False
else:
_SYNTAX_TOKEN_TO_PYGMENTS_TOKEN = {
Token.KEYWORD_CONSTANT: token.Keyword.Constant,
Token.NAME_BUILTIN: token.Name.Builtin,
Token.NAME_ENTITY: token.Name.Entity,
Token.NAME_FUNCTION: token.Name.Function,
Token.LITERAL_STRING: token.String,
Token.STRING_AFFIX: token.String.Affix,
Token.STRING_ESCAPE: token.String.Escape,
Token.NUMBER_BINARY: token.Number.Bin,
Token.NUMBER_INT: token.Number.Integer,
Token.NUMBER_FLOAT: token.Number.Float,
Token.OPERATOR: token.Operator,
Token.PUNCTUATION: token.Punctuation,
Token.COMMENT_SINGLE: token.Comment.Single,
}
default_style = styles.get_style_by_name('monokai')
try:
import colorful
except ImportError:
_COLOR_DEPS_INSTALLED = False
def styleattrs_to_colorful(attrs):
c = colorful.reset
if attrs['color'] or attrs['bgcolor']:
# Colorful doesn't have a way to directly set Hex/RGB
# colors- until I find a better way, we do it like this :)
accessor = ''
if attrs['color']:
colorful.update_palette({'peprintCurrFg': attrs['color']})
accessor = 'peprintCurrFg'
if attrs['bgcolor']:
colorful.update_palette({'peprintCurrBg': attrs['bgcolor']})
accessor += '_on_peprintCurrBg'
c &= getattr(colorful, accessor)
if attrs['bold']:
c &= colorful.bold
if attrs['italic']:
c &= colorful.italic
if attrs['underline']:
c &= colorful.underline
return c
def colored_render_to_stream(stream, sdocs, style, newline='\n', separator=' '):
if not _COLOR_DEPS_INSTALLED:
raise Exception(
"'pygments' and 'colorful' packages must be "
"installed to use colored output."
)
if style is None:
style = default_style
evald = list(sdocs)
if not evald:
return
colorstack = []
sdoc_lines = as_lines(evald)
for sdoc_line in sdoc_lines:
last_text_sdoc_idx = rfind_idx(
lambda sdoc: isinstance(sdoc, str),
sdoc_line
)
# Edge case: trailing whitespace on a line.
# Currently happens on multiline str value in a dict:
# there's a trailing whitespace after the colon that's
# hard to eliminate at the doc level.
if last_text_sdoc_idx != -1:
last_text_sdoc = sdoc_line[last_text_sdoc_idx]
sdoc_line[last_text_sdoc_idx] = last_text_sdoc.rstrip()
for sdoc in sdoc_line:
if isinstance(sdoc, str):
stream.write(sdoc)
elif isinstance(sdoc, SLine):
stream.write(newline + separator * sdoc.indent)
elif isinstance(sdoc, SAnnotationPush):
if isinstance(sdoc.value, Token):
pygments_token = _SYNTAX_TOKEN_TO_PYGMENTS_TOKEN[sdoc.value]
tokenattrs = style.style_for_token(pygments_token)
color = styleattrs_to_colorful(tokenattrs)
colorstack.append(color)
stream.write(str(color))
elif isinstance(sdoc, SAnnotationPop):
try:
colorstack.pop()
except IndexError:
continue
if colorstack:
stream.write(str(colorstack[-1]))
else:
stream.write(str(colorful.reset))
if colorstack:
stream.write(str(colorful.reset))
|
py | 1a3d0f2e8ba9ba711a353a306a5e656cb296d2bd | # [SublimeLinter @python:3]
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, print_function, absolute_import
import threading
import win32api
import win32con
import win32gui
class drag_accept_files(object):
def __init__(self, wnd, callback):
super(drag_accept_files, self).__init__()
self.callback = callback
self.hwnd = int(wnd.wm_frame(), 16)
self._old_wnd_proc = win32gui.SetWindowLong(
self.hwnd, win32con.GWL_WNDPROC, self._new_wnd_proc)
self.accept_files = True
@property
def accept_files(self):
raise NotImplementedError()
@accept_files.setter
def accept_files(self, value):
win32gui.DragAcceptFiles(self.hwnd, bool(value))
def _new_wnd_proc(self, hwnd, msg, wparam, lparam):
assert self.hwnd == hwnd
if msg == win32con.WM_DROPFILES:
files = []
for i in range(win32api.DragQueryFile(wparam)):
files.append(win32api.DragQueryFile(wparam, i))
if files:
threading.Thread(target=self.callback, args=(files, )).start()
if msg == win32con.WM_DESTROY:
win32api.SetWindowLong(hwnd, win32con.GWL_WNDPROC, self._old_wnd_proc)
return win32gui.CallWindowProc(self._old_wnd_proc, hwnd, msg, wparam, lparam)
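# Minimal usage sketch (hypothetical names; assumes a Tkinter root window,
# since the constructor derives the HWND from wnd.wm_frame()):
#   import tkinter as tk
#   def on_drop(paths):
#       print("Dropped files:", paths)
#   root = tk.Tk()
#   dnd = drag_accept_files(root, on_drop)
#   root.mainloop()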
|
py | 1a3d0fb20f3dfa6dd1e152c9140e6479c2ba354d | #!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
r""" TileLink-Uncached Lightweight Xbar generator
"""
import argparse
import logging as log
import sys
from pathlib import Path
import hjson
import tlgen
def main():
parser = argparse.ArgumentParser(prog="tlgen")
parser.add_argument('--topcfg',
'-t',
metavar='file',
type=argparse.FileType('r'),
help="`top_cfg.hjson` file.")
parser.add_argument('--doc',
'-d',
action='store_true',
help='Generate self HTML document in stdout')
parser.add_argument(
'--outdir',
'-o',
help=
"Target directory. tlgen needs 'rtl/' and 'dv/' directory under the target dir"
)
parser.add_argument('--ip-path',
default="",
help='''
Additional path to generated rtl/ or dv/ folders: outdir/ip_path/rtl
Only needed when there are multiple xbar in outdir''')
parser.add_argument('--verbose', '-v', action='store_true', help='Verbose')
args = parser.parse_args()
if args.verbose:
log.basicConfig(format="%(levelname)s: %(message)s", level=log.DEBUG)
else:
log.basicConfig(format="%(levelname)s: %(message)s")
if args.doc:
# Generate Doc and return
sys.stdout.write(tlgen.selfdoc(heading=3, cmd='tlgen.py --doc'))
return
# Check if topcfg defined
    if not args.topcfg or not args.outdir:
        log.error("--topcfg option is mandatory to generate codes.")
        raise SystemExit(1)
    # Check if outdir exists. If not, show error and exit
    if not Path(args.outdir).is_dir():
        log.error("'--outdir' should point to writable directory")
        raise SystemExit(1)
# Load contents of top_cfg
# Skip this part and use internal structure at this time
try:
obj = hjson.load(args.topcfg, use_decimal=True)
except ValueError:
raise SystemExit(sys.exc_info()[1])
log.info(obj)
xbar = tlgen.validate(obj)
xbar.ip_path = args.ip_path
if not tlgen.elaborate(xbar):
log.error("Elaboration failed." + repr(xbar))
# Generate
out_rtl, out_pkg, out_core = tlgen.generate(xbar)
rtl_path = Path(args.outdir) / args.ip_path / 'rtl/autogen'
rtl_path.mkdir(parents=True, exist_ok=True)
dv_path = Path(args.outdir) / args.ip_path / 'dv/autogen'
dv_path.mkdir(parents=True, exist_ok=True)
rtl_filename = "xbar_%s.sv" % (xbar.name)
rtl_filepath = rtl_path / rtl_filename
with rtl_filepath.open(mode='w', encoding='UTF-8') as fout:
fout.write(out_rtl)
pkg_filename = "tl_%s_pkg.sv" % (xbar.name)
pkg_filepath = rtl_path / pkg_filename
with pkg_filepath.open(mode='w', encoding='UTF-8') as fout:
fout.write(out_pkg)
core_filename = "xbar_%s.core" % (xbar.name)
core_filepath = rtl_path / core_filename
with core_filepath.open(mode='w', encoding='UTF-8') as fout:
fout.write(out_core)
# generate TB
tlgen.generate_tb(xbar, dv_path)
if __name__ == "__main__":
main()
|
py | 1a3d0fda2bf7015d722e72aec448f9307d5c028b | import os
from urllib.request import urlretrieve
import pandas as pd
FREMONT_URL = 'https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD'
def get_fremont_data(filename='Fremont.csv', url=FREMONT_URL, force_download=False):
"""Download and cache the fremont data
Parameters
==========
filename : string (optional)
location to save the data
url : string (optional)
web location of the data
force_download : bool (optional)
if True, force redownload of data
Returns
=======
data : pandas.DataFrame
The fremont bridge data
"""
if force_download or not os.path.exists(filename):
urlretrieve(url, filename)
    data = pd.read_csv(filename, index_col='Date')
try:
data.index = pd.to_datetime(data.index, format='%m/%d/%Y %I:%M:%S %p')
except TypeError:
data.index = pd.to_datetime(data.index)
data.columns = ['West', 'East']
data['Total'] = data['West'] + data['East']
return data
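# Example usage (a sketch: the first call downloads the CSV, later calls reuse
# the cached Fremont.csv):
#   data = get_fremont_data()
#   print(data.resample('W').sum().head())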
|
py | 1a3d106bf8086af83f8530d751e81df3ee195b0d | """Class for Braava devices."""
import logging
from homeassistant.components.vacuum import SUPPORT_FAN_SPEED
from .irobot_base import SUPPORT_IROBOT, IRobotVacuum
_LOGGER = logging.getLogger(__name__)
ATTR_DETECTED_PAD = "detected_pad"
ATTR_LID_CLOSED = "lid_closed"
ATTR_TANK_PRESENT = "tank_present"
ATTR_TANK_LEVEL = "tank_level"
ATTR_PAD_WETNESS = "spray_amount"
OVERLAP_STANDARD = 67
OVERLAP_DEEP = 85
OVERLAP_EXTENDED = 25
MOP_STANDARD = "Standard"
MOP_DEEP = "Deep"
MOP_EXTENDED = "Extended"
BRAAVA_MOP_BEHAVIORS = [MOP_STANDARD, MOP_DEEP, MOP_EXTENDED]
BRAAVA_SPRAY_AMOUNT = [1, 2, 3]
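# Fan speed strings are exposed as "<behavior>-<spray amount>", e.g. "Standard-2":
# the behavior half maps to the rankOverlap preference and the spray half to
# the padWetness preference set in async_set_fan_speed below.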
# Braava Jets can set mopping behavior through fanspeed
SUPPORT_BRAAVA = SUPPORT_IROBOT | SUPPORT_FAN_SPEED
class BraavaJet(IRobotVacuum):
"""Braava Jet."""
def __init__(self, roomba, blid):
"""Initialize the Roomba handler."""
super().__init__(roomba, blid)
# Initialize fan speed list
speed_list = []
for behavior in BRAAVA_MOP_BEHAVIORS:
for spray in BRAAVA_SPRAY_AMOUNT:
speed_list.append(f"{behavior}-{spray}")
self._speed_list = speed_list
@property
def supported_features(self):
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_BRAAVA
@property
def fan_speed(self):
"""Return the fan speed of the vacuum cleaner."""
# Mopping behavior and spray amount as fan speed
rank_overlap = self.vacuum_state.get("rankOverlap", {})
behavior = None
if rank_overlap == OVERLAP_STANDARD:
behavior = MOP_STANDARD
elif rank_overlap == OVERLAP_DEEP:
behavior = MOP_DEEP
elif rank_overlap == OVERLAP_EXTENDED:
behavior = MOP_EXTENDED
pad_wetness = self.vacuum_state.get("padWetness", {})
# "disposable" and "reusable" values are always the same
pad_wetness_value = pad_wetness.get("disposable")
return f"{behavior}-{pad_wetness_value}"
@property
def fan_speed_list(self):
"""Get the list of available fan speed steps of the vacuum cleaner."""
return self._speed_list
async def async_set_fan_speed(self, fan_speed, **kwargs):
"""Set fan speed."""
try:
split = fan_speed.split("-", 1)
behavior = split[0]
spray = int(split[1])
if behavior.capitalize() in BRAAVA_MOP_BEHAVIORS:
behavior = behavior.capitalize()
except IndexError:
_LOGGER.error(
"Fan speed error: expected {behavior}-{spray_amount}, got '%s'",
fan_speed,
)
return
except ValueError:
_LOGGER.error("Spray amount error: expected integer, got '%s'", split[1])
return
if behavior not in BRAAVA_MOP_BEHAVIORS:
_LOGGER.error(
"Mop behavior error: expected one of %s, got '%s'",
str(BRAAVA_MOP_BEHAVIORS),
behavior,
)
return
if spray not in BRAAVA_SPRAY_AMOUNT:
_LOGGER.error(
"Spray amount error: expected one of %s, got '%d'",
str(BRAAVA_SPRAY_AMOUNT),
spray,
)
return
overlap = 0
if behavior == MOP_STANDARD:
overlap = OVERLAP_STANDARD
elif behavior == MOP_DEEP:
overlap = OVERLAP_DEEP
else:
overlap = OVERLAP_EXTENDED
await self.hass.async_add_executor_job(
self.vacuum.set_preference, "rankOverlap", overlap
)
await self.hass.async_add_executor_job(
self.vacuum.set_preference,
"padWetness",
{"disposable": spray, "reusable": spray},
)
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
state_attrs = super().device_state_attributes
# Get Braava state
state = self.vacuum_state
detected_pad = state.get("detectedPad")
mop_ready = state.get("mopReady", {})
lid_closed = mop_ready.get("lidClosed")
tank_present = mop_ready.get("tankPresent")
tank_level = state.get("tankLvl")
state_attrs[ATTR_DETECTED_PAD] = detected_pad
state_attrs[ATTR_LID_CLOSED] = lid_closed
state_attrs[ATTR_TANK_PRESENT] = tank_present
state_attrs[ATTR_TANK_LEVEL] = tank_level
return state_attrs
|
py | 1a3d10810d78be5ec87bbe6459a56d31871767a1 | # Generated by Django 3.1.1 on 2020-10-15 22:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('hackathon', '0003_auto_20201015_2020'),
]
operations = [
migrations.AddField(
model_name='hackawardcategory',
name='hackathon',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='awards', to='hackathon.hackathon'),
preserve_default=False,
),
migrations.AddField(
model_name='hackawardcategory',
name='winning_project',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='hackathon.hackproject'),
),
migrations.AddField(
model_name='hackteam',
name='hackathon',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='teams', to='hackathon.hackathon'),
preserve_default=False,
),
migrations.AddField(
model_name='hackteam',
name='project',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='hackathon.hackproject'),
),
migrations.CreateModel(
name='HackProjectScoreCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(auto_now=True)),
('category', models.CharField(default='', max_length=255)),
('score', models.IntegerField(default=0)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='hackprojectscorecategory_created_by', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Hack project score categories',
},
),
migrations.CreateModel(
name='HackProjectScore',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(auto_now=True)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='hackprojectscore_created_by', to=settings.AUTH_USER_MODEL)),
('judge', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='scores', to='hackathon.hackproject')),
('score', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hackathon.hackprojectscorecategory')),
],
),
]
|
py | 1a3d10a2f9783fd2dcf12c72107e17ebb7ddb61d | """
Author: Daisuke Oyama
Tests for normal_form_game.py
"""
from __future__ import division
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import eq_, ok_, raises
from quantecon.game_theory import (
Player, NormalFormGame, pure2mixed, best_response_2p
)
# Player #
class TestPlayer_1opponent:
"""Test the methods of Player with one opponent player"""
def setUp(self):
"""Setup a Player instance"""
coordination_game_matrix = [[4, 0], [3, 2]]
self.player = Player(coordination_game_matrix)
def test_best_response_against_pure(self):
eq_(self.player.best_response(1), 1)
def test_best_response_against_mixed(self):
eq_(self.player.best_response([1/2, 1/2]), 1)
def test_best_response_list_when_tie(self):
"""best_response with tie_breaking=False"""
assert_array_equal(
sorted(self.player.best_response([2/3, 1/3], tie_breaking=False)),
sorted([0, 1])
)
def test_best_response_with_random_tie_breaking(self):
"""best_response with tie_breaking='random'"""
ok_(self.player.best_response([2/3, 1/3], tie_breaking='random')
in [0, 1])
seed = 1234
br0 = self.player.best_response([2/3, 1/3], tie_breaking='random',
random_state=seed)
br1 = self.player.best_response([2/3, 1/3], tie_breaking='random',
random_state=seed)
eq_(br0, br1)
def test_best_response_with_smallest_tie_breaking(self):
"""best_response with tie_breaking='smallest' (default)"""
eq_(self.player.best_response([2/3, 1/3]), 0)
def test_best_response_with_payoff_perturbation(self):
"""best_response with payoff_perturbation"""
eq_(self.player.best_response([2/3, 1/3],
payoff_perturbation=[0, 0.1]),
1)
eq_(self.player.best_response([2, 1], # int
payoff_perturbation=[0, 0.1]),
1)
def test_is_best_response_against_pure(self):
ok_(self.player.is_best_response(0, 0))
def test_is_best_response_against_mixed(self):
ok_(self.player.is_best_response([1/2, 1/2], [2/3, 1/3]))
class TestPlayer_2opponents:
"""Test the methods of Player with two opponent players"""
def setUp(self):
"""Setup a Player instance"""
payoffs_2opponents = [[[3, 6],
[4, 2]],
[[1, 0],
[5, 7]]]
self.player = Player(payoffs_2opponents)
def test_payoff_vector_against_pure(self):
assert_array_equal(self.player.payoff_vector((0, 1)), [6, 0])
def test_is_best_response_against_pure(self):
ok_(not self.player.is_best_response(0, (1, 0)))
def test_best_response_against_pure(self):
eq_(self.player.best_response((1, 1)), 1)
def test_best_response_list_when_tie(self):
"""
best_response against a mixed action profile with
tie_breaking=False
"""
assert_array_equal(
sorted(self.player.best_response(([3/7, 4/7], [1/2, 1/2]),
tie_breaking=False)),
sorted([0, 1])
)
def test_random_choice():
n, m = 5, 4
payoff_matrix = np.zeros((n, m))
player = Player(payoff_matrix)
eq_(player.random_choice([0]), 0)
actions = list(range(player.num_actions))
ok_(player.random_choice() in actions)
# NormalFormGame #
class TestNormalFormGame_Sym2p:
"""Test the methods of NormalFormGame with symmetric two players"""
def setUp(self):
"""Setup a NormalFormGame instance"""
coordination_game_matrix = [[4, 0], [3, 2]]
self.g = NormalFormGame(coordination_game_matrix)
def test_getitem(self):
assert_array_equal(self.g[0, 1], [0, 3])
def test_is_nash_pure(self):
ok_(self.g.is_nash((0, 0)))
def test_is_nash_mixed(self):
ok_(self.g.is_nash(([2/3, 1/3], [2/3, 1/3])))
class TestNormalFormGame_Asym2p:
"""Test the methods of NormalFormGame with asymmetric two players"""
def setUp(self):
"""Setup a NormalFormGame instance"""
matching_pennies_bimatrix = [[(1, -1), (-1, 1)],
[(-1, 1), (1, -1)]]
self.g = NormalFormGame(matching_pennies_bimatrix)
def test_getitem(self):
assert_array_equal(self.g[1, 0], [-1, 1])
def test_is_nash_against_pure(self):
ok_(not self.g.is_nash((0, 0)))
def test_is_nash_against_mixed(self):
ok_(self.g.is_nash(([1/2, 1/2], [1/2, 1/2])))
class TestNormalFormGame_3p:
"""Test the methods of NormalFormGame with three players"""
def setUp(self):
"""Setup a NormalFormGame instance"""
payoffs_2opponents = [[[3, 6],
[4, 2]],
[[1, 0],
[5, 7]]]
player = Player(payoffs_2opponents)
self.g = NormalFormGame([player for i in range(3)])
def test_getitem(self):
assert_array_equal(self.g[0, 0, 1], [6, 4, 1])
def test_is_nash_pure(self):
ok_(self.g.is_nash((0, 0, 0)))
ok_(not self.g.is_nash((0, 0, 1)))
def test_is_nash_mixed(self):
p = (1 + np.sqrt(65)) / 16
ok_(self.g.is_nash(([1 - p, p], [1 - p, p], [1 - p, p])))
def test_normalformgame_input_action_sizes():
g = NormalFormGame((2, 3, 4))
eq_(g.N, 3) # Number of players
assert_array_equal(
g.players[0].payoff_array,
np.zeros((2, 3, 4))
)
assert_array_equal(
g.players[1].payoff_array,
np.zeros((3, 4, 2))
)
assert_array_equal(
g.players[2].payoff_array,
np.zeros((4, 2, 3))
)
def test_normalformgame_setitem():
g = NormalFormGame((2, 2))
g[0, 0] = (0, 10)
g[0, 1] = (0, 10)
g[1, 0] = (3, 5)
g[1, 1] = (-2, 0)
assert_array_equal(
g.players[0].payoff_array,
[[0, 0], [3, -2]]
)
assert_array_equal(
g.players[1].payoff_array,
[[10, 5], [10, 0]]
)
def test_normalformgame_constant_payoffs():
g = NormalFormGame((2, 2))
ok_(g.is_nash((0, 0)))
ok_(g.is_nash((0, 1)))
ok_(g.is_nash((1, 0)))
ok_(g.is_nash((1, 1)))
def test_normalformgame_payoff_profile_array():
nums_actions = (2, 3, 4)
for N in range(1, len(nums_actions)+1):
payoff_arrays = [
np.arange(np.prod(nums_actions[0:N])).reshape(nums_actions[i:N] +
nums_actions[0:i])
for i in range(N)
]
players = [Player(payoff_array) for payoff_array in payoff_arrays]
g = NormalFormGame(players)
g_new = NormalFormGame(g.payoff_profile_array)
for player_new, payoff_array in zip(g_new.players, payoff_arrays):
assert_array_equal(player_new.payoff_array, payoff_array)
# Trivial cases with one player #
class TestPlayer_0opponents:
"""Test for trivial Player with no opponent player"""
def setUp(self):
"""Setup a Player instance"""
payoffs = [0, 1]
self.player = Player(payoffs)
def test_payoff_vector(self):
"""Trivial player: payoff_vector"""
assert_array_equal(self.player.payoff_vector(None), [0, 1])
def test_is_best_response(self):
"""Trivial player: is_best_response"""
ok_(self.player.is_best_response(1, None))
def test_best_response(self):
"""Trivial player: best_response"""
eq_(self.player.best_response(None), 1)
class TestNormalFormGame_1p:
"""Test for trivial NormalFormGame with a single player"""
def setUp(self):
"""Setup a NormalFormGame instance"""
data = [[0], [1], [1]]
self.g = NormalFormGame(data)
def test_construction(self):
"""Trivial game: construction"""
ok_(self.g.N == 1)
assert_array_equal(self.g.players[0].payoff_array, [0, 1, 1])
def test_getitem(self):
"""Trivial game: __getitem__"""
eq_(self.g[0], 0)
def test_is_nash_pure(self):
"""Trivial game: is_nash with pure action"""
ok_(self.g.is_nash((1,)))
ok_(not self.g.is_nash((0,)))
def test_is_nash_mixed(self):
"""Trivial game: is_nash with mixed action"""
ok_(self.g.is_nash(([0, 1/2, 1/2],)))
def test_normalformgame_input_action_sizes_1p():
g = NormalFormGame(2)
eq_(g.N, 1) # Number of players
assert_array_equal(
g.players[0].payoff_array,
np.zeros(2)
)
def test_normalformgame_setitem_1p():
g = NormalFormGame(2)
eq_(g.N, 1) # Number of players
g[0] = 10 # Set payoff 10 for action 0
eq_(g.players[0].payoff_array[0], 10)
# Test __repr__ #
def test_player_repr():
nums_actions = (2, 3, 4)
payoff_arrays = [
np.arange(np.prod(nums_actions[0:i])).reshape(nums_actions[0:i])
for i in range(1, len(nums_actions)+1)
]
players = [Player(payoff_array) for payoff_array in payoff_arrays]
for player in players:
player_new = eval(repr(player))
assert_array_equal(player_new.payoff_array, player.payoff_array)
# Invalid inputs #
@raises(ValueError)
def test_normalformgame_invalid_input_players_shape_inconsistent():
p0 = Player(np.zeros((2, 3)))
p1 = Player(np.zeros((2, 3)))
g = NormalFormGame([p0, p1])
@raises(ValueError)
def test_normalformgame_invalid_input_players_num_inconsistent():
p0 = Player(np.zeros((2, 2, 2)))
p1 = Player(np.zeros((2, 2, 2)))
g = NormalFormGame([p0, p1])
@raises(ValueError)
def test_normalformgame_invalid_input_players_dtype_inconsistent():
p0 = Player(np.zeros((2, 2), dtype=int))
p1 = Player(np.zeros((2, 2), dtype=float))
g = NormalFormGame([p0, p1])
@raises(ValueError)
def test_normalformgame_invalid_input_nosquare_matrix():
g = NormalFormGame(np.zeros((2, 3)))
@raises(ValueError)
def test_normalformgame_invalid_input_payoff_profiles():
g = NormalFormGame(np.zeros((2, 2, 1)))
# Utility functions #
def test_pure2mixed():
num_actions = 3
pure_action = 0
mixed_action = [1., 0., 0.]
assert_array_equal(pure2mixed(num_actions, pure_action), mixed_action)
# Numba jitted functions #
def test_best_response_2p():
test_case0 = {
'payoff_array': np.array([[4, 0], [3, 2], [0, 3]]),
'mixed_actions':
[np.array([1, 0]), np.array([0.5, 0.5]), np.array([0, 1])],
'brs_expected': [0, 1, 2]
}
test_case1 = {
'payoff_array': np.zeros((2, 3)),
'mixed_actions': [np.array([1, 0, 0]), np.array([1/3, 1/3, 1/3])],
'brs_expected': [0, 0]
}
for test_case in [test_case0, test_case1]:
for mixed_action, br_expected in zip(test_case['mixed_actions'],
test_case['brs_expected']):
br_computed = \
best_response_2p(test_case['payoff_array'], mixed_action)
eq_(br_computed, br_expected)
if __name__ == '__main__':
import sys
import nose
argv = sys.argv[:]
argv.append('--verbose')
argv.append('--nocapture')
nose.main(argv=argv, defaultTest=__file__)
|
py | 1a3d1148109dcc0e87e3093f2b8c97e953603ca7 | # Copyright (c) 2021 Cisco Systems, Inc. and its affiliates
# All rights reserved.
# Use of this source code is governed by a BSD 3-Clause License
# that can be found in the LICENSE file.
import pytest
from swagger_server.utils import get_simple_subject, SimpleSubjectType
from swagger_server.models import Subject, SimpleSubject, ComplexSubject, Aliases, Email, DID, Account, IssSub, Opaque, PhoneNumber, JwtID
@pytest.mark.parametrize("expected_result, class_to_search, subject", [
# test getting subject types that are present
[Email(email="[email protected]"), Email,
Subject.parse_obj({"format": "email", "email": "[email protected]"})],
[PhoneNumber(phone_number="+12223334444"), PhoneNumber,
Subject.parse_obj({"format": "phone_number", "phone_number": "+12223334444"})],
[IssSub(iss="http://issuer.example.com/", sub="145234573"), IssSub,
Subject.parse_obj({
"tenant": {"format": "iss_sub", "iss": "http://issuer.example.com/", "sub": "145234573"},
"user": {"format": "email", "email": "[email protected]"},
"application": {"format": "opaque", "id": "123456789"}
})],
[Opaque(id="123456789"), Opaque,
Subject.parse_obj({
"tenant": {"format": "iss_sub", "iss": "http://issuer.example.com/", "sub": "145234573"},
"user": {"format": "email", "email": "[email protected]"},
"application": {"format": "opaque", "id": "123456789"}
})],
[Account(uri="acct:[email protected]"), Account,
Subject.parse_obj({
"identifiers": [
{"format": "account", "uri": "acct:[email protected]"},
{"format": "did", "url": "did:example:123456/did/url/path?versionId=1"},
{"format": "email", "email": "[email protected]"},
{"format": "iss_sub", "iss": "http://issuer.example.com/", "sub": "145234573"},
]
})],
[DID(url="did:example:123456/did/url/path?versionId=1"), DID,
Subject.parse_obj({
"identifiers": [
{"format": "account", "uri": "acct:[email protected]"},
{"format": "did", "url": "did:example:123456/did/url/path?versionId=1"},
{"format": "email", "email": "[email protected]"},
{"format": "iss_sub", "iss": "http://issuer.example.com/", "sub": "145234573"},
]
})],
# test returning None when subject type is not present
[None, Account,
Subject.parse_obj({"format": "phone_number", "phone_number": "+12223334444"})],
[None, DID,
Subject.parse_obj({
"tenant": {"format": "iss_sub", "iss": "http://issuer.example.com/", "sub": "145234573"},
"user": {"format": "email", "email": "[email protected]"},
"application": {"format": "opaque", "id": "123456789"}
})],
[None, JwtID,
Subject.parse_obj({
"identifiers": [
{"format": "account", "uri": "acct:[email protected]"},
{"format": "did", "url": "did:example:123456/did/url/path?versionId=1"},
{"format": "email", "email": "[email protected]"},
{"format": "iss_sub", "iss": "http://issuer.example.com/", "sub": "145234573"},
]
})],
])
def test_get_simple_subject(
expected_result: SimpleSubjectType,
class_to_search: type,
subject: Subject
) -> None:
assert get_simple_subject(subject, class_to_search) == expected_result
|
py | 1a3d129836920c33013f591fa8b61d59f5a062ec | import sys, os
from dataset.image_base import *
set_names = {'all':['train','val','test'],'test':['test'],'val':['train','val','test']}
PW3D_PCsubset = {'courtyard_basketball_00':[200,280], 'courtyard_captureSelfies_00':[500,600],\
'courtyard_dancing_00':[60,370], 'courtyard_dancing_01':[60,270], 'courtyard_hug_00':[100,500], 'downtown_bus_00':[1620,1900]}
PW3D_OCsubset = ['courtyard_backpack','courtyard_basketball','courtyard_bodyScannerMotions','courtyard_box','courtyard_golf','courtyard_jacket',\
'courtyard_laceShoe','downtown_stairs','flat_guitar','flat_packBags','outdoors_climbing','outdoors_crosscountry','outdoors_fencing','outdoors_freestyle',\
'outdoors_golf','outdoors_parcours','outdoors_slalom']
PW3D_NOsubset = {}
class PW3D(Image_base):
def __init__(self,train_flag = False, split='train', mode='vibe', regress_smpl=True, **kwargs):
#if train_flag:
# mode, split, regress_smpl = ['normal', 'train', True]
super(PW3D,self).__init__(train_flag,regress_smpl=regress_smpl)
self.data_folder = os.path.join(self.data_folder,'3DPW/')
self.data3d_dir = os.path.join(self.data_folder,'sequenceFiles')
self.image_dir = os.path.join(self.data_folder,'imageFiles')
self.mode = mode
self.split = split
self.regress_smpl = regress_smpl
self.val_sample_ratio = 5
self.scale_range = [1.56,1.8]
self.dataset_name = {'PC':'pw3d_pc', 'NC':'pw3d_nc','OC':'pw3d_oc','vibe':'pw3d_vibe', 'normal':'pw3d_normal'}[mode]
logging.info('Start loading 3DPW data.')
if mode in ['normal','PC']:
logging.info('Loading 3DPW in {} mode, split {}'.format(self.mode,self.split))
self.joint_mapper = constants.joint_mapping(constants.COCO_18,constants.SMPL_ALL_54)
self.joint3d_mapper = constants.joint_mapping(constants.SMPL_24,constants.SMPL_ALL_54)
self.annots_path = os.path.join(self.data_folder,'annots.npz')
if not os.path.exists(self.annots_path):
self.pack_data()
self.load_annots()
elif mode in ['vibe','NC','OC']:
logging.info('Loading 3DPW in VIBE mode, split {}'.format(self.split))
self.annots_path = os.path.join(self.data_folder,'vibe_db')
self.joint_mapper = constants.joint_mapping(constants.LSP_14,constants.SMPL_ALL_54)
self.joint3d_mapper = constants.joint_mapping(constants.LSP_14,constants.SMPL_ALL_54)
self.regress_smpl = False
self.load_vibe_annots()
else:
logging.info('3DPW loading mode is not recognized, please use the normal / vibe mode')
raise NotImplementedError
if self.split=='val':
self.file_paths = self.file_paths[::self.val_sample_ratio]
if mode in ['vibe','NC','OC']:
self.root_inds = [constants.SMPL_ALL_54['R_Hip'], constants.SMPL_ALL_54['L_Hip']]
elif mode in ['PC', 'normal']:
self.root_inds = [constants.SMPL_ALL_54['Pelvis_SMPL']]
if self.regress_smpl:
self.smplr = SMPLR(use_gender=True)
logging.info('3DPW dataset {} split total {} samples, loading mode {}'.format(self.split ,self.__len__(), self.mode))
def __len__(self):
return len(self.file_paths)
def load_PC_annots(self):
annots = np.load(self.annots_path,allow_pickle=True)
params = annots['params'][()]
kp3ds = annots['kp3d'][()]
kp2ds = annots['kp2d'][()]
self.annots = {}
video_names = list(params.keys())
for video_name in video_names:
for person_id in range(len(kp3ds[video_name])):
frame_range = PW3D_PCsubset[video_name.strip('.pkl')]
for frame_id in range(frame_range[0],frame_range[1]):
name = '{}_{}'.format(video_name.strip('.pkl'),frame_id)
kp3d = kp3ds[video_name][person_id][frame_id]
kp2d = kp2ds[video_name][person_id][frame_id]
pose_param = params[video_name]['poses'][person_id][frame_id]
beta_param = params[video_name]['betas'][person_id]
if name not in self.annots:
self.annots[name] = []
self.annots[name].append([video_name.strip('.pkl'), person_id, frame_id, kp2d.T, kp3d, pose_param, beta_param])
self.file_paths = list(self.annots.keys())
def reset_dataset_length_to_target_person_number(self):
single_person_file_paths = []
for name in self.file_paths:
for person_id, annot in enumerate(self.annots[name]):
single_person_key = '{}-{}'.format(name, person_id)
single_person_file_paths.append(single_person_key)
self.annots[single_person_key]=[annot]
#del self.annots[name]
self.file_paths = single_person_file_paths
def get_image_info(self, index):
annots = self.annots[self.file_paths[index%len(self.file_paths)]]
subject_ids, genders, kp2ds, kp3ds, params, bbox, valid_mask_2d, valid_mask_3d = [[] for i in range(8)]
for inds, annot in enumerate(annots):
video_name, gender, person_id, frame_id, kp2d, kp3d, pose_param, beta_param = annot
subject_ids.append(person_id)
genders.append(gender)
if not self.regress_smpl:
kp3d = self.map_kps(kp3d, self.joint3d_mapper)
kp3ds.append(kp3d)
params.append(np.concatenate([pose_param[:66], beta_param[:10]]))
kp2d_gt = self.map_kps(kp2d, self.joint_mapper)
kp2ds.append(kp2d_gt)
valid_mask_2d.append([True,False,False])
valid_mask_3d.append([True,True,True,True])
kp2ds, kp3ds, params = np.array(kp2ds), np.array(kp3ds), np.array(params)
valid_mask_2d, valid_mask_3d = np.array(valid_mask_2d), np.array(valid_mask_3d)
if self.regress_smpl:
kp3ds = []
poses, betas = np.concatenate([params[:,:-10], np.zeros((len(params),6))], 1),params[:,-10:]
for pose, beta, gender in zip(poses, betas, genders):
smpl_outs = self.smplr(pose, beta, gender)
kp3ds.append(smpl_outs['j3d'].numpy())
kp3ds = np.concatenate(kp3ds, 0)
imgpath = os.path.join(self.image_dir,video_name,'image_{:05}.jpg'.format(frame_id))
image = cv2.imread(imgpath)[:,:,::-1].copy()
root_trans = kp3ds[:,self.root_inds].mean(1)
valid_masks = np.array([self._check_kp3d_visible_parts_(kp3d) for kp3d in kp3ds])
kp3ds -= root_trans[:,None]
kp3ds[~valid_masks] = -2.
img_info = {'imgpath': imgpath, 'image': image, 'kp2ds': kp2ds, 'track_ids': subject_ids,\
'vmask_2d': valid_mask_2d, 'vmask_3d': valid_mask_3d,\
'kp3ds': kp3ds, 'params': params, 'img_size': image.shape[:2],'ds': self.dataset_name}
return img_info
def load_vibe_annots(self):
set_names = {'all':['train','val','test'],'train':['train'],'test':['test'],'val':['val']}
self.split_used = set_names[self.split]
self.annots = {}
for split in self.split_used:
db_file = os.path.join(self.annots_path,'3dpw_{}_db.pt'.format(split))
db = joblib.load(db_file)
vid_names = db['vid_name']
frame_ids = db['frame_id']
kp2ds, kp3ds, pose_params, beta_params, valids = db['joints2D'], db['joints3D'], db['pose'], db['shape'], db['valid']
if split=='train':
kp3ds = kp3ds[:,25:39]
for vid_name, frame_id, kp2d, kp3d, pose_param, beta_param, valid in zip(vid_names, frame_ids, kp2ds, kp3ds, pose_params, beta_params, valids):
if valid!=1:
continue
video_name, person_id = vid_name[:-2], int(vid_name[-1])
name = '{}_{}'.format(video_name,frame_id)
if name not in self.annots:
self.annots[name] = []
self.annots[name].append([video_name, None, person_id, frame_id, kp2d, kp3d, pose_param, beta_param])
self.file_paths = list(self.annots.keys())
if self.mode == 'NC':
logging.info('Convert to NC subset...')
file_paths = []
annots = {}
for key, annot in self.annots.items():
frame_id = key.split('_')[-1]
video_name = key.replace('_'+frame_id,'')
if video_name[:-3] not in PW3D_OCsubset:
if video_name not in PW3D_PCsubset:
file_paths.append(key)
annots[key] = annot
self.file_paths = file_paths
self.annots = annots
if self.mode == 'OC':
logging.info('Convert to OC subset...')
video_used = []
file_paths = []
annots = {}
for key, annot in self.annots.items():
frame_id = key.split('_')[-1]
video_name = key.replace('_'+frame_id,'')
if video_name[:-3] in PW3D_OCsubset:
if video_name not in video_used:
video_used.append(video_name)
file_paths.append(key)
annots[key] = annot
self.file_paths = file_paths
self.annots = annots
def load_annots(self):
set_names = {'train':['train'],'all':['train','validation','test'],'val':['validation'],'test':['test']}
split_used = set_names[self.split]
annots = np.load(self.annots_path,allow_pickle=True)
params = annots['params'][()]
kp3ds = annots['kp3d'][()]
kp2ds = annots['kp2d'][()]
self.annots = {}
video_names = list(params.keys())
for video_name in video_names:
valid_indices = params[video_name]['valid_indices']
genders = params[video_name]['genders']
for person_id, valid_index in enumerate(valid_indices):
for annot_id,frame_id in enumerate(valid_index):
split = params[video_name]['split']
if split not in split_used:
continue
name = '{}_{}'.format(video_name.strip('.pkl'),frame_id)
kp3d = kp3ds[video_name][person_id][annot_id]
kp2d = kp2ds[video_name][person_id][annot_id]
pose_param = params[video_name]['poses'][person_id][annot_id]
beta_param = params[video_name]['betas'][person_id]
gender = genders[person_id]
if name not in self.annots:
self.annots[name] = []
self.annots[name].append([video_name.strip('.pkl'), gender, person_id, frame_id, kp2d.T, kp3d, pose_param, beta_param])
self.file_paths = list(self.annots.keys())
        # because VIBE removed occluded subjects, we have to use the original gt data.
if self.mode == 'PC':
file_paths = []
annots = {}
for key, annot in self.annots.items():
frame_id = key.split('_')[-1]
video_name = key.replace('_'+frame_id,'')
if video_name in PW3D_PCsubset:
frame_range = PW3D_PCsubset[video_name]
if frame_range[0]<=int(frame_id)<frame_range[1]:
file_paths.append(key)
annots[key] = annot
self.file_paths = file_paths
self.annots = annots
def pack_data(self):
"""
The function reads all the ground truth and prediction files. And concatenates
:param paths_gt: all the paths corresponding to the ground truth - list of pkl files
:param paths_prd: all the paths corresponding to the predictions - list of pkl files
:return:
jp_pred: jointPositions Prediction. Shape N x 24 x 3
jp_gt: jointPositions ground truth. Shape: N x 24 x 3
mats_pred: Global rotation matrices predictions. Shape N x 24 x 3 x 3
mats_gt: Global rotation matrices ground truths. Shape N x 24 x 3 x 3
"""
# all ground truth smpl parameters / joint positions / rotation matrices
from evaluation.pw3d_eval.SMPL import SMPL
all_params, all_jp_gts, all_jp2d_gts, all_glob_rot_gts = {}, {}, {}, {}
seq = 0
num_jps_pred = 0
num_ors_pred = 0
paths_gt = glob.glob(os.path.join(self.data3d_dir,'*/*.pkl'))
smpl_model_genders = {'f':SMPL(center_idx=0, gender='f', model_root=args().smpl_model_path),\
'm':SMPL(center_idx=0, gender='m', model_root=args().smpl_model_path) }
# construct the data structures -
for path_gt in paths_gt:
print('Processing: ', path_gt)
video_name = os.path.basename(path_gt)
seq = seq + 1
# Open pkl files
data_gt = pickle.load(open(path_gt, 'rb'), encoding='latin1')
split = path_gt.split('/')[-2]
genders = data_gt['genders']
all_params[video_name], all_jp_gts[video_name], all_jp2d_gts[video_name], all_glob_rot_gts[video_name] = {}, [], [], []
all_params[video_name]['split'] = split
all_params[video_name]['genders'] = genders
all_params[video_name]['poses'], all_params[video_name]['trans'], all_params[video_name]['valid_indices'] = [], [], []
all_params[video_name]['betas'] = np.array(data_gt['betas'])
for i in range(len(genders)):
# Get valid frames
# Frame with no zeros in the poses2d file and where campose_valid is True
poses2d_gt = data_gt['poses2d']
poses2d_gt_i = poses2d_gt[i]
camposes_valid = data_gt['campose_valid']
camposes_valid_i = camposes_valid[i]
valid_indices = check_valid_inds(poses2d_gt_i, camposes_valid_i)
all_jp2d_gts[video_name].append(poses2d_gt_i[valid_indices])
# Get the ground truth SMPL body parameters - poses, betas and translation parameters
pose_params = np.array(data_gt['poses'])
pose_params = pose_params[i, valid_indices, :]
shape_params = np.array(data_gt['betas'][i])
shape_params = np.expand_dims(shape_params, 0)
shape_params = shape_params[:, :10]
shape_params = np.tile(shape_params, (pose_params.shape[0], 1))
trans_params = np.array(data_gt['trans'])
trans_params = trans_params[i, valid_indices, :]
all_params[video_name]['trans'].append(trans_params)
all_params[video_name]['valid_indices'].append(valid_indices)
# Get the GT joint and vertex positions and the global rotation matrices
verts_gt, jp_gt, glb_rot_mats_gt = smpl_model_genders[genders[i]].update(pose_params, shape_params, trans_params)
# Apply Camera Matrix Transformation to ground truth values
cam_matrix = data_gt['cam_poses']
new_cam_poses = np.transpose(cam_matrix, (0, 2, 1))
new_cam_poses = new_cam_poses[valid_indices, :, :]
            # we don't have the joint regressor for the female/male models, so we can't regress all 54 joints from their meshes.
jp_gt, glb_rot_mats_gt = apply_camera_transforms(jp_gt, glb_rot_mats_gt, new_cam_poses)
root_rotation_cam_tranformed = transform_rot_representation(glb_rot_mats_gt[:,0], input_type='mat',out_type='vec')
pose_params[:,:3] = root_rotation_cam_tranformed
all_params[video_name]['poses'].append(pose_params)
all_jp_gts[video_name].append(jp_gt)
all_glob_rot_gts[video_name].append(glb_rot_mats_gt)
np.savez(self.annots_path, params=all_params, kp3d=all_jp_gts, glob_rot=all_glob_rot_gts, kp2d=all_jp2d_gts)
def with_ones(data):
"""
    Converts an array in 3d coordinates to 4d homogeneous coordinates
    :param data: array of shape A x B x 3
    :return ret_arr: array of shape A x B x 4 where the extra dimension is filled with ones
"""
ext_arr = np.ones((data.shape[0], data.shape[1], 1))
ret_arr = np.concatenate((data, ext_arr), axis=2)
return ret_arr
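# Quick illustration (added comment, not original code):
#   with_ones(np.zeros((10, 24, 3))).shape == (10, 24, 4), with the appended column all ones.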
def apply_camera_transforms(joints, rotations, camera):
"""
    Applies camera transformations to joint locations and rotation matrices
:param joints: B x 24 x 3
:param rotations: B x 24 x 3 x 3
:param camera: B x 4 x 4 - already transposed
:return: joints B x 24 x 3 joints after applying camera transformations
             rotations B x 24 x 3 x 3 - rotation matrices after applying camera transformations
"""
joints = with_ones(joints) # B x 24 x 4
joints = np.matmul(joints, camera)[:, :, :3]
# multiply all rotation matrices with the camera rotation matrix
# transpose camera coordinates back
cam_new = np.transpose(camera[:, :3, :3], (0, 2, 1))
cam_new = np.expand_dims(cam_new, 1)
cam_new = np.tile(cam_new, (1, 24, 1, 1))
# B x 24 x 3 x 3
rotations = np.matmul(cam_new, rotations)
return joints, rotations
def check_valid_inds(poses2d, camposes_valid):
"""
Computes the indices where further computations are required
:param poses2d: N x 18 x 3 array of 2d Poses
:param camposes_valid: N x 1 array of indices where camera poses are valid
:return: array of indices indicating frame ids in the sequence which are to be evaluated
"""
# find all indices in the N sequences where the sum of the 18x3 array is not zero
# N, numpy array
poses2d_mean = np.mean(np.mean(np.abs(poses2d), axis=2), axis=1)
poses2d_bool = poses2d_mean == 0
poses2d_bool_inv = np.logical_not(poses2d_bool)
# find all the indices where the camposes are valid
camposes_valid = np.array(camposes_valid).astype('bool')
final = np.logical_and(poses2d_bool_inv, camposes_valid)
indices = np.array(np.where(final == True)[0])
return indices
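# Quick illustration (added comment, not original code): a frame is kept only when its
# 2D pose is non-zero and its camera pose is valid, e.g.
#   poses2d = np.stack([np.ones((18, 3)), np.zeros((18, 3))])
#   check_valid_inds(poses2d, [True, True])   # -> array([0])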
def read_keypoints(keypoint_fn, use_hands=True, use_face=True,
use_face_contour=False):
if not os.path.exists(keypoint_fn):
return None
with open(keypoint_fn) as keypoint_file:
data = json.load(keypoint_file)
keypoints = []
gender_pd = []
gender_gt = []
if len(data['people'])<1:
return None
for idx, person_data in enumerate(data['people']):
body_keypoints = np.array(person_data['pose_keypoints_2d'],
dtype=np.float32)
body_keypoints = body_keypoints.reshape([-1, 3])[:25]
keypoints.append(body_keypoints)
'''
left_hand_keyp = np.array(
person_data['hand_left_keypoints_2d'],
dtype=np.float32).reshape([-1, 3])
right_hand_keyp = np.array(
person_data['hand_right_keypoints_2d'],
dtype=np.float32).reshape([-1, 3])
hand_kp2d = np.concatenate([left_hand_keyp, right_hand_keyp],0)
# TODO: Make parameters, 17 is the offset for the eye brows,
# etc. 51 is the total number of FLAME compatible landmarks
face_keypoints = np.array(
person_data['face_keypoints_2d'],
dtype=np.float32).reshape([-1, 3])[17: 17 + 51, :]
contour_keyps = np.array(
[], dtype=body_keypoints.dtype).reshape(0, 3)
if use_face_contour:
contour_keyps = np.array(
person_data['face_keypoints_2d'],
dtype=np.float32).reshape([-1, 3])[:17, :]
keypoints.append([body_keypoints, hand_kp2d, face_keypoints])
'''
return keypoints
if __name__ == '__main__':
#dataset= PW3D(train_flag=False, split='test', mode='vibe')
dataset= PW3D(train_flag=True)
test_dataset(dataset,with_3d=True,with_smpl=True)
print('Done')
'''
if crop_eval:
self.reset_dataset_length_to_target_person_number()
            self.multi_mode = False
self.openpose_dir = os.path.join(self.data_folder,'openpose_json')
input_cropped_img=False, bbox=None, use_openpose_center=False
self.input_cropped_img = input_cropped_img
self.use_bbox = True if bbox is not None else False
self.use_openpose_center = use_openpose_center
if self.input_cropped_img:
self.multi_mode = False
self.reset_dataset_length_to_target_person_number()
logging.info('loading 3DPW dataset using cropped image')
if self.use_bbox:
self.bboxes = np.load(bbox,allow_pickle=True)['bbox'][()]
logging.info('using bbox from ', bbox)
openpose_annot_path = self.openpose_dir.replace('_json', '_body_results.npz')
if not os.path.exists(openpose_annot_path):
self.pack_openpose_results(openpose_annot_path)
self.openpose_kp2ds = np.load(openpose_annot_path,allow_pickle=True)['annots'][()]
def get_image_info(self,index):
if not self.input_cropped_img:
multi_person_annots = self.annots[self.file_paths[index]]
return self.get_complete_image_info(multi_person_annots)
if self.input_cropped_img:
annot_id, person_id = self.file_paths[index].split('-')
multi_person_annots = self.annots[annot_id]
target_person_annots = multi_person_annots[int(person_id)]
video_name, frame_id = target_person_annots[0], target_person_annots[2]
if video_name in self.openpose_kp2ds:
if frame_id in self.openpose_kp2ds[video_name]:
self.multi_mode = False
return self.get_cropped_image_info(target_person_annots)
self.multi_mode = True
return self.get_complete_image_info(multi_person_annots)
def get_complete_image_info(self, multi_person_annots):
# if self.train_flag and self.train_with_openpose:
# video_name, frame_id = multi_person_annots[0][0], multi_person_annots[0][2]
# if frame_id in self.openpose_kp2ds[video_name]:
# full_kp2d = self.openpose_kp2ds[video_name][frame_id]
# else:
# return self.get_image_info(random.randint(0,len(self)))
# #full_kp2d = [self.map_kps(kp2d,maps=constants.body1352coco25) for kp2d in full_kp2d]
# subject_ids = np.arange(len(full_kp2d))
# kp3d_monos, params = None, None
subject_ids, full_kp2d, kp3d_monos, params, bbox = [[] for i in range(5)]
video_name, frame_id = multi_person_annots[0][0], multi_person_annots[0][2]
#if self.use_openpose_center:
# full_kp2d_op = np.array(self.openpose_kp2ds[video_name][frame_id])
# openpose_center = np.array([self._calc_center_(kp2d) for kp2d in full_kp2d_op])
for subject_id, annots in enumerate(multi_person_annots):
video_name, person_id, frame_id, kp2d, kp3d, pose_param, beta_param = annots
subject_ids.append(person_id)
kp3d_monos.append(kp3d)
params.append(np.concatenate([pose_param[:66], beta_param]))
kp2d_gt = self.map_kps(kp2d, self.joint_mapper)
#if self.use_openpose_center:
# kp2d_gt_center = self._calc_center_(kp2d_gt)
# min_dist_idx = np.argmin(np.linalg.norm(openpose_center-kp2d_gt_center[None],axis=-1))
# full_kp2d.append(full_kp2d_op[min_dist_idx])
full_kp2d.append(kp2d_gt)
imgpath = os.path.join(self.image_dir,video_name,'image_{:05}.jpg'.format(frame_id))
image = cv2.imread(imgpath)[:,:,::-1].copy()
info_2d = ('pw3d', imgpath, image, full_kp2d[np.random.randint(len(full_kp2d))], full_kp2d, None, subject_ids)
info_3d = ('pw3d', kp3d_monos, params, None)
return info_2d, info_3d
def get_cropped_image_info(self, target_person_annots):
video_name, person_id, frame_id, kp2d, kp3d, pose_param, beta_param = target_person_annots
kp2d_op = self.openpose_kp2ds[video_name][frame_id]
kp2d_op_matched = self.match_op_to_gt(kp2d_op,kp2d)
full_kp2d = [kp2d]
subject_ids = [person_id]
kp3d_monos, params = [kp3d], [np.concatenate([pose_param[:66], beta_param])]
imgpath = os.path.join(self.image_dir,video_name,'image_{:05}.jpg'.format(frame_id))
image = cv2.imread(imgpath)[:,:,::-1].copy()
info_2d = ('pw3d', imgpath, image, kp2d_op_matched, full_kp2d,None,subject_ids)
info_3d = ('pw3d', kp3d_monos, params, None)
return info_2d, info_3d
if self.use_bbox:
bbox_center = self.bboxes[video_name][person_id,frame_id]
min_dist_idx = np.argmin(np.linalg.norm(openpose_center[:,:2]-bbox_center[None],axis=-1))
center = self._calc_center_(full_kp2d_op[min_dist_idx])
centers.append(center)
if self.use_bbox:
centers = np.array(centers)
def pack_openpose_results(self, annot_file_path):
self.openpose_kp2ds = {}
for key, multi_person_annots in self.annots.items():
video_name, frame_id = multi_person_annots[0][0], multi_person_annots[0][2]
openpose_file_path = os.path.join(self.openpose_dir,video_name+'-'+'image_{:05}_keypoints.json'.format(frame_id))
full_kp2d = read_keypoints(openpose_file_path)
if full_kp2d is None:
continue
if video_name not in self.openpose_kp2ds:
self.openpose_kp2ds[video_name] = {}
self.openpose_kp2ds[video_name][frame_id] = full_kp2d
np.savez(annot_file_path, annots=self.openpose_kp2ds)
def match_op_to_gt(self, kp2ds_op, kp2d_gt):
kp2ds_op_dist = {}
vis_gt = kp2d_gt[self.torso_ids,-1]>0
center_gt = kp2d_gt[self.torso_ids][vis_gt].mean(0)
for idx, kp2d_op in enumerate(kp2ds_op):
vis = kp2d_op[self.torso_ids,-1]>0
if vis.sum()>1:
center_point = kp2d_op[self.torso_ids][vis].mean(0)
dist = np.linalg.norm(center_point-center_gt)
kp2ds_op_dist[dist] = idx
kp2d_op_matched_id = kp2ds_op_dist[np.min(list(kp2ds_op_dist.keys()))]
return kp2ds_op[kp2d_op_matched_id]
if 'joint_format' in kwargs:
joint_format=kwargs['joint_format']
else:
joint_format='coco25'
print('joint_format',joint_format)
#for set_name in set_names[self.phase]:
# label_dir = os.path.join(self.data3d_dir,set_name)
# self.get_labels(label_dir)
def get_image_info(self,index):
annot_3d = self.labels[index]
imgpath = os.path.join(self.image_dir,annot_3d['name'],'image_{:05}.jpg'.format(annot_3d['ids']))
subject_ids = annot_3d['subject_ids'].tolist()
person_num = len(subject_ids)
#name = os.path.join(self.image_dir,annot_3d['name'],'image_{:05}_{}.jpg'.format(annot_3d['ids'],subject_id))
image = cv2.imread(imgpath)[:,:,::-1].copy()
#openpose_file_path = os.path.join(self.openpose_dir,annot_3d['name']+'-'+'image_{:05}_keypoints.json'.format(annot_3d['ids']))
#openpose_result_list = read_keypoints(openpose_file_path)
#kp2d_body = self.process_openpose(openpose_result_list, kps)
full_kps = annot_3d['kp2d'].copy()
thetas,betas,ts,genders = annot_3d['poses'].copy(),annot_3d['betas'].copy(),annot_3d['t'].copy(),annot_3d['gender'].copy()
full_kp2d,kp3d_monos = [],[]
for idx in range(person_num):
joint = self.map_kps(full_kps[idx].T)
if (joint[:,-1]>-1).sum()<1:
subject_ids.remove(idx)
continue
full_kp2d.append(joint)
kp3d = self.smplr(thetas[idx], betas[idx], genders[idx])[0]
kp3d_monos.append(kp3d)
#kp3d_mono = annot_3d['kp3d'].copy().reshape(24,3)
#kp3d_mono[:,1:] *= -1
#kp3d_mono = self.map_kps(kp3d_mono,maps=config.smpl24_2_coco25)
params = np.concatenate([np.array(thetas)[:,:66], np.array(betas)[:,-10:]],-1)
info_2d = ('pw3d', imgpath, image, full_kp2d[np.random.randint(len(full_kp2d))], full_kp2d,None,None,subject_ids)
info_3d = ('pw3d', kp3d_monos, params, None)
return info_2d, info_3d
def get_labels(self,label_dir):
label_paths = glob.glob(label_dir+'/*.pkl')
for label_path in label_paths:
raw_labels = self.read_pkl(label_path)
frame_num = len(raw_labels['img_frame_ids'])
for j in range(frame_num):
label = {}
label['name'] = raw_labels['sequence']
label['ids'] = j#raw_labels['img_frame_ids'][j]\
#img_frame_ids: an index-array to down-sample 60 Hz 3D poses to corresponding image frame ids
label['frame_ids'] = raw_labels['img_frame_ids'][j]
label['subject_ids'] = np.arange(len(raw_labels['poses']))
label['kp2d'] = np.array([raw_labels['poses2d'][idx][j] for idx in range(len(raw_labels['poses2d']))])
if (label['kp2d'][:,:,-1]>-1).sum()<1:
continue
extrinsics = raw_labels['cam_poses'][j,:3,:3]
poses,shapes,trans = [[] for idx in range(3)]
for idx in range(len(raw_labels['poses'])):
trans.append(raw_labels['trans'][idx][j])
shapes.append(raw_labels['betas'][idx][:10])
pose=raw_labels['poses'][idx][j]
pose[:3] = cv2.Rodrigues(np.dot(extrinsics, cv2.Rodrigues(pose[:3])[0]))[0].T[0]
poses.append(pose)
label['poses'],label['betas'],label['t'] = poses,shapes,trans
label['kp3d'] = [raw_labels['jointPositions'][idx][j] for idx in range(len(raw_labels['jointPositions']))]
label['gender'] = [raw_labels['genders'][idx] for idx in range(len(raw_labels['genders']))]
                #label['cam_poses'] = raw_labels['cam_poses'][i]  # Rt matrix
label['cam_trans'] = raw_labels['cam_poses'][j,:3,3]
                label['cam_rotation_matrix'] = raw_labels['cam_poses'][j,:3,:3]  # Rt matrix
#label['campose_valid_mask'] = raw_labels['campose_valid'][i][j]
self.labels.append(label)
return True
def process_openpose(self,result_list, kps_gt):
if result_list is not None:
if len(result_list)>1:
for body_kp2d_op, hand_kp2d_op, face_kp2d_op in result_list:
body_kp2d_op = body_kp2d_op[config.body1352coco25]
if body_kp2d_op[9,2]>0.05 and body_kp2d_op[12,2]>0.05:
body_kp2d_op[8] = (body_kp2d_op[9]+body_kp2d_op[12])/2
else:
body_kp2d_op[8,2] = -2
vis_id = ((body_kp2d_op[:,2]>0.04).astype(np.float32) + (kps_gt[:,2]>0.04).astype(np.float32))>1
if vis_id.sum()>4:
error = np.linalg.norm((body_kp2d_op[vis_id,:2]-kps_gt[vis_id,:2]), axis=-1).mean()
else:
error = 1000
if error<70:
return body_kp2d_op
return kps_gt
def load_file_list(self):
self.file_paths = []
self.annots = np.load(self.annots_file, allow_pickle=True)['annots'][()]
with open(self.imgs_list_file) as f:
test_list = f.readlines()
for test_file in test_list:
self.file_paths.append(test_file.strip())
self.kps_op, self.facial_kps2d, self.hand_kps2d = {},{},{}
with open(self.kps_openpose_json_file,'r') as f:
openpose_labels = json.load(f)
empty_count=0
for idx,img_name in enumerate(self.file_paths):
img_name = os.path.basename(img_name)
annot = openpose_labels[img_name]
if annot is None:
empty_count += 1
continue
kp2d = np.array(annot['pose_keypoints_2d']).reshape(-1,3)
self.kps_op[img_name] = kp2d.astype(np.float32)
face_kp2d = np.array(annot['face_keypoints_2d']).reshape(-1,3)[17:68]
self.facial_kps2d[img_name] = face_kp2d.astype(np.float32)
hand_kp2d = np.concatenate([np.array(annot['hand_left_keypoints_2d']).reshape(-1,3),\
np.array(annot['hand_right_keypoints_2d']).reshape(-1,3)],0)
self.hand_kps2d[img_name] = hand_kp2d.astype(np.float32)
print('empty_count_op:',empty_count)
def load_alphapose_mpii(self):
with open(self.kps_alpha_json_file,'r') as f:
raw_labels = json.load(f)
error_num = 0
for idx,annot_3d in enumerate(self.labels):
content = raw_labels['{}-image_{:05}.jpg'.format(annot_3d['name'],annot_3d['ids'])]
poses = []
for pid in range(len(content)):
poses.append(np.array(content[pid]['keypoints']).reshape(-1,3)[:,:3])
poses = np.array(poses)[:,self.mpii_2_lsp14]
kps_gt = annot_3d['kp2d'].copy().T[self.coco18_2_lsp14][:-2]
vis = np.where(kps_gt[:,2]>0)[0]
poses_comp = poses[:,vis,:2]
kps_gt = kps_gt[vis,:2][None,:,:]
mis_errors = np.mean(np.linalg.norm(poses_comp-kps_gt,ord=2,axis=-1),-1)
pose = poses[np.argmin(mis_errors)]
pose[pose[:,2]<0.01,2] = 0
pose[pose[:,2]>0.01,2] = 1
annot_3d['kps_alpha'] = pose
def load_alphapose_coco(self):
with open(self.kps_alpha_json_file,'r') as f:
raw_labels = json.load(f)
frame_num = len(raw_labels)
print('frame_num',frame_num)
error_count=0
for idx,annot_3d in enumerate(self.labels):
try:
content = raw_labels['{}-image_{:05}.jpg'.format(annot_3d['name'],annot_3d['ids'])]['bodies']
poses = []
for pid in range(len(content)):
poses.append(np.array(content[pid]['joints']).reshape(-1,3))
poses = np.array(poses)[:,self.coco18_2_lsp14]
poses[:,-1,2] = 0
kps_gt = annot_3d['kp2d'].copy().T[self.coco18_2_lsp14][:-2]
vis = np.where(kps_gt[:,2]>0)[0]
mis_errors = []
for i in range(len(poses)):
poses_comp = poses[i,vis]
vis_pred = poses_comp[:,2]>0
poses_comp = poses_comp[vis_pred,:2]
kps_gti = kps_gt[vis,:2][vis_pred,:]
mis_errors.append(np.mean(np.linalg.norm(poses_comp-kps_gti,ord=2,axis=-1)))
mis_errors = np.array(mis_errors)
pose = poses[np.argmin(mis_errors)]
pose[pose[:,2]<0.1,2] = 0
pose[pose[:,2]>0.1,2] = 1
annot_3d['kps_alpha'] = pose
except :
print('{}/image_{:05}.jpg'.format(annot_3d['name'],annot_3d['ids']))
error_count+=1
pose_gt = annot_3d['kp2d'].copy().T[self.coco18_2_lsp14]
pose_gt[pose_gt[:,2]<0.1,2] = 0
pose_gt[pose_gt[:,2]>0.1,2] = 1
annot_3d['kps_alpha'] = pose_gt
print('error_count',error_count)
def get_item_video(self,index):
label = self.labels[index]
label_dict_name = '{}_{}'.format(label['name'],label['subject_ids'])
ids_sequence = list(self.label_dict[label_dict_name].keys())
current_frame = label['ids']
current_spawn = int((self.spawn-1)/2)
features_idx = []
for index, num in enumerate(list(range(current_frame, current_frame+current_spawn+1))):
if num not in ids_sequence:
num=features_idx[index-1]
features_idx.append(num)
for index, num in enumerate(list(range(current_frame-1, current_frame-current_spawn-1,-1))):
if num not in ids_sequence:
num=features_idx[0]
features_idx=[num]+features_idx
labels_idx = []
for idx in features_idx:
labels_idx.append(self.label_dict[label_dict_name][idx])
video = []
video_input = {}
for label_idx in labels_idx:
video.append(self.get_item_single_frame(label_idx))
for key in video[0].keys():
if key=='image':
video_input[key] = torch.cat([video[i][key].unsqueeze(0) for i in range(len(video))])
elif key=='kps_alpha':
video_input[key] = torch.cat([video[i][key].unsqueeze(0) for i in range(len(video))])
else:
video_input[key] = video[current_spawn][key]
return video_input
''' |
py | 1a3d129a35001b529f0541219fa5cabf84234887 | import sys
import PySimpleGUI as sg
# import os.path
import json
import os
import random
import tkinter as tk
def check_experience(s):
if (s.isdigit() == False):
return False
exp = int(s)
if (exp > 30):
return False
return True
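# Quick illustration (added comment, not original code):
#   check_experience("5")   -> True   (digit string, value <= 30)
#   check_experience("31")  -> False  (value > 30)
#   check_experience("abc") -> False  (not a digit string)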
filename = './to-grade/hs.json'
try:
with open(filename[:-5] + '.tmp.json') as f:
dat = json.load(f)
except:
with open(filename) as f:
dat = json.load(f)
mylist = [(x, y) for x in range(len(dat) - 1) for y in range(3)]
random.shuffle(mylist)
names = ('gcnn', 'nl2code', 'snippet')
file_list_column = [
[
        sg.Text('''Is the code snippet below a relevant or not relevant description of the card on the right?
Please rate it on a scale from 0 to 4. You can either click the radio button or press the corresponding key (\'4\' for 4 etc.).
You can also press \'Exit\' to finish grading or \'Skip\' to skip the snippet.
4: Snippet is very relevant, it describes the card exactly
3: Snippet is relevant, but needs to be slightly changed to describe the card exactly
2: Snippet is somewhat relevant, it requires significant changes (compared to the size of the snippet), but is still useful to describe the card
1: Snippet is slightly relevant, it contains information relevant to the card, but it is easier to write the description from scratch
0: Snippet is not at all relevant to the card''', font=("Helvetica", 12)),
],
[sg.Radio('4', "RADIO1", enable_events=True, font=("Helvetica", 12), key='4', size=(10, 10)),
sg.Radio('3', "RADIO1", enable_events=True, font=("Helvetica", 12), key='3', size=(10, 10)),
sg.Radio('2', "RADIO1", enable_events=True, font=("Helvetica", 12), key='2', size=(10, 10)),
sg.Radio('1', "RADIO1", enable_events=True, font=("Helvetica", 12), key='1', size=(10, 10)),
sg.Radio('0', "RADIO1", enable_events=True, font=("Helvetica", 12), key='0', size=(10, 10))],
[sg.Cancel(button_text="Skip"), sg.Exit()],
[sg.Text(''), sg.Text(size=(150, 40), key='-OUTPUT-', font=("Helvetica", 12))]
]
# Column that shows the image of the card being graded
image_viewer_column = [
[sg.Image(key="-IMAGE-")],
]
# ----- Full layout -----
layout_form = [[sg.Text('''Dear participant,\n
this program is a survey on quality of the code snippets conducted by Independent non-profit organization of additional professional education
“Research and Education Center “JetBrains”, OGRN 1187800000134, located at St. Petersburg, Kantemirovskaya street 2, liter A, office 201.
You will be presented with code snippets (one at a time) and a problem they are supposed to solve. You are asked to evaluate whether
the suggested snippet is helpful or not helpful in solving the problem on a scale from 0 to 4, where 0 corresponds to a totally irrelevant snippet
and 4 corresponds to a snippet which solves the problem (more detailed instruction will be present at the snippet grading screen).\n
In the event of any publication or presentation resulting from the research, no personally identifiable information will be shared.
We plan to include the results of this survey in a scientific publication. If you have any concerns or questions about your rights as a participant
or about the way the study is being conducted, please contact Mikhail Evtikhiev ([email protected]).''',
font=("Helvetica", 12))],
[sg.Text('''In the text box below please write how long you have been programming in Python (in years),
rounded to the nearest integer. This information will be reported in the publication in an aggregated form.''',
font=("Helvetica", 12))],
[sg.Text('Python experience: ', key='_text1_',
font=("Helvetica", 12)), sg.InputText(key='_python_', size=(10, 1))],
[sg.Text('''In the text box below please write your Slack handle or e-mail address. This information will be kept private and we only ask for it
to be able to reach back to you to clarify any technical uncertainties with the graded snippets, if such uncertainties shall arise.''')],
[sg.Text('Contact information: ', key='_text2_',
font=("Helvetica", 12)), sg.InputText(key='_contact_', size=(30, 1))],
[sg.Text('''ELECTRONIC CONSENT\n
Please select your choice below. Selecting the “yes” option below indicates that:
i) you have read and understood the above information,
ii) you voluntarily agree to participate, and
iii) you are at least 18 years old.
If you do not wish to participate in the research study, please decline participation by selecting “No”.''',
font=("Helvetica", 12))],
[sg.Ok(button_text="Yes"), sg.Exit(button_text="No")],
]
layout_grade = [[
sg.Column(file_list_column),
sg.VSeperator(),
sg.Column(image_viewer_column),
]
]
#layout1 = [[sg.Text('')]]
#root = tk.Tk()
#screen_width = root.winfo_screenwidth()
#scaling_window = sg.Window('Window Title', layout1, no_titlebar=True, auto_close=False, alpha_channel=0).Finalize()
#scaling_window.TKroot.tk.call('tk', 'scaling', max(screen_width / 1920, 1))
#scaling_window.close()
pers_data = dat[-1]
no_consent = False
if ((pers_data["contact"] == "") or (pers_data["experience"] == "") or (pers_data["consent"] == "")):
window = sg.Window("Hearthstone dataset grader form", layout_form, finalize=True, location=(0, 0),
return_keyboard_events=True)
no_consent = True
while (no_consent):
event, values = window.read()
if event == "No" or event == sg.WIN_CLOSED:
window.close()
sys.exit()
elif event == "Yes":
error_text = ""
if (check_experience(values['_python_']) == False):
error_text += "Incorrect input. Please enter, for how long have you been programming in Python (in " \
"years, rounded to a nearest integer)\n"
if (len(values['_contact_']) < 1):
error_text += 'Incorrect input. Please enter your Slack handle or e-mail address.\n'
if len(error_text) > 0:
sg.popup(error_text)
else:
pers_data["contact"] = values['_contact_']
pers_data["experience"] = int(values['_python_'])
pers_data["consent"] = 'yes'
no_consent = False
for key in dat[-1]:
dat[-1][key] = pers_data[key]
window.close()
else:
pass
window = sg.Window("Hearthstone dataset grader", layout_grade, finalize=True, location=(0, 0),
return_keyboard_events=True)
if no_consent: window.close()
# Run the Event Loop
for (i, j) in mylist:
successful = False
finished = False
sname = 'grade-' + names[j]
if sname not in dat[i]:
window['-OUTPUT-'].update(dat[i][names[j]])
window["-IMAGE-"].update(filename='./hs_cards/' + str(i) + '.png')
while not successful:
event, values = window.read()
if event == "Exit" or event == sg.WIN_CLOSED:
with open(filename, 'w') as o:
json.dump(dat, o)
try:
os.remove(filename[:-5] + '.tmp.json')
except:
pass
finished = True
successful = True
elif event[0] in ['0', '1', '2', '3', '4']:
successful = True
dat[i][sname] = int(event)
with open(filename[:-5] + '.tmp.json', 'w') as o:
json.dump(dat, o)
elif event == "Skip":
successful = True
pass
else:
sg.popup(event)
if finished:
break
with open(filename, 'w') as o:
json.dump(dat, o)
try:
os.remove(filename[:-5] + '.tmp.json')
except:
pass
window.close()
|
py | 1a3d12d134e11937308c803b8ca0b34e211f38ae | from flask import Flask , render_template, request
from db_magazina import Kategor, Tovar, Tovar_photo, Tovar_inphoto
my_flask_app = Flask(__name__)
@my_flask_app.route('/')
def index():
return render_template('index.html')
@my_flask_app.route('/smart/harakter/')
def harakt():
t1= Tovar_inphoto()
harackter = Tovar_inphoto.query.filter(Tovar_inphoto.id==1).all()
return render_template('harakteriskick.html' , harackter=harackter)
@my_flask_app.route('/inf/')
def info():
return render_template('info_gl_str.html')
@my_flask_app.route('/log1/')
def log1():
return render_template('login_menu.html')
@my_flask_app.route('/login/',methods = ['POST'])
def login():
return render_template('login.html',email=request.form.get("email"), password=request.form.get("passwd"))
@my_flask_app.route('/smart/')
@my_flask_app.route('/smart/<username>', methods = ['GET', 'POST'])
def category(username=None):
phone_name = request.args.get('phone_name', False)
t = Tovar()
smartfons = t.query.filter(Tovar.kategory_id==1)
harackter = Tovar_inphoto.query.filter(Tovar_inphoto.id==1).all()
if phone_name:
qry = '%{}%'.format(phone_name)
smartfons = smartfons.filter(Tovar.tovar_name.like(qry))
if username:
qry = '%{}%'.format(username)
smartfons = smartfons.filter(Tovar.tovar_name.like(qry))
check9 = request.args.get('check9', False)
check8 = request.args.get('check8', False)
check7 = request.args.get('check7', False)
check6 = request.args.get('check6', False)
check5 = request.args.get('check5', False)
check4 = request.args.get('check4', False)
check3 = request.args.get('check3', False)
check2 = request.args.get('check2', False)
check1 = request.args.get('check1', False)
check0 = request.args.get('check0', False)
checki = request.args.get('checki', False)
if checki:
z = [check0,check1,check2,check3,check4,check5,check6,check7,check8,check9]
smartfons=smartfons.filter(Tovar.tovar_name.in_([z[0],z[1],z[2],z[6]]))
try:
smartfons= Tovar.query.filter(Tovar.id.in_([Tovar_inphoto.query.filter(Tovar_inphoto.tovarinphoto_diagon.in_([z[3],z[4],z[5]])).all()[0].tovar_id]))
except IndexError:
pass
try:
a = Tovar_inphoto.query.filter(Tovar_inphoto.tovarinphoto_ram.in_([z[7],z[8],z[9]])).all()
for i in a:
smartfons= Tovar.query.filter(Tovar.id.in_([i.tovar_id]))
#smartfons= Tovar.query.filter(Tovar.id.in_([a[0].tovar_id , a[1].tovar_id]))
except IndexError:
pass
print(smartfons)
smartfons=smartfons.all()
return render_template('smartfons.html', smartfons=smartfons)
if __name__ == "__main__":
my_flask_app.run(debug=True)
|
py | 1a3d14100f05cd3473507935440cbb8764c0806c | #!/usr/bin/env python3
# Copyright (c) 2015-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multisig RPCs"""
import decimal
import itertools
import json
import os
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.authproxy import JSONRPCException
from test_framework.descriptors import descsum_create, drop_origins
from test_framework.key import ECPubKey, ECKey
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_raises_rpc_error,
assert_equal,
)
from test_framework.wallet_util import bytes_to_wif
class RpcCreateMultiSigTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def get_keys(self):
self.pub = []
self.priv = []
node0, node1, node2 = self.nodes
for _ in range(self.nkeys):
k = ECKey()
k.generate()
self.pub.append(k.get_pubkey().get_bytes().hex())
self.priv.append(bytes_to_wif(k.get_bytes(), k.is_compressed))
self.final = node2.getnewaddress()
def run_test(self):
node0, node1, node2 = self.nodes
self.check_addmultisigaddress_errors()
self.log.info('Generating blocks ...')
self.generate(node0, 149)
self.moved = 0
for self.nkeys in [3, 5]:
for self.nsigs in [2, 3]:
for self.output_type in ["bech32", "p2sh-segwit", "legacy"]:
self.get_keys()
self.do_multisig()
self.checkbalances()
# Test mixed compressed and uncompressed pubkeys
self.log.info('Mixed compressed and uncompressed multisigs are not allowed')
pk0 = node0.getaddressinfo(node0.getnewaddress())['pubkey']
pk1 = node1.getaddressinfo(node1.getnewaddress())['pubkey']
pk2 = node2.getaddressinfo(node2.getnewaddress())['pubkey']
# decompress pk2
pk_obj = ECPubKey()
pk_obj.set(bytes.fromhex(pk2))
pk_obj.compressed = False
pk2 = pk_obj.get_bytes().hex()
node0.createwallet(wallet_name='wmulti0', disable_private_keys=True)
wmulti0 = node0.get_wallet_rpc('wmulti0')
# Check all permutations of keys because order matters apparently
for keys in itertools.permutations([pk0, pk1, pk2]):
# Results should be the same as this legacy one
legacy_addr = node0.createmultisig(2, keys, 'legacy')['address']
result = wmulti0.addmultisigaddress(2, keys, '', 'legacy')
assert_equal(legacy_addr, result['address'])
assert 'warnings' not in result
# Generate addresses with the segwit types. These should all make legacy addresses
for addr_type in ['bech32', 'p2sh-segwit']:
result = wmulti0.createmultisig(2, keys, addr_type)
assert_equal(legacy_addr, result['address'])
assert_equal(result['warnings'], ["Unable to make chosen address type, please ensure no uncompressed public keys are present."])
result = wmulti0.addmultisigaddress(2, keys, '', addr_type)
assert_equal(legacy_addr, result['address'])
assert_equal(result['warnings'], ["Unable to make chosen address type, please ensure no uncompressed public keys are present."])
self.log.info('Testing sortedmulti descriptors with BIP 67 test vectors')
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/rpc_bip67.json'), encoding='utf-8') as f:
vectors = json.load(f)
for t in vectors:
key_str = ','.join(t['keys'])
desc = descsum_create('sh(sortedmulti(2,{}))'.format(key_str))
assert_equal(self.nodes[0].deriveaddresses(desc)[0], t['address'])
sorted_key_str = ','.join(t['sorted_keys'])
sorted_key_desc = descsum_create('sh(multi(2,{}))'.format(sorted_key_str))
assert_equal(self.nodes[0].deriveaddresses(sorted_key_desc)[0], t['address'])
# Check that bech32m is currently not allowed
assert_raises_rpc_error(-5, "createmultisig cannot create bech32m multisig addresses", self.nodes[0].createmultisig, 2, self.pub, "bech32m")
def check_addmultisigaddress_errors(self):
if self.options.descriptors:
return
self.log.info('Check that addmultisigaddress fails when the private keys are missing')
addresses = [self.nodes[1].getnewaddress(address_type='legacy') for _ in range(2)]
assert_raises_rpc_error(-5, 'no full public key for address', lambda: self.nodes[0].addmultisigaddress(nrequired=1, keys=addresses))
for a in addresses:
# Importing all addresses should not change the result
self.nodes[0].importaddress(a)
assert_raises_rpc_error(-5, 'no full public key for address', lambda: self.nodes[0].addmultisigaddress(nrequired=1, keys=addresses))
# Bech32m address type is disallowed for legacy wallets
pubs = [self.nodes[1].getaddressinfo(addr)["pubkey"] for addr in addresses]
assert_raises_rpc_error(-5, "Bech32m multisig addresses cannot be created with legacy wallets", self.nodes[0].addmultisigaddress, 2, pubs, "", "bech32m")
def checkbalances(self):
node0, node1, node2 = self.nodes
self.generate(node0, COINBASE_MATURITY)
bal0 = node0.getbalance()
bal1 = node1.getbalance()
bal2 = node2.getbalance()
height = node0.getblockchaininfo()["blocks"]
assert 150 < height < 350
total = 149 * 50 + (height - 149 - 100) * 25
assert bal1 == 0
assert bal2 == self.moved
assert bal0 + bal1 + bal2 == total
def do_multisig(self):
node0, node1, node2 = self.nodes
if 'wmulti' not in node1.listwallets():
try:
node1.loadwallet('wmulti')
except JSONRPCException as e:
path = os.path.join(self.options.tmpdir, "node1", "regtest", "wallets", "wmulti")
if e.error['code'] == -18 and "Wallet file verification failed. Failed to load database path '{}'. Path does not exist.".format(path) in e.error['message']:
node1.createwallet(wallet_name='wmulti', disable_private_keys=True)
else:
raise
wmulti = node1.get_wallet_rpc('wmulti')
# Construct the expected descriptor
desc = 'multi({},{})'.format(self.nsigs, ','.join(self.pub))
if self.output_type == 'legacy':
desc = 'sh({})'.format(desc)
elif self.output_type == 'p2sh-segwit':
desc = 'sh(wsh({}))'.format(desc)
elif self.output_type == 'bech32':
desc = 'wsh({})'.format(desc)
desc = descsum_create(desc)
msig = node2.createmultisig(self.nsigs, self.pub, self.output_type)
madd = msig["address"]
mredeem = msig["redeemScript"]
assert_equal(desc, msig['descriptor'])
if self.output_type == 'bech32':
assert madd[0:4] == "bcrt" # actually a bech32 address
# compare against addmultisigaddress
msigw = wmulti.addmultisigaddress(self.nsigs, self.pub, None, self.output_type)
maddw = msigw["address"]
mredeemw = msigw["redeemScript"]
assert_equal(desc, drop_origins(msigw['descriptor']))
        # addmultisigaddress and createmultisig work the same
assert maddw == madd
assert mredeemw == mredeem
txid = node0.sendtoaddress(madd, 40)
tx = node0.getrawtransaction(txid, True)
vout = [v["n"] for v in tx["vout"] if madd == v["scriptPubKey"]["address"]]
assert len(vout) == 1
vout = vout[0]
scriptPubKey = tx["vout"][vout]["scriptPubKey"]["hex"]
value = tx["vout"][vout]["value"]
prevtxs = [{"txid": txid, "vout": vout, "scriptPubKey": scriptPubKey, "redeemScript": mredeem, "amount": value}]
self.generate(node0, 1)
outval = value - decimal.Decimal("0.00001000")
rawtx = node2.createrawtransaction([{"txid": txid, "vout": vout}], [{self.final: outval}])
prevtx_err = dict(prevtxs[0])
del prevtx_err["redeemScript"]
assert_raises_rpc_error(-8, "Missing redeemScript/witnessScript", node2.signrawtransactionwithkey, rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
# if witnessScript specified, all ok
prevtx_err["witnessScript"] = prevtxs[0]["redeemScript"]
node2.signrawtransactionwithkey(rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
# both specified, also ok
prevtx_err["redeemScript"] = prevtxs[0]["redeemScript"]
node2.signrawtransactionwithkey(rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
# redeemScript mismatch to witnessScript
prevtx_err["redeemScript"] = "6a" # OP_RETURN
assert_raises_rpc_error(-8, "redeemScript does not correspond to witnessScript", node2.signrawtransactionwithkey, rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
# redeemScript does not match scriptPubKey
del prevtx_err["witnessScript"]
assert_raises_rpc_error(-8, "redeemScript/witnessScript does not match scriptPubKey", node2.signrawtransactionwithkey, rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
# witnessScript does not match scriptPubKey
prevtx_err["witnessScript"] = prevtx_err["redeemScript"]
del prevtx_err["redeemScript"]
assert_raises_rpc_error(-8, "redeemScript/witnessScript does not match scriptPubKey", node2.signrawtransactionwithkey, rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
rawtx2 = node2.signrawtransactionwithkey(rawtx, self.priv[0:self.nsigs - 1], prevtxs)
rawtx3 = node2.signrawtransactionwithkey(rawtx2["hex"], [self.priv[-1]], prevtxs)
self.moved += outval
tx = node0.sendrawtransaction(rawtx3["hex"], 0)
blk = self.generate(node0, 1)[0]
assert tx in node0.getblock(blk)["tx"]
txinfo = node0.getrawtransaction(tx, True, blk)
self.log.info("n/m=%d/%d %s size=%d vsize=%d weight=%d" % (self.nsigs, self.nkeys, self.output_type, txinfo["size"], txinfo["vsize"], txinfo["weight"]))
wmulti.unloadwallet()
if __name__ == '__main__':
RpcCreateMultiSigTest().main()
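# Note: this test is written for Bitcoin Core's functional test framework (see the
# test_framework imports above); it is typically launched directly, e.g.
#   test/functional/rpc_createmultisig.py
# or collected by test/functional/test_runner.py. The exact path/name is assumed from
# upstream conventions and may differ in a vendored copy.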
|
py | 1a3d16a9fc2d7be8c140e3ab26d7eb9eee710ffb | #!/usr/bin/env python3
import string
class BracketError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Machine():
def __init__(self):
self.tape = [0]
self.p = 0
def run(self, code, step=False):
pc = 0
loop_stack = []
brackets = 0
printed = False
for instr in code:
if instr == '[':
brackets += 1
elif instr == ']':
brackets -= 1
if brackets != 0:
raise BracketError('Error: failed bracket count')
while pc < len(code):
instr = code[pc]
# increment/decrement
if instr == '+':
self.increment(1)
elif instr == '-':
self.increment(-1)
# I/O
elif instr == '.':
print(chr(self.cell()), end='')
printed = True
elif instr == ',':
self.input()
# move tape
elif instr == '<':
if self.p > 0:
self.p -= 1
else:
print("Error: Can't decrement pointer")
elif instr == '>':
if self.p > (len(self.tape)-2):
self.tape.append(0)
self.p += 1
# looping
elif instr == ']':
pc = loop_stack.pop() - 1
elif instr == '[':
                if self.cell() == 0:
                    depth = 1  # skip to the matching ']' (tracks nesting so nested loops work)
                    while depth > 0:
                        pc += 1
                        if code[pc] == '[': depth += 1
                        elif code[pc] == ']': depth -= 1
else:
loop_stack.append(pc)
if step:
input()
pc += 1
if printed:
print('')
def set(self, val):
self.tape[self.p] = val % 128
def increment(self, amount):
self.set(self.cell() + amount)
def input(self):
character = input()
if character == '':
print("No value given, setting cell to 0 ...")
self.set(0)
else:
self.set(ord(character[0]))
def cell(self):
return self.tape[self.p]
def dump(self):
print("%d," % self.p, self.tape)
def write_to(program, command):
split = command.index(' ')
line = int(command[:split])
command = command[(split+1):]
if line < len(program):
program[line] = command
else:
while len(program) < line:
program.append('')
program.append(command)
if __name__ == "__main__":
helptext = "help: Display this help text\nquit: Quit\ndump: Print tape, pointer\nclear: Reset tape\nnew: Wipe program\nlist: List program\nrun: Run program\nsave <arg>: Save program as <arg>\nload <arg>: Load program from <arg>\nstep [arg]: step through program or optional arg"
tape = Machine()
program = []
while True:
try:
command = input("[%d]:%d$ " %(tape.p,tape.cell()))
except EOFError:
break
if command == "":
continue
elif command == "q" or command == "quit":
break
elif command == "d" or command == "dump":
tape.dump()
elif command == "h" or command == "help":
print(helptext)
elif command == "new":
program = []
elif command == "clear":
tape = Machine()
print("Tape Reset")
elif command == "l" or command == "list":
for number, line in enumerate(program):
if line != '':
print(number, line)
elif command == "r" or command == "run":
tape.run("".join(program))
elif command[:4] == "load":
f = open(command[5:],mode='r')
program = f.read().split('\n')
f.close()
elif command[:4] == "save":
f = open(command[5:],mode='w')
f.write('\n'.join(program))
f.close()
elif command == "step":
            tape.run("".join(program), step=True)
elif command[:4] == "step":
tape.run(command[5:], step=True)
elif command[0] in string.digits:
write_to(program, command)
else:
try:
tape.run(command)
except BracketError:
print("Error: Failed bracket count!")
print("Goodbye!")
|
py | 1a3d16b8c02519cc0eacfc06525591f70bace53f | from __future__ import absolute_import
from builtins import object
import future.utils as futils
import os
if futils.PY2:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
else:
from io import BytesIO as StringIO
from .compat import as_bytes, as_str
# Python 2.4 support: os lacks SEEK_END and friends
try:
getattr(os, "SEEK_END")
except AttributeError:
os.SEEK_SET, os.SEEK_CUR, os.SEEK_END = list(range(3))
class SimpleBuffer(object):
"""
>>> b = SimpleBuffer()
>>> b.write('abcdef')
>>> b.read(3)
'abc'
>>> b.consume(3)
>>> b.write('z')
>>> b.read()
'defz'
>>> b.read()
'defz'
>>> b.read(0)
''
>>> repr(b)
"<SimpleBuffer of 4 bytes, 7 total size, 'defz'>"
>>> str(b)
"<SimpleBuffer of 4 bytes, 7 total size, 'defz'>"
>>> len(b)
4
>>> bool(b)
True
>>> b.flush()
>>> len(b)
0
>>> bool(b)
False
>>> b.read(1)
''
>>> b.write('a'*524288)
>>> b.flush() # run GC code
"""
def __init__(self):
self.buf = StringIO()
self.size = 0
self.offset = 0
def write(self, data):
data = as_bytes(data)
self.buf.write(data)
self.size += len(data)
def read(self, size=None):
self.buf.seek(self.offset)
if size is None:
data = self.buf.read()
else:
data = self.buf.read(size)
self.buf.seek(0, os.SEEK_END)
return data
def consume(self, size):
self.offset += size
self.size -= size
# GC old StringIO instance and free memory used by it.
if self.size == 0 and self.offset > 524288:
self.buf.close()
self.buf = StringIO()
self.offset = 0
def flush(self):
self.consume(self.size)
def __bool__(self):
return self.size > 0
def __len__(self):
return self.size
def __str__(self):
return self.__repr__()
def __repr__(self):
return '<SimpleBuffer of %i bytes, %i total size, %r%s>' % \
(self.size, self.size + self.offset, self.read(16),
(self.size > 16) and '...' or '')
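# Note: the doctest in the class docstring reflects Python 2 str semantics. Under
# Python 3 the buffer is backed by io.BytesIO, so read() returns bytes (b'abc' rather
# than 'abc') and the expected outputs would need b'' prefixes before a run of
# `python -m doctest` on this module could pass.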
|
py | 1a3d16f278293c38283c1d3eb79ad2a43e9626b7 | from conftest import get_metrics
from pyriemann.embedding import Embedding
import pytest
@pytest.mark.parametrize("metric", get_metrics())
@pytest.mark.parametrize("eps", [None, 0.1])
def test_embedding(metric, eps, get_covmats):
"""Test Embedding."""
n_trials, n_channels, n_comp = 6, 3, 2
covmats = get_covmats(n_trials, n_channels)
embd = Embedding(metric=metric, n_components=n_comp, eps=eps)
covembd = embd.fit_transform(covmats)
assert covembd.shape == (n_trials, n_comp)
def test_fit_independence(get_covmats):
n_trials, n_channels = 6, 3
covmats = get_covmats(n_trials, n_channels)
embd = Embedding()
embd.fit_transform(covmats)
# retraining with different size should erase previous fit
new_covmats = covmats[:, :-1, :-1]
embd.fit_transform(new_covmats)
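# These are pytest-style tests; the get_covmats fixture and get_metrics helper are
# expected to come from the package's conftest.py, so the file is meant to be collected
# by a plain `pytest` run from the repository root (file name/location assumed).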
|
py | 1a3d1871f27c4b94c2e11356bee5bf7bac3b1b75 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v4.errors",
marshal="google.ads.googleads.v4",
manifest={"HeaderErrorEnum",},
)
class HeaderErrorEnum(proto.Message):
r"""Container for enum describing possible header errors."""
class HeaderError(proto.Enum):
r"""Enum describing possible header errors."""
UNSPECIFIED = 0
UNKNOWN = 1
INVALID_LOGIN_CUSTOMER_ID = 3
INVALID_LINKED_CUSTOMER_ID = 7
__all__ = tuple(sorted(__protobuf__.manifest))
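# Illustrative lookup (values taken from the enum definition above): the nested enum is
# reachable as HeaderErrorEnum.HeaderError, e.g.
#   HeaderErrorEnum.HeaderError.INVALID_LOGIN_CUSTOMER_ID == 3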
|
py | 1a3d18ff1c0819a7dfa25eb1c96387ac7cd58eb7 | import cdflib
import numpy as np
import pandas as pd
import sys
sys.path.insert(0, '/home/andres_munoz_j/pyCFOFiSAX')
print(sys.path)
# import importlib.util
# spec = importlib.util.spec_from_file_location('ForestISAX', '/home/andres_munoz_j/pyCFOFiSAX/pyCFOFiSAX/_forest_iSAX.py')
# ForestISAX = importlib.util.module_from_spec(spec)
# spec.loader.exec_module(ForestISAX)
# spec = importlib.util.spec_from_file_location('TreeISAX', '/home/andres_munoz_j/pyCFOFiSAX/pyCFOFiSAX/_tree_iSAX.py')
# TreeISAX = importlib.util.module_from_spec(spec)
# spec.loader.exec_module(TreeISAX)
from pyCFOFiSAX._forest_iSAX import ForestISAX
from pyCFOFiSAX._isax import IndexableSymbolicAggregateApproximation
# from pyCFOFiSAX._tree_iSAX import TreeISAX
from anytree import RenderTree
from anytree.exporter import DotExporter
psp_path = '/sw-data/psp/'
path = psp_path + 'mag_rtn/'
year = '2019'
month = '05'
day = '15'
hour = '00'
cdf_file_path = path + year + '/psp_fld_l2_mag_rtn_' + year + month + day + hour + '_v01.cdf'
cdf_file = cdflib.CDF(cdf_file_path)
x = cdf_file.varget('epoch_mag_RTN') # reading in the epoch time stamps
x = cdflib.epochs.CDFepoch.to_datetime(x) # convrting x axis labels to date time stamps
y = cdf_file.varget('psp_fld_l2_mag_RTN')
npoints = 200
# Start with Bx
ts = y[0:int(y.shape[0]/npoints)*npoints,0].reshape(-1,npoints)
# Append By
ts = np.append(ts, y[0:int(y.shape[0]/npoints)*npoints,1].reshape(-1,npoints), axis=0)
# Append Bz
ts = np.append(ts, y[0:int(y.shape[0]/npoints)*npoints,2].reshape(-1,npoints), axis=0)
# Create auxiliary dataframe
ts_loc = pd.DataFrame({'File':np.repeat(cdf_file_path,ts.shape[0]//3), 'Component':np.repeat('Bx',ts.shape[0]//3)})
ts_loc['t0'] = np.array(x[0:int(y.shape[0]/npoints)*npoints]).reshape(-1,npoints)[:,0]
ts_loc['t1'] = np.array(x[0:int(y.shape[0]/npoints)*npoints]).reshape(-1,npoints)[:,-1]
tmp_loc = ts_loc.copy()
tmp_loc['Component'] = 'By'
ts_loc = pd.concat((ts_loc,tmp_loc))
tmp_loc['Component'] = 'Bz'
ts_loc = pd.concat((ts_loc,tmp_loc)).reset_index(drop=True)
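# At this point `ts` holds fixed-length windows of `npoints` (200) samples, stacked as
# all Bx windows, then By, then Bz, and `ts_loc` carries one metadata row per window
# (source file, component label, window start/end timestamps), so ts.shape[0] == len(ts_loc).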
sw_forest = ForestISAX(size_word=10,
threshold=20,
data_ts=ts,
base_cardinality=2, number_tree=1)
sw_forest.index_data(ts, parallel=True)
# test = sw_forest.forest[0].root.escalate_node(sw_forest.forest[0].root.nodes[0])
# print(test)
sw_forest.forest[0].parallel_escalation()
# print(test)
size_word = 10
mu = np.mean(ts)
sig = np.std(ts)
isax = IndexableSymbolicAggregateApproximation(size_word, mean=mu, std=sig)
nodes_at_level = sw_forest.forest[0].get_nodes_of_level_or_terminal(8)
annotations_l = nodes_at_level[30].get_annotations()
sequences_l = nodes_at_level[30].get_sequences()
annotations_l = pd.concat([pd.Series(sequences_l, index=annotations_l.index, name='iSAX'), annotations_l], axis=1)
# print(sw_forest.forest[0].root.get_sequences())
# print(sw_forest.forest[0].root.get_annotations())
print('done') |
py | 1a3d1b6ee044efd3584383b4ced251f72745edeb | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
from flask import Flask,request, jsonify, render_template
import pickle
# In[2]:
app=Flask(__name__)
model=pickle.load(open('spam_model.pkl','rb'))
cv = pickle.load(open('cv-transform.pkl','rb'))
# In[3]:
@app.route('/')
def home():
return render_template('index.html')
# In[4]:
@app.route('/predict', methods=['POST'])
def predict():
if request.method == 'POST':
message = request.form['message']
data = [message]
vect = cv.transform(data).toarray()
prediction = model.predict(vect)
return render_template('index.html', prediction_text="message is $ {}".format(prediction))
# In[5]:
if __name__=="__main__":
app.run(debug=True)
# In[6]:
# In[ ]:
|
py | 1a3d1c14795b0ab05f2689d2c1ff2836e628d644 | import copy
import decimal
import subprocess
import time
import os
import re
import datetime
import json
import signal
from core_symbol import CORE_SYMBOL
from testUtils import Utils
from testUtils import Account
from testUtils import EnumType
from testUtils import addEnum
from testUtils import unhandledEnumType
class ReturnType(EnumType):
pass
addEnum(ReturnType, "raw")
addEnum(ReturnType, "json")
class BlockType(EnumType):
pass
addEnum(BlockType, "head")
addEnum(BlockType, "lib")
# pylint: disable=too-many-public-methods
class Node(object):
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
def __init__(self, host, port, pid=None, cmd=None, walletMgr=None, enableMongo=False, mongoHost="localhost", mongoPort=27017, mongoDb="EOStest"):
self.host=host
self.port=port
self.pid=pid
self.cmd=cmd
if Utils.Debug: Utils.Print("new Node host=%s, port=%s, pid=%s, cmd=%s" % (self.host, self.port, self.pid, self.cmd))
self.killed=False # marks node as killed
self.enableMongo=enableMongo
self.mongoHost=mongoHost
self.mongoPort=mongoPort
self.mongoDb=mongoDb
self.endpointHttp="http://%s:%d" % (self.host, self.port)
self.endpointArgs="--url %s" % (self.endpointHttp)
self.mongoEndpointArgs=""
self.infoValid=None
self.lastRetrievedHeadBlockNum=None
self.lastRetrievedLIB=None
self.transCache={}
self.walletMgr=walletMgr
self.missingTransaction=False
self.popenProc=None # initial process is started by launcher, this will only be set on relaunch
if self.enableMongo:
self.mongoEndpointArgs += "--host %s --port %d %s" % (mongoHost, mongoPort, mongoDb)
def eosClientArgs(self):
walletArgs=" " + self.walletMgr.getWalletEndpointArgs() if self.walletMgr is not None else ""
return self.endpointArgs + walletArgs + " " + Utils.MiscEosClientArgs
def __str__(self):
#return "Host: %s, Port:%d, Pid:%s, Cmd:\"%s\"" % (self.host, self.port, self.pid, self.cmd)
return "Host: %s, Port:%d" % (self.host, self.port)
@staticmethod
def validateTransaction(trans):
assert trans
assert isinstance(trans, dict), print("Input type is %s" % type(trans))
executed="executed"
def printTrans(trans, status):
Utils.Print("ERROR: Valid transaction should be \"%s\" but it was \"%s\"." % (executed, status))
Utils.Print("Transaction: %s" % (json.dumps(trans, indent=1)))
transStatus=Node.getTransStatus(trans)
assert transStatus == executed, printTrans(trans, transStatus)
@staticmethod
def __printTransStructureError(trans, context):
Utils.Print("ERROR: Failure in expected transaction structure. Missing trans%s." % (context))
Utils.Print("Transaction: %s" % (json.dumps(trans, indent=1)))
class Context:
def __init__(self, obj, desc):
self.obj=obj
self.sections=[obj]
self.keyContext=[]
self.desc=desc
def __json(self):
return "%s=\n%s" % (self.desc, json.dumps(self.obj, indent=1))
def __keyContext(self):
msg=""
for key in self.keyContext:
if msg=="":
msg="["
else:
msg+="]["
msg+=key
if msg!="":
msg+="]"
return msg
def __contextDesc(self):
return "%s%s" % (self.desc, self.__keyContext())
def add(self, newKey):
assert isinstance(newKey, str), print("ERROR: Trying to use %s as a key" % (newKey))
subSection=self.sections[-1]
assert isinstance(subSection, dict), print("ERROR: Calling \"add\" method when context is not a dictionary. %s in %s" % (self.__contextDesc(), self.__json()))
            assert newKey in subSection, print("ERROR: %s does not contain key \"%s\". %s" % (self.__contextDesc(), newKey, self.__json()))
current=subSection[newKey]
self.sections.append(current)
self.keyContext.append(newKey)
return current
def index(self, i):
assert isinstance(i, int), print("ERROR: Trying to use \"%s\" as a list index" % (i))
cur=self.getCurrent()
assert isinstance(cur, list), print("ERROR: Calling \"index\" method when context is not a list. %s in %s" % (self.__contextDesc(), self.__json()))
listLen=len(cur)
assert i < listLen, print("ERROR: Index %s is beyond the size of the current list (%s). %s in %s" % (i, listLen, self.__contextDesc(), self.__json()))
return self.sections.append(cur[i])
def getCurrent(self):
return self.sections[-1]
@staticmethod
def getTransStatus(trans):
cntxt=Node.Context(trans, "trans")
cntxt.add("processed")
cntxt.add("receipt")
return cntxt.add("status")
@staticmethod
def getTransBlockNum(trans):
cntxt=Node.Context(trans, "trans")
cntxt.add("processed")
cntxt.add("action_traces")
cntxt.index(0)
return cntxt.add("block_num")
@staticmethod
def stdinAndCheckOutput(cmd, subcommand):
"""Passes input to stdin, executes cmd. Returns tuple with return code(int), stdout(byte stream) and stderr(byte stream)."""
assert(cmd)
assert(isinstance(cmd, list))
assert(subcommand)
assert(isinstance(subcommand, str))
outs=None
errs=None
ret=0
try:
popen=subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
outs,errs=popen.communicate(input=subcommand.encode("utf-8"))
ret=popen.wait()
except subprocess.CalledProcessError as ex:
msg=ex.output
return (ex.returncode, msg, None)
return (ret, outs, errs)
@staticmethod
def normalizeJsonObject(extJStr):
tmpStr=extJStr
tmpStr=re.sub(r'ObjectId\("(\w+)"\)', r'"ObjectId-\1"', tmpStr)
tmpStr=re.sub(r'ISODate\("([\w|\-|\:|\.]+)"\)', r'"ISODate-\1"', tmpStr)
tmpStr=re.sub(r'NumberLong\("(\w+)"\)', r'"NumberLong-\1"', tmpStr)
return tmpStr
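    # Example of the rewriting done above (illustrative input -> output):
    #   'ObjectId("5c9a")'                     -> '"ObjectId-5c9a"'
    #   'ISODate("2019-01-01T00:00:00.000Z")'  -> '"ISODate-2019-01-01T00:00:00.000Z"'
    #   'NumberLong("42")'                     -> '"NumberLong-42"'
    # i.e. mongo shell extended-JSON wrappers are flattened into plain strings that
    # json.loads can parse.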
@staticmethod
def runMongoCmdReturnJson(cmd, subcommand, trace=False, exitOnError=False):
"""Run mongodb subcommand and return response."""
assert(cmd)
assert(isinstance(cmd, list))
assert(subcommand)
assert(isinstance(subcommand, str))
retId,outs,errs=Node.stdinAndCheckOutput(cmd, subcommand)
        if retId != 0:
errorMsg="mongodb call failed. cmd=[ %s ] subcommand=\"%s\" - %s" % (", ".join(cmd), subcommand, errs)
if exitOnError:
Utils.cmdError(errorMsg)
Utils.errorExit(errorMsg)
Utils.Print("ERROR: %s" % (errMsg))
return None
outStr=Node.byteArrToStr(outs)
if not outStr:
return None
extJStr=Utils.filterJsonObjectOrArray(outStr)
if not extJStr:
return None
jStr=Node.normalizeJsonObject(extJStr)
if not jStr:
return None
if trace: Utils.Print ("RAW > %s"% (outStr))
if trace: Utils.Print ("JSON> %s"% jStr)
try:
jsonData=json.loads(jStr)
except json.decoder.JSONDecodeError as _:
Utils.Print ("ERROR: JSONDecodeError")
Utils.Print ("Raw MongoDB response: > %s"% (outStr))
Utils.Print ("Normalized MongoDB response: > %s"% (jStr))
raise
return jsonData
@staticmethod
def getTransId(trans):
"""Retrieve transaction id from dictionary object."""
assert trans
assert isinstance(trans, dict), print("Input type is %s" % type(trans))
assert "transaction_id" in trans, print("trans does not contain key %s. trans={%s}" % ("transaction_id", json.dumps(trans, indent=2, sort_keys=True)))
transId=trans["transaction_id"]
return transId
@staticmethod
def isTrans(obj):
"""Identify if this is a transaction dictionary."""
if obj is None or not isinstance(obj, dict):
return False
return True if "transaction_id" in obj else False
@staticmethod
def byteArrToStr(arr):
return arr.decode("utf-8")
def validateAccounts(self, accounts):
assert(accounts)
assert(isinstance(accounts, list))
for account in accounts:
assert(account)
assert(isinstance(account, Account))
if Utils.Debug: Utils.Print("Validating account %s" % (account.name))
accountInfo=self.getEosAccount(account.name, exitOnError=True)
try:
if not self.enableMongo:
assert(accountInfo["account_name"] == account.name)
else:
assert(accountInfo["name"] == account.name)
except (AssertionError, TypeError, KeyError) as _:
Utils.Print("account validation failed. account: %s" % (account.name))
raise
# pylint: disable=too-many-branches
def getBlock(self, blockNum, silentErrors=False, exitOnError=False):
"""Given a blockId will return block details."""
assert(isinstance(blockNum, int))
if not self.enableMongo:
cmdDesc="get block"
cmd="%s %d" % (cmdDesc, blockNum)
msg="(block number=%s)" % (blockNum);
return self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=msg)
else:
cmd="%s %s" % (Utils.MongoPath, self.mongoEndpointArgs)
subcommand='db.blocks.findOne( { "block_num": %d } )' % (blockNum)
if Utils.Debug: Utils.Print("cmd: echo '%s' | %s" % (subcommand, cmd))
start=time.perf_counter()
try:
block=Node.runMongoCmdReturnJson(cmd.split(), subcommand, exitOnError=exitOnError)
if Utils.Debug:
end=time.perf_counter()
Utils.Print("cmd Duration: %.3f sec" % (end-start))
if block is not None:
return block
except subprocess.CalledProcessError as ex:
if not silentErrors:
end=time.perf_counter()
msg=ex.output.decode("utf-8")
errorMsg="Exception during get db node get block. cmd Duration: %.3f sec. %s" % (end-start, msg)
if exitOnError:
Utils.cmdError(errorMsg)
Utils.errorExit(errorMsg)
else:
Utils.Print("ERROR: %s" % (errorMsg))
return None
return None
def getBlockByIdMdb(self, blockId, silentErrors=False):
cmd="%s %s" % (Utils.MongoPath, self.mongoEndpointArgs)
subcommand='db.blocks.findOne( { "block_id": "%s" } )' % (blockId)
if Utils.Debug: Utils.Print("cmd: echo '%s' | %s" % (subcommand, cmd))
start=time.perf_counter()
try:
trans=Node.runMongoCmdReturnJson(cmd.split(), subcommand)
if Utils.Debug:
end=time.perf_counter()
Utils.Print("cmd Duration: %.3f sec" % (end-start))
if trans is not None:
return trans
except subprocess.CalledProcessError as ex:
if not silentErrors:
end=time.perf_counter()
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during db get block by id. cmd Duration: %.3f sec. %s" % (end-start, msg))
return None
return None
def isBlockPresent(self, blockNum, blockType=BlockType.head):
"""Does node have head_block_num/last_irreversible_block_num >= blockNum"""
assert isinstance(blockNum, int)
assert isinstance(blockType, BlockType)
assert (blockNum > 0)
info=self.getInfo(silentErrors=True, exitOnError=True)
node_block_num=0
try:
if blockType==BlockType.head:
node_block_num=int(info["head_block_num"])
elif blockType==BlockType.lib:
node_block_num=int(info["last_irreversible_block_num"])
else:
unhandledEnumType(blockType)
except (TypeError, KeyError) as _:
Utils.Print("Failure in get info parsing %s block. %s" % (blockType.type, info))
raise
present = True if blockNum <= node_block_num else False
if Utils.Debug and blockType==BlockType.lib:
decorator=""
if present:
decorator="is not "
Utils.Print("Block %d is %sfinalized." % (blockNum, decorator))
return present
def isBlockFinalized(self, blockNum):
"""Is blockNum finalized"""
return self.isBlockPresent(blockNum, blockType=BlockType.lib)
# pylint: disable=too-many-branches
def getTransaction(self, transId, silentErrors=False, exitOnError=False, delayedRetry=True):
assert(isinstance(transId, str))
exitOnErrorForDelayed=not delayedRetry and exitOnError
timeout=3
if not self.enableMongo:
cmdDesc="get transaction"
cmd="%s %s" % (cmdDesc, transId)
msg="(transaction id=%s)" % (transId);
for i in range(0,(int(60/timeout) - 1)):
trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnErrorForDelayed, exitMsg=msg)
if trans is not None or not delayedRetry:
return trans
if Utils.Debug: Utils.Print("Could not find transaction with id %s, delay and retry" % (transId))
time.sleep(timeout)
self.missingTransaction=True
# either it is there or the transaction has timed out
return self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=msg)
else:
for i in range(0,(int(60/timeout) - 1)):
trans=self.getTransactionMdb(transId, silentErrors=silentErrors, exitOnError=exitOnErrorForDelayed)
if trans is not None or not delayedRetry:
return trans
if Utils.Debug: Utils.Print("Could not find transaction with id %s in mongodb, delay and retry" % (transId))
time.sleep(timeout)
return self.getTransactionMdb(transId, silentErrors=silentErrors, exitOnError=exitOnError)
def getTransactionMdb(self, transId, silentErrors=False, exitOnError=False):
"""Get transaction from MongoDB. Since DB only contains finalized blocks, transactions can take a while to appear in DB."""
cmd="%s %s" % (Utils.MongoPath, self.mongoEndpointArgs)
#subcommand='db.Transactions.findOne( { $and : [ { "trx_id": "%s" }, {"irreversible":true} ] } )' % (transId)
subcommand='db.transactions.findOne( { "trx_id": "%s" } )' % (transId)
if Utils.Debug: Utils.Print("cmd: echo '%s' | %s" % (subcommand, cmd))
start=time.perf_counter()
try:
trans=Node.runMongoCmdReturnJson(cmd.split(), subcommand, exitOnError=exitOnError)
if Utils.Debug:
end=time.perf_counter()
Utils.Print("cmd Duration: %.3f sec" % (end-start))
if trans is not None:
return trans
except subprocess.CalledProcessError as ex:
end=time.perf_counter()
msg=ex.output.decode("utf-8")
errorMsg="Exception during get db node get trans in mongodb with transaction id=%s. cmd Duration: %.3f sec. %s" % (transId, end-start, msg)
if exitOnError:
Utils.cmdError("" % (errorMsg))
Utils.errorExit("Failed to retrieve transaction in mongodb for transaction id=%s" % (transId))
elif not silentErrors:
Utils.Print("ERROR: %s" % (errorMsg))
return None
def isTransInBlock(self, transId, blockId):
"""Check if transId is within block identified by blockId"""
assert(transId)
assert(isinstance(transId, str))
assert(blockId)
assert(isinstance(blockId, int))
block=self.getBlock(blockId, exitOnError=True)
transactions=None
key=""
try:
if not self.enableMongo:
key="[transactions]"
transactions=block["transactions"]
else:
key="[blocks][transactions]"
transactions=block["block"]["transactions"]
except (AssertionError, TypeError, KeyError) as _:
Utils.Print("block%s not found. Block: %s" % (key,block))
raise
if transactions is not None:
for trans in transactions:
assert(trans)
try:
myTransId=trans["trx"]["id"]
if transId == myTransId:
return True
except (TypeError, KeyError) as _:
Utils.Print("transaction%s not found. Transaction: %s" % (key, trans))
return False
def getBlockIdByTransId(self, transId, delayedRetry=True):
"""Given a transaction Id (string), will return the actual block id (int) containing the transaction"""
assert(transId)
assert(isinstance(transId, str))
trans=self.getTransaction(transId, exitOnError=True, delayedRetry=delayedRetry)
refBlockNum=None
key=""
try:
if not self.enableMongo:
key="[trx][trx][ref_block_num]"
refBlockNum=trans["trx"]["trx"]["ref_block_num"]
else:
key="[ref_block_num]"
refBlockNum=trans["ref_block_num"]
refBlockNum=int(refBlockNum)+1
except (TypeError, ValueError, KeyError) as _:
Utils.Print("transaction%s not found. Transaction: %s" % (key, trans))
return None
headBlockNum=self.getHeadBlockNum()
assert(headBlockNum)
try:
headBlockNum=int(headBlockNum)
except(ValueError) as _:
Utils.Print("ERROR: Block info parsing failed. %s" % (headBlockNum))
raise
if Utils.Debug: Utils.Print("Reference block num %d, Head block num: %d" % (refBlockNum, headBlockNum))
for blockNum in range(refBlockNum, headBlockNum+1):
if self.isTransInBlock(str(transId), blockNum):
if Utils.Debug: Utils.Print("Found transaction %s in block %d" % (transId, blockNum))
return blockNum
return None
def getBlockIdByTransIdMdb(self, transId):
"""Given a transaction Id (string), will return block id (int) containing the transaction. This is specific to MongoDB."""
assert(transId)
assert(isinstance(transId, str))
trans=self.getTransactionMdb(transId)
if not trans: return None
refBlockNum=None
try:
refBlockNum=trans["ref_block_num"]
refBlockNum=int(refBlockNum)+1
except (TypeError, ValueError, KeyError) as _:
Utils.Print("transaction[ref_block_num] not found. Transaction: %s" % (trans))
return None
headBlockNum=self.getHeadBlockNum()
assert(headBlockNum)
try:
headBlockNum=int(headBlockNum)
except(ValueError) as _:
Utils.Print("Info parsing failed. %s" % (headBlockNum))
for blockNum in range(refBlockNum, headBlockNum+1):
if self.isTransInBlock(str(transId), blockNum):
return blockNum
return None
def isTransInAnyBlock(self, transId):
"""Check if transaction (transId) is in a block."""
assert(transId)
assert(isinstance(transId, (str,int)))
# if not self.enableMongo:
blockId=self.getBlockIdByTransId(transId)
# else:
# blockId=self.getBlockIdByTransIdMdb(transId)
return True if blockId else False
def isTransFinalized(self, transId):
"""Check if transaction (transId) has been finalized."""
assert(transId)
assert(isinstance(transId, str))
blockId=self.getBlockIdByTransId(transId)
if not blockId:
return False
assert(isinstance(blockId, int))
return self.isBlockPresent(blockId, blockType=BlockType.lib)
# Create & initialize account and return creation transactions. Return transaction json object
def createInitializeAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False, stakeNet=100, stakeCPU=100, buyRAM=10000, exitOnError=False):
cmdDesc="system newaccount"
cmd='%s -j %s %s %s %s --stake-net "%s %s" --stake-cpu "%s %s" --buy-ram "%s %s"' % (
cmdDesc, creatorAccount.name, account.name, account.ownerPublicKey,
account.activePublicKey, stakeNet, CORE_SYMBOL, stakeCPU, CORE_SYMBOL, buyRAM, CORE_SYMBOL)
msg="(creator account=%s, account=%s)" % (creatorAccount.name, account.name);
trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg)
self.trackCmdTransaction(trans)
transId=Node.getTransId(trans)
if stakedDeposit > 0:
self.waitForTransInBlock(transId) # seems like account creation needs to be finalized before transfer can happen
trans = self.transferFunds(creatorAccount, account, Node.currencyIntToStr(stakedDeposit, CORE_SYMBOL), "init")
transId=Node.getTransId(trans)
return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError)
def createAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False, exitOnError=False):
"""Create account and return creation transactions. Return transaction json object.
waitForTransBlock: wait on creation transaction id to appear in a block."""
cmdDesc="create account"
cmd="%s -j %s %s %s %s" % (
cmdDesc, creatorAccount.name, account.name, account.ownerPublicKey, account.activePublicKey)
msg="(creator account=%s, account=%s)" % (creatorAccount.name, account.name);
trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg)
self.trackCmdTransaction(trans)
transId=Node.getTransId(trans)
if stakedDeposit > 0:
            self.waitForTransInBlock(transId) # seems like account creation needs to be finalized before transfer can happen
trans = self.transferFunds(creatorAccount, account, "%0.04f %s" % (stakedDeposit/10000, CORE_SYMBOL), "init")
self.trackCmdTransaction(trans)
transId=Node.getTransId(trans)
return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError)
def getEosAccount(self, name, exitOnError=False, returnType=ReturnType.json, avoidMongo=False):
assert(isinstance(name, str))
if not self.enableMongo or avoidMongo:
cmdDesc="get account"
jsonFlag="-j" if returnType==ReturnType.json else ""
cmd="%s %s %s" % (cmdDesc, jsonFlag, name)
msg="( getEosAccount(name=%s) )" % (name);
return self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg, returnType=returnType)
else:
assert returnType == ReturnType.json, "MongoDB only supports a returnType of ReturnType.json"
return self.getEosAccountFromDb(name, exitOnError=exitOnError)
def getEosAccountFromDb(self, name, exitOnError=False):
cmd="%s %s" % (Utils.MongoPath, self.mongoEndpointArgs)
subcommand='db.accounts.findOne({"name" : "%s"})' % (name)
if Utils.Debug: Utils.Print("cmd: echo '%s' | %s" % (subcommand, cmd))
try:
timeout = 3
for i in range(0,(int(60/timeout) - 1)):
start=time.perf_counter()
trans=Node.runMongoCmdReturnJson(cmd.split(), subcommand, exitOnError=exitOnError)
if trans is not None:
if Utils.Debug:
end=time.perf_counter()
Utils.Print("cmd Duration: %.3f sec" % (end-start))
return trans
time.sleep(timeout)
return trans
except subprocess.CalledProcessError as ex:
msg=ex.output.decode("utf-8")
if exitOnError:
end=time.perf_counter()
Utils.cmdError("Exception during get account from db for %s. cmd Duration: %.3f sec. %s" % (name, end-start, msg))
Utils.errorExit("Failed during get account from db for %s. %s" % (name, msg))
Utils.Print("ERROR: Exception during get account from db for %s. %s" % (name, msg))
return None
def getTable(self, contract, scope, table, exitOnError=False):
cmdDesc = "get table"
cmd="%s %s %s %s" % (cmdDesc, contract, scope, table)
msg="contract=%s, scope=%s, table=%s" % (contract, scope, table);
return self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg)
def getTableAccountBalance(self, contract, scope):
assert(isinstance(contract, str))
assert(isinstance(scope, str))
table="accounts"
trans = self.getTable(contract, scope, table, exitOnError=True)
try:
return trans["rows"][0]["balance"]
except (TypeError, KeyError) as _:
print("transaction[rows][0][balance] not found. Transaction: %s" % (trans))
raise
def getCurrencyBalance(self, contract, account, symbol=CORE_SYMBOL, exitOnError=False):
"""returns raw output from get currency balance e.g. '99999.9950 CUR'"""
assert(contract)
assert(isinstance(contract, str))
assert(account)
assert(isinstance(account, str))
assert(symbol)
assert(isinstance(symbol, str))
cmdDesc = "get currency balance"
cmd="%s %s %s %s" % (cmdDesc, contract, account, symbol)
msg="contract=%s, account=%s, symbol=%s" % (contract, account, symbol);
return self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg, returnType=ReturnType.raw)
def getCurrencyStats(self, contract, symbol=CORE_SYMBOL, exitOnError=False):
"""returns Json output from get currency stats."""
assert(contract)
assert(isinstance(contract, str))
assert(symbol)
assert(isinstance(symbol, str))
cmdDesc = "get currency stats"
cmd="%s %s %s" % (cmdDesc, contract, symbol)
msg="contract=%s, symbol=%s" % (contract, symbol);
return self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg)
# Verifies account. Returns "get account" json return object
def verifyAccount(self, account):
assert(account)
if not self.enableMongo:
ret=self.getEosAccount(account.name)
if ret is not None:
account_name=ret["account_name"]
if account_name is None:
Utils.Print("ERROR: Failed to verify account creation.", account.name)
return None
return ret
else:
return self.verifyAccountMdb(account)
def verifyAccountMdb(self, account):
assert(account)
ret=self.getEosAccountFromDb(account.name)
if ret is not None:
account_name=ret["name"]
if account_name is None:
Utils.Print("ERROR: Failed to verify account creation.", account.name)
return None
return ret
return None
def waitForTransInBlock(self, transId, timeout=None):
"""Wait for trans id to be finalized."""
assert(isinstance(transId, str))
lam = lambda: self.isTransInAnyBlock(transId)
ret=Utils.waitForBool(lam, timeout)
return ret
def waitForTransFinalization(self, transId, timeout=None):
"""Wait for trans id to be finalized."""
assert(isinstance(transId, str))
lam = lambda: self.isTransFinalized(transId)
ret=Utils.waitForBool(lam, timeout)
return ret
def waitForNextBlock(self, timeout=None, blockType=BlockType.head):
num=self.getBlockNum(blockType=blockType)
lam = lambda: self.getHeadBlockNum() > num
ret=Utils.waitForBool(lam, timeout)
return ret
def waitForBlock(self, blockNum, timeout=None, blockType=BlockType.head):
lam = lambda: self.getBlockNum(blockType=blockType) > blockNum
ret=Utils.waitForBool(lam, timeout)
return ret
def waitForIrreversibleBlock(self, blockNum, timeout=None, blockType=BlockType.head):
return self.waitForBlock(blockNum, timeout=timeout, blockType=blockType)
# Trasfer funds. Returns "transfer" json return object
def transferFunds(self, source, destination, amountStr, memo="memo", force=False, waitForTransBlock=False, exitOnError=True):
assert isinstance(amountStr, str)
assert(source)
assert(isinstance(source, Account))
assert(destination)
assert(isinstance(destination, Account))
cmd="%s %s -v transfer -j %s %s" % (
Utils.EosClientPath, self.eosClientArgs(), source.name, destination.name)
cmdArr=cmd.split()
cmdArr.append(amountStr)
cmdArr.append(memo)
if force:
cmdArr.append("-f")
s=" ".join(cmdArr)
if Utils.Debug: Utils.Print("cmd: %s" % (s))
trans=None
start=time.perf_counter()
try:
trans=Utils.runCmdArrReturnJson(cmdArr)
if Utils.Debug:
end=time.perf_counter()
Utils.Print("cmd Duration: %.3f sec" % (end-start))
self.trackCmdTransaction(trans)
except subprocess.CalledProcessError as ex:
end=time.perf_counter()
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during funds transfer. cmd Duration: %.3f sec. %s" % (end-start, msg))
if exitOnError:
Utils.cmdError("could not transfer \"%s\" from %s to %s" % (amountStr, source, destination))
Utils.errorExit("Failed to transfer \"%s\" from %s to %s" % (amountStr, source, destination))
return None
if trans is None:
Utils.cmdError("could not transfer \"%s\" from %s to %s" % (amountStr, source, destination))
Utils.errorExit("Failed to transfer \"%s\" from %s to %s" % (amountStr, source, destination))
return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError)
@staticmethod
def currencyStrToInt(balanceStr):
"""Converts currency string of form "12.3456 EON" to int 123456"""
assert(isinstance(balanceStr, str))
balanceStr=balanceStr.split()[0]
#balance=int(decimal.Decimal(balanceStr[1:])*10000)
balance=int(decimal.Decimal(balanceStr)*10000)
return balance
@staticmethod
def currencyIntToStr(balance, symbol):
"""Converts currency int of form 123456 to string "12.3456 EON" where EON is symbol string"""
assert(isinstance(balance, int))
assert(isinstance(symbol, str))
balanceStr="%.04f %s" % (balance/10000.0, symbol)
return balanceStr
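    # Round-trip illustration for the two helpers above (amounts are examples only):
    #   Node.currencyStrToInt("12.3456 EON")  -> 123456
    #   Node.currencyIntToStr(123456, "EON")  -> "12.3456 EON"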
def validateFunds(self, initialBalances, transferAmount, source, accounts):
"""Validate each account has the expected EON balance. Validate cumulative balance matches expectedTotal."""
assert(source)
assert(isinstance(source, Account))
assert(accounts)
assert(isinstance(accounts, list))
assert(len(accounts) > 0)
assert(initialBalances)
assert(isinstance(initialBalances, dict))
assert(isinstance(transferAmount, int))
currentBalances=self.getEosBalances([source] + accounts)
assert(currentBalances)
assert(isinstance(currentBalances, dict))
assert(len(initialBalances) == len(currentBalances))
if len(currentBalances) != len(initialBalances):
Utils.Print("ERROR: validateFunds> accounts length mismatch. Initial: %d, current: %d" % (len(initialBalances), len(currentBalances)))
return False
for key, value in currentBalances.items():
initialBalance = initialBalances[key]
assert(initialBalances)
expectedInitialBalance = value - transferAmount
if key is source:
expectedInitialBalance = value + (transferAmount*len(accounts))
if (initialBalance != expectedInitialBalance):
Utils.Print("ERROR: validateFunds> Expected: %d, actual: %d for account %s" %
(expectedInitialBalance, initialBalance, key.name))
                return False
        return True
def getEosBalances(self, accounts):
"""Returns a dictionary with account balances keyed by accounts"""
assert(accounts)
assert(isinstance(accounts, list))
balances={}
for account in accounts:
balance = self.getAccountEosBalance(account.name)
balances[account]=balance
return balances
# Gets accounts mapped to key. Returns json object
def getAccountsByKey(self, key, exitOnError=False):
cmdDesc = "get accounts"
cmd="%s %s" % (cmdDesc, key)
msg="key=%s" % (key);
return self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg)
# Get actions mapped to an account (cleos get actions)
def getActions(self, account, pos=-1, offset=-1, exitOnError=False):
assert(isinstance(account, Account))
assert(isinstance(pos, int))
assert(isinstance(offset, int))
if not self.enableMongo:
cmdDesc = "get actions"
cmd="%s -j %s %d %d" % (cmdDesc, account.name, pos, offset)
msg="account=%s, pos=%d, offset=%d" % (account.name, pos, offset);
return self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg)
else:
return self.getActionsMdb(account, pos, offset, exitOnError=exitOnError)
def getActionsMdb(self, account, pos=-1, offset=-1, exitOnError=False):
assert(isinstance(account, Account))
assert(isinstance(pos, int))
assert(isinstance(offset, int))
cmd="%s %s" % (Utils.MongoPath, self.mongoEndpointArgs)
subcommand='db.action_traces.find({$or: [{"act.data.from":"%s"},{"act.data.to":"%s"}]}).sort({"_id":%d}).limit(%d)' % (account.name, account.name, pos, abs(offset))
if Utils.Debug: Utils.Print("cmd: echo '%s' | %s" % (subcommand, cmd))
start=time.perf_counter()
try:
actions=Node.runMongoCmdReturnJson(cmd.split(), subcommand, exitOnError=exitOnError)
if Utils.Debug:
end=time.perf_counter()
Utils.Print("cmd Duration: %.3f sec" % (end-start))
if actions is not None:
return actions
except subprocess.CalledProcessError as ex:
end=time.perf_counter()
msg=ex.output.decode("utf-8")
errorMsg="Exception during get db actions. cmd Duration: %.3f sec. %s" % (end-start, msg)
if exitOnError:
Utils.cmdError(errorMsg)
Utils.errorExit(errorMsg)
else:
Utils.Print("ERROR: %s" % (errorMsg))
return None
# Gets accounts mapped to key. Returns array
def getAccountsArrByKey(self, key):
trans=self.getAccountsByKey(key)
assert(trans)
assert("account_names" in trans)
accounts=trans["account_names"]
return accounts
def getServants(self, name, exitOnError=False):
cmdDesc = "get servants"
cmd="%s %s" % (cmdDesc, name)
msg="name=%s" % (name);
return self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg)
def getServantsArr(self, name):
trans=self.getServants(name, exitOnError=True)
servants=trans["controlled_accounts"]
return servants
def getAccountEosBalanceStr(self, scope):
"""Returns EON currency0000 account balance from cleos get table command. Returned balance is string following syntax "98.0311 EON". """
assert isinstance(scope, str)
amount=self.getTableAccountBalance("eonio.token", scope)
if Utils.Debug: Utils.Print("getNodeAccountEosBalance %s %s" % (scope, amount))
assert isinstance(amount, str)
return amount
def getAccountEosBalance(self, scope):
"""Returns EON currency0000 account balance from cleos get table command. Returned balance is an integer e.g. 980311. """
balanceStr=self.getAccountEosBalanceStr(scope)
balance=Node.currencyStrToInt(balanceStr)
return balance
def getAccountCodeHash(self, account):
cmd="%s %s get code %s" % (Utils.EosClientPath, self.eosClientArgs(), account)
if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
start=time.perf_counter()
try:
retStr=Utils.checkOutput(cmd.split())
if Utils.Debug:
end=time.perf_counter()
Utils.Print("cmd Duration: %.3f sec" % (end-start))
#Utils.Print ("get code> %s"% retStr)
p=re.compile(r'code\shash: (\w+)\n', re.MULTILINE)
m=p.search(retStr)
if m is None:
msg="Failed to parse code hash."
Utils.Print("ERROR: "+ msg)
return None
return m.group(1)
except subprocess.CalledProcessError as ex:
end=time.perf_counter()
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during code hash retrieval. cmd Duration: %.3f sec. %s" % (end-start, msg))
return None
# publish contract and return transaction as json object
def publishContract(self, account, contractDir, wasmFile, abiFile, waitForTransBlock=False, shouldFail=False):
cmd="%s %s -v set contract -j %s %s" % (Utils.EosClientPath, self.eosClientArgs(), account, contractDir)
cmd += "" if wasmFile is None else (" "+ wasmFile)
cmd += "" if abiFile is None else (" " + abiFile)
if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
trans=None
start=time.perf_counter()
try:
trans=Utils.runCmdReturnJson(cmd, trace=False)
self.trackCmdTransaction(trans)
if Utils.Debug:
end=time.perf_counter()
Utils.Print("cmd Duration: %.3f sec" % (end-start))
except subprocess.CalledProcessError as ex:
if not shouldFail:
end=time.perf_counter()
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during code hash retrieval. cmd Duration: %.3f sec. %s" % (end-start, msg))
return None
else:
retMap={}
retMap["returncode"]=ex.returncode
retMap["cmd"]=ex.cmd
retMap["output"]=ex.output
# commented below as they are available only in Python3.5 and above
# retMap["stdout"]=ex.stdout
# retMap["stderr"]=ex.stderr
return retMap
if shouldFail:
Utils.Print("ERROR: The publish contract did not fail as expected.")
return None
Node.validateTransaction(trans)
return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=False)
def getTableRows(self, contract, scope, table):
jsonData=self.getTable(contract, scope, table)
if jsonData is None:
return None
rows=jsonData["rows"]
return rows
def getTableRow(self, contract, scope, table, idx):
if idx < 0:
Utils.Print("ERROR: Table index cannot be negative. idx: %d" % (idx))
return None
rows=self.getTableRows(contract, scope, table)
if rows is None or idx >= len(rows):
Utils.Print("ERROR: Retrieved table does not contain row %d" % idx)
return None
row=rows[idx]
return row
def getTableColumns(self, contract, scope, table):
row=self.getTableRow(contract, scope, table, 0)
keys=list(row.keys())
return keys
# returns tuple with transaction and
def pushMessage(self, account, action, data, opts, silentErrors=False):
cmd="%s %s push action -j %s %s" % (Utils.EosClientPath, self.eosClientArgs(), account, action)
cmdArr=cmd.split()
if data is not None:
cmdArr.append(data)
if opts is not None:
cmdArr += opts.split()
s=" ".join(cmdArr)
if Utils.Debug: Utils.Print("cmd: %s" % (cmdArr))
start=time.perf_counter()
try:
trans=Utils.runCmdArrReturnJson(cmdArr)
self.trackCmdTransaction(trans, ignoreNonTrans=True)
if Utils.Debug:
end=time.perf_counter()
Utils.Print("cmd Duration: %.3f sec" % (end-start))
return (True, trans)
except subprocess.CalledProcessError as ex:
msg=ex.output.decode("utf-8")
if not silentErrors:
end=time.perf_counter()
Utils.Print("ERROR: Exception during push message. cmd Duration=%.3f sec. %s" % (end - start, msg))
return (False, msg)
def setPermission(self, account, code, pType, requirement, waitForTransBlock=False, exitOnError=False):
cmdDesc="set action permission"
cmd="%s -j %s %s %s %s" % (cmdDesc, account, code, pType, requirement)
trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError)
self.trackCmdTransaction(trans)
return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError)
def delegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, transferTo=False, waitForTransBlock=False, exitOnError=False):
if toAccount is None:
toAccount=fromAccount
cmdDesc="system delegatebw"
transferStr="--transfer" if transferTo else ""
cmd="%s -j %s %s \"%s %s\" \"%s %s\" %s" % (
cmdDesc, fromAccount.name, toAccount.name, netQuantity, CORE_SYMBOL, cpuQuantity, CORE_SYMBOL, transferStr)
msg="fromAccount=%s, toAccount=%s" % (fromAccount.name, toAccount.name);
trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg)
self.trackCmdTransaction(trans)
return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError)
def undelegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, waitForTransBlock=False, exitOnError=False):
if toAccount is None:
toAccount=fromAccount
cmdDesc="system undelegatebw"
cmd="%s -j %s %s \"%s %s\" \"%s %s\"" % (
cmdDesc, fromAccount.name, toAccount.name, netQuantity, CORE_SYMBOL, cpuQuantity, CORE_SYMBOL)
msg="fromAccount=%s, toAccount=%s" % (fromAccount.name, toAccount.name);
trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg)
self.trackCmdTransaction(trans)
return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError)
def regproducer(self, producer, url, location, waitForTransBlock=False, exitOnError=False):
cmdDesc="system regproducer"
cmd="%s -j %s %s %s %s" % (
cmdDesc, producer.name, producer.activePublicKey, url, location)
msg="producer=%s" % (producer.name);
trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg)
self.trackCmdTransaction(trans)
return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError)
def vote(self, account, producers, waitForTransBlock=False, exitOnError=False):
cmdDesc = "system voteproducer prods"
cmd="%s -j %s %s" % (
cmdDesc, account.name, " ".join(producers))
msg="account=%s, producers=[ %s ]" % (account.name, ", ".join(producers));
trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg)
self.trackCmdTransaction(trans)
return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError)
def processCleosCmd(self, cmd, cmdDesc, silentErrors=True, exitOnError=False, exitMsg=None, returnType=ReturnType.json):
assert(isinstance(returnType, ReturnType))
cmd="%s %s %s" % (Utils.EosClientPath, self.eosClientArgs(), cmd)
if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
if exitMsg is not None:
exitMsg="Context: " + exitMsg
else:
exitMsg=""
trans=None
start=time.perf_counter()
try:
if returnType==ReturnType.json:
trans=Utils.runCmdReturnJson(cmd, silentErrors=silentErrors)
elif returnType==ReturnType.raw:
trans=Utils.runCmdReturnStr(cmd)
else:
unhandledEnumType(returnType)
if Utils.Debug:
end=time.perf_counter()
Utils.Print("cmd Duration: %.3f sec" % (end-start))
except subprocess.CalledProcessError as ex:
if not silentErrors:
end=time.perf_counter()
msg=ex.output.decode("utf-8")
errorMsg="Exception during \"%s\". Exception message: %s. cmd Duration=%.3f sec. %s" % (cmdDesc, msg, end-start, exitMsg)
if exitOnError:
Utils.cmdError(errorMsg)
Utils.errorExit(errorMsg)
else:
Utils.Print("ERROR: %s" % (errorMsg))
return None
if exitOnError and trans is None:
Utils.cmdError("could not \"%s\". %s" % (cmdDesc,exitMsg))
Utils.errorExit("Failed to \"%s\"" % (cmdDesc))
return trans
def killNodeOnProducer(self, producer, whereInSequence, blockType=BlockType.head, silentErrors=True, exitOnError=False, exitMsg=None, returnType=ReturnType.json):
assert(isinstance(producer, str))
assert(isinstance(whereInSequence, int))
assert(isinstance(blockType, BlockType))
assert(isinstance(returnType, ReturnType))
basedOnLib="true" if blockType==BlockType.lib else "false"
cmd="curl %s/v1/test_control/kill_node_on_producer -d '{ \"producer\":\"%s\", \"where_in_sequence\":%d, \"based_on_lib\":\"%s\" }' -X POST -H \"Content-Type: application/json\"" % \
(self.endpointHttp, producer, whereInSequence, basedOnLib)
if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
rtn=None
start=time.perf_counter()
try:
if returnType==ReturnType.json:
rtn=Utils.runCmdReturnJson(cmd, silentErrors=silentErrors)
elif returnType==ReturnType.raw:
rtn=Utils.runCmdReturnStr(cmd)
else:
unhandledEnumType(returnType)
if Utils.Debug:
end=time.perf_counter()
Utils.Print("cmd Duration: %.3f sec" % (end-start))
except subprocess.CalledProcessError as ex:
if not silentErrors:
end=time.perf_counter()
msg=ex.output.decode("utf-8")
errorMsg="Exception during \"%s\". %s. cmd Duration=%.3f sec." % (cmd, msg, end-start)
if exitOnError:
Utils.cmdError(errorMsg)
Utils.errorExit(errorMsg)
else:
Utils.Print("ERROR: %s" % (errorMsg))
return None
if exitMsg is not None:
exitMsg=": " + exitMsg
else:
exitMsg=""
if exitOnError and rtn is None:
Utils.cmdError("could not \"%s\" - %s" % (cmd,exitMsg))
Utils.errorExit("Failed to \"%s\"" % (cmd))
return rtn
def waitForTransBlockIfNeeded(self, trans, waitForTransBlock, exitOnError=False):
if not waitForTransBlock:
return trans
transId=Node.getTransId(trans)
if not self.waitForTransInBlock(transId):
if exitOnError:
Utils.cmdError("transaction with id %s never made it to a block" % (transId))
Utils.errorExit("Failed to find transaction with id %s in a block before timeout" % (transId))
return None
return trans
def getInfo(self, silentErrors=False, exitOnError=False):
cmdDesc = "get info"
info=self.processCleosCmd(cmdDesc, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError)
if info is None:
self.infoValid=False
else:
self.infoValid=True
self.lastRetrievedHeadBlockNum=int(info["head_block_num"])
self.lastRetrievedLIB=int(info["last_irreversible_block_num"])
return info
def getBlockFromDb(self, idx):
cmd="%s %s" % (Utils.MongoPath, self.mongoEndpointArgs)
subcommand="db.blocks.find().sort({\"_id\":%d}).limit(1).pretty()" % (idx)
if Utils.Debug: Utils.Print("cmd: echo \"%s\" | %s" % (subcommand, cmd))
start=time.perf_counter()
try:
trans=Node.runMongoCmdReturnJson(cmd.split(), subcommand)
if Utils.Debug:
end=time.perf_counter()
Utils.Print("cmd Duration: %.3f sec" % (end-start))
return trans
except subprocess.CalledProcessError as ex:
end=time.perf_counter()
msg=ex.output.decode("utf-8")
Utils.Print("ERROR: Exception during get db block. cmd Duration: %.3f sec. %s" % (end-start, msg))
return None
def checkPulse(self, exitOnError=False):
info=self.getInfo(True, exitOnError=exitOnError)
        return info is not None
def getHeadBlockNum(self):
"""returns head block number(string) as returned by cleos get info."""
if not self.enableMongo:
info=self.getInfo(exitOnError=True)
if info is not None:
headBlockNumTag="head_block_num"
return info[headBlockNumTag]
else:
            # Either this implementation or the one in getIrreversibleBlockNum is likely wrong.
block=self.getBlockFromDb(-1)
if block is not None:
blockNum=block["block_num"]
return blockNum
return None
def getIrreversibleBlockNum(self):
if not self.enableMongo:
info=self.getInfo(exitOnError=True)
if info is not None:
return info["last_irreversible_block_num"]
else:
            # Either this implementation or the one in getHeadBlockNum is likely wrong.
block=self.getBlockFromDb(-1)
if block is not None:
blockNum=block["block_num"]
return blockNum
return None
def getBlockNum(self, blockType=BlockType.head):
assert isinstance(blockType, BlockType)
if blockType==BlockType.head:
return self.getHeadBlockNum()
elif blockType==BlockType.lib:
return self.getIrreversibleBlockNum()
else:
unhandledEnumType(blockType)
def kill(self, killSignal):
if Utils.Debug: Utils.Print("Killing node: %s" % (self.cmd))
assert(self.pid is not None)
try:
os.kill(self.pid, killSignal)
except OSError as ex:
Utils.Print("ERROR: Failed to kill node (%d)." % (self.cmd), ex)
return False
# wait for kill validation
def myFunc():
try:
os.kill(self.pid, 0) #check if process with pid is running
except OSError as _:
return True
return False
if not Utils.waitForBool(myFunc):
Utils.Print("ERROR: Failed to validate node shutdown.")
return False
# mark node as killed
self.pid=None
self.killed=True
return True
def interruptAndVerifyExitStatus(self):
if Utils.Debug: Utils.Print("terminating node: %s" % (self.cmd))
assert self.popenProc is not None, "node: \"%s\" does not have a popenProc, this may be because it is only set after a relaunch." % (self.cmd)
self.popenProc.send_signal(signal.SIGINT)
try:
outs, _ = self.popenProc.communicate(timeout=15)
assert self.popenProc.returncode == 0, "Expected terminating \"%s\" to have an exit status of 0, but got %d" % (self.cmd, self.popenProc.returncode)
except subprocess.TimeoutExpired:
Utils.errorExit("Terminate call failed on node: %s" % (self.cmd))
def verifyAlive(self, silent=False):
if not silent and Utils.Debug: Utils.Print("Checking if node(pid=%s) is alive(killed=%s): %s" % (self.pid, self.killed, self.cmd))
if self.killed or self.pid is None:
self.killed=True
self.pid=None
return False
try:
os.kill(self.pid, 0)
except ProcessLookupError as ex:
# mark node as killed
self.pid=None
self.killed=True
return False
except PermissionError as ex:
return True
return True
def getBlockProducerByNum(self, blockNum, timeout=None, waitForBlock=True, exitOnError=True):
if waitForBlock:
self.waitForBlock(blockNum, timeout=timeout, blockType=BlockType.head)
block=self.getBlock(blockNum, exitOnError=exitOnError)
blockProducer=block["producer"]
if blockProducer is None and exitOnError:
Utils.cmdError("could not get producer for block number %s" % (blockNum))
Utils.errorExit("Failed to get block's producer")
return blockProducer
def getBlockProducer(self, timeout=None, waitForBlock=True, exitOnError=True, blockType=BlockType.head):
blockNum=self.getBlockNum(blockType=blockType)
block=self.getBlock(blockNum, exitOnError=exitOnError, blockType=blockType)
blockProducer=block["producer"]
if blockProducer is None and exitOnError:
Utils.cmdError("could not get producer for block number %s" % (blockNum))
Utils.errorExit("Failed to get block's producer")
return blockProducer
def getNextCleanProductionCycle(self, trans):
transId=Node.getTransId(trans)
        rounds=21*12*2 # max time: at least 2/3+1 of the producers x blocks per producer x at least 2 rounds
self.waitForTransFinalization(transId, timeout=rounds/2)
irreversibleBlockNum=self.getIrreversibleBlockNum()
# The voted schedule should be promoted now, then need to wait for that to become irreversible
votingTallyWindow=120 #could be up to 120 blocks before the votes were tallied
promotedBlockNum=self.getHeadBlockNum()+votingTallyWindow
self.waitForIrreversibleBlock(promotedBlockNum, timeout=rounds/2)
ibnSchedActive=self.getIrreversibleBlockNum()
blockNum=self.getHeadBlockNum()
Utils.Print("Searching for clean production cycle blockNum=%s ibn=%s transId=%s promoted bn=%s ibn for schedule active=%s" % (blockNum,irreversibleBlockNum,transId,promotedBlockNum,ibnSchedActive))
blockProducer=self.getBlockProducerByNum(blockNum)
blockNum+=1
Utils.Print("Advance until the next block producer is retrieved")
while blockProducer == self.getBlockProducerByNum(blockNum):
blockNum+=1
blockProducer=self.getBlockProducerByNum(blockNum)
return blockNum
# TBD: make nodeId an internal property
# pylint: disable=too-many-locals
def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTimeout, addOrSwapFlags=None, cachePopen=False):
assert(self.pid is None)
assert(self.killed)
if Utils.Debug: Utils.Print("Launching node process, Id: %d" % (nodeId))
cmdArr=[]
myCmd=self.cmd
toAddOrSwap=copy.deepcopy(addOrSwapFlags) if addOrSwapFlags is not None else {}
if not newChain:
skip=False
swapValue=None
for i in self.cmd.split():
Utils.Print("\"%s\"" % (i))
if skip:
skip=False
continue
if "--genesis-json" == i or "--genesis-timestamp" == i:
skip=True
continue
if swapValue is None:
cmdArr.append(i)
else:
cmdArr.append(swapValue)
swapValue=None
if i in toAddOrSwap:
swapValue=toAddOrSwap[i]
del toAddOrSwap[i]
for k,v in toAddOrSwap.items():
cmdArr.append(k)
cmdArr.append(v)
myCmd=" ".join(cmdArr)
dataDir="var/lib/node_%02d" % (nodeId)
dt = datetime.datetime.now()
dateStr="%d_%02d_%02d_%02d_%02d_%02d" % (
dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
stdoutFile="%s/stdout.%s.txt" % (dataDir, dateStr)
stderrFile="%s/stderr.%s.txt" % (dataDir, dateStr)
with open(stdoutFile, 'w') as sout, open(stderrFile, 'w') as serr:
cmd=myCmd + ("" if chainArg is None else (" " + chainArg))
Utils.Print("cmd: %s" % (cmd))
popen=subprocess.Popen(cmd.split(), stdout=sout, stderr=serr)
if cachePopen:
self.popenProc=popen
self.pid=popen.pid
if Utils.Debug: Utils.Print("restart Node host=%s, port=%s, pid=%s, cmd=%s" % (self.host, self.port, self.pid, self.cmd))
def isNodeAlive():
"""wait for node to be responsive."""
try:
return True if self.checkPulse() else False
except (TypeError) as _:
pass
return False
isAlive=Utils.waitForBool(isNodeAlive, timeout)
if isAlive:
Utils.Print("Node relaunch was successfull.")
else:
Utils.Print("ERROR: Node relaunch Failed.")
self.pid=None
return False
self.cmd=cmd
self.killed=False
return True
def trackCmdTransaction(self, trans, ignoreNonTrans=False):
if trans is None:
if Utils.Debug: Utils.Print(" cmd returned transaction: %s" % (trans))
return
if ignoreNonTrans and not Node.isTrans(trans):
if Utils.Debug: Utils.Print(" cmd returned a non-transaction")
return
transId=Node.getTransId(trans)
if Utils.Debug:
status=Node.getTransStatus(trans)
blockNum=Node.getTransBlockNum(trans)
if transId in self.transCache.keys():
replaceMsg="replacing previous trans=\n%s" % json.dumps(self.transCache[transId], indent=2, sort_keys=True)
else:
replaceMsg=""
Utils.Print(" cmd returned transaction id: %s, status: %s, (possible) block num: %s %s" % (transId, status, blockNum, replaceMsg))
self.transCache[transId]=trans
def reportStatus(self):
Utils.Print("Node State:")
Utils.Print(" cmd : %s" % (self.cmd))
self.verifyAlive(silent=True)
Utils.Print(" killed: %s" % (self.killed))
Utils.Print(" host : %s" % (self.host))
Utils.Print(" port : %s" % (self.port))
Utils.Print(" pid : %s" % (self.pid))
status="last getInfo returned None" if not self.infoValid else "at last call to getInfo"
Utils.Print(" hbn : %s (%s)" % (self.lastRetrievedHeadBlockNum, status))
Utils.Print(" lib : %s (%s)" % (self.lastRetrievedLIB, status))
|
py | 1a3d1c4bc2eb7af6d5f194868d3b24c99cd58bdb | from flask import render_template, current_app, session, jsonify, request
from info import constants
from info.models import User, News, Category
from info.utils.response_code import RET
from . import index_blue
@index_blue.route('/news_list')
def news_list():
"""
获取首页新闻数据
:return:
"""
    # 1. Get the request parameters
    # id of the news category
    cid = request.args.get("cid", "1")
page = request.args.get("page", "1")
per_page = request.args.get("per_page", "10")
    # 2. Validate the parameters
try:
page = int(page)
cid = int(cid)
per_page = int(per_page)
except Exception as e:
current_app.logger.error(e)
        return jsonify(errno=RET.PARAMERR, errmsg="Invalid parameters")
filters = [News.status == 0]
    if cid != 1:  # not querying the latest news
        # add the category filter condition
        filters.append(News.category_id == cid)
    # 3. Query the data
try:
paginate = News.query.filter(*filters).order_by(News.create_time.desc()).paginate(page, per_page, False)
except Exception as e:
current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="Database query error")
    # Take the data for the current page
    news_model_list = paginate.items  # list of model objects
total_page = paginate.pages
current_page = paginate.page
    # Convert the list of model objects into a list of dicts
news_dict_li = []
for news in news_model_list:
news_dict_li.append(news.to_basic_dict())
data = {
"total_page": total_page,
"current_page": current_page,
"news_dict_li": news_dict_li
}
return jsonify(errno=RET.OK, errmsg="OK", data=data)
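# A hypothetical request example for the endpoint above (editor-added sketch; the host and any
# blueprint URL prefix depend on how index_blue is registered, so the exact path may differ):
#   GET /news_list?cid=2&page=1&per_page=10
# responds with JSON shaped like
#   {"errno": RET.OK, "errmsg": "OK",
#    "data": {"total_page": ..., "current_page": ..., "news_dict_li": [...]}}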
@index_blue.route('/')
def index():
user_id = session.get("user_id", None)
user = None
if user_id:
        # Try to query the user model
try:
user = User.query.get(user_id)
except Exception as e:
current_app.logger.error(e)
    # Logic for the news click ranking shown on the right side
news_list = []
try:
news_list = News.query.order_by(News.clicks.desc()).limit(constants.CLICK_RANK_MAX_NEWS)
except Exception as e:
current_app.logger.error(e)
    # Define an empty list that will hold the news dicts
news_dict_li = []
    # Iterate over the object list and append each object's dict to the dict list
for news in news_list:
news_dict_li.append(news.to_basic_dict())
    # Query the category data and render it through the template
categories = Category.query.all()
category_li = []
for category in categories:
category_li.append(category.to_dict())
data = {
"user": user.to_dict() if user else None,
"news_dict_li": news_dict_li,
"category_li": category_li
}
return render_template("news/index.html", data=data)
@index_blue.route('/favicon.ico')
def favicon():
return current_app.send_static_file('news/favicon.ico')
|
py | 1a3d1d2017874db3b60b4353727c141dbc94176d | from __future__ import absolute_import
from django.dispatch import Signal
class BetterSignal(Signal):
def connect(self, receiver=None, **kwargs):
"""
Support decorator syntax:
>>> @signal.connect(sender=type)
>>> def my_receiver(**kwargs):
>>> pass
"""
def wrapped(func):
return super(BetterSignal, self).connect(func, **kwargs)
if receiver is None:
return wrapped
if hasattr(receiver, '__name__'):
wrapped.__name__ = receiver.__name__
if hasattr(receiver, '__module__'):
wrapped.__module__ = receiver.__module__
if hasattr(receiver, '__doc__'):
wrapped.__doc__ = receiver.__doc__
return wrapped(receiver)
regression_signal = BetterSignal(providing_args=["instance"])
buffer_incr_complete = BetterSignal(providing_args=["model", "columns", "extra", "result"])
event_accepted = BetterSignal(providing_args=["ip", "data", "project"])
event_dropped = BetterSignal(providing_args=["ip", "data", "project"])
event_filtered = BetterSignal(providing_args=["ip", "data", "project"])
event_received = BetterSignal(providing_args=["ip", "project"])
pending_delete = BetterSignal(providing_args=["instance"])
event_processed = BetterSignal(providing_args=['project', 'group', 'event'])
# Organization Onboarding Signals
project_created = BetterSignal(providing_args=["project", "user"])
first_event_pending = BetterSignal(providing_args=["project", "user"])
first_event_received = BetterSignal(providing_args=["project", "group"])
member_invited = BetterSignal(providing_args=["member", "user"])
member_joined = BetterSignal(providing_args=["member"])
issue_tracker_used = BetterSignal(providing_args=["plugin", "project", "user"])
plugin_enabled = BetterSignal(providing_args=["plugin", "project", "user"])
email_verified = BetterSignal(providing_args=["email"])
mocks_loaded = BetterSignal(providing_args=["project"])
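# A minimal connection sketch for the signals above (editor-added illustration; the receiver
# name and the send() call site are hypothetical, not part of the original module):
#
#   @regression_signal.connect(weak=False)
#   def handle_regression(instance, **kwargs):
#       print("regression detected for", instance)
#
#   regression_signal.send(sender=None, instance=some_group)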
|
py | 1a3d1d7127eb3284856082edd3a692a1b3064576 |
import numpy as np
import matplotlib.pyplot as plt
def dbtime(x):
return (x/2-2)*(x/2-2)+2
xdbtime = np.arange(0,np.pi*4,0.1)
ydbtime = dbtime(xdbtime)
plt.grid()
plt.xlim(0,10)
plt.ylim(0,10)
plt.title("Fonction dbtime 2")
plt.plot(xdbtime,ydbtime)
plt.savefig('dbtime2.png')
|
py | 1a3d1dfaa12a0e3df056ffc8f4670cdfd749a4cf | import unittest
import virtualbox
from virtualbox.library import VBoxError
class TestErrors(unittest.TestCase):
def test_raises(self):
vbox = virtualbox.VirtualBox()
try:
vbox.find_machine('blah blah X')
except VBoxError as exc:
pass
else:
self.fail("VBoxError not raised")
|
py | 1a3d1e1ba3616a6a528cd0e8eaab857d24478fdd | _base_ = './fovea_r50_fpn_4x4_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
bbox_head=dict(
with_deform=True,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
data = dict(train=dict(pipeline=train_pipeline))
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24) |
py | 1a3d1e89b1ba2f76f1bcd266ec2dc7d385ea225f | #------------------------------------------------------------------------------
# Copyright 2020 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
# Name: CleanMRFCache.py
# Description: Cleans MRF Cache files by oldest access-time until free space
# requested has been achieved.
# Version: 20201109
# Requirements: Python
# Required Arguments: -input
# Optional Arguments: -mode -ext -size
# e.g.: -mode = [del,scan], -ext=txt,mrfcache -input=d:/mrfcache_folder
# Usage: python.exe CleanMRFCache.py <arguments>
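# Example invocation (editor-added; the input path below is hypothetical):
#           python CleanMRFCache.py -input=d:/mrfcache_folder -mode=scan -ext=mrfcache,txt -size=2000000000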
# Author: Esri Imagery Workflows team
#------------------------------------------------------------------------------
#!/usr/bin/env python
import sys
import operator
import argparse
import os
import ctypes
import platform
def Message(msg, status=0):
try:
if (log is not None):
log.Message(msg, status)
return
except:
pass
print(msg)
    # so that any parent processes receive the stdout in real time.
sys.stdout.flush()
class Cleaner:
def __init__(self):
pass
def init(self, input_path, extensions=()):
self.m_extensions = extensions
self.m_input_path = input_path.replace('\\', '/')
if (self.m_input_path.endswith('/') is False):
self.m_input_path += '/'
self.m_info = []
return True
def getFreeDiskSpace(self, input_path): # static
try:
fbytes = ctypes.c_ulonglong(0)
ctypes.windll.kernel32.GetDiskFreeSpaceExW(
ctypes.c_wchar_p(input_path),
None,
None,
ctypes.pointer(fbytes))
except:
return -1
return fbytes
def getFileInfo(self, root_only=False):
Message('[Generate file list]..')
for r, d, f in os.walk(self.m_input_path):
if (root_only):
if (r != self.m_input_path):
continue
for file in f:
(f_, e_) = os.path.splitext(file)
if ((e_[1:].lower() in self.m_extensions)):
mk_path = os.path.join(r, file).replace('\\', '/')
self.m_info.append({
'f': mk_path,
's': os.path.getsize(mk_path),
'at': os.path.getatime(mk_path)
})
try:
pass
except Exception as exp:
            Message('Err: (%s)' % (str(exp)))
return True
def main():
pass
if __name__ == '__main__':
main()
__program_ver__ = 'v1.0'
__program_name__ = 'CleanMRFCache.py %s' % __program_ver__
parser = argparse.ArgumentParser(description='Cleans MRF Cache files by '
'oldest access-time until free space '
'requested has been achieved.\n')
parser.add_argument('-input', help='Input directory', dest='input_path')
parser.add_argument('-mode', help='Processing mode. Valid modes [del, scan]',
dest='mode', default='scan')
parser.add_argument('-ext',
help='Extensions to filter-in. e.g. -ext=mrfcache,txt',
dest='ext')
parser.add_argument('-size', type=int,
help='Free size requested in bytes. e.g. -size=1000000',
dest='size', default=2000000000)
log = None
Message(__program_name__)
Message(parser.description)
args = parser.parse_args()
extensions = ['mrfcache']
# check for extensions
if (args.ext is not None):
ext_ = args.ext.split(',')
for e in ext_:
e = e.strip().lower()
if ((e in extensions) is False):
extensions.append(e)
# ends
# check input path
if (args.input_path is None):
Message('Err: -input is required.')
exit(0)
# ends
# clean-up instance
cln = Cleaner()
cln.init(args.input_path, extensions)
# ends
# let's get the free space
space_available = cln.getFreeDiskSpace(os.path.dirname(args.input_path))
if (space_available == -1): # an error has occured
Message('Err: Unable to get the free-disk-space for the path (%s)' %
(args.input_path))
exit(1)
# ends
space_to_free = args.size * 1000000000
space_available = space_available.value
if (space_available >= space_to_free):
Message('The disk already has the requested free space')
exit(0)
# setup -mode
is_mode = args.mode is not None
arg_mode = args.mode.lower()
Message('Mode (%s)' % arg_mode) # display the user/default selected (-mode)
# ends
ret = cln.getFileInfo()
if (ret is False):
Message('Err: Unable to scan for files. Quitting..')
exit(1)
process = sorted(cln.m_info, key=operator.itemgetter('at'), reverse=False)
print('\nResults:')
tot_savings = 0
for f in process:
print('%s [%s] [%s]' % (f['f'], f['s'], f['at']))
tot_savings += f['s']
if (is_mode):
if (arg_mode == 'del'):
Message('[Del] %s' % (f['f']))
# let's delete here.
try:
pass
os.remove(f['f'])
except Exception as exp:
Message('Err: Unable to remove (%s). Skipping..' % (f['f']))
continue
space_available += f['s']
if (space_available >= space_to_free):
pass
Message('\nRequired disk space has been freed.')
break
# ends
msg = '\nTotal savings if files get deleted: [%d] bytes.' % (tot_savings)
if (arg_mode == 'del'):
msg = '\nTotal space freed [%d] bytes' % (space_available)
if (space_available < space_to_free):
Message('\nUnable to free space requested.')
Message(msg)
Message('\nDone..')
|
py | 1a3d1f6c992c6dfd49b42ac641ef764ebd057319 | """
.. module:: Katna.config
   :platform: Platform Independent
:synopsis: This module defines some helpful configuration variables
"""
import os
# # Configuration parameters for Image class
class Image:
# default value by which image size to be reduces for processing
down_sample_factor = 8
# Debug flag
DEBUG = False
    # crop_height_reduction_factor_in_each_iteration: if a suitable crop is not found, the crop
    # height will be reduced by this multiplier/factor and the search for candidate crops
    # is resumed.
    # While reducing crop height and width, make sure they do not get smaller than
    # 1/min_image_to_crop_factor of the image height/width.
min_image_to_crop_factor = 4
crop_height_reduction_factor_in_each_iteration = 0.05
# # Configurations for Scoring crops for crop extractor
class CropScorer:
detail_weight = 0.2 # default weight value for detail parameter
edge_radius = 0.4 # default edge radius
edge_weight = -20 # default edge weight
outside_importance = (
-0.5
) # default value to set if the pixel is outside crop rectangle
rule_of_thirds = True # boolean to set rule of third condition check
saliency_bias = 0.2 # bias color value for saliency(+- error value)
    saliency_weight = 1.3  # default weight value for the saliency parameter
face_bias = 0.01 # bias color value for face(+- error value)
face_weight = 3.4 # default weight value for face parameter
rects_weight = 1 # default weight value for crop rectangles
# # Configurations for Text detection class
class TextDetector:
# Min Confidence Threshold for Text detection model
min_confidence = 0.9
# Threshold for merging text detection boxes
merge_threshold = 1
# Name of Model files to be used for text detection
frozen_weights = "frozen_east_text_detection.pb"
# Location where model file will be downloaded
cache_subdir = "models"
# Layers Name for text detection
layerNames = ["feature_fusion/Conv_7/Sigmoid", "feature_fusion/concat_3"]
# Download Link for Text detection model
model_download_link = "https://github.com/oyyd/frozen_east_text_detection.pb/raw/master/frozen_east_text_detection.pb"
# # Configurations for Edge Feature class
class EdgeFeature:
# min edge threshold value
min_val_threshold = 100
# Max edge threshold value
max_val_threshold = 200
# aperture_size/size of Sobel kernel for canny edge detector
ksize = 3
# # Configurations for Face detection Feature class
class FaceFeature:
# Model file name to be used for face detection
model_file = "res10_300x300_ssd_iter_140000_fp16.caffemodel"
# Model definition file name to be used for face detetion
prototxt_file = "deploy.prototxt"
# Location where model file will be downloaded
cache_subdir = "models"
# Min Confidence Threshold for face detection model
confidence = 0.5
# Download Link for face detection model defintion file
prototxt_download_link = "https://raw.githubusercontent.com/opencv/opencv/master/samples/dnn/face_detector/deploy.prototxt"
# Download Link for face detection model
modelfile_download_link = "https://raw.githubusercontent.com/opencv/opencv_3rdparty/dnn_samples_face_detector_20180205_fp16/res10_300x300_ssd_iter_140000_fp16.caffemodel"
# # Configuration parameters for Video class
class Video:
# Debug flag
DEBUG = False
min_video_duration = 5.0
# consume % of memory during video keyframe extraction
# 80% of available memory will be consumed
memory_consumption_threshold = 0.80
    # assumed number of frames within which 1 candidate frame might be available;
    # used to estimate the time to reach the memory threshold when all frames are
    # collected but not all of them are candidate frames.
    # Currently we assume 1 in every 5 frames is a candidate frame.
assumed_no_of_frames_per_candidate_frame = 5
    # if the video duration (in minutes) is greater than this number, the video is treated as a large video
video_split_threshold_in_minutes = 20
# https://trac.ffmpeg.org/wiki/Encode/H.264
    # Keep this value between 20 and 30
video_compression_crf_parameter = 23
video_compression_codec = "libx264" # Currently "libx264 and is supported"
compression_output_file_extension = "mp4"
# Supported/valid video extensions supported by ffmpeg
# You can generate updated list by using following shell script on MacOSX or Linux
# $ ffmpeg -demuxers -hide_banner | tail -n +5 | cut -d' ' -f4 | xargs -I{} ffmpeg -hide_banner -h demuxer={} | grep 'Common extensions' | cut -d' ' -f7 | tr ',' $'\n' | tr -d '.'
video_extensions = [
".str",
".aa",
".aac",
".ac3",
".acm",
".adf",
".adp",
".dtk",
".ads",
".ss2",
".adx",
".aea",
".afc",
".aix",
".al",
".ape",
".apl",
".mac",
".aptx",
".aptxhd",
".aqt",
".ast",
".avi",
".avr",
".bfstm",
".bcstm",
".bit",
".bmv",
".brstm",
".cdg",
".cdxl",
".xl",
".c2",
".302",
".daud",
".str",
".dss",
".dts",
".dtshd",
".dv",
".dif",
".cdata",
".eac3",
".paf",
".fap",
".flm",
".flac",
".flv",
".fsb",
".g722",
".722",
".tco",
".rco",
".g723_1",
".g729",
".genh",
".gsm",
".h261",
".h26l",
".h264",
".264",
".avc",
".hevc",
".h265",
".265",
".idf",
".cgi",
".sf",
".ircam",
".ivr",
".flv",
".lvf",
".m4v",
".mkv",
".mk3d",
".mka",
".mks",
".mjpg",
".mjpeg",
".mpo",
".j2k",
".mlp",
".mov",
".mp4",
".m4a",
".3gp",
".3g2",
".mj2",
".mp2",
".mp3",
".m2a",
".mpa",
".mpc",
".mjpg",
".txt",
".mpl2",
".sub",
".msf",
".mtaf",
".ul",
".musx",
".mvi",
".mxg",
".v",
".nist",
".sph",
".nsp",
".nut",
".ogg",
".oma",
".omg",
".aa3",
".pjs",
".pvf",
".yuv",
".cif",
".qcif",
".rgb",
".rt",
".rsd",
".rsd",
".rso",
".sw",
".sb",
".smi",
".sami",
".sbc",
".msbc",
".sbg",
".scc",
".sdr2",
".sds",
".sdx",
".shn",
".vb",
".son",
".sln",
".mjpg",
".stl",
".sub",
".sub",
".sup",
".svag",
".tak",
".thd",
".tta",
".ans",
".art",
".asc",
".diz",
".ice",
".nfo",
".txt",
".vt",
".ty",
".ty+",
".uw",
".ub",
".v210",
".yuv10",
".vag",
".vc1",
".viv",
".idx",
".vpk",
".txt",
".vqf",
".vql",
".vqe",
".vtt",
".wsd",
".xmv",
".xvag",
".yop",
".y4m",
]
# Configuration parameters for mediapipe
class MediaPipe:
class AutoFlip:
        # Rerun is required due to the autoflip issue mentioned here:
# https://github.com/google/mediapipe/issues/497
RERUN_LIMIT = 2
# Models folder location
MODELS_FOLDER_LOCATION = os.path.join(os.getcwd(), "mediapipe", "models")
# pbtxt temp folder name
TMP_PBTXT_FOLDER_NAME = "temp_pbtxt"
TMP_PBTXT_FOLDER_PATH = os.path.join(os.getcwd(), TMP_PBTXT_FOLDER_NAME)
# Default pbtxt and build cmd
CONFIG_FILE_PBTXT = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "mediapipe_autoflip.pbtxt"
)
BUILD_CMD = "run_autoflip"
# user friendly conf keys
ENFORCE_FEATURES_KEYNAME = "ENFORCE_FEATURES"
STABALIZATION_THRESHOLD_KEYNAME = "STABALIZATION_THRESHOLD"
BLUR_AREA_OPACITY_KEYNAME = "BLUR_AREA_OPACITY"
# DEFAULT VALUES IN PBTXT
DEFAULT_BLUR_AREA_OPACITY = 0.6
DEFAULT_MOTION_STABALIZATION_THRESHOLD = 0.5
DEFAULT_FEATURE_SIGNAL_VALUE = "false"
# ENFORCE_FEATURES Keys
_FACE_CORE_LANDMARKS = "FACE_CORE_LANDMARKS"
_FACE_FULL = "FACE_FULL"
_FACE_ALL_LANDMARKS = "FACE_ALL_LANDMARKS"
_HUMAN = "HUMAN"
_PET = "PET"
_CAR = "CAR"
_OBJECT = "OBJECT"
        # the variable names below should match the key names for set_conf to work
# smoothly
# ENFORCE_FEATURES list
ENFORCE_FEATURES = {
_FACE_CORE_LANDMARKS: False,
_FACE_ALL_LANDMARKS: False,
_FACE_FULL: False,
_HUMAN: False,
_PET: False,
_CAR: False,
_OBJECT: False,
}
# % AREA from center where most of the content is
# usually applied when content is focused near center
STABALIZATION_THRESHOLD = DEFAULT_MOTION_STABALIZATION_THRESHOLD
# opacity of blur area
BLUR_AREA_OPACITY = DEFAULT_BLUR_AREA_OPACITY
@classmethod
def get_pbtxt_mapping(cls):
return {
cls.ENFORCE_FEATURES_KEYNAME: "signal_settings",
cls.STABALIZATION_THRESHOLD_KEYNAME: "motion_stabilization_threshold_percent",
cls.BLUR_AREA_OPACITY_KEYNAME: "overlay_opacity",
}
@classmethod
def get_conf(cls):
"""Gets the current config
:return: dictionary containing the current config
:rtype: dict
"""
return {
cls.ENFORCE_FEATURES_KEYNAME: cls.ENFORCE_FEATURES,
cls.STABALIZATION_THRESHOLD_KEYNAME: cls.STABALIZATION_THRESHOLD,
cls.BLUR_AREA_OPACITY_KEYNAME: cls.BLUR_AREA_OPACITY,
}
@classmethod
def set_conf(cls, config):
"""Sets the config passed
:param config: The configuration to set.
:type config: dict
"""
for attr in config.keys():
current_conf = cls.get_conf()
if attr in current_conf.keys():
if attr == cls.ENFORCE_FEATURES_KEYNAME:
updated_attr_dict = {**current_conf[attr], **config[attr]}
setattr(cls, attr, updated_attr_dict)
else:
setattr(cls, attr, config[attr])
else:
raise Exception(
" Invalid configuration. Use get_conf method to see existing configuration or refer documentation."
)
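        # A minimal usage sketch for get_conf/set_conf above (editor-added illustration; the
        # import path assumes this module is importable as Katna.config):
        #
        #   from Katna.config import MediaPipe
        #   MediaPipe.AutoFlip.set_conf({
        #       "ENFORCE_FEATURES": {"FACE_CORE_LANDMARKS": True},
        #       "BLUR_AREA_OPACITY": 0.4,
        #   })
        #   print(MediaPipe.AutoFlip.get_conf())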
class ImageSelector:
# Setting for optimum Brightness values
min_brightness_value = 10.0
max_brightness_value = 90.0
brightness_step = 2.0
# Setting for optimum Contrast/Entropy values
min_entropy_value = 1.0
max_entropy_value = 10.0
entropy_step = 0.5
class FrameExtractor:
# Setting local maxima criteria
USE_LOCAL_MAXIMA = True
    # Length of the sliding window used when taking differences
len_window = 20
# Chunk size of Images to be processed at a time in memory
max_frames_in_chunk = 500
    # Type of smoothing window: 'flat', 'hanning', 'hamming', 'bartlett', or 'blackman'. A flat window produces a moving-average smoothing.
window_type = "hanning"
|
py | 1a3d2029578147666de04bb13080211d85e09204 | from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
import sys
from sumatra import commands
from io import StringIO
modes = list(commands.modes)
modes.sort()
usage = {}
sys.argv[0] = 'smt'
for mode in modes:
main = getattr(commands, mode)
usage[mode] = StringIO()
sys.stdout = usage[mode]
try:
main(['--help'])
except:
pass
sys.stdout = sys.__stdout__
f = open("command_reference.txt", "w")
f.write("=====================\n")
f.write("smt command reference\n")
f.write("=====================\n\n")
for mode in modes:
sio = usage[mode]
f.write(mode + '\n')
f.write('-'*len(mode) + '\n::\n\n ')
sio.seek(0)
f.write(" ".join(sio.readlines()) + '\n')
sio.close()
f.close()
|
py | 1a3d234fcb6b166cc828c98fb0752dd643a21f0a | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Optional list of dependencies required by the package
dependencies = ['torch', 'torchvision']
# from torch.hub import load_state_dict_from_url
import torch
from .resnet import ResNet, Bottleneck
model_urls = {
'resnext101_32x8d': '/mnt/soulfs2/kyyue/research/ig_models/ig_resnext101_32x8-c38310e5.pth',
'resnext101_32x16d': '/mnt/soulfs2/kyyue/research/ig_models/ig_resnext101_32x16-c6f796b0.pth',
'resnext101_32x32d': '/mnt/soulfs2/kyyue/research/ig_models/ig_resnext101_32x32-e4b90b00.pth',
'resnext101_32x48d': '/mnt/soulfs2/kyyue/research/ig_models/ig_resnext101_32x48-3e41cc8a.pth'
}
def _resnext(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
# state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
state_dict = torch.load(model_urls[arch])
model.load_state_dict(state_dict)
return model
def resnext101_32x8d_wsl(progress=True, **kwargs):
"""Constructs a ResNeXt-101 32x8 model pre-trained on weakly-supervised data
and finetuned on ImageNet from Figure 5 in
`"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
Args:
progress (bool): If True, displays a progress bar of the download to stderr.
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnext('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], True, progress, **kwargs)
def resnext101_32x16d_wsl(progress=True, **kwargs):
"""Constructs a ResNeXt-101 32x16 model pre-trained on weakly-supervised data
and finetuned on ImageNet from Figure 5 in
`"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
Args:
progress (bool): If True, displays a progress bar of the download to stderr.
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 16
return _resnext('resnext101_32x16d', Bottleneck, [3, 4, 23, 3], True, progress, **kwargs)
def resnext101_32x32d_wsl(progress=True, **kwargs):
"""Constructs a ResNeXt-101 32x32 model pre-trained on weakly-supervised data
and finetuned on ImageNet from Figure 5 in
`"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
Args:
progress (bool): If True, displays a progress bar of the download to stderr.
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 32
return _resnext('resnext101_32x32d', Bottleneck, [3, 4, 23, 3], True, progress, **kwargs)
def resnext101_32x48d_wsl(progress=True, **kwargs):
"""Constructs a ResNeXt-101 32x48 model pre-trained on weakly-supervised data
and finetuned on ImageNet from Figure 5 in
`"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
Args:
progress (bool): If True, displays a progress bar of the download to stderr.
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 48
return _resnext('resnext101_32x48d', Bottleneck, [3, 4, 23, 3], True, progress, **kwargs)
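# A hypothetical usage sketch (editor-added; it assumes the hardcoded weight paths above are
# available on the local machine, which these loaders require):
#
#   import torch
#   model = resnext101_32x8d_wsl(progress=True)
#   model.eval()
#   with torch.no_grad():
#       logits = model(torch.randn(1, 3, 224, 224))  # expected shape: (1, 1000)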
|
py | 1a3d23a614f0a4bac74d33587e8a7906c9c6a4fb | import pytest
from app.main.forms import get_placeholder_form_instance
def test_form_class_not_mutated(app_):
with app_.test_request_context(method="POST", data={"placeholder_value": ""}):
form1 = get_placeholder_form_instance("name", {}, "sms", optional_placeholder=False)
form2 = get_placeholder_form_instance("city", {}, "sms", optional_placeholder=True)
assert not form1.validate_on_submit()
assert form2.validate_on_submit()
assert str(form1.placeholder_value.label) == '<label for="placeholder_value">name</label>'
assert str(form2.placeholder_value.label) == '<label for="placeholder_value">city</label>'
@pytest.mark.parametrize(
"service_can_send_international_sms, placeholder_name, template_type, value, expected_error",
[
(False, "email address", "email", "", "Enter an email address"),
(False, "email address", "email", "12345", "Enter a valid email address"),
(
False,
"email address",
"email",
"“bad”@email-address.com",
"Enter a valid email address",
),
(False, "email address", "email", "test+'éüî@example.com", None),
(False, "email address", "email", "Tom!the#[email protected]", None),
(False, "email address", "email", "Jean-o'briå[email protected]", None),
(False, "email address", "email", "Tom!the#[email protected]", None),
(False, "email address", "email", "2+2={5*4/5}@mailinator.com", None),
(False, "email address", "email", "[email protected]", None),
(False, "email address", "email", "[email protected]", None),
(False, "phone number", "sms", "", "This cannot be empty"),
(False, "phone number", "sms", "+4966921809", "Not a valid phone number"),
(False, "phone number", "sms", "6502532222", None),
(False, "phone number", "sms", "+16502532222", None),
(True, "phone number", "sms", "+123", "Not a valid phone number"),
(True, "phone number", "sms", "+16502532222", None),
(True, "phone number", "sms", "+4966921809", None),
(False, "anything else", "sms", "", "This cannot be empty"),
(False, "anything else", "email", "", "This cannot be empty"),
(True, "phone number", "sms", "invalid", "Not a valid phone number"),
(True, "phone number", "email", "invalid", None),
(True, "phone number", "letter", "invalid", None),
(True, "email address", "sms", "invalid", None),
],
)
def test_validates_recipients(
app_,
placeholder_name,
template_type,
value,
service_can_send_international_sms,
expected_error,
):
with app_.test_request_context(method="POST", data={"placeholder_value": value}):
form = get_placeholder_form_instance(
placeholder_name,
{},
template_type,
allow_international_phone_numbers=service_can_send_international_sms,
)
if expected_error:
assert not form.validate_on_submit()
assert form.placeholder_value.errors[0] == expected_error
else:
assert form.validate_on_submit()
|
py | 1a3d2492cb5087955f0a825f03f957a85bbc55e8 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/particle/shared_pt_magic_sparks.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
py | 1a3d258c6f8244653ac805e6a4344c688ef741f8 | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
from programytest.storage.asserts.store.assert_denormals import DenormalStoreAsserts
from programy.storage.stores.sql.store.lookups import SQLDenormalStore
from programy.storage.stores.sql.engine import SQLStorageEngine
from programy.storage.stores.sql.config import SQLStorageConfiguration
import programytest.storage.engines as Engines
class SQLDenormalStoreTests(DenormalStoreAsserts):
@unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
def test_initialise(self):
config = SQLStorageConfiguration()
engine = SQLStorageEngine(config)
engine.initialise()
store = SQLDenormalStore(engine)
self.assertEqual(store.storage_engine, engine)
@unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
def test_upload_from_file(self):
config = SQLStorageConfiguration()
engine = SQLStorageEngine(config)
engine.initialise()
store = SQLDenormalStore(engine)
self.assert_upload_from_file(store)
|
py | 1a3d25a4d67b8d900a8e6e7f0e991ec2d1cc2ea1 | from vk_bot.core.modules.basicplug import BasicPlug
from vk_bot.core.sql.vksql import *
from vk_bot.core.sql.sqlgame import *
class Admins(BasicPlug):
command = ("бан", "разбан:", "вип",)
doc = "Забанить или разбанить"
available_for = "admins"
def main(self):
requests = self.text[0]
try:
            uid = self.event.object.reply_message['from_id']
        except Exception:
            print(".")
            uid = None
if requests == "бан":
tableadd("ban", "id", uid, one=True)
self.sendmsg("забанен нахой", "video367919273_456240239")
elif requests == "разбан":
tablerm("ban", "id", uid)
elif requests == "вип":
tableadd("vips", "id", event.object.reply_message['from_id'])
elif requests == "рассылка":
sendall(self.event, self.text, self.vk)
|
py | 1a3d26965cd87271b678bc0d4cd88c57444daf76 | from typing import Optional, Union
from snowflake.connector import SnowflakeConnection
from dbnd import log_duration
from dbnd._core.plugin.dbnd_plugins import is_plugin_enabled
from dbnd._core.tracking.metrics import log_data, log_target_operation
from dbnd_snowflake.extract_sql_query import TableTargetOperation
from dbnd_snowflake.snowflake_config import SnowflakeConfig
from dbnd_snowflake.snowflake_controller import SnowflakeController
def log_snowflake_table(
table_name: str,
connection_string: Union[str, SnowflakeConnection],
database: str,
schema: str,
key: Optional[str] = None,
with_preview: Optional[bool] = None,
with_schema: Optional[bool] = None,
raise_on_error: bool = False,
):
"""
:param table_name: table name
:param connection_string: either connection_string or actual connection
:param database:
:param schema:
:param key:
:param with_preview:
:param with_schema:
:param raise_on_error:
:return:
"""
if not is_plugin_enabled("dbnd-snowflake", module_import="dbnd_snowflake"):
return
from dbnd_snowflake import snowflake_values
with log_duration(
"log_snowflake_table__time_seconds", source="system"
), SnowflakeController(connection_string) as snowflake_ctrl:
config = SnowflakeConfig()
snowflake_table = snowflake_values.SnowflakeTable(
snowflake_ctrl, database, schema, table_name, config.table_preview_rows,
)
log_data(
key or "snowflake_table.{}".format(snowflake_table),
snowflake_table,
with_preview=with_preview,
with_schema=with_schema,
with_size=with_schema,
with_histograms=False,
raise_on_error=raise_on_error,
)
def log_snowflake_table_targets(
table_op: TableTargetOperation,
connection_string: Union[str, SnowflakeConnection],
with_preview: Optional[bool] = None,
with_schema: Optional[bool] = None,
):
if not is_plugin_enabled("dbnd-snowflake", module_import="dbnd_snowflake"):
return
from dbnd_snowflake.snowflake_values import SnowflakeTable
with SnowflakeController(connection_string) as snowflake_ctrl:
snowflake_table = SnowflakeTable.from_table(snowflake_ctrl, table_op.name)
log_target_operation(
name=table_op.name,
target=table_op.path,
operation_type=table_op.operation,
success=table_op.success,
data=snowflake_table,
with_preview=with_preview,
with_schema=with_schema,
)
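# A minimal usage sketch for log_snowflake_table (editor-added illustration; the connection
# string, database, schema and table name below are hypothetical):
#
#   log_snowflake_table(
#       table_name="CUSTOMERS",
#       connection_string="snowflake://user:password@account/TEST_DB",
#       database="TEST_DB",
#       schema="PUBLIC",
#       with_preview=True,
#       with_schema=True,
#   )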
|