max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars) |
---|---|---|---|---|
examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/utilities/env_utils_v2.py | felipeek/bullet3 | 9,136 | 12630668 | <reponame>felipeek/bullet3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def get_robot_base_position(robot):
"""Gets the base position of robot."""
# TODO(b/151975607): Clean this after robot interface migration.
if hasattr(robot, "GetBasePosition"):
return robot.GetBasePosition()
else:
return robot.base_position
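# Editor's note (sketch, not part of the original module): the hasattr() check above
# lets either robot interface work, e.g.
#   pos = get_robot_base_position(robot)  # robot.GetBasePosition() if it is defined,
#                                         # otherwise the robot.base_position property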
def get_robot_base_orientation(robot):
"""Gets the base orientation of robot."""
# TODO(b/151975607): Clean this after robot interface migration.
if hasattr(robot, "GetBaseOrientation"):
return robot.GetBaseOrientation()
else:
return robot.base_orientation_quaternion |
crabageprediction/venv/Lib/site-packages/fontTools/merge/options.py | 13rianlucero/CrabAgePrediction | 2,705 | 12630676 | # Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): <NAME>, <NAME>
class Options(object):
class UnknownOptionError(Exception):
pass
def __init__(self, **kwargs):
self.verbose = False
self.timing = False
self.drop_tables = []
self.set(**kwargs)
def set(self, **kwargs):
for k,v in kwargs.items():
if not hasattr(self, k):
raise self.UnknownOptionError("Unknown option '%s'" % k)
setattr(self, k, v)
def parse_opts(self, argv, ignore_unknown=[]):
ret = []
opts = {}
for a in argv:
orig_a = a
if not a.startswith('--'):
ret.append(a)
continue
a = a[2:]
i = a.find('=')
op = '='
if i == -1:
if a.startswith("no-"):
k = a[3:]
v = False
else:
k = a
v = True
else:
k = a[:i]
if k[-1] in "-+":
op = k[-1]+'=' # Ops is '-=' or '+=' now.
k = k[:-1]
v = a[i+1:]
ok = k
k = k.replace('-', '_')
if not hasattr(self, k):
if ignore_unknown is True or ok in ignore_unknown:
ret.append(orig_a)
continue
else:
raise self.UnknownOptionError("Unknown option '%s'" % a)
ov = getattr(self, k)
if isinstance(ov, bool):
v = bool(v)
elif isinstance(ov, int):
v = int(v)
elif isinstance(ov, list):
vv = v.split(',')
if vv == ['']:
vv = []
vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv]
if op == '=':
v = vv
elif op == '+=':
v = ov
v.extend(vv)
elif op == '-=':
v = ov
for x in vv:
if x in v:
v.remove(x)
else:
assert 0
opts[k] = v
self.set(**opts)
return ret
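# Editor's usage sketch (hypothetical values, not part of the original module):
#   opts = Options()
#   rest = opts.parse_opts(['--verbose', '--no-timing', '--drop-tables+=BASE,JSTF', 'a.ttf'])
#   # rest == ['a.ttf']; opts.verbose is True, opts.timing is False, and
#   # ['BASE', 'JSTF'] was appended to opts.drop_tables via the '+=' branch above.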
|
android/art/tools/common/common.py | Solotov/deoptfuscator | 206 | 12630761 | #!/usr/bin/env python3.4
#
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing common logic from python testing tools."""
import abc
import os
import signal
import shlex
import shutil
import time
from enum import Enum
from enum import unique
from subprocess import DEVNULL
from subprocess import check_call
from subprocess import PIPE
from subprocess import Popen
from subprocess import STDOUT
from subprocess import TimeoutExpired
from tempfile import mkdtemp
from tempfile import NamedTemporaryFile
# Temporary directory path on device.
DEVICE_TMP_PATH = '/data/local/tmp'
# Architectures supported in dalvik cache.
DALVIK_CACHE_ARCHS = ['arm', 'arm64', 'x86', 'x86_64']
@unique
class RetCode(Enum):
"""Enum representing normalized return codes."""
SUCCESS = 0
TIMEOUT = 1
ERROR = 2
NOTCOMPILED = 3
NOTRUN = 4
@unique
class LogSeverity(Enum):
VERBOSE = 0
DEBUG = 1
INFO = 2
WARNING = 3
ERROR = 4
FATAL = 5
SILENT = 6
@property
def symbol(self):
return self.name[0]
@classmethod
def FromSymbol(cls, s):
for log_severity in LogSeverity:
if log_severity.symbol == s:
return log_severity
raise ValueError("{0} is not a valid log severity symbol".format(s))
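# Editor's note (sketch): the symbol is the first letter of each level, so e.g.
#   LogSeverity.FromSymbol('W')  ->  LogSeverity.WARNING
#   LogSeverity.FromSymbol('X')  ->  raises ValueError
# and the comparison operators below allow filtering, e.g.
#   LogSeverity.ERROR >= LogSeverity.WARNING  ->  True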
def __ge__(self, other):
if self.__class__ is other.__class__:
return self.value >= other.value
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self.value > other.value
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self.value <= other.value
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self.value < other.value
return NotImplemented
def GetEnvVariableOrError(variable_name):
"""Gets value of an environmental variable.
If the variable is not set raises FatalError.
Args:
variable_name: string, name of variable to get.
Returns:
string, value of requested variable.
Raises:
FatalError: Requested variable is not set.
"""
top = os.environ.get(variable_name)
if top is None:
raise FatalError('{0} environmental variable not set.'.format(
variable_name))
return top
def GetJackClassPath():
"""Returns Jack's classpath."""
top = GetEnvVariableOrError('ANDROID_BUILD_TOP')
libdir = top + '/out/host/common/obj/JAVA_LIBRARIES'
return libdir + '/core-libart-hostdex_intermediates/classes.jack:' \
+ libdir + '/core-oj-hostdex_intermediates/classes.jack'
def _DexArchCachePaths(android_data_path):
"""Returns paths to architecture specific caches.
Args:
android_data_path: string, path dalvik-cache resides in.
Returns:
Iterable paths to architecture specific caches.
"""
return ('{0}/dalvik-cache/{1}'.format(android_data_path, arch)
for arch in DALVIK_CACHE_ARCHS)
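# Editor's note (sketch, hypothetical path): with android_data_path '/tmp/test_env'
# this yields '/tmp/test_env/dalvik-cache/arm', '.../arm64', '.../x86' and '.../x86_64'.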
def RunCommandForOutput(cmd, env, stdout, stderr, timeout=60):
"""Runs command piping output to files, stderr or stdout.
Args:
cmd: list of strings, command to run.
env: shell environment to run the command with.
stdout: file handle or one of Subprocess.PIPE, Subprocess.STDOUT,
Subprocess.DEVNULL, see Popen.
stderr: file handle or one of Subprocess.PIPE, Subprocess.STDOUT,
Subprocess.DEVNULL, see Popen.
timeout: int, timeout in seconds.
Returns:
tuple (string, string, RetCode) stdout output, stderr output, normalized
return code.
"""
proc = Popen(cmd, stdout=stdout, stderr=stderr, env=env,
universal_newlines=True, start_new_session=True)
try:
(output, stderr_output) = proc.communicate(timeout=timeout)
if proc.returncode == 0:
retcode = RetCode.SUCCESS
else:
retcode = RetCode.ERROR
except TimeoutExpired:
os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
(output, stderr_output) = proc.communicate()
retcode = RetCode.TIMEOUT
return (output, stderr_output, retcode)
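# Editor's note (sketch): start_new_session=True above places the child in its own
# process group, so os.killpg() on timeout terminates the whole command tree, not just
# the direct child. A hypothetical call:
#   out, err, code = RunCommandForOutput(['sleep', '999'], None, PIPE, PIPE, timeout=1)
#   # code == RetCode.TIMEOUT after roughly one second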
def _LogCmdOutput(logfile, cmd, output, retcode):
"""Logs output of a command.
Args:
logfile: file handle to logfile.
cmd: list of strings, command.
output: command output.
retcode: RetCode, normalized retcode.
"""
logfile.write('Command:\n{0}\n{1}\nReturn code: {2}\n'.format(
CommandListToCommandString(cmd), output, retcode))
def RunCommand(cmd, out, err, timeout=5):
"""Executes a command, and returns its return code.
Args:
cmd: list of strings, a command to execute
out: string, file name to open for stdout (or None)
err: string, file name to open for stderr (or None)
timeout: int, time out in seconds
Returns:
RetCode, return code of running command (forced RetCode.TIMEOUT
on timeout)
"""
devnull = DEVNULL
outf = devnull
if out is not None:
outf = open(out, mode='w')
errf = devnull
if err is not None:
errf = open(err, mode='w')
(_, _, retcode) = RunCommandForOutput(cmd, None, outf, errf, timeout)
if outf != devnull:
outf.close()
if errf != devnull:
errf.close()
return retcode
def CommandListToCommandString(cmd):
"""Converts shell command represented as list of strings to a single string.
Elements are escaped with shlex.quote, so arguments containing shell metacharacters are quoted safely.
Args:
cmd: list of strings, shell command.
Returns:
string, shell command.
"""
return ' '.join([shlex.quote(segment) for segment in cmd])
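# Editor's note (sketch): shlex.quote() only quotes when needed, e.g.
#   CommandListToCommandString(['echo', 'a b', 'plain'])  ->  "echo 'a b' plain"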
class FatalError(Exception):
"""Fatal error in script."""
class ITestEnv(abc.ABC):
"""Test environment abstraction.
Provides unified interface for interacting with host and device test
environments. Creates a test directory and exposes methods to modify test files
and run commands.
"""
@abc.abstractmethod
def CreateFile(self, name=None):
"""Creates a file in test directory.
Returned path to file can be used in commands run in the environment.
Args:
name: string, file name. If None file is named arbitrarily.
Returns:
string, environment specific path to file.
"""
@abc.abstractmethod
def WriteLines(self, file_path, lines):
"""Writes lines to a file in test directory.
If the file exists it is overwritten. If it does not exist it is created.
Args:
file_path: string, environment specific path to file.
lines: list of strings to write.
"""
@abc.abstractmethod
def RunCommand(self, cmd, log_severity=LogSeverity.ERROR):
"""Runs command in environment.
Args:
cmd: list of strings, command to run.
log_severity: LogSeverity, minimum severity of logs included in output.
Returns:
tuple (string, int) output, return code.
"""
@abc.abstractproperty
def logfile(self):
"""Gets file handle to logfile residing on host."""
class HostTestEnv(ITestEnv):
"""Host test environment. Concrete implementation of ITestEnv.
Maintains a test directory in /tmp/. Runs commands on the host in modified
shell environment. Mimics art script behavior.
For methods documentation see base class.
"""
def __init__(self, directory_prefix, cleanup=True, logfile_path=None,
timeout=60, x64=False):
"""Constructor.
Args:
directory_prefix: string, prefix for environment directory name.
cleanup: boolean, if True remove test directory in destructor.
logfile_path: string, can be used to specify custom logfile location.
timeout: int, seconds, time to wait for single test run to finish.
x64: boolean, whether to setup in x64 mode.
"""
self._cleanup = cleanup
self._timeout = timeout
self._env_path = mkdtemp(dir='/tmp/', prefix=directory_prefix)
if logfile_path is None:
self._logfile = open('{0}/log'.format(self._env_path), 'w+')
else:
self._logfile = open(logfile_path, 'w+')
os.mkdir('{0}/dalvik-cache'.format(self._env_path))
for arch_cache_path in _DexArchCachePaths(self._env_path):
os.mkdir(arch_cache_path)
lib = 'lib64' if x64 else 'lib'
android_root = GetEnvVariableOrError('ANDROID_HOST_OUT')
library_path = android_root + '/' + lib
path = android_root + '/bin'
self._shell_env = os.environ.copy()
self._shell_env['ANDROID_DATA'] = self._env_path
self._shell_env['ANDROID_ROOT'] = android_root
self._shell_env['LD_LIBRARY_PATH'] = library_path
self._shell_env['DYLD_LIBRARY_PATH'] = library_path
self._shell_env['PATH'] = (path + ':' + self._shell_env['PATH'])
# Using dlopen requires load bias on the host.
self._shell_env['LD_USE_LOAD_BIAS'] = '1'
def __del__(self):
if self._cleanup:
shutil.rmtree(self._env_path)
def CreateFile(self, name=None):
if name is None:
f = NamedTemporaryFile(dir=self._env_path, delete=False)
else:
f = open('{0}/{1}'.format(self._env_path, name), 'w+')
return f.name
def WriteLines(self, file_path, lines):
with open(file_path, 'w') as f:
f.writelines('{0}\n'.format(line) for line in lines)
return
def RunCommand(self, cmd, log_severity=LogSeverity.ERROR):
self._EmptyDexCache()
env = self._shell_env.copy()
env.update({'ANDROID_LOG_TAGS':'*:' + log_severity.symbol.lower()})
(output, err_output, retcode) = RunCommandForOutput(
cmd, env, PIPE, PIPE, self._timeout)
# We append err_output to output to stay consistent with DeviceTestEnv
# implementation.
output += err_output
_LogCmdOutput(self._logfile, cmd, output, retcode)
return (output, retcode)
@property
def logfile(self):
return self._logfile
def _EmptyDexCache(self):
"""Empties dex cache.
Iterate over files in architecture specific cache directories and remove
them.
"""
for arch_cache_path in _DexArchCachePaths(self._env_path):
for file_path in os.listdir(arch_cache_path):
file_path = '{0}/{1}'.format(arch_cache_path, file_path)
if os.path.isfile(file_path):
os.unlink(file_path)
class DeviceTestEnv(ITestEnv):
"""Device test environment. Concrete implementation of ITestEnv.
For methods documentation see base class.
"""
def __init__(self, directory_prefix, cleanup=True, logfile_path=None,
timeout=60, specific_device=None):
"""Constructor.
Args:
directory_prefix: string, prefix for environment directory name.
cleanup: boolean, if True remove test directory in destructor.
logfile_path: string, can be used to specify custom logfile location.
timeout: int, seconds, time to wait for single test run to finish.
specific_device: string, serial number of device to use.
"""
self._cleanup = cleanup
self._timeout = timeout
self._specific_device = specific_device
self._host_env_path = mkdtemp(dir='/tmp/', prefix=directory_prefix)
if logfile_path is None:
self._logfile = open('{0}/log'.format(self._host_env_path), 'w+')
else:
self._logfile = open(logfile_path, 'w+')
self._device_env_path = '{0}/{1}'.format(
DEVICE_TMP_PATH, os.path.basename(self._host_env_path))
self._shell_env = os.environ.copy()
self._AdbMkdir('{0}/dalvik-cache'.format(self._device_env_path))
for arch_cache_path in _DexArchCachePaths(self._device_env_path):
self._AdbMkdir(arch_cache_path)
def __del__(self):
if self._cleanup:
shutil.rmtree(self._host_env_path)
check_call(shlex.split(
'adb shell if [ -d "{0}" ]; then rm -rf "{0}"; fi'
.format(self._device_env_path)))
def CreateFile(self, name=None):
with NamedTemporaryFile(mode='w') as temp_file:
self._AdbPush(temp_file.name, self._device_env_path)
if name is None:
name = os.path.basename(temp_file.name)
return '{0}/{1}'.format(self._device_env_path, name)
def WriteLines(self, file_path, lines):
with NamedTemporaryFile(mode='w') as temp_file:
temp_file.writelines('{0}\n'.format(line) for line in lines)
temp_file.flush()
self._AdbPush(temp_file.name, file_path)
return
def _ExtractPid(self, brief_log_line):
"""Extracts PID from a single logcat line in brief format."""
pid_start_idx = brief_log_line.find('(') + 2
if pid_start_idx == 1:  # find() returned -1, i.e. no '(' in the line
return None
pid_end_idx = brief_log_line.find(')', pid_start_idx)
if pid_end_idx == -1:
return None
return brief_log_line[pid_start_idx:pid_end_idx]
def _ExtractSeverity(self, brief_log_line):
"""Extracts LogSeverity from a single logcat line in brief format."""
if not brief_log_line:
return None
return LogSeverity.FromSymbol(brief_log_line[0])
def RunCommand(self, cmd, log_severity=LogSeverity.ERROR):
self._EmptyDexCache()
env_vars_cmd = 'ANDROID_DATA={0} ANDROID_LOG_TAGS=*:i'.format(
self._device_env_path)
adb_cmd = ['adb']
if self._specific_device:
adb_cmd += ['-s', self._specific_device]
logcat_cmd = adb_cmd + ['logcat', '-v', 'brief', '-s', '-b', 'main',
'-T', '1', 'dex2oat:*', 'dex2oatd:*']
logcat_proc = Popen(logcat_cmd, stdout=PIPE, stderr=STDOUT,
universal_newlines=True)
cmd_str = CommandListToCommandString(cmd)
# Print PID of the shell and exec command. We later retrieve this PID and
# use it to filter dex2oat logs, keeping those with matching parent PID.
device_cmd = ('echo $$ && ' + env_vars_cmd + ' exec ' + cmd_str)
cmd = adb_cmd + ['shell', device_cmd]
(output, _, retcode) = RunCommandForOutput(cmd, self._shell_env, PIPE,
STDOUT, self._timeout)
# We need to make sure to only kill logcat once all relevant logs arrive.
# Sleep is used for simplicity.
time.sleep(0.5)
logcat_proc.kill()
end_of_first_line = output.find('\n')
if end_of_first_line != -1:
parent_pid = output[:end_of_first_line]
output = output[end_of_first_line + 1:]
logcat_output, _ = logcat_proc.communicate()
logcat_lines = logcat_output.splitlines(keepends=True)
dex2oat_pids = []
for line in logcat_lines:
# Dex2oat was started by our runtime instance.
if 'Running dex2oat (parent PID = ' + parent_pid in line:
dex2oat_pids.append(self._ExtractPid(line))
break
if dex2oat_pids:
for line in logcat_lines:
if (self._ExtractPid(line) in dex2oat_pids and
self._ExtractSeverity(line) >= log_severity):
output += line
_LogCmdOutput(self._logfile, cmd, output, retcode)
return (output, retcode)
@property
def logfile(self):
return self._logfile
def PushClasspath(self, classpath):
"""Push classpath to on-device test directory.
Classpath can contain multiple colon-separated file paths; each file is
pushed. Returns an analogous classpath with paths valid on the device.
Args:
classpath: string, classpath in format 'a/b/c:d/e/f'.
Returns:
string, classpath valid on device.
"""
paths = classpath.split(':')
device_paths = []
for path in paths:
device_paths.append('{0}/{1}'.format(
self._device_env_path, os.path.basename(path)))
self._AdbPush(path, self._device_env_path)
return ':'.join(device_paths)
def _AdbPush(self, what, where):
check_call(shlex.split('adb push "{0}" "{1}"'.format(what, where)),
stdout=self._logfile, stderr=self._logfile)
def _AdbMkdir(self, path):
check_call(shlex.split('adb shell mkdir "{0}" -p'.format(path)),
stdout=self._logfile, stderr=self._logfile)
def _EmptyDexCache(self):
"""Empties dex cache."""
for arch_cache_path in _DexArchCachePaths(self._device_env_path):
cmd = 'adb shell if [ -d "{0}" ]; then rm -f "{0}"/*; fi'.format(
arch_cache_path)
check_call(shlex.split(cmd), stdout=self._logfile, stderr=self._logfile)
|
auth0/v3/test/authentication/test_users.py | akmjenkins/auth0-python | 340 | 12630798 | <filename>auth0/v3/test/authentication/test_users.py
import unittest
import mock
from ...authentication.users import Users
class TestUsers(unittest.TestCase):
@mock.patch('auth0.v3.authentication.users.Users.get')
def test_userinfo(self, mock_get):
u = Users('my.domain.com')
u.userinfo(access_token='<PASSWORD>')
mock_get.assert_called_with(
url='https://my.domain.com/userinfo',
headers={'Authorization': 'Bearer atk'}
)
@mock.patch('auth0.v3.authentication.users.Users.post')
def test_tokeninfo(self, mock_post):
u = Users('my.domain.com')
u.tokeninfo(jwt='jwtoken')
mock_post.assert_called_with(
url='https://my.domain.com/tokeninfo',
data={'id_token': 'jwtoken'}
)
|
utils/weibo.py | haygcao/UnicomDailyTask | 148 | 12630825 | <filename>utils/weibo.py
# -*- coding: utf8 -*-
import base64
import json
from utils.toutiao_sdk import md5
from Crypto.Cipher import PKCS1_v1_5
from Crypto.PublicKey.RSA import importKey
# from Crypto.Random import get_random_bytes
def getCheckToken(userId, deviceId):
if not userId:
deviceId = deviceId[0:32]
return md5(''.join([userId, '/', deviceId, '/', 'obiew']))
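# Editor's note (sketch, hypothetical values): with a userId the token is
# md5(userId + '/' + deviceId + '/' + 'obiew'); with an empty userId the deviceId is
# first truncated to 32 chars and userId contributes an empty string to the join.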
def rsa_encrypt(message):
public_key = '''-----<KEY>'''
rsa_key = importKey(public_key)
cipher = PKCS1_v1_5.new(rsa_key)
message = json.dumps(message, separators=(',', ':',), ensure_ascii=False).encode('utf8')
length = len(message)
num = length // 117 + 1 if length % 117 else length // 117
buf = b''
for index in range(num):
start = index * 117
end = start + 117 if length > start + 117 else length
buf += cipher.encrypt(message[start:end])
return base64.b64encode(buf).decode('utf8')
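# Editor's note (sketch): the 117-byte chunking above matches PKCS#1 v1.5 with a
# 1024-bit key (128 bytes per block minus 11 bytes of padding); the key size is an
# assumption inferred from the chunk size, since the public key string is redacted.
#   token = rsa_encrypt({'uid': '123'})  # hypothetical call: base64 of concatenated 128-byte blocks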
# def rsa_decrypt(ciphertext):
# private_key = '-----BEGIN RSA PRIVATE KEY-----\n...\n-----END RSA PRIVATE KEY-----'
# rsa_key = importKey(private_key)
# sentinel = get_random_bytes(16)
# cipher = PKCS1_v1_5.new(rsa_key)
# ciphertext = base64.b64decode(ciphertext)
# length = len(ciphertext)
# num = length // 256 + 1 if length % 256 else length // 256
# buf = b''
# for index in range(num):
# start = index * 256
# end = start + 256 if length > start + 256 else length
# print(start, end)
# buf += cipher.decrypt(ciphertext[start:end], sentinel) # type: bytearray
# return buf.decode('utf8')
if __name__ == '__main__':
pass
|
gpytorch/lazy/kronecker_product_lazy_tensor.py | jrg365/gpytorch | 188 | 12630827 | <filename>gpytorch/lazy/kronecker_product_lazy_tensor.py
#!/usr/bin/env python3
import operator
from functools import reduce
from typing import Optional, Tuple
import torch
from torch import Tensor
from .. import settings
from ..utils.broadcasting import _matmul_broadcast_shape, _mul_broadcast_shape
from ..utils.memoize import cached
from .diag_lazy_tensor import ConstantDiagLazyTensor, DiagLazyTensor
from .lazy_tensor import LazyTensor
from .non_lazy_tensor import lazify
from .triangular_lazy_tensor import TriangularLazyTensor, _TriangularLazyTensorBase
def _kron_diag(*lts) -> Tensor:
"""Compute diagonal of a KroneckerProductLazyTensor from the diagonals of the constituiting tensors"""
lead_diag = lts[0].diag()
if len(lts) == 1: # base case:
return lead_diag
trail_diag = _kron_diag(*lts[1:])
diag = lead_diag.unsqueeze(-2) * trail_diag.unsqueeze(-1)
return diag.transpose(-1, -2).reshape(*diag.shape[:-2], -1)
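# Editor's note (sketch): for two factors this uses the identity
#   diag(A \kron B)[i * m + j] == diag(A)[i] * diag(B)[j]   (B is m x m),
# i.e. the flattened outer product of the two diagonals; the recursion above extends
# this to any number of Kronecker factors without ever forming the full product.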
def _prod(iterable):
return reduce(operator.mul, iterable, 1)
def _matmul(lazy_tensors, kp_shape, rhs):
output_shape = _matmul_broadcast_shape(kp_shape, rhs.shape)
output_batch_shape = output_shape[:-2]
res = rhs.contiguous().expand(*output_batch_shape, *rhs.shape[-2:])
num_cols = rhs.size(-1)
for lazy_tensor in lazy_tensors:
res = res.view(*output_batch_shape, lazy_tensor.size(-1), -1)
factor = lazy_tensor._matmul(res)
factor = factor.view(*output_batch_shape, lazy_tensor.size(-2), -1, num_cols).transpose(-3, -2)
res = factor.reshape(*output_batch_shape, -1, num_cols)
return res
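# Editor's note (sketch): the loop above is the Kronecker "vec trick",
#   (A \kron B) @ vec(X) == vec(B @ X @ A.T)   (up to the row-/column-major vec convention),
# applied once per factor against a reshaped view of rhs, so the dense Kronecker
# matrix is never materialized.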
def _t_matmul(lazy_tensors, kp_shape, rhs):
kp_t_shape = (*kp_shape[:-2], kp_shape[-1], kp_shape[-2])
output_shape = _matmul_broadcast_shape(kp_t_shape, rhs.shape)
output_batch_shape = torch.Size(output_shape[:-2])
res = rhs.contiguous().expand(*output_batch_shape, *rhs.shape[-2:])
num_cols = rhs.size(-1)
for lazy_tensor in lazy_tensors:
res = res.view(*output_batch_shape, lazy_tensor.size(-2), -1)
factor = lazy_tensor._t_matmul(res)
factor = factor.view(*output_batch_shape, lazy_tensor.size(-1), -1, num_cols).transpose(-3, -2)
res = factor.reshape(*output_batch_shape, -1, num_cols)
return res
class KroneckerProductLazyTensor(LazyTensor):
r"""
Returns the Kronecker product of the given lazy tensors
Args:
:`lazy_tensors`: List of lazy tensors
"""
def __init__(self, *lazy_tensors):
try:
lazy_tensors = tuple(lazify(lazy_tensor) for lazy_tensor in lazy_tensors)
except TypeError:
raise RuntimeError("KroneckerProductLazyTensor is intended to wrap lazy tensors.")
for prev_lazy_tensor, curr_lazy_tensor in zip(lazy_tensors[:-1], lazy_tensors[1:]):
if prev_lazy_tensor.batch_shape != curr_lazy_tensor.batch_shape:
raise RuntimeError(
"KroneckerProductLazyTensor expects lazy tensors with the "
"same batch shapes. Got {}.".format([lv.batch_shape for lv in lazy_tensors])
)
super().__init__(*lazy_tensors)
self.lazy_tensors = lazy_tensors
def __add__(self, other):
if isinstance(other, (KroneckerProductDiagLazyTensor, ConstantDiagLazyTensor)):
from .kronecker_product_added_diag_lazy_tensor import KroneckerProductAddedDiagLazyTensor
return KroneckerProductAddedDiagLazyTensor(self, other)
if isinstance(other, KroneckerProductLazyTensor):
from .sum_kronecker_lazy_tensor import SumKroneckerLazyTensor
return SumKroneckerLazyTensor(self, other)
if isinstance(other, DiagLazyTensor):
return self.add_diag(other.diag())
return super().__add__(other)
def add_diag(self, diag):
r"""
Adds a diagonal to a KroneckerProductLazyTensor
"""
from .kronecker_product_added_diag_lazy_tensor import KroneckerProductAddedDiagLazyTensor
if not self.is_square:
raise RuntimeError("add_diag only defined for square matrices")
diag_shape = diag.shape
if len(diag_shape) == 0:
# interpret scalar tensor as constant diag
diag_tensor = ConstantDiagLazyTensor(diag.unsqueeze(-1), diag_shape=self.shape[-1])
elif diag_shape[-1] == 1:
# interpret single-trailing element as constant diag
diag_tensor = ConstantDiagLazyTensor(diag, diag_shape=self.shape[-1])
else:
try:
expanded_diag = diag.expand(self.shape[:-1])
except RuntimeError:
raise RuntimeError(
"add_diag for LazyTensor of size {} received invalid diagonal of size {}.".format(
self.shape, diag_shape
)
)
diag_tensor = DiagLazyTensor(expanded_diag)
return KroneckerProductAddedDiagLazyTensor(self, diag_tensor)
def diag(self):
r"""
As :func:`torch.diag`, returns the diagonal of the matrix :math:`K` this LazyTensor represents as a vector.
:rtype: torch.tensor
:return: The diagonal of :math:`K`. If :math:`K` is :math:`n \times n`, this will be a length
n vector. If this LazyTensor represents a batch (e.g., is :math:`b \times n \times n`), this will be a
:math:`b \times n` matrix of diagonals, one for each matrix in the batch.
"""
if settings.debug.on():
if not self.is_square:
raise RuntimeError("Diag works on square matrices (or batches)")
return _kron_diag(*self.lazy_tensors)
def diagonalization(self, method: Optional[str] = None):
if method is None:
method = "symeig"
return super().diagonalization(method=method)
@cached
def inverse(self):
# here we use that (A \kron B)^-1 = A^-1 \kron B^-1
# TODO: Investigate under what conditions computing individual inverses makes sense
inverses = [lt.inverse() for lt in self.lazy_tensors]
return self.__class__(*inverses)
def inv_quad_logdet(self, inv_quad_rhs=None, logdet=False, reduce_inv_quad=True):
if inv_quad_rhs is not None:
inv_quad_term, _ = super().inv_quad_logdet(
inv_quad_rhs=inv_quad_rhs, logdet=False, reduce_inv_quad=reduce_inv_quad
)
else:
inv_quad_term = None
logdet_term = self._logdet() if logdet else None
return inv_quad_term, logdet_term
@cached(name="cholesky")
def _cholesky(self, upper=False):
chol_factors = [lt.cholesky(upper=upper) for lt in self.lazy_tensors]
return KroneckerProductTriangularLazyTensor(*chol_factors, upper=upper)
def _expand_batch(self, batch_shape):
return self.__class__(*[lazy_tensor._expand_batch(batch_shape) for lazy_tensor in self.lazy_tensors])
def _get_indices(self, row_index, col_index, *batch_indices):
row_factor = self.size(-2)
col_factor = self.size(-1)
res = None
for lazy_tensor in self.lazy_tensors:
sub_row_size = lazy_tensor.size(-2)
sub_col_size = lazy_tensor.size(-1)
row_factor //= sub_row_size
col_factor //= sub_col_size
sub_res = lazy_tensor._get_indices(
torch.div(row_index, row_factor, rounding_mode="floor").fmod(sub_row_size),
torch.div(col_index, col_factor, rounding_mode="floor").fmod(sub_col_size),
*batch_indices,
)
res = sub_res if res is None else (sub_res * res)
return res
def _solve(self, rhs, preconditioner=None, num_tridiag=0):
# Computes inv_matmul by exploiting the identity (A \kron B)^-1 = A^-1 \kron B^-1
# we perform the solve first before worrying about any tridiagonal matrices
tsr_shapes = [q.size(-1) for q in self.lazy_tensors]
n_rows = rhs.size(-2)
batch_shape = _mul_broadcast_shape(self.shape[:-2], rhs.shape[:-2])
perm_batch = tuple(range(len(batch_shape)))
y = rhs.clone().expand(*batch_shape, *rhs.shape[-2:])
for n, q in zip(tsr_shapes, self.lazy_tensors):
# for KroneckerProductTriangularLazyTensor this inv_matmul is very cheap
y = q.inv_matmul(y.reshape(*batch_shape, n, -1))
y = y.reshape(*batch_shape, n, n_rows // n, -1).permute(*perm_batch, -2, -3, -1)
res = y.reshape(*batch_shape, n_rows, -1)
if num_tridiag == 0:
return res
else:
# we need to return the t mat, so we return the eigenvalues
# in general, this should not be called because log determinant estimation
# is closed form and is implemented in _logdet
# TODO: make this more efficient
evals, _ = self.diagonalization()
evals_repeated = evals.unsqueeze(0).repeat(num_tridiag, *[1] * evals.ndim)
lazy_evals = DiagLazyTensor(evals_repeated)
batch_repeated_evals = lazy_evals.evaluate()
return res, batch_repeated_evals
def _inv_matmul(self, right_tensor, left_tensor=None):
# if _inv_matmul is called, we ignore the eigenvalue handling
# this is efficient because of the structure of the lazy tensor
res = self._solve(rhs=right_tensor)
if left_tensor is not None:
res = left_tensor @ res
return res
def _logdet(self):
evals, _ = self.diagonalization()
logdet = evals.clamp(min=1e-7).log().sum(-1)
return logdet
def _matmul(self, rhs):
is_vec = rhs.ndimension() == 1
if is_vec:
rhs = rhs.unsqueeze(-1)
res = _matmul(self.lazy_tensors, self.shape, rhs.contiguous())
if is_vec:
res = res.squeeze(-1)
return res
@cached(name="root_decomposition")
def root_decomposition(self, method: Optional[str] = None):
from gpytorch.lazy import RootLazyTensor
# return a dense root decomposition if the matrix is small
if self.shape[-1] <= settings.max_cholesky_size.value():
return super().root_decomposition(method=method)
root_list = [lt.root_decomposition(method=method).root for lt in self.lazy_tensors]
kronecker_root = KroneckerProductLazyTensor(*root_list)
return RootLazyTensor(kronecker_root)
@cached(name="root_inv_decomposition")
def root_inv_decomposition(self, method=None, initial_vectors=None, test_vectors=None):
from gpytorch.lazy import RootLazyTensor
# return a dense root decomposition if the matrix is small
if self.shape[-1] <= settings.max_cholesky_size.value():
return super().root_inv_decomposition()
root_list = [lt.root_inv_decomposition().root for lt in self.lazy_tensors]
kronecker_root = KroneckerProductLazyTensor(*root_list)
return RootLazyTensor(kronecker_root)
@cached(name="size")
def _size(self):
left_size = _prod(lazy_tensor.size(-2) for lazy_tensor in self.lazy_tensors)
right_size = _prod(lazy_tensor.size(-1) for lazy_tensor in self.lazy_tensors)
return torch.Size((*self.lazy_tensors[0].batch_shape, left_size, right_size))
@cached(name="svd")
def _svd(self) -> Tuple[LazyTensor, Tensor, LazyTensor]:
U, S, V = [], [], []
for lt in self.lazy_tensors:
U_, S_, V_ = lt.svd()
U.append(U_)
S.append(S_)
V.append(V_)
S = KroneckerProductLazyTensor(*[DiagLazyTensor(S_) for S_ in S]).diag()
U = KroneckerProductLazyTensor(*U)
V = KroneckerProductLazyTensor(*V)
return U, S, V
def _symeig(
self, eigenvectors: bool = False, return_evals_as_lazy: bool = False
) -> Tuple[Tensor, Optional[LazyTensor]]:
# return_evals_as_lazy is a flag to return the eigenvalues as a lazy tensor
# which is useful for root decompositions here (see the root_decomposition
# method above)
evals, evecs = [], []
for lt in self.lazy_tensors:
evals_, evecs_ = lt.symeig(eigenvectors=eigenvectors)
evals.append(evals_)
evecs.append(evecs_)
evals = KroneckerProductDiagLazyTensor(*[DiagLazyTensor(evals_) for evals_ in evals])
if not return_evals_as_lazy:
evals = evals.diag()
if eigenvectors:
evecs = KroneckerProductLazyTensor(*evecs)
else:
evecs = None
return evals, evecs
def _t_matmul(self, rhs):
is_vec = rhs.ndimension() == 1
if is_vec:
rhs = rhs.unsqueeze(-1)
res = _t_matmul(self.lazy_tensors, self.shape, rhs.contiguous())
if is_vec:
res = res.squeeze(-1)
return res
def _transpose_nonbatch(self):
return self.__class__(*(lazy_tensor._transpose_nonbatch() for lazy_tensor in self.lazy_tensors), **self._kwargs)
class KroneckerProductTriangularLazyTensor(KroneckerProductLazyTensor, _TriangularLazyTensorBase):
def __init__(self, *lazy_tensors, upper=False):
if not all(isinstance(lt, TriangularLazyTensor) for lt in lazy_tensors):
raise RuntimeError("Components of KroneckerProductTriangularLazyTensor must be TriangularLazyTensor.")
super().__init__(*lazy_tensors)
self.upper = upper
@cached
def inverse(self):
# here we use that (A \kron B)^-1 = A^-1 \kron B^-1
inverses = [lt.inverse() for lt in self.lazy_tensors]
return self.__class__(*inverses, upper=self.upper)
def inv_matmul(self, right_tensor, left_tensor=None):
# For triangular components, using triangular-triangular substitution should generally be good
return self._inv_matmul(right_tensor=right_tensor, left_tensor=left_tensor)
@cached(name="cholesky")
def _cholesky(self, upper=False):
raise NotImplementedError("_cholesky not applicable to triangular lazy tensors")
def _cholesky_solve(self, rhs, upper=False):
if upper:
# res = (U.T @ U)^-1 @ v = U^-1 @ U^-T @ v
w = self._transpose_nonbatch().inv_matmul(rhs)
res = self.inv_matmul(w)
else:
# res = (L @ L.T)^-1 @ v = L^-T @ L^-1 @ v
w = self.inv_matmul(rhs)
res = self._transpose_nonbatch().inv_matmul(w)
return res
def _symeig(self, eigenvectors: bool = False) -> Tuple[Tensor, Optional[LazyTensor]]:
raise NotImplementedError("_symeig not applicable to triangular lazy tensors")
class KroneckerProductDiagLazyTensor(DiagLazyTensor, KroneckerProductTriangularLazyTensor):
def __init__(self, *lazy_tensors):
if not all(isinstance(lt, DiagLazyTensor) for lt in lazy_tensors):
raise RuntimeError("Components of KroneckerProductDiagLazyTensor must be DiagLazyTensor.")
super(KroneckerProductTriangularLazyTensor, self).__init__(*lazy_tensors)
self.upper = False
@cached(name="cholesky")
def _cholesky(self, upper=False):
chol_factors = [lt.cholesky(upper=upper) for lt in self.lazy_tensors]
return KroneckerProductDiagLazyTensor(*chol_factors)
@property
def _diag(self):
return _kron_diag(*self.lazy_tensors)
def _expand_batch(self, batch_shape):
return KroneckerProductTriangularLazyTensor._expand_batch(self, batch_shape)
def _mul_constant(self, constant):
return DiagLazyTensor(self._diag * constant.unsqueeze(-1))
def _quad_form_derivative(self, left_vecs, right_vecs):
return KroneckerProductTriangularLazyTensor._quad_form_derivative(self, left_vecs, right_vecs)
def sqrt(self):
return self.__class__(*[lt.sqrt() for lt in self.lazy_tensors])
def _symeig(
self, eigenvectors: bool = False, return_evals_as_lazy: bool = False
) -> Tuple[Tensor, Optional[LazyTensor]]:
# return_evals_as_lazy is a flag to return the eigenvalues as a lazy tensor
# which is useful for root decompositions here (see the root_decomposition
# method above)
evals, evecs = [], []
for lt in self.lazy_tensors:
evals_, evecs_ = lt.symeig(eigenvectors=eigenvectors)
evals.append(evals_)
evecs.append(evecs_)
evals = KroneckerProductDiagLazyTensor(*[DiagLazyTensor(evals_) for evals_ in evals])
if not return_evals_as_lazy:
evals = evals.diag()
if eigenvectors:
evecs = KroneckerProductDiagLazyTensor(*evecs)
else:
evecs = None
return evals, evecs
@cached
def inverse(self):
# here we use that (A \kron B)^-1 = A^-1 \kron B^-1
inverses = [lt.inverse() for lt in self.lazy_tensors]
return self.__class__(*inverses)
|
libraries/botbuilder-ai/tests/luis/luis_recognizer_v3_test.py | Fl4v/botbuilder-python | 388 | 12630839 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# pylint: disable=no-value-for-parameter
import json
from os import path
from typing import Dict, Tuple, Union
import re
from unittest import mock
from unittest.mock import MagicMock
from aioresponses import aioresponses
from aiounittest import AsyncTestCase
from botbuilder.ai.luis import LuisRecognizerOptionsV3
from botbuilder.ai.luis import LuisApplication, LuisPredictionOptions, LuisRecognizer
from botbuilder.ai.luis.luis_util import LuisUtil
from botbuilder.core import (
BotAdapter,
IntentScore,
RecognizerResult,
TurnContext,
)
from botbuilder.core.adapters import TestAdapter
from botbuilder.schema import (
Activity,
ActivityTypes,
ChannelAccount,
ConversationAccount,
)
class LuisRecognizerV3Test(AsyncTestCase):
_luisAppId: str = "b31aeaf3-3511-495b-a07f-571fc873214b"
_subscriptionKey: str = "<KEY>"
_endpoint: str = "https://westus.api.cognitive.microsoft.com"
def __init__(self, *args, **kwargs):
super(LuisRecognizerV3Test, self).__init__(*args, **kwargs)
self._mocked_results: RecognizerResult = RecognizerResult(
intents={"Test": IntentScore(score=0.2), "Greeting": IntentScore(score=0.4)}
)
self._empty_luis_response: Dict[str, object] = json.loads(
'{ "query": null, "intents": [], "entities": [] }'
)
@staticmethod
def _remove_none_property(dictionary: Dict[str, object]) -> Dict[str, object]:
for key, value in list(dictionary.items()):
if value is None:
del dictionary[key]
elif isinstance(value, dict):
LuisRecognizerV3Test._remove_none_property(value)
return dictionary
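# Editor's note (sketch): strips None-valued keys recursively through nested dicts
# (but not through lists), e.g.
#   _remove_none_property({'a': None, 'b': {'c': None, 'd': 1}})  ->  {'b': {'d': 1}}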
@classmethod
@aioresponses()
async def _get_recognizer_result(
cls,
utterance: str,
response_json: Union[str, Dict[str, object]],
mock_get,
bot_adapter: BotAdapter = TestAdapter(),
options: Union[LuisRecognizerOptionsV3, LuisPredictionOptions] = None,
include_api_results: bool = False,
telemetry_properties: Dict[str, str] = None,
telemetry_metrics: Dict[str, float] = None,
recognizer_class: type = LuisRecognizer,
) -> Tuple[LuisRecognizer, RecognizerResult]:
if isinstance(response_json, str):
response_json = LuisRecognizerV3Test._get_json_for_file(
response_file=response_json
)
recognizer = LuisRecognizerV3Test._get_luis_recognizer(
recognizer_class, include_api_results=include_api_results, options=options
)
context = LuisRecognizerV3Test._get_context(utterance, bot_adapter)
# mock_get.return_value.__aenter__.return_value.json = CoroutineMock(side_effect=[response_json])
pattern = re.compile(r"^https://westus.api.cognitive.microsoft.com.*$")
mock_get.post(pattern, payload=response_json, status=200)
result = await recognizer.recognize(
context, telemetry_properties, telemetry_metrics
)
return recognizer, result
@classmethod
def _get_json_for_file(cls, response_file: str) -> Dict[str, object]:
curr_dir = path.dirname(path.abspath(__file__))
response_path = path.join(curr_dir, "test_data", response_file)
with open(response_path, "r", encoding="utf-8-sig") as file:
response_str = file.read()
response_json = json.loads(response_str)
return response_json
@classmethod
def _get_luis_recognizer(
cls,
recognizer_class: type,
options: Union[LuisPredictionOptions, LuisRecognizerOptionsV3] = None,
include_api_results: bool = False,
) -> LuisRecognizer:
luis_app = LuisApplication(cls._luisAppId, cls._subscriptionKey, cls._endpoint)
if isinstance(options, LuisRecognizerOptionsV3):
LuisRecognizerOptionsV3.include_api_results = include_api_results
return recognizer_class(
luis_app,
prediction_options=options,
include_api_results=include_api_results,
)
@staticmethod
def _get_context(utterance: str, bot_adapter: BotAdapter) -> TurnContext:
activity = Activity(
type=ActivityTypes.message,
text=utterance,
conversation=ConversationAccount(),
recipient=ChannelAccount(),
from_property=ChannelAccount(),
)
return TurnContext(bot_adapter, activity)
# Luis V3 endpoint tests begin here
async def _test_json_v3(self, response_file: str) -> None:
# Arrange
expected_json = LuisRecognizerV3Test._get_json_for_file(response_file)
response_json = expected_json["v3"]["response"]
utterance = expected_json.get("text")
if utterance is None:
utterance = expected_json.get("Text")
test_options = expected_json["v3"]["options"]
options = LuisRecognizerOptionsV3(
include_all_intents=test_options["includeAllIntents"],
include_instance_data=test_options["includeInstanceData"],
log=test_options["log"],
prefer_external_entities=test_options["preferExternalEntities"],
slot=test_options["slot"],
include_api_results=test_options["includeAPIResults"],
)
if "version" in test_options:
options.version = test_options["version"]
if "externalEntities" in test_options:
options.external_entities = test_options["externalEntities"]
# dynamic_lists: List = None,
# external_entities: List = None,
# telemetry_client: BotTelemetryClient = NullTelemetryClient(),
# log_personal_information: bool = False,)
# ,
# Act
_, result = await LuisRecognizerV3Test._get_recognizer_result(
utterance, response_json, options=options, include_api_results=True
)
# Assert
actual_result_json = LuisUtil.recognizer_result_as_dict(result)
del expected_json["v3"]
trimmed_expected = LuisRecognizerV3Test._remove_none_property(expected_json)
trimmed_actual = LuisRecognizerV3Test._remove_none_property(actual_result_json)
self.assertEqual(trimmed_expected, trimmed_actual)
async def test_composite1_v3(self):
await self._test_json_v3("Composite1_v3.json")
async def test_composite2_v3(self):
await self._test_json_v3("Composite2_v3.json")
async def test_composite3_v3(self):
await self._test_json_v3("Composite3_v3.json")
async def test_external_entities_and_built_in_v3(self):
await self._test_json_v3("ExternalEntitiesAndBuiltIn_v3.json")
async def test_external_entities_and_composite_v3(self):
await self._test_json_v3("ExternalEntitiesAndComposite_v3.json")
async def test_external_entities_and_list_v3(self):
await self._test_json_v3("ExternalEntitiesAndList_v3.json")
async def test_external_entities_and_regex_v3(self):
await self._test_json_v3("ExternalEntitiesAndRegex_v3.json")
async def test_external_entities_and_simple_v3(self):
await self._test_json_v3("ExternalEntitiesAndSimple_v3.json")
async def test_geo_people_ordinal_v3(self):
await self._test_json_v3("GeoPeopleOrdinal_v3.json")
async def test_minimal_v3(self):
await self._test_json_v3("Minimal_v3.json")
async def test_no_entities_instance_true_v3(self):
await self._test_json_v3("NoEntitiesInstanceTrue_v3.json")
async def test_patterns_v3(self):
await self._test_json_v3("Patterns_v3.json")
async def test_prebuilt_v3(self):
await self._test_json_v3("Prebuilt_v3.json")
async def test_roles_v3(self):
await self._test_json_v3("roles_v3.json")
async def test_trace_activity(self):
# Arrange
utterance: str = "fly on delta at 3pm"
expected_json = LuisRecognizerV3Test._get_json_for_file("Minimal_v3.json")
response_json = expected_json["v3"]["response"]
# add async support to magic mock.
async def async_magic():
pass
MagicMock.__await__ = lambda x: async_magic().__await__()
# Act
with mock.patch.object(TurnContext, "send_activity") as mock_send_activity:
await LuisRecognizerV3Test._get_recognizer_result(
utterance, response_json, options=LuisRecognizerOptionsV3()
)
trace_activity: Activity = mock_send_activity.call_args[0][0]
# Assert
self.assertIsNotNone(trace_activity)
self.assertEqual(LuisRecognizer.luis_trace_type, trace_activity.value_type)
self.assertEqual(LuisRecognizer.luis_trace_label, trace_activity.label)
luis_trace_info = trace_activity.value
self.assertIsNotNone(luis_trace_info)
self.assertIsNotNone(luis_trace_info["recognizerResult"])
self.assertIsNotNone(luis_trace_info["luisResult"])
self.assertIsNotNone(luis_trace_info["luisOptions"])
self.assertIsNotNone(luis_trace_info["luisModel"])
recognizer_result: RecognizerResult = luis_trace_info["recognizerResult"]
self.assertEqual(utterance, recognizer_result["text"])
self.assertIsNotNone(recognizer_result["intents"]["Roles"])
self.assertEqual(
LuisRecognizerV3Test._luisAppId, luis_trace_info["luisModel"]["ModelID"]
)
|
train_word2vec_model.py | DiceTechJobs/ConceptualSearch | 265 | 12630843 | import time
from gensim.models.word2vec import Word2Vec
from Utils.string_utils import clean_str
from Utils.file_utils import find_files
from analysis_pipeline import analyze, debug_analyze
from analysis_pipeline import build_synonym_filter, fact_case_sensitive_stop_word_filter, fact_stop_word_filter
from analysis_pipeline import fact_is_synonym_filter, white_space_tokenize, remove_punct_at_end_filter, lower_case_filter, remove_empty_tokens_filter
from Config.train_word2vec_model_config import TrainWord2VecModelConfig
import sys
""" TRAIN Word 2 Vec Model"""
if len(sys.argv) != 2:
raise Exception("Incorrect number of arguments passed - one expected, the config file name")
config = TrainWord2VecModelConfig(sys.argv[1])
""" Load analysis chain """
syn_mapper = build_synonym_filter(config.keywords_files, config.case_sensitive)
if config.case_sensitive:
stop_filter = fact_case_sensitive_stop_word_filter(config.stop_words_file)
else:
stop_filter = fact_stop_word_filter(config.stop_words_file)
# <NAME>: This is quite inefficient, as each function is applied in turn
# resulting in multiple passes over the token stream. While not currently a
# big performance bottleneck, could be much faster.
# - TODO: use functional composition to speed up
is_a_synonym_filter = fact_is_synonym_filter(syn_mapper)
analysis_chain = [clean_str,
white_space_tokenize,
remove_punct_at_end_filter,
lower_case_filter,
stop_filter,
syn_mapper.map_synonyms,
remove_empty_tokens_filter]
# is_a_synonym_filter] - Un-comment to just train on keywords.
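# Editor's sketch for the composition TODO above (hypothetical helper, not used below;
# a real speed-up would also need per-token variants of the list-level filters):
# from functools import reduce
# def compose(*fns):
#     return lambda value: reduce(lambda acc, fn: fn(acc), fns, value)
# analyze_sentence = compose(*analysis_chain)  # analyze_sentence(s) ~ analyze(s, analysis_chain)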
#Test
#rslt = debug_analyze("$150k as400 Sr.\ Java/j2ee and the C#.! developer. FIT \"HOT\" dev. -IBM's business, sql server management", analysis_chain)
""" Load Documents """
start = time.time()
sentences = []
files = find_files(config.processed_documents_folder, config.file_mask, True)
print("%s files found in %s" % (len(files), config.processed_documents_folder))
documents = []
for i, fname in enumerate(files):
with open(fname) as f:
contents = f.read()
sentences.extend(contents.split("\n"))
end = time.time()
print("Loading %i sentences took %s seconds" % (len(sentences), str(end - start)))
""" Analyze - clean, tokenize, extract phrases """
print("%i sentences to process" % len(sentences))
tokenized = []
print("Tokenizing sentences")
for i, sent in enumerate(sentences):
tokens = analyze(sent, analysis_chain)
if len(tokens) >= config.min_sentence_length_words:
tokenized.append(tokens)
if i % 100000 == 0:
print(i)
""" Train Model """
start = time.time()
print("Training Model. This could take a while (10-60 mins for moderate collections). Get a coffee")
model = Word2Vec(tokenized, iter=config.training_iterations, size=config.vector_size, window=config.window_size, min_count=config.min_word_count, workers=config.workers, sample=1e-5, hs=0, negative=20)
model.save(config.model_file)
end = time.time()
print "Took %s seconds" % (end - start) |
inactiveusers.py | conradwee/telegram-analysis | 104 | 12630866 | #!/usr/bin/env python3
"""
A quick hack of a program to find a rough percentage of users in a chat who have sent less than 3 messages.
Warning: written at 1AM
"""
import argparse
from json import loads
from os import path
from collections import defaultdict
def main():
"""
main function
"""
#cutoff for a 'non active' user
minimum = 3
non_active_users = 0
active_users = 0
parser = argparse.ArgumentParser(description="Find the number of inactive users (users who have sent less than 3 messages) in a Telegram chat")
parser.add_argument('filepath', help='the jsonl chatlog file to analyse')
args = parser.parse_args()
filepath = args.filepath
_, filename = path.split(filepath)
filename, _ = path.splitext(filename)
#make filename just the name of the file, with no leading directories and no extension
counter = defaultdict(int) #store events from each user
#names = {} #dict
total_datapoints = 0
with open(filepath, 'r') as jsonfile:
events = (loads(line) for line in jsonfile)
for event in events:
if "from" in event:
if "peer_id" in event["from"] and "print_name" in event["from"]:
total_datapoints += 1
user = event['from']['peer_id']
counter[user] += 1
for person, frequency in counter.items():
if frequency < minimum:
non_active_users += 1
else:
active_users += 1
print('For this chat, there were {} users who sent less than'
' {} messages, out of a total of {}.'.format(
non_active_users,minimum,non_active_users+active_users))
print("That's", round(100* non_active_users/(non_active_users + active_users),1), "%!")
# print(type(*sorted(counter.items())))
# plt.pie(*zip(*sorted(counter.items())))
if __name__ == "__main__":
main()
|
tests/test_faucet.py | c1x1x00xxPentium/poseidon | 251 | 12630871 | # -*- coding: utf-8 -*-
"""
Test module for faucet.
@author: <NAME>
"""
import os
import shutil
import tempfile
from faucetconfgetsetter import FaucetLocalConfGetSetter
from poseidon_core.controllers.faucet.faucet import FaucetProxy
from poseidon_core.helpers.config import Config
from poseidon_core.helpers.config import parse_rules
from poseidon_core.helpers.config import represent_none
from poseidon_core.helpers.config import yaml_load
from poseidon_core.helpers.config import yaml_in
from poseidon_core.helpers.config import yaml_out
from poseidon_core.helpers.endpoint import endpoint_factory
SAMPLE_CONFIG = 'tests/sample_faucet_config.yaml'
def _get_proxy(faucetconfgetsetter_cl, config=None, **kwargs):
if config is None:
config = Config().get_config()
return FaucetProxy(config, faucetconfgetsetter_cl=faucetconfgetsetter_cl, **kwargs)
def test_yaml_in():
with tempfile.TemporaryDirectory() as tmpdir:
test_yaml_file = os.path.join(tmpdir, 'test.yaml')
content = {'test': 'content'}
yaml_out(test_yaml_file, content)
assert yaml_in(test_yaml_file) == content
def test_get_endpoints():
with tempfile.TemporaryDirectory() as tmpdir:
faucetconfgetsetter_cl = FaucetLocalConfGetSetter
faucetconfgetsetter_cl.DEFAULT_CONFIG_FILE = os.path.join(
tmpdir, 'faucet.yaml')
shutil.copy(SAMPLE_CONFIG, faucetconfgetsetter_cl.DEFAULT_CONFIG_FILE)
proxy = _get_proxy(faucetconfgetsetter_cl)
a = proxy.get_endpoints()
assert isinstance(a, list)
proxy = _get_proxy(faucetconfgetsetter_cl)
a = proxy.get_endpoints(messages=[{'dp_name': 'switch', 'L2_LEARN': {'l3_src_ip': '10.0.0.1', 'eth_src': '00:00:00:00:00:00', 'port_no': 1, 'vid': '100'}}, {
'version': 1, 'time': 1525205350.0357792, 'dp_id': 1, 'dp_name': 'switch-1', 'event_id': 5, 'PORT_CHANGE': {'port_no': 1, 'reason': 'MODIFY', 'status': False}}, {}])
assert isinstance(a, list)
def test_FaucetProxy():
"""
Tests Faucet
"""
with tempfile.TemporaryDirectory() as tmpdir:
faucetconfgetsetter_cl = FaucetLocalConfGetSetter
faucetconfgetsetter_cl.DEFAULT_CONFIG_FILE = os.path.join(
tmpdir, 'faucet.yaml')
shutil.copy(SAMPLE_CONFIG, faucetconfgetsetter_cl.DEFAULT_CONFIG_FILE)
proxy = _get_proxy(faucetconfgetsetter_cl)
proxy.mirror_mac('00:00:00:00:00:00', None, None)
proxy.mirror_mac('00:00:00:00:00:01', None, None)
proxy.unmirror_mac('00:00:00:00:00:00', None, None)
proxy.update_acls()
proxy = _get_proxy(faucetconfgetsetter_cl)
proxy.mirror_mac('00:00:00:00:00:00', None, None)
proxy.mirror_mac('00:00:00:00:00:01', None, None)
proxy.unmirror_mac('00:00:00:00:00:00', None, None)
proxy.update_acls()
config = Config().get_config()
config['MIRROR_PORTS'] = {'foo': 1}
config['ignore_vlans'] = ['foo']
config['ignore_ports'] = [1]
proxy = _get_proxy(faucetconfgetsetter_cl, config)
def test_format_endpoints():
data = [[{'ip-state': 'foo'}, {'ip-state': 'bar'}],
[{'ip-state': 'foo', 'ip-address': '0.0.0.0'}, {'ip-state': 'bar', 'ip-address': '::1'}]]
output = FaucetProxy.format_endpoints(data)
def test_ignore_events():
with tempfile.TemporaryDirectory() as tmpdir:
faucetconfgetsetter_cl = FaucetLocalConfGetSetter
faucetconfgetsetter_cl.DEFAULT_CONFIG_FILE = os.path.join(
tmpdir, 'faucet.yaml')
faucet = _get_proxy(
faucetconfgetsetter_cl=faucetconfgetsetter_cl, ignore_vlans=[999], ignore_ports={'switch99': 11})
for message_type in ('L2_LEARN',):
assert faucet.ignore_event(
{'dp_name': 'switch123', message_type: {'vid': 999, 'port_no': 123}})
assert not faucet.ignore_event(
{'dp_name': 'switch123', message_type: {'vid': 333, 'port_no': 123}})
assert faucet.ignore_event(
{'dp_name': 'switch99', message_type: {'vid': 333, 'port_no': 11}})
assert not faucet.ignore_event(
{'dp_name': 'switch99', message_type: {'vid': 333, 'port_no': 99}})
assert faucet.ignore_event(
{'dp_name': 'switch99', message_type: {'vid': 333, 'port_no': 99, 'stack_descr': 'something'}})
assert faucet.ignore_event(
{'dp_name': 'switch123', 'UNKNOWN': {'vid': 123, 'port_no': 123}})
def test_parse_rules():
with tempfile.TemporaryDirectory() as tmpdir:
shutil.copy(SAMPLE_CONFIG, tmpdir)
parse_rules(os.path.join(tmpdir, os.path.basename(SAMPLE_CONFIG)))
def test_clear_mirrors():
with tempfile.TemporaryDirectory() as tmpdir:
faucetconfgetsetter_cl = FaucetLocalConfGetSetter
faucetconfgetsetter_cl.DEFAULT_CONFIG_FILE = os.path.join(
tmpdir, 'faucet.yaml')
shutil.copy(SAMPLE_CONFIG, faucetconfgetsetter_cl.DEFAULT_CONFIG_FILE)
faucet = _get_proxy(
faucetconfgetsetter_cl=faucetconfgetsetter_cl, ignore_vlans=[999], ignore_ports={'switch99': 11})
faucet.frpc.read_faucet_conf(
config_file=faucetconfgetsetter_cl.DEFAULT_CONFIG_FILE)
faucet.clear_mirrors()
faucet.frpc.write_faucet_conf()
def test_represent_none():
class MockDumper:
def represent_scalar(self, foo, bar): return True
foo = MockDumper()
represent_none(foo, '')
def test_set_mirror_config():
faucet_conf_str = """
dps:
s1:
interfaces:
1:
output_only: true
mirror: [2]
2:
native_vlan: 100
3:
native_vlan: 100
"""
def mirrors(faucet):
faucet_conf = faucet.frpc.faucet_conf
switch_conf = faucet_conf['dps']['s1']
mirror_interface_conf = switch_conf['interfaces'][1]
return mirror_interface_conf.get('mirror', None)
with tempfile.TemporaryDirectory() as tmpdir:
faucetconfgetsetter_cl = FaucetLocalConfGetSetter
faucetconfgetsetter_cl.DEFAULT_CONFIG_FILE = os.path.join(
tmpdir, 'faucet.yaml')
faucet = _get_proxy(
faucetconfgetsetter_cl=faucetconfgetsetter_cl,
mirror_ports={'s1': 1},
proxy_mirror_ports={'sx': ['s1', 99]})
faucet.frpc.faucet_conf = yaml_load(
faucet_conf_str)
assert mirrors(faucet) == [2]
faucet.frpc.mirror_port('s1', 1, 3)
assert mirrors(faucet) == [2, 3]
faucet.frpc.mirror_port('s1', 1, 2)
assert mirrors(faucet) == [2, 3]
faucet.frpc.clear_mirror_port('s1', 1)
assert mirrors(faucet) is None
def test_stack_default_config():
faucet_conf_str = """
dps:
s1:
stack:
priority: 1
dp_id: 0x1
interfaces:
1:
output_only: true
2:
native_vlan: 100
3:
native_vlan: 100
4:
stack:
dp: s2
port: 4
s2:
dp_id: 0x2
interfaces:
1:
output_only: true
2:
native_vlan: 100
3:
native_vlan: 100
4:
stack:
dp: s1
port: 4
acls:
existing_acl:
- rule:
actions:
allow: 1
"""
new_faucet_conf_str = """
dps:
s1:
stack:
priority: 1
dp_id: 0x1
arp_neighbor_timeout: 123
timeout: 247
interfaces:
1:
output_only: true
description: Poseidon local mirror
2:
native_vlan: 100
3:
native_vlan: 100
4:
stack:
dp: s2
port: 4
s2:
dp_id: 0x2
arp_neighbor_timeout: 123
timeout: 247
interfaces:
1:
description: Poseidon remote mirror (loopback plug)
acls_in: [poseidon_tunnel]
coprocessor:
strategy: vlan_vid
2:
native_vlan: 100
3:
native_vlan: 100
4:
stack:
dp: s1
port: 4
acls:
existing_acl:
- rule:
actions:
allow: 1
poseidon_tunnel:
- rule:
vlan_vid: 999
actions:
allow: 0
- rule:
actions:
allow: 0
output:
tunnel:
type: vlan
tunnel_id: 999
dp: s1
port: 1
"""
orig_faucet_conf = yaml_load(faucet_conf_str)
test_faucet_conf = yaml_load(new_faucet_conf_str)
with tempfile.TemporaryDirectory() as tmpdir:
faucetconfgetsetter_cl = FaucetLocalConfGetSetter
faucetconfgetsetter_cl.DEFAULT_CONFIG_FILE = os.path.join(
tmpdir, 'faucet.yaml')
faucet = _get_proxy(
faucetconfgetsetter_cl=faucetconfgetsetter_cl,
mirror_ports={'s1': 1, 's2': 1},
proxy_mirror_ports={'sx': ['s1', 99]},
tunnel_vlan=999, tunnel_name='poseidon_tunnel')
faucet.reinvestigation_frequency = 123
faucet.frpc.faucet_conf = orig_faucet_conf
faucet.frpc.write_faucet_conf()
faucet._set_default_switch_conf()
faucet.frpc.read_faucet_conf(config_file=None)
assert faucet.frpc.faucet_conf['dps']['s1'] == test_faucet_conf['dps']['s1']
assert faucet.frpc.faucet_conf['dps']['s2'] == test_faucet_conf['dps']['s2']
assert faucet.frpc.faucet_conf['acls'] == test_faucet_conf['acls']
def test_proxy_mirror_config():
faucet_conf_str = """
dps:
s1:
interfaces:
1:
output_only: true
2:
native_vlan: 100
3:
native_vlan: 100
99:
native_vlan: 100
sx:
interfaces:
1:
native_vlan: 100
"""
faucet_conf = yaml_load(faucet_conf_str)
with tempfile.TemporaryDirectory() as tmpdir:
faucetconfgetsetter_cl = FaucetLocalConfGetSetter
faucetconfgetsetter_cl.DEFAULT_CONFIG_FILE = os.path.join(
tmpdir, 'faucet.yaml')
faucet = _get_proxy(
faucetconfgetsetter_cl=faucetconfgetsetter_cl,
mirror_ports={'s1': 1},
proxy_mirror_ports={'sx': ['s1', 99]})
# returns s1:99, not sx.
faucet.frpc.faucet_conf = faucet_conf
assert faucet.proxy_mirror_port('sx', 1) == ('s1', 99)
def test_check_mirror_config():
faucet_conf_str = """
dps:
s1:
interfaces:
1:
output_only: true
mirror: [2]
2:
native_vlan: 100
3:
native_vlan: 100
"""
faucet_conf = yaml_load(faucet_conf_str)
with tempfile.TemporaryDirectory() as tmpdir:
faucetconfgetsetter_cl = FaucetLocalConfGetSetter
faucetconfgetsetter_cl.DEFAULT_CONFIG_FILE = os.path.join(
tmpdir, 'faucet.yaml')
faucet = _get_proxy(
faucetconfgetsetter_cl=faucetconfgetsetter_cl,
mirror_ports={'s1': 1},
proxy_mirror_ports={'sx': ['s1', 99]})
faucet.frpc.faucet_conf = faucet_conf
port = faucet.mirror_switch_port('s1')
faucet.frpc.write_faucet_conf()
assert port == 1
def test_config():
"""
Tests Config Operations
"""
def check_config(obj, endpoints):
for endpoint in endpoints:
obj.mac_table[endpoint.endpoint_data['mac']] = [
endpoint.endpoint_data]
obj.mirror_mac('00:00:00:00:00:00', 't1-1', 1)
obj.mirror_mac('00:00:00:00:00:00', 0x1, 2)
obj.mirror_mac('00:00:00:00:00:00', 't1-1', 2)
obj.mirror_mac('00:00:00:00:00:00', 't1-1', 3)
obj.mirror_mac('00:00:00:00:00:00', 't2-1', 5)
obj.mirror_mac('00:00:00:00:00:00', 'bad', 6)
obj.unmirror_mac('00:00:00:00:00:01', None, None)
obj.unmirror_mac('00:00:00:00:00:01', 't1-1', 1)
obj.unmirror_mac('00:00:00:00:00:01', 't1-1', 3)
obj.mirror_mac('00:00:00:00:00:01', 't1-1', 3)
obj.unmirror_mac('00:00:00:00:00:01', 't1-1', 3)
obj.update_acls()
obj.update_acls(endpoints=endpoints,
rules_file=os.path.join(os.getcwd(), 'config/rules.yaml'))
endpoint = endpoint_factory('foo')
endpoint.endpoint_data = {
'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 't1-1', 'port': '1', 'ipv4': '0.0.0.0', 'ipv6': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b'}
endpoint.metadata = {'mac_addresses': {'00:00:00:00:00:00': {'1551805502.0': {'labels': ['developer workstation']}}}, 'ipv4_addresses': {
'0.0.0.0': {'os': 'windows'}}, 'ipv6_addresses': {'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b': {'os': 'windows'}}}
endpoint2 = endpoint_factory('foo')
endpoint2.endpoint_data = {
'tenant': 'foo', 'mac': '00:00:00:00:00:01', 'segment': 't1-1', 'port': '3', 'ipv4': '0.0.0.0', 'ipv6': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b'}
endpoint2.metadata = {'mac_addresses': {'00:00:00:00:00:01': {'1551805502.0': {'labels': ['developer workstation']}}}, 'ipv4_addresses': {
'0.0.0.0': {'os': 'windows'}}, 'ipv6_addresses': {'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b': {'os': 'windows'}}}
endpoints = [endpoint, endpoint2]
with tempfile.TemporaryDirectory() as tmpdir:
faucetconfgetsetter_cl = FaucetLocalConfGetSetter
faucetconfgetsetter_cl.DEFAULT_CONFIG_FILE = os.path.join(
tmpdir, 'faucet.yaml')
shutil.copy(SAMPLE_CONFIG, faucetconfgetsetter_cl.DEFAULT_CONFIG_FILE)
parser = _get_proxy(
faucetconfgetsetter_cl=faucetconfgetsetter_cl,
mirror_ports={'t1-1': 2},
proxy_mirror_ports={'sx': ['s1', 99]})
parser.frpc.faucet_conf = yaml_load(
faucetconfgetsetter_cl.DEFAULT_CONFIG_FILE)
parser2 = _get_proxy(faucetconfgetsetter_cl=faucetconfgetsetter_cl)
parser2.frpc.faucet_conf = yaml_load(
faucetconfgetsetter_cl.DEFAULT_CONFIG_FILE)
config = Config().get_config()
proxy = _get_proxy(
faucetconfgetsetter_cl=faucetconfgetsetter_cl, config=config)
check_config(parser, endpoints)
check_config(parser2, endpoints)
check_config(proxy, endpoints)
|
alipay/aop/api/domain/CategoryRequireInfo.py | antopen/alipay-sdk-python-all | 213 | 12630883 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class CategoryRequireInfo(object):
def __init__(self):
self._business_licence_required = None
self._category_code = None
self._category_name = None
self._category_requirements = None
self._door_photo_required = None
self._special_licence_required = None
@property
def business_licence_required(self):
return self._business_licence_required
@business_licence_required.setter
def business_licence_required(self, value):
self._business_licence_required = value
@property
def category_code(self):
return self._category_code
@category_code.setter
def category_code(self, value):
self._category_code = value
@property
def category_name(self):
return self._category_name
@category_name.setter
def category_name(self, value):
self._category_name = value
@property
def category_requirements(self):
return self._category_requirements
@category_requirements.setter
def category_requirements(self, value):
self._category_requirements = value
@property
def door_photo_required(self):
return self._door_photo_required
@door_photo_required.setter
def door_photo_required(self, value):
self._door_photo_required = value
@property
def special_licence_required(self):
return self._special_licence_required
@special_licence_required.setter
def special_licence_required(self, value):
self._special_licence_required = value
def to_alipay_dict(self):
params = dict()
if self.business_licence_required:
if hasattr(self.business_licence_required, 'to_alipay_dict'):
params['business_licence_required'] = self.business_licence_required.to_alipay_dict()
else:
params['business_licence_required'] = self.business_licence_required
if self.category_code:
if hasattr(self.category_code, 'to_alipay_dict'):
params['category_code'] = self.category_code.to_alipay_dict()
else:
params['category_code'] = self.category_code
if self.category_name:
if hasattr(self.category_name, 'to_alipay_dict'):
params['category_name'] = self.category_name.to_alipay_dict()
else:
params['category_name'] = self.category_name
if self.category_requirements:
if hasattr(self.category_requirements, 'to_alipay_dict'):
params['category_requirements'] = self.category_requirements.to_alipay_dict()
else:
params['category_requirements'] = self.category_requirements
if self.door_photo_required:
if hasattr(self.door_photo_required, 'to_alipay_dict'):
params['door_photo_required'] = self.door_photo_required.to_alipay_dict()
else:
params['door_photo_required'] = self.door_photo_required
if self.special_licence_required:
if hasattr(self.special_licence_required, 'to_alipay_dict'):
params['special_licence_required'] = self.special_licence_required.to_alipay_dict()
else:
params['special_licence_required'] = self.special_licence_required
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = CategoryRequireInfo()
if 'business_licence_required' in d:
o.business_licence_required = d['business_licence_required']
if 'category_code' in d:
o.category_code = d['category_code']
if 'category_name' in d:
o.category_name = d['category_name']
if 'category_requirements' in d:
o.category_requirements = d['category_requirements']
if 'door_photo_required' in d:
o.door_photo_required = d['door_photo_required']
if 'special_licence_required' in d:
o.special_licence_required = d['special_licence_required']
return o
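# Illustrative usage sketch (field values below are made up): the generated model
# round-trips between attribute access and the plain dict form used by the SDK.
#
#   info = CategoryRequireInfo()
#   info.category_code = "B0001"
#   info.business_licence_required = True
#   payload = info.to_alipay_dict()                      # plain dict for the request
#   clone = CategoryRequireInfo.from_alipay_dict(payload)
#   assert clone.category_code == info.category_code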
|
src/pykka/_proxy.py | jodal/pykka | 796 | 12630902 | <filename>src/pykka/_proxy.py
import logging
from collections.abc import Callable
from typing import NamedTuple
from pykka import ActorDeadError, messages
__all__ = ["ActorProxy"]
logger = logging.getLogger("pykka")
class AttrInfo(NamedTuple):
callable: bool
traversable: bool
class ActorProxy:
"""
An :class:`ActorProxy` wraps an :class:`ActorRef <pykka.ActorRef>`
instance. The proxy allows the referenced actor to be used through regular
method calls and field access.
You can create an :class:`ActorProxy` from any :class:`ActorRef
<pykka.ActorRef>`::
actor_ref = MyActor.start()
actor_proxy = ActorProxy(actor_ref)
You can also get an :class:`ActorProxy` by using :meth:`proxy()
<pykka.ActorRef.proxy>`::
actor_proxy = MyActor.start().proxy()
**Attributes and method calls**
When reading an attribute or getting a return value from a method, you get
a :class:`Future <pykka.Future>` object back. To get the enclosed value
from the future, you must call :meth:`get() <pykka.Future.get>` on the
returned future::
print(actor_proxy.string_attribute.get())
print(actor_proxy.count().get() + 1)
    If you call a method just for its side effects and do not care about the
return value, you do not need to accept the returned future or call
:meth:`get() <pykka.Future.get>` on the future. Simply call the method, and
it will be executed concurrently with your own code::
actor_proxy.method_with_side_effect()
If you want to block your own code from continuing while the other method
is processing, you can use :meth:`get() <pykka.Future.get>` to block until
it completes::
actor_proxy.method_with_side_effect().get()
You can also use the ``await`` keyword to block until the method completes::
await actor_proxy.method_with_side_effect()
If you access a proxied method as an attribute, without calling it, you
get an :class:`CallableProxy`.
**Proxy to itself**
An actor can use a proxy to itself to schedule work for itself. The
scheduled work will only be done after the current message and all messages
already in the inbox are processed.
For example, if an actor can split a time consuming task into multiple
parts, and after completing each part can ask itself to start on the next
part using proxied calls or messages to itself, it can react faster to
other incoming messages as they will be interleaved with the parts of the
time consuming task. This is especially useful for being able to stop the
actor in the middle of a time consuming task.
To create a proxy to yourself, use the actor's :attr:`actor_ref
<pykka.Actor.actor_ref>` attribute::
proxy_to_myself_in_the_future = self.actor_ref.proxy()
If you create a proxy in your actor's constructor or :meth:`on_start
<pykka.Actor.on_start>` method, you can create a nice API for deferring
work to yourself in the future::
def __init__(self):
...
self._in_future = self.actor_ref.proxy()
...
def do_work(self):
...
self._in_future.do_more_work()
...
def do_more_work(self):
...
To avoid infinite loops during proxy introspection, proxies to self
should be kept as private instance attributes by prefixing the attribute
name with ``_``.
**Examples**
An example of :class:`ActorProxy` usage:
.. literalinclude:: ../../examples/counter.py
:param actor_ref: reference to the actor to proxy
:type actor_ref: :class:`pykka.ActorRef`
:raise: :exc:`pykka.ActorDeadError` if actor is not available
"""
#: The actor's :class:`pykka.ActorRef` instance.
actor_ref = None
def __init__(self, actor_ref, attr_path=None):
if not actor_ref.is_alive():
raise ActorDeadError(f"{actor_ref} not found")
self.actor_ref = actor_ref
self._actor = actor_ref._actor
self._attr_path = attr_path or tuple()
self._known_attrs = self._introspect_attributes()
self._actor_proxies = {}
self._callable_proxies = {}
def _introspect_attributes(self):
"""Introspects the actor's attributes."""
result = {}
attr_paths_to_visit = [[attr_name] for attr_name in dir(self._actor)]
while attr_paths_to_visit:
attr_path = attr_paths_to_visit.pop(0)
if not self._is_exposable_attribute(attr_path[-1]):
continue
attr = self._actor._introspect_attribute_from_path(attr_path)
if self._is_self_proxy(attr):
logger.warning(
f"{self._actor} attribute {'.'.join(attr_path)!r} "
f"is a proxy to itself. "
f"Consider making it private "
f"by renaming it to {'_' + attr_path[-1]!r}."
)
continue
attr_info = AttrInfo(
callable=self._is_callable_attribute(attr),
traversable=self._is_traversable_attribute(attr),
)
result[tuple(attr_path)] = attr_info
if attr_info.traversable:
for attr_name in dir(attr):
attr_paths_to_visit.append(attr_path + [attr_name])
return result
def _is_exposable_attribute(self, attr_name):
"""
Returns true for any attribute name that may be exposed through
:class:`ActorProxy`.
"""
return not attr_name.startswith("_")
def _is_self_proxy(self, attr):
"""Returns true if attribute is an equivalent actor proxy."""
return attr == self
def _is_callable_attribute(self, attr):
"""Returns true for any attribute that is callable."""
return isinstance(attr, Callable)
def _is_traversable_attribute(self, attr):
"""
Returns true for any attribute that may be traversed from another
actor through a proxy.
"""
return (
getattr(attr, "_pykka_traversable", False) is True
or getattr(attr, "pykka_traversable", False) is True
)
def __eq__(self, other):
if not isinstance(other, ActorProxy):
return False
if self._actor != other._actor:
return False
if self._attr_path != other._attr_path:
return False
return True
def __hash__(self):
return hash((self._actor, self._attr_path))
def __repr__(self):
return f"<ActorProxy for {self.actor_ref}, attr_path={self._attr_path!r}>"
def __dir__(self):
result = ["__class__"]
result += list(self.__class__.__dict__.keys())
result += list(self.__dict__.keys())
result += [attr_path[0] for attr_path in list(self._known_attrs.keys())]
return sorted(result)
def __getattr__(self, name):
"""Get a field or callable from the actor."""
attr_path = self._attr_path + (name,)
if attr_path not in self._known_attrs:
self._known_attrs = self._introspect_attributes()
attr_info = self._known_attrs.get(attr_path)
if attr_info is None:
raise AttributeError(f"{self} has no attribute {name!r}")
if attr_info.callable:
if attr_path not in self._callable_proxies:
self._callable_proxies[attr_path] = CallableProxy(
self.actor_ref, attr_path
)
return self._callable_proxies[attr_path]
elif attr_info.traversable:
if attr_path not in self._actor_proxies:
self._actor_proxies[attr_path] = ActorProxy(self.actor_ref, attr_path)
return self._actor_proxies[attr_path]
else:
message = messages.ProxyGetAttr(attr_path=attr_path)
return self.actor_ref.ask(message, block=False)
def __setattr__(self, name, value):
"""
Set a field on the actor.
        Blocks until the field is set to check whether any exception was raised.
"""
if name == "actor_ref" or name.startswith("_"):
return super().__setattr__(name, value)
attr_path = self._attr_path + (name,)
message = messages.ProxySetAttr(attr_path=attr_path, value=value)
self.actor_ref.ask(message)
class CallableProxy:
"""Proxy to a single method.
:class:`CallableProxy` instances are returned when accessing methods on a
:class:`ActorProxy` without calling them.
Example::
proxy = AnActor.start().proxy()
# Ask semantics returns a future. See `__call__()` docs.
future = proxy.do_work()
# Tell semantics are fire and forget. See `defer()` docs.
proxy.do_work.defer()
"""
def __init__(self, actor_ref, attr_path):
self.actor_ref = actor_ref
self._attr_path = attr_path
def __call__(self, *args, **kwargs):
"""Call with :meth:`~pykka.ActorRef.ask` semantics.
Returns a future which will yield the called method's return value.
        If the call raises an exception, it is set on the future and will be
reraised by :meth:`~pykka.Future.get`. If the future is left unused,
the exception will not be reraised. Either way, the exception will
also be logged. See :ref:`logging` for details.
"""
message = messages.ProxyCall(
attr_path=self._attr_path, args=args, kwargs=kwargs
)
return self.actor_ref.ask(message, block=False)
def defer(self, *args, **kwargs):
"""Call with :meth:`~pykka.ActorRef.tell` semantics.
Does not create or return a future.
If the call raises an exception, there is no future to set the
exception on. Thus, the actor's :meth:`~pykka.Actor.on_failure` hook
is called instead.
.. versionadded:: 2.0
"""
message = messages.ProxyCall(
attr_path=self._attr_path, args=args, kwargs=kwargs
)
self.actor_ref.tell(message)
def traversable(obj):
"""Marks an actor attribute as traversable.
The traversable marker makes the actor attribute's own methods and
attributes available to users of the actor through an
:class:`~pykka.ActorProxy`.
Used as a function to mark a single attribute::
class AnActor(pykka.ThreadingActor):
playback = pykka.traversable(Playback())
class Playback(object):
def play(self):
return True
This function can also be used as a class decorator, making all instances
of the class traversable::
class AnActor(pykka.ThreadingActor):
playback = Playback()
@pykka.traversable
class Playback(object):
def play(self):
return True
The third alternative, and the only way in Pykka < 2.0, is to manually
mark a class as traversable by setting the ``pykka_traversable`` attribute
to :class:`True`::
class AnActor(pykka.ThreadingActor):
playback = Playback()
class Playback(object):
pykka_traversable = True
def play(self):
return True
When the attribute is marked as traversable, its methods can be executed
in the context of the actor through an actor proxy::
proxy = AnActor.start().proxy()
assert proxy.playback.play().get() is True
.. versionadded:: 2.0
"""
if hasattr(obj, "__slots__"):
raise Exception(
"pykka.traversable() cannot be used to mark "
"an object using slots as traversable."
)
obj._pykka_traversable = True
return obj
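# A minimal usage sketch of the proxy API documented above (Adder is an example
# actor, not part of this module):
#
#   import pykka
#
#   class Adder(pykka.ThreadingActor):
#       def add_one(self, i):
#           return i + 1
#
#   actor_ref = Adder.start()
#   proxy = actor_ref.proxy()              # ActorProxy
#   assert proxy.add_one(1).get() == 2     # ask semantics: returns a Future
#   proxy.add_one.defer(1)                 # tell semantics: fire and forget
#   actor_ref.stop()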
|
dbaas/system/migrations/0003_auto__add_celeryhealthcheck.py | didindinn/database-as-a-service | 303 | 12630917 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CeleryHealthCheck'
db.create_table(u'system_celeryhealthcheck', (
(u'id', self.gf('django.db.models.fields.AutoField')
(primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')
(auto_now_add=True, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')
(auto_now=True, blank=True)),
('last_update', self.gf(
'django.db.models.fields.DateTimeField')()),
))
db.send_create_signal(u'system', ['CeleryHealthCheck'])
def backwards(self, orm):
# Deleting model 'CeleryHealthCheck'
db.delete_table(u'system_celeryhealthcheck')
models = {
u'system.celeryhealthcheck': {
'Meta': {'object_name': 'CeleryHealthCheck'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'system.configuration': {
'Meta': {'object_name': 'Configuration'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['system']
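# Illustrative usage sketch (standard South workflow; the app label comes from
# complete_apps above, the numbers are this migration and its predecessor):
#   python manage.py migrate system 0003   # apply (forwards)
#   python manage.py migrate system 0002   # roll back (backwards)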
|
zippy/benchmarks/src/benchmarks/sympy/sympy/physics/optics/waves.py | lucapele/pele-c | 319 | 12630935 | <filename>zippy/benchmarks/src/benchmarks/sympy/sympy/physics/optics/waves.py
"""
This module has all the classes and functions related to waves in optics.
**Contains**
* TWave
"""
from __future__ import print_function, division
__all__ = ['TWave']
from sympy import sympify, pi, cos, sqrt, simplify, Symbol, S
from sympy.core.expr import Expr
class TWave(Expr):
r"""
This is a simple transverse wave travelling in a two dimensional space.
Basic properties are required at the time of creation of the object but
they can be changed later with respective methods provided.
It has been represented as :math:`A \times cos(\omega \times t + \phi )`
where :math:`A` is amplitude, :math:`\omega` is angular velocity and
:math:`\phi` is phase angle of the wave.
Arguments
=========
amplitude : Sympifyable
Amplitude of the wave.
frequency : Sympifyable
Frequency of the wave.
phase : Sympifyable
Phase angle of the wave.
time_period : Sympifyable
Time period of the wave.
n : Sympifyable
Refractive index of the medium.
Raises
=======
    ValueError : When neither frequency nor time period is provided.
    TypeError : When anything other than a TWave object is added.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A1, phi1, A2, phi2, f = symbols('A1, phi1, A2, phi2, f')
>>> w1 = TWave(A1, f, phi1)
>>> w2 = TWave(A2, f, phi2)
>>> w3 = w1 + w2 # Superposition of two waves
>>> w3
TWave(sqrt(A1**2 + 2*A1*A2*cos(phi1 - phi2) + A2**2), f, phi1 + phi2)
>>> w3.amplitude
sqrt(A1**2 + 2*A1*A2*cos(phi1 - phi2) + A2**2)
>>> w3.phase
phi1 + phi2
>>> w3.speed
c/n
>>> w3.angular_velocity
2*pi*f
"""
def __init__(
self,
amplitude,
frequency=None,
phase=S.Zero,
time_period=None,
n=Symbol('n')):
frequency = sympify(frequency)
amplitude = sympify(amplitude)
phase = sympify(phase)
time_period = sympify(time_period)
n = sympify(n)
self._frequency = frequency
self._amplitude = amplitude
self._phase = phase
self._time_period = time_period
self._n = n
self.c = Symbol('c') # Speed of light in vacuum
if time_period is not None:
self._frequency = 1/self._time_period
if frequency is not None:
self._time_period = 1/self._frequency
if time_period is not None:
if frequency != 1/time_period:
raise ValueError("frequency and time_period should be consistent.")
if frequency is None and time_period is None:
raise ValueError("Either frequency or time period is needed.")
@property
def frequency(self):
"""
Returns the frequency of the wave.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.frequency
f
"""
return self._frequency
@property
def time_period(self):
"""
Returns the time period of the wave.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.time_period
1/f
"""
return self._time_period
@property
def wavelength(self):
"""
Returns wavelength of the wave.
It depends on the medium of the wave.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.wavelength
c/(f*n)
"""
return self.c/(self._frequency*self._n)
@property
def amplitude(self):
"""
Returns the amplitude of the wave.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.amplitude
A
"""
return self._amplitude
@property
def phase(self):
"""
Returns the phase angle of the wave.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.phase
phi
"""
return self._phase
@property
def speed(self):
"""
Returns the speed of travelling wave.
It is medium dependent.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.speed
c/n
"""
return self.wavelength*self._frequency
@property
def angular_velocity(self):
"""
Returns angular velocity of the wave.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.angular_velocity
2*pi*f
"""
return 2*pi*self._frequency
def equation(self, type='cosine'):
"""
Returns equation of the wave.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.optics import TWave
>>> A, phi, f = symbols('A, phi, f')
>>> w = TWave(A, f, phi)
>>> w.equation('cosine')
A*cos(2*pi*f*t + phi)
"""
if not isinstance(type, str):
raise TypeError("type can only be a string.")
if type == 'cosine':
return self._amplitude*cos(self.angular_velocity*Symbol('t') + self._phase)
def __str__(self):
"""String representation of a TWave."""
from sympy.printing import sstr
return type(self).__name__ + sstr(self.args)
__repr__ = __str__
def __add__(self, other):
"""
Addition of two waves will result in their superposition.
The type of interference will depend on their phase angles.
"""
if isinstance(other, TWave):
if self._frequency == other._frequency and self.wavelength == other.wavelength:
return TWave(sqrt(self._amplitude**2 + other._amplitude**2 + 2 *
self.amplitude*other.amplitude*cos(
self._phase - other.phase)),
self.frequency,
self._phase + other._phase
)
else:
raise TypeError(type(other).__name__ + " and TWave objects can't be added.")
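# Summary of the derived quantities implemented above (c is the symbolic speed of
# light in vacuum, n the refractive index of the medium):
#   time_period = 1/frequency
#   wavelength  = c/(frequency*n)
#   speed       = wavelength*frequency = c/n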
|
nemo/collections/nlp/modules/common/huggingface/huggingface_encoder.py | madhukarkm/NeMo | 4,145 | 12630944 | <gh_stars>1000+
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from hydra.utils import instantiate
from transformers import AutoConfig, AutoModel
from nemo.collections.nlp.modules.common.encoder_module import EncoderModule
from nemo.collections.nlp.modules.common.huggingface.huggingface_utils import get_huggingface_pretrained_lm_models_list
from nemo.core.classes.common import typecheck
from nemo.utils import logging
class HuggingFaceEncoderModule(EncoderModule):
""" Class for using HuggingFace encoders in NeMo NLP."""
def __init__(
self,
model_name: Optional[str] = None,
pretrained: bool = False,
config_dict: Optional[dict] = None,
checkpoint_file: Optional[str] = None,
):
"""Gets HuggingFace based model to be used as an Encoder in NeMo NLP.
Use the model_name arg to get a named model architecture.
Available model names can be found with get_huggingface_pretrained_lm_models_list() or
by going to https://huggingface.co/models.
Use the pretrained arg to get the named model architecture with or without pretrained weights.
If model_name is None, then we can pass in a custom configuration via the config_dict.
For example, to instantiate a HuggingFace BERT model with custom configuration we would do:
config_dict={
'_target_': 'transformers.BertConfig',
'hidden_size': 1536
}
Args:
model_name (Optional[str]): Named model architecture from HuggingFace. Defaults to None.
pretrained (bool): Use True to get pretrained weights.
False will use the same architecture but with randomly initialized weights.
Defaults to False.
config_dict (Optional[dict], optional): Use for custom configuration of the HuggingFace model. Defaults to None.
checkpoint_file (Optional[str], optional): Provide weights for the transformer from a local checkpoint. Defaults to None.
"""
super().__init__()
if checkpoint_file:
raise NotImplementedError('Restoring from checkpoint file not implemented yet.')
model = None
if model_name is not None:
if model_name in get_huggingface_pretrained_lm_models_list(include_external=True):
if pretrained:
config_dict.pop('vocab_size')
if config_dict:
raise ValueError(
f'When using pretrained model, config_dict should be None or empty. Got: {config_dict}'
)
model = AutoModel.from_pretrained(model_name)
else:
cfg = AutoConfig.from_pretrained(model_name)
model = AutoModel.from_config(cfg)
else:
logging.error(f'{model_name} not found in list of HuggingFace pretrained models')
else:
if pretrained:
raise ValueError(f'If not using model_name, then pretrained should be False. Got: {pretrained}.')
cfg = instantiate(config_dict)
model = AutoModel.from_config(cfg)
self._hidden_size = model.config.hidden_size
self._vocab_size = model.config.vocab_size
self._encoder = model
@typecheck()
def forward(self, input_ids, encoder_mask):
encoder_hidden_states = self._encoder.forward(input_ids=input_ids, attention_mask=encoder_mask)[0]
return encoder_hidden_states
@property
def hidden_size(self) -> Optional[int]:
return self._hidden_size
@property
def vocab_size(self) -> Optional[int]:
return self._vocab_size
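# Illustrative usage sketch (the model name is an example; with the default
# pretrained=False only the HuggingFace config is fetched and the weights are
# randomly initialized):
#
#   encoder = HuggingFaceEncoderModule(model_name="bert-base-uncased")
#   # input_ids / encoder_mask: token ids and attention mask, shape [batch, seq_len]
#   hidden_states = encoder(input_ids=input_ids, encoder_mask=encoder_mask)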
|
release/stubs/clr.py | htlcnn/ironpython-stubs | 182 | 12630974 | <gh_stars>100-1000
# encoding: utf-8
# module clr
# from (built-in)
# by generator 1.145
"""
Python module. Stores classes, functions, and data. Usually a module
is created by importing a file or package from disk. But a module can also
be directly created by calling the module type and providing a name or
optionally a documentation string.
module()
"""
# no imports
# Variables with simple values
IsNetStandard = False
# functions
def accepts(*types, p_object=None): # real signature unknown; restored from __doc__
"""
accepts(*types: Array[object]) -> object
accepts(*types) -> ArgChecker
Decorator that returns a new callable
object which will validate the arguments are of the specified types.
"""
return object()
def AddReference(*args, **kwargs): # real signature unknown
"""
Adds a reference to a .NET assembly. Parameters can be an already loaded
Assembly object, a full assembly name, or a partial assembly name. After the
load the assemblies namespaces and top-level types will be available via
import Namespace.
"""
pass
def AddReferenceByName(*args, **kwargs): # real signature unknown
"""
Adds a reference to a .NET assembly. Parameters are an assembly name.
After the load the assemblies namespaces and top-level types will be available via
import Namespace.
"""
pass
def AddReferenceByPartialName(*args, **kwargs): # real signature unknown
"""
Adds a reference to a .NET assembly. Parameters are a partial assembly name.
After the load the assemblies namespaces and top-level types will be available via
import Namespace.
"""
pass
def AddReferenceToFile(*args, **kwargs): # real signature unknown
"""
Adds a reference to a .NET assembly. One or more assembly names can
be provided. The assembly is searched for in the directories specified in
sys.path and dependencies will be loaded from sys.path as well. The assembly
name should be the filename on disk without a directory specifier and
optionally including the .EXE or .DLL extension. After the load the assemblies
namespaces and top-level types will be available via import Namespace.
"""
pass
def AddReferenceToFileAndPath(*args, **kwargs): # real signature unknown
"""
Adds a reference to a .NET assembly. One or more assembly names can
be provided which are fully qualified names to the file on disk. The
directory is added to sys.path and AddReferenceToFile is then called. After the
load the assemblies namespaces and top-level types will be available via
import Namespace.
"""
pass
def AddReferenceToTypeLibrary(rcw): # real signature unknown; restored from __doc__
"""
AddReferenceToTypeLibrary(rcw: object)
AddReferenceToTypeLibrary(rcw) -> None
Makes the type lib desc
available for importing. See also LoadTypeLibrary.
AddReferenceToTypeLibrary(typeLibGuid: Guid)
AddReferenceToTypeLibrary(guid) -> None
Makes the type lib desc
available for importing. See also LoadTypeLibrary.
"""
pass
def ClearProfilerData(): # real signature unknown; restored from __doc__
"""
ClearProfilerData()
Resets all profiler counters back to zero
"""
pass
def CompileModules(assemblyName, *filenames, **kwArgs): # real signature unknown; NOTE: unreliably restored from __doc__
""" CompileModules(assemblyName: str, **kwArgs: IDictionary[str, object], *filenames: Array[str]) """
pass
def CompileSubclassTypes(assemblyName, *newTypes, p_object=None): # real signature unknown; restored from __doc__
"""
CompileSubclassTypes(assemblyName: str, *newTypes: Array[object])
clr.CompileSubclassTypes(assemblyName, *typeDescription)
Provides a
helper for creating an assembly which contains pre-generated .NET
base types for
new-style types.
This assembly can then be AddReferenced or put
sys.prefix\DLLs and the cached
types will be used instead of generating the types
at runtime.
This function takes the name of the assembly to save to
and then an arbitrary
number of parameters describing the types to be created.
Each of those
parameter can either be a plain type or a sequence of base types.
clr.CompileSubclassTypes(object) -> create a base type for object
clr.CompileSubclassTypes(object, str, System.Collections.ArrayList) -> create
base types for both object and ArrayList.
clr.CompileSubclassTypes(object, (object, IComparable)) -> create base types for
object and an object which implements IComparable.
"""
pass
def Convert(o, toType): # real signature unknown; restored from __doc__
"""
Convert(o: object, toType: Type) -> object
Attempts to convert the provided object to the specified type. Conversions that
will be attempted include standard Python conversions as well as .NET implicit
and
explicit conversions.
If the conversion cannot be performed a
TypeError will be raised.
"""
return object()
def Deserialize(serializationFormat, data): # real signature unknown; restored from __doc__
"""
Deserialize(serializationFormat: str, data: str) -> object
Deserializes the result of a Serialize call. This can be used to perform serialization
for .NET types which are serializable. This method is the callable object provided
from __reduce_ex__ for .serializable .NET types.
The first
parameter indicates the serialization format and is the first tuple element
returned from the Serialize call.
The second parameter is the
serialized data.
"""
return object()
def Dir(o): # real signature unknown; restored from __doc__
"""
Dir(o: object) -> list
returns the result of dir(o) as-if "import clr" has not been performed.
"""
return []
def DirClr(o): # real signature unknown; restored from __doc__
"""
DirClr(o: object) -> list
Returns the result of dir(o) as-if "import clr" has been performed.
"""
return []
def EnableProfiler(enable): # real signature unknown; restored from __doc__
"""
EnableProfiler(enable: bool)
Enable or disable profiling for the current ScriptEngine. This will only affect code
that is compiled after the setting is changed; previously-compiled code will retain
whatever setting was active when the code was originally compiled.
The easiest way to recompile a module is to reload() it.
"""
pass
def GetBytes(*args, **kwargs): # real signature unknown
""" Converts a string to an array of bytesConverts maxCount of a string to an array of bytes """
pass
def GetClrType(type): # real signature unknown; restored from __doc__
"""
GetClrType(type: Type) -> Type
Gets the CLR Type object from a given Python type object.
"""
pass
def GetCurrentRuntime(): # real signature unknown; restored from __doc__
"""
GetCurrentRuntime() -> ScriptDomainManager
Gets the current ScriptDomainManager that IronPython is loaded into. The
ScriptDomainManager can then be used to work with the language portion of the
DLR
hosting APIs.
"""
pass
def GetDynamicType(t): # real signature unknown; restored from __doc__
"""
GetDynamicType(t: Type) -> type
OBSOLETE: Gets the Python type object from a given CLR Type object.
Use clr.GetPythonType instead.
"""
return type(*(), **{})
def GetProfilerData(includeUnused): # real signature unknown; restored from __doc__
"""
GetProfilerData(includeUnused: bool) -> tuple
Returns a list of profile data. The values are tuples of Profiler.Data objects
All times are expressed in the same unit of measure as DateTime.Ticks
"""
return ()
def GetPythonType(t): # real signature unknown; restored from __doc__
"""
GetPythonType(t: Type) -> type
Gets the Python type object from a given CLR Type object.
"""
return type(*(), **{})
def GetString(*args, **kwargs): # real signature unknown
""" Converts an array of bytes to a string.Converts maxCount of an array of bytes to a string """
pass
def GetSubclassedTypes(): # real signature unknown; restored from __doc__
"""
GetSubclassedTypes() -> tuple
clr.GetSubclassedTypes() -> tuple
Returns a tuple of information
about the types which have been subclassed.
This tuple can be passed
to clr.CompileSubclassTypes to cache these
types on disk such as:
clr.CompileSubclassTypes('assembly', *clr.GetSubclassedTypes())
"""
return ()
def ImportExtensions(type): # real signature unknown; restored from __doc__
""" ImportExtensions(type: type)ImportExtensions(namespace: namespace#) """
pass
def LoadAssemblyByName(*args, **kwargs): # real signature unknown
"""
Loads an assembly from the specified assembly name and returns the assembly
object. Namespaces or types in the assembly can be accessed directly from
the assembly object.
"""
pass
def LoadAssemblyByPartialName(*args, **kwargs): # real signature unknown
"""
Loads an assembly from the specified partial assembly name and returns the
assembly object. Namespaces or types in the assembly can be accessed directly
from the assembly object.
"""
pass
def LoadAssemblyFromFile(*args, **kwargs): # real signature unknown
"""
Loads an assembly from the specified filename and returns the assembly
object. Namespaces or types in the assembly can be accessed directly from
the assembly object.
"""
pass
def LoadAssemblyFromFileWithPath(*args, **kwargs): # real signature unknown
"""
Adds a reference to a .NET assembly. Parameters are a full path to an.
assembly on disk. After the load the assemblies namespaces and top-level types
will be available via import Namespace.
"""
pass
def LoadTypeLibrary(rcw): # real signature unknown; restored from __doc__
"""
LoadTypeLibrary(rcw: object) -> ComTypeLibInfo
LoadTypeLibrary(rcw) -> type lib desc
Gets an ITypeLib object from
OLE Automation compatible RCW ,
reads definitions of CoClass'es and Enum's from
this library
and creates an object that allows to instantiate coclasses
and get actual values for the enums.
LoadTypeLibrary(typeLibGuid: Guid) -> ComTypeLibInfo
LoadTypeLibrary(guid) -> type lib desc
Reads the latest registered
type library for the corresponding GUID,
reads definitions of CoClass'es and Enum's
from this library
and creates a IDynamicMetaObjectProvider that allows to
instantiate coclasses
and get actual values for the enums.
"""
pass
def returns(type): # real signature unknown; restored from __doc__
"""
returns(type: object) -> object
returns(type) -> ReturnChecker
Returns a new callable object which
will validate the return type is of the specified type.
"""
return object()
def Self(): # real signature unknown; restored from __doc__
""" Self() -> object """
return object()
def Serialize(self): # real signature unknown; restored from __doc__
"""
Serialize(self: object) -> tuple
Serializes data using the .NET serialization formatter for complex
types. Returns
a tuple identifying the serialization format and the serialized
data which can be
fed back into clr.Deserialize.
Current serialization formats include
custom formats for primitive .NET
types which aren't already recognized as tuples.
None is used to indicate
that the Binary .NET formatter is used.
"""
return ()
def SetCommandDispatcher(dispatcher, Action=None): # real signature unknown; restored from __doc__
""" SetCommandDispatcher(dispatcher: Action[Action]) -> Action[Action] """
pass
def Use(name): # real signature unknown; restored from __doc__
"""
Use(name: str) -> object
Use(name) -> module
Attempts to load the specified module searching
all languages in the loaded ScriptRuntime.
Use(path: str, language: str) -> object
Use(path, language) -> module
Attempts to load the specified module
belonging to a specific language loaded into the
current ScriptRuntime.
"""
return object()
# classes
class ArgChecker(object):
""" ArgChecker(prms: Array[object]) """
def __call__(self, *args): #cannot find CLR method
""" x.__call__(...) <==> x(...) """
pass
@staticmethod # known case of __new__
def __new__(self, prms):
""" __new__(cls: type, prms: Array[object]) """
pass
class StrongBox(object, IStrongBox):
"""
StrongBox[T]()
StrongBox[T](value: T)
"""
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod # known case of __new__
def __new__(self, value=None):
"""
__new__(cls: type)
__new__(cls: type, value: T)
"""
pass
def __repr__(self, *args): #cannot find CLR method
""" __repr__(self: object) -> str """
pass
Value = None
Reference = StrongBox
class ReferencesList(List[Assembly], IList[Assembly], ICollection[Assembly], IEnumerable[Assembly], IEnumerable, IList, ICollection, IReadOnlyList[Assembly], IReadOnlyCollection[Assembly], ICodeFormattable):
""" ReferencesList() """
def Add(self, *__args):
""" Add(self: ReferencesList, other: Assembly) """
pass
def __add__(self, *args): #cannot find CLR method
""" x.__add__(y) <==> x+yx.__add__(y) <==> x+y """
pass
def __getitem__(self, *args): #cannot find CLR method
""" x.__getitem__(y) <==> x[y] """
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self, *args): #cannot find CLR method
""" __iter__(self: IEnumerable) -> object """
pass
def __repr__(self, context):
""" __repr__(self: ReferencesList) -> str """
pass
def __setitem__(self, *args): #cannot find CLR method
""" x.__setitem__(i, y) <==> x[i]= """
pass
class ReturnChecker(object):
""" ReturnChecker(returnType: object) """
def __call__(self, *args): #cannot find CLR method
""" x.__call__(...) <==> x(...) """
pass
@staticmethod # known case of __new__
def __new__(self, returnType):
""" __new__(cls: type, returnType: object) """
pass
retType = None
class RuntimeArgChecker(PythonTypeSlot):
"""
RuntimeArgChecker(function: object, expectedArgs: Array[object])
RuntimeArgChecker(instance: object, function: object, expectedArgs: Array[object])
"""
def __call__(self, *args): #cannot find CLR method
""" x.__call__(...) <==> x(...)x.__call__(...) <==> x(...) """
pass
@staticmethod # known case of __new__
def __new__(self, *__args):
"""
__new__(cls: type, function: object, expectedArgs: Array[object])
__new__(cls: type, instance: object, function: object, expectedArgs: Array[object])
"""
pass
class RuntimeReturnChecker(PythonTypeSlot):
"""
RuntimeReturnChecker(function: object, expectedReturn: object)
RuntimeReturnChecker(instance: object, function: object, expectedReturn: object)
"""
def GetAttribute(self, instance, owner):
""" GetAttribute(self: RuntimeReturnChecker, instance: object, owner: object) -> object """
pass
def __call__(self, *args): #cannot find CLR method
""" x.__call__(...) <==> x(...)x.__call__(...) <==> x(...) """
pass
@staticmethod # known case of __new__
def __new__(self, *__args):
"""
__new__(cls: type, function: object, expectedReturn: object)
__new__(cls: type, instance: object, function: object, expectedReturn: object)
"""
pass
# variables with complex values
References = None
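# Illustrative usage sketch (IronPython / .NET only; the assembly and type below
# are common examples, not requirements):
#
#   import clr
#   clr.AddReference("System.Xml")
#   from System.Xml import XmlDocument
#   doc = XmlDocument()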
|
torchtoolbox/objects/__init__.py | deeplearningforfun/torch-tools | 353 | 12630997 | from .bbox import *
|
ask-sdk-local-debug/tests/unit/test_serializer.py | nikhilym/alexa-skills-kit-sdk-for-python | 496 | 12631053 | # -*- coding: utf-8 -*-
#
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights
# Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the
# License.
#
import unittest
from ask_sdk_local_debug.util.serializer import Serializer
class TestSerializer(unittest.TestCase):
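    # The tests below pin down the intended usage: obtain the serializer via
    #   serializer = Serializer.get_instance()
    # calling Serializer() directly raises TypeError.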
def test_singleton_serializer(self):
with self.assertRaises(TypeError) as exc:
_test_serializer = Serializer()
self.assertIn("Singletons must be accessed through get_instance()",
str(exc.exception),
"Serializer Singleton class didn't throw exception for "
"Constructor instantiation.")
def test_singleton_serializer_instances(self):
test_serializer_1 = Serializer.get_instance()
test_serializer_2 = Serializer.get_instance()
self.assertIsInstance(test_serializer_1, Serializer)
self.assertIsInstance(test_serializer_2, Serializer)
self.assertIs(test_serializer_1, test_serializer_2,
"Serializer get_instance() did not return the same "
"singleton instance.")
|
Python/CompareTriplets/main.py | cs-mshah/AlgoCode | 151 | 12631060 | # Autor: <NAME>(TrebolDan)
# Reading input lines as integers (comparing the raw strings would rank "9" above "10")
a = [int(x) for x in input().split()]
b = [int(x) for x in input().split()]
# score[0] -> A's points, score[1] -> B's points
score = [0, 0]
# Comparing triplets; example: a = 5 6 7, b = 3 6 10 prints "1 1"
for i in range(3):
    if a[i] != b[i]:  # If equal, neither one gets a point
        if a[i] > b[i]:
            score[0] += 1  # A bigger than B
        else:
            score[1] += 1  # B bigger than A
# Printing out the score as two space-separated integers
print(*score) |
venv/lib/python3.8/site-packages/oauth2/test/__init__.py | wjone005/Netflix_Tinder | 120 | 12631076 | <reponame>wjone005/Netflix_Tinder
import sys
# Enables unit tests to work under Python 2.6
# Code copied from
# https://github.com/facebook/tornado/blob/master/tornado/test/util.py
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
|
muddery/server/quests/quest_status/not_accomplished.py | dongwudanci/muddery | 127 | 12631082 | <gh_stars>100-1000
"""
Quest status.
"""
from muddery.server.quests.base_quest_status import BaseQuestStatus
from muddery.server.utils.localized_strings_handler import _
class NotAccomplished(BaseQuestStatus):
"""
The quest's objectives are not accomplished.
"""
key = "NOT_ACCOMPLISHED"
name = _("Objectives Not Accomplished", category="quest_status")
def match(self, caller, quest_key):
"""
Check.
"""
if not caller:
return False
return caller.quest_handler.is_in_progress(quest_key) and \
not caller.quest_handler.is_accomplished(quest_key)
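# Illustrative check (the quest key is an example): NotAccomplished().match(character,
# "find_the_sword") is True only while the quest is in progress and its objectives
# are still unmet.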
|
cx_Freeze/samples/relimport/pkg1/pkg2/sub5.py | lexa/cx_Freeze | 358 | 12631083 | <reponame>lexa/cx_Freeze
print("importing pkg1.pkg2.sub5")
|
src/main/python/ecir2019_axiomatic/run_batch.py | kasys-lab/anserini | 626 | 12631088 | # -*- coding: utf-8 -*-
#
# Anserini: A toolkit for reproducible information retrieval research built on Lucene
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import argparse
from multiprocessing import Pool
import json
import logging
import yaml
from search import Search
from evaluation import Evaluation
from effectiveness import Effectiveness
from coverage import Coverage
logger = logging.getLogger('ecir2019_axiomatic')
logger.setLevel(logging.INFO)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
# add the handlers to the logger
#logger.addHandler(ch)
parallelism=1
def batch_everything(all_params, func):
if len(all_params) == 0:
return
p = Pool(min(parallelism, len(all_params)))
p.map(func, all_params)
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def get_index_path(yaml_data):
"""Find the index path."""
for index_root in yaml_data['index_roots']:
if os.path.exists(os.path.join(index_root, yaml_data['index_path'])):
index_path = os.path.join(index_root, yaml_data['index_path'])
break
return index_path
def batch_retrieval(collection_yaml, models_yaml, output_root, random = False, dry_run = False):
all_params = []
program = os.path.join(collection_yaml['anserini_root'], 'target/appassembler/bin', 'SearchCollection')
index_path = get_index_path(collection_yaml)
this_output_root = os.path.join(output_root, collection_yaml['name'])
logger.info('='*10+'Generating Batch Retrieval Parameters'+'='*10)
if random:
if collection_yaml['name'] == 'disk12' or collection_yaml['name'] == 'robust04' or collection_yaml['name'] == 'robust05' or collection_yaml['name'] == 'core17':
beta = 0.5
elif collection_yaml['name'] == 'mb11' or collection_yaml['name'] == 'mb13':
beta = 1.0
else: # Web collections
beta = 0.1
model_params = Search(index_path).gen_random_batch_retrieval_params(models_yaml, this_output_root, beta, parallelism)
else:
model_params = Search(index_path).gen_batch_retrieval_params(models_yaml, this_output_root, parallelism)
for para in model_params:
this_para = (
program,
'-searchtweets' if 'mb' in collection_yaml['name'] else '',
'-topicreader', collection_yaml['topic_reader'],
'-index', index_path,
'-topics', ' '.join([os.path.join(collection_yaml['anserini_root'], collection_yaml['topic_root'], topic) for topic in collection_yaml['topics']]),
'-{}'.format(para[0]),
para[1],
'-output', para[2]
)
all_params.append(this_para)
logger.info('='*10+'Starting Batch Retrieval'+'='*10)
if dry_run:
for params in all_params:
logger.info(' '.join(params))
else:
batch_everything(all_params, atom_retrieval)
def atom_retrieval(para):
subprocess.call(' '.join(para), shell=True)
def batch_eval(collection_yaml, models_yaml, output_root, dry_run = False):
all_params = []
index_path = get_index_path(collection_yaml)
this_output_root = os.path.join(output_root, collection_yaml['name'])
for eval in collection_yaml['evals']:
eval_params = Evaluation(index_path).gen_batch_eval_params(this_output_root, eval['metric'])
for param in eval_params:
run_file_path, eval_output = param
this_para = (
[os.path.join(collection_yaml['anserini_root'], eval['command']+' '+eval['params'])],
os.path.join(collection_yaml['anserini_root'], collection_yaml['qrels_root'], collection_yaml['qrel']),
run_file_path,
eval_output
)
all_params.append(this_para)
logger.info('='*10+'Starting Batch Evaluation'+'='*10)
if dry_run:
for params in all_params:
logger.info(params)
else:
batch_everything(all_params, atom_eval)
def atom_eval(params):
Evaluation.output_all_evaluations(*params)
def batch_output_effectiveness(collection_yaml, models_yaml, output_root, random = False):
index_path = get_index_path(collection_yaml)
this_output_root = os.path.join(output_root, collection_yaml['name'])
logger.info('='*10+'Starting Output Effectiveness'+'='*10)
Effectiveness(index_path).output_effectiveness(this_output_root, random)
def plot(collection_yaml, output_root, metrics, random = False):
this_output_root = os.path.join(output_root, collection_yaml['name'])
if random:
if collection_yaml['name'] == 'disk12' or collection_yaml['name'] == 'robust04' or collection_yaml['name'] == 'robust05' or collection_yaml['name'] == 'core17':
beta = 0.5
elif collection_yaml['name'] == 'mb11' or collection_yaml['name'] == 'mb13':
beta = 1.0
else: # Web collections
beta = 0.1
Plots().plot_random_seeds(collection_yaml['name'], this_output_root, beta, metrics)
else:
Plots().plot_params_sensitivity(collection_yaml['name'], this_output_root, metrics)
def cal_coverage(collection_yaml, model_yaml, output_root):
index_path = get_index_path(collection_yaml)
this_output_root = os.path.join(output_root, collection_yaml['name'])
Coverage(index_path).cal_coverage(
model_yaml,
os.path.join(collection_yaml['anserini_root'], collection_yaml['qrels_root'], collection_yaml['qrel']),
this_output_root
)
def plot_coverage(collection_yaml, output_root, metrics=None):  # metrics accepted so the call in __main__ can pass args.metrics; unused here
this_output_root = os.path.join(output_root, collection_yaml['name'])
Plots().plot_coverage(collection_yaml['name'], this_output_root)
def plot_per_topic_analysis(collection_yaml, output_root):
this_output_root = os.path.join(output_root, collection_yaml['name'])
Plots().plot_per_topic_analysis(collection_yaml['name'], this_output_root)
def concatenate_qrels():
filenames = ['src/main/resources/topics-and-qrels/qrels.51-100.txt',
'src/main/resources/topics-and-qrels/qrels.101-150.txt',
'src/main/resources/topics-and-qrels/qrels.151-200.txt']
with open('src/main/resources/topics-and-qrels/qrels.disk12.all.txt', 'w') as outfile:
for fname in filenames:
with open(fname) as infile:
outfile.write(infile.read())
filenames = ['src/main/resources/topics-and-qrels/qrels.701-750.txt',
'src/main/resources/topics-and-qrels/qrels.751-800.txt',
'src/main/resources/topics-and-qrels/qrels.801-850.txt']
with open('src/main/resources/topics-and-qrels/qrels.gov2.all.txt', 'w') as outfile:
for fname in filenames:
with open(fname) as infile:
outfile.write(infile.read())
filenames = ['src/main/resources/topics-and-qrels/qrels.web.51-100.txt',
'src/main/resources/topics-and-qrels/qrels.web.101-150.txt',
'src/main/resources/topics-and-qrels/qrels.web.151-200.txt']
with open('src/main/resources/topics-and-qrels/qrels.cw09.all.txt', 'w') as outfile:
for fname in filenames:
with open(fname) as infile:
outfile.write(infile.read())
filenames = ['src/main/resources/topics-and-qrels/qrels.web.201-250.txt',
'src/main/resources/topics-and-qrels/qrels.web.251-300.txt']
with open('src/main/resources/topics-and-qrels/qrels.cw12.all.txt', 'w') as outfile:
for fname in filenames:
with open(fname) as infile:
outfile.write(infile.read())
filenames = ['src/main/resources/topics-and-qrels/qrels.microblog2011.txt',
'src/main/resources/topics-and-qrels/qrels.microblog2012.txt']
with open('src/main/resources/topics-and-qrels/qrels.mb11.all.txt', 'w') as outfile:
for fname in filenames:
with open(fname) as infile:
outfile.write(infile.read())
filenames = ['src/main/resources/topics-and-qrels/qrels.microblog2013.txt',
'src/main/resources/topics-and-qrels/qrels.microblog2014.txt']
with open('src/main/resources/topics-and-qrels/qrels.mb13.all.txt', 'w') as outfile:
for fname in filenames:
with open(fname) as infile:
outfile.write(infile.read())
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# general settings
parser.add_argument('--anserini_root', default='', help='Anserini path')
parser.add_argument('--run', action='store_true', help='Generate the runs files and evaluate them. Otherwise we only output the evaluation results (based on the existing eval files)')
parser.add_argument('--random', action='store_true', help='Generate the random seed runs files and evaluate them. Otherwise we only output the evaluation results (based on the existing eval files)')
parser.add_argument('--plot', action='store_true', help='Plot the parameters sensitivity from performances CSV file')
parser.add_argument('--collection', required=True, help='the collection key in yaml')
parser.add_argument('--models', nargs='+', default='bm25', help='the list of base ranking models, choose from [bm25, ql, f2exp] (any ones or all of them)')
parser.add_argument('--n', dest='parallelism', type=int, default=16, help='number of parallel threads for retrieval/eval')
parser.add_argument('--output_root', default='ecir2019_axiomatic', help='output directory of all results')
parser.add_argument('--dry_run', action='store_true', help='dry run the commands without actually running them')
parser.add_argument('--cal_coverage', action='store_true', help='calculate the qrels coverage')
parser.add_argument('--per_topic_analysis', action='store_true', help='plot the per-topic analysis figures')
# runtime
parser.add_argument(
"--metrics",
nargs='+',
default=['map'],
help="inputs: [metrics]. For example, --metrics map ndcg20"
)
args = parser.parse_args()
# concatenate qrels together for easier evaluation
concatenate_qrels()
parallelism = args.parallelism
resources_root = 'src/main/resources/ecir2019_axiomatic/'
with open(os.path.join(args.anserini_root, resources_root, 'collections.yaml')) as f:
collections_yaml = yaml.safe_load(f)
with open(os.path.join(args.anserini_root, resources_root, 'models.yaml')) as f:
models_yaml = yaml.safe_load(f)
collection_yaml = collections_yaml['collections'][args.collection]
for k in collections_yaml:
if k != 'collections':
collection_yaml[k] = collections_yaml[k]
collection_yaml['anserini_root'] = args.anserini_root
if not os.path.exists(os.path.join(args.output_root, collection_yaml['name'])):
os.makedirs(os.path.join(args.output_root, collection_yaml['name']))
models_yaml['models'] = args.models
if args.run:
batch_retrieval(collection_yaml, models_yaml, args.output_root, args.random, args.dry_run)
batch_eval(collection_yaml, models_yaml, args.output_root, args.dry_run)
batch_output_effectiveness(collection_yaml, models_yaml, args.output_root, args.random)
if args.cal_coverage:
cal_coverage(collection_yaml, models_yaml, args.output_root)
if args.plot:
from plots import Plots
if args.cal_coverage:
plot_coverage(collection_yaml, args.output_root, args.metrics)
else:
plot(collection_yaml, args.output_root, args.metrics, args.random)
if args.per_topic_analysis:
from plots import Plots
plot_per_topic_analysis(collection_yaml, args.output_root)
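    # Illustrative invocation (collection key, models and paths are examples; see the
    # argparse options above for the full list):
    #   python src/main/python/ecir2019_axiomatic/run_batch.py \
    #       --collection robust04 --models bm25 ql --metrics map \
    #       --anserini_root /path/to/anserini --run --n 16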
|
src/arcrest/enrichment/__init__.py | Esri/ArcREST | 208 | 12631094 | <reponame>Esri/ArcREST<filename>src/arcrest/enrichment/__init__.py
"""
The GeoEnrichment service provides the ability to get facts about a
location or area. Using GeoEnrichment, you can get information about the
people, places, and businesses in a specific area or within a certain
distance or drive time from a location. More specifically, by submitting a
point or polygon to the GeoEnrichment service, you can retrieve the
demographics and other relevant characteristics associated with the
surrounding area. You can also use the geoenrichment service to obtain
additional geographic context (for example, the ZIP Code of a location) and
geographic boundaries (for example, the geometry for a drive-time service
area). Currently, the service is available for Canada, the United States,
and a number of European countries. Other countries will be added in the
near future.
This service enables you to answer questions about locations that you can't
answer with maps alone. For example: What kind of people live here? What do
people like to do in this area? What are their habits and lifestyles? What
kind of businesses are in this area?
Site analysis is a popular application of this type of data enrichment. For
example, the GeoEnrichment service can be leveraged to study the population
that would be affected by the development of a new community center within
their neighborhood. With the service, the proposed site can be submitted,
and the demographics and other relevant characteristics associated with the
area around the site will be returned.
"""
from __future__ import absolute_import
from ._geoenrichment import GeoEnrichment
__version__ = "3.5.9" |
cords/utils/data/datasets/SSL/augmentation/augmentation_pool.py | krishnatejakk/AUTOMATA | 185 | 12631120 | import random
import torch
import torch.nn.functional as F
import numpy as np
from PIL import ImageOps, ImageEnhance, ImageFilter, Image
"""
For PIL.Image
"""
def autocontrast(x, *args, **kwargs):
return ImageOps.autocontrast(x.convert("RGB")).convert("RGBA")
def brightness(x, level, magnitude=10, max_level=1.8, *args, **kwargs):
level = (level / magnitude) * max_level + 0.1
return ImageEnhance.Brightness(x).enhance(level)
def color(x, level, magnitude=10, max_level=1.8, *args, **kwargs):
level = (level / magnitude) * max_level + 0.1
return ImageEnhance.Color(x).enhance(level)
def contrast(x, level, magnitude=10, max_level=1.8, *args, **kwargs):
level = (level / magnitude) * max_level + 0.1
return ImageEnhance.Contrast(x).enhance(level)
def equalize(x, *args, **kwargs):
return ImageOps.equalize(x.convert("RGB")).convert("RGBA")
def identity(x, *args, **kwargs):
return x
def invert(x, *args, **kwargs):
return ImageOps.invert(x.convert("RGB")).convert("RGBA")
def posterize(x, level, magnitude=10, max_level=4, *args, **kwargs):
level = int((level / magnitude) * max_level)
return ImageOps.posterize(x.convert("RGB"), 4 - level).convert("RGBA")
def rotate(x, level, magnitude=10, max_level=30, *args, **kwargs):
degree = int((level / magnitude) * max_level)
if random.random() > 0.5:
degree = -degree
return x.rotate(degree)
def sharpness(x, level, magnitude=10, max_level=1.8, *args, **kwargs):
level = (level / magnitude) * max_level + 0.1
return ImageEnhance.Sharpness(x).enhance(level)
def shear_x(x, level, magnitude=10, max_level=0.3, *args, **kwargs):
level = (level / magnitude) * max_level
if random.random() > 0.5:
level = -level
return x.transform(x.size, Image.AFFINE, (1, level, 0, 0, 1, 0))
def shear_y(x, level, magnitude=10, max_level=0.3, *args, **kwargs):
level = (level / magnitude) * max_level
if random.random() > 0.5:
level = -level
return x.transform(x.size, Image.AFFINE, (1, 0, 0, level, 1, 0))
def solarize(x, level, magnitude=10, max_level=256, *args, **kwargs):
level = int((level / magnitude) * max_level)
return ImageOps.solarize(x.convert("RGB"), 256 - level).convert("RGBA")
def translate_x(x, level, magnitude=10, max_level=10, *args, **kwargs):
level = int((level / magnitude) * max_level)
if random.random() > 0.5:
level = -level
return x.transform(x.size, Image.AFFINE, (1, 0, level, 0, 1, 0))
def translate_y(x, level, magnitude=10, max_level=10, *args, **kwargs):
level = int((level / magnitude) * max_level)
if random.random() > 0.5:
level = -level
return x.transform(x.size, Image.AFFINE, (1, 0, 0, 0, 1, level))
def cutout(x, level, magnitude=10, max_level=20, *args, **kwargs):
size = int((level / magnitude) * max_level)
if size <= 0:
return x
w, h = x.size
upper_coord, lower_coord = _gen_cutout_coord(h, w, size)
pixels = x.load()
for i in range(upper_coord[0], lower_coord[0]):
for j in range(upper_coord[1], lower_coord[1]):
pixels[i, j] = (127, 127, 127, 0)
return x
def _gen_cutout_coord(height, width, size):
height_loc = random.randint(0, height - 1)
width_loc = random.randint(0, width - 1)
upper_coord = (max(0, height_loc - size // 2),
max(0, width_loc - size // 2))
lower_coord = (min(height, height_loc + size // 2),
min(width, width_loc + size // 2))
return upper_coord, lower_coord
"""
For torch.Tensor
"""
class TorchCutout:
def __init__(self, size=16):
self.size = size
def __call__(self, img):
h, w = img.shape[-2:]
upper_coord, lower_coord = _gen_cutout_coord(h, w, self.size)
mask_height = lower_coord[0] - upper_coord[0]
mask_width = lower_coord[1] - upper_coord[1]
assert mask_height > 0
assert mask_width > 0
mask = torch.ones_like(img)
zeros = torch.zeros((img.shape[0], mask_height, mask_width))
mask[:, upper_coord[0]:lower_coord[0], upper_coord[1]:lower_coord[1]] = zeros
return img * mask
def __repr__(self):
return f"TorchCutout(size={self.size})"
class GaussianNoise:
def __init__(self, std=0.15):
self.std = std
def __call__(self, x):
with torch.no_grad():
return x + torch.randn_like(x) * self.std
def __repr__(self):
return f"GaussianNoise(std={self.std})"
class BatchRandomFlip:
def __init__(self, flip_prob=0.5):
self.p = flip_prob
def __call__(self, x):
with torch.no_grad():
return torch.stack([
torch.flip(img, (-1,))
if random.random() > self.p
else img
for img in x
], 0)
def __repr__(self):
return f"BatchRandomFlip(flip_prob={self.p})"
class RandomFlip:
def __init__(self, flip_prob=0.5):
self.p = flip_prob
def __call__(self, x):
if random.random() > self.p:
return torch.flip(x, (-1,))
return x
def __repr__(self):
return f"RandomFlip(flip_prob={self.p})"
class BatchRandomCrop:
def __init__(self, padding=4):
self.pad = padding
def __call__(self, x):
with torch.no_grad():
b, _, h, w = x.shape
x = F.pad(x, [self.pad for _ in range(4)], mode="reflect")
left, top = torch.randint(0, 1+self.pad*2, (b,)), torch.randint(0, 1+self.pad*2, (b,))
return torch.stack([
img[..., t:t+h, l:l+w]
for img, t, l in zip(x, left, top)
], 0)
def __repr__(self):
return f"BatchRandomCrop(padding={self.pad})"
class RandomCrop:
def __init__(self, padding=4):
self.pad = padding
def __call__(self, x):
with torch.no_grad():
_, h, w = x.shape
x = F.pad(x[None], [self.pad for _ in range(4)], mode="reflect")
left, top = random.randint(0, self.pad*2), random.randint(0, self.pad*2)
return x[0, :, top:top+h, left:left+w]
def __repr__(self):
return f"RandomCrop(padding={self.pad})"
class ZCA:
def __init__(self, mean, scale):
self.mean = torch.from_numpy(mean).float()
self.scale = torch.from_numpy(scale).float()
def __call__(self, x):
c, h, w = x.shape
x = x.reshape(-1)
x = (x - self.mean) @ self.scale
return x.reshape(c, h, w)
def __repr__(self):
return f"ZCA()"
class GCN:
"""global contrast normalization"""
def __init__(self, multiplier=55, eps=1e-10):
self.multiplier = multiplier
self.eps = eps
def __call__(self, x):
x -= x.mean()
norm = x.norm(2)
norm[norm < self.eps] = 1
return self.multiplier * x / norm
def __repr__(self):
return f"GCN(multiplier={self.multiplier}, eps={self.eps})"
"""
For numpy.array
"""
def numpy_batch_gcn(images, multiplier=55, eps=1e-10):
# global contrast normalization
    images = images.astype(np.float64)
images -= images.mean(axis=(1,2,3), keepdims=True)
per_image_norm = np.sqrt(np.square(images).sum((1,2,3), keepdims=True))
per_image_norm[per_image_norm < eps] = 1
return multiplier * images / per_image_norm
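if __name__ == "__main__":
    # Smoke-test sketch for the transforms above (illustrative only, not part of the original
    # module; the image size and augmentation levels are arbitrary choices).
    pil_img = Image.new("RGBA", (32, 32), (128, 64, 32, 255))
    pil_out = cutout(rotate(autocontrast(pil_img), level=7), level=5)
    print("PIL:", pil_out.size, pil_out.mode)
    tensor_img = torch.rand(3, 32, 32)
    for transform in (RandomFlip(0.5), RandomCrop(padding=4), TorchCutout(size=8), GaussianNoise(std=0.15)):
        tensor_img = transform(tensor_img)
    print("Tensor:", tuple(tensor_img.shape))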
|
liminal/core/config/config.py | ZionCervello/incubator-liminal | 107 | 12631122 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os
import traceback
from liminal.core import environment
from liminal.core.config.defaults import base, default_configs
from liminal.core.util import dict_util, files_util
class ConfigUtil:
"""
Load and enrich config files under configs_path.
"""
__BASE = 'base'
__PIPELINES = 'pipelines'
__SUPER = 'super'
__TYPE = 'type'
__SUB = 'sub'
__SERVICES = 'services'
__TASKS = 'tasks'
__PIPELINE_DEFAULTS = 'pipeline_defaults'
__TASK_DEFAULTS = 'task_defaults'
__BEFORE_TASKS = 'before_tasks'
__AFTER_TASKS = 'after_tasks'
__EXECUTORS = 'executors'
__IMAGES = 'images'
__BASE = "base"
__PIPELINES = "pipelines"
__SUPER = "super"
__TYPE = "type"
__SUB = "sub"
__SERVICES = "services"
__TASKS = "tasks"
__PIPELINE_DEFAULTS = "pipeline_defaults"
__TASK_DEFAULTS = "task_defaults"
__BEFORE_TASKS = "before_tasks"
__AFTER_TASKS = "after_tasks"
__EXECUTORS = "executors"
def __init__(self, configs_path):
self.configs_path = configs_path
self.config_files = files_util.load(configs_path)
self.base = base.BASE
self.loaded_subliminals = []
self.snapshot_path = os.path.join(environment.get_airflow_home_dir(), '../liminal_config_files')
def safe_load(self, is_render_variables, soft_merge=False):
"""
:returns list of config files after enrich with defaults and supers
"""
if self.loaded_subliminals:
return self.loaded_subliminals
configs = self.config_files.values()
enriched_configs = []
for subliminal in [config for config in configs if self.__is_subliminal(config)]:
name = subliminal.get('name')
logging.info(f'Loading yml {name}')
# noinspection PyBroadException
try:
superliminal = self.__get_superliminal(subliminal, soft_merge)
enriched_config = self.__merge_configs(subliminal, superliminal, is_render_variables, soft_merge)
enriched_configs.append(enriched_config)
except Exception:
logging.error(f'Failed to load yml {name}')
traceback.print_exc()
self.loaded_subliminals = enriched_configs
return self.loaded_subliminals
def __merge_configs(self, subliminal, superliminal, is_render_variables, soft_merge):
if not superliminal:
return subliminal
sub = subliminal.copy()
supr = superliminal.copy()
merged_superliminal = self.__merge_configs(
supr, self.__get_superliminal(supr, soft_merge), is_render_variables, soft_merge
)
sub[self.__EXECUTORS] = self.__merge_section(sub, merged_superliminal, self.__EXECUTORS)
sub[self.__IMAGES] = self.__merge_section(sub, merged_superliminal, self.__IMAGES)
if self.__is_subliminal(sub):
return self.__merge_sub_and_super(sub, merged_superliminal, is_render_variables)
else:
return self.__merge_superliminals(sub, merged_superliminal)
def __get_superliminal(self, liminal, soft_merge):
superliminal = {}
if not self.__is_base_config(liminal):
superliminal_name = liminal.get(self.__SUPER, '')
if not superliminal_name:
superliminal = self.base
else:
superliminal = self.__get_config(superliminal_name)
if not superliminal:
supr_is_missing_msg = (
f"superliminal '{superliminal_name}' " + f"is missing from '{self.configs_path}'"
)
if soft_merge:
logging.warning(supr_is_missing_msg)
else:
raise FileNotFoundError(supr_is_missing_msg)
return superliminal
def __get_base_config(self):
return self.base
def __is_base_config(self, config):
return config.get('name', '') == self.__BASE
def __is_subliminal(self, config):
is_subliminal = config.get(self.__TYPE, self.__SUB) != self.__SUPER
if is_subliminal:
config[self.__TYPE] = self.__SUB
return is_subliminal
def __get_config(self, config_name):
return self.config_files.get(config_name)
def __merge_sub_and_super(self, sub, supr, is_render_variables):
merged_pipelines = list()
for pipeline in sub.get(self.__PIPELINES, {}):
final_pipeline = self.__apply_pipeline_defaults(sub, supr, pipeline)
merged_pipelines.append(final_pipeline)
sub[self.__PIPELINES] = merged_pipelines
sub[self.__SERVICES] = default_configs.apply_service_defaults(sub, supr)
sub = dict_util.merge_dicts(supr.copy(), sub)
return default_configs.apply_variable_substitution(sub, supr, is_render_variables)
def __merge_superliminals(self, super1, super2):
super1_pipeline_defaults = super1.get(self.__PIPELINE_DEFAULTS, {}).copy()
super2_pipeline_defaults = super2.get(self.__PIPELINE_DEFAULTS, {}).copy()
super1[self.__PIPELINE_DEFAULTS] = super1_pipeline_defaults
super1[self.__PIPELINE_DEFAULTS][self.__BEFORE_TASKS] = super2_pipeline_defaults.pop(
self.__BEFORE_TASKS, []
) + super1_pipeline_defaults.pop(self.__BEFORE_TASKS, [])
super2[self.__PIPELINE_DEFAULTS] = super2_pipeline_defaults
super1[self.__PIPELINE_DEFAULTS][self.__AFTER_TASKS] = super1_pipeline_defaults.pop(
self.__AFTER_TASKS, []
) + super2_pipeline_defaults.pop(self.__AFTER_TASKS, [])
# merge supers tasks
return dict_util.merge_dicts(super1, super2, True)
def snapshot_final_liminal_configs(self):
files_util.dump_liminal_configs(liminal_configs=self.loaded_subliminals, path=self.snapshot_path)
def __merge_section(self, subliminal, superliminal, section):
return self.__deep_list_keyword_merge(section[:-1], subliminal.get(section, []), superliminal.get(section, []))
@staticmethod
def __apply_pipeline_defaults(subliminal, superliminal, pipeline):
return default_configs.apply_pipeline_defaults(subliminal, superliminal, pipeline)
@staticmethod
def __deep_list_keyword_merge(unique_key_name, subliminal_list_conf, superliminal_list_conf):
subliminal_key_map = {item[unique_key_name]: item for item in subliminal_list_conf}
superliminal_key_map = {item[unique_key_name]: item for item in superliminal_list_conf}
return list(dict_util.merge_dicts(superliminal_key_map, subliminal_key_map, recursive=True).values())
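# Typical usage (illustrative sketch): load and enrich every liminal yml under a directory,
# then snapshot the resolved configs next to the Airflow home.
#   config_util = ConfigUtil('/path/to/liminal/configs')
#   enriched = config_util.safe_load(is_render_variables=True)
#   config_util.snapshot_final_liminal_configs()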
|
src/deutschland/bundesanzeiger/model.py | andreasbossard/deutschland | 445 | 12631183 | <gh_stars>100-1000
import numpy as np
import tensorflow.keras.backend as K
from PIL import Image
from tensorflow import keras
def load_image_arr(fp):
image = Image.open(fp).convert("L")
image = np.array(image)
image = image / 255 * 2
image = image - 1
return image
def character_indexes_to_str(character_indexes):
ALPHABET = list("abcdefghijklmnopqrstuvwxyz0123456789")
characters = np.array(ALPHABET)[character_indexes]
return "".join(list(characters)).upper()
def prediction_to_str(label):
character_indexes = np.argmax(label, axis=1)
return character_indexes_to_str(character_indexes)
def my_accuracy(y_true, y_pred):
return K.cast(
K.all(K.equal(K.argmax(y_true, axis=2), K.argmax(y_pred, axis=2)), axis=1),
K.floatx(),
)
def load_model():
return keras.models.load_model(
"assets/model.h5", custom_objects={"my_accuracy": my_accuracy}
)
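# Illustrative end-to-end sketch (not part of the original module): the captcha file name and
# the exact input shape expected by the bundled Keras model are assumptions; only load_model(),
# load_image_arr() and prediction_to_str() are defined above.
#   model = load_model()
#   image = load_image_arr("captcha.png")
#   label = model.predict(image[np.newaxis, ..., np.newaxis])[0]
#   print(prediction_to_str(label))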
|
startup_scripts/320_services.py | systempal/netbox-docker | 691 | 12631212 | import sys
from dcim.models import Device
from ipam.models import Service
from startup_script_utils import load_yaml
from virtualization.models import VirtualMachine
services = load_yaml("/opt/netbox/initializers/services.yml")
if services is None:
sys.exit()
optional_assocs = {
"device": (Device, "name"),
"virtual_machine": (VirtualMachine, "name"),
}
for params in services:
for assoc, details in optional_assocs.items():
if assoc in params:
model, field = details
query = {field: params.pop(assoc)}
params[assoc] = model.objects.get(**query)
service, created = Service.objects.get_or_create(**params)
if created:
print("🧰 Created Service", service.name)
|
k8s_handle/k8s/diff.py | jetbrains-infra/k8s-handle | 152 | 12631219 | import sys
import logging
import copy
from difflib import ndiff
from datetime import datetime
from functools import reduce
import operator
import yaml
from .adapters import Adapter
from k8s_handle.templating import get_template_contexts
log = logging.getLogger(__name__)
IGNORE_FIELDS = [
'metadata.annotations:kubectl.kubernetes.io/last-applied-configuration',
'metadata.annotations:deployment.kubernetes.io/revision',
'metadata:creationTimestamp',
'metadata:resourceVersion',
'metadata:selfLink',
'metadata:uid',
'metadata:namespace',
'metadata:generation',
'metadata:managedFields',
'status'
]
def remove_from_dict(d, path, key):
del reduce(operator.getitem, path, d)[key]
def to_dict(obj):
if hasattr(obj, 'attribute_map'):
result = {}
for k, v in getattr(obj, 'attribute_map').items():
val = getattr(obj, k)
if val is not None:
result[v] = to_dict(val)
return result
elif type(obj) == list:
return [to_dict(x) for x in obj]
elif type(obj) == datetime:
return str(obj)
elif type(obj) == dict:
newobj = copy.deepcopy(obj)
for k, v in obj.items():
newobj[k] = to_dict(obj[k])
return newobj
else:
return obj
def apply_filter(d, field_path):
try:
path, field = field_path.split(':')
path = path.split('.')
except ValueError:
del d[field_path]
else:
remove_from_dict(d, path, field)
class Diff:
@staticmethod
def run(file_path):
for template_body in get_template_contexts(file_path):
if template_body.get('kind') == 'Secret':
log.info(f'Skipping secret {template_body.get("metadata", {}).get("name")}')
continue
kube_client = Adapter.get_instance(template_body)
new = yaml.safe_dump(template_body)
k8s_object = kube_client.get()
if k8s_object is None:
current_dict = {}
else:
current_dict = to_dict(k8s_object)
for field_path in IGNORE_FIELDS:
try:
apply_filter(current_dict, field_path)
except KeyError:
pass
metadata = current_dict.get('metadata', {})
if 'annotations' in metadata and metadata['annotations'] == {}:
del metadata['annotations']
current = yaml.safe_dump(current_dict)
if new == current:
log.info(f' Kind: "{template_body.get("kind")}", '
f'name: "{template_body.get("metadata", {}).get("name")}" : NO CHANGES')
else:
diff = ndiff(current.splitlines(keepends=True), new.splitlines(keepends=True))
log.info(f' Kind: "{template_body.get("kind")}", '
f'name: "{template_body.get("metadata", {}).get("name")}"')
sys.stdout.write(''.join(diff))
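# Quick illustration of the filtering helpers above (comment-only sketch, since this module
# relies on relative imports and is normally driven by k8s-handle's diff command):
#   obj = {"metadata": {"uid": "123", "name": "demo"}, "status": {"ready": True}}
#   apply_filter(obj, "metadata:uid")   # drops metadata.uid
#   apply_filter(obj, "status")         # drops the whole status block
#   # obj is now {"metadata": {"name": "demo"}}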
|
test/python/classical_function_compiler/bad_examples.py | Roshan-Thomas/qiskit-terra | 1,599 | 12631264 | <reponame>Roshan-Thomas/qiskit-terra
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=missing-function-docstring, undefined-variable
"""These are bad examples and raise errors in in the classicalfunction compiler"""
from qiskit.circuit import Int1, Int2
def id_no_type_arg(a) -> Int1:
return a
def id_no_type_return(a: Int1):
return a
def id_bad_return(a: Int1) -> Int2:
return a
def out_of_scope(a: Int1) -> Int1:
return a & c
def bit_not(a: Int1) -> Int1:
# Bitwise not does not operate on booleans (aka, bits), but int
return ~a
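# For contrast (not part of the original fixtures): a classical function the compiler accepts
# annotates every argument and the return value with a classical bit type, e.g.
#   def identity_ok(a: Int1) -> Int1:
#       return a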
|
tests/parser/test_base.py | adaamz/datamodel-code-generator | 891 | 12631305 | <filename>tests/parser/test_base.py<gh_stars>100-1000
from collections import OrderedDict
from typing import Dict, List, Tuple
import pytest
from datamodel_code_generator.model import DataModel, DataModelFieldBase
from datamodel_code_generator.model.pydantic import BaseModel, DataModelField
from datamodel_code_generator.parser.base import Parser, relative, sort_data_models
from datamodel_code_generator.reference import Reference, snake_to_upper_camel
from datamodel_code_generator.types import DataType
class A(DataModel):
pass
class B(DataModel):
pass
class C(Parser):
def parse_raw(self, name: str, raw: Dict) -> None:
pass
def parse(self) -> str:
return 'parsed'
def test_parser():
c = C(
data_model_type=D,
data_model_root_type=B,
data_model_field_type=DataModelFieldBase,
base_class='Base',
source='',
)
assert c.data_model_type == D
assert c.data_model_root_type == B
assert c.data_model_field_type == DataModelFieldBase
assert c.base_class == 'Base'
def test_sort_data_models():
reference_a = Reference(path='A', original_name='A', name='A')
reference_b = Reference(path='B', original_name='B', name='B')
reference_c = Reference(path='C', original_name='C', name='C')
data_type_a = DataType(reference=reference_a)
data_type_b = DataType(reference=reference_b)
data_type_c = DataType(reference=reference_c)
reference = [
BaseModel(
fields=[
DataModelField(data_type=data_type_a),
DataModelFieldBase(data_type=data_type_c),
],
reference=reference_a,
),
BaseModel(
fields=[DataModelField(data_type=data_type_b)],
reference=reference_b,
),
BaseModel(
fields=[DataModelField(data_type=data_type_b)],
reference=reference_c,
),
]
unresolved, resolved, require_update_action_models = sort_data_models(reference)
expected = OrderedDict()
expected['B'] = reference[1]
expected['C'] = reference[2]
expected['A'] = reference[0]
assert resolved == expected
assert unresolved == []
assert require_update_action_models == ['B', 'A']
def test_sort_data_models_unresolved():
reference_a = Reference(path='A', original_name='A', name='A')
reference_b = Reference(path='B', original_name='B', name='B')
reference_c = Reference(path='C', original_name='C', name='C')
reference_d = Reference(path='D', original_name='D', name='D')
reference_v = Reference(path='V', original_name='V', name='V')
reference_z = Reference(path='Z', original_name='Z', name='Z')
data_type_a = DataType(reference=reference_a)
data_type_b = DataType(reference=reference_b)
data_type_c = DataType(reference=reference_c)
data_type_v = DataType(reference=reference_v)
data_type_z = DataType(reference=reference_z)
reference = [
BaseModel(
fields=[
DataModelField(data_type=data_type_a),
DataModelFieldBase(data_type=data_type_c),
],
reference=reference_a,
),
BaseModel(
fields=[DataModelField(data_type=data_type_b)],
reference=reference_b,
),
BaseModel(
fields=[DataModelField(data_type=data_type_b)],
reference=reference_c,
),
BaseModel(
fields=[
DataModelField(data_type=data_type_a),
DataModelField(data_type=data_type_c),
DataModelField(data_type=data_type_z),
],
reference=reference_d,
),
BaseModel(
fields=[DataModelField(data_type=data_type_v)],
reference=reference_z,
),
]
with pytest.raises(Exception):
sort_data_models(reference)
def test_sort_data_models_unresolved_raise_recursion_error():
reference_a = Reference(path='A', original_name='A', name='A')
reference_b = Reference(path='B', original_name='B', name='B')
reference_c = Reference(path='C', original_name='C', name='C')
reference_d = Reference(path='D', original_name='D', name='D')
reference_v = Reference(path='V', original_name='V', name='V')
reference_z = Reference(path='Z', original_name='Z', name='Z')
data_type_a = DataType(reference=reference_a)
data_type_b = DataType(reference=reference_b)
data_type_c = DataType(reference=reference_c)
data_type_v = DataType(reference=reference_v)
data_type_z = DataType(reference=reference_z)
reference = [
BaseModel(
fields=[
DataModelField(data_type=data_type_a),
DataModelFieldBase(data_type=data_type_c),
],
reference=reference_a,
),
BaseModel(
fields=[DataModelField(data_type=data_type_b)],
reference=reference_b,
),
BaseModel(
fields=[DataModelField(data_type=data_type_b)],
reference=reference_c,
),
BaseModel(
fields=[
DataModelField(data_type=data_type_a),
DataModelField(data_type=data_type_c),
DataModelField(data_type=data_type_z),
],
reference=reference_d,
),
BaseModel(
fields=[DataModelField(data_type=data_type_v)],
reference=reference_z,
),
]
with pytest.raises(Exception):
sort_data_models(reference, recursion_count=100000)
@pytest.mark.parametrize(
'current_module,reference,val',
[
('', 'Foo', ('', '')),
('a', 'a.Foo', ('', '')),
('a', 'a.b.Foo', ('.', 'b')),
('a.b', 'a.Foo', ('.', 'Foo')),
('a.b.c', 'a.Foo', ('..', 'Foo')),
('a.b.c', 'Foo', ('...', 'Foo')),
],
)
def test_relative(current_module: str, reference: str, val: Tuple[str, str]):
assert relative(current_module, reference) == val
@pytest.mark.parametrize(
'word,expected',
[
(
'_hello',
'_Hello',
), # In case a name starts with a underline, we should keep it.
('hello_again', 'HelloAgain'), # regular snake case
('hello__again', 'HelloAgain'), # handles double underscores
(
'hello___again_again',
'HelloAgainAgain',
), # handles double and single underscores
('hello_again_', 'HelloAgain'), # handles trailing underscores
('hello', 'Hello'), # no underscores
('____', '_'), # degenerate case, but this is the current expected behavior
],
)
def test_snake_to_upper_camel(word, expected):
"""Tests the snake to upper camel function."""
actual = snake_to_upper_camel(word)
assert actual == expected
class D(DataModel):
def __init__(self, filename: str, data: str, fields: List[DataModelFieldBase]):
super().__init__(fields=fields, reference=Reference(''))
self._data = data
def render(self) -> str:
return self._data
|
scrapy-redis/tests/test_package_import.py | GongkunJiang/MySpider | 3,305 | 12631315 | import scrapy_redis
def test_package_metadata():
assert scrapy_redis.__author__
assert scrapy_redis.__email__
assert scrapy_redis.__version__
|
nodemcu_uploader/validate.py | bazooka07/nodemcu-uploader | 324 | 12631333 | <filename>nodemcu_uploader/validate.py
from .exceptions import ValidationException
MAX_FS_NAME_LEN = 31
def remotePath(path):
"""Do various checks on the remote file name like max length.
Raises exception if not valid
"""
if len(path) > MAX_FS_NAME_LEN:
        raise ValidationException('Too long. >{0}'.format(MAX_FS_NAME_LEN), 'path', path)
    if len(path) < 1:
        raise ValidationException('Too short', 'path', path)
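# Usage sketch (comment-only, since this module is imported through the nodemcu_uploader package):
#   remotePath("init.lua")   # passes silently
#   remotePath("a" * 40)     # raises ValidationException: longer than MAX_FS_NAME_LEN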
|
tests/perf/test_long_cycles_nbrows_cycle_length_41000_140.py | shaido987/pyaf | 377 | 12631382 | import tests.perf.test_cycles_full_long_long as gen
gen.test_nbrows_cycle(41000 , 140)
|
var/spack/repos/builtin/packages/py-fenics-ffc/package.py | LiamBindle/spack | 2,360 | 12631402 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyFenicsFfc(PythonPackage):
"""The FEniCS Form Compiler FFC is a compiler for finite element
variational forms, translating high-level mathematical descriptions
of variational forms into efficient low-level C++ code for finite
element assembly."""
homepage = "https://fenicsproject.org/"
git = "https://bitbucket.org/fenics-project/ffc.git"
url = "https://bitbucket.org/fenics-project/ffc/downloads/ffc-2019.1.0.post0.tar.gz"
maintainers = ['emai-imcs']
version('2019.1.0.post0', sha256='306e1179630200a34202975a5369194939b3482eebfc34bc44ad74dab1f109e8')
version('2018.1.0', sha256='c5a6511693106d1cd2fc013148d0cd01cd1b99fc65dab461ca0b95851a9ea271')
version('2017.2.0.post0', sha256='1969a5460cb866c478df64874ce213f81cb5c893b89f991a578e258b1a64fee5')
version('2016.2.0', sha256='097c284780447ea7bb47d4d51956648a1efb2cb9047eb1382944421dde351ecb')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
for ver in ['2019.1.0.post0', '2018.1.0', '2017.2.0.post0', '2016.2.0']:
if ver in ['2019.1.0.post0', '2017.2.0.post0']:
ver = ver[:ver.rfind('.post')]
wver = '@' + ver
depends_on('py-fenics-fiat{0}'.format(wver), type=('build', 'run'), when=wver)
if(Version(ver) < Version('2017.2.0')):
depends_on('py-fenics-instant{0}'.format(wver), type=('build', 'run'), when=wver)
else:
depends_on('py-fenics-dijitso{0}'.format(wver), type=('build', 'run'), when=wver)
depends_on('py-fenics-ufl{0}'.format(wver), type=('build', 'run'), when=wver)
|
src/discriminator/train.py | Ravi-0809/question-generation | 212 | 12631425 | import sys,json,math
sys.path.insert(0, "/Users/tom/Dropbox/msc-ml/project/src/")
sys.path.insert(0, "/cs/student/msc/ml/2017/thosking/dev/msc-project/src/")
sys.path.insert(0, "/home/thosking/msc-project/src/")
import tensorflow as tf
import numpy as np
from instance import DiscriminatorInstance
import helpers.loader as loader
from tqdm import tqdm
def main(_):
FLAGS = tf.app.flags.FLAGS
# results=results[:32]
# dev_ctxts, dev_qs,dev_ans,dev_ans_pos, dev_correct = zip(*squad_dev)
positive_data=[]
negative_data=[]
if FLAGS.disc_trainongenerated is True:
with open(FLAGS.log_dir+'out_eval_'+ FLAGS.disc_modelslug +'.json') as f:
results = json.load(f)
# for res in results:
# qpred,qgold,ctxt,ans_text,ans_pos =res
for res in results['results']:
positive_data.append( (res['c'], res['q_gold'], res['a_text'], res['a_pos']) )
negative_data.append( (res['c'], res['q_pred'], res['a_text'], res['a_pos']) )
if FLAGS.disc_trainonsquad is True:
squad_v2 = loader.load_squad_triples(FLAGS.data_path, FLAGS.disc_dev_set, v2=True)
for res in squad_v2:
ctxt,q,ans_text,ans_pos,label =res
if label is False: # label is "is_unanswerable"
positive_data.append( (ctxt.lower(), q.lower(), ans_text.lower(), ans_pos) )
else:
negative_data.append( (ctxt.lower(), q.lower(), ans_text.lower(), ans_pos) )
num_instances = min(len(negative_data), len(positive_data))
disc = DiscriminatorInstance(path=(FLAGS.model_dir+'saved/qanet2/' if FLAGS.disc_init_qanet is True else None), trainable=True, log_slug=FLAGS.disc_modelslug+("_SQUAD" if FLAGS.disc_trainonsquad else "")+("_QAINIT" if FLAGS.disc_init_qanet else ""), force_init=FLAGS.disc_init_qanet)
# disc.load_from_chkpt() # this loads the embeddings etc
train_samples = math.floor(0.8*num_instances)
dev_samples = math.floor(0.2*num_instances)
positive_data_train = positive_data[:train_samples]
negative_data_train = negative_data[:train_samples]
positive_data_dev = positive_data[train_samples:]
negative_data_dev = negative_data[train_samples:]
num_steps_train = train_samples//FLAGS.batch_size
num_steps_dev = dev_samples//FLAGS.batch_size
num_steps_squad = num_steps_dev
best_oos_nll=1e6
for i in tqdm(range(num_steps_train*FLAGS.disc_num_epochs), desc='Training'):
if i % num_steps_train ==0:
np.random.shuffle(positive_data_train)
np.random.shuffle(negative_data_train)
ixs = np.round(np.random.binomial(1,0.5,FLAGS.batch_size))
# batch = train_data[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size]
batch = [negative_data_train[(i% num_steps_train)*FLAGS.batch_size+j] if ix < 0.5 else positive_data_train[(i% num_steps_train)*FLAGS.batch_size+j] for j,ix in enumerate(ixs.tolist())]
ctxt,qbatch,ans_text,ans_pos = zip(*batch)
# print(ixs)
# print(qbatch)
# print(ans_text)
# print(ans_pos)
# print(ctxt)
# exit()
# +qpred[ix].replace("</Sent>","").replace("<PAD>","")
qbatch = [q.replace(" </Sent>","").replace(" <PAD>","") for q in qbatch]
# qbatch = ["fake " if ixs[ix] < 0.5 else "real " for ix in range(FLAGS.batch_size)]
# print(qbatch, ixs)
loss = disc.train_step(ctxt, qbatch, ans_text, ans_pos, ixs, (i))
if i % 1000 == 0 and i >0:
dev_acc=[]
dev_nll=[]
for dev_i in tqdm(range(num_steps_dev), desc='Step '+str(i) + " dev"):
ixs = np.round(np.random.binomial(1,0.5,FLAGS.batch_size))
batch = [negative_data_dev[dev_i*FLAGS.batch_size+j] if ix < 0.5 else positive_data_dev[dev_i*FLAGS.batch_size+j] for j,ix in enumerate(ixs.tolist())]
ctxt,qbatch,ans_text,ans_pos = zip(*batch)
qbatch = [q.replace(" </Sent>","").replace(" <PAD>","") for q in qbatch]
pred = disc.get_pred(ctxt, qbatch, ans_text, ans_pos)
nll = disc.get_nll(ctxt, qbatch, ans_text, ans_pos, ixs)
acc = 1.0*np.equal(np.round(pred), ixs)
dev_acc.extend(acc.tolist())
dev_nll.extend(nll.tolist())
accsummary = tf.Summary(value=[tf.Summary.Value(tag="dev_perf/acc",
simple_value=np.mean(dev_acc))])
nllsummary = tf.Summary(value=[tf.Summary.Value(tag="dev_perf/nll",
simple_value=np.mean(dev_nll))])
disc.summary_writer.add_summary(accsummary, global_step=i)
disc.summary_writer.add_summary(nllsummary, global_step=i)
print(np.mean(dev_acc))
if np.mean(dev_nll) < best_oos_nll:
best_oos_nll=np.mean(dev_nll)
disc.save_to_chkpt(FLAGS.model_dir, i)
print("New best NLL, saving")
if __name__ == "__main__":
tf.app.run()
|
code/draw_detail_3D.py | huangyangyu/Noise-Tolerant-Paradigm-for-Training-Face-Recognition-CNNs | 149 | 12631440 | <gh_stars>100-1000
#!/usr/bin/env python
#coding: utf-8
from mpl_toolkits.mplot3d import Axes3D
import os
import sys
import cv2
import math
import json
import numpy as np
import matplotlib
from matplotlib import ticker
import matplotlib.pyplot as plt
plt.switch_backend("agg")
bins = 201
def choose_iter(_iter, interval):
if _iter == 1000:
return True
if _iter == 200000:
return True
k = _iter / interval
if k % 1 != 0 or (k / 1) % (_iter / 40000 + 1) != 0:
return False
else:
return True
def get_pdf(X):
pdf = bins * [0.0]
for x in X:
pdf[int(x*100)] += 1
return pdf
def mean_filter(pdf, fr=2):
filter_pdf = bins * [0.0]
for i in xrange(fr, bins-fr):
for j in xrange(i-fr, i+fr+1):
filter_pdf[i] += pdf[j] / (fr+fr+1)
return filter_pdf
def load_lfw(prefix):
items = list()
for line in open("./data/%s_lfw.txt" % prefix):
item = line.strip().split()
items.append((int(item[0]), float(item[1]), 100.0 * float(item[2])))
items.append(items[-1])
return items
def draw_imgs(prefix, ratio, mode):
lfw = load_lfw(prefix)
frames = 0
_X = list()
_Y = list()
_Z = list()
iters = list()
threds = list()
accs = list()
alphas = list()
betas = list()
gammas = list()
for _iter in xrange(1000, 200000+1, 1000):
_iter1, _thred1, _acc1 = lfw[_iter/10000]
_iter2, _thred2, _acc2 = lfw[_iter/10000+1]
_thred = _thred1 + (_thred2 - _thred1) * (_iter - _iter1) / max(1, (_iter2 - _iter1))
_acc = _acc1 + (_acc2 - _acc1) * (_iter - _iter1) / max(1, (_iter2 - _iter1))
iters.append(_iter)
threds.append(_thred)
accs.append(_acc)
log_file = "./data/%s_%d.txt" % (prefix, _iter)
if not os.path.exists(log_file):
print "miss: %s" % log_file
continue
lines = open(log_file).readlines()
# pdf
pdf = list()
for line in lines[3:bins+3]:
item = line.strip().split()
item = map(lambda x: float(x), item)
pdf.append(item[1])
# clean pdf
clean_pdf = list()
for line in lines[bins+3+1:bins+bins+3+1]:
item = line.strip().split()
item = map(lambda x: float(x), item)
clean_pdf.append(item[1])
# noise pdf
noise_pdf = list()
for line in lines[bins+bins+3+1+1:bins+bins+bins+3+1+1]:
item = line.strip().split()
item = map(lambda x: float(x), item)
noise_pdf.append(item[1])
# pcf
pcf = list()
for line in lines[bins+bins+bins+3+1+1+1:bins+bins+bins+bins+3+1+1+1]:
item = line.strip().split()
item = map(lambda x: float(x), item)
pcf.append(item[1])
# weight
W = list()
for line in lines[bins+bins+bins+bins+3+1+1+1+1:bins+bins+bins+bins+bins+3+1+1+1+1]:
item = line.strip().split()
item = map(lambda x: float(x), item)
W.append(item[1])
X = list()
for i in xrange(bins):
X.append(i * 0.01 - 1.0)
_X.append(X)
_Y.append(bins * [_iter])
_Z.append(mean_filter(pdf))
if not choose_iter(_iter, 1000):
continue
titlesize = 44
asize = 44
glinewidth = 2
fig = plt.figure(0)
fig.set_size_inches(24, 18)
ax = Axes3D(fig)
#ax.set_title(r"$The\ cos\theta\ distribution\ of\ $" + str(ratio) + "%" + r"$\ noisy\ training\ data\ over\ iteration$", fontsize=titlesize)
ax.set_xlabel(r"$cos\theta$", fontsize=asize)
ax.set_ylabel(r"$Iter$", fontsize=asize)
ax.set_zlabel(r"$Numbers$", fontsize=asize)
ax.tick_params(labelsize=32)
ax.set_xlim(-1.0, 1.0)
ax.set_ylim(0, 200000)
ax.set_zlim(0.0, 6000.0)
ax.grid(True, linewidth=glinewidth)
surf = ax.plot_surface(_X, _Y, _Z, rstride=3, cstride=3, cmap=plt.cm.coolwarm, linewidth=0.1, antialiased=False)
surf.set_clim([0, 6000])
cbar = fig.colorbar(surf, shrink=0.5, aspect=10, norm=plt.Normalize(0, 6000))
cbar.set_ticks([0, 1000, 2000, 3000, 4000, 5000, 6000])
cbar.set_ticklabels(["0", "1k", "2k", "3k", "4k", "5k", "6k"])
#cbar.locator = ticker.MaxNLocator(nbins=6)
#cbar.update_ticks()
cbar.ax.tick_params(labelsize=24)
#print dir(ax)
#_ax = ax.twiny()
#_ax.set_ylim(0.0, 1.0)
#_ax.plot(bins * [-1.0], iters, accs, label="LFW")
#_ax.legend()
#ax.plot(len(iters) * [-1.0], iters, 100.0 * np.array(accs), color="k", label="LFW")
#ax.plot(len(iters) * [-1.0], iters, 60.0 * np.array(accs), color="k", label="LFW")
#ax.legend()
plt.savefig("./figures/%s_3D_dist_%d.jpg" % (prefix, _iter))
plt.close()
frames += 1
print "frames:", frames
print "processed:", _iter
sys.stdout.flush()
def draw_video(prefix1, prefix2, ratio):
draw_imgs(prefix1, ratio, mode=1)
draw_imgs(prefix2, ratio, mode=2)
fps = 25
#size = (4800, 1800)
#size = (2400, 900)
size = (2000, 750)
videowriter = cv2.VideoWriter("./figures/demo_3D_distribution_noise-%d%%.avi" % ratio, cv2.cv.CV_FOURCC(*"MJPG"), fps, size)
for _iter in xrange(1000, 200000+1, 1000):
if not choose_iter(_iter, 1000):
continue
image_file1 = "./figures/%s_3D_dist_%d.jpg" % (prefix1, _iter)
img1 = cv2.imread(image_file1)
img1 = cv2.resize(img1, (1000, 750))##
h1, w1, c1 = img1.shape
image_file2 = "./figures/%s_3D_dist_%d.jpg" % (prefix2, _iter)
img2 = cv2.imread(image_file2)
img2 = cv2.resize(img2, (1000, 750))##
h2, w2, c2 = img2.shape
assert h1 == h2 and w1 == w2
img = np.zeros((size[1], size[0], 3), dtype=img1.dtype)
img[0:h1, 0:w1, :] = img1
img[0:h1, w1:w1+w2, :] = img2
videowriter.write(img)
if __name__ == "__main__":
prefixs = [
(0, "p_casia-webface_noise-flip-outlier-1_1-0_Nsoftmax_exp", "p_casia-webface_noise-flip-outlier-1_1-0_Nsoftmax_FIT_exp"), \
(20, "p_casia-webface_noise-flip-outlier-1_1-20_Nsoftmax_exp", "p_casia-webface_noise-flip-outlier-1_1-20_Nsoftmax_FIT_exp"), \
(40, "p_casia-webface_noise-flip-outlier-1_1-40_Nsoftmax_exp", "p_casia-webface_noise-flip-outlier-1_1-40_Nsoftmax_FIT_exp"), \
(60, "p_casia-webface_noise-flip-outlier-1_1-60_Nsoftmax_exp", "p_casia-webface_noise-flip-outlier-1_1-60_Nsoftmax_FIT_exp")
]
for ratio, prefix1, prefix2 in prefixs:
draw_video(prefix1, prefix2, ratio)
print "processing noise-%d" % ratio
|
deeppavlov/models/doc_retrieval/tfidf_ranker.py | xbodx/DeepPavlov | 5,893 | 12631444 | <gh_stars>1000+
# Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logging import getLogger
from typing import List, Any, Tuple
import numpy as np
from deeppavlov.core.common.registry import register
from deeppavlov.core.models.estimator import Component
from deeppavlov.models.vectorizers.hashing_tfidf_vectorizer import HashingTfIdfVectorizer
logger = getLogger(__name__)
@register("tfidf_ranker")
class TfidfRanker(Component):
"""Rank documents according to input strings.
Args:
vectorizer: a vectorizer class
top_n: a number of doc ids to return
active: whether to return a number specified by :attr:`top_n` (``True``) or all ids
(``False``)
Attributes:
top_n: a number of doc ids to return
vectorizer: an instance of vectorizer class
active: whether to return a number specified by :attr:`top_n` or all ids
index2doc: inverted :attr:`doc_index`
iterator: a dataset iterator used for generating batches while fitting the vectorizer
"""
def __init__(self, vectorizer: HashingTfIdfVectorizer, top_n=5, active: bool = True, **kwargs):
self.top_n = top_n
self.vectorizer = vectorizer
self.active = active
def __call__(self, questions: List[str]) -> Tuple[List[Any], List[float]]:
"""Rank documents and return top n document titles with scores.
Args:
questions: list of queries used in ranking
Returns:
a tuple of selected doc ids and their scores
"""
batch_doc_ids, batch_docs_scores = [], []
q_tfidfs = self.vectorizer(questions)
for q_tfidf in q_tfidfs:
scores = q_tfidf * self.vectorizer.tfidf_matrix
scores = np.squeeze(
scores.toarray() + 0.0001) # add a small value to eliminate zero scores
if self.active:
thresh = self.top_n
else:
thresh = len(self.vectorizer.doc_index)
if thresh >= len(scores):
o = np.argpartition(-scores, len(scores) - 1)[0:thresh]
else:
o = np.argpartition(-scores, thresh)[0:thresh]
o_sort = o[np.argsort(-scores[o])]
doc_scores = scores[o_sort]
doc_ids = [self.vectorizer.index2doc[i] for i in o_sort]
batch_doc_ids.append(doc_ids)
batch_docs_scores.append(doc_scores)
return batch_doc_ids, batch_docs_scores
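# Example wiring (illustrative sketch; the vectorizer must already be fitted, and in practice
# both objects are built from a DeepPavlov ODQA config rather than by hand):
#   ranker = TfidfRanker(vectorizer=fitted_hashing_tfidf_vectorizer, top_n=5)
#   doc_ids, doc_scores = ranker(["who invented the telephone?"])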
|
tabular/tests/unittests/features/generators/test_rename.py | zhiqiangdon/autogluon | 4,462 | 12631454 | <reponame>zhiqiangdon/autogluon
from autogluon.features.generators import RenameFeatureGenerator
def test_rename(generator_helper, data_helper):
# Given
input_data = data_helper.generate_multi_feature_full()
generator = RenameFeatureGenerator(name_prefix='pre_', name_suffix='_suf')
expected_feature_metadata_in_full = {
('category', ()): ['cat'],
('datetime', ()): ['datetime'],
('float', ()): ['float'],
('int', ()): ['int'],
('object', ()): ['obj'],
('object', ('datetime_as_object',)): ['datetime_as_object'],
('object', ('text',)): ['text']
}
expected_feature_metadata_full = {
('category', ()): ['pre_cat_suf'],
('datetime', ()): ['pre_datetime_suf'],
('float', ()): ['pre_float_suf'],
('int', ()): ['pre_int_suf'],
('object', ()): ['pre_obj_suf'],
('object', ('datetime_as_object',)): ['pre_datetime_as_object_suf'],
('object', ('text',)): ['pre_text_suf']
}
# When
output_data = generator_helper.fit_transform_assert(
input_data=input_data,
generator=generator,
expected_feature_metadata_in_full=expected_feature_metadata_in_full,
expected_feature_metadata_full=expected_feature_metadata_full,
)
# Therefore
output_data.columns = input_data.columns
assert input_data.equals(output_data)
|
pytest-shutil/tests/integration/test_cmdline_integration.py | RaiVaibhav/pytest-plugins | 282 | 12631459 | <filename>pytest-shutil/tests/integration/test_cmdline_integration.py
import os
from pytest_shutil import cmdline
def test_chdir():
here = os.getcwd()
bindir = os.path.realpath('/bin')
with cmdline.chdir(bindir):
assert os.getcwd() == bindir
assert os.getcwd() == here
def test_chdir_goes_away(workspace):
os.chdir(workspace.workspace)
workspace.teardown()
bindir = os.path.realpath('/bin')
with cmdline.chdir(bindir):
assert os.getcwd() == bindir
assert os.getcwd() == '/' |
supersuit/utils/agent_indicator.py | PettingZoo-Team/SuperSu | 237 | 12631462 | <filename>supersuit/utils/agent_indicator.py
import re
import numpy as np
from gym.spaces import Box, Discrete
import warnings
def change_obs_space(space, num_indicators):
if isinstance(space, Box):
ndims = len(space.shape)
if ndims == 1:
pad_space = np.ones((num_indicators,), dtype=space.dtype)
new_low = np.concatenate([space.low, pad_space * 0], axis=0)
new_high = np.concatenate([space.high, pad_space], axis=0)
new_space = Box(low=new_low, high=new_high, dtype=space.dtype)
return new_space
elif ndims == 3 or ndims == 2:
orig_low = space.low if ndims == 3 else np.expand_dims(space.low, 2)
orig_high = space.high if ndims == 3 else np.expand_dims(space.high, 2)
pad_space = np.ones(orig_low.shape[:2] + (num_indicators,), dtype=space.dtype)
new_low = np.concatenate([orig_low, pad_space * 0], axis=2)
new_high = np.concatenate([orig_high, pad_space], axis=2)
new_space = Box(low=new_low, high=new_high, dtype=space.dtype)
return new_space
elif isinstance(space, Discrete):
return Discrete(space.n * num_indicators)
assert False, "agent_indicator space must be 1d, 2d, or 3d Box or Discrete, was {}".format(space)
def get_indicator_map(agents, type_only):
if type_only:
assert all(re.match("[a-z]+_[0-9]+", agent) for agent in agents), "when the `type_only` parameter is True to agent_indicator, the agent names must follow the `<type>_<n>` format"
agent_id_map = {}
type_idx_map = {}
idx_num = 0
for agent in agents:
type = agent.split("_")[0]
if type not in type_idx_map:
type_idx_map[type] = idx_num
idx_num += 1
agent_id_map[agent] = type_idx_map[type]
if idx_num == 1:
warnings.warn("agent_indicator wrapper is degenerate, only one agent type; doing nothing")
return agent_id_map
else:
return {agent: i for i, agent in enumerate(agents)}
def check_params(spaces):
spaces = list(spaces)
first_space = spaces[0]
for space in spaces:
assert repr(space) == repr(first_space), "spaces need to be the same shape to add an indicator. Try using the `pad_observations` wrapper before agent_indicator."
change_obs_space(space, 1)
def change_observation(obs, space, indicator_data):
indicator_num, num_indicators = indicator_data
assert 0 <= indicator_num < num_indicators
if isinstance(space, Box):
ndims = len(space.shape)
if ndims == 1:
old_len = len(obs)
new_obs = np.pad(obs, (0, num_indicators))
new_obs[indicator_num + old_len] = 1.0
return new_obs
elif ndims == 3 or ndims == 2:
obs = obs if ndims == 3 else np.expand_dims(obs, 2)
old_shaped3 = obs.shape[2]
new_obs = np.pad(obs, [(0, 0), (0, 0), (0, num_indicators)])
new_obs[:, :, old_shaped3 + indicator_num] = 1.0
return new_obs
elif isinstance(space, Discrete):
return obs * num_indicators + indicator_num
assert False, "agent_indicator space must be 1d, 2d, or 3d Box or Discrete, was {}".format(space)
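if __name__ == "__main__":
    # Illustrative check of the helpers above (not part of the original module).
    agents = ["piston_0", "piston_1", "pursuer_0"]
    indicator_map = get_indicator_map(agents, type_only=True)   # piston_* share one indicator
    num_indicators = max(indicator_map.values()) + 1            # two agent types here
    space = Box(low=0.0, high=1.0, shape=(4,), dtype=np.float32)
    padded_space = change_obs_space(space, num_indicators)      # Box of shape (6,)
    padded_obs = change_observation(np.zeros(4, dtype=np.float32), space, (indicator_map["pursuer_0"], num_indicators))
    print(indicator_map, padded_space.shape, padded_obs)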
|
src/azure-cli-core/azure/cli/core/tests/test_help_loaders.py | YuanyuanNi/azure-cli | 3,287 | 12631489 | <reponame>YuanyuanNi/azure-cli
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.util import CLIError
from azure.cli.core import AzCommandsLoader
from azure.cli.core._help_loaders import HelpLoaderV1
# region TestCommandLoader
class TestCommandLoader(AzCommandsLoader):
def __init__(self, cli_ctx=None):
from azure.cli.core.commands import CliCommandType
compute_custom = CliCommandType(
operations_tmpl='{}#{{}}'.format(__name__),
)
super(TestCommandLoader, self).__init__(cli_ctx=cli_ctx,
custom_command_type=compute_custom)
self.cmd_to_loader_map = {}
def load_command_table(self, args):
with self.command_group('test') as g:
g.custom_command('alpha', 'dummy_handler')
return self.command_table
def load_arguments(self, command):
with self.argument_context('test') as c:
c.argument('arg1', options_list=['--arg1', '-a'])
c.argument('arg2', options_list=['--arg2', '-b'], help="Help From code.")
with self.argument_context('test alpha') as c:
c.positional('arg4', metavar="ARG4")
self._update_command_definitions() # pylint: disable=protected-access
def dummy_handler(arg1, arg2=None, arg3=None, arg4=None):
"""
Short summary here. Long summary here. Still long summary.
:param arg1: arg1's docstring help text
:param arg2: arg2's docstring help text
:param arg3: arg3's docstring help text
:param arg4: arg4's docstring help text
"""
pass
COMMAND_LOADER_CLS = TestCommandLoader
# region Test Help Loader
class JsonLoaderMixin(object):
"""A class containing helper methods for Json Loaders."""
# get the list of json help file names for the command or group
@staticmethod
def _get_json_help_files_list(nouns, cmd_loader_map_ref):
import inspect
import os
command_nouns = " ".join(nouns)
# if command in map, get the loader. Path of loader is path of helpfile.
ldr_or_none = cmd_loader_map_ref.get(command_nouns, [None])[0]
if ldr_or_none:
loaders = {ldr_or_none}
else:
loaders = set()
# otherwise likely a group, try to find all command loaders under group as the group help could be defined
# in either.
if not loaders:
for cmd_name, cmd_ldr in cmd_loader_map_ref.items():
# if first word in loader name is the group, this is a command in the command group
if cmd_name.startswith(command_nouns + " "):
loaders.add(cmd_ldr[0])
results = []
if loaders:
for loader in loaders:
loader_file_path = inspect.getfile(loader.__class__)
dir_name = os.path.dirname(loader_file_path)
files = os.listdir(dir_name)
for file in files:
if file.endswith("help.json"):
help_file_path = os.path.join(dir_name, file)
results.append(help_file_path)
return results
@staticmethod
def _parse_json_from_string(text, help_file_path):
import os
import json
dir_name, base_name = os.path.split(help_file_path)
pretty_file_path = os.path.join(os.path.basename(dir_name), base_name)
if not text:
raise CLIError("No content passed for {}.".format(pretty_file_path))
try:
return json.loads(text)
except ValueError as e:
raise CLIError("Error parsing {}:\n\n{}".format(pretty_file_path, e))
# test Help Loader, loads from help.json
class DummyHelpLoader(HelpLoaderV1, JsonLoaderMixin):
# This loader has different keys in the data object. Except for "arguments" and "examples".
core_attrs_to_keys = [("short_summary", "short"), ("long_summary", "long")]
body_attrs_to_keys = core_attrs_to_keys + [("links", "hyper-links")]
param_attrs_to_keys = core_attrs_to_keys + [("value_sources", "sources")]
@property
def version(self):
return 2
def get_noun_help_file_names(self, nouns):
cmd_loader_map_ref = self.help_ctx.cli_ctx.invocation.commands_loader.cmd_to_loader_map
return self._get_json_help_files_list(nouns, cmd_loader_map_ref)
def update_file_contents(self, file_contents):
for file_name in file_contents:
if file_name not in self._file_content_dict:
data_dict = {file_name: self._parse_json_from_string(file_contents[file_name], file_name)}
self._file_content_dict.update(data_dict)
def load_entry_data(self, help_obj, parser):
prog = parser.prog if hasattr(parser, "prog") else parser._prog_prefix # pylint: disable=protected-access
command_nouns = prog.split()[1:]
cmd_loader_map_ref = self.help_ctx.cli_ctx.invocation.commands_loader.cmd_to_loader_map
files_list = self._get_json_help_files_list(command_nouns, cmd_loader_map_ref)
data_list = [self._file_content_dict[name] for name in files_list]
self._entry_data = self._get_entry_data(help_obj.command, data_list)
def load_help_body(self, help_obj):
self._update_obj_from_data_dict(help_obj, self._entry_data, self.body_attrs_to_keys)
|
docs/video_moviepy.py | dariober/ASCIIGenome | 195 | 12631502 | <reponame>dariober/ASCIIGenome
#!/usr/bin/env ipython
from moviepy.editor import *
from moviepy import editor
from moviepy.video.tools.subtitles import SubtitlesClip
import os
# ---------------------- 8< ----------------------------------------------------
def annotate(clip, txt, txt_color= 'grey20', fontsize=50, font='Xolonium-Bold'):
""" Writes a text at the bottom of the clip. """
txtclip = editor.TextClip(txt, fontsize=fontsize, font=font, color=txt_color)
cvc = editor.CompositeVideoClip([clip, txtclip.set_pos(('center', 'bottom'))])
return cvc.set_duration(clip.duration)
# ---------------------- 8< ----------------------------------------------------
os.chdir("/Users/berald01/Desktop/asciigenome_demo/")
clip = VideoFileClip("bam-3.mov")
sub1= clip.subclip(1.04, 3.17)
sub2= clip.subclip(8.16, 32.00)
#header = TextClip(txt= "ASCIIGenome!\n- Genome Browser for Terminals -\n",
# font='Amiri-Bold', fontsize=100, bg_color= 'white', color="grey20").set_duration(1)
final= concatenate_videoclips([sub1, sub2], method= 'compose')
final.write_videofile('bam-3.cut.mp4', fps= 3, codec= 'mpeg4')
clip = VideoFileClip("bam-3.cut.mp4")
subs = [((0, 3.17), 'Load bam file'),
((3.17, 7.21), 'Go to region'),
((7.21, 12), 'Zoom in'),
((12, 15), 'Move forward'),
((15, 18), 'Zoom out'),
((18, 26), 'Filter reads')
]
annotated_clips = [annotate(clip.subclip(from_t, to_t), txt, txt_color= 'blue') for (from_t, to_t), txt in subs]
final_clip = editor.concatenate_videoclips(annotated_clips)
final_clip.write_videofile("bam-3.subs.mp4", fps= 3)
# ---------------------- 8< ----------------------------------------------------
os.chdir("/Users/berald01/Desktop/asciigenome_demo/")
clip = VideoFileClip("bigWig-2.mov")
cat= [clip.subclip(20.00, 22.00),
clip.subclip(24.00, 29.00),
clip.subclip(30.00, 35.00),
clip.subclip(41.00, 45),
clip.subclip(46.5, 48),
clip.subclip(51, 53),
clip.subclip(56, 64),
clip.subclip(80, 82),
]
final= concatenate_videoclips(cat, method= 'compose')
final.write_videofile('bigWig-3.cut.mp4', fps= 3, codec= 'mpeg4')
|
tests/test_vmtkScripts/test_vmtkcenterlinegeometry.py | michelebucelli/vmtk | 217 | 12631512 | <reponame>michelebucelli/vmtk
## Program: VMTK
## Language: Python
## Date: January 10, 2018
## Version: 1.4
## Copyright (c) <NAME>, <NAME>, All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
## Note: this code was contributed by
## <NAME> (Github @rlizzo)
## University at Buffalo
import pytest
import vmtk.vmtkcenterlinegeometry as centerlinegeometry
def test_default_parameters(aorta_centerline, compare_centerlines):
name = __name__ + '_test_default_parameters.vtp'
geometry = centerlinegeometry.vmtkCenterlineGeometry()
geometry.Centerlines = aorta_centerline
geometry.Execute()
assert compare_centerlines(geometry.Centerlines, name, method='addcellarray', arrayname='Tortuosity') == True
assert compare_centerlines(geometry.Centerlines, name, method='addpointarray', arrayname='Torsion') == True
assert compare_centerlines(geometry.Centerlines, name, method='addpointarray', arrayname='Curvature') == True
assert compare_centerlines(geometry.Centerlines, name, method='addcellarray', arrayname='Length') == True
assert compare_centerlines(geometry.Centerlines, name, method='addpointarray', arrayname='FrenetTangent') == True
assert compare_centerlines(geometry.Centerlines, name, method='addpointarray', arrayname='FrenetNormal') == True
assert compare_centerlines(geometry.Centerlines, name, method='addpointarray', arrayname='FrenetBinormal') == True
|
tests/test_matrices.py | spectralDNS/shenfun | 138 | 12631514 | <reponame>spectralDNS/shenfun
from copy import copy, deepcopy
import functools
from itertools import product
import numpy as np
import sympy as sp
from scipy.sparse.linalg import spsolve
from mpi4py import MPI
import pytest
import mpi4py_fft
import shenfun
from shenfun.chebyshev import matrices as cmatrices
from shenfun.chebyshev import bases as cbases
from shenfun.legendre import matrices as lmatrices
from shenfun.legendre import bases as lbases
from shenfun.laguerre import matrices as lagmatrices
from shenfun.laguerre import bases as lagbases
from shenfun.hermite import matrices as hmatrices
from shenfun.hermite import bases as hbases
from shenfun.jacobi import matrices as jmatrices
from shenfun.jacobi import bases as jbases
from shenfun.chebyshev import la as cla
from shenfun.legendre import la as lla
from shenfun import div, grad, inner, TensorProductSpace, FunctionSpace, SparseMatrix, \
Function, comm, VectorSpace, TrialFunction, TestFunction, BlockMatrix, CompositeSpace
from shenfun.spectralbase import inner_product
from shenfun.config import config
cBasis = (cbases.Orthogonal,
cbases.ShenDirichlet,
cbases.ShenNeumann,
cbases.ShenBiharmonic,
cbases.DirichletNeumann,
cbases.NeumannDirichlet)
# Bases with only GC quadrature
cBasisGC = (cbases.UpperDirichlet,
cbases.ShenBiPolar,
cbases.Heinrichs,
cbases.DirichletU,
cbases.MikNeumann,
cbases.CombinedShenNeumann,
)
lBasis = (lbases.Orthogonal,
lbases.ShenDirichlet,
functools.partial(lbases.ShenDirichlet, scaled=True),
lbases.ShenBiharmonic,
lbases.ShenNeumann)
# Bases with only LG quadrature
lBasisLG = (lbases.UpperDirichlet,
lbases.ShenBiPolar,
lbases.ShenBiPolar0)
lagBasis = (lagbases.Orthogonal,
lagbases.ShenDirichlet)
hBasis = (hbases.Orthogonal,)
jBasis = (jbases.Orthogonal,
jbases.ShenDirichlet,
jbases.ShenBiharmonic,
jbases.ShenOrder6)
cquads = ('GC', 'GL')
lquads = ('LG', 'GL')
lagquads = ('LG',)
hquads = ('HG',)
jquads = ('JG',)
for f in ['dct', 'dst', 'fft', 'ifft', 'rfft', 'irfft']:
config['fftw'][f]['planner_effort'] = 'FFTW_ESTIMATE'
N = 12
k = np.arange(N).astype(float)
a = np.random.random(N)
c = np.zeros(N)
c1 = np.zeros(N)
work = {
3:
(np.random.random((N, N, N)),
np.zeros((N, N, N)),
np.zeros((N, N, N))),
2:
(np.random.random((N, N)),
np.zeros((N, N)),
np.zeros((N, N)))
}
cbases2 = list(product(cBasis, cBasis))+list(product(cBasisGC, cBasisGC))
lbases2 = list(product(lBasis, lBasis))+list(product(lBasisLG, lBasisLG))
lagbases2 = list(product(lagBasis, lagBasis))
hbases2 = list(product(hBasis, hBasis))
jbases2 = list(product(jBasis, jBasis))
bases2 = cbases2+lbases2+lagbases2+hbases2+jbases2
cmats_and_quads = [list(k[0])+[k[1]] for k in product([(k, v) for k, v in cmatrices.mat.items()], cquads)]
lmats_and_quads = [list(k[0])+[k[1]] for k in product([(k, v) for k, v in lmatrices.mat.items()], lquads)]
lagmats_and_quads = [list(k[0])+[k[1]] for k in product([(k, v) for k, v in lagmatrices.mat.items()], lagquads)]
hmats_and_quads = [list(k[0])+[k[1]] for k in product([(k, v) for k, v in hmatrices.mat.items()], hquads)]
jmats_and_quads = [list(k[0])+[k[1]] for k in product([(k, v) for k, v in jmatrices.mat.items()], jquads)]
mats_and_quads = cmats_and_quads+lmats_and_quads+lagmats_and_quads+hmats_and_quads+jmats_and_quads
#cmats_and_quads_ids = ['-'.join(i) for i in product([v.__name__ for v in cmatrices.mat.values()], cquads)]
#lmats_and_quads_ids = ['-'.join(i) for i in product([v.__name__ for v in lmatrices.mat.values()], lquads)]
x = sp.symbols('x', real=True)
xp = sp.symbols('x', real=True, positive=True)
some_mats_and_quads = [mats_and_quads[i] for i in np.random.randint(0, len(mats_and_quads), 10)]
@pytest.mark.parametrize('key, mat, quad', mats_and_quads)
def test_mat(key, mat, quad):
"""Test that matrices built by hand equals those automatically generated"""
test = key[0]
trial = key[1]
measure = 1
if len(key) == 4:
domain = key[2]
measure = key[3]
if quad == 'GL':
return
if not measure == 1:
# Way too time-consuming
return
    if trial[0] in lBasisLG+cBasisGC and quad == 'GL':
return
t0 = test[0]
t1 = trial[0]
if len(key) == 4:
t0 = functools.partial(t0, domain=domain)
t1 = functools.partial(t1, domain=domain)
testfunction = (t0(N, quad=quad), test[1])
trialfunction = (t1(N, quad=quad), trial[1])
try:
mat = mat(testfunction, trialfunction, measure=measure)
except AssertionError: # In case something is not implemented
return
shenfun.check_sanity(mat, testfunction, trialfunction, measure)
if test[0].family() == 'Legendre' and test[0].boundary_condition() == 'Dirichlet':
testfunction = (test[0](N, quad=quad, scaled=True), test[1])
trialfunction = (trial[0](N, quad=quad, scaled=True), trial[1])
mat = mat(testfunction, trialfunction)
shenfun.check_sanity(mat, testfunction, trialfunction, measure)
@pytest.mark.parametrize('b0,b1', cbases2)
@pytest.mark.parametrize('quad', cquads)
@pytest.mark.parametrize('k', range(5))
def test_cmatvec(b0, b1, quad, k):
"""Test matrix-vector product"""
global c, c1
if quad == 'GL' and (b0 in cBasisGC or b1 in cBasisGC):
return
b0 = b0(N, quad=quad)
b1 = b1(N, quad=quad)
mat = inner_product((b0, 0), (b1, k))
formats = mat._matvec_methods + ['python', 'csr']
c = mat.matvec(a, c, format='csr')
for format in formats:
c1 = mat.matvec(a, c1, format=format)
assert np.allclose(c, c1)
for dim in (2, 3):
b, d, d1 = work[dim]
for axis in range(0, dim):
d = mat.matvec(b, d, format='csr', axis=axis)
for format in formats:
d1 = mat.matvec(b, d1, format=format, axis=axis)
assert np.allclose(d, d1)
@pytest.mark.parametrize('b0,b1', lbases2)
@pytest.mark.parametrize('quad', ('LG',))
@pytest.mark.parametrize('k0,k1', product((0, 1, 2), (0, 1, 2)))
def test_lmatvec(b0, b1, quad, k0, k1):
"""Test matrix-vector product"""
global c, c1, a
b0 = b0(N, quad=quad)
b1 = b1(N, quad=quad)
mat = inner_product((b0, k0), (b1, k1))
c = mat.matvec(a, c, format='dia')
formats = mat._matvec_methods + ['python', 'csr']
for format in formats:
c1 = mat.matvec(a, c1, format=format)
assert np.allclose(c, c1)
for dim in (2, 3):
b, d, d1 = work[dim]
for axis in range(0, dim):
d = mat.matvec(b, d, format='dia', axis=axis)
            for format in formats:
d1 = mat.matvec(b, d1, format=format, axis=axis)
assert np.allclose(d, d1)
@pytest.mark.parametrize('b0,b1', lagbases2)
@pytest.mark.parametrize('quad', lagquads)
@pytest.mark.parametrize('format', ('dia', 'python'))
@pytest.mark.parametrize('k0,k1', product((0, 1, 2), (0, 1, 2)))
def test_lagmatvec(b0, b1, quad, format, k0, k1):
"""Test matrix-vector product"""
global c, c1
b0 = b0(N, quad=quad)
b1 = b1(N, quad=quad)
mat = inner_product((b0, k0), (b1, k1))
c = mat.matvec(a, c, format='csr')
formats = mat._matvec_methods + ['python', 'csr']
for format in formats:
c1 = mat.matvec(a, c1, format=format)
assert np.allclose(c, c1)
for dim in (2, 3):
b, d, d1 = work[dim]
for axis in range(0, dim):
d = mat.matvec(b, d, format='csr', axis=axis)
for format in formats:
d1 = mat.matvec(b, d1, format=format, axis=axis)
assert np.allclose(d, d1)
@pytest.mark.parametrize('b0,b1', hbases2)
@pytest.mark.parametrize('quad', hquads)
@pytest.mark.parametrize('format', ('dia', 'python'))
@pytest.mark.parametrize('k0,k1', product((0, 1, 2), (0, 1, 2)))
def test_hmatvec(b0, b1, quad, format, k0, k1):
"""Test matrix-vector product"""
global c, c1
b0 = b0(N, quad=quad)
b1 = b1(N, quad=quad)
mat = inner_product((b0, k0), (b1, k1))
formats = mat._matvec_methods + ['python', 'csr']
c = mat.matvec(a, c, format='csr')
for format in formats:
c1 = mat.matvec(a, c1, format=format)
assert np.allclose(c, c1)
for dim in (2, 3):
b, d, d1 = work[dim]
for axis in range(0, dim):
d = mat.matvec(b, d, format='csr', axis=axis)
for format in formats:
d1 = mat.matvec(b, d1, format=format, axis=axis)
assert np.allclose(d, d1)
@pytest.mark.parametrize('b0,b1', jbases2)
@pytest.mark.parametrize('quad', jquads)
@pytest.mark.parametrize('format', ('dia', 'python'))
@pytest.mark.parametrize('k0,k1', product((0, 1, 2), (0, 1, 2)))
def test_jmatvec(b0, b1, quad, format, k0, k1):
"""Testq matrix-vector product"""
global c, c1
b0 = b0(N, quad=quad)
b1 = b1(N, quad=quad)
mat = inner_product((b0, k0), (b1, k1))
c = mat.matvec(a, c, format='csr')
formats = mat._matvec_methods + ['python', 'csr']
for format in formats:
c1 = mat.matvec(a, c1, format=format)
assert np.allclose(c, c1)
dim = 2
b, d, d1 = work[dim]
for axis in range(0, dim):
d = mat.matvec(b, d, format='csr', axis=axis)
for format in formats:
d1 = mat.matvec(b, d1, format=format, axis=axis)
assert np.allclose(d, d1)
def test_eq():
m0 = SparseMatrix({0: 1, 2: 2}, (6, 6))
m1 = SparseMatrix({0: 1., 2: 2.}, (6, 6))
assert m0 == m1
assert m0 is not m1
m2 = SparseMatrix({0: 1., 2: 3.}, (6, 6))
assert m0 != m2
@pytest.mark.parametrize('key, mat, quad', some_mats_and_quads)
def test_imul(key, mat, quad):
test = key[0]
trial = key[1]
measure = 1
if len(key) == 4:
domain = key[2]
measure = key[3]
if quad == 'GL':
return
if trial[0] in lBasisLG+cBasisGC and quad == 'GL':
return
t0 = test[0]
t1 = trial[0]
if len(key) == 4:
t0 = functools.partial(t0, domain=domain)
t1 = functools.partial(t1, domain=domain)
testfunction = (t0(N, quad=quad), test[1])
trialfunction = (t1(N, quad=quad), trial[1])
try:
mat = mat(testfunction, trialfunction, measure=measure)
except AssertionError:
return
mc = mat.scale
mat *= 2
assert mat.scale == 2.0*mc
mat.scale = mc
mat = SparseMatrix(copy(dict(mat)), mat.shape)
mat *= 2
assert mat.scale == 2.0
@pytest.mark.parametrize('key, mat, quad', some_mats_and_quads)
def test_mul(key, mat, quad):
test = key[0]
trial = key[1]
measure = 1
if len(key) == 4:
domain = key[2]
measure = key[3]
if quad == 'GL':
return
if trial[0] in lBasisLG+cBasisGC and quad == 'GL':
return
t0 = test[0]
t1 = trial[0]
if len(key) == 4:
t0 = functools.partial(t0, domain=domain)
t1 = functools.partial(t1, domain=domain)
testfunction = (t0(N, quad=quad), test[1])
trialfunction = (t1(N, quad=quad), trial[1])
try:
m = mat(testfunction, trialfunction, measure=measure)
except AssertionError:
return
mc = 2.*m
assert mc.scale == 2.0*m.scale
mat = SparseMatrix(copy(dict(m)), m.shape)
mc = 2.*mat
assert mc.scale == 2.0
def test_mul2():
mat = SparseMatrix({0: 1}, (3, 3))
v = np.ones(3)
c = mat * v
assert np.allclose(c, 1)
mat = SparseMatrix({-2:1, -1:1, 0: 1, 1:1, 2:1}, (3, 3))
c = mat * v
assert np.allclose(c, 3)
SD = FunctionSpace(8, "L", bc=(0, 0), scaled=True)
u = shenfun.TrialFunction(SD)
v = shenfun.TestFunction(SD)
mat = inner(grad(u), grad(v))
z = Function(SD, val=1)
c = mat * z
assert np.allclose(c[:6], 1)
@pytest.mark.parametrize('key, mat, quad', some_mats_and_quads)
def test_rmul(key, mat, quad):
test = key[0]
trial = key[1]
measure = 1
if len(key) == 4:
domain = key[2]
measure = key[3]
if quad == 'GL':
return
if trial[0] in lBasisLG+cBasisGC and quad == 'GL':
return
t0 = test[0]
t1 = trial[0]
if len(key) == 4:
t0 = functools.partial(t0, domain=domain)
t1 = functools.partial(t1, domain=domain)
testfunction = (t0(N, quad=quad), test[1])
trialfunction = (t1(N, quad=quad), trial[1])
try:
m = mat(testfunction, trialfunction, measure=measure)
except AssertionError:
return
mc = m*2.
assert mc.scale == 2.0*m.scale
mat = SparseMatrix(copy(dict(m)), m.shape)
mc = mat*2.
assert mc.scale == 2.0
@pytest.mark.parametrize('key, mat, quad', some_mats_and_quads)
def test_div(key, mat, quad):
test = key[0]
trial = key[1]
measure = 1
if len(key) == 4:
domain = key[2]
measure = key[3]
if quad == 'GL':
return
if trial[0] in lBasisLG+cBasisGC and quad == 'GL':
return
t0 = test[0]
t1 = trial[0]
if len(key) == 4:
t0 = functools.partial(t0, domain=domain)
t1 = functools.partial(t1, domain=domain)
testfunction = (t0(N, quad=quad), test[1])
trialfunction = (t1(N, quad=quad), trial[1])
try:
m = mat(testfunction, trialfunction, measure=measure)
except AssertionError:
return
mc = m/2.
assert mc.scale == 0.5*m.scale
mat = SparseMatrix(copy(dict(m)), m.shape)
mc = mat/2.
assert mc.scale == 0.5
@pytest.mark.parametrize('basis, quad', list(product(cBasis, cquads))+
list(product(lBasis, lquads))+list(product(lagBasis, lagquads)))
def test_div2(basis, quad):
B = basis(8, quad=quad)
u = shenfun.TrialFunction(B)
v = shenfun.TestFunction(B)
m = inner(u, v)
z = Function(B, val=1)
c = m / z
#m2 = m.diags('csr')
#c2 = spsolve(m2, z[B.slice()])
c2 = Function(B)
c2 = m.solve(z, c2)
assert np.allclose(c2[B.slice()], c[B.slice()])
@pytest.mark.parametrize('key, mat, quad', some_mats_and_quads)
def test_add(key, mat, quad):
test = key[0]
trial = key[1]
measure = 1
if len(key) == 4:
domain = key[2]
measure = key[3]
if quad == 'GL':
return
if trial[0] in lBasisLG+cBasisGC and quad == 'GL':
return
t0 = test[0]
t1 = trial[0]
if len(key) == 4:
t0 = functools.partial(t0, domain=domain)
t1 = functools.partial(t1, domain=domain)
testfunction = (t0(N, quad=quad), test[1])
trialfunction = (t1(N, quad=quad), trial[1])
try:
m = mat(testfunction, trialfunction, measure=measure)
except AssertionError:
return
mc = m + m
assert np.linalg.norm(mc.diags('csr').data-m.diags('csr').data*2) < 1e-8
mat = SparseMatrix(copy(dict(m)), m.shape, m.scale)
mc = m + mat
assert np.linalg.norm(mc.diags('csr').data-m.diags('csr').data*2) < 1e-8
@pytest.mark.parametrize('key, mat, quad', some_mats_and_quads)
def test_iadd(key, mat, quad):
test = key[0]
trial = key[1]
measure = 1
if len(key) == 4:
domain = key[2]
measure = key[3]
if quad == 'GL':
return
if trial[0] in lBasisLG+cBasisGC and quad == 'GL':
return
t0 = test[0]
t1 = trial[0]
if len(key) == 4:
t0 = functools.partial(t0, domain=domain)
t1 = functools.partial(t1, domain=domain)
testfunction = (t0(N, quad=quad), test[1])
trialfunction = (t1(N, quad=quad), trial[1])
try:
m = mat(testfunction, trialfunction, measure=measure)
except AssertionError:
return
mc = m.copy()
m += mc
assert np.linalg.norm(m.diags('csr').data-mc.diags('csr').data*2) < 1e-8
m -= 2*mc
assert np.linalg.norm(m.diags('csr').data) < 1e-8
@pytest.mark.parametrize('key, mat, quad', some_mats_and_quads)
def test_isub(key, mat, quad):
test = key[0]
trial = key[1]
measure = 1
if len(key) == 4:
domain = key[2]
measure = key[3]
if quad == 'GL':
return
if trial[0] in lBasisLG+cBasisGC and quad == 'GL':
return
t0 = test[0]
t1 = trial[0]
if len(key) == 4:
t0 = functools.partial(t0, domain=domain)
t1 = functools.partial(t1, domain=domain)
testfunction = (t0(N, quad=quad), test[1])
trialfunction = (t1(N, quad=quad), trial[1])
try:
m = mat(testfunction, trialfunction, measure=measure)
except AssertionError:
return
mc = m.copy()
m -= mc
assert np.linalg.norm(m.diags('csr').data) < 1e-8
m1 = SparseMatrix(deepcopy(dict(mc)), m.shape)
m2 = SparseMatrix(deepcopy(dict(mc)), m.shape)
m1 -= m2
assert np.linalg.norm(m1.diags('csr').data) < 1e-8
@pytest.mark.parametrize('key, mat, quad', some_mats_and_quads)
def test_sub(key, mat, quad):
test = key[0]
trial = key[1]
measure = 1
if len(key) == 4:
domain = key[2]
measure = key[3]
if quad == 'GL':
return
if trial[0] in lBasisLG+cBasisGC and quad == 'GL':
return
t0 = test[0]
t1 = trial[0]
if len(key) == 4:
t0 = functools.partial(t0, domain=domain)
t1 = functools.partial(t1, domain=domain)
testfunction = (t0(N, quad=quad), test[1])
trialfunction = (t1(N, quad=quad), trial[1])
try:
m = mat(testfunction, trialfunction, measure=measure)
except AssertionError:
return
mc = m - m
assert np.linalg.norm(mc.diags('csr').data) < 1e-8
m1 = SparseMatrix(copy(dict(m)), m.shape)
m2 = SparseMatrix(copy(dict(m)), m.shape)
mc = m1 - m2
assert np.linalg.norm(mc.diags('csr').data) < 1e-8
allaxes2D = {0: (0, 1), 1: (1, 0)}
allaxes3D = {0: (0, 1, 2), 1: (1, 0, 2), 2: (2, 0, 1)}
@pytest.mark.parametrize('axis', (0, 1, 2))
@pytest.mark.parametrize('family', ('chebyshev',))
def test_helmholtz3D(family, axis):
la = cla
N = (8, 9, 10)
SD = FunctionSpace(N[allaxes3D[axis][0]], family=family, bc=(0, 0))
K1 = FunctionSpace(N[allaxes3D[axis][1]], family='F', dtype='D')
K2 = FunctionSpace(N[allaxes3D[axis][2]], family='F', dtype='d')
subcomms = mpi4py_fft.pencil.Subcomm(MPI.COMM_WORLD, [0, 1, 1])
bases = [0]*3
bases[allaxes3D[axis][0]] = SD
bases[allaxes3D[axis][1]] = K1
bases[allaxes3D[axis][2]] = K2
T = TensorProductSpace(subcomms, bases, axes=allaxes3D[axis], modify_spaces_inplace=True)
u = shenfun.TrialFunction(T)
v = shenfun.TestFunction(T)
mat = inner(v, div(grad(u)))
H = la.Helmholtz(*mat)
u = Function(T)
s = SD.sl[SD.slice()]
u[s] = np.random.random(u[s].shape) + 1j*np.random.random(u[s].shape)
f = Function(T)
f = H.matvec(u, f)
g0 = Function(T)
g1 = Function(T)
mat = H.tpmats
M = {d.get_key(): d for d in mat}
g0 = M['ASDSDmat'].matvec(u, g0)
g1 = M['BSDSDmat'].matvec(u, g1)
assert np.linalg.norm(f-(g0+g1)) < 1e-12, np.linalg.norm(f-(g0+g1))
uc = Function(T)
uc = H(f, uc)
assert np.linalg.norm(uc-u) < 1e-12
@pytest.mark.parametrize('axis', (0, 1))
@pytest.mark.parametrize('family', ('chebyshev',))
def test_helmholtz2D(family, axis):
la = cla
N = (8, 9)
SD = FunctionSpace(N[axis], family=family, bc=(0, 0))
K1 = FunctionSpace(N[(axis+1)%2], family='F', dtype='d')
subcomms = mpi4py_fft.pencil.Subcomm(MPI.COMM_WORLD, allaxes2D[axis])
bases = [K1]
bases.insert(axis, SD)
T = TensorProductSpace(subcomms, bases, axes=allaxes2D[axis], modify_spaces_inplace=True)
u = shenfun.TrialFunction(T)
v = shenfun.TestFunction(T)
mat = inner(v, div(grad(u)))
H = la.Helmholtz(*mat)
u = Function(T)
s = SD.sl[SD.slice()]
u[s] = np.random.random(u[s].shape) + 1j*np.random.random(u[s].shape)
f = Function(T)
f = H.matvec(u, f)
g0 = Function(T)
g1 = Function(T)
mat = H.tpmats
M = {d.get_key(): d for d in mat}
g0 = M['ASDSDmat'].matvec(u, g0)
g1 = M['BSDSDmat'].matvec(u, g1)
assert np.linalg.norm(f-(g0+g1)) < 1e-12, np.linalg.norm(f-(g0+g1))
uc = Function(T)
uc = H(f, uc)
assert np.linalg.norm(uc-u) < 1e-12
@pytest.mark.parametrize('axis', (0, 1, 2))
@pytest.mark.parametrize('family', ('chebyshev',))
def test_biharmonic3D(family, axis):
la = cla
N = (16, 16, 16)
SD = FunctionSpace(N[allaxes3D[axis][0]], family=family, bc=(0, 0, 0, 0))
K1 = FunctionSpace(N[allaxes3D[axis][1]], family='F', dtype='D')
K2 = FunctionSpace(N[allaxes3D[axis][2]], family='F', dtype='d')
subcomms = mpi4py_fft.pencil.Subcomm(MPI.COMM_WORLD, [0, 1, 1])
bases = [0]*3
bases[allaxes3D[axis][0]] = SD
bases[allaxes3D[axis][1]] = K1
bases[allaxes3D[axis][2]] = K2
T = TensorProductSpace(subcomms, bases, axes=allaxes3D[axis])
u = shenfun.TrialFunction(T)
v = shenfun.TestFunction(T)
mat = inner(v, div(grad(div(grad(u)))))
H = la.Biharmonic(*mat)
u = Function(T)
u[:] = np.random.random(u.shape) + 1j*np.random.random(u.shape)
f = Function(T)
f = H.matvec(u, f)
g0 = Function(T)
g1 = Function(T)
g2 = Function(T)
mat = H.tpmats
M = {d.get_key(): d for d in mat}
g0 = M['SSBSBmat'].matvec(u, g0)
g1 = M['ASBSBmat'].matvec(u, g1)
g2 = M['BSBSBmat'].matvec(u, g2)
assert np.linalg.norm(f-(g0+g1+g2)) < 1e-8, np.linalg.norm(f-(g0+g1+g2))
@pytest.mark.parametrize('axis', (0, 1))
@pytest.mark.parametrize('family', ('chebyshev',))
def test_biharmonic2D(family, axis):
la = cla
N = (16, 16)
SD = FunctionSpace(N[axis], family=family, bc=(0, 0, 0, 0))
K1 = FunctionSpace(N[(axis+1)%2], family='F', dtype='d')
subcomms = mpi4py_fft.pencil.Subcomm(MPI.COMM_WORLD, allaxes2D[axis])
bases = [K1]
bases.insert(axis, SD)
T = TensorProductSpace(subcomms, bases, axes=allaxes2D[axis])
u = shenfun.TrialFunction(T)
v = shenfun.TestFunction(T)
mat = inner(v, div(grad(div(grad(u)))))
H = la.Biharmonic(*mat)
u = Function(T)
u[:] = np.random.random(u.shape) + 1j*np.random.random(u.shape)
f = Function(T)
f = H.matvec(u, f)
g0 = Function(T)
g1 = Function(T)
g2 = Function(T)
mat = H.tpmats
M = {d.get_key(): d for d in mat}
g0 = M['SSBSBmat'].matvec(u, g0)
g1 = M['ASBSBmat'].matvec(u, g1)
g2 = M['BSBSBmat'].matvec(u, g2)
assert np.linalg.norm(f-(g0+g1+g2)) < 1e-8
D = FunctionSpace(8, 'C', bc=(0, 0))
D0 = FunctionSpace(8, 'C')
F = FunctionSpace(8, 'F', dtype='d')
F2 = FunctionSpace(8, 'F', dtype='D')
@pytest.mark.parametrize('bases', ((D, D0), (D, F), (D, D0), (D, D0, F),
(D, F2, F), (D0, F2, F)))
def test_blockmatrix(bases):
T = TensorProductSpace(comm, bases)
V = VectorSpace(T)
u = TrialFunction(V)
v = TestFunction(V)
A = inner(u, v)
B = BlockMatrix(A)
uh = Function(V, val=1)
c = Function(V)
c = B.matvec(uh, c, use_scipy=True)
c2 = Function(V)
c2 = B.matvec(uh, c2, use_scipy=False)
assert np.linalg.norm(c2-c) < 1e-8
VQ = CompositeSpace([V, T])
u, p = TrialFunction(VQ)
v, q = TestFunction(VQ)
A2 = inner(u, v) + [inner(p, q)]
B2 = BlockMatrix(A2)
uh = Function(VQ, val=1)
c = Function(VQ)
c = B2.matvec(uh, c, use_scipy=True)
c2 = Function(VQ)
c2 = B2.matvec(uh, c2, use_scipy=False)
assert np.linalg.norm(c2-c) < 1e-8
if __name__ == '__main__':
import sympy as sp
x = sp.symbols('x', real=True)
xp = sp.Symbol('x', real=True, positive=True)
#test_mat(((cbases.ShenBiharmonic, 0), (cbases.ShenDirichlet, 0)), cmatrices.BSBSDmat, 'GL')
#test_mat(*cmats_and_quads[12])
#test_cmatvec(cBasis[2], cBasis[2], 'GC', 2)
#test_lmatvec(lBasis[0], lBasis[0], 'LG', 2, 0)
#test_lagmatvec(lagBasis[0], lagBasis[1], 'LG', 'python', 3, 2, 0)
#test_hmatvec(hBasis[0], hBasis[0], 'HG', 'self', 3, 1, 1)
#test_isub(((cbases.ShenNeumann, 0), (cbases.ShenDirichlet, 1)), cmatrices.CSNSDmat, 'GC')
#test_add(((cbases.Orthogonal, 0), (cbases.ShenDirichlet, 1)), cmatrices.CTSDmat, 'GC')
test_blockmatrix((D, F2, F))
#test_sub(*mats_and_quads[15])
#test_mul2()
#test_div2(cBasis[1], 'GC')
#test_helmholtz2D('legendre', 1)
#test_helmholtz3D('chebyshev', 0)
#test_biharmonic3D('chebyshev', 0)
#test_biharmonic2D('jacobi', 0)
|
doc/examples/scripts/sequence/homolog_msa.py | danijoo/biotite | 208 | 12631515 | """
Multiple sequence alignment of Cas9 homologs
============================================
This script searches for proteins homologous to Cas9 from
*Streptococcus pyogenes* via NCBI BLAST and performs a multiple
sequence alignment of the hit sequences afterwards, using MUSCLE.
"""
# Code source: <NAME>
# License: BSD 3 clause
from tempfile import gettempdir
import biotite.sequence as seq
import biotite.sequence.io.fasta as fasta
import biotite.sequence.graphics as graphics
import biotite.application.muscle as muscle
import biotite.application.blast as blast
import biotite.database.entrez as entrez
import matplotlib.pyplot as plt
# Download sequence of Streptococcus pyogenes Cas9
file_name = entrez.fetch("Q99ZW2", gettempdir(), "fa", "protein", "fasta")
fasta_file = fasta.FastaFile.read(file_name)
ref_seq = fasta.get_sequence(fasta_file)
# Find homologous proteins using NCBI Blast
# Search only the UniProt/SwissProt database
blast_app = blast.BlastWebApp("blastp", ref_seq, "swissprot", obey_rules=False)
blast_app.start()
blast_app.join()
alignments = blast_app.get_alignments()
# Get hit IDs for hits with score > 200
hits = []
for ali in alignments:
if ali.score > 200:
hits.append(ali.hit_id)
# Get the sequences from hit IDs
hit_seqs = []
for hit in hits:
file_name = entrez.fetch(hit, gettempdir(), "fa", "protein", "fasta")
fasta_file = fasta.FastaFile.read(file_name)
hit_seqs.append(fasta.get_sequence(fasta_file))
# Perform a multiple sequence alignment using MUSCLE
app = muscle.MuscleApp(hit_seqs)
app.start()
app.join()
alignment = app.get_alignment()
# Print the MSA with hit IDs
print("MSA results:")
gapped_seqs = alignment.get_gapped_sequences()
for i in range(len(gapped_seqs)):
print(hits[i], " "*3, gapped_seqs[i])
# Visualize the first 200 columns of the alignment
# Reorder alignments to reflect sequence distance
fig = plt.figure(figsize=(8.0, 8.0))
ax = fig.add_subplot(111)
order = app.get_alignment_order()
graphics.plot_alignment_type_based(
ax, alignment[:200, order.tolist()], labels=[hits[i] for i in order],
show_numbers=True, color_scheme="clustalx"
)
fig.tight_layout()
plt.show() |
demo/mpi-ref-v1/ex-2.34.py | gmdzy2010/mpi4py | 533 | 12631534 | ## mpiexec -n 2 python ex-2.34.py
# Use of ready-mode and synchronous-mode
# --------------------------------------------------------------------
from mpi4py import MPI
try:
import numpy
except ImportError:
raise SystemExit
if MPI.COMM_WORLD.Get_size() < 2:
raise SystemExit
# --------------------------------------------------------------------
comm = MPI.COMM_WORLD
buff = numpy.empty((1000,2), dtype='f', order='fortran')
rank = comm.Get_rank()
if rank == 0:
req1 = comm.Irecv([buff[:, 0], MPI.FLOAT], 1, 1)
req2 = comm.Irecv([buff[:, 1], MPI.FLOAT], 1, 2)
status = [MPI.Status(), MPI.Status()]
MPI.Request.Waitall([req1, req2], status)
elif rank == 1:
buff[:, 0] = 5
buff[:, 1] = 7
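    # Editor's note: the synchronous send below completes only after rank 0 has
    # started the matching receive (tag 2); because rank 0 posted the tag-1
    # receive first, that receive is then guaranteed to be pending as well,
    # which is the precondition the ready-mode send (Rsend) relies on.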
comm.Ssend([buff[:, 1], MPI.FLOAT], 0, 2)
comm.Rsend([buff[:, 0], MPI.FLOAT], 0, 1)
# --------------------------------------------------------------------
all = numpy.all
if rank == 0:
assert all(buff[:, 0] == 5)
assert all(buff[:, 1] == 7)
assert status[0].source == 1
assert status[0].tag == 1
assert status[1].source == 1
assert status[1].tag == 2
# --------------------------------------------------------------------
|
wa/workloads/androbench/__init__.py | robF8/workload-automation | 159 | 12631538 | <filename>wa/workloads/androbench/__init__.py
# Copyright 2014-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
from wa import ApkUiautoWorkload
from wa.framework.exception import WorkloadError
class Androbench(ApkUiautoWorkload):
name = 'androbench'
package_names = ['com.andromeda.androbench2']
regex_matches = [re.compile(r'Sequential Read Score ([\d.]+)'),
re.compile(r'Sequential Write Score ([\d.]+)'),
re.compile(r'Random Read Score ([\d.]+)'),
re.compile(r'Random Write Score ([\d.]+)'),
re.compile(r'SQL Insert Score ([\d.]+)'),
re.compile(r'SQL Update Score ([\d.]+)'),
re.compile(r'SQL Delete Score ([\d.]+)')]
description = '''
Executes storage performance benchmarks
The Androbench workflow carries out the following typical productivity tasks.
1. Open Androbench application
2. Execute all memory benchmarks
Known working APK version: 5.0.1
'''
def update_output(self, context):
super(Androbench, self).update_output(context)
expected_results = len(self.regex_matches)
logcat_file = context.get_artifact_path('logcat')
with open(logcat_file, errors='replace') as fh:
for line in fh:
for regex in self.regex_matches:
match = regex.search(line)
if match:
result = float(match.group(1))
entry = regex.pattern.rsplit(None, 1)[0]
context.add_metric(entry, result, 'MB/s', lower_is_better=False)
expected_results -= 1
if expected_results > 0:
msg = "The Androbench workload has failed. Expected {} scores, Detected {} scores."
raise WorkloadError(msg.format(len(self.regex_matches), expected_results))
|
aliyun-python-sdk-bssopenapi/aliyunsdkbssopenapi/request/v20171214/QueryEvaluateListRequest.py | leafcoder/aliyun-openapi-python-sdk | 1,001 | 12631541 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkbssopenapi.endpoint import endpoint_data
class QueryEvaluateListRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'BssOpenApi', '2017-12-14', 'QueryEvaluateList')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_EndSearchTime(self):
return self.get_query_params().get('EndSearchTime')
def set_EndSearchTime(self,EndSearchTime):
self.add_query_param('EndSearchTime',EndSearchTime)
def get_OutBizId(self):
return self.get_query_params().get('OutBizId')
def set_OutBizId(self,OutBizId):
self.add_query_param('OutBizId',OutBizId)
def get_SortType(self):
return self.get_query_params().get('SortType')
def set_SortType(self,SortType):
self.add_query_param('SortType',SortType)
def get_Type(self):
return self.get_query_params().get('Type')
def set_Type(self,Type):
self.add_query_param('Type',Type)
def get_PageNum(self):
return self.get_query_params().get('PageNum')
def set_PageNum(self,PageNum):
self.add_query_param('PageNum',PageNum)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_EndAmount(self):
return self.get_query_params().get('EndAmount')
def set_EndAmount(self,EndAmount):
self.add_query_param('EndAmount',EndAmount)
def get_BillCycle(self):
return self.get_query_params().get('BillCycle')
def set_BillCycle(self,BillCycle):
self.add_query_param('BillCycle',BillCycle)
def get_BizTypeLists(self):
return self.get_query_params().get('BizTypeList')
def set_BizTypeLists(self, BizTypeLists):
for depth1 in range(len(BizTypeLists)):
if BizTypeLists[depth1] is not None:
self.add_query_param('BizTypeList.' + str(depth1 + 1) , BizTypeLists[depth1])
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_StartSearchTime(self):
return self.get_query_params().get('StartSearchTime')
def set_StartSearchTime(self,StartSearchTime):
self.add_query_param('StartSearchTime',StartSearchTime)
def get_EndBizTime(self):
return self.get_query_params().get('EndBizTime')
def set_EndBizTime(self,EndBizTime):
self.add_query_param('EndBizTime',EndBizTime)
def get_StartAmount(self):
return self.get_query_params().get('StartAmount')
def set_StartAmount(self,StartAmount):
self.add_query_param('StartAmount',StartAmount)
def get_StartBizTime(self):
return self.get_query_params().get('StartBizTime')
def set_StartBizTime(self,StartBizTime):
self.add_query_param('StartBizTime',StartBizTime) |
4-Collections and Iterators/collections_and_iterators.py | medav/Data-Structure-Zoo | 305 | 12631549 | <reponame>medav/Data-Structure-Zoo
""" Collections
<NAME> 2015
"""
class SinglyLinkedList(object):
def __init__(self):
self.head = None
self.size = 0
self.cursor = None
def __len__(self):
return self.size
def __iter__(self):
return self
def __contains__(self, item):
if self.head is not None:
cur = self.head
while cur is not None:
                if cur.data == item:  # value equality, as expected for the "in" operator
return True
cur = cur.next
return False
""" Both getitem and setitem represent the magic methods
for the object[index] and object[index] = other operations
for linked lists they run in O(n) time making them less
efficient than a list() for lookups
"""
def __getitem__(self, index):
if index >= self.size or index < 0:
raise IndexError()
else:
cur = self.head
for x in xrange(index):
cur = cur.next
return cur.data
def __setitem__(self, index, value):
if index >= self.size or index < 0:
raise IndexError()
else:
cur = self.head
for x in xrange(index):
cur = cur.next
cur.data = value
def next(self):
if self.cursor is None:
raise StopIteration()
else:
node = self.cursor.data
self.cursor = self.cursor.next
            return node
    __next__ = next  # For Python 3.X compatibility (alias must follow the next() definition)
def append(self, data):
""" Note: The average time for append is O(n)
however, insertion is O(1), giving it an
advantage over arrays.
"""
if self.head is None:
self.head = SinglyLinkedNode(data)
self.cursor = self.head
else:
node = self.head
# This is a common pattern with linked lists
while node.next is not None:
node = node.next
# [node] [new_node]->None
new_node = SinglyLinkedNode(data)
# [node]->[new_node]->None
node.next = new_node
self.size += 1
class SinglyLinkedNode(object):
def __init__(self, data):
self.data = data
self.next = None
class DoublyLinkedList(SinglyLinkedList):
def __init__(self):
        # DRY: We're just going to inherit this for convenience
super(DoublyLinkedList, self).__init__()
def previous(self):
if self.cursor.prev is None:
raise StopIteration()
else:
self.cursor = self.cursor.prev
return self.cursor.data
def append(self, data):
""" Note: The average time for append is O(n)
however, insertion is O(1), giving it an
advantage over arrays.
"""
if self.head is None:
self.head = DoublyLinkedNode(data)
self.cursor = self.head
else:
node = self.head
# This is a common pattern with linked lists
while node.next is not None:
node = node.next
# A simple change to use the double node
# and link the previous
new_node = DoublyLinkedNode(data)
node.next = new_node
new_node.prev = node
self.size += 1
def insert(self, data, index):
if index >= self.size:
raise IndexError()
if self.head is None:
self.head = DoublyLinkedNode(data)
self.cursor = self.head
else:
# If this insertion should be an append
if index == self.size - 1:
# We've got a method for that!
                self.append(data)
                return  # append() has already updated self.size
# If a new head needs to be added
elif index == 0:
# _____ _____
# self.head|->| A | | C |
# <-|_____| |_____|
a = self.head
c = DoublyLinkedNode(data)
# _____ ______
# self.head|->| C |->| A |
# <-|_____|<-|_____|
self.head = c
c.next = a
a.prev = c
self.cursor = self.head
# It's between two nodes
else:
a = self.head
for x in xrange(index - 1):
a = a.next
# _____ _____
# | A |->| B |
# |_____|<-|_____|
b = a.next
c = DoublyLinkedNode(data)
# _____ _____ _____
# | A | | C |->| B |
# |_____| |_____|<-|____|
b.prev = c
c.next = b
# _____ _____ _____
# | A |->| C |->| B |
# |_____|<-|_____|<-|____|
a.next = c
c.prev = a
self.size += 1
class DoublyLinkedNode(SinglyLinkedNode):
def __init__(self, data):
# Staying DRY!
super(DoublyLinkedNode, self).__init__(data)
self.prev = None
if __name__ == '__main__':
test = SinglyLinkedList()
test.append(2)
test.append(3)
test.append(4)
print test[0]
print test[1]
print test[2]
    try:
        print test[-1]  # negative indices are not supported by this list
    except IndexError:
        print "IndexError raised for negative index, as expected"
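    # Editor's sketch (not part of the original demo): exercise the
    # DoublyLinkedList insert/append behaviour documented in the class above;
    # the values are arbitrary illustrations.
    dtest = DoublyLinkedList()
    for item in ['b', 'c', 'd']:
        dtest.append(item)
    dtest.insert('a', 0)   # becomes the new head
    dtest.insert('X', 2)   # spliced in between 'b' and 'c'
    print [dtest[i] for i in xrange(len(dtest))]  # ['a', 'b', 'X', 'c', 'd']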
|
nncf/torch/dynamic_graph/trace_functions.py | MaximProshin/nncf | 136 | 12631551 | <gh_stars>100-1000
"""
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from copy import deepcopy
from typing import Callable
from torch import Tensor
from nncf.torch.dynamic_graph.trace_tensor import flatten_args
from nncf.torch.dynamic_graph.trace_tensor import TracedTensor
def forward_trace_only(operator: Callable, *args, **kwargs):
"""
This wrapper override will result in the operator not being added to graph,
but the result will still have TracedTensors with parent IDs left the same as in input.
Useful for operators which are not likely to be present in patterns considered for
compression, but still have to be accounted for so that the NNCF internal graph representation
does not become disjoint.
"""
result = operator(*args, **kwargs)
fargs = flatten_args(args, kwargs)
input_traced_tensor_indices = [i for i in range(len(fargs)) if isinstance(fargs[i], TracedTensor)]
if isinstance(result, (list, tuple)):
output_tensors_to_be_traced_indices = [i for i in range(len(result)) if
isinstance(result[i], Tensor)]
was_tuple = isinstance(result, tuple)
result = list(result)
if len(input_traced_tensor_indices) == 1:
# Broadcast one and the same creator ID of input to all outputs
for out_idx in output_tensors_to_be_traced_indices:
forwarded_meta = deepcopy(fargs[input_traced_tensor_indices[0]].tensor_meta)
if forwarded_meta is not None:
forwarded_meta.shape = tuple(result[out_idx].shape)
result[out_idx] = TracedTensor.from_torch_tensor(result[out_idx],
forwarded_meta)
elif len(input_traced_tensor_indices) != len(output_tensors_to_be_traced_indices):
raise RuntimeError("Unable to forward trace through operator {} - "
"input and output tensor count mismatch!".format(operator.__name__))
else:
# Assume that output tensor order corresponds to input tensor order
for in_idx, out_idx in zip(input_traced_tensor_indices, output_tensors_to_be_traced_indices):
forwarded_meta = deepcopy(fargs[in_idx].tensor_meta)
if forwarded_meta is not None:
forwarded_meta.shape = tuple(result[out_idx].shape)
result[out_idx] = TracedTensor.from_torch_tensor(result[out_idx],
forwarded_meta)
if was_tuple:
result = tuple(result)
elif len(input_traced_tensor_indices) > 1:
raise RuntimeError("Unable to forward trace through operator {} - "
"input and output tensor count mismatch!".format(operator.__name__))
elif input_traced_tensor_indices:
forwarded_meta = deepcopy(fargs[input_traced_tensor_indices[0]].tensor_meta)
if forwarded_meta is not None:
forwarded_meta.shape = tuple(result.shape)
return TracedTensor.from_torch_tensor(result,
forwarded_meta)
# No traced tensors in input, return a usual torch.Tensor as well
return result
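# Editor's note (illustrative assumption, not part of NNCF's public API): the
# wrapper above would be used roughly like
#
#     import torch
#     result = forward_trace_only(torch.flatten, traced_input, 1)
#
# i.e. the wrapped operator executes normally while TracedTensor metadata from
# the inputs is forwarded to the outputs instead of adding a node to the graph.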
|
qcodes/tests/dataset/test_concurrent_datasets.py | riju-pal/QCoDeS_riju | 223 | 12631559 | """
Test that multiple datasets can coexist as expected
"""
import pytest
from qcodes import new_experiment
from qcodes.dataset.data_set import DataSet
def test_foreground_after_background_raises(empty_temp_db_connection):
new_experiment("test", "test1", conn=empty_temp_db_connection)
ds1 = DataSet(conn=empty_temp_db_connection)
ds1.mark_started(start_bg_writer=True)
ds2 = DataSet(conn=empty_temp_db_connection)
with pytest.raises(RuntimeError, match="All datasets written"):
ds2.mark_started(start_bg_writer=False)
def test_background_after_foreground_raises(empty_temp_db_connection):
new_experiment("test", "test1", conn=empty_temp_db_connection)
ds1 = DataSet(conn=empty_temp_db_connection)
ds1.mark_started(start_bg_writer=False)
ds2 = DataSet(conn=empty_temp_db_connection)
with pytest.raises(RuntimeError, match="All datasets written"):
ds2.mark_started(start_bg_writer=True)
def test_background_twice(empty_temp_db_connection):
new_experiment("test", "test1", conn=empty_temp_db_connection)
ds1 = DataSet(conn=empty_temp_db_connection)
ds1.mark_started(start_bg_writer=True)
ds2 = DataSet(conn=empty_temp_db_connection)
ds2.mark_started(start_bg_writer=True)
def test_foreground_twice(empty_temp_db_connection):
new_experiment("test", "test1", conn=empty_temp_db_connection)
ds1 = DataSet(conn=empty_temp_db_connection)
ds1.mark_started(start_bg_writer=False)
ds2 = DataSet(conn=empty_temp_db_connection)
ds2.mark_started(start_bg_writer=False)
def test_foreground_after_background_non_concurrent(empty_temp_db_connection):
new_experiment("test", "test1", conn=empty_temp_db_connection)
ds1 = DataSet(conn=empty_temp_db_connection)
ds1.mark_started(start_bg_writer=True)
ds1.mark_completed()
ds2 = DataSet(conn=empty_temp_db_connection)
ds2.mark_started(start_bg_writer=False)
ds2.mark_completed()
ds3 = DataSet(conn=empty_temp_db_connection)
ds3.mark_started(start_bg_writer=True)
ds3.mark_completed()
|
mp_sort/virtenv/lib/python3.6/site-packages/transcrypt/modules/logging/handlers.py | ang-jason/fip_powerx_mini_projects-foxtrot | 2,200 | 12631567 | <gh_stars>1000+
# Copyright 2001-2016 by <NAME>. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python.
Copyright (C) 2001-2016 <NAME>. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
Edited by <NAME> 2016 for the Transcrypt project
I've kept some of the handlers but I've pruned anything related to
the file system at this point. There is a stub in preparation for
an AJAX based handler that will likely need to be coupled with
a QueueHandler and maybe a buffered handler.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
This code is not well tested yet!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
"""
from org.transcrypt.stubs.browser import __pragma__
import logging
import re
#from stat import ST_DEV, ST_INO, ST_MTIME
#import queue
# try:
# import threading
# except ImportError: #pragma: no cover
# threading = None
threading = None
#
# Some constants...
#
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514
_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
class AJAXHandler(logging.Handler):
"""
This class provides a means of pushing log records to a webserver
via AJAX requests to a host server. Likely will have cross-domain
restrictions unless you do something special.
"""
def __init__(self, url, method="GET", headers = []):
"""
Initialize the instance with url and the method type
("GET" or "POST")
@param url the page to send the messages to. Generally
        this is going to be an address relative to this host
        but could also be fully qualified
@param method "GET" or "POST"
@param headers list of tuples that contains the
headers that will be added to the HTTP request for the
AJAX push. None are applied by default.
"""
logging.Handler.__init__(self)
method = method.upper()
if method not in ["GET", "POST"]:
raise ValueError("method must be GET or POST")
self.url = url
self.method = method
self.headers = headers
def mapLogRecord(self, record):
"""
Default implementation of mapping the log record into a dict
that is sent as the CGI data. Overwrite in your class.
Contributed by <NAME>.
"""
return record.__dict__
def urlencode(self, msg):
""" Encode the passed string with escapes for
non-valid characters in a url.
"""
def repl(m):
v = m.group(0)
v = ord(v)
hVal = v.toString(16)
if ( len(hVal) == 1 ):
hVal = "0" + hVal
# Convert this value from a character into
# %xx format
hVal = "%" + hVal
return(hVal)
p = re.compile(r"[^-A-Za-z0-9\-\._~:/?#[\]@!$&'()\*+,;=`]")
ret = p.sub(repl, msg)
return(ret)
def emit(self, record):
"""
Emit a record.
Send the record to the Web server as a percent-encoded dictionary
"""
if ( type(record) is str ):
msg = record
else:
msg = self.format(record)
try:
url = self.url
data = None
if self.method == "GET":
if (url.find('?') >= 0):
sep = '&'
else:
sep = '?'
url = url + "{}msg={}".format(sep, msg)
url = self.urlencode(url)
else: # "POST"
data = "msg={}".format(msg)
data = self.urlencode(data)
def ajaxCallback():
# @note - we should probably do something
# like keep track of error messages and
# provide a counter of failed pushes ?
return(0)
conn = None
errObj = None
__pragma__('js', '{}',
'''
try {
conn = new(XMLHttpRequest || ActiveXObject)('MSXML2.XMLHTTP.3.0');
} catch( err ) {
errObj = err
}
''')
if ( errObj is not None ):
raise Exception( "Failed Create AJAX Request", errObj )
if ( conn is None ):
raise Exception("Invalid Ajax Object")
conn.open(self.method, url, 1);
for key,val in self.headers:
conn.setRequestHeader( key, val )
conn.onreadystatechange = ajaxCallback
conn.send(data)
except Exception:
self.handleError(record)
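# Editor's note: a minimal, hedged usage sketch for AJAXHandler; the "/log"
# endpoint and the header value below are illustrative assumptions, not part
# of this module:
#
#     import logging
#     from logging import handlers
#
#     ajax = handlers.AJAXHandler("/log", method="POST",
#                                 headers=[("Content-type",
#                                           "application/x-www-form-urlencoded")])
#     ajax.setFormatter(logging.Formatter("%(levelname)s:%(name)s:%(message)s"))
#     logging.getLogger().addHandler(ajax)
#     logging.getLogger().warning("pushed to the server via AJAX")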
class BufferingHandler(logging.Handler):
"""
A handler class which buffers logging records in memory. Whenever each
record is added to the buffer, a check is made to see if the buffer should
be flushed. If it should, then flush() is expected to do what's needed.
"""
def __init__(self, capacity):
"""
Initialize the handler with the buffer size.
"""
logging.Handler.__init__(self)
self.capacity = capacity
self.buffer = []
def shouldFlush(self, record):
"""
Should the handler flush its buffer?
Returns true if the buffer is up to capacity. This method can be
overridden to implement custom flushing strategies.
"""
return (len(self.buffer) >= self.capacity)
def emit(self, record):
"""
Emit a record.
Append the record. If shouldFlush() tells us to, call flush() to process
the buffer.
"""
self.buffer.append(record)
if self.shouldFlush(record):
self.flush()
def flush(self):
"""
Override to implement custom flushing behaviour.
This version just zaps the buffer to empty.
"""
self.acquire()
try:
self.buffer = []
finally:
self.release()
def close(self):
"""
Close the handler.
This version just flushes and chains to the parent class' close().
"""
try:
self.flush()
finally:
logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
"""
A handler class which buffers logging records in memory, periodically
flushing them to a target handler. Flushing occurs whenever the buffer
is full, or when an event of a certain severity or greater is seen.
"""
def __init__(self, capacity, flushLevel=logging.ERROR, target=None,
flushOnClose=True):
"""
Initialize the handler with the buffer size, the level at which
flushing should occur and an optional target.
Note that without a target being set either here or via setTarget(),
a MemoryHandler is no use to anyone!
The ``flushOnClose`` argument is ``True`` for backward compatibility
reasons - the old behaviour is that when the handler is closed, the
buffer is flushed, even if the flush level hasn't been exceeded nor the
capacity exceeded. To prevent this, set ``flushOnClose`` to ``False``.
"""
BufferingHandler.__init__(self, capacity)
self.flushLevel = flushLevel
self.target = target
# See Issue #26559 for why this has been added
self.flushOnClose = flushOnClose
def shouldFlush(self, record):
"""
Check for buffer full or a record at the flushLevel or higher.
"""
return (len(self.buffer) >= self.capacity) or \
(record.levelno >= self.flushLevel)
def setTarget(self, target):
"""
Set the target handler for this handler.
"""
self.target = target
def flush(self):
"""
For a MemoryHandler, flushing means just sending the buffered
records to the target, if there is one. Override if you want
different behaviour.
The record buffer is also cleared by this operation.
"""
self.acquire()
try:
if self.target:
for record in self.buffer:
self.target.handle(record)
self.buffer = []
finally:
self.release()
def close(self):
"""
Flush, if appropriately configured, set the target to None and lose the
buffer.
"""
try:
if self.flushOnClose:
self.flush()
finally:
self.acquire()
try:
self.target = None
BufferingHandler.close(self)
finally:
self.release()
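# Editor's note: a hedged sketch of pairing MemoryHandler with a target handler
# (the capacity, level and target below are illustrative assumptions):
#
#     buffered = MemoryHandler(50, flushLevel=logging.ERROR,
#                              target=AJAXHandler("/log", method="POST"))
#     logging.getLogger().addHandler(buffered)
#     # records accumulate until 50 are buffered or an ERROR-level record
#     # arrives, at which point the whole buffer is flushed to the target.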
class QueueHandler(logging.Handler):
"""
This handler sends events to a queue. Typically, it would be used together
with a multiprocessing Queue to centralise logging to file in one process
(in a multi-process application), so as to avoid file write contention
between processes.
    For Transcrypt, this may be useful for implementing a web worker in
the background for processing logs and sending them to the server or
elsewhere without blocking the main task.
This code is new in Python 3.2, but this class can be copy pasted into
user code for use with earlier Python versions.
"""
def __init__(self, queue):
"""
Initialise an instance, using the passed queue.
"""
logging.Handler.__init__(self)
raise NotImplementedError("No Working Implementation Yet")
#self.queue = queue
def enqueue(self, record):
"""
Enqueue a record.
The base implementation uses put_nowait. You may want to override
this method if you want to use blocking, timeouts or custom queue
implementations.
In transcrypt we will likely want to use a push to a webworker
here instead of a normal queue.
"""
self.queue.put_nowait(record)
def prepare(self, record):
"""
Prepares a record for queuing. The object returned by this method is
enqueued.
The base implementation formats the record to merge the message
and arguments, and removes unpickleable items from the record
in-place.
You might want to override this method if you want to convert
the record to a dict or JSON string, or send a modified copy
of the record while leaving the original intact.
"""
# The format operation gets traceback text into record.exc_text
# (if there's exception data), and also puts the message into
# record.message. We can then use this to replace the original
# msg + args, as these might be unpickleable. We also zap the
# exc_info attribute, as it's no longer needed and, if not None,
# will typically not be pickleable.
self.format(record)
record.msg = record.message
record.args = None
record.exc_info = None
return record
def emit(self, record):
"""
Emit a record.
Writes the LogRecord to the queue, preparing it for pickling first.
"""
try:
self.enqueue(self.prepare(record))
except Exception:
self.handleError(record)
|
tests/test_packages/test_contracts/test_erc1155/test_contract.py | bryanchriswhite/agents-aea | 126 | 12631588 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2020 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""The tests module contains the tests of the packages/contracts/erc1155 dir."""
import re
import time
from pathlib import Path
from typing import cast
from unittest import mock
import pytest
from aea_ledger_ethereum import EthereumCrypto
from aea_ledger_fetchai import FetchAIApi, FetchAICrypto
from aea.configurations.loader import (
ComponentType,
ContractConfig,
load_component_configuration,
)
from aea.contracts.base import Contract, contract_registry
from aea.test_tools.test_contract import BaseContractTestCase
from tests.conftest import (
ETHEREUM_ADDRESS_ONE,
ETHEREUM_ADDRESS_TWO,
ETHEREUM_PRIVATE_KEY_PATH,
ETHEREUM_PRIVATE_KEY_TWO_PATH,
ETHEREUM_TESTNET_CONFIG,
FETCHAI_TESTNET_CONFIG,
MAX_FLAKY_RERUNS,
ROOT_DIR,
UseGanache,
)
@pytest.mark.ledger
class TestERC1155ContractEthereum(BaseContractTestCase, UseGanache):
"""Test the ERC1155 contract on Ethereum."""
ledger_identifier = EthereumCrypto.identifier
path_to_contract = Path(ROOT_DIR, "packages", "fetchai", "contracts", "erc1155")
@classmethod
def setup(cls):
"""Setup."""
super().setup(
ledger_config=ETHEREUM_TESTNET_CONFIG,
deployer_private_key_path=ETHEREUM_PRIVATE_KEY_PATH,
item_owner_private_key_path=ETHEREUM_PRIVATE_KEY_TWO_PATH,
)
cls.token_ids_a = [
340282366920938463463374607431768211456,
340282366920938463463374607431768211457,
340282366920938463463374607431768211458,
340282366920938463463374607431768211459,
340282366920938463463374607431768211460,
340282366920938463463374607431768211461,
340282366920938463463374607431768211462,
340282366920938463463374607431768211463,
340282366920938463463374607431768211464,
340282366920938463463374607431768211465,
]
cls.token_id_b = 680564733841876926926749214863536422912
@classmethod
def finish_contract_deployment(cls) -> str:
"""
Finish deploying contract.
:return: contract address
"""
contract_address = cls.ledger_api.get_contract_address(
cls.deployment_tx_receipt
)
if contract_address is None:
raise ValueError("Contract address not found!") # pragma: nocover
return contract_address
def test_generate_token_ids(self):
"""Test the generate_token_ids method of the ERC1155 contract."""
# setup
nft_token_type = 1
nb_tokens = 2
        expected_token_ids = [
            340282366920938463463374607431768211456,
            340282366920938463463374607431768211457,
        ]
        # operation
        actual_token_ids = self.contract.generate_token_ids(nft_token_type, nb_tokens)
        # after
        assert actual_token_ids == expected_token_ids
def test_generate_id(self):
"""Test the _generate_id method of the ERC1155 contract."""
# setup
ft_token_type = 2
index = 0
        expected_token_id = 680564733841876926926749214863536422912
        # operation
        actual_token_id = self.contract._generate_id(index, ft_token_type)
        # after
        assert actual_token_id == expected_token_id
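        # Editor's note: these expected values follow from the id layout used by
        # the contract helpers: the token type sits in the high 128 bits of the
        # 256-bit id, so type 1 ids start at 1 << 128
        # (340282366920938463463374607431768211456) and type 2 ids start at
        # 2 << 128 (680564733841876926926749214863536422912), with the running
        # index in the low 128 bits.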
def test_get_create_batch_transaction(self):
"""Test the get_create_batch_transaction method of the ERC1155 contract."""
# operation
tx = self.contract.get_create_batch_transaction(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
deployer_address=self.deployer_crypto.address,
token_ids=self.token_ids_a,
)
# after
assert len(tx) == 7
assert all(
key in tx
for key in ["value", "chainId", "gas", "gasPrice", "nonce", "to", "data"]
)
self.sign_send_confirm_receipt_multisig_transaction(
tx, self.ledger_api, [self.deployer_crypto]
)
def test_get_create_single_transaction(self):
"""Test the get_create_single_transaction method of the ERC1155 contract."""
# operation
tx = self.contract.get_create_single_transaction(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
deployer_address=self.deployer_crypto.address,
token_id=self.token_id_b,
)
# after
assert len(tx) == 7
assert all(
key in tx
for key in ["value", "chainId", "gas", "gasPrice", "nonce", "to", "data"]
)
self.sign_send_confirm_receipt_multisig_transaction(
tx, self.ledger_api, [self.deployer_crypto]
)
def test_get_mint_batch_transaction(self):
"""Test the get_mint_batch_transaction method of the ERC1155 contract."""
# operation
tx = self.contract.get_mint_batch_transaction(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
deployer_address=self.deployer_crypto.address,
recipient_address=self.item_owner_crypto.address,
token_ids=self.token_ids_a,
mint_quantities=[1] * len(self.token_ids_a),
)
# after
assert len(tx) == 7
assert all(
key in tx
for key in ["value", "chainId", "gas", "gasPrice", "nonce", "to", "data"]
)
self.sign_send_confirm_receipt_multisig_transaction(
tx, self.ledger_api, [self.deployer_crypto]
)
def test_validate_mint_quantities(self):
"""Test the validate_mint_quantities method of the ERC1155 contract."""
# Valid NFTs
self.contract.validate_mint_quantities(
token_ids=self.token_ids_a, mint_quantities=[1] * len(self.token_ids_a),
)
# Valid FTs
token_id = 680564733841876926926749214863536422912
mint_quantity = 1
self.contract.validate_mint_quantities(
token_ids=[token_id], mint_quantities=[mint_quantity],
)
# Invalid NFTs
token_id = self.token_ids_a[0]
mint_quantity = 2
with pytest.raises(
ValueError,
match=re.escape(
f"Cannot mint NFT (token_id={token_id}) with mint_quantity more than 1 (found={mint_quantity})"
),
):
self.contract.validate_mint_quantities(
token_ids=[token_id], mint_quantities=[mint_quantity],
)
# Invalid: neither NFT nor FT
token_id = 1020847100762815390390123822295304634368
mint_quantity = 1
with pytest.raises(
ValueError,
match=re.escape(
f"The token type must be 1 or 2. Found type=3 for token_id={token_id}"
),
):
self.contract.validate_mint_quantities(
token_ids=[token_id], mint_quantities=[mint_quantity],
)
def test_decode_id(self):
"""Test the decode_id method of the ERC1155 contract."""
# FT
expected_token_type = 2
token_id = 680564733841876926926749214863536422912
actual_token_type = self.contract.decode_id(token_id)
assert actual_token_type == expected_token_type
# NFT
expected_token_type = 1
token_id = 340282366920938463463374607431768211456
actual_token_type = self.contract.decode_id(token_id)
assert actual_token_type == expected_token_type
def test_get_mint_single_transaction(self):
"""Test the get_mint_single_transaction method of the ERC1155 contract."""
# operation
tx = self.contract.get_mint_single_transaction(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
deployer_address=self.deployer_crypto.address,
recipient_address=self.item_owner_crypto.address,
token_id=self.token_id_b,
mint_quantity=1,
)
# after
assert len(tx) == 7
assert all(
key in tx
for key in ["value", "chainId", "gas", "gasPrice", "nonce", "to", "data"]
)
self.sign_send_confirm_receipt_multisig_transaction(
tx, self.ledger_api, [self.deployer_crypto]
)
def test_get_balance(self):
"""Test the get_balance method of the ERC1155 contract."""
# operation
result = self.contract.get_balance(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
agent_address=self.item_owner_crypto.address,
token_id=self.token_id_b,
)
# after
assert "balance" in result
assert result["balance"][self.token_id_b] == 0
def test_get_balances(self):
"""Test the get_balances method of the ERC1155 contract."""
# operation
result = self.contract.get_balances(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
agent_address=self.item_owner_crypto.address,
token_ids=self.token_ids_a,
)
# after
assert "balances" in result
assert all(result["balances"][token_id] == 0 for token_id in self.token_ids_a)
def test_get_hash_single(self):
"""Test the get_hash_single method of the ERC1155 contract."""
# operation
result = self.contract.get_hash_single(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
from_address=self.deployer_crypto.address,
to_address=self.item_owner_crypto.address,
token_id=self.token_id_b,
from_supply=0,
to_supply=10,
value=1,
trade_nonce=1,
)
# after
assert isinstance(result, bytes)
def test_get_hash_batch(self):
"""Test the get_hash_batch method of the ERC1155 contract."""
# operation
result = self.contract.get_hash_batch(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
from_address=self.deployer_crypto.address,
to_address=self.item_owner_crypto.address,
token_ids=self.token_ids_a,
from_supplies=[0, 1, 0, 0, 1, 0, 0, 0, 0, 1],
to_supplies=[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
value=1,
trade_nonce=1,
)
# after
assert isinstance(result, bytes)
def test_generate_trade_nonce(self):
"""Test the generate_trade_nonce method of the ERC1155 contract."""
# operation
result = self.contract.generate_trade_nonce(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
agent_address=self.item_owner_crypto.address,
)
# after
assert "trade_nonce" in result
assert isinstance(result["trade_nonce"], int)
@pytest.mark.integration
def test_helper_methods_and_get_transactions(self):
"""Test helper methods and get transactions."""
expected_a = [
340282366920938463463374607431768211456,
340282366920938463463374607431768211457,
340282366920938463463374607431768211458,
340282366920938463463374607431768211459,
340282366920938463463374607431768211460,
340282366920938463463374607431768211461,
340282366920938463463374607431768211462,
340282366920938463463374607431768211463,
340282366920938463463374607431768211464,
340282366920938463463374607431768211465,
]
actual = self.contract.generate_token_ids(token_type=1, nb_tokens=10)
assert expected_a == actual
expected_b = [
680564733841876926926749214863536422912,
680564733841876926926749214863536422913,
]
actual = self.contract.generate_token_ids(token_type=2, nb_tokens=2)
assert expected_b == actual
tx = self.contract.get_deploy_transaction(
ledger_api=self.ledger_api, deployer_address=ETHEREUM_ADDRESS_ONE
)
assert len(tx) == 6
data = tx.pop("data")
assert len(data) > 0 and data.startswith("0x")
assert all(
[key in tx for key in ["value", "from", "gas", "gasPrice", "nonce"]]
), "Error, found: {}".format(tx)
tx = self.contract.get_create_batch_transaction(
ledger_api=self.ledger_api,
contract_address=ETHEREUM_ADDRESS_ONE,
deployer_address=ETHEREUM_ADDRESS_ONE,
token_ids=expected_a,
)
assert len(tx) == 7
data = tx.pop("data")
assert len(data) > 0 and data.startswith("0x")
assert all(
[
key in tx
for key in ["value", "chainId", "gas", "gasPrice", "nonce", "to"]
]
), "Error, found: {}".format(tx)
tx = self.contract.get_create_single_transaction(
ledger_api=self.ledger_api,
contract_address=ETHEREUM_ADDRESS_ONE,
deployer_address=ETHEREUM_ADDRESS_ONE,
token_id=expected_b[0],
)
assert len(tx) == 7
data = tx.pop("data")
assert len(data) > 0 and data.startswith("0x")
assert all(
[
key in tx
for key in ["value", "chainId", "gas", "gasPrice", "nonce", "to"]
]
), "Error, found: {}".format(tx)
mint_quantities = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
tx = self.contract.get_mint_batch_transaction(
ledger_api=self.ledger_api,
contract_address=ETHEREUM_ADDRESS_ONE,
deployer_address=ETHEREUM_ADDRESS_ONE,
recipient_address=ETHEREUM_ADDRESS_ONE,
token_ids=expected_a,
mint_quantities=mint_quantities,
)
assert len(tx) == 7
data = tx.pop("data")
assert len(data) > 0 and data.startswith("0x")
assert all(
[
key in tx
for key in ["value", "chainId", "gas", "gasPrice", "nonce", "to"]
]
), "Error, found: {}".format(tx)
mint_quantity = 1
tx = self.contract.get_mint_single_transaction(
ledger_api=self.ledger_api,
contract_address=ETHEREUM_ADDRESS_ONE,
deployer_address=ETHEREUM_ADDRESS_ONE,
recipient_address=ETHEREUM_ADDRESS_ONE,
token_id=expected_b[1],
mint_quantity=mint_quantity,
)
assert len(tx) == 7
data = tx.pop("data")
assert len(data) > 0 and data.startswith("0x")
assert all(
[
key in tx
for key in ["value", "chainId", "gas", "gasPrice", "nonce", "to"]
]
), "Error, found: {}".format(tx)
@pytest.mark.integration
def test_get_single_atomic_swap(self):
"""Test get single atomic swap."""
from_address = ETHEREUM_ADDRESS_ONE
to_address = ETHEREUM_ADDRESS_TWO
token_id = self.contract.generate_token_ids(token_type=2, nb_tokens=1)[0]
from_supply = 0
to_supply = 10
value = 1
trade_nonce = 1
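        # flow: compute the trade hash off-chain, have a party sign it, then embed that signature in the atomic swap transaction below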
tx_hash = self.contract.get_hash_single(
self.ledger_api,
self.contract_address,
from_address,
to_address,
token_id,
from_supply,
to_supply,
value,
trade_nonce,
)
assert isinstance(tx_hash, bytes)
signature = self.deployer_crypto.sign_message(tx_hash)
tx = self.contract.get_atomic_swap_single_transaction(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
from_address=from_address,
to_address=to_address,
token_id=token_id,
from_supply=from_supply,
to_supply=to_supply,
value=value,
trade_nonce=trade_nonce,
signature=signature,
)
assert len(tx) == 8
data = tx.pop("data")
assert len(data) > 0 and data.startswith("0x")
assert all(
[
key in tx
for key in [
"value",
"chainId",
"gas",
"gasPrice",
"nonce",
"to",
"from",
]
]
), "Error, found: {}".format(tx)
@pytest.mark.integration
def test_get_batch_atomic_swap(self):
"""Test get batch atomic swap."""
from_address = ETHEREUM_ADDRESS_ONE
to_address = ETHEREUM_ADDRESS_TWO
token_ids = self.contract.generate_token_ids(token_type=2, nb_tokens=10)
from_supplies = [0, 1, 0, 0, 1, 0, 0, 0, 0, 1]
to_supplies = [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]
value = 1
trade_nonce = 1
tx_hash = self.contract.get_hash_batch(
self.ledger_api,
self.contract_address,
from_address,
to_address,
token_ids,
from_supplies,
to_supplies,
value,
trade_nonce,
)
assert isinstance(tx_hash, bytes)
signature = self.deployer_crypto.sign_message(tx_hash)
tx = self.contract.get_atomic_swap_batch_transaction(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
from_address=from_address,
to_address=to_address,
token_ids=token_ids,
from_supplies=from_supplies,
to_supplies=to_supplies,
value=value,
trade_nonce=trade_nonce,
signature=signature,
)
assert len(tx) == 8
data = tx.pop("data")
assert len(data) > 0 and data.startswith("0x")
assert all(
[
key in tx
for key in [
"value",
"chainId",
"gas",
"gasPrice",
"nonce",
"to",
"from",
]
]
), "Error, found: {}".format(tx)
@pytest.mark.integration
def test_full(self):
"""Setup."""
# Test tokens IDs
token_ids = self.contract.generate_token_ids(token_type=2, nb_tokens=10)
# create
tx = self.contract.get_create_batch_transaction(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
deployer_address=self.deployer_crypto.address,
token_ids=token_ids,
)
tx_signed = self.deployer_crypto.sign_transaction(tx)
tx_receipt = self.ledger_api.send_signed_transaction(tx_signed)
time.sleep(1)
receipt = self.ledger_api.get_transaction_receipt(tx_receipt)
assert self.ledger_api.is_transaction_settled(receipt)
mint_quantities = [10] * len(token_ids)
# mint
tx = self.contract.get_mint_batch_transaction(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
deployer_address=self.deployer_crypto.address,
recipient_address=self.deployer_crypto.address,
token_ids=token_ids,
mint_quantities=mint_quantities,
)
tx_signed = self.deployer_crypto.sign_transaction(tx)
tx_receipt = self.ledger_api.send_signed_transaction(tx_signed)
time.sleep(1)
receipt = self.ledger_api.get_transaction_receipt(tx_receipt)
assert self.ledger_api.is_transaction_settled(receipt)
tx = self.contract.get_mint_batch_transaction(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
deployer_address=self.deployer_crypto.address,
recipient_address=self.item_owner_crypto.address,
token_ids=token_ids,
mint_quantities=mint_quantities,
)
tx_signed = self.deployer_crypto.sign_transaction(tx)
tx_receipt = self.ledger_api.send_signed_transaction(tx_signed)
time.sleep(1)
receipt = self.ledger_api.get_transaction_receipt(tx_receipt)
assert self.ledger_api.is_transaction_settled(receipt)
# batch trade
from_address = self.deployer_crypto.address
to_address = self.item_owner_crypto.address
from_supplies = [0, 1, 0, 0, 1, 0, 0, 0, 0, 1]
to_supplies = [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]
value = 0
trade_nonce = 1
tx_hash = self.contract.get_hash_batch(
self.ledger_api,
self.contract_address,
from_address,
to_address,
token_ids,
from_supplies,
to_supplies,
value,
trade_nonce,
)
signature = self.item_owner_crypto.sign_message(
tx_hash, is_deprecated_mode=True
)
tx = self.contract.get_atomic_swap_batch_transaction(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
from_address=from_address,
to_address=to_address,
token_ids=token_ids,
from_supplies=from_supplies,
to_supplies=to_supplies,
value=value,
trade_nonce=trade_nonce,
signature=signature,
)
tx_signed = self.deployer_crypto.sign_transaction(tx)
tx_receipt = self.ledger_api.send_signed_transaction(tx_signed)
time.sleep(1)
receipt = self.ledger_api.get_transaction_receipt(tx_receipt)
assert self.ledger_api.is_transaction_settled(receipt)
class TestCosmWasmContract(BaseContractTestCase):
"""Test the cosmwasm contract."""
ledger_identifier = FetchAICrypto.identifier
path_to_contract = Path(ROOT_DIR, "packages", "fetchai", "contracts", "erc1155")
fund_from_faucet = True
@classmethod
def setup(cls):
"""Setup."""
# Test tokens IDs
super().setup(ledger_config=FETCHAI_TESTNET_CONFIG)
cls.token_ids_a = [
340282366920938463463374607431768211456,
340282366920938463463374607431768211457,
340282366920938463463374607431768211458,
340282366920938463463374607431768211459,
340282366920938463463374607431768211460,
340282366920938463463374607431768211461,
340282366920938463463374607431768211462,
340282366920938463463374607431768211463,
340282366920938463463374607431768211464,
340282366920938463463374607431768211465,
]
cls.token_id_b = 680564733841876926926749214863536422912
@classmethod
def finish_contract_deployment(cls) -> str:
"""
Finish deploying contract.
:return: contract address
"""
code_id = cast(FetchAIApi, cls.ledger_api).get_code_id(
cls.deployment_tx_receipt
)
assert code_id is not None
# Init contract
tx = cls._contract.get_deploy_transaction(
ledger_api=cls.ledger_api,
deployer_address=cls.deployer_crypto.address,
code_id=code_id,
init_msg={},
tx_fee=0,
amount=0,
label="ERC1155",
gas=1000000,
)
if tx is None:
raise ValueError("Deploy transaction not found!") # pragma: nocover
tx_receipt = cls.sign_send_confirm_receipt_multisig_transaction(
tx, cls.ledger_api, [cls.deployer_crypto]
)
contract_address = cls.ledger_api.get_contract_address(tx_receipt)
if contract_address is None:
raise ValueError("Contract address not found!") # pragma: nocover
return contract_address
@pytest.mark.integration
@pytest.mark.ledger
@pytest.mark.flaky(reruns=MAX_FLAKY_RERUNS)
def test_create_and_mint_and_balances(self):
"""Test cosmwasm contract create, mint and balances functionalities."""
# Create single token
tx = self.contract.get_create_single_transaction(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
deployer_address=self.deployer_crypto.address,
token_id=self.token_id_b,
)
assert len(tx) == 2
self.sign_send_confirm_receipt_multisig_transaction(
tx, self.ledger_api, [self.deployer_crypto]
)
# Create batch of tokens
tx = self.contract.get_create_batch_transaction(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
deployer_address=self.deployer_crypto.address,
token_ids=self.token_ids_a,
)
assert len(tx) == 2
self.sign_send_confirm_receipt_multisig_transaction(
tx, self.ledger_api, [self.deployer_crypto]
)
# Mint single token
tx = self.contract.get_mint_single_transaction(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
deployer_address=self.deployer_crypto.address,
recipient_address=self.item_owner_crypto.address,
token_id=self.token_id_b,
mint_quantity=1,
)
assert len(tx) == 2
self.sign_send_confirm_receipt_multisig_transaction(
tx, self.ledger_api, [self.deployer_crypto]
)
# Get balance of single token
res = self.contract.get_balance(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
agent_address=self.item_owner_crypto.address,
token_id=self.token_id_b,
)
assert "balance" in res
assert res["balance"][self.token_id_b] == 1
# Mint batch of tokens
tx = self.contract.get_mint_batch_transaction(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
deployer_address=self.deployer_crypto.address,
recipient_address=self.item_owner_crypto.address,
token_ids=self.token_ids_a,
mint_quantities=[1] * len(self.token_ids_a),
)
assert len(tx) == 2
self.sign_send_confirm_receipt_multisig_transaction(
tx, self.ledger_api, [self.deployer_crypto]
)
# Get balances of multiple tokens
res = self.contract.get_balances(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
agent_address=self.item_owner_crypto.address,
token_ids=self.token_ids_a,
)
assert "balances" in res
assert res["balances"] == {token_id: 1 for token_id in self.token_ids_a}
@pytest.mark.integration
@pytest.mark.ledger
@pytest.mark.flaky(reruns=MAX_FLAKY_RERUNS)
def test_cosmwasm_single_atomic_swap(self):
"""Test single atomic swap."""
# Create batch of tokens
tx = self.contract.get_create_batch_transaction(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
deployer_address=self.deployer_crypto.address,
token_ids=self.token_ids_a,
)
assert len(tx) == 2
self.sign_send_confirm_receipt_multisig_transaction(
tx, self.ledger_api, [self.deployer_crypto]
)
# Mint single ERC1155 token a[0] to Deployer
tx = self.contract.get_mint_single_transaction(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
deployer_address=self.deployer_crypto.address,
recipient_address=self.deployer_crypto.address,
token_id=self.token_ids_a[0],
mint_quantity=1,
)
assert len(tx) == 2
self.sign_send_confirm_receipt_multisig_transaction(
tx, self.ledger_api, [self.deployer_crypto]
)
# Store balance of Deployer's native tokens before atomic swap
original_deployer_balance = self.ledger_api.get_balance(
self.deployer_crypto.address
)
# Atomic swap
# Send 1 ERC1155 token a[0] from Deployer to Item owner
# Send 1 native token from Item owner to Deployer
tx = self.contract.get_atomic_swap_single_transaction(
self.ledger_api,
contract_address=self.contract_address,
from_address=self.deployer_crypto.address,
to_address=self.item_owner_crypto.address,
token_id=self.token_ids_a[0],
from_supply=1,
to_supply=0,
value=1,
trade_nonce=0,
from_pubkey=self.deployer_crypto.public_key,
to_pubkey=self.item_owner_crypto.public_key,
)
assert len(tx) == 2
self.sign_send_confirm_receipt_multisig_transaction(
tx, self.ledger_api, [self.deployer_crypto, self.item_owner_crypto]
)
# Check Item owner's ERC1155 token balance
result = self.contract.get_balance(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
agent_address=self.item_owner_crypto.address,
token_id=self.token_ids_a[0],
)
assert "balance" in result
assert result["balance"][self.token_ids_a[0]] == 1
# Check deployer's native token balance
deployer_balance = self.ledger_api.get_balance(self.deployer_crypto.address)
assert deployer_balance == original_deployer_balance + 1
# Other direction of atomic swap
# Send 1 ERC1155 token a[0] from Item owner to Deployer
# Send 1 native token from Item owner to Deployer
tx = self.contract.get_atomic_swap_single_transaction(
self.ledger_api,
contract_address=self.contract_address,
from_address=self.deployer_crypto.address,
to_address=self.item_owner_crypto.address,
token_id=self.token_ids_a[0],
from_supply=0,
to_supply=1,
value=1,
trade_nonce=0,
from_pubkey=self.deployer_crypto.public_key,
to_pubkey=self.item_owner_crypto.public_key,
)
assert len(tx) == 2
self.sign_send_confirm_receipt_multisig_transaction(
tx, self.ledger_api, [self.item_owner_crypto]
)
# Check Item owner's ERC1155 token balance
result = self.contract.get_balance(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
agent_address=self.deployer_crypto.address,
token_id=self.token_ids_a[0],
)
assert "balance" in result
assert result["balance"][self.token_ids_a[0]] == 1
# Check deployer's native token balance
deployer_balance = self.ledger_api.get_balance(self.deployer_crypto.address)
assert deployer_balance == original_deployer_balance + 2
# Check invalid case with from_supply > 0 and to_supply > 0
with pytest.raises(RuntimeError):
self.contract.get_atomic_swap_single_transaction(
self.ledger_api,
contract_address=self.contract_address,
from_address=self.deployer_crypto.address,
to_address=self.item_owner_crypto.address,
token_id=self.token_ids_a[0],
from_supply=1,
to_supply=1,
value=1,
trade_nonce=0,
from_pubkey=self.deployer_crypto.public_key,
to_pubkey=self.item_owner_crypto.public_key,
)
@pytest.mark.integration
@pytest.mark.ledger
@pytest.mark.flaky(reruns=MAX_FLAKY_RERUNS)
def test_cosmwasm_batch_atomic_swap(self):
"""Test batch atomic swap."""
# Create batch of tokens
tx = self.contract.get_create_batch_transaction(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
deployer_address=self.deployer_crypto.address,
token_ids=self.token_ids_a,
)
assert len(tx) == 2
self.sign_send_confirm_receipt_multisig_transaction(
tx, self.ledger_api, [self.deployer_crypto]
)
# Mint single token a[0] to Deployer
tx = self.contract.get_mint_single_transaction(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
deployer_address=self.deployer_crypto.address,
recipient_address=self.deployer_crypto.address,
token_id=self.token_ids_a[0],
mint_quantity=1,
)
assert len(tx) == 2
self.sign_send_confirm_receipt_multisig_transaction(
tx, self.ledger_api, [self.deployer_crypto]
)
# Mint single token a[1] to Item owner
tx = self.contract.get_mint_single_transaction(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
deployer_address=self.deployer_crypto.address,
recipient_address=self.item_owner_crypto.address,
token_id=self.token_ids_a[1],
mint_quantity=1,
)
assert len(tx) == 2
self.sign_send_confirm_receipt_multisig_transaction(
tx, self.ledger_api, [self.deployer_crypto]
)
# Store balance of Deployer's native tokens before atomic swap
original_deployer_balance = self.ledger_api.get_balance(
self.deployer_crypto.address
)
# Atomic swap
# Send 1 ERC1155 token a[0] from Deployer to Item owner
# Send 1 ERC1155 token a[1] from Item owner to Deployer
# Send 1 native token from Item owner to Deployer
tx = self.contract.get_atomic_swap_batch_transaction(
self.ledger_api,
contract_address=self.contract_address,
from_address=self.deployer_crypto.address,
to_address=self.item_owner_crypto.address,
token_ids=[self.token_ids_a[0], self.token_ids_a[1]],
from_supplies=[1, 0],
to_supplies=[0, 1],
value=1,
trade_nonce=0,
from_pubkey=self.deployer_crypto.public_key,
to_pubkey=self.item_owner_crypto.public_key,
)
assert len(tx) == 2
self.sign_send_confirm_receipt_multisig_transaction(
tx, self.ledger_api, [self.deployer_crypto, self.item_owner_crypto]
)
# Check Item owner's ERC1155 token balance
result = self.contract.get_balance(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
agent_address=self.item_owner_crypto.address,
token_id=self.token_ids_a[0],
)
assert "balance" in result
assert result["balance"][self.token_ids_a[0]] == 1
# Check Deployer's ERC1155 token balance
result = self.contract.get_balance(
ledger_api=self.ledger_api,
contract_address=self.contract_address,
agent_address=self.deployer_crypto.address,
token_id=self.token_ids_a[1],
)
assert "balance" in result
assert result["balance"][self.token_ids_a[1]] == 1
# Check deployer's native token balance
deployer_balance = self.ledger_api.get_balance(self.deployer_crypto.address)
assert deployer_balance == original_deployer_balance + 1
class TestContractCommon:
"""Other tests for the contract."""
@classmethod
def setup(cls):
"""Setup."""
# Register smart contract used for testing
cls.path_to_contract = Path(
ROOT_DIR, "packages", "fetchai", "contracts", "erc1155"
)
# register contract
configuration = cast(
ContractConfig,
load_component_configuration(ComponentType.CONTRACT, cls.path_to_contract),
)
configuration._directory = ( # pylint: disable=protected-access
cls.path_to_contract
)
if str(configuration.public_id) not in contract_registry.specs:
# load contract into sys modules
Contract.from_config(configuration)
cls.contract = contract_registry.make(str(configuration.public_id))
cls.token_ids_a = [
340282366920938463463374607431768211456,
]
# Create mock ledger with unknown identifier
cls.ledger_api = mock.Mock()
attrs = {"identifier": "dummy"}
cls.ledger_api.configure_mock(**attrs)
@pytest.mark.ledger
def test_get_create_batch_transaction_wrong_identifier(self):
"""Test if get_create_batch_transaction with wrong api identifier fails."""
# Test if function is not implemented for unknown ledger
with pytest.raises(NotImplementedError):
self.contract.get_create_batch_transaction(
ledger_api=self.ledger_api,
contract_address="contract_address",
deployer_address="address",
token_ids=self.token_ids_a,
)
@pytest.mark.ledger
def test_get_create_single_transaction_wrong_identifier(self):
"""Test if get_create_single_transaction with wrong api identifier fails."""
# Test if function is not implemented for unknown ledger
with pytest.raises(NotImplementedError):
self.contract.get_create_single_transaction(
ledger_api=self.ledger_api,
contract_address="contract_address",
deployer_address="address",
token_id=self.token_ids_a[0],
)
@pytest.mark.ledger
def test_get_mint_batch_transaction_wrong_identifier(self):
"""Test if get_mint_batch_transaction with wrong api identifier fails."""
# Test if function is not implemented for unknown ledger
with pytest.raises(NotImplementedError):
self.contract.get_mint_batch_transaction(
ledger_api=self.ledger_api,
contract_address="contract_address",
deployer_address="address",
recipient_address="address",
token_ids=self.token_ids_a,
mint_quantities=[1],
)
@pytest.mark.ledger
def test_get_mint_single_transaction_wrong_identifier(self):
"""Test if get_mint_single_transaction with wrong api identifier fails."""
# Test if function is not implemented for unknown ledger
with pytest.raises(NotImplementedError):
self.contract.get_mint_single_transaction(
ledger_api=self.ledger_api,
contract_address="contract_address",
deployer_address="address",
recipient_address="address",
token_id=self.token_ids_a[0],
mint_quantity=1,
)
@pytest.mark.ledger
def test_get_balance_wrong_identifier(self):
"""Test if get_balance with wrong api identifier fails."""
# Test if function is not implemented for unknown ledger
with pytest.raises(NotImplementedError):
self.contract.get_balance(
ledger_api=self.ledger_api,
contract_address="contract_address",
agent_address="address",
token_id=self.token_ids_a[0],
)
@pytest.mark.ledger
def test_get_balance_wrong_query_res(self):
"""Test if get_balance with wrong api identifier fails."""
# Create mock fetchai ledger that returns None on execute_contract_query
attrs = {"identifier": "fetchai", "execute_contract_query.return_value": None}
self.ledger_api.configure_mock(**attrs)
# Test if get balance returns ValueError when querying contract returns None
with pytest.raises(ValueError):
self.contract.get_balance(
ledger_api=self.ledger_api,
contract_address="contract_address",
agent_address="address",
token_id=self.token_ids_a[0],
)
@pytest.mark.ledger
def test_get_balances_wrong_query_res(self):
"""Test if get_balances with wrong api identifier fails."""
# Create mock fetchai ledger that returns None on execute_contract_query
attrs = {"identifier": "fetchai", "execute_contract_query.return_value": None}
self.ledger_api.configure_mock(**attrs)
# Test if get balance returns ValueError when querying contract returns None
with pytest.raises(ValueError):
self.contract.get_balances(
ledger_api=self.ledger_api,
contract_address="contract_address",
agent_address="address",
token_ids=self.token_ids_a,
)
@pytest.mark.ledger
def test_get_hash_batch_not_same(self):
"""Test if get_hash_batch returns ValueError when on-chain hash is not same as computed hash."""
self.ledger_api.identifier = "ethereum"
# Test if get hash returns ValueError when on chain hash is not same as computed hash
with mock.patch.object(type(self.contract), "_get_hash_batch", new=mock.Mock()):
with pytest.raises(ValueError):
self.contract.get_hash_batch(
ledger_api=self.ledger_api,
contract_address="contract_address",
from_address="address",
to_address="address",
token_ids=self.token_ids_a,
from_supplies=[1],
to_supplies=[0],
value=123,
trade_nonce=123,
)
@pytest.mark.ledger
def test_generate_trade_nonce_if_exist(self):
"""Test if generate_trade_nonce retries when nonce already exist."""
# Etherem ledger api mock
self.ledger_api.identifier = "ethereum"
# instance.functions.is_nonce_used(agent_address, trade_nonce).call() -> True, False
is_nonce_used_mock = mock.Mock()
is_nonce_used_mock.configure_mock(**{"call.side_effect": [True, False]})
# instance.functions.is_nonce_used(agent_address, trade_nonce) -> is_nonce_used_mock with call method
instance_mock = mock.Mock()
instance_mock.configure_mock(
**{"functions.is_nonce_used.return_value": is_nonce_used_mock}
)
# cls.get_instance(ledger_api, contract_address) -> instance_mock
get_instance_mock = mock.Mock()
get_instance_mock.configure_mock(**{"return_value": instance_mock})
# Patch get_instance method to return get_instance_mock which returns instance of instance_mock when called
with mock.patch.object(
type(self.contract), "get_instance", new=get_instance_mock
):
self.contract.generate_trade_nonce(
ledger_api=self.ledger_api,
contract_address="contract_address",
agent_address="address",
)
# Check if is_nonce_used was called twice
assert is_nonce_used_mock.call.call_count == 2
@pytest.mark.ledger
def test_get_atomic_swap_single_transaction_eth_no_signature(self):
"""Test if get_atomic_swap_single_transaction returns RuntimeError if signature not present on Ethereum case."""
self.ledger_api.identifier = "ethereum"
# Test if get_atomic_swap_single_transaction returns RuntimeError when signature is missing
with pytest.raises(RuntimeError):
self.contract.get_atomic_swap_single_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_id=self.token_ids_a[0],
from_supply=1,
to_supply=0,
value=1,
trade_nonce=0,
)
@pytest.mark.ledger
def test_get_atomic_swap_single_transaction_eth_pubkeys(self):
"""Test if get_atomic_swap_single_transaction returns RuntimeError if pubkeys are present on Ethereum case."""
self.ledger_api.identifier = "ethereum"
# Test if get_atomic_swap_single_transaction returns RuntimeError when pubkey is present
with pytest.raises(RuntimeError):
self.contract.get_atomic_swap_single_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_id=self.token_ids_a[0],
from_supply=1,
to_supply=0,
value=1,
trade_nonce=0,
signature="signature",
from_pubkey="deadbeef",
to_pubkey="deadbeef",
)
@pytest.mark.ledger
def test_get_atomic_swap_single_transaction_cosmos_signature(self):
"""Test if get_atomic_swap_single_transaction returns RuntimeError if signature is present on Cosmos/Fetch case."""
self.ledger_api.identifier = "fetchai"
# Test if get_atomic_swap_single_transaction returns RuntimeError when signature is present
with pytest.raises(RuntimeError):
self.contract.get_atomic_swap_single_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_id=self.token_ids_a[0],
from_supply=1,
to_supply=0,
value=1,
trade_nonce=0,
signature="signature",
from_pubkey="deadbeef",
to_pubkey="deadbeef",
)
@pytest.mark.ledger
def test_get_atomic_swap_single_transaction_cosmos_one_pubkey_valid(self):
"""Test if get_atomic_swap_single_transaction allows one pubkey in case of only one direction of transfers."""
self.ledger_api.identifier = "fetchai"
# Test if get_atomic_swap_single_transaction works with only to_pubkey
tx = self.contract.get_atomic_swap_single_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_id=self.token_ids_a[0],
from_supply=0,
to_supply=1,
value=1,
trade_nonce=0,
to_pubkey="deadbeef",
)
assert tx is not None
@pytest.mark.ledger
def test_get_atomic_swap_single_transaction_cosmos_one_pubkey_invalid(self):
"""Test if get_atomic_swap_single_transaction returns RuntimeError with missing from_pubkey."""
self.ledger_api.identifier = "fetchai"
        # Test if get_atomic_swap_single_transaction fails with missing from_pubkey
with pytest.raises(RuntimeError):
self.contract.get_atomic_swap_single_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_id=self.token_ids_a[0],
from_supply=1,
to_supply=0,
value=1,
trade_nonce=0,
to_pubkey="deadbeef",
)
@pytest.mark.ledger
def test_get_atomic_swap_single_transaction_cosmos_to_pubkey_missing(self):
"""Test if get_atomic_swap_single_transaction returns RuntimeError with missing to_pubkey."""
self.ledger_api.identifier = "fetchai"
        # Test if get_atomic_swap_single_transaction fails with missing to_pubkey
with pytest.raises(RuntimeError):
self.contract.get_atomic_swap_single_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_id=self.token_ids_a[0],
from_supply=1,
to_supply=0,
value=1,
trade_nonce=0,
from_pubkey="deadbeef",
)
@pytest.mark.ledger
def test_get_atomic_swap_batch_transaction_eth_pubkeys(self):
"""Test if get_atomic_swap_batch_transaction returns RuntimeError if pubkeys are present on Ethereum case."""
self.ledger_api.identifier = "ethereum"
# Test if get_atomic_swap_batch_transaction returns RuntimeError when pubkey is present
with pytest.raises(RuntimeError):
self.contract.get_atomic_swap_batch_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_ids=[self.token_ids_a[0]],
from_supplies=[1],
to_supplies=[0],
value=1,
trade_nonce=0,
signature="signature",
from_pubkey="deadbeef",
)
@pytest.mark.ledger
def test_get_atomic_swap_batch_transaction_cosmos_signature(self):
"""Test if get_atomic_swap_batch_transaction returns RuntimeError if signature is present on Cosmos/Fetch case."""
self.ledger_api.identifier = "fetchai"
# Test if get_atomic_swap_batch_transaction returns RuntimeError when signature is present
with pytest.raises(RuntimeError):
self.contract.get_atomic_swap_batch_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_ids=[self.token_ids_a[0]],
from_supplies=[1],
to_supplies=[0],
value=1,
trade_nonce=0,
signature="signature",
from_pubkey="deadbeef",
to_pubkey="deadbeef",
)
@pytest.mark.ledger
def test_get_atomic_swap_batch_transaction_cosmos_one_pubkey_valid(self):
"""Test if get_atomic_swap_batch_transaction allows one pubkey in case of only one direction of transfers."""
self.ledger_api.identifier = "fetchai"
# Test if get_atomic_swap_batch_transaction works with only to_pubkey
tx = self.contract.get_atomic_swap_batch_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_ids=[self.token_ids_a[0]],
from_supplies=[0],
to_supplies=[1],
value=1,
trade_nonce=0,
to_pubkey="deadbeef",
)
assert tx is not None
@pytest.mark.ledger
def test_get_atomic_swap_batch_transaction_cosmos_one_pubkey_invalid(self):
"""Test if get_atomic_swap_batch_transaction returns RuntimeError with missing from_pubkey."""
self.ledger_api.identifier = "fetchai"
        # Test if get_atomic_swap_batch_transaction fails with missing from_pubkey
with pytest.raises(RuntimeError):
self.contract.get_atomic_swap_batch_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_ids=[self.token_ids_a[0]],
from_supplies=[1],
to_supplies=[0],
value=1,
trade_nonce=0,
to_pubkey="deadbeef",
)
@pytest.mark.ledger
    def test_get_atomic_swap_batch_transaction_eth_no_signature(self):
        """Test if get_atomic_swap_batch_transaction returns RuntimeError if signature not present on Ethereum case."""
self.ledger_api.identifier = "ethereum"
        # Test if get_atomic_swap_batch_transaction returns RuntimeError when signature is missing
with pytest.raises(RuntimeError):
self.contract.get_atomic_swap_batch_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_ids=[self.token_ids_a[0]],
from_supplies=[1],
to_supplies=[0],
value=1,
trade_nonce=0,
)
@pytest.mark.ledger
def test_get_atomic_swap_batch_transaction_cosmos_to_pubkey_missing(self):
"""Test if get_atomic_swap_batch_transaction returns RuntimeError with missing to_pubkey."""
self.ledger_api.identifier = "fetchai"
        # Test if get_atomic_swap_batch_transaction fails with missing to_pubkey
with pytest.raises(RuntimeError):
self.contract.get_atomic_swap_batch_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_ids=[self.token_ids_a[0]],
from_supplies=[1],
to_supplies=[0],
value=1,
trade_nonce=0,
from_pubkey="deadbeef",
)
@pytest.mark.ledger
def test_get_atomic_swap_batch_transaction_cosmos_to_pubkey_missing_no_from_pubkey_required(
self,
):
"""Test if get_atomic_swap_batch_transaction returns RuntimeError with missing to_pubkey and from_pubkey not required."""
self.ledger_api.identifier = "fetchai"
        # Test if get_atomic_swap_batch_transaction fails with missing to_pubkey
with pytest.raises(RuntimeError):
self.contract.get_atomic_swap_batch_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_ids=[self.token_ids_a[0]],
from_supplies=[0],
to_supplies=[1],
value=1,
trade_nonce=0,
)
@pytest.mark.ledger
def test_get_atomic_swap_batch_transaction_cosmos_from_pubkey_missing_no_to_pubkey_required(
self,
):
"""Test if get_atomic_swap_batch_transaction returns RuntimeError with missing from_pubkey and to_pubkey not required."""
self.ledger_api.identifier = "fetchai"
        # Test if get_atomic_swap_batch_transaction fails with missing from_pubkey
with pytest.raises(RuntimeError):
self.contract.get_atomic_swap_batch_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_ids=[self.token_ids_a[0]],
from_supplies=[1],
to_supplies=[0],
value=0,
trade_nonce=0,
)
@pytest.mark.ledger
def test_get_atomic_swap_batch_transaction_cosmos_from_pubkey_only(self):
"""Test if get_atomic_swap_batch_transaction returns Tx in case with only from_pubkey."""
self.ledger_api.identifier = "fetchai"
        # Test if get_atomic_swap_batch_transaction works with only from_pubkey
res = self.contract.get_atomic_swap_batch_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_ids=[self.token_ids_a[0]],
from_supplies=[1],
to_supplies=[0],
value=0,
trade_nonce=0,
from_pubkey="deadbeef",
)
assert res is not None
@pytest.mark.ledger
def test_get_atomic_swap_single_transaction_amounts_missing(self):
"""Test if get_atomic_swap_single_transaction returns RuntimeError with missing amounts."""
self.ledger_api.identifier = "fetchai"
        # Test if get_atomic_swap_single_transaction fails when all amounts are zero
with pytest.raises(RuntimeError):
self.contract.get_atomic_swap_single_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_id=self.token_ids_a[0],
from_supply=0,
to_supply=0,
value=0,
trade_nonce=0,
from_pubkey="deadbeef",
)
@pytest.mark.ledger
def test_get_atomic_swap_batch_transaction_amounts_missing(self):
"""Test if get_atomic_swap_batch_transaction returns RuntimeError with missing amounts."""
self.ledger_api.identifier = "fetchai"
        # Test if get_atomic_swap_batch_transaction fails when all amounts are zero
with pytest.raises(RuntimeError):
self.contract.get_atomic_swap_batch_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_ids=[self.token_ids_a[0]],
from_supplies=[0],
to_supplies=[0],
value=0,
trade_nonce=0,
from_pubkey="deadbeef",
)
@pytest.mark.ledger
def test_get_atomic_swap_single_transaction_cosmos_to_pubkey_missing_no_from_pubkey_required(
self,
):
"""Test if get_atomic_swap_single_transaction returns RuntimeError with missing to_pubkey and from_pubkey not required."""
self.ledger_api.identifier = "fetchai"
# Test if get_atomic_swap_single_transaction fails with missing to_pubkey
with pytest.raises(RuntimeError):
self.contract.get_atomic_swap_single_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_id=self.token_ids_a[0],
from_supply=0,
to_supply=1,
value=1,
trade_nonce=0,
)
@pytest.mark.ledger
def test_get_atomic_swap_single_transaction_cosmos_from_pubkey_missing_no_to_pubkey_required(
self,
):
"""Test if get_atomic_swap_single_transaction returns RuntimeError with missing from_pubkey and to_pubkey not required."""
self.ledger_api.identifier = "fetchai"
# Test if get_atomic_swap_single_transaction fails with missing from_pubkey
with pytest.raises(RuntimeError):
self.contract.get_atomic_swap_single_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_id=self.token_ids_a[0],
from_supply=1,
to_supply=0,
value=0,
trade_nonce=0,
)
@pytest.mark.ledger
def test_get_atomic_swap_single_transaction_cosmos_from_pubkey_only(self):
"""Test if get_atomic_swap_single_transaction returns Tx in case with only from_pubkey."""
self.ledger_api.identifier = "fetchai"
# Test if get_atomic_swap_single_transaction works with only from_pubkey
res = self.contract.get_atomic_swap_single_transaction(
self.ledger_api,
contract_address="address",
from_address="address",
to_address="address",
token_id=self.token_ids_a[0],
from_supply=1,
to_supply=0,
value=0,
trade_nonce=0,
from_pubkey="deadbeef",
)
assert res is not None
|
boards/admin.py | EndermanOfCoding/django-beginners-guide | 1,106 | 12631597 |
from django.contrib import admin
from .models import Board
admin.site.register(Board)
|
nose2/tests/functional/support/scenario/pretty_asserts/ignore_passing/test_prettyassert_ignore_passing.py | deeplow/nose2 | 637 | 12631623 |
import unittest
class TestFailAssert(unittest.TestCase):
def test_failing_assert(self):
x = True
y = False
# fmt: off
# flake8: noqa
assert x; assert y
# fmt: on
def test_failing_assert2(self):
p = 1
q = 0
assert p
assert q
|
bits_wilp/primeFactorization.py | deepak5998/Py | 726 | 12631638 |
from math import sqrt
def get_prime_factors(num):
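    """Return the prime factorization of num as a list of primes with multiplicity."""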
factors = []
# get all the 2's
while num % 2 == 0:
factors.append(2)
        num = num // 2  # integer division keeps num an int
# check for other prime factors
    # checking candidates only up to sqrt(num) is enough: any larger factor pairs with a smaller one
# step size of 2 to avoid checking with even numbers
for i in range(3, int(sqrt(num))+1, 2):
while num % i == 0:
# print(num, i)
factors.append(i)
            num = num // i
# num is now the last prime number
if num > 2:
factors.append(int(num))
return factors
n = int(input("Enter the number: "))
result = get_prime_factors(n)
print("The factors of {n} are {result}".format(n=n, result=result))
# Enter the number: 1081310109
# The factors of 1081310109 are [3, 11, 17, 23, 181, 463]
|
vnpy/amqp/test06_rpc_client.py | howyu88/vnpy2 | 323 | 12631644 |
# encoding: UTF-8
from uuid import uuid1
import json
import random
from vnpy.amqp.producer import rpc_client
def cb_function(*args):
print('resp call back')
for arg in args:
print(u'{}'.format(arg))
if __name__ == '__main__':
import time
c = rpc_client(host='localhost', user='admin', password='<PASSWORD>')
counter = 0
while True:
time.sleep(0.1)
mission = {'method': 'test_01'}
params = {}
params.update({'p2': random.random()})
params.update({'p3': random.random()})
params.update({'p1': counter})
mission.update({'params': params})
msg = json.dumps(mission)
print(f'[x] rpc call :{msg}')
c.call(msg, str(uuid1()), cb_function)
counter += 1
if counter > 100:
break
print('exit')
c.exit()
|
minemeld/ft/test.py | zul126/minemeld-core | 147 | 12631669 | from __future__ import absolute_import
import logging
import gevent
from . import base
from .utils import utc_millisec
import netaddr
LOG = logging.getLogger(__name__)
class TestMiner(base.BaseFT):
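    """Test miner that emits a configurable number of synthetic IPv4 indicators, rate-limited to roughly mps updates per second."""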
def __init__(self, name, chassis, config):
super(TestMiner, self).__init__(name, chassis, config)
self._glet = None
def configure(self):
super(TestMiner, self).configure()
self.num_messages = self.config.get('num_messages', 100000)
self.mps = self.config.get('mps', 1000)
def initialize(self):
pass
def rebuild(self):
pass
def reset(self):
pass
def _run(self):
cip = 0x0A000000
v = {
'type': 'IPv4',
'confidence': 0,
'share_level': 'red'
}
LOG.info('%s - start sending messages: %d', self.name, utc_millisec())
t1 = utc_millisec()
for i in xrange(self.num_messages):
ip = str(netaddr.IPAddress(i+cip))
self.emit_update(ip, v)
if ((i+1) % self.mps) == 0:
now = utc_millisec()
LOG.info('%d: %d', i+1, now - t1)
if now - t1 < 1000:
gevent.sleep((1000 - now + t1)/1000.0)
t1 = now
LOG.info('%s - all messages sent: %d', self.name, utc_millisec())
def length(self, source=None):
return 0
def start(self):
super(TestMiner, self).start()
self._glet = gevent.spawn_later(
2,
self._run
)
def stop(self):
super(TestMiner, self).stop()
if self._glet is None:
return
self._glet.kill()
class TestFeed(base.BaseFT):
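    """Test output node that counts processed updates and logs when the first and last expected messages arrive."""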
def __init__(self, name, chassis, config):
super(TestFeed, self).__init__(name, chassis, config)
def configure(self):
super(TestFeed, self).configure()
self.num_messages = self.config.get('num_messages', 100000)
def read_checkpoint(self):
self.last_checkpoint = None
def create_checkpoint(self, value):
pass
def initialize(self):
pass
def rebuild(self):
pass
def reset(self):
pass
@base._counting('update.processed')
def filtered_update(self, source=None, indicator=None, value=None):
if self.statistics['update.processed'] == 1:
LOG.info('%s - first message: %d', self.name, utc_millisec())
elif self.statistics['update.processed'] == self.num_messages:
LOG.info('%s - last message: %d', self.name, utc_millisec())
@base._counting('withdraw.processed')
def filtered_withdraw(self, source=None, indicator=None, value=None):
pass
def length(self, source=None):
pass
class FaultyConfig(base.BaseFT):
def configure(self):
super(FaultyConfig, self).configure()
raise RuntimeError('fault !')
def initialize(self):
pass
def rebuild(self):
pass
def reset(self):
pass
def length(self, source=None):
return 0
class FaultyInit(base.BaseFT):
def __init__(self, name, chassis, config):
raise RuntimeError('fault !')
def configure(self):
pass
def initialize(self):
pass
def rebuild(self):
pass
def reset(self):
pass
def length(self, source=None):
return 0
|
mindmeld/active_learning/results_manager.py | ritvikshrivastava/mindmeld | 580 | 12631679 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module saves the results from the Active Learning Pipeline.
"""
import os
import json
import datetime
import logging
from typing import Dict, List
from mindmeld.markup import dump_query
from ..path import (
AL_PARAMS_PATH,
AL_RESULTS_FOLDER,
AL_PLOTS_FOLDER,
AL_ACCURACIES_PATH,
AL_SELECTED_QUERIES_PATH,
)
from ..constants import STRATEGY_ABRIDGED
logger = logging.getLogger(__name__)
class ResultsManager:
"""Handles the initialization of generated folder and its contents. Keeps record of experiment
results."""
def __init__(
self,
output_folder: str,
):
"""
Args:
output_folder (str): Directory to create an experiment folder or save log queries.
"""
self.output_folder = output_folder
self.experiment_folder_name = None
def set_experiment_folder_name(self, selection_strategies) -> str:
"""
Args:
selection_strategies (list): List of strategies used for the experiment.
Returns:
experiment_folder_name (str): Creates the name of the current experiment folder
based on the current timestamp.
"""
strategies = "_".join(
STRATEGY_ABRIDGED[s] for s in selection_strategies if s in STRATEGY_ABRIDGED
)
now = datetime.datetime.now()
self.experiment_folder_name = (
f"{now.year}-{now.month}-{now.day}_{now.hour}:{now.minute}_{strategies}"
)
@property
def experiment_folder(self):
"""
Returns:
experiment_folder (str): Path to the Active Learning experiment folder.
"""
return os.path.join(self.output_folder, self.experiment_folder_name)
def create_experiment_folder(
self, active_learning_params: Dict, tuning_strategies: List
):
"""Creates the active learning experiment folder.
Args:
active_learning_params (Dict): Dictionary representation of the params to store.
tuning_strategies (list): List of strategies used for the experiment.
"""
self.set_experiment_folder_name(tuning_strategies)
os.makedirs(self.experiment_folder, exist_ok=True)
self.dump_json(AL_PARAMS_PATH, active_learning_params)
self.create_folder(AL_RESULTS_FOLDER)
self.create_folder(AL_PLOTS_FOLDER)
def create_folder(self, unformatted_path):
"""Creates a folder given an unformatted path.
Args:
unformatted_path (str): Unformatted path to JSON file.
"""
os.makedirs(self.format_path(unformatted_path), exist_ok=True)
def format_path(self, unformatted_path):
"""
Args:
unformatted_path (str): Unformatted path to JSON file.
Returns:
formatted_path (str): Path formatted with the experiment folder.
"""
return unformatted_path.format(experiment_folder=self.experiment_folder)
def load_json(self, unformatted_path: str):
"""Load JSON data from file. If the JSON file doesn't exist, an empty json file is created.
Args:
unformatted_path (str): Unformatted path to JSON file.
Returns:
json_data (Dict): Loaded JSON data.
"""
formatted_path = unformatted_path.format(
experiment_folder=self.experiment_folder
)
if not os.path.isfile(formatted_path):
self.dump_json(formatted_path, data={})
with open(formatted_path, "r") as infile:
json_data = json.load(infile)
return json_data
def dump_json(self, unformatted_path: str, data: Dict):
"""Dump data to a JSON file.
Args:
unformatted_path (str): Unformatted path to JSON file.
data (Dict): Data to dump.
"""
formatted_path = self.format_path(unformatted_path)
with open(formatted_path, "w") as outfile:
json.dump(data, outfile, indent=4)
def update_json(
self, unformatted_path: str, strategy: str, epoch: int, iteration: int, data
):
"""Helper method to update json files.
Args:
unformatted_path (str): Unformatted path to JSON file.
strategy (str): Current training strategy.
epoch (int): Current epoch.
iteration (int): Current iteration.
data (Dict or List): Data to store for current strategy, epoch, and iteration.
"""
json_data = self.load_json(unformatted_path)
json_data[strategy] = json_data.get(strategy, {})
json_data[strategy][str(epoch)] = json_data[strategy].get(
str(epoch), {str(epoch): {}}
)
json_data[strategy][str(epoch)][str(iteration)] = data
self.dump_json(unformatted_path, json_data)
def update_accuracies_json(
self, strategy: str, epoch: int, iteration: int, eval_stats
):
"""Update accuracies.json with iteration metrics"""
self.update_json(AL_ACCURACIES_PATH, strategy, epoch, iteration, eval_stats)
def update_selected_queries_json(
self, strategy: str, epoch: int, iteration: int, queries
):
"""Update accuracies.json with iteration metrics"""
query_dicts = ResultsManager.queries_to_dict(queries)
self.update_json(
AL_SELECTED_QUERIES_PATH, strategy, epoch, iteration, query_dicts
)
def write_log_selected_queries_json(self, strategy: str, queries):
"""Update accuracies.json with iteration metrics"""
query_dicts = ResultsManager.queries_to_dict(queries)
log_selected_queries_path = os.path.join(
self.output_folder, "log_selected_queries.json"
)
data = {"strategy": strategy, "selected_queries": query_dicts}
with open(log_selected_queries_path, "w") as outfile:
json.dump(data, outfile, indent=4)
logger.info("Selected Log Queries saved at: %s", log_selected_queries_path)
@staticmethod
def queries_to_dict(queries: List) -> List:
"""Convert a list of ProcessedQueries into a list dictionaries.
Args:
queries (List): List of ProcessedQuery objects
Returns:
query_dicts (List): List of queries represented as a dict with the keys
"unannotated_text", "annotated_text", "domain", and "intent".
"""
return [
{
"unannotated_text": query.query.text,
"annotated_text": dump_query(query),
"domain": query.domain,
"intent": query.intent,
}
for query in queries
]
|
chainer/functions/normalization/decorrelated_batch_normalization.py | zjzh/chainer | 3,705 | 12631743 | import numpy
from chainer import backend
from chainer import function_node
from chainer.utils import argument
from chainer.utils import type_check
# {numpy: True, cupy: False}
_xp_supports_batch_eigh = {}
# routines for batched matrices
def _eigh(a, xp):
if xp not in _xp_supports_batch_eigh:
try:
xp.linalg.eigh(xp.ones((2, 2, 2), xp.float32))
except ValueError:
_xp_supports_batch_eigh[xp] = False
else:
_xp_supports_batch_eigh[xp] = True
if _xp_supports_batch_eigh[xp]:
return xp.linalg.eigh(a)
ws = []
vs = []
for ai in a:
w, v = xp.linalg.eigh(ai)
ws.append(w)
vs.append(v)
return xp.stack(ws), xp.stack(vs)
def _matmul(a, b, xp):
if hasattr(xp, 'matmul'): # numpy.matmul is supported from version 1.10.0
return xp.matmul(a, b)
else:
return xp.einsum('bij,bjk->bik', a, b)
def _diag(a, xp):
s0, s1 = a.shape
ret = xp.zeros((s0, s1, s1), a.dtype)
arange_s1 = numpy.arange(s1)
ret[:, arange_s1, arange_s1] = a
return ret
def _calc_axis_and_m(x_shape, batch_size):
m = batch_size
spatial_ndim = len(x_shape) - 2
spatial_axis = tuple(range(2, 2 + spatial_ndim))
for i in spatial_axis:
m *= x_shape[i]
return spatial_axis, m
class DecorrelatedBatchNormalization(function_node.FunctionNode):
def __init__(self, groups=16, eps=2e-5, mean=None, projection=None,
decay=0.9):
self.groups = groups
self.running_mean = mean
self.running_projection = projection
self.eps = eps
self.decay = decay
self.axis = None
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
x_type = in_types[0]
type_check.expect(
x_type.dtype.kind == 'f',
x_type.shape[1] % self.groups == 0,
)
type_check.expect(
x_type.ndim >= 2,
)
def forward(self, inputs):
self.retain_inputs(())
x = inputs[0]
xp = backend.get_array_module(x)
x_shape = x.shape
b, c = x_shape[:2]
g = self.groups
C = c // g
spatial_axis, m = _calc_axis_and_m(x_shape, b)
# (g, C, m)
x_hat = x.transpose((1, 0) + spatial_axis).reshape(g, C, m)
mean = x_hat.mean(axis=2, keepdims=True)
x_hat = x_hat - mean
self.eps = x.dtype.type(self.eps)
eps_matrix = self.eps * xp.eye(C, dtype=x.dtype)
cov = _matmul(
x_hat, x_hat.transpose(0, 2, 1),
xp) / x.dtype.type(m) + eps_matrix
# (g, C), (g, C, C)
self.eigvals, self.eigvectors = _eigh(cov, xp)
U = _matmul(
_diag(self.eigvals ** -0.5, xp),
self.eigvectors.transpose(0, 2, 1),
xp)
self.y_hat_pca = _matmul(U, x_hat, xp) # PCA whitening
# ZCA whitening
y_hat = _matmul(self.eigvectors, self.y_hat_pca, xp)
y = y_hat.reshape((c, b) + x_shape[2:]).transpose(
(1, 0) + spatial_axis)
# Update running statistics
if self.running_mean is not None:
mean = mean.squeeze(axis=2)
self.running_mean *= self.decay
self.running_mean += (1 - self.decay) * mean
if self.running_projection is not None:
adjust = m / max(m - 1., 1.) # unbiased estimation
self.running_projection *= self.decay
projection = _matmul(self.eigvectors, U, xp)
self.running_projection += (1 - self.decay) * adjust * projection
return y,
def backward(self, indexes, grad_outputs):
gy, = grad_outputs
f = DecorrelatedBatchNormalizationGrad(
self.groups, self.eigvals, self.eigvectors, self.y_hat_pca)
return f.apply((gy,))
class DecorrelatedBatchNormalizationGrad(function_node.FunctionNode):
def __init__(self, groups, eigvals, eigvectors, y_hat_pca):
self.groups = groups
self.eigvals = eigvals
self.eigvectors = eigvectors
self.y_hat_pca = y_hat_pca
def forward(self, inputs):
self.retain_inputs(())
gy = inputs[0]
xp = backend.get_array_module(gy)
gy_shape = gy.shape
b, c = gy_shape[:2]
g = self.groups
C = c // g
spatial_axis, m = _calc_axis_and_m(gy_shape, b)
arange_C = numpy.arange(C)
diag_indices = slice(None), arange_C, arange_C
gy_hat = gy.transpose((1, 0) + spatial_axis).reshape(g, C, m)
eigvectors = self.eigvectors
eigvals = self.eigvals
y_hat_pca = self.y_hat_pca
gy_hat_pca = _matmul(eigvectors.transpose(0, 2, 1), gy_hat, xp)
f = gy_hat_pca.mean(axis=2, keepdims=True)
K = eigvals[:, :, None] - eigvals[:, None, :]
valid = K != 0 # to avoid nan, use eig_i != eig_j instead of i != j
K[valid] = xp.reciprocal(K[valid])
V = _diag(eigvals, xp)
V_sqrt = _diag(eigvals ** 0.5, xp)
V_invsqrt = _diag(eigvals ** -0.5, xp)
F_c = _matmul(
gy_hat_pca, y_hat_pca.transpose(0, 2, 1),
xp) / gy.dtype.type(m)
M = xp.zeros_like(F_c)
M[diag_indices] = F_c[diag_indices]
mat = K.transpose(0, 2, 1) * (
_matmul(V, F_c.transpose(0, 2, 1), xp)
+ _matmul(_matmul(V_sqrt, F_c, xp), V_sqrt, xp)
)
S = mat + mat.transpose(0, 2, 1)
R = gy_hat_pca - f + _matmul(
(S - M).transpose(0, 2, 1), y_hat_pca, xp)
gx_hat = _matmul(
_matmul(R.transpose(0, 2, 1), V_invsqrt, xp),
eigvectors.transpose(0, 2, 1), xp
).transpose(0, 2, 1)
gx = gx_hat.reshape((c, b) + gy_shape[2:]).transpose(
(1, 0) + spatial_axis)
self.retain_outputs(())
return gx,
def backward(self, inputs, grad_outputs):
# TODO(crcrpar): Implement this.
raise NotImplementedError('Double backward is not implemented for'
' decorrelated batch normalization.')
class FixedDecorrelatedBatchNormalization(function_node.FunctionNode):
def __init__(self, groups):
self.groups = groups
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x_type, mean_type, var_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
mean_type.dtype == x_type.dtype,
var_type.dtype == x_type.dtype,
)
type_check.expect(
x_type.ndim >= 2,
)
def forward(self, inputs):
self.retain_inputs((0, 1, 2))
x, mean, projection = inputs
xp = backend.get_array_module(x)
x_shape = x.shape
b, c = x_shape[:2]
g = self.groups
C = c // g
spatial_axis, m = _calc_axis_and_m(x_shape, b)
x_hat = x.transpose((1, 0) + spatial_axis).reshape(g, C, m)
x_hat = x_hat - xp.expand_dims(mean, axis=2)
y_hat = _matmul(projection, x_hat, xp)
y = y_hat.reshape((c, b) + x_shape[2:]).transpose(
(1, 0) + spatial_axis)
return y,
def backward(self, indexes, grad_outputs):
x, mean, projection = self.get_retained_inputs()
gy, = grad_outputs
f = FixedDecorrelatedBatchNormalizationGrad(self.groups)
return f.apply((x, mean, projection, gy))
class FixedDecorrelatedBatchNormalizationGrad(function_node.FunctionNode):
def __init__(self, groups):
self.groups = groups
def forward(self, inputs):
self.retain_inputs(())
x, mean, projection, gy = inputs
xp = backend.get_array_module(x)
gy_shape = gy.shape
b, c = gy_shape[:2]
g = self.groups
C = c // g
spatial_axis, m = _calc_axis_and_m(gy_shape, b)
gy_hat = gy.transpose((1, 0) + spatial_axis).reshape(g, C, m)
x_hat = x.transpose((1, 0) + spatial_axis).reshape(g, C, m)
gy_hat_pca = _matmul(projection.transpose(0, 2, 1), gy_hat, xp)
gx = gy_hat_pca.reshape((c, b) + gy_shape[2:]).transpose(
(1, 0) + spatial_axis)
rhs = x_hat - xp.expand_dims(mean, axis=2)
gprojection = _matmul((x_hat - rhs).transpose(0, 2, 1), gy_hat, xp)
gmean = -gy_hat_pca[..., 0]
self.retain_outputs(())
return gx, gmean, gprojection
def backward(self, inputs, grad_outputs):
# TODO(crcrpar): Implement this.
raise NotImplementedError('Double backward is not implemented for'
' fixed decorrelated batch normalization.')
def decorrelated_batch_normalization(x, **kwargs):
"""decorrelated_batch_normalization(x, *, groups=16, eps=2e-5, \
running_mean=None, running_projection=None, decay=0.9)
Decorrelated batch normalization function.
It takes the input variable ``x`` and normalizes it using
batch statistics to make the output zero-mean and decorrelated.
Args:
x (:class:`~chainer.Variable`): Input variable.
groups (int): Number of groups to use for group whitening.
eps (float): Epsilon value for numerical stability.
running_mean (:ref:`ndarray`): Expected value of the mean. This is a
running average of the mean over several mini-batches using
the decay parameter. If ``None``, the expected mean is initialized
to zero.
running_projection (:ref:`ndarray`):
Expected value of the project matrix. This is a
running average of the projection over several mini-batches using
the decay parameter. If ``None``, the expected projected is
initialized to the identity matrix.
decay (float): Decay rate of moving average. It is used during
training.
Returns:
~chainer.Variable: The output variable which has the same shape as
:math:`x`.
See: `Decorrelated Batch Normalization <https://arxiv.org/abs/1804.08450>`_
.. seealso:: :class:`~chainer.links.DecorrelatedBatchNormalization`
"""
groups, eps, running_mean, running_projection, decay = \
argument.parse_kwargs(
kwargs, ('groups', 16), ('eps', 2e-5), ('running_mean', None),
('running_projection', None), ('decay', 0.9))
f = DecorrelatedBatchNormalization(
groups, eps, running_mean, running_projection, decay)
return f.apply((x,))[0]
def fixed_decorrelated_batch_normalization(x, mean, projection, groups=16):
"""Decorrelated batch normalization function with fixed statistics.
This is a variant of decorrelated batch normalization, where the mean and
projection statistics are given by the caller as fixed variables. This is
used in testing mode of the decorrelated batch normalization layer, where
batch statistics cannot be used for prediction consistency.
Args:
x (:class:`~chainer.Variable`): Input variable.
mean (:class:`~chainer.Variable` or :ref:`ndarray`):
Shifting parameter of input.
projection (:class:`~chainer.Variable` or :ref:`ndarray`):
Projection matrix for decorrelation of input.
groups (int): Number of groups to use for group whitening.
Returns:
~chainer.Variable: The output variable which has the same shape as
:math:`x`.
.. seealso::
:func:`~chainer.functions.decorrelated_batch_normalization`,
:class:`~chainer.links.DecorrelatedBatchNormalization`
"""
f = FixedDecorrelatedBatchNormalization(groups)
return f.apply((x, mean, projection))[0]
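# ---------------------------------------------------------------------------
# Minimal usage sketch of the two public functions above (illustrative only:
# the input shape, groups value, zero mean and identity projection are
# arbitrary example values, not values prescribed by this module).
# ---------------------------------------------------------------------------
if __name__ == '__main__':  # pragma: no cover
    x = numpy.random.randn(8, 32, 4, 4).astype(numpy.float32)
    # training mode: whitening statistics are computed from the batch itself
    y = decorrelated_batch_normalization(x, groups=2, eps=2e-5)
    # test mode: whitening uses fixed statistics (here, zero mean and an
    # identity projection per group; groups=2 gives 16 channels per group)
    mean = numpy.zeros((2, 16), dtype=numpy.float32)
    projection = numpy.tile(numpy.eye(16, dtype=numpy.float32), (2, 1, 1))
    y_fixed = fixed_decorrelated_batch_normalization(x, mean, projection, groups=2)
    print(y.shape, y_fixed.shape)  # both (8, 32, 4, 4)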
|
examples/undocumented/python/transfer_multitask_leastsquares_regression.py | gf712/shogun | 2,753 | 12631779 | #!/usr/bin/env python
from numpy import array
from numpy.random import seed, rand
from tools.load import LoadMatrix
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
label_traindat = lm.load_labels('../data/label_train_twoclass.dat')
parameter_list = [[traindat,testdat,label_traindat]]
def transfer_multitask_leastsquares_regression (fm_train=traindat,fm_test=testdat,label_train=label_traindat):
from shogun import RegressionLabels, Task, TaskGroup
try:
from shogun import MultitaskLeastSquaresRegression
except ImportError:
print("MultitaskLeastSquaresRegression not available")
exit(0)
import shogun as sg
	features = sg.create_features(fm_train)
labels = RegressionLabels(label_train)
n_vectors = features.get_num_vectors()
task_one = Task(0,n_vectors//2)
task_two = Task(n_vectors//2,n_vectors)
task_group = TaskGroup()
task_group.append_task(task_one)
task_group.append_task(task_two)
mtlsr = MultitaskLeastSquaresRegression(0.1,features,labels,task_group)
mtlsr.set_regularization(1) # use regularization ratio
mtlsr.set_tolerance(1e-2) # use 1e-2 tolerance
mtlsr.train()
mtlsr.set_current_task(0)
out = mtlsr.apply_regression().get_labels()
return out
if __name__=='__main__':
print('TransferMultitaskLeastSquaresRegression')
transfer_multitask_leastsquares_regression(*parameter_list[0])
|
src/products/migrations/0014_CourseWelcomeLetter.py | denkasyanov/education-backend | 151 | 12631787 | <filename>src/products/migrations/0014_CourseWelcomeLetter.py
# Generated by Django 2.2.13 on 2020-09-30 17:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0013_TemplateIdRename'),
]
operations = [
migrations.RemoveField(
model_name='course',
name='template_id',
),
migrations.AddField(
model_name='course',
name='welcome_letter_template_id',
field=models.CharField(blank=True, help_text='Will be sent upon purchase if set', max_length=255, null=True, verbose_name='Welcome letter template id'),
),
]
|
terrascript/provider/taiidani/jenkins.py | mjuenema/python-terrascript | 507 | 12631812 | <gh_stars>100-1000
# terrascript/provider/taiidani/jenkins.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:19:30 UTC)
import terrascript
class jenkins(terrascript.Provider):
"""Jenkins Terraform Provider"""
__description__ = "Jenkins Terraform Provider"
__namespace__ = "taiidani"
__name__ = "jenkins"
__source__ = "https://github.com/taiidani/terraform-provider-jenkins"
__version__ = "0.9.0"
__published__ = "2021-09-15T16:35:39Z"
__tier__ = "community"
__all__ = ["jenkins"]
|
VoiceActivityDetection/train.py | jeffery-work/SpeechAlgorithms | 338 | 12631814 | """
@FileName: train.py
@Description: Implement train
@Author: Ryuk
@CreateDate: 2020/05/13
@LastEditTime: 2020/05/13
@LastEditors: Please set LastEditors
@Version: v0.1
"""
from model import *
from utils import *
logger = getLogger()
logger.info("====================================Programm Start====================================")
set_seed()
config = Config()
config.print_params(logger.info)
def infer(model, loader):
model.eval()
correct = 0.
total = len(loader.dataset) * 333
for input, target in loader:
with torch.no_grad():
pred = model(input)
pred[pred > 0.5] = 1
pred[pred <= 0.5] = 0
correct += torch.eq(pred, target).sum().item()
return correct / total
def main():
best_acc = 0.
train_set = VADDataset(config.data_path, mode="train")
train_loader = DataLoader(train_set, batch_size=config.batch_size, shuffle=True, num_workers=1)
val_set = VADDataset(config.data_path, mode="val")
val_loader = DataLoader(val_set, batch_size=config.batch_size, shuffle=True, num_workers=1)
test_set = VADDataset(config.data_path, mode="test")
test_loader = DataLoader(test_set, batch_size=config.batch_size, shuffle=True, num_workers=1)
logger.info("Data Load Successfully")
model = VADNet()
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)
logger.info("Start Training")
for epoch in range(config.iters):
for step, (input, target) in enumerate(train_loader):
optimizer.zero_grad()
logits = model(input)
loss = criterion(logits, target)
loss.backward()
optimizer.step()
# val
if epoch % 10 == 0:
val_acc = infer(model, val_loader)
logger.info("Epoch:%d, Val_acc:%f" % (epoch,val_acc))
if val_acc > best_acc:
best_acc = val_acc
torch.save(model.state_dict(), config.params_path)
logger.info("Finished Training")
logger.info("==================Testing==================")
test_acc = infer(model, test_loader)
logger.info("Test_acc:%f" % test_acc)
if __name__ == "__main__":
main() |
tests/helper.py | anton-ryzhov/python-manhole | 256 | 12631881 | <gh_stars>100-1000
from __future__ import print_function
import atexit
import errno
import logging
import os
import signal
import sys
import time
from functools import partial
TIMEOUT = int(os.getenv('MANHOLE_TEST_TIMEOUT', 10))
SOCKET_PATH = '/tmp/manhole-socket'
OUTPUT = sys.__stdout__
def handle_sigterm(signo, _frame):
# Simulate real termination
print("Terminated", file=OUTPUT)
sys.exit(128 + signo)
# Handling sigterm ensure that atexit functions are called, and we do not leave
# leftover /tmp/manhole-pid sockets.
signal.signal(signal.SIGTERM, handle_sigterm)
@atexit.register
def log_exit():
print("In atexit handler.", file=OUTPUT)
def setup_greenthreads(patch_threads=False):
try:
from gevent import monkey
monkey.patch_all(thread=False)
except (ImportError, SyntaxError):
pass
try:
import eventlet
eventlet.hubs.get_hub() # workaround for circular import issue in eventlet,
# see https://github.com/eventlet/eventlet/issues/401
eventlet.monkey_patch(thread=False)
except (ImportError, SyntaxError):
pass
def do_fork():
pid = os.fork()
if pid:
@atexit.register
def cleanup():
try:
os.kill(pid, signal.SIGINT)
time.sleep(0.2)
os.kill(pid, signal.SIGTERM)
except OSError as e:
if e.errno != errno.ESRCH:
raise
os.waitpid(pid, 0)
else:
time.sleep(TIMEOUT * 10)
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG,
format='[pid=%(process)d - %(asctime)s]: %(name)s - %(levelname)s - %(message)s',
)
test_name = sys.argv[1]
try:
if os.getenv('PATCH_THREAD', False):
import manhole
setup_greenthreads(True)
else:
setup_greenthreads(True)
import manhole
if test_name == 'test_environ_variable_activation':
time.sleep(TIMEOUT)
elif test_name == 'test_install_twice_not_strict':
manhole.install(oneshot_on='USR2')
manhole.install(strict=False)
time.sleep(TIMEOUT)
elif test_name == 'test_log_fd':
manhole.install(verbose=True, verbose_destination=2)
manhole._LOG("whatever-1")
manhole._LOG("whatever-2")
elif test_name == 'test_log_fh':
class Output(object):
data = []
write = data.append
manhole.install(verbose=True, verbose_destination=Output)
manhole._LOG("whatever")
if Output.data and "]: whatever" in Output.data[-1]:
print("SUCCESS")
elif test_name == 'test_activate_on_usr2':
manhole.install(activate_on='USR2')
for i in range(TIMEOUT * 100):
time.sleep(0.1)
elif test_name == 'test_install_once':
manhole.install()
try:
manhole.install()
except manhole.AlreadyInstalled:
print('ALREADY_INSTALLED')
else:
raise AssertionError("Did not raise AlreadyInstalled")
elif test_name == 'test_stderr_doesnt_deadlock':
import subprocess
manhole.install()
for i in range(50):
print('running iteration', i)
p = subprocess.Popen(['true'])
print('waiting for process', p.pid)
p.wait()
print('process ended')
path = '/tmp/manhole-%d' % p.pid
if os.path.exists(path):
os.unlink(path)
raise AssertionError(path + ' exists !')
print('SUCCESS')
elif test_name == 'test_fork_exec':
manhole.install(reinstall_delay=5)
print("Installed.")
time.sleep(0.2)
pid = os.fork()
print("Forked, pid =", pid)
if pid:
os.waitpid(pid, 0)
path = '/tmp/manhole-%d' % pid
if os.path.exists(path):
os.unlink(path)
raise AssertionError(path + ' exists !')
else:
try:
time.sleep(1)
print("Exec-ing `true`")
os.execvp('true', ['true'])
finally:
os._exit(1)
print('SUCCESS')
elif test_name == 'test_activate_on_with_oneshot_on':
manhole.install(activate_on='USR2', oneshot_on='USR2')
for i in range(TIMEOUT * 100):
time.sleep(0.1)
elif test_name == 'test_interrupt_on_accept':
def handle_usr2(_sig, _frame):
print('Got USR2')
signal.signal(signal.SIGUSR2, handle_usr2)
import ctypes
import ctypes.util
libpthread_path = ctypes.util.find_library("pthread")
if not libpthread_path:
raise ImportError
libpthread = ctypes.CDLL(libpthread_path)
if not hasattr(libpthread, "pthread_setname_np"):
raise ImportError
pthread_kill = libpthread.pthread_kill
pthread_kill.argtypes = [ctypes.c_void_p, ctypes.c_int]
pthread_kill.restype = ctypes.c_int
manhole.install(sigmask=None)
for i in range(15):
time.sleep(0.1)
print("Sending signal to manhole thread ...")
pthread_kill(manhole._MANHOLE.thread.ident, signal.SIGUSR2)
for i in range(TIMEOUT * 100):
time.sleep(0.1)
elif test_name == 'test_oneshot_on_usr2':
manhole.install(oneshot_on='USR2')
for i in range(TIMEOUT * 100):
time.sleep(0.1)
elif test_name.startswith('test_signalfd_weirdness'):
signalled = False
@partial(signal.signal, signal.SIGUSR1)
def signal_handler(sig, _):
print('Received signal %s' % sig)
global signalled
signalled = True
if 'negative' in test_name:
manhole.install(sigmask=None)
else:
manhole.install(sigmask=[signal.SIGUSR1])
time.sleep(0.3) # give the manhole a bit enough time to start
print('Starting ...')
import signalfd
signalfd.sigprocmask(signalfd.SIG_BLOCK, [signal.SIGUSR1])
sys.setcheckinterval(1)
for i in range(100000):
os.kill(os.getpid(), signal.SIGUSR1)
print('signalled=%s' % signalled)
time.sleep(TIMEOUT * 10)
elif test_name == 'test_auth_fail':
manhole.get_peercred = lambda _: (-1, -1, -1)
manhole.install()
time.sleep(TIMEOUT * 10)
elif test_name == 'test_socket_path':
manhole.install(socket_path=SOCKET_PATH)
time.sleep(TIMEOUT * 10)
elif test_name == 'test_daemon_connection':
manhole.install(daemon_connection=True)
time.sleep(TIMEOUT)
elif test_name == 'test_socket_path_with_fork':
manhole.install(socket_path=SOCKET_PATH)
time.sleep(TIMEOUT)
do_fork()
elif test_name == 'test_locals':
manhole.install(socket_path=SOCKET_PATH,
locals={'k1': 'v1', 'k2': 'v2'})
time.sleep(TIMEOUT)
elif test_name == 'test_locals_after_fork':
manhole.install(locals={'k1': 'v1', 'k2': 'v2'})
do_fork()
elif test_name == 'test_redirect_stderr_default':
manhole.install(socket_path=SOCKET_PATH)
time.sleep(TIMEOUT)
elif test_name == 'test_redirect_stderr_disabled':
manhole.install(socket_path=SOCKET_PATH, redirect_stderr=False)
time.sleep(TIMEOUT)
elif test_name == 'test_sigmask':
manhole.install(socket_path=SOCKET_PATH, sigmask=[signal.SIGUSR1])
time.sleep(TIMEOUT)
elif test_name == 'test_connection_handler_exec_func':
manhole.install(connection_handler=manhole.handle_connection_exec, locals={'tete': lambda: print('TETE')})
time.sleep(TIMEOUT * 10)
elif test_name == 'test_connection_handler_exec_str':
manhole.install(connection_handler='exec', locals={'tete': lambda: print('TETE')})
time.sleep(TIMEOUT * 10)
else:
manhole.install()
time.sleep(0.3) # give the manhole a bit enough time to start
if test_name == 'test_simple':
time.sleep(TIMEOUT * 10)
elif test_name == 'test_with_forkpty':
time.sleep(1)
pid, masterfd = os.forkpty()
if pid:
@atexit.register
def cleanup():
try:
os.kill(pid, signal.SIGINT)
time.sleep(0.2)
os.kill(pid, signal.SIGTERM)
except OSError as e:
if e.errno != errno.ESRCH:
raise
while not os.waitpid(pid, os.WNOHANG)[0]:
try:
os.write(2, os.read(masterfd, 1024))
except OSError as e:
print("Error while reading from masterfd:", e)
else:
time.sleep(TIMEOUT * 10)
elif test_name == 'test_with_fork':
time.sleep(1)
do_fork()
else:
raise RuntimeError('Invalid test spec.')
except: # noqa
print('Died with %s.' % sys.exc_info()[0].__name__, file=OUTPUT)
import traceback
traceback.print_exc(file=OUTPUT)
print('DIED.', file=OUTPUT)
|
src/__init__.py | Briles/gruvbox | 251 | 12631891 | #!/usr/bin/env python
# coding: utf-8
from .documentation import *
from .support import *
from .gruvbox import *
|
dataprep/clean/components/num_scaling/minmax_scaler.py | devinllu/dataprep | 1,229 | 12631928 | """
Implement numerical minmax scaler.
"""
from typing import Any, Union
import dask.dataframe as dd
class MinmaxScaler:
"""Min Value and Max Value Scaler for scaling numerical values
Attributes:
name
Name of scaler
min
Min value of provided data column
max
Max value of provided data column
"""
def __init__(self) -> None:
"""
        This function initiates the numerical scaler.
"""
self.name = "minmaxScaler"
self.min = 0
self.max = 0
def fit(self, col_df: dd.Series) -> Any:
"""
Extract min value and max value for Minmax Scaler according to the provided column.
Parameters
----------
col_df
Provided data column.
"""
self.min = col_df.min()
self.max = col_df.max()
return self
def transform(self, col_df: dd.Series) -> dd.Series:
"""
Transform the provided data column with the extracted min value and max value.
Parameters
----------
col_df
Provided data column.
"""
result = col_df.map(self.compute_val)
return result
def fit_transform(self, col_df: dd.Series) -> dd.Series:
""" "
Extract min value and max value for Minmax Scaler according to the provided column.
Transform the provided data column with the extracted min value and max value.
Parameters
----------
col_df
Data column.
"""
return self.fit(col_df).transform(col_df)
def compute_val(self, val: Union[int, float]) -> Union[int, float]:
"""
Compute scaling value of provided value with fitted min value and max value.
Parameters
----------
val
Value should be scaled.
"""
return (val - self.min) / (self.max - self.min)
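# A minimal usage sketch of ``MinmaxScaler``; the sample data is illustrative
# and the exact point at which dask's lazy results need ``.compute()`` is an
# assumption here (``fit`` stores lazy dask scalars). The helper is not
# invoked anywhere in this module.
def _example_minmax_scaler():
    import pandas as pd
    import dask.dataframe as dd
    col = dd.from_pandas(pd.Series([1.0, 2.0, 3.0, 4.0]), npartitions=1)
    scaler = MinmaxScaler()
    # Intended result: each value mapped to (val - min) / (max - min),
    # i.e. rescaled into the [0, 1] range.
    return scaler.fit_transform(col)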
|
recipes/UrbanSound8k/SoundClassification/urbansound8k_prepare.py | JasonSWFu/speechbrain | 3,913 | 12631930 | """
Creates data manifest files from UrbanSound8k, suitable for use in SpeechBrain.
https://urbansounddataset.weebly.com/urbansound8k.html
From the authors of UrbanSound8k:
1. Don't reshuffle the data! Use the predefined 10 folds and perform 10-fold (not 5-fold) cross validation
    The experiments conducted by the vast majority of publications using UrbanSound8K (by ourselves and others)
evaluate classification models via 10-fold cross validation using the predefined splits*.
We strongly recommend following this procedure.
Why?
If you reshuffle the data (e.g. combine the data from all folds and generate a random train/test split)
you will be incorrectly placing related samples in both the train and test sets, leading to inflated
scores that don't represent your model's performance on unseen data. Put simply, your results will be wrong.
Your results will NOT be comparable to previous results in the literature, meaning any claims to an
improvement on previous research will be invalid. Even if you don't reshuffle the data, evaluating using
different splits (e.g. 5-fold cross validation) will mean your results are not comparable to previous research.
2. Don't evaluate just on one split! Use 10-fold (not 5-fold) cross validation and average the scores
We have seen reports that only provide results for a single train/test split, e.g. train on folds 1-9,
test on fold 10 and report a single accuracy score. We strongly advise against this. Instead, perform
10-fold cross validation using the provided folds and report the average score.
Why?
Not all the splits are as "easy". That is, models tend to obtain much higher scores when trained on folds
1-9 and tested on fold 10, compared to (e.g.) training on folds 2-10 and testing on fold 1. For this reason,
it is important to evaluate your model on each of the 10 splits and report the average accuracy.
Again, your results will NOT be comparable to previous results in the literature.
* 10-fold cross validation using the predefined folds: train on data from 9 of the 10 predefined folds and
test on data from the remaining fold. Repeat this process 10 times (each time using a different set of
9 out of the 10 folds for training and the remaining fold for testing). Finally report the average classification
accuracy over all 10 experiments (as an average score + standard deviation, or, even better, as a boxplot).
Authors:
* <NAME>, 2021
"""
import os
import json
import logging
import ntpath
import torchaudio
from speechbrain.dataio.dataio import read_audio
from speechbrain.dataio.dataio import load_data_csv
logger = logging.getLogger(__name__)
URBAN_SOUND_8K_DOWNLOAD_FORM_URL = (
"https://urbansounddataset.weebly.com/download-urbansound8k.html"
)
MODIFIED_METADATA_FILE_NAME = "UrbanSound8k_speechbrain.csv"
ACCEPTABLE_FOLD_NUMS = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
def prepare_urban_sound_8k(
data_folder,
audio_data_folder,
save_json_train,
save_json_valid,
save_json_test,
train_fold_nums=[1, 2, 3, 4, 5, 6, 7, 8],
valid_fold_nums=[9],
test_fold_nums=[10],
skip_manifest_creation=False,
):
"""
Prepares the json files for the UrbanSound8k dataset.
Prompts to download the dataset if it is not found in the `data_folder`.
Arguments
---------
data_folder : str
Path to the folder where the UrbanSound8k dataset metadata is stored.
audio_data_folder: str
Path to the folder where the UrbanSound8k dataset audio files are stored.
save_json_train : str
Path where the train data specification file will be saved.
save_json_valid : str
Path where the validation data specification file will be saved.
save_json_test : str
Path where the test data specification file will be saved.
    train_fold_nums : list or int (integers [1,10])
        A list of integers defining which pre-defined "folds" to use for training. Must be
        exclusive of valid_fold_nums and test_fold_nums.
    valid_fold_nums : list or int (integers [1,10])
        A list of integers defining which pre-defined "folds" to use for validation. Must be
        exclusive of train_fold_nums and test_fold_nums.
    test_fold_nums : list or int (integers [1,10])
        A list of integers defining which pre-defined "folds" to use for testing. Must be
        exclusive of train_fold_nums and valid_fold_nums.
Example
-------
>>> data_folder = '/path/to/UrbanSound8k'
    >>> prepare_urban_sound_8k(data_folder, data_folder + '/audio', 'train.json', 'valid.json', 'test.json', [1,2,3,4,5,6,7,8], [9], [10])
"""
    # Coerce params to the correct type if necessary
if type(train_fold_nums) is int:
train_fold_nums = [train_fold_nums]
if type(valid_fold_nums) is int:
valid_fold_nums = [valid_fold_nums]
if type(test_fold_nums) is int:
test_fold_nums = [test_fold_nums]
# Validate passed fold params
for fold_num in train_fold_nums:
if fold_num not in ACCEPTABLE_FOLD_NUMS:
print(
f"Train fold numbers {train_fold_nums}, contains an invalid value. Must be in {ACCEPTABLE_FOLD_NUMS}"
)
logger.info(
f"Train fold numbers {train_fold_nums}, contains an invalid value. Must be in {ACCEPTABLE_FOLD_NUMS}"
)
return
for fold_num in valid_fold_nums:
if fold_num not in ACCEPTABLE_FOLD_NUMS:
print(
f"Validation fold numbers {valid_fold_nums}, contains an invalid value. Must be in {ACCEPTABLE_FOLD_NUMS}"
)
logger.info(
f"Validation fold numbers {valid_fold_nums}, contains an invalid value. Must be in {ACCEPTABLE_FOLD_NUMS}"
)
return
for fold_num in test_fold_nums:
if fold_num not in ACCEPTABLE_FOLD_NUMS:
print(
f"Test fold numbers {test_fold_nums}, contains an invalid value. Must be in {ACCEPTABLE_FOLD_NUMS}"
)
logger.info(
f"Test fold numbers {test_fold_nums}, contains an invalid value. Must be in {ACCEPTABLE_FOLD_NUMS}"
)
return
# Check if train, and valid and train and test folds are exclusive
if folds_overlap(train_fold_nums, valid_fold_nums):
print(
f"Train {train_fold_nums}, and Valid {valid_fold_nums} folds must be mutually exclusive!"
)
logger.info(
f"Train {train_fold_nums}, and Valid {valid_fold_nums} folds must be mutually exclusive!"
)
return
if folds_overlap(train_fold_nums, test_fold_nums):
print(
f"Train {train_fold_nums} and Test {test_fold_nums} folds must be mutually exclusive!"
)
logger.info(
f"Train {train_fold_nums} and Test {test_fold_nums} folds must be mutually exclusive!"
)
return
# If the dataset doesn't exist yet, prompt the user to set or download it
if not check_folders(audio_data_folder):
prompt_download_urban_sound_8k(audio_data_folder)
return
# Don't need to do this every single time
if skip_manifest_creation is True:
return
# If our modified metadata file does not exist, create it
urban_sound_8k_speechbrain_metadata_csv_path = os.path.join(
os.path.abspath(data_folder), "metadata/", MODIFIED_METADATA_FILE_NAME
)
if not os.path.exists(urban_sound_8k_speechbrain_metadata_csv_path):
urban_sound_8k_speechbrain_metadata_csv_path = create_metadata_speechbrain_file(
data_folder
)
# TODO: If it does not exist, we create it, but next step will certainly fail?
# Read the metadata into a dictionary
# Every key of this dictionary is now one of the sound filenames, without the ".wav" suffix
metadata = load_data_csv(urban_sound_8k_speechbrain_metadata_csv_path)
# List files and create manifest from list
logger.info(
f"Creating {save_json_train}, {save_json_valid}, and {save_json_test}"
)
# Creating json files
create_json(metadata, audio_data_folder, train_fold_nums, save_json_train)
create_json(metadata, audio_data_folder, valid_fold_nums, save_json_valid)
create_json(metadata, audio_data_folder, test_fold_nums, save_json_test)
def create_json(metadata, audio_data_folder, folds_list, json_file):
"""
Creates the json file given a list of wav files.
Arguments
---------
metadata: dict
A dictionary containing the UrbanSound8k metadata file modified for the
SpeechBrain, such that keys are IDs (which are the .wav file names without the file extension).
folds_list : list of int
The list of folds [1,10] to include in this batch
json_file : str
The path of the output json file
"""
# Processing all the wav files in the list
json_dict = {}
for ID, sample_metadata in metadata.items():
fold_num = int(sample_metadata["fold"])
if fold_num in folds_list:
# Reading the signal (to retrieve duration in seconds)
wav_file = os.path.join(
os.path.abspath(audio_data_folder),
"fold" + str(fold_num) + "/",
sample_metadata["slice_file_name"],
)
try:
signal = read_audio(wav_file)
file_info = torchaudio.info(wav_file)
# If we're using sox/soundfile backend, file_info will have the old type
if isinstance(
file_info, torchaudio.backend.common.AudioMetaData
):
duration = signal.shape[0] / file_info.sample_rate
else:
duration = signal.shape[0] / file_info[0].rate
# Create entry for this sample ONLY if we have successfully read-in the file using SpeechBrain/torchaudio
json_dict[ID] = {
"wav": sample_metadata["slice_file_name"],
"classID": int(sample_metadata["classID"]),
"class_string": sample_metadata["class_string"],
"salience": int(sample_metadata["salience"]),
"fold": sample_metadata["fold"],
"duration": duration,
}
except Exception:
print(
f"There was a problem reading the file:{wav_file}. Skipping duration field for it."
)
logger.exception(
f"There was a problem reading the file:{wav_file}. Skipping it."
)
# Writing the dictionary to the json file
# Need to make sure sub folder "manifest" exists, if not create it
parent_dir = os.path.dirname(json_file)
if not os.path.exists(parent_dir):
os.mkdir(parent_dir)
with open(json_file, mode="w") as json_f:
json.dump(json_dict, json_f, indent=2)
logger.info(f"{json_file} successfully created!")
def folds_overlap(list1, list2):
"""Returns True if any passed lists has incorrect type OR has items in common."""
if (type(list1) != list) or (type(list2) != list):
return True
if any(item in list1 for item in list2):
return True
return False
def check_folders(*folders):
"""Returns False if any passed folder does not exist."""
for folder in folders:
if not os.path.exists(folder):
return False
return True
def full_path_to_audio_file(data_folder, slice_file_name, fold_num):
"""Get path to file given slice file name and fold number
Arguments
---------
    data_folder : str
        UrbanSound8k data folder.
    slice_file_name : str
        Filename.
    fold_num : int
        Fold number.
Returns
    -------
string containing absolute path to corresponding file
"""
return os.path.join(
os.path.abspath(data_folder),
"audio/",
"fold" + str(fold_num) + "/",
slice_file_name,
)
def create_metadata_speechbrain_file(data_folder):
"""Get path to file given slice file name and fold number
Arguments
---------
data_folder : str
UrbanSound8k data folder.
Returns
    -------
string containing absolute path to metadata csv file modified for SpeechBrain or None if source file not found
"""
import pandas as pd
urban_sound_8k_metadata_csv_path = os.path.join(
os.path.abspath(data_folder), "metadata/UrbanSound8K.csv"
)
if not os.path.exists(urban_sound_8k_metadata_csv_path):
return None
urbansound_metadata_df = pd.read_csv(urban_sound_8k_metadata_csv_path)
# SpeechBrain wants an ID column
urbansound_metadata_df["ID"] = urbansound_metadata_df.apply(
lambda row: removesuffix(row["slice_file_name"], ".wav"), axis=1
)
urbansound_metadata_df = urbansound_metadata_df.rename(
columns={"class": "class_string"}
)
urban_sound_speechbrain_metadata_csv_path = os.path.join(
os.path.abspath(data_folder), "metadata/", MODIFIED_METADATA_FILE_NAME
)
urbansound_metadata_df.to_csv(
urban_sound_speechbrain_metadata_csv_path, index=False
)
return urban_sound_speechbrain_metadata_csv_path
def path_leaf(path):
head, tail = ntpath.split(path)
return tail or ntpath.basename(head)
def removesuffix(somestring, suffix):
"""Removed a suffix from a string
Arguments
---------
somestring : str
Any string.
suffix : str
Suffix to be removed from somestring.
Returns
    -------
string resulting from suffix removed from somestring, if found, unchanged otherwise
"""
if somestring.endswith(suffix):
return somestring[: -1 * len(suffix)]
else:
return somestring
def prompt_download_urban_sound_8k(destination):
"""Prompt to download dataset
Arguments
---------
destination : str
Place to put dataset.
"""
print(
"UrbanSound8k data is missing from {}!\nRequest it from here: {}".format(
destination, URBAN_SOUND_8K_DOWNLOAD_FORM_URL
)
)
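# A minimal sketch of the 10-fold protocol stressed in the module docstring:
# rotate through the predefined folds, holding one out for testing each time.
# Using the next fold for validation (an 8/1/1 split) and the manifest paths
# below are assumptions for illustration; the helper is not invoked anywhere
# in this module.
def _example_generate_10_fold_manifests(data_folder, audio_data_folder):
    for test_fold in ACCEPTABLE_FOLD_NUMS:
        valid_fold = test_fold % 10 + 1
        train_folds = [
            f for f in ACCEPTABLE_FOLD_NUMS if f not in (test_fold, valid_fold)
        ]
        prepare_urban_sound_8k(
            data_folder=data_folder,
            audio_data_folder=audio_data_folder,
            save_json_train=f"./UrbanSound8k/manifest/train_fold{test_fold}.json",
            save_json_valid=f"./UrbanSound8k/manifest/valid_fold{test_fold}.json",
            save_json_test=f"./UrbanSound8k/manifest/test_fold{test_fold}.json",
            train_fold_nums=train_folds,
            valid_fold_nums=[valid_fold],
            test_fold_nums=[test_fold],
        )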
# Testing
if __name__ == "__main__":
import speechbrain
# Data preparation, to be run on only one process.
speechbrain.utils.distributed.run_on_main(
prepare_urban_sound_8k,
kwargs={
"data_folder": "/Volumes/BigMule/BigDevDocuments/UrbanSound8K",
"audio_data_folder": "/Volumes/BigMule/BigDevDocuments/UrbanSound8K/audio",
"save_json_train": "./UrbanSound8k/manifest/train.json",
"save_json_valid": "./UrbanSound8k/manifest/valid.json",
"save_json_test": "./UrbanSound8k/manifest/test.json",
"train_fold_nums": [1, 2, 3, 4, 5, 6, 7, 8],
"valid_fold_nums": [9],
"test_fold_nums": [10],
},
)
|
scripts/create-geo-simple-lexicon.py | yash-srivastava19/sempre | 812 | 12631936 | #!/usr/bin/python
import sys
import json
class LexicalEntry:
def __init__(self, l, f, t):
self.lexeme=l.strip()
self.formula=f.strip()
self.type=t.strip()
out = open(sys.argv[2],'w')
with open(sys.argv[1]) as f:
for line in f:
tokens = line.split("\t")
if len(tokens) > 2:
continue
if(tokens[0] == "loc_city"):
index = tokens[1].rfind('.')
citystate = tokens[1][index+1:]
city = citystate[0:citystate.rfind('_')]
city = city.replace('_',' ').strip()
entry = LexicalEntry(city, tokens[1], "fb:en.city")
out.write(json.dumps(entry.__dict__)+'\n')
elif (tokens[0] == "loc_state"):
index = tokens[1].rfind('.')
state = tokens[1][index+1:].strip()
state = state.replace('_',' ').strip()
entry = LexicalEntry(state, tokens[1], "fb:en.state")
out.write(json.dumps(entry.__dict__)+'\n')
elif tokens[0] == "loc_river":
index = tokens[1].rfind('.')
river = tokens[1][index+1:].strip()
river = river.replace('_',' ').strip()
entry = LexicalEntry(river+" river", tokens[1], "fb:en.river")
out.write(json.dumps(entry.__dict__)+'\n')
elif (tokens[0] == "loc_place"):
index = tokens[1].rfind('.')
place = tokens[1][index+1:].strip()
place = place.replace('_',' ').strip()
entry = LexicalEntry(place, tokens[1], "fb:en.place")
out.write(json.dumps(entry.__dict__)+'\n')
elif (tokens[0] == "loc_lake"):
index = tokens[1].rfind('.')
lake = tokens[1][index+1:].strip()
lake = lake.replace('_',' ').strip()
if not 'lake' in lake:
lake = lake + " lake"
entry = LexicalEntry(lake, tokens[1], "fb:en.lake")
out.write(json.dumps(entry.__dict__)+'\n')
elif (tokens[0] == "loc_mountain"):
index = tokens[1].rfind('.')
mountain = tokens[1][index+1:].strip()
mountain = mountain.replace('_',' ').strip()
entry = LexicalEntry("mount " + mountain, tokens[1], "fb:en.mountain")
out.write(json.dumps(entry.__dict__)+'\n')
elif (tokens[0] == "loc_country"):
index = tokens[1].rfind('.')
country = tokens[1][index+1:].strip()
country = country.replace('_',' ').strip()
entry = LexicalEntry(country, tokens[1], "fb:en.country")
out.write(json.dumps(entry.__dict__)+'\n')
out.close()
|
workflows/cloudify_system_workflows/deployment_update/step_extractor.py | cloudify-cosmo/cloudify-manager | 124 | 12631939 | import operator
from collections import Counter
from functools import total_ordering
import networkx as nx
RELEVANT_DEPLOYMENT_FIELDS = ['blueprint_id', 'id', 'inputs', 'nodes',
'outputs', 'workflows', 'groups', 'policy_types',
'policy_triggers', 'description',
'deployment_plugins_to_install',
'workflow_plugins_to_install']
DEFAULT_TOPOLOGY_LEVEL = 0
NODE = 'node'
NODES = 'nodes'
OUTPUT = 'output'
OUTPUTS = 'outputs'
TYPE = 'type'
HOST_ID = 'host_id'
OPERATION = 'operation'
OPERATIONS = 'operations'
RELATIONSHIP = 'relationship'
RELATIONSHIPS = 'relationships'
TARGET_ID = 'target_id'
SOURCE_OPERATIONS = 'source_operations'
TARGET_OPERATIONS = 'target_operations'
PROPERTY = 'property'
PROPERTIES = 'properties'
WORKFLOW = 'workflow'
WORKFLOWS = 'workflows'
GROUP = 'group'
GROUPS = 'groups'
POLICY_TYPE = 'policy_type'
POLICY_TYPES = 'policy_types'
POLICY_TRIGGER = 'policy_trigger'
POLICY_TRIGGERS = 'policy_triggers'
DEPLOYMENT_PLUGINS_TO_INSTALL = 'deployment_plugins_to_install'
PLUGIN = 'plugin'
PLUGINS = 'plugins'
PLUGINS_TO_INSTALL = 'plugins_to_install'
DESCRIPTION = 'description'
CONTAINED_IN_RELATIONSHIP_TYPE = 'cloudify.relationships.contained_in'
TYPE_HIERARCHY = 'type_hierarchy'
@total_ordering
class DeploymentUpdateStep(object):
def __init__(self,
action,
entity_type,
entity_id,
supported=True,
topology_order=DEFAULT_TOPOLOGY_LEVEL):
self.action = action
self.entity_type = entity_type
self.entity_id = entity_id
self.supported = supported
self.topology_order = topology_order
@property
def entity_name(self):
return self.entity_id.split(':')[-1]
def as_dict(self):
if not self.supported:
raise RuntimeError(
'Unsupported steps cannot be represented as dicts')
return {
'action': self.action,
'entity_id': self.entity_id,
'entity_type': self.entity_type,
'topology_order': self.topology_order,
}
def __hash__(self):
return hash(self.entity_id)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
else:
return False
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return self.__str__()
def __lt__(self, other):
"""Is this step considered "smaller" than the other step?
        This is used for sorting the steps, i.e. steps that compare as
        smaller come earlier and will be executed first.
"""
if self.action != other.action:
# the order is 'remove' < 'add' < 'modify'
actions = ['remove', 'add', 'modify']
return actions.index(self.action) < actions.index(other.action)
if self.action == 'add':
if self.entity_type == NODE:
if other.entity_type == RELATIONSHIP:
# add node before adding relationships
return True
if other.entity_type == NODE:
# higher topology order before lower topology order
return self.topology_order > other.topology_order
if self.action == 'remove':
# remove relationships before removing nodes
if self.entity_type == RELATIONSHIP and other.entity_type == NODE:
return True
return False
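# A minimal sketch of the ordering defined by ``__lt__`` above: when steps are
# sorted, removals come before additions, which come before modifications;
# relationship removals precede node removals; and added nodes with a higher
# ``topology_order`` come first. The entity ids below are illustrative
# assumptions and the helper is not invoked anywhere in this module.
def _example_step_ordering():
    steps = [
        DeploymentUpdateStep('modify', OUTPUT, 'outputs:endpoint'),
        DeploymentUpdateStep('add', RELATIONSHIP, 'nodes:app:relationships:[0]'),
        DeploymentUpdateStep('add', NODE, 'nodes:app', topology_order=1),
        DeploymentUpdateStep('remove', NODE, 'nodes:old_app'),
    ]
    # Expected order: remove node, add node, add relationship, modify output.
    return sorted(steps)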
def _is_contained_in_changed(node, other_node):
node_container = next(
(r['target_id'] for r in node['relationships']
if CONTAINED_IN_RELATIONSHIP_TYPE in r[TYPE_HIERARCHY]), None)
other_node_container = next(
(r['target_id'] for r in other_node['relationships']
if CONTAINED_IN_RELATIONSHIP_TYPE in r[TYPE_HIERARCHY]), None)
return node_container != other_node_container
def _create_steps(nodes, deployment, new_plan):
if new_plan[DESCRIPTION] != deployment.description:
yield DeploymentUpdateStep(
action='modify',
entity_type=DESCRIPTION,
entity_id=DESCRIPTION,
)
new_nodes = {node['id']: node for node in new_plan[NODES]}
yield from _extract_host_agent_plugins_steps(new_nodes, nodes)
yield from _diff_nodes(new_nodes, nodes)
for action, key in _diff_dicts(new_plan[OUTPUTS], deployment.outputs):
yield DeploymentUpdateStep(
action=action,
entity_type=OUTPUT,
entity_id=f'{OUTPUTS}:{key}'
)
for action, key in _diff_dicts(new_plan[WORKFLOWS], deployment.workflows):
yield DeploymentUpdateStep(
action=action,
entity_type=WORKFLOW,
entity_id=f'{WORKFLOWS}:{key}'
)
for action, key in _diff_dicts(
new_plan[POLICY_TYPES], deployment.policy_types):
yield DeploymentUpdateStep(
action=action,
entity_type=POLICY_TYPE,
entity_id=f'{POLICY_TYPES}:{key}',
supported=False
)
for action, key in _diff_dicts(
new_plan[POLICY_TRIGGERS],
deployment.policy_triggers):
yield DeploymentUpdateStep(
action=action,
entity_type=POLICY_TRIGGER,
entity_id=f'{POLICY_TRIGGERS}:{key}',
supported=False
)
for action, key in _diff_dicts(
new_plan[GROUPS], deployment.groups,
compare=_compare_groups):
yield DeploymentUpdateStep(
action=action,
entity_type=GROUP,
entity_id=f'{GROUPS}:{key}',
supported=False
)
def _extract_host_agent_plugins_steps(new_nodes, old_nodes):
# We want to update both the node's `plugins_to_install` and `plugins`
for entity_type in (PLUGINS_TO_INSTALL, PLUGINS):
for new_node_name, new_node in new_nodes.items():
new_plugins = new_node.get(entity_type) or []
# If it's a new node, the plugin will be installed anyway
if not new_plugins or new_node_name not in old_nodes:
continue
old_node = old_nodes[new_node_name]
old_plugins = old_node.get(entity_type) or []
if old_node.get(entity_type) == new_node.get(entity_type):
continue
for new_plugin in new_plugins:
old_plugin = _find_matching_plugin(new_plugin, old_plugins)
if old_plugin == new_plugin:
continue
entity_id =\
f'{entity_type}:{new_node_name}:{new_plugin["name"]}'
action = 'add' if not old_plugin else 'modify'
yield DeploymentUpdateStep(
action=action,
entity_type=PLUGIN,
entity_id=entity_id,
)
def _find_matching_plugin(new_plugin, old_plugins):
for old_plugin in old_plugins:
if (old_plugin['name'] == new_plugin['name'] and
old_plugin['executor'] == new_plugin['executor']):
return old_plugin
return None
def _diff_nodes(new_nodes, old_nodes):
# with self.entity_id_builder.extend_id(NODES):
for node_name, node in new_nodes.items():
# with self.entity_id_builder.extend_id(node_name):
if node_name not in old_nodes:
yield DeploymentUpdateStep(
action='add',
entity_type=NODE,
entity_id=f'{NODES}:{node_name}',
)
continue
old_node = old_nodes[node_name]
if node[TYPE] != old_node[TYPE] or \
_is_contained_in_changed(node, old_node):
# a node that changed its type or its host_id
# counts as a different node
yield DeploymentUpdateStep(
action='modify',
entity_type=NODE,
entity_id=f'{NODES}:{node_name}',
supported=False
)
# Since the node was classified as added or
# removed, there is no need to compare its other
# fields.
continue
yield from _diff_node(node_name, node, old_node)
for node_name in old_nodes:
if node_name not in new_nodes:
yield DeploymentUpdateStep(
action='remove',
entity_type=NODE,
entity_id=f'{NODES}:{node_name}',
)
def _relationship_key(rel):
"""A comparable key for a relationship, to check for identical rels.
Relationships will be considered identical, if they have the same key
(ie. same type, and target).
"""
return rel[TYPE], rel[TARGET_ID]
def _diff_node(node_name, new_node, old_node):
for action, key in _diff_dicts(new_node[OPERATIONS], old_node[OPERATIONS]):
yield DeploymentUpdateStep(
action=action,
entity_type=OPERATION,
entity_id=f'{NODES}:{node_name}:{OPERATIONS}:{key}'
)
seen_relationships = Counter()
for rel_index, relationship in enumerate(new_node[RELATIONSHIPS]):
entity_id_base = f'{NODES}:{node_name}:{RELATIONSHIPS}'
rel_key = _relationship_key(relationship)
old_relationship, old_rel_index = \
_find_relationship(
old_node[RELATIONSHIPS],
relationship[TYPE],
relationship[TARGET_ID],
seen_relationships[rel_key])
seen_relationships[rel_key] += 1
if old_relationship is None:
yield DeploymentUpdateStep(
action='add',
entity_type=RELATIONSHIP,
entity_id=f'{entity_id_base}:[{rel_index}]',
)
continue
if old_rel_index != rel_index:
yield DeploymentUpdateStep(
action='modify',
entity_type=RELATIONSHIP,
entity_id=f'{entity_id_base}:[{old_rel_index}]:[{rel_index}]',
)
for op_type in [SOURCE_OPERATIONS, TARGET_OPERATIONS]:
for action, key in _diff_dicts(
relationship.get(op_type), old_relationship.get(op_type)):
yield DeploymentUpdateStep(
action=action,
entity_type=OPERATION,
entity_id=f'{entity_id_base}:[{rel_index}]:{op_type}:{key}'
)
for action, key in _diff_dicts(
relationship.get(PROPERTIES),
old_relationship.get(PROPERTIES)):
# modifying relationship properties is not supported yet
yield DeploymentUpdateStep(
action=action,
entity_type=PROPERTY,
entity_id=f'{entity_id_base}:[{rel_index}]:{PROPERTIES}:{key}',
supported=False,
)
seen_relationships = Counter()
for rel_index, relationship in enumerate(old_node[RELATIONSHIPS]):
entity_id_base = f'{NODES}:{node_name}:{RELATIONSHIPS}'
rel_key = _relationship_key(relationship)
matching_relationship, _ = \
_find_relationship(
new_node[RELATIONSHIPS],
relationship[TYPE],
relationship[TARGET_ID],
seen_relationships[rel_key])
seen_relationships[rel_key] += 1
if matching_relationship is None:
yield DeploymentUpdateStep(
action='remove',
entity_type=RELATIONSHIP,
entity_id=f'{entity_id_base}:[{rel_index}]',
)
for action, key in _diff_dicts(new_node[PROPERTIES], old_node[PROPERTIES]):
yield DeploymentUpdateStep(
action=action,
entity_type=PROPERTY,
entity_id=f'{NODES}:{node_name}:{PROPERTIES}:{key}'
)
def _diff_operations(entity_id_base, operations, old_operations):
for op_name in set(operations) | set(old_operations):
new_operation = operations.get(op_name)
old_operation = old_operations.get(op_name)
if new_operation is not None and old_operation is None:
action = 'add'
elif new_operation is None and old_operation is not None:
action = 'remove'
elif new_operation != old_operation:
action = 'modify'
else:
continue
yield DeploymentUpdateStep(
action=action,
entity_type=OPERATION,
entity_id=f'{entity_id_base}:{op_name}',
)
def _diff_dicts(new, old, compare=operator.eq):
new = new or {}
old = old or {}
for key in set(new) | set(old):
if key in new and key not in old:
yield 'add', key
elif key in old and key not in new:
yield 'remove', key
elif not compare(new[key], old[key]):
from cloudify.state import workflow_ctx
workflow_ctx.logger.info('diff %s != %s', new[key], old[key])
yield 'modify', key
def _compare_groups(new, old):
old_clone = old.copy()
new_clone = new.copy()
old_members = set(old_clone.pop('members', ()))
new_members = set(new_clone.pop('members', ()))
return old_members == new_members and old_clone == new_clone
def _find_relationship(relationships, rel_type, target_id, skip=0):
"""Find an entry in relationships that matches the type and target.
:param relationships: the list of relationships in which to find one
:param rel_type: type of the relationship to find
:param target_id: target of the relationship to find
:param skip: return the Nth relationship that matches the type and target
:return: the relationship and its index, or a pair of (None, None)
"""
for rel_index, other_relationship in enumerate(relationships):
other_r_type = other_relationship[TYPE]
other_target_id = other_relationship[TARGET_ID]
if rel_type == other_r_type and other_target_id == target_id:
if skip == 0:
return other_relationship, rel_index
skip -= 1
return None, None
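# A minimal sketch of how ``skip`` selects the Nth relationship that matches
# a (type, target) pair, as used when a node repeats the same relationship.
# The relationship dicts are trimmed to the two keys the helper reads and are
# illustrative assumptions; the helper below is not invoked anywhere in this
# module.
def _example_find_relationship():
    relationships = [
        {TYPE: 'cloudify.relationships.connected_to', TARGET_ID: 'db'},
        {TYPE: 'cloudify.relationships.contained_in', TARGET_ID: 'host'},
        {TYPE: 'cloudify.relationships.connected_to', TARGET_ID: 'db'},
    ]
    first, first_index = _find_relationship(
        relationships, 'cloudify.relationships.connected_to', 'db')
    second, second_index = _find_relationship(
        relationships, 'cloudify.relationships.connected_to', 'db', skip=1)
    return first_index, second_index  # (0, 2)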
def _extract_added_nodes_names(supported_steps):
add_node_steps = [step for step in supported_steps
if step.action == 'add' and step.entity_type == NODE]
added_nodes_names = [step.entity_name for step in add_node_steps]
return added_nodes_names
def _create_added_nodes_graph(nodes, supported_steps):
""" create a graph representing the added nodes and relationships
involving them in the deployment update blueprint
:rtype: nx.Digraph
"""
added_nodes_names = _extract_added_nodes_names(supported_steps)
added_nodes_graph = nx.DiGraph()
added_nodes_graph.add_nodes_from(added_nodes_names)
for node_name, node in nodes.items():
if node_name in added_nodes_names:
for relationship in node[RELATIONSHIPS]:
if relationship[TARGET_ID] in added_nodes_names:
added_nodes_graph.add_edge(node_name,
relationship[TARGET_ID])
return added_nodes_graph
def _update_topology_order_of_add_node_steps(supported_steps,
topologically_sorted_added_nodes):
for i, node_name in enumerate(topologically_sorted_added_nodes):
# Get the corresponding 'add node' step for this node name,
# and assign it its topology_order order
for step in supported_steps:
if step.action == 'add' and step.entity_type == NODE \
and step.entity_name == node_name:
step.topology_order = i
def _sort_supported_steps(nodes, supported_steps):
added_nodes_graph = _create_added_nodes_graph(nodes, supported_steps)
topologically_sorted_added_nodes = nx.topological_sort(
added_nodes_graph)
_update_topology_order_of_add_node_steps(
supported_steps, topologically_sorted_added_nodes)
supported_steps.sort()
def extract_steps(nodes, deployment, new_plan):
"""Create DeploymentUpdateSteps
:param nodes: currently existing nodes, as a list of dicts
:param deployment: the deployment to be updated, as an orm object
:param new_plan: the new deployment plan, as returned by the dsl-parser
:return: a pair of lists: the supported steps, and the unsupported steps
"""
nodes = {node['id']: node for node in nodes}
new_nodes = {node['id']: node for node in new_plan[NODES]}
supported_steps = []
unsupported_steps = []
for step in _create_steps(nodes, deployment, new_plan):
if step.supported:
supported_steps.append(step)
else:
unsupported_steps.append(step)
_sort_supported_steps(new_nodes, supported_steps)
return supported_steps, unsupported_steps
|
python_packages/jupyterlab_lsp/jupyterlab_lsp/__init__.py | icankeep/jupyterlab-lsp | 1,117 | 12631944 | # flake8: noqa: F401
from ._version import __version__
def _jupyter_labextension_paths():
return [
{
"src": "labextensions/@krassowski/jupyterlab-lsp",
"dest": "@krassowski/jupyterlab-lsp",
}
]
|
backend/MaskFormer/mask_former/data/datasets/register_mapillary_vistas.py | rune-l/coco-annotator | 143 | 12631947 | # Copyright (c) Facebook, Inc. and its affiliates.
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import load_sem_seg
MAPILLARY_VISTAS_SEM_SEG_CATEGORIES = [
{
"color": [165, 42, 42],
"instances": True,
"readable": "Bird",
"name": "animal--bird",
"evaluate": True,
},
{
"color": [0, 192, 0],
"instances": True,
"readable": "Ground Animal",
"name": "animal--ground-animal",
"evaluate": True,
},
{
"color": [196, 196, 196],
"instances": False,
"readable": "Curb",
"name": "construction--barrier--curb",
"evaluate": True,
},
{
"color": [190, 153, 153],
"instances": False,
"readable": "Fence",
"name": "construction--barrier--fence",
"evaluate": True,
},
{
"color": [180, 165, 180],
"instances": False,
"readable": "Guard Rail",
"name": "construction--barrier--guard-rail",
"evaluate": True,
},
{
"color": [90, 120, 150],
"instances": False,
"readable": "Barrier",
"name": "construction--barrier--other-barrier",
"evaluate": True,
},
{
"color": [102, 102, 156],
"instances": False,
"readable": "Wall",
"name": "construction--barrier--wall",
"evaluate": True,
},
{
"color": [128, 64, 255],
"instances": False,
"readable": "Bike Lane",
"name": "construction--flat--bike-lane",
"evaluate": True,
},
{
"color": [140, 140, 200],
"instances": True,
"readable": "Crosswalk - Plain",
"name": "construction--flat--crosswalk-plain",
"evaluate": True,
},
{
"color": [170, 170, 170],
"instances": False,
"readable": "Curb Cut",
"name": "construction--flat--curb-cut",
"evaluate": True,
},
{
"color": [250, 170, 160],
"instances": False,
"readable": "Parking",
"name": "construction--flat--parking",
"evaluate": True,
},
{
"color": [96, 96, 96],
"instances": False,
"readable": "Pedestrian Area",
"name": "construction--flat--pedestrian-area",
"evaluate": True,
},
{
"color": [230, 150, 140],
"instances": False,
"readable": "Rail Track",
"name": "construction--flat--rail-track",
"evaluate": True,
},
{
"color": [128, 64, 128],
"instances": False,
"readable": "Road",
"name": "construction--flat--road",
"evaluate": True,
},
{
"color": [110, 110, 110],
"instances": False,
"readable": "Service Lane",
"name": "construction--flat--service-lane",
"evaluate": True,
},
{
"color": [244, 35, 232],
"instances": False,
"readable": "Sidewalk",
"name": "construction--flat--sidewalk",
"evaluate": True,
},
{
"color": [150, 100, 100],
"instances": False,
"readable": "Bridge",
"name": "construction--structure--bridge",
"evaluate": True,
},
{
"color": [70, 70, 70],
"instances": False,
"readable": "Building",
"name": "construction--structure--building",
"evaluate": True,
},
{
"color": [150, 120, 90],
"instances": False,
"readable": "Tunnel",
"name": "construction--structure--tunnel",
"evaluate": True,
},
{
"color": [220, 20, 60],
"instances": True,
"readable": "Person",
"name": "human--person",
"evaluate": True,
},
{
"color": [255, 0, 0],
"instances": True,
"readable": "Bicyclist",
"name": "human--rider--bicyclist",
"evaluate": True,
},
{
"color": [255, 0, 100],
"instances": True,
"readable": "Motorcyclist",
"name": "human--rider--motorcyclist",
"evaluate": True,
},
{
"color": [255, 0, 200],
"instances": True,
"readable": "Other Rider",
"name": "human--rider--other-rider",
"evaluate": True,
},
{
"color": [200, 128, 128],
"instances": True,
"readable": "Lane Marking - Crosswalk",
"name": "marking--crosswalk-zebra",
"evaluate": True,
},
{
"color": [255, 255, 255],
"instances": False,
"readable": "Lane Marking - General",
"name": "marking--general",
"evaluate": True,
},
{
"color": [64, 170, 64],
"instances": False,
"readable": "Mountain",
"name": "nature--mountain",
"evaluate": True,
},
{
"color": [230, 160, 50],
"instances": False,
"readable": "Sand",
"name": "nature--sand",
"evaluate": True,
},
{
"color": [70, 130, 180],
"instances": False,
"readable": "Sky",
"name": "nature--sky",
"evaluate": True,
},
{
"color": [190, 255, 255],
"instances": False,
"readable": "Snow",
"name": "nature--snow",
"evaluate": True,
},
{
"color": [152, 251, 152],
"instances": False,
"readable": "Terrain",
"name": "nature--terrain",
"evaluate": True,
},
{
"color": [107, 142, 35],
"instances": False,
"readable": "Vegetation",
"name": "nature--vegetation",
"evaluate": True,
},
{
"color": [0, 170, 30],
"instances": False,
"readable": "Water",
"name": "nature--water",
"evaluate": True,
},
{
"color": [255, 255, 128],
"instances": True,
"readable": "Banner",
"name": "object--banner",
"evaluate": True,
},
{
"color": [250, 0, 30],
"instances": True,
"readable": "Bench",
"name": "object--bench",
"evaluate": True,
},
{
"color": [100, 140, 180],
"instances": True,
"readable": "Bike Rack",
"name": "object--bike-rack",
"evaluate": True,
},
{
"color": [220, 220, 220],
"instances": True,
"readable": "Billboard",
"name": "object--billboard",
"evaluate": True,
},
{
"color": [220, 128, 128],
"instances": True,
"readable": "Catch Basin",
"name": "object--catch-basin",
"evaluate": True,
},
{
"color": [222, 40, 40],
"instances": True,
"readable": "CCTV Camera",
"name": "object--cctv-camera",
"evaluate": True,
},
{
"color": [100, 170, 30],
"instances": True,
"readable": "Fire Hydrant",
"name": "object--fire-hydrant",
"evaluate": True,
},
{
"color": [40, 40, 40],
"instances": True,
"readable": "Junction Box",
"name": "object--junction-box",
"evaluate": True,
},
{
"color": [33, 33, 33],
"instances": True,
"readable": "Mailbox",
"name": "object--mailbox",
"evaluate": True,
},
{
"color": [100, 128, 160],
"instances": True,
"readable": "Manhole",
"name": "object--manhole",
"evaluate": True,
},
{
"color": [142, 0, 0],
"instances": True,
"readable": "Phone Booth",
"name": "object--phone-booth",
"evaluate": True,
},
{
"color": [70, 100, 150],
"instances": False,
"readable": "Pothole",
"name": "object--pothole",
"evaluate": True,
},
{
"color": [210, 170, 100],
"instances": True,
"readable": "Street Light",
"name": "object--street-light",
"evaluate": True,
},
{
"color": [153, 153, 153],
"instances": True,
"readable": "Pole",
"name": "object--support--pole",
"evaluate": True,
},
{
"color": [128, 128, 128],
"instances": True,
"readable": "Traffic Sign Frame",
"name": "object--support--traffic-sign-frame",
"evaluate": True,
},
{
"color": [0, 0, 80],
"instances": True,
"readable": "Utility Pole",
"name": "object--support--utility-pole",
"evaluate": True,
},
{
"color": [250, 170, 30],
"instances": True,
"readable": "Traffic Light",
"name": "object--traffic-light",
"evaluate": True,
},
{
"color": [192, 192, 192],
"instances": True,
"readable": "Traffic Sign (Back)",
"name": "object--traffic-sign--back",
"evaluate": True,
},
{
"color": [220, 220, 0],
"instances": True,
"readable": "Traffic Sign (Front)",
"name": "object--traffic-sign--front",
"evaluate": True,
},
{
"color": [140, 140, 20],
"instances": True,
"readable": "Trash Can",
"name": "object--trash-can",
"evaluate": True,
},
{
"color": [119, 11, 32],
"instances": True,
"readable": "Bicycle",
"name": "object--vehicle--bicycle",
"evaluate": True,
},
{
"color": [150, 0, 255],
"instances": True,
"readable": "Boat",
"name": "object--vehicle--boat",
"evaluate": True,
},
{
"color": [0, 60, 100],
"instances": True,
"readable": "Bus",
"name": "object--vehicle--bus",
"evaluate": True,
},
{
"color": [0, 0, 142],
"instances": True,
"readable": "Car",
"name": "object--vehicle--car",
"evaluate": True,
},
{
"color": [0, 0, 90],
"instances": True,
"readable": "Caravan",
"name": "object--vehicle--caravan",
"evaluate": True,
},
{
"color": [0, 0, 230],
"instances": True,
"readable": "Motorcycle",
"name": "object--vehicle--motorcycle",
"evaluate": True,
},
{
"color": [0, 80, 100],
"instances": False,
"readable": "On Rails",
"name": "object--vehicle--on-rails",
"evaluate": True,
},
{
"color": [128, 64, 64],
"instances": True,
"readable": "Other Vehicle",
"name": "object--vehicle--other-vehicle",
"evaluate": True,
},
{
"color": [0, 0, 110],
"instances": True,
"readable": "Trailer",
"name": "object--vehicle--trailer",
"evaluate": True,
},
{
"color": [0, 0, 70],
"instances": True,
"readable": "Truck",
"name": "object--vehicle--truck",
"evaluate": True,
},
{
"color": [0, 0, 192],
"instances": True,
"readable": "Wheeled Slow",
"name": "object--vehicle--wheeled-slow",
"evaluate": True,
},
{
"color": [32, 32, 32],
"instances": False,
"readable": "Car Mount",
"name": "void--car-mount",
"evaluate": True,
},
{
"color": [120, 10, 10],
"instances": False,
"readable": "Ego Vehicle",
"name": "void--ego-vehicle",
"evaluate": True,
},
{
"color": [0, 0, 0],
"instances": False,
"readable": "Unlabeled",
"name": "void--unlabeled",
"evaluate": False,
},
]
def _get_mapillary_vistas_meta():
stuff_classes = [k["readable"] for k in MAPILLARY_VISTAS_SEM_SEG_CATEGORIES if k["evaluate"]]
assert len(stuff_classes) == 65
stuff_colors = [k["color"] for k in MAPILLARY_VISTAS_SEM_SEG_CATEGORIES if k["evaluate"]]
assert len(stuff_colors) == 65
ret = {
"stuff_classes": stuff_classes,
"stuff_colors": stuff_colors,
}
return ret
def register_all_mapillary_vistas(root):
root = os.path.join(root, "mapillary_vistas")
meta = _get_mapillary_vistas_meta()
for name, dirname in [("train", "training"), ("val", "validation")]:
image_dir = os.path.join(root, dirname, "images")
gt_dir = os.path.join(root, dirname, "labels")
name = f"mapillary_vistas_sem_seg_{name}"
DatasetCatalog.register(
name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="png", image_ext="jpg")
)
MetadataCatalog.get(name).set(
image_root=image_dir,
sem_seg_root=gt_dir,
evaluator_type="sem_seg",
ignore_label=65, # different from other datasets, Mapillary Vistas sets ignore_label to 65
**meta,
)
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_all_mapillary_vistas(_root)
|
pipeline/archivebot/seesaw/tasks.py | AKTheKnight/ArchiveBot | 250 | 12632044 | <reponame>AKTheKnight/ArchiveBot<filename>pipeline/archivebot/seesaw/tasks.py
import datetime
import functools
import glob
import gzip
import json
import os
import shutil
import time
import requests
import socket
from seesaw.externalprocess import WgetDownload
from seesaw.task import Task, SimpleTask
from tornado.ioloop import IOLoop
import tornado.ioloop
from redis.exceptions import ConnectionError
class CheckIP(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, "CheckIP")
def process(self, item):
item.log_output('Checking IP address.')
ip_set = set()
ip_set.add(socket.gethostbyname('twitter.com'))
ip_set.add(socket.gethostbyname('facebook.com'))
ip_set.add(socket.gethostbyname('youtube.com'))
ip_set.add(socket.gethostbyname('microsoft.com'))
ip_set.add(socket.gethostbyname('icanhas.cheezburger.com'))
ip_set.add(socket.gethostbyname('archiveteam.org'))
if len(ip_set) != 6:
item.log_output('Got IP addresses: {0}'.format(ip_set))
item.log_output(
'Are you behind a firewall/proxy? That is a big no-no!')
raise Exception(
'Are you behind a firewall/proxy? That is a big no-no!')
# Domains that are not supposed to resolve
for domain in ('domain.invalid', 'nxdomain.archiveteam.org', 'www'):
try:
ip = socket.gethostbyname(domain)
except socket.gaierror as e:
if e.errno != socket.EAI_NONAME:
raise
else:
item.log_output('Got an IP address ({}) for {} instead of NXDOMAIN'.format(ip, domain))
item.log_output('Are you behind a firewall/proxy or have a misconfigured resolv.conf? That is a big no-no!')
raise Exception('Are you behind a firewall/proxy or have a misconfigured resolv.conf? That is a big no-no!')
class CheckLocalWebserver(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, "CheckLocalWebserver")
def process(self, item):
for port in (80, 443, 8000, 8080, 8443):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect(('localhost', port))
except socket.error:
pass
else:
item.log_output('Was able to connect to localhost:{}'.format(port))
item.log_output('Are you running a web server on the same machine as the pipeline? That is a big no-no!')
raise Exception('Are you running a web server on the same machine as the pipeline? That is a big no-no!')
class RetryableTask(Task):
retry_delay = 5
cancelable = False
def enqueue(self, item):
self.start_item(item)
item.log_output('Starting %s for %s' % (self, item.description()))
self.process(item)
def schedule_retry(self, item):
item.may_be_canceled = self.cancelable
IOLoop.instance().add_timeout(datetime.timedelta(seconds=self.retry_delay),
functools.partial(self.retry, item))
def retry(self, item):
if not item.canceled:
item.may_be_canceled = False
self.process(item)
def notify_retry(self, reason, item):
item.log_output("%s. Retrying %s in %s seconds." %
(reason, self, self.retry_delay))
def notify_connection_error(self, item):
self.notify_retry('Lost connection to ArchiveBot controller', item)
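# A minimal sketch of a RetryableTask subclass showing the intended pattern:
# do the work in ``process`` and either complete the item or schedule a retry
# on a controller connection error. The task below is hypothetical and is not
# used by the pipeline; it only reuses calls already present in this file.
class ExampleRetryableTask(RetryableTask):
    def __init__(self, control):
        RetryableTask.__init__(self, 'ExampleRetryableTask')
        self.control = control
        self.retry_delay = 10
    def process(self, item):
        try:
            self.control.heartbeat(item['ident'])  # hypothetical unit of work
            self.complete_item(item)
        except ConnectionError:
            self.notify_connection_error(item)
            self.schedule_retry(item)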
# ------------------------------------------------------------------------------
class GetItemFromQueue(RetryableTask):
def __init__(self, control, pipeline_id, pipeline_nick, retry_delay=5,
ao_only=False, large=False, version_check = None):
RetryableTask.__init__(self, 'GetItemFromQueue')
self.control = control
self.pipeline_id = pipeline_id
self.pipeline_nick = pipeline_nick
self.retry_delay = retry_delay
self.cancelable = True
self.pipeline_queue = 'pending:%s' % self.pipeline_id
self.ao_only = ao_only
self.large = large
# (versionOnStartup, versionFunc) where the latter is an argument-less function returning the current version of the files
self.version_on_startup, self.version_func = version_check
def process(self, item):
# Check that the files haven't changed since the pipeline was launched
currentVersion = self.version_func()
if currentVersion != self.version_on_startup:
item.log_output('Version has changed from {!r} on startup to {!r} now'.format(self.version_on_startup, currentVersion))
raise Exception('Version has changed from {!r} on startup to {!r} now'.format(self.version_on_startup, currentVersion))
try:
ident, job_data = self.control.reserve_job(self.pipeline_id,
self.pipeline_nick, self.ao_only, self.large)
if ident == None:
self.schedule_retry(item)
else:
item['fetch_depth'] = job_data.get('fetch_depth')
item['ident'] = ident
item['log_key'] = job_data.get('log_key')
item['pipeline_id'] = self.pipeline_id
item['queued_at'] = job_data.get('queued_at')
item['slug'] = job_data.get('slug')
item['started_by'] = job_data.get('started_by')
item['started_in'] = job_data.get('started_in')
item['url'] = job_data.get('url')
item['url_file'] = job_data.get('url_file')
item['user_agent'] = job_data.get('user_agent')
item['no_offsite_links'] = job_data.get('no_offsite_links')
item['youtube_dl'] = job_data.get('youtube_dl')
item.log_output('Received item %s.' % ident)
self.complete_item(item)
except ConnectionError:
self.notify_connection_error(item)
self.schedule_retry(item)
# ------------------------------------------------------------------------------
class StartHeartbeat(SimpleTask):
def __init__(self, control):
SimpleTask.__init__(self, 'StartHeartbeat')
self.control = control
def process(self, item):
cb = tornado.ioloop.PeriodicCallback(
functools.partial(self.send_heartbeat, item),
1000)
item['heartbeat'] = cb
cb.start()
def send_heartbeat(self, item):
self.control.heartbeat(item['ident'])
# ------------------------------------------------------------------------------
class SetFetchDepth(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, 'SetFetchDepth')
def process(self, item):
depth = item['fetch_depth']
if depth == 'shallow':
item['recursive'] = False
else:
item['recursive'] = True
item['depth'] = depth
# ------------------------------------------------------------------------------
class TargetPathMixin(object):
def set_target_paths(self, item):
item['target_warc_file_prefix'] = '%(data_dir)s/%(warc_file_base)s' % item
item['target_info_file'] = '%(data_dir)s/%(warc_file_base)s.json' % item
def get_source_warc_filenames(self, item):
return list(sorted(
glob.glob('%(source_warc_file_prefix)s*.warc.gz' % item)
))
# ------------------------------------------------------------------------------
class PreparePaths(SimpleTask, TargetPathMixin):
def __init__(self):
SimpleTask.__init__(self, 'PreparePaths')
def process(self, item):
item_dir = '%(data_dir)s/%(ident)s' % item
last_five = item['ident'][0:5]
if os.path.isdir(item_dir):
shutil.rmtree(item_dir)
os.makedirs(item_dir)
item['item_dir'] = item_dir
item['warc_file_base'] = '%s-%s-%s' % (item['slug'],
time.strftime("%Y%m%d-%H%M%S"), last_five)
item['source_warc_file_prefix'] = '%(item_dir)s/%(warc_file_base)s' % item
item['source_info_file'] = '%(item_dir)s/%(warc_file_base)s.json' % item
item['cookie_jar'] = '%(item_dir)s/cookies.txt' % item
self.set_target_paths(item)
# ------------------------------------------------------------------------------
class Wpull(WgetDownload):
def on_subprocess_end(self, item, returncode):
item['wpull_returncode'] = returncode
super().on_subprocess_end(item, returncode)
# ------------------------------------------------------------------------------
class RelabelIfAborted(RetryableTask, TargetPathMixin):
def __init__(self, control):
RetryableTask.__init__(self, 'RelabelIfAborted')
self.control = control
def process(self, item):
try:
if self.control.is_aborted(item['ident']):
item['aborted'] = True
item['warc_file_base'] = '%(warc_file_base)s-aborted' % item
self.set_target_paths(item)
item.log_output('Adjusted target WARC path to %(target_warc_file_prefix)s' %
item)
self.complete_item(item)
except ConnectionError:
self.notify_connection_error(item)
self.schedule_retry(item)
# ------------------------------------------------------------------------------
class CompressLogIfFailed(SimpleTask, TargetPathMixin):
def __init__(self):
SimpleTask.__init__(self, 'CompressLogIfNoMetaWarc')
def process(self, item):
#TODO: Instead of checking the exit status of wpull, this should check whether wpull wrote a meta WARC (and whether it contains the log).
#TODO: If the disk is almost full, this may crash, which would probably mean a loss of the log file (and possibly also anything else).
if item['wpull_returncode'] not in (0, 4, 8):
item['source_log_file'] = '%(item_dir)s/%(warc_file_base)s-wpull.log.gz' % item
item['target_log_file'] = '%(data_dir)s/%(warc_file_base)s-wpull.log.gz' % item
with open('%(item_dir)s/wpull.log' % item, 'rb') as fI:
with gzip.GzipFile(item['source_log_file'], 'w', compresslevel = 9) as fO:
shutil.copyfileobj(fI, fO)
# ------------------------------------------------------------------------------
class MoveFiles(SimpleTask, TargetPathMixin):
def __init__(self, target_directory):
SimpleTask.__init__(self, "MoveFiles")
self.target_directory = target_directory
def process(self, item):
item['target_warc_files'] = self.rename_warc_files(item)
item['all_target_files'] = item['target_warc_files'] + [item['target_info_file']]
if 'target_url_file' in item:
item['all_target_files'].append(item['target_url_file'])
os.rename(item['source_url_file'], item['target_url_file'])
if 'target_log_file' in item:
item['all_target_files'].append(item['target_log_file'])
os.rename(item['source_log_file'], item['target_log_file'])
os.rename(item['source_info_file'], item['target_info_file'])
shutil.rmtree("%(item_dir)s" % item)
for fn in item['all_target_files']:
shutil.move(fn, self.target_directory)
def rename_warc_files(self, item):
target_filenames = []
for source_filename in self.get_source_warc_filenames(item):
assert source_filename.startswith(item['source_warc_file_prefix'])
target_filename = source_filename.replace(
item['source_warc_file_prefix'],
item['target_warc_file_prefix'],
1
)
os.rename(source_filename, target_filename)
target_filenames.append(target_filename)
return target_filenames
# ------------------------------------------------------------------------------
class WriteInfo(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, 'WriteInfo')
def process(self, item):
# The "aborted" key might not have been written by any prior process,
# i.e. if the job wasn't aborted. For accessor convenience, we add
# that key here.
if 'aborted' in item:
aborted = item['aborted']
else:
aborted = False
# This JSON object's fieldset is an externally visible interface.
# Adding fields is fine; changing existing ones, not so much.
item['info'] = {
'aborted': aborted,
'fetch_depth': item['fetch_depth'],
'pipeline_id': item['pipeline_id'],
'queued_at': item['queued_at'],
'started_by': item['started_by'],
'started_in': item['started_in'],
'url': item['url'],
'url_file': item['url_file']
}
with open(item['source_info_file'], 'w') as f:
f.write(json.dumps(item['info'], indent=True))
# ------------------------------------------------------------------------------
class DownloadUrlFile(RetryableTask):
def __init__(self, control):
RetryableTask.__init__(self, 'DownloadUrlFile')
self.control = control
def process(self, item):
if not item['url_file']:
self.complete_item(item)
return
try:
r = requests.get(item['url_file'], stream=True)
item['source_url_file'] = \
'%(source_warc_file_prefix)s-urls.txt' % item
item['target_url_file'] = \
'%(target_warc_file_prefix)s-urls.txt' % item
# Files could be huge, and we do not care about their contents or
# encoding. (We leave parsing the file to the crawler.)
with open(item['source_url_file'], 'wb') as f:
for chunk in r.iter_content(4096):
f.write(chunk)
size = os.stat(item['source_url_file']).st_size
item.log_output('Downloaded {0} bytes from {1}'.format(size, item['url_file']))
self.complete_item(item)
except requests.exceptions.RequestException as e:
item.log_output('Exception raised in DownloadUrlFile: {}'.format(e))
# It's possible that the URL that was originally provided has gone
# bad in some way. We re-read the URL to allow the job submitter
# to make changes. If a URL is present, we replace the existing
# URL in the item. If a URL is not present, we keep what we have.
item.log_output('Refreshing file URL from ArchiveBot')
new_url_file = self.control.get_url_file(item['ident'])
if new_url_file:
item['url_file'] = new_url_file
self.schedule_retry(item)
# ------------------------------------------------------------------------------
class StopHeartbeat(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, 'StopHeartbeat')
def process(self, item):
if 'heartbeat' in item:
item['heartbeat'].stop()
del item['heartbeat']
else:
item.log_output("Warning: couldn't find a heartbeat to stop")
# ------------------------------------------------------------------------------
class MarkItemAsDone(RetryableTask):
def __init__(self, control, expire_time):
RetryableTask.__init__(self, 'MarkItemAsDone')
self.control = control
self.expire_time = expire_time
def process(self, item):
try:
self.control.mark_done(item, self.expire_time)
self.complete_item(item)
except ConnectionError:
self.notify_connection_error(item)
self.schedule_retry(item)
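# A rough sketch (assumed, not part of this excerpt) of how the task classes
# above are typically chained into a seesaw Pipeline; `control`, `pipeline_id`,
# `pipeline_nick`, `version`/`get_version`, `target_directory` and `expire_time`
# are hypothetical names, and the Wpull arguments are elided:
#
#     pipeline = Pipeline(
#         GetItemFromQueue(control, pipeline_id, pipeline_nick,
#                          version_check=(version, get_version)),
#         StartHeartbeat(control),
#         SetFetchDepth(),
#         PreparePaths(),
#         WriteInfo(),
#         DownloadUrlFile(control),
#         Wpull(...),
#         RelabelIfAborted(control),
#         CompressLogIfFailed(),
#         MoveFiles(target_directory),
#         StopHeartbeat(),
#         MarkItemAsDone(control, expire_time),
#     )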
# vim:ts=4:sw=4:et:tw=78
|
metrics/lsun_bedroom.py | SachinKumar105/Implicit-Competitive-Regularization | 107 | 12632076 | import numpy as np
import torch
import tensorflow as tf
# from tflib.inception_score import get_inception_score
from .inception_tf13 import get_inception_score
import tflib.fid as fid
BATCH_SIZE = 100
N_CHANNEL = 3
RESOLUTION = 64
NUM_SAMPLES = 50000
def cal_inception_score(G, device, z_dim):
all_samples = []
samples = torch.randn(NUM_SAMPLES, z_dim)
for i in range(0, NUM_SAMPLES, BATCH_SIZE):
samples_100 = samples[i:i + BATCH_SIZE]
samples_100 = samples_100.to(device=device)
all_samples.append(G(samples_100).cpu().data.numpy())
all_samples = np.concatenate(all_samples, axis=0)
all_samples = np.multiply(np.add(np.multiply(all_samples, 0.5), 0.5), 255).astype('int32')
all_samples = all_samples.reshape((-1, N_CHANNEL, RESOLUTION, RESOLUTION)) #.transpose(0, 2, 3, 1)
return get_inception_score(all_samples)
def cal_inception_score_o(G, device, z_dim):
all_samples = []
samples = torch.randn(NUM_SAMPLES, z_dim)
for i in range(0, NUM_SAMPLES, BATCH_SIZE):
samples_100 = samples[i:i + BATCH_SIZE]
samples_100 = samples_100.to(device=device)
all_samples.append(G(samples_100).cpu().data.numpy())
all_samples = np.concatenate(all_samples, axis=0)
all_samples = np.multiply(np.add(np.multiply(all_samples, 0.5), 0.5), 255).astype('int32')
all_samples = all_samples.reshape((-1, N_CHANNEL, RESOLUTION, RESOLUTION)) #.transpose(0, 2, 3, 1)
return get_inception_score(list(all_samples))
def cal_fid_score(G, device, z_dim):
stats_path = 'tflib/data/fid_stats_lsun_train.npz'
inception_path = fid.check_or_download_inception('tflib/model')
f = np.load(stats_path)
mu_real, sigma_real = f['mu'][:], f['sigma'][:]
f.close()
fid.create_inception_graph(inception_path)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
all_samples = []
samples = torch.randn(NUM_SAMPLES, z_dim, 1, 1)
for i in range(0, NUM_SAMPLES, BATCH_SIZE):
samples_100 = samples[i:i + BATCH_SIZE]
samples_100 = samples_100.to(device=device)
all_samples.append(G(samples_100).cpu().data.numpy())
all_samples = np.concatenate(all_samples, axis=0)
all_samples = np.multiply(np.add(np.multiply(all_samples, 0.5), 0.5), 255).astype('int32')
all_samples = all_samples.reshape((-1, N_CHANNEL, RESOLUTION, RESOLUTION)).transpose(0, 2, 3, 1)
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
mu_gen, sigma_gen = fid.calculate_activation_statistics(all_samples, sess, batch_size=BATCH_SIZE)
fid_value = fid.calculate_frechet_distance(mu_gen, sigma_gen, mu_real, sigma_real)
return fid_value
|
test/run/t437.py | timmartin/skulpt | 2,671 | 12632091 | <filename>test/run/t437.py
import re
m = re.match('([0-9]+)([a-z]+)([A-Z]*)','345abu')
print "\ngroup"
print m.group() == '345abu'
print m.group(0) == '345abu'
print m.group(1) == '345'
print m.group(2) == 'abu'
print m.group(3) == ''
print "\ngroups"
print m.groups() == ('345','abu','')
print m.groups('default') == ('345','abu','')
|
fuel/converters/__init__.py | zaimusho/fuel | 767 | 12632094 | """Data conversion modules for built-in datasets.
Conversion submodules generate an HDF5 file that is compatible with
their corresponding built-in dataset.
Conversion functions accept a single argument, `subparser`, which is an
`argparse.ArgumentParser` instance that they need to fill with their own
specific arguments. They should set a `func` default argument for the
subparser with a function that will get called and given the parsed
command-line arguments, and is expected to convert the required files.
"""
from fuel.converters import adult
from fuel.converters import binarized_mnist
from fuel.converters import caltech101_silhouettes
from fuel.converters import celeba
from fuel.converters import cifar10
from fuel.converters import cifar100
from fuel.converters import dogs_vs_cats
from fuel.converters import iris
from fuel.converters import mnist
from fuel.converters import svhn
from fuel.converters import ilsvrc2010
from fuel.converters import ilsvrc2012
from fuel.converters import youtube_audio
__version__ = '0.2'
all_converters = (
('adult', adult.fill_subparser),
('binarized_mnist', binarized_mnist.fill_subparser),
('caltech101_silhouettes', caltech101_silhouettes.fill_subparser),
('celeba', celeba.fill_subparser),
('cifar10', cifar10.fill_subparser),
('cifar100', cifar100.fill_subparser),
('dogs_vs_cats', dogs_vs_cats.fill_subparser),
('iris', iris.fill_subparser),
('mnist', mnist.fill_subparser),
('svhn', svhn.fill_subparser),
('ilsvrc2010', ilsvrc2010.fill_subparser),
('ilsvrc2012', ilsvrc2012.fill_subparser),
('youtube_audio', youtube_audio.fill_subparser))
|
tests/performance/memory_pressure_check.py | zazula/talos | 1,536 | 12632111 | if __name__ == '__main__':
import numpy as np
import pandas as pd
import os
print('\n Memory Pressure Test Starts...\n')
for i in os.listdir():
if 'mprofile_' in i:
df = pd.read_csv(i, sep=' ', error_bad_lines=False)
df.columns = ['null', 'memory', 'time']
df.drop('null', 1, inplace=True)
std_limit = 5
highest_limit = 800
std = np.std(np.array(df.memory.values[1500:]))
highest = df.memory.max()
if std > std_limit:
raise Exception('MEMORY TEST FAILED: Standard deviation of memory pressure is %d which is above the %d limit' % (std, std_limit))
if highest > highest_limit:
raise Exception('MEMORY TEST FAILED: Max memory is %d which is above the %d limit' % (highest, highest_limit))
print("\n Memory Pressure Test Passed \n")
|
src/controller/msgReactionDeleteRouter.py | lin483/Funny-Nations | 126 | 12632115 | <gh_stars>100-1000
from discord import Client, Reaction, User, TextChannel, RawReactionActionEvent, PartialEmoji
from typing import Dict
from pymysql import Connection
from src.Storage import Storage
from src.utils.casino.Casino import Casino
from src.utils.casino.table.Table import Table
from src.utils.casino.table.BlackJackTable import BlackJackTable
from src.controller.routes.quitGame import quitGameByReaction
async def msgReactionDeleteRouter(self: Client, channel: TextChannel, user: User, storage: Storage, db: Connection, event: RawReactionActionEvent):
emoji: PartialEmoji = event.emoji
if emoji.name == '✅':
# For game
tables: Dict[int, Table] = storage.casino.tables
if channel.id in tables:
await quitGameByReaction(tables[channel.id], user, channel, self, db, storage.casino)
|
tests/test_search.py | pablo-angulo/TexSoup | 190 | 12632132 | <reponame>pablo-angulo/TexSoup
from TexSoup import TexSoup
###############
# BASIC TESTS #
###############
#########
# CASES #
#########
def test_commands_without_any_sort_arguments():
"""Tests that commands without any sort argument can still be searched."""
soup = TexSoup(r"""
\Question \textbf{Question Title}
Here is what chickens do:
\sol{They fly!}
\Question
\textbf{Question 2 Title}
""")
assert len(list(soup.find_all('Question'))) == 2
assert soup.find('section') is None
def test_commands_with_one_or_more_arguments():
"""Tests that commands with one or more argument can still be searched."""
soup = TexSoup(r"""
\section{Chikin Tales}
\subsection{Chikin Fly}
\section{Chikin Sequel}
""")
assert len(list(soup.find_all('section'))) == 2
assert soup.find('title') is None
def test_list_search():
"""Tests that giving a list to search returns all matches """
soup = TexSoup(r"""
\section*{Chikin Tales}
\subsection{Chikin Fly}
\section{Chikin Sequel}
""")
assert len(list(soup.find_all(['section', 'section*']))) == 2
|
tests/utils.py | Krande/ipygany | 450 | 12632137 | <filename>tests/utils.py<gh_stars>100-1000
import numpy as np
from ipygany import Data, Component
def get_test_assets():
vertices = np.array([
[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.],
])
triangles = np.array([
[0, 1, 2],
])
data_1d = Data(name='1d', components=[Component('x', np.array([0., 0., 0.]))])
data_3d = Data('3d', [
Component(name='x', array=np.array([1., 1., 1.])),
Component('y', np.array([2., 2., 2.])),
Component('z', np.array([3., 3., 3.])),
])
return vertices, triangles, data_1d, data_3d
|
dask_ml/ensemble/_blockwise.py | GueroudjiAmal/dask-ml | 803 | 12632165 | import dask
import dask.array as da
import dask.dataframe as dd
import numpy as np
import sklearn.base
from sklearn.utils.validation import check_is_fitted
from ..base import ClassifierMixin, RegressorMixin
from ..utils import check_array
class BlockwiseBase(sklearn.base.BaseEstimator):
def __init__(self, estimator):
self.estimator = estimator
def _check_array(self, X):
return check_array(
X,
accept_dask_dataframe=True,
accept_unknown_chunks=True,
preserve_pandas_dataframe=True,
)
def fit(self, X, y, **kwargs):
X = self._check_array(X)
estimatord = dask.delayed(self.estimator)
Xs = X.to_delayed()
ys = y.to_delayed()
if isinstance(X, da.Array):
Xs = Xs.flatten()
if isinstance(y, da.Array):
ys = ys.flatten()
if len(Xs) != len(ys):
raise ValueError(
f"The number of blocks in X and y must match. {len(Xs)} != {len(ys)}"
)
estimators = [
dask.delayed(sklearn.base.clone)(estimatord) for _ in range(len(Xs))
]
results = [
estimator_.fit(X_, y_, **kwargs)
for estimator_, X_, y_, in zip(estimators, Xs, ys)
]
results = list(dask.compute(*results))
self.estimators_ = results
def _predict(self, X):
"""Collect results from many predict calls"""
if isinstance(self, ClassifierMixin):
dtype = "int64"
else:
dtype = "float64"
if isinstance(X, da.Array):
chunks = (X.chunks[0], len(self.estimators_))
combined = X.map_blocks(
_predict_stack,
estimators=self.estimators_,
dtype=np.dtype(dtype),
chunks=chunks,
)
elif isinstance(X, dd._Frame):
meta = np.empty((0, len(self.classes_)), dtype=dtype)
combined = X.map_partitions(
_predict_stack, estimators=self.estimators_, meta=meta
)
else:
# TODO: this should be done in parallel?
combined = np.vstack(
[estimator.predict(X) for estimator in self.estimators_]
).T
return combined
class BlockwiseVotingClassifier(ClassifierMixin, BlockwiseBase):
"""
Blockwise training and ensemble voting classifier.
This classifier trains on blocks / partitions of Dask Arrays or DataFrames.
A cloned version of `estimator` will be fit *independently* on each block
or partition of the Dask collection. This is useful when the sub estimator
only works on small in-memory data structures like a NumPy array or pandas
DataFrame.
Prediction is done by the *ensemble* of learned models.
.. warning::
Ensure that your data are sufficiently shuffled prior to training!
If the values of the various blocks / partitions of your dataset are not
distributed similarly, the classifier will give poor results.
Parameters
----------
estimator : Estimator
voting : str, {'hard', 'soft'} (default='hard')
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
classes : list-like, optional
The set of classes that `y` can take. This can also be provided as
a fit param if the underlying estimator requires `classes` at fit time.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators that are `estimator` fitted
on each partition / block of the inputs.
classes_ : array-like, shape (n_predictions,)
The class labels.
Examples
--------
>>> import dask_ml.datasets
>>> import dask_ml.ensemble
>>> import sklearn.linear_model
>>> X, y = dask_ml.datasets.make_classification(n_samples=100_000,
    ... chunks=10_000)
    >>> subestimator = sklearn.linear_model.RidgeClassifier(random_state=0)
    >>> clf = dask_ml.ensemble.BlockwiseVotingClassifier(
    ... subestimator,
    ... classes=[0, 1]
    ... )
>>> clf.fit(X, y)
"""
def __init__(self, estimator, voting="hard", classes=None):
self.voting = voting
self.classes = classes
super().__init__(estimator)
def fit(self, X, y, **kwargs):
if self.classes is None and "classes" not in kwargs:
raise ValueError("Must provide the classes of `y`.")
elif self.classes is not None:
classes = self.classes
else:
classes = kwargs["classes"]
super().fit(X, y, **kwargs)
self.classes_ = np.array(classes)
def predict(self, X):
check_is_fitted(self, attributes=["estimators_"])
X = self._check_array(X)
# TODO: check for just row-wise partition!
if self.voting == "soft":
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X) # (N, n_estimators) ensure chunking!
if isinstance(predictions, da.Array):
maj = predictions.map_blocks(_vote_block, dtype="int64", drop_axis=1)
else:
maj = _vote_block(predictions)
return maj
@property
def predict_proba(self):
if self.voting == "hard":
raise AttributeError(
"predict_proba is not available when" " voting=%r" % self.voting
)
return self._predict_proba
def _predict_proba(self, X):
check_is_fitted(self, attributes=["estimators_"])
X = self._check_array(X)
avg = np.average(self._collect_probas(X), axis=0)
return avg
def _collect_probas(self, X):
if isinstance(X, da.Array):
chunks = (len(self.estimators_), X.chunks[0], len(self.classes_))
meta = np.array([], dtype="float64")
# (n_estimators, len(X), n_classes)
combined = X.map_blocks(
_predict_proba_stack,
estimators=self.estimators_,
chunks=chunks,
meta=meta,
)
elif isinstance(X, dd._Frame):
# TODO: replace with a _predict_proba_stack version.
# This current raises; dask.dataframe doesn't like map_partitions that
# return new axes.
# meta = np.empty((len(self.estimators_), 0, len(self.classes_)),
# dtype="float64")
# combined = X.map_partitions(_predict_proba_stack, meta=meta,
# estimators=self.estimators_)
# combined._chunks = ((len(self.estimators_),),
# (np.nan,) * X.npartitions,
# (len(X.columns),))
meta = np.empty((0, len(self.classes_)), dtype="float64")
probas = [
X.map_partitions(_predict_proba, meta=meta, estimator=estimator)
for estimator in self.estimators_
]
# TODO(https://github.com/dask/dask/issues/6177): replace with da.stack
chunks = probas[0]._chunks
for proba in probas:
proba._chunks = ((1,) * len(chunks[0]), chunks[1])
combined = da.stack(probas)
combined._chunks = ((1,) * len(self.estimators_),) + chunks
else:
# ndarray, etc.
combined = np.stack(
[estimator.predict_proba(X) for estimator in self.estimators_]
)
return combined
class BlockwiseVotingRegressor(RegressorMixin, BlockwiseBase):
"""
Blockwise training and ensemble voting regressor.
This regressor trains on blocks / partitions of Dask Arrays or DataFrames.
A cloned version of `estimator` will be fit *independently* on each block
or partition of the Dask collection.
Prediction is done by the *ensemble* of learned models.
.. warning::
Ensure that your data are sufficiently shuffled prior to training!
If the values of the various blocks / partitions of your dataset are not
distributed similarly, the regressor will give poor results.
Parameters
----------
estimator : Estimator
Attributes
----------
estimators_ : list of regressors
The collection of fitted sub-estimators that are `estimator` fitted
on each partition / block of the inputs.
Examples
--------
>>> import dask_ml.datasets
>>> import dask_ml.ensemble
>>> import sklearn.linear_model
>>> X, y = dask_ml.datasets.make_regression(n_samples=100_000,
... chunks=10_000)
>>> subestimator = sklearn.linear_model.LinearRegression()
>>> clf = dask_ml.ensemble.BlockwiseVotingRegressor(
... subestimator,
... )
>>> clf.fit(X, y)
"""
def predict(self, X):
check_is_fitted(self, attributes=["estimators_"])
return np.average(self._predict(X), axis=1)
def fit(estimator, x, y):
# TODO: logging
estimator.fit(x, y)
return estimator
def _predict_proba(part, estimator):
return estimator.predict_proba(part)
def _vote(x):
return np.argmax(np.bincount(x))
def _vote_block(block):
return np.apply_along_axis(_vote, 1, block)
def _predict_stack(part, estimators):
# predict for a batch of estimators and stack up the results.
batches = [estimator.predict(part) for estimator in estimators]
return np.vstack(batches).T
def _predict_proba_stack(part, estimators):
# predict for a batch of estimators and stack up the results.
batches = [estimator.predict_proba(part) for estimator in estimators]
return np.stack(batches)
|
src/c3nav/mapdata/migrations/0075_label_settings.py | johnjohndoe/c3nav | 132 | 12632179 | # Generated by Django 2.2.8 on 2019-12-21 23:27
import c3nav.mapdata.fields
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mapdata', '0074_show_labels'),
]
operations = [
migrations.CreateModel(
name='LabelSettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', c3nav.mapdata.fields.I18nField(fallback_any=True, plural_name='titles', verbose_name='Title')),
('min_zoom', models.DecimalField(decimal_places=1, default=-10, max_digits=3, validators=[django.core.validators.MinValueValidator(Decimal('-10')), django.core.validators.MaxValueValidator(Decimal('10'))], verbose_name='min zoom')),
('max_zoom', models.DecimalField(decimal_places=1, default=10, max_digits=3, validators=[django.core.validators.MinValueValidator(Decimal('-10')), django.core.validators.MaxValueValidator(Decimal('10'))], verbose_name='max zoom')),
('font_size', models.IntegerField(default=12, validators=[django.core.validators.MinValueValidator(12), django.core.validators.MaxValueValidator(30)], verbose_name='font size')),
],
options={
'verbose_name': 'Label Settings',
'verbose_name_plural': 'Label Settings',
'default_related_name': 'labelsettings',
},
),
migrations.RemoveField(
model_name='area',
name='show_label',
),
migrations.RemoveField(
model_name='level',
name='show_label',
),
migrations.RemoveField(
model_name='locationgroup',
name='show_labels',
),
migrations.RemoveField(
model_name='poi',
name='show_label',
),
migrations.RemoveField(
model_name='space',
name='show_label',
),
migrations.AddField(
model_name='area',
name='label_override',
field=c3nav.mapdata.fields.I18nField(blank=True, fallback_any=True, plural_name='label_overrides', verbose_name='Label override'),
),
migrations.AddField(
model_name='level',
name='label_override',
field=c3nav.mapdata.fields.I18nField(blank=True, fallback_any=True, plural_name='label_overrides', verbose_name='Label override'),
),
migrations.AddField(
model_name='poi',
name='label_override',
field=c3nav.mapdata.fields.I18nField(blank=True, fallback_any=True, plural_name='label_overrides', verbose_name='Label override'),
),
migrations.AddField(
model_name='space',
name='label_override',
field=c3nav.mapdata.fields.I18nField(blank=True, fallback_any=True, plural_name='label_overrides', verbose_name='Label override'),
),
migrations.AddField(
model_name='area',
name='label_settings',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='areas', to='mapdata.LabelSettings', verbose_name='label settings'),
),
migrations.AddField(
model_name='level',
name='label_settings',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='levels', to='mapdata.LabelSettings', verbose_name='label settings'),
),
migrations.AddField(
model_name='locationgroup',
name='label_settings',
field=models.ForeignKey(help_text='unless location specifies otherwise', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='locationgroups', to='mapdata.LabelSettings', verbose_name='label settings'),
),
migrations.AddField(
model_name='poi',
name='label_settings',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='pois', to='mapdata.LabelSettings', verbose_name='label settings'),
),
migrations.AddField(
model_name='space',
name='label_settings',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='spaces', to='mapdata.LabelSettings', verbose_name='label settings'),
),
]
|
tests/test_csw_skgeodesy.py | geoanalytics-ca/OWSLib | 218 | 12632209 | <filename>tests/test_csw_skgeodesy.py
from tests.utils import service_ok
import pytest
from owslib.csw import CatalogueServiceWeb
SERVICE_URL = 'https://zbgisws.skgeodesy.sk/zbgiscsw/service.svc/get'
@pytest.mark.online
@pytest.mark.skipif(not service_ok(SERVICE_URL),
reason='service is unreachable')
def test_csw_skgeodsy():
c = CatalogueServiceWeb(SERVICE_URL)
assert sorted([op.name for op in c.operations]) == [
'DescribeRecord',
'GetCapabilities',
'GetRecordById',
'GetRecords',
'Transaction']
grop = c.get_operation_by_name('GetRecords')
assert grop.name == 'GetRecords'
c.getrecords2(typenames='csw:Record gmd:MD_Metadata')
assert c.results.get('returned') > 0
assert c.results.get('nextrecord') > 0
assert c.results.get('matches') > 0
|
metadata-ingestion/src/datahub_provider/client/airflow_generator.py | pppsunil/datahub | 289 | 12632217 | <filename>metadata-ingestion/src/datahub_provider/client/airflow_generator.py
from typing import TYPE_CHECKING, Dict, List, Optional, Union
from airflow.configuration import conf
from datahub.api.entities.datajob import DataFlow, DataJob
from datahub.api.entities.dataprocess.dataprocess_instance import (
DataProcessInstance,
InstanceRunResult,
)
from datahub.metadata.schema_classes import DataProcessTypeClass
from datahub.utilities.urns.data_flow_urn import DataFlowUrn
from datahub.utilities.urns.data_job_urn import DataJobUrn
from datahub_provider.hooks.datahub import AIRFLOW_1
if TYPE_CHECKING:
from airflow import DAG
from airflow.models import BaseOperator, DagRun, TaskInstance
from datahub.emitter.kafka_emitter import DatahubKafkaEmitter
from datahub.emitter.rest_emitter import DatahubRestEmitter
class AirflowGenerator:
@staticmethod
def _get_dependencies(
task: "BaseOperator", dag: "DAG", flow_urn: DataFlowUrn
) -> List[DataJobUrn]:
# resolve URNs for upstream nodes in subdags upstream of the current task.
upstream_subdag_task_urns: List[DataJobUrn] = []
for upstream_task_id in task.upstream_task_ids:
upstream_task = dag.task_dict[upstream_task_id]
# if upstream task is not a subdag, then skip it
if upstream_task.subdag is None:
continue
# else, link the leaf tasks of the upstream subdag as upstream tasks
upstream_subdag = upstream_task.subdag
for upstream_subdag_task_id in upstream_subdag.task_dict:
upstream_subdag_task = upstream_subdag.task_dict[
upstream_subdag_task_id
]
upstream_subdag_task_urn = DataJobUrn.create_from_ids(
job_id=upstream_subdag_task_id, data_flow_urn=str(flow_urn)
)
# if subdag task is a leaf task, then link it as an upstream task
if len(upstream_subdag_task._downstream_task_ids) == 0:
upstream_subdag_task_urns.append(upstream_subdag_task_urn)
# resolve URNs for upstream nodes that trigger the subdag containing the current task.
# (if it is in a subdag at all)
upstream_subdag_triggers: List[DataJobUrn] = []
# subdags are always named with 'parent.child' style or Airflow won't run them
# add connection from subdag trigger(s) if subdag task has no upstreams
if (
dag.is_subdag
and dag.parent_dag is not None
and len(task._upstream_task_ids) == 0
):
# filter through the parent dag's tasks and find the subdag trigger(s)
subdags = [
x for x in dag.parent_dag.task_dict.values() if x.subdag is not None
]
matched_subdags = [
x
for x in subdags
if getattr(getattr(x, "subdag"), "dag_id") == dag.dag_id
]
# id of the task containing the subdag
subdag_task_id = matched_subdags[0].task_id
# iterate through the parent dag's tasks and find the ones that trigger the subdag
for upstream_task_id in dag.parent_dag.task_dict:
upstream_task = dag.parent_dag.task_dict[upstream_task_id]
upstream_task_urn = DataJobUrn.create_from_ids(
data_flow_urn=str(flow_urn), job_id=upstream_task_id
)
# if the task triggers the subdag, link it to this node in the subdag
if subdag_task_id in upstream_task._downstream_task_ids:
upstream_subdag_triggers.append(upstream_task_urn)
# exclude subdag operator tasks since these are not emitted, resulting in empty metadata
upstream_tasks = (
[
DataJobUrn.create_from_ids(job_id=task_id, data_flow_urn=str(flow_urn))
for task_id in task.upstream_task_ids
if dag.task_dict[task_id].subdag is None
]
+ upstream_subdag_task_urns
+ upstream_subdag_triggers
)
return upstream_tasks
@staticmethod
def generate_dataflow(
cluster: str,
dag: "DAG",
capture_owner: bool = True,
capture_tags: bool = True,
) -> DataFlow:
"""
Generates a Dataflow object from an Airflow DAG
:param cluster: str - name of the cluster
:param dag: DAG -
:param capture_tags:
:param capture_owner:
:return: DataFlow - Data generated dataflow
"""
from airflow.serialization.serialized_objects import SerializedDAG
id = dag.dag_id
orchestrator = "airflow"
description = f"{dag.description}\n\n{dag.doc_md or ''}"
data_flow = DataFlow(
cluster=cluster, id=id, orchestrator=orchestrator, description=description
)
flow_property_bag: Dict[str, str] = {
key: repr(value)
for (key, value) in SerializedDAG.serialize_dag(dag).items()
}
for key in dag.get_serialized_fields():
if key not in flow_property_bag:
flow_property_bag[key] = repr(getattr(dag, key))
allowed_flow_keys = [
"_access_control",
"_concurrency",
"_default_view",
"catchup",
"fileloc",
"is_paused_upon_creation",
"start_date",
"tags",
"timezone",
]
flow_property_bag = {
k: v for (k, v) in flow_property_bag.items() if k in allowed_flow_keys
}
data_flow.properties = flow_property_bag
base_url = conf.get("webserver", "base_url")
data_flow.url = f"{base_url}/tree?dag_id={dag.dag_id}"
if capture_owner and dag.owner:
data_flow.owners.add(dag.owner)
if capture_tags and dag.tags:
data_flow.tags.update(dag.tags)
return data_flow
@staticmethod
def generate_datajob(
cluster: str,
task: "BaseOperator",
dag: "DAG",
set_dependendecies: bool = True,
capture_owner: bool = True,
capture_tags: bool = True,
) -> DataJob:
"""
:param cluster: str
        :param task: BaseOperator
:param dag: DAG
:param set_dependendecies: bool - whether to extract dependencies from airflow task
:param capture_owner: bool - whether to extract owner from airflow task
:param capture_tags: bool - whether to set tags automatically from airflow task
:return: DataJob - returns the generated DataJob object
"""
from airflow.serialization.serialized_objects import SerializedBaseOperator
dataflow_urn = DataFlowUrn.create_from_ids(
orchestrator="airflow", env=cluster, flow_id=dag.dag_id
)
datajob = DataJob(id=task.task_id, flow_urn=dataflow_urn)
datajob.description = (
(task.doc or task.doc_md or task.doc_json or task.doc_yaml or task.doc_rst)
if not AIRFLOW_1
else None
)
job_property_bag: Dict[str, str] = {
key: repr(value)
for (key, value) in SerializedBaseOperator.serialize_operator(task).items()
}
for key in task.get_serialized_fields():
if key not in job_property_bag:
job_property_bag[key] = repr(getattr(task, key))
allowed_task_keys = [
"_downstream_task_ids",
"_inlets",
"_outlets",
"_task_type",
"_task_module",
"depends_on_past",
"email",
"label",
"execution_timeout",
"sla",
"sql",
"task_id",
"trigger_rule",
"wait_for_downstream",
]
job_property_bag = {
k: v for (k, v) in job_property_bag.items() if k in allowed_task_keys
}
datajob.properties = job_property_bag
base_url = conf.get("webserver", "base_url")
datajob.url = f"{base_url}/taskinstance/list/?flt1_dag_id_equals={datajob.flow_urn.get_flow_id()}&_flt_3_task_id={task.task_id}"
if capture_owner and dag.owner:
datajob.owners.add(dag.owner)
if capture_tags and dag.tags:
datajob.tags.update(dag.tags)
if set_dependendecies:
datajob.upstream_urns.extend(
AirflowGenerator._get_dependencies(
task=task, dag=dag, flow_urn=datajob.flow_urn
)
)
return datajob
@staticmethod
def create_datajob_instance(
cluster: str,
task: "BaseOperator",
dag: "DAG",
data_job: Optional[DataJob] = None,
) -> DataProcessInstance:
if data_job is None:
data_job = AirflowGenerator.generate_datajob(cluster, task=task, dag=dag)
dpi = DataProcessInstance.from_datajob(
datajob=data_job, id=task.task_id, clone_inlets=True, clone_outlets=True
)
return dpi
@staticmethod
def run_dataflow(
emitter: Union["DatahubRestEmitter", "DatahubKafkaEmitter"],
cluster: str,
dag_run: "DagRun",
start_timestamp_millis: Optional[int] = None,
dataflow: Optional[DataFlow] = None,
) -> None:
if dataflow is None:
assert dag_run.dag
dataflow = AirflowGenerator.generate_dataflow(cluster, dag_run.dag)
if start_timestamp_millis is None:
start_timestamp_millis = int(dag_run.execution_date.timestamp() * 1000)
dpi = DataProcessInstance.from_dataflow(dataflow=dataflow, id=dag_run.run_id)
# This property only exists in Airflow2
if hasattr(dag_run, "run_type"):
from airflow.utils.types import DagRunType
if dag_run.run_type == DagRunType.SCHEDULED:
dpi.type = DataProcessTypeClass.BATCH_SCHEDULED
elif dag_run.run_type == DagRunType.MANUAL:
dpi.type = DataProcessTypeClass.BATCH_AD_HOC
else:
if dag_run.run_id.startswith("scheduled__"):
dpi.type = DataProcessTypeClass.BATCH_SCHEDULED
else:
dpi.type = DataProcessTypeClass.BATCH_AD_HOC
property_bag: Dict[str, str] = {}
property_bag["run_id"] = str(dag_run.run_id)
property_bag["execution_date"] = str(dag_run.execution_date)
property_bag["end_date"] = str(dag_run.end_date)
property_bag["start_date"] = str(dag_run.start_date)
property_bag["creating_job_id"] = str(dag_run.creating_job_id)
property_bag["data_interval_start"] = str(dag_run.data_interval_start)
property_bag["data_interval_end"] = str(dag_run.data_interval_end)
property_bag["external_trigger"] = str(dag_run.external_trigger)
dpi.properties.update(property_bag)
dpi.emit_process_start(
emitter=emitter, start_timestamp_millis=start_timestamp_millis
)
@staticmethod
def complete_dataflow(
emitter: Union["DatahubRestEmitter", "DatahubKafkaEmitter"],
cluster: str,
dag_run: "DagRun",
end_timestamp_millis: Optional[int] = None,
dataflow: Optional[DataFlow] = None,
) -> None:
"""
:param emitter: DatahubRestEmitter - the datahub rest emitter to emit the generated mcps
:param cluster: str - name of the cluster
:param dag_run: DagRun
        :param end_timestamp_millis: Optional[int] - the completion time in
            milliseconds; if not set, the DAG run's end date is used.
:param dataflow: Optional[Dataflow]
"""
if dataflow is None:
assert dag_run.dag
dataflow = AirflowGenerator.generate_dataflow(cluster, dag_run.dag)
dpi = DataProcessInstance.from_dataflow(dataflow=dataflow, id=dag_run.run_id)
if end_timestamp_millis is None:
if dag_run.end_date is None:
raise Exception(
f"Dag {dag_run.dag_id}_{dag_run.run_id} is still running and unable to get end_date..."
)
end_timestamp_millis = int(dag_run.end_date.timestamp() * 1000)
# We should use DagRunState but it is not available in Airflow 1
if dag_run.state == "success":
result = InstanceRunResult.SUCCESS
elif dag_run.state == "failed":
result = InstanceRunResult.FAILURE
else:
raise Exception(
f"Result should be either success or failure and it was {dag_run.state}"
)
dpi.emit_process_end(
emitter=emitter,
end_timestamp_millis=end_timestamp_millis,
result=result,
result_type="airflow",
)
@staticmethod
def run_datajob(
emitter: Union["DatahubRestEmitter", "DatahubKafkaEmitter"],
cluster: str,
ti: "TaskInstance",
dag: "DAG",
dag_run: "DagRun",
start_timestamp_millis: Optional[int] = None,
datajob: Optional[DataJob] = None,
attempt: Optional[int] = None,
emit_templates: bool = True,
) -> DataProcessInstance:
if datajob is None:
datajob = AirflowGenerator.generate_datajob(cluster, ti.task, dag)
dpi = DataProcessInstance.from_datajob(
datajob=datajob,
id=f"{dag.dag_id}_{ti.task_id}_{dag_run.run_id}",
clone_inlets=True,
clone_outlets=True,
)
job_property_bag: Dict[str, str] = {}
job_property_bag["run_id"] = str(dag_run.run_id)
job_property_bag["duration"] = str(ti.duration)
job_property_bag["start_date"] = str(ti.start_date)
job_property_bag["end_date"] = str(ti.end_date)
job_property_bag["execution_date"] = str(ti.execution_date)
job_property_bag["try_number"] = str(ti.try_number - 1)
job_property_bag["hostname"] = str(ti.hostname)
job_property_bag["max_tries"] = str(ti.max_tries)
# Not compatible with Airflow 1
if not AIRFLOW_1:
job_property_bag["external_executor_id"] = str(ti.external_executor_id)
job_property_bag["pid"] = str(ti.pid)
job_property_bag["state"] = str(ti.state)
job_property_bag["operator"] = str(ti.operator)
job_property_bag["priority_weight"] = str(ti.priority_weight)
job_property_bag["unixname"] = str(ti.unixname)
job_property_bag["log_url"] = ti.log_url
dpi.properties.update(job_property_bag)
dpi.url = ti.log_url
# This property only exists in Airflow2
if hasattr(ti.dag_run, "run_type"):
from airflow.utils.types import DagRunType
if ti.dag_run.run_type == DagRunType.SCHEDULED:
dpi.type = DataProcessTypeClass.BATCH_SCHEDULED
elif ti.dag_run.run_type == DagRunType.MANUAL:
dpi.type = DataProcessTypeClass.BATCH_AD_HOC
else:
if dag_run.run_id.startswith("scheduled__"):
dpi.type = DataProcessTypeClass.BATCH_SCHEDULED
else:
dpi.type = DataProcessTypeClass.BATCH_AD_HOC
if start_timestamp_millis is None:
assert ti.start_date
start_timestamp_millis = int(ti.start_date.timestamp() * 1000)
if attempt is None:
attempt = ti.try_number
dpi.emit_process_start(
emitter=emitter,
start_timestamp_millis=start_timestamp_millis,
attempt=attempt,
emit_template=emit_templates,
)
return dpi
@staticmethod
def complete_datajob(
emitter: Union["DatahubRestEmitter", "DatahubKafkaEmitter"],
cluster: str,
ti: "TaskInstance",
dag: "DAG",
dag_run: "DagRun",
end_timestamp_millis: Optional[int] = None,
result: Optional[InstanceRunResult] = None,
datajob: Optional[DataJob] = None,
) -> DataProcessInstance:
"""
:param emitter: DatahubRestEmitter
:param cluster: str
:param ti: TaskInstance
:param dag: DAG
:param dag_run: DagRun
:param end_timestamp_millis: Optional[int]
        :param result: Optional[InstanceRunResult] - one of the values from
            datahub.metadata.schema_classes.RunResultTypeClass
:param datajob: Optional[DataJob]
:return: DataProcessInstance
"""
if datajob is None:
datajob = AirflowGenerator.generate_datajob(cluster, ti.task, dag)
if end_timestamp_millis is None:
assert ti.end_date
end_timestamp_millis = int(ti.end_date.timestamp() * 1000)
if result is None:
# We should use TaskInstanceState but it is not available in Airflow 1
if ti.state == "success":
result = InstanceRunResult.SUCCESS
elif ti.state == "failed":
result = InstanceRunResult.FAILURE
else:
raise Exception(
f"Result should be either success or failure and it was {ti.state}"
)
dpi = DataProcessInstance.from_datajob(
datajob=datajob,
id=f"{dag.dag_id}_{ti.task_id}_{dag_run.run_id}",
clone_inlets=True,
clone_outlets=True,
)
dpi.emit_process_end(
emitter=emitter,
end_timestamp_millis=end_timestamp_millis,
result=result,
result_type="airflow",
)
return dpi
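# A minimal usage sketch (assumed, not part of the original module); `emitter`,
# `ti`, `dag` and `dag_run` are hypothetical objects that an Airflow listener or
# policy callback would provide:
#
#     dataflow = AirflowGenerator.generate_dataflow(cluster="prod", dag=dag)
#     datajob = AirflowGenerator.generate_datajob(cluster="prod", task=ti.task, dag=dag)
#     AirflowGenerator.run_datajob(emitter=emitter, cluster="prod", ti=ti,
#                                  dag=dag, dag_run=dag_run, datajob=datajob)
#     AirflowGenerator.complete_datajob(emitter=emitter, cluster="prod", ti=ti,
#                                       dag=dag, dag_run=dag_run, datajob=datajob)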
|
src/python/twitter/common/rpc/finagle/trace.py | zhouyijiaren/commons | 1,143 | 12632240 | # ==================================================================================================
# Copyright 2013 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import copy
from contextlib import contextmanager
import random
import re
class SpanId(object):
__slots__ = ('_value',)
HEX_REGEX = re.compile(r'^[a-f0-9]{16}$', re.IGNORECASE)
class InvalidSpanId(ValueError):
def __init__(self, value):
ValueError.__init__(self, 'Invalid SpanId: %s' % repr(value))
@staticmethod
def from_value(value):
if isinstance(value, str):
if SpanId.HEX_REGEX.match(value):
return SpanId(int(value, 16))
elif isinstance(value, (int, long)):
return SpanId(value)
elif isinstance(value, SpanId):
return SpanId(value.value)
elif value is None:
return SpanId(None)
raise SpanId.InvalidSpanId(value)
def __init__(self, value):
self._value = value
@property
def value(self):
return self._value
def __str__(self):
    return 'SpanId(%s)' % ('%016x' % self._value if self._value is not None else 'Empty')
class TraceId(object):
@staticmethod
def rand():
return random.randint(0, 2**63-1)
def __init__(self, trace_id, parent_id, span_id, sampled):
self.trace_id = SpanId.from_value(trace_id)
self.parent_id = SpanId.from_value(parent_id)
self.span_id = SpanId.from_value(span_id)
self.sampled = bool(sampled)
def next(self):
return TraceId(self.trace_id, self.span_id, TraceId.rand(), self.sampled)
def __str__(self):
return 'TraceId(trace_id = %s, parent_id = %s, span_id = %s, sampled = %s)' % (
self.trace_id, self.parent_id, self.span_id, self.sampled)
class Trace(object):
"""
The container of a trace. Typically stored as a threadlocal on each
finagle-upgraded protocol.
"""
def __init__(self, sample_rate=0.001):
assert 0.0 <= sample_rate <= 1.0
self._sample_rate = sample_rate
self._stack = []
def get(self):
if len(self._stack) == 0:
span_id = TraceId.rand()
trace_id = TraceId(span_id, None, span_id, self.should_sample())
self._stack.append(trace_id)
return self._stack[-1]
@contextmanager
def push(self, trace_id):
self._stack.append(trace_id)
try:
yield self
finally:
self._stack.pop()
@contextmanager
def unwind(self):
trace_id_copy = copy.deepcopy(self._stack[-1])
try:
yield self
finally:
self._stack[-1] = trace_id_copy
def pop(self):
return self._stack.pop()
def should_sample(self):
return random.random() < self._sample_rate
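# A minimal usage sketch (assumed, not from the original source): a
# finagle-upgraded protocol would typically keep one Trace per thread and do
# something like
#
#     trace = Trace(sample_rate=0.01)
#     parent = trace.get()              # lazily creates the root TraceId
#     with trace.push(parent.next()):   # child span for an outgoing call
#         current = trace.get()         # TraceId to send on the wire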
|
pt_cnn_svm/models/cnn.py | bimalka98/CNNwithSVM-for-Image-Classification | 250 | 12632251 | <reponame>bimalka98/CNNwithSVM-for-Image-Classification
# Copyright 2017-2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of convolutional network in PyTorch"""
import torch
class CNN(torch.nn.Module):
"""
A convolutional neural network that optimizes
softmax cross entropy using Adam optimizer.
"""
def __init__(
self,
input_dim: int = 1,
num_classes: int = 10,
model_device: torch.device = torch.device("cpu"),
learning_rate: float = 1e-4,
):
"""
Constructs a convolutional neural network classifier.
Parameters
----------
input_dim: int
The dimensionality of the input feature channel.
num_classes: int
The number of classes in the dataset.
model_device: torch.device
The device to use for model computations.
learning_rate: float
The learning rate to use for optimization.
"""
super().__init__()
self.layers = torch.nn.ModuleList(
[
torch.nn.Conv2d(
in_channels=input_dim,
out_channels=64,
kernel_size=8,
stride=2,
padding=1,
),
torch.nn.ReLU(),
torch.nn.Conv2d(
in_channels=64, out_channels=128, kernel_size=6, stride=2, padding=1
),
torch.nn.ReLU(),
torch.nn.Flatten(),
torch.nn.Linear(in_features=(128 * 5 * 5), out_features=2048),
torch.nn.ReLU(),
torch.nn.Linear(in_features=2048, out_features=2048),
torch.nn.ReLU(),
torch.nn.Linear(in_features=2048, out_features=512),
torch.nn.ReLU(),
torch.nn.Linear(in_features=512, out_features=num_classes),
]
)
self.model_device = model_device
self.optimizer = torch.optim.Adam(params=self.parameters(), lr=learning_rate)
self.criterion = torch.nn.CrossEntropyLoss().to(self.model_device)
self.train_loss = []
def forward(self, features):
"""
Defines the forward pass by the model.
Parameter
---------
features : torch.Tensor
The input features.
Returns
-------
logits : torch.Tensor
The model output.
"""
activations = {}
for index, layer in enumerate(self.layers):
if index == 0:
activations[index] = layer(features)
else:
activations[index] = layer(activations[index - 1])
logits = activations[len(activations) - 1]
return logits
def predict(self, features, return_likelihoods=False):
"""
Returns model classifications
Parameters
----------
features: torch.Tensor
The input features to classify.
return_likelihoods: bool
Whether to return classes with likelihoods or not.
Returns
-------
predictions: torch.Tensor
The class likelihood output by the model.
classes: torch.Tensor
The class prediction by the model.
"""
outputs = self.forward(features)
predictions, classes = torch.max(outputs.data, dim=1)
return (predictions, classes) if return_likelihoods else classes
def fit(self, data_loader, epochs):
"""
Trains the cnn model.
Parameters
----------
data_loader : torch.utils.dataloader.DataLoader
The data loader object that consists of the data pipeline.
epochs : int
The number of epochs to train the model.
"""
self.to(self.model_device)
for epoch in range(epochs):
epoch_loss = self.epoch_train(self, data_loader)
if "cuda" in self.model_device.type:
torch.cuda.empty_cache()
self.train_loss.append(epoch_loss)
print(f"epoch {epoch + 1}/{epochs} : mean loss = {self.train_loss[-1]:.6f}")
@staticmethod
def epoch_train(model, data_loader):
"""
Trains a model for one epoch.
Parameters
----------
model : torch.nn.Module
The model to train.
data_loader : torch.utils.dataloader.DataLoader
The data loader object that consists of the data pipeline.
Returns
-------
epoch_loss : float
The epoch loss.
"""
epoch_loss = 0
for batch_features, batch_labels in data_loader:
batch_features = batch_features.to(model.model_device)
batch_labels = batch_labels.to(model.model_device)
model.optimizer.zero_grad()
outputs = model(batch_features)
train_loss = model.criterion(outputs, batch_labels)
train_loss.backward()
model.optimizer.step()
epoch_loss += train_loss.item()
epoch_loss /= len(data_loader)
return epoch_loss
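# A minimal usage sketch (assumed, not part of the original module); the data
# loader is any torch DataLoader yielding (images, labels) batches whose shape
# matches input_dim and the 28x28 resolution implied by the layer sizes:
#
#     model = CNN(input_dim=1, num_classes=10,
#                 model_device=torch.device("cuda:0"), learning_rate=1e-4)
#     model.fit(train_loader, epochs=10)
#     classes = model.predict(batch_features.to(model.model_device))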
|
hawck-ui/hawck_ui/template_manager.py | abitrolly/Hawck | 320 | 12632290 | ## ================================================================================
## template_manager.py is a part of hawck-ui, which is distributed under the
## following license:
##
## Copyright (C) 2018 <NAME> (no) <<EMAIL>>
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## 1. Redistributions of source code must retain the above copyright notice, this
## list of conditions and the following disclaimer.
## 2. Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions and the following disclaimer in the documentation
## and/or other materials provided with the distribution.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
## DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
## FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
## DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
## SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
## CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
## OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
## SOFTWARE.
## ================================================================================
import os
from gi.repository import Gtk
import pkg_resources as pkg
class TemplateManager:
def __init__(self, dir_path):
self.dir_path = dir_path
self.templates = {}
## Get builder instance of template
def get(self, name):
src = self.templates[name]
builder = Gtk.Builder()
builder.add_from_string(src)
root = builder.get_object("root")
## Make sure Python keeps the reference
root.builder = builder
root.unparent()
return root, builder
def load(self, name):
fpath = pkg.resource_filename(
"hawck_ui",
os.path.join(self.dir_path, name)
)
with open(fpath) as f:
self.insert(name, f.read())
def insert(self, name, string):
self.templates[name] = string
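# A minimal usage sketch (assumed, not from the original project; the directory
# and file names are hypothetical). Template sources are looked up relative to
# the hawck_ui package:
#
#     templates = TemplateManager("resources/templates")
#     templates.load("script_item.ui")
#     row, builder = templates.get("script_item.ui")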
|
tests/endpoints/test_eth_gasprice.py | kanzure/eth-testrpc | 164 | 12632303 | from testrpc.client.utils import (
encode_number,
)
def test_eth_gasprice(accounts, rpc_client):
result = rpc_client('eth_gasPrice')
assert result == "0x1"
|
py_entitymatching/labeler/labeler.py | kvpradap/py_entitymatching | 165 | 12632306 | """
This module contains labeling related routines for a single table.
"""
import logging
import pandas as pd
import six
import py_entitymatching.catalog.catalog_manager as cm
import py_entitymatching.utils.catalog_helper as ch
from py_entitymatching.utils.validation_helper import validate_object_type
logger = logging.getLogger(__name__)
def label_table(table, label_column_name, verbose=False):
"""
Label a pandas DataFrame (for supervised learning purposes).
    This function labels a DataFrame, typically used for supervised learning
    purposes. It expects the input DataFrame to contain the metadata
    of a candidate set (such as key, fk_ltable, fk_rtable, ltable, rtable).
    This function creates a copy of the input DataFrame, adds a label column
    at the end of the DataFrame, fills the column values with 0, invokes a
GUI for the user to enter labels (0/1, 0: non-match, 1: match) and finally
returns the labeled DataFrame. Further, this function also copies the
properties from the input DataFrame to the output DataFrame.
Args:
table (DataFrame): The input DataFrame to be labeled.
Specifically,
a DataFrame containing the metadata of a candidate set (such as
key, fk_ltable, fk_rtable, ltable, rtable) in the catalog.
label_column_name (string): The column name to be given for the labels
entered by the user.
verbose (boolean): A flag to indicate whether more detailed information
about the execution steps should be printed out (default value is
False).
Returns:
A new DataFrame with the labels entered by the user. Further,
this function sets the output DataFrame's properties same as input
DataFrame.
Raises:
AssertionError: If `table` is not of type pandas DataFrame.
AssertionError: If `label_column_name` is not of type string.
AssertionError: If the `label_column_name` is already present in the
input table.
Examples:
>>> import py_entitymatching as em
>>> G = em.label_table(S, label_column_name='label') # S is the (sampled) table that has to be labeled.
"""
# Validate the input parameters: check input types, check the metadata
# for the input DataFrame as it will get copied to the labeled DataFrame
_validate_inputs(table, label_column_name, verbose)
# Initialize the table to be labeled: create a copy and set the column
# values to be 0s
labeled_table = _init_label_table(table, label_column_name)
# Invoke the GUI
try:
from PyQt5 import QtGui
except ImportError:
raise ImportError('PyQt5 is not installed. Please install PyQt5 to use '
'GUI related functions in py_entitymatching.')
from py_entitymatching.gui.table_gui import edit_table
edit_table(labeled_table)
# Post process the labeled table: validate whether the labels contain
# only 0/1s, copy the properties (in the catalog) of the input table to the
# labeled table
labeled_table = _post_process_labelled_table(table, labeled_table,
label_column_name)
# Return the labeled table
return labeled_table
def _validate_inputs(table, label_column_name, verbose):
"""
This function validates the inputs for the label_table function
"""
# Validate the input parameters
# # The input table table is expected to be of type pandas DataFrame
validate_object_type(table, pd.DataFrame)
# # The label column name is expected to be of type string
validate_object_type(label_column_name, six.string_types, error_prefix='Input attr.')
# # Check if the label column name is already present in the input table
if ch.check_attrs_present(table, label_column_name):
logger.error('The label column name (%s) is already present in the '
'input table', label_column_name)
        raise AssertionError('The label column name (%s) is already present '
                             'in the input table' % label_column_name)
# Now, validate the metadata for the input DataFrame as we have to copy
# these properties to the output DataFrame
# # First, display what metadata is required for this function
ch.log_info(logger, 'Required metadata: cand.set key, fk ltable, '
'fk rtable, ltable, rtable, ltable key, rtable key',
verbose)
# # Second, get the metadata
key, fk_ltable, fk_rtable, ltable, rtable, l_key, r_key = \
cm.get_metadata_for_candset(table, logger, verbose)
# # Third, validate the metadata
cm._validate_metadata_for_candset(table, key, fk_ltable, fk_rtable,
ltable, rtable, l_key, r_key,
logger, verbose)
# Return True if everything was successful
return True
def _init_label_table(table, label_column_name):
"""
This function initializes inputs required for label_table function.
Specifically, this function makes a copy of the input table and
initializes the column values to 0s.
"""
# Create a copy of the input table
labeled_table = table.copy()
# Add the label column at the end and initialize to 0s (non-match)
labeled_table[label_column_name] = 0
# Return the label table
return labeled_table
def _post_process_labelled_table(input_table, labeled_table, col_name):
"""
This function post processes the labeled table and updates the catalog.
Specifically, this function validates that the label column contain only
0 and 1's, and finally copies the properties from the input table to the
output table.
"""
# Cast the label values to int as initially they will be strings when it
# comes from the GUI
labeled_table[col_name] = labeled_table[col_name].astype(int)
# Check if the table contains only 0s and 1s
label_value_with_1 = labeled_table[col_name] == 1
label_value_with_0 = labeled_table[col_name] == 0
sum_of_labels = sum(label_value_with_1 | label_value_with_0)
# If they contain column values other than 0 and 1, raise an error
if not sum_of_labels == len(labeled_table):
logger.error('The label column contains values other than 0 and 1')
raise AssertionError(
'The label column contains values other than 0 and 1')
# Copy the properties from the input table to label table.
    # Note: here we don't have to check the integrity of 'key' because the
    # key column from the input table is not tampered with.
cm.init_properties(labeled_table)
cm.copy_properties(input_table, labeled_table)
# Return the label table
return labeled_table
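# Illustrative sketch (not part of the original module): how the private helpers
# compose around the GUI step. `table` is assumed to be a candidate-set DataFrame
# with valid py_entitymatching metadata, and 'gold' is a hypothetical column name.
#
#     labeled = _init_label_table(table, 'gold')     # copy with a 'gold' column of 0s
#     labeled.loc[labeled.index[:5], 'gold'] = 1     # stand-in for the user's GUI edits
#     labeled = _post_process_labelled_table(table, labeled, 'gold')
#
# The post-processing step raises an AssertionError if 'gold' holds anything other
# than 0/1 and copies the catalog properties of `table` onto the labeled copy.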
|
rasa/nlu/classifiers/mitie_intent_classifier.py | fintzd/rasa | 9,701 | 12632321 | <reponame>fintzd/rasa
from __future__ import annotations
import logging
from rasa.nlu.featurizers.featurizer import Featurizer
import typing
from typing import Any, Dict, List, Optional, Text, Type
from rasa.engine.graph import ExecutionContext, GraphComponent
from rasa.engine.recipes.default_recipe import DefaultV1Recipe
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.storage import ModelStorage
from rasa.nlu.classifiers.classifier import IntentClassifier
from rasa.nlu.utils.mitie_utils import MitieModel, MitieNLP
from rasa.nlu.constants import TOKENS_NAMES
from rasa.shared.nlu.constants import TEXT, INTENT
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
if typing.TYPE_CHECKING:
import mitie
logger = logging.getLogger(__name__)
@DefaultV1Recipe.register(
DefaultV1Recipe.ComponentType.INTENT_CLASSIFIER,
is_trainable=True,
model_from="MitieNLP",
)
class MitieIntentClassifier(GraphComponent, IntentClassifier):
"""Intent classifier which uses the `mitie` library."""
@classmethod
def required_components(cls) -> List[Type]:
"""Components that should be included in the pipeline before this component."""
return [MitieNLP, Featurizer]
@staticmethod
def get_default_config() -> Dict[Text, Any]:
"""Returns default config (see parent class for full docstring)."""
return {"num_threads": 1}
def __init__(
self,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
clf: Optional["mitie.text_categorizer"] = None,
) -> None:
"""Constructs a new intent classifier using the MITIE framework."""
self._config = config
self._model_storage = model_storage
self._resource = resource
self._clf = clf
@classmethod
def required_packages(cls) -> List[Text]:
"""Lists required dependencies (see parent class for full docstring)."""
return ["mitie"]
def train(self, training_data: TrainingData, model: MitieModel) -> Resource:
"""Trains classifier.
Args:
training_data: The NLU training data.
model: The loaded mitie model provided by `MitieNLP`.
Returns:
The resource locator for the trained classifier.
"""
import mitie
trainer = mitie.text_categorizer_trainer(str(model.model_path))
trainer.num_threads = self._config["num_threads"]
for example in training_data.intent_examples:
tokens = self._tokens_of_message(example)
trainer.add_labeled_text(tokens, example.get(INTENT))
if training_data.intent_examples:
# we can not call train if there are no examples!
clf = trainer.train()
self._persist(clf)
return self._resource
def process(self, messages: List[Message], model: MitieModel) -> List[Message]:
"""Make intent predictions using `mitie`.
Args:
            messages: The messages for which the intents should be predicted.
model: The loaded mitie model provided by `MitieNLP`.
"""
for message in messages:
if self._clf:
token_strs = self._tokens_of_message(message)
intent, confidence = self._clf(token_strs, model.word_feature_extractor)
else:
# either the model didn't get trained or it wasn't
# provided with any data
intent = None
confidence = 0.0
message.set(
"intent", {"name": intent, "confidence": confidence}, add_to_output=True
)
return messages
@staticmethod
def _tokens_of_message(message: Message) -> List[Text]:
return [token.text for token in message.get(TOKENS_NAMES[TEXT], [])]
@classmethod
def create(
cls,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
    ) -> MitieIntentClassifier:
        """Creates component for training (see parent class for full docstring)."""
return cls(config, model_storage, resource)
@classmethod
def load(
cls,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
**kwargs: Any,
    ) -> MitieIntentClassifier:
        """Loads component for inference (see parent class for full docstring)."""
import mitie
text_categorizer = None
try:
with model_storage.read_from(resource) as directory:
text_categorizer = mitie.text_categorizer(str(directory / "model.dat"))
except (
ValueError,
Exception,
): # the latter is thrown by the `mitie.text_categorizer`
            logger.warning(
                f"Failed to load {cls.__name__} from model storage. Resource "
f"'{resource.name}' doesn't exist."
)
return cls(config, model_storage, resource, text_categorizer)
def _persist(self, text_categorizer: "mitie.text_categorizer") -> None:
"""Persists trained model (see parent class for full docstring)."""
with self._model_storage.write_to(self._resource) as directory:
classifier_file = directory / "model.dat"
text_categorizer.save_to_disk(str(classifier_file), pure_model=True)
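# Illustrative sketch (not part of the original file): the prediction that
# `process` attaches to each message. The intent name "greet" and the confidence
# value below are hypothetical example values.
#
#     message.get("intent")   # -> {"name": "greet", "confidence": 0.87}
#     message.get("intent")   # -> {"name": None, "confidence": 0.0} when no classifier was trained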
|
tests/causal_estimators/test_regression_discontinuity_estimator.py | Sid-darthvader/dowhy | 2,904 | 12632329 | <reponame>Sid-darthvader/dowhy
import pytest
from dowhy.causal_estimators.regression_discontinuity_estimator import RegressionDiscontinuityEstimator
from .base import TestEstimator
@pytest.mark.usefixtures("fixed_seed")
class TestRegressionDiscontinuityEstimator(object):
@pytest.mark.parametrize(["error_tolerance", "Estimator",
"num_common_causes", "num_instruments",
"num_effect_modifiers", "num_treatments",
"treatment_is_binary", "outcome_is_binary", "identifier_method"],
[(0.2, RegressionDiscontinuityEstimator, [1], [1,], [0], [1,], [True,], [False,], "iv"),])
def test_average_treatment_effect(self, error_tolerance, Estimator,
num_common_causes, num_instruments, num_effect_modifiers,
num_treatments, treatment_is_binary, outcome_is_binary,
identifier_method
):
estimator_tester = TestEstimator(error_tolerance, Estimator,
identifier_method=identifier_method)
estimator_tester.average_treatment_effect_testsuite(
num_common_causes=num_common_causes,
num_instruments = num_instruments,
num_effect_modifiers = num_effect_modifiers,
num_treatments=num_treatments,
treatment_is_binary=treatment_is_binary,
outcome_is_binary=outcome_is_binary,
dataset="simple-iv",
method_params ={'rd_variable_name':'Z0',
'rd_threshold_value':0.5,
'rd_bandwidth': 0.2})
|
deep_privacy/engine/hooks/log_hooks.py | skoskjei/DP-ATT | 1,128 | 12632362 | import torch
import logging
import time
from deep_privacy import torch_utils, logger
from deep_privacy.metrics import metric_api
from .base import HookBase, HOOK_REGISTRY
from deep_privacy.inference import infer
try:
from apex import amp
except ImportError:
pass
@HOOK_REGISTRY.register_module
class ImageSaveHook(HookBase):
def __init__(self, ims_per_save: int, n_diverse_samples: int):
self.ims_per_save = ims_per_save
self.next_save_point = self.ims_per_save
self.before_images = None
self._n_diverse_samples = n_diverse_samples
def state_dict(self):
return {
"next_save_point": self.next_save_point,
"before_images": self.before_images}
def load_state_dict(self, state_dict: dict):
self.next_save_point = state_dict["next_save_point"]
self.before_images = state_dict["before_images"]
def after_step(self):
if self.global_step() >= self.next_save_point:
self.next_save_point += self.ims_per_save
self.save_fake_images(True)
self.save_fake_images(False)
def save_fake_images(self, validation: bool):
g = self.trainer.generator
if validation:
g = self.trainer.RA_generator
fake_data, real_data, condition = self.get_images(g)
fake_data = fake_data[:64]
logger.save_images(
"fakes", fake_data, denormalize=True, nrow=8,
log_to_validation=validation)
logger.save_images(
"reals", real_data[:64], denormalize=True, log_to_writer=False,
nrow=8,
log_to_validation=validation)
condition = condition[:64]
logger.save_images(
"condition", condition, log_to_writer=False, denormalize=True,
nrow=8,
log_to_validation=validation)
self.save_images_diverse()
def get_images(self, g):
g.eval()
batch = next(iter(self.trainer.dataloader_val))
z = g.generate_latent_variable(batch["img"]).zero_()
with torch.no_grad():
fake_data_sample = g(**batch,
z=z)
g.train()
return fake_data_sample, batch["img"], batch["condition"]
@torch.no_grad()
def save_images_diverse(self):
"""
Generates images with several latent variables
"""
g = self.trainer.RA_generator
g.eval()
batch = next(iter(self.trainer.dataloader_val))
batch = {k: v[:8] for k, v in batch.items()}
fakes = [batch["condition"].cpu()]
for i in range(self._n_diverse_samples):
z = g.generate_latent_variable(batch["img"])
fake = g(**batch, z=z)
fakes.append(fake.cpu())
fakes = torch.cat(fakes)
logger.save_images(
"diverse", fakes, log_to_validation=True, nrow=8, denormalize=True)
g.train()
def before_extend(self):
transition_value = 1
self.trainer.RA_generator.update_transition_value(
transition_value
)
fake_data, real_data, condition = self.get_images(
self.trainer.RA_generator
)
before_images = [
torch_utils.denormalize_img(x[:8])
for x in [real_data, fake_data, condition]
]
before_images = torch.cat((before_images), dim=0)
self.before_images = before_images.cpu()
def after_extend(self):
transition_value = 0
self.trainer.RA_generator.update_transition_value(
transition_value
)
fake_data, real_data, condition = self.get_images(
self.trainer.RA_generator
)
after_images = [
torch_utils.denormalize_img(x[:8])
for x in [real_data, fake_data, condition]
]
after_images = torch.cat((after_images), dim=0)
after_images = torch.nn.functional.avg_pool2d(after_images, 2)
after_images = after_images.cpu()
assert after_images.shape == self.before_images.shape
diff = self.before_images - after_images
to_save = torch.cat(
(self.before_images, after_images, diff), dim=2)
imsize = after_images.shape[-1]
imname = f"transition/from_{imsize}"
logger.save_images(imname, to_save,
log_to_writer=True, nrow=8 * 3)
self.before_images = None
@HOOK_REGISTRY.register_module
class MetricHook(HookBase):
def __init__(
self,
ims_per_log: int,
fid_batch_size: int,
lpips_batch_size: int,
min_imsize_to_calculate: int):
self.next_check = ims_per_log
self.num_ims_per_fid = ims_per_log
self.lpips_batch_size = lpips_batch_size
self.fid_batch_size = fid_batch_size
self.min_imsize_to_calculate = min_imsize_to_calculate
def state_dict(self):
return {"next_check": self.next_check}
def load_state_dict(self, state_dict: dict):
self.next_check = state_dict["next_check"]
def after_step(self):
if self.global_step() >= self.next_check:
self.next_check += self.num_ims_per_fid
if self.current_imsize() >= self.min_imsize_to_calculate:
self.calculate_fid()
def calculate_fid(self):
logger.info("Starting calculation of FID value")
generator = self.trainer.RA_generator
real_images, fake_images = infer.infer_images(
self.trainer.dataloader_val, generator,
truncation_level=0
)
"""
# Remove FID calculation as holy shit this is expensive.
cfg = self.trainer.cfg
identifier = f"{cfg.dataset_type}_{cfg.data_val.dataset.percentage}_{self.current_imsize()}"
transition_value = self.trainer.RA_generator.transition_value
fid_val = metric_api.fid(
real_images, fake_images,
batch_size=self.fid_batch_size)
logger.log_variable("stats/fid", np.mean(fid_val),
log_level=logging.INFO)
"""
l1 = metric_api.l1(real_images, fake_images)
        l2 = metric_api.l2(real_images, fake_images)  # assumes metric_api also provides an l2 metric
psnr = metric_api.psnr(real_images, fake_images)
lpips = metric_api.lpips(
real_images, fake_images, self.lpips_batch_size)
logger.log_variable("stats/l1", l1, log_level=logging.INFO)
logger.log_variable("stats/l2", l2, log_level=logging.INFO)
logger.log_variable("stats/psnr", psnr, log_level=logging.INFO)
logger.log_variable("stats/lpips", lpips, log_level=logging.INFO)
@HOOK_REGISTRY.register_module
class StatsLogger(HookBase):
def __init__(
self,
num_ims_per_log: int):
self.num_ims_per_log = num_ims_per_log
self.next_log_point = self.num_ims_per_log
self.start_time = time.time()
self.num_skipped_steps = 0
def state_dict(self):
return {
"total_time": (time.time() - self.start_time),
"num_skipped_steps": self.num_skipped_steps
}
def load_state_dict(self, state_dict: dict):
self.start_time = time.time() - state_dict["total_time"]
self.num_skipped_steps = state_dict["num_skipped_steps"]
def before_train(self):
self.batch_start_time = time.time()
self.log_dictionary({"stats/batch_size": self.trainer.batch_size()})
def log_dictionary(self, to_log: dict):
logger.log_dictionary(to_log)
def after_step(self):
has_gradient_penalty = "loss/gradient_penalty" in self.to_log
if has_gradient_penalty or self.global_step() >= self.next_log_point:
self.log_stats()
self.log_dictionary(self.to_log)
self.log_loss_scales()
self.next_log_point = self.global_step() + self.num_ims_per_log
def log_stats(self):
time_spent = time.time() - self.batch_start_time
num_steps = self.global_step() - self.next_log_point + self.num_ims_per_log
num_steps = max(num_steps, 1)
nsec_per_img = time_spent / num_steps
total_time = (time.time() - self.start_time) / 60
to_log = {
"stats/nsec_per_img": nsec_per_img,
"stats/batch_size": self.trainer.batch_size(),
"stats/training_time_minutes": total_time,
}
self.batch_start_time = time.time()
self.log_dictionary(to_log)
def log_loss_scales(self):
to_log = {f'amp/loss_scale_{loss_idx}': loss_scaler._loss_scale
for loss_idx, loss_scaler in enumerate(amp._amp_state.loss_scalers)}
to_log['amp/num_skipped_gradients'] = self.num_skipped_steps
self.log_dictionary(to_log)
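if __name__ == "__main__":
    # Illustrative sketch (not part of the original file): the checkpoint round
    # trip the hooks above support. The constructor values are hypothetical.
    _hook = MetricHook(ims_per_log=10000, fid_batch_size=8,
                       lpips_batch_size=8, min_imsize_to_calculate=128)
    _state = _hook.state_dict()      # {"next_check": 10000}
    _hook.load_state_dict(_state)    # restores the next evaluation point
    print(_state)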
|
bottleneck/tests/scalar_input_test.py | joye1503/bottleneck | 372 | 12632409 | """Check that functions can handle scalar input"""
from typing import Callable, Union
import hypothesis
import numpy as np
import pytest
from hypothesis.strategies import floats, integers, one_of
from numpy.testing import assert_array_almost_equal
import bottleneck as bn # noqa: F401
from .util import get_functions
int64_iinfo = np.iinfo(np.int64)
scalars = one_of(
[integers(min_value=int64_iinfo.min, max_value=int64_iinfo.max), floats()]
)
@hypothesis.given(scalar=scalars)
@pytest.mark.parametrize(
"func",
get_functions("reduce") + get_functions("nonreduce_axis"),
ids=lambda x: x.__name__,
)
def test_scalar_input(
func: Callable[[np.array], Union[int, float, np.array]], scalar: Union[int, float]
) -> None:
"""Test that bn.xxx gives the same output as bn.slow.xxx for scalar input."""
if func.__name__ in ("partition", "argpartition", "push"):
return
func0 = eval("bn.slow.%s" % func.__name__)
msg = "\nfunc %s | input %s\n"
actual_raised = False
desired_raised = False
try:
actual = func(scalar)
except ValueError:
actual_raised = True
try:
desired = func0(scalar)
except ValueError:
desired_raised = True
if desired_raised or actual_raised:
assert actual_raised and desired_raised
else:
err_msg = msg % (func.__name__, scalar)
assert_array_almost_equal(actual, desired, err_msg=err_msg)
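# Illustrative note (not part of the original file): the property the test above
# encodes, spelled out for one concrete reduce function. For a scalar s, bn.nansum(s)
# and bn.slow.nansum(s) must either agree or both raise ValueError, e.g.
#
#     assert bn.nansum(3.5) == bn.slow.nansum(3.5)
#
# nansum and the value 3.5 are an arbitrary example, not part of the test matrix.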
|
dataset/__init__.py | fenghansen/ELD | 258 | 12632411 | import torch.utils.data as data
from PIL import Image
import os
import os.path
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
def read_fns(filename):
with open(filename) as f:
fns = f.readlines()
fns = [fn.strip() for fn in fns]
return fns
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def read_paired_fns(filename):
with open(filename) as f:
fns = f.readlines()
fns = [tuple(fn.strip().split(' ')) for fn in fns]
return fns
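# Illustrative sketch (not part of the original file), assuming a hypothetical
# 'pairs.txt' in which each line holds "noisy_path clean_path" separated by a
# single space:
#
#     pairs = read_paired_fns('pairs.txt')   # [('a_noisy.png', 'a_clean.png'), ...]
#     images = [noisy for noisy, _ in pairs if is_image_file(noisy)]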
|
backend/projects/models.py | alairice/doccano | 2,082 | 12632415 | import abc
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Manager
from polymorphic.models import PolymorphicModel
from roles.models import Role
DOCUMENT_CLASSIFICATION = "DocumentClassification"
SEQUENCE_LABELING = "SequenceLabeling"
SEQ2SEQ = "Seq2seq"
SPEECH2TEXT = "Speech2text"
IMAGE_CLASSIFICATION = "ImageClassification"
INTENT_DETECTION_AND_SLOT_FILLING = "IntentDetectionAndSlotFilling"
PROJECT_CHOICES = (
(DOCUMENT_CLASSIFICATION, "document classification"),
(SEQUENCE_LABELING, "sequence labeling"),
(SEQ2SEQ, "sequence to sequence"),
(INTENT_DETECTION_AND_SLOT_FILLING, "intent detection and slot filling"),
(SPEECH2TEXT, "speech to text"),
(IMAGE_CLASSIFICATION, "image classification"),
)
class Project(PolymorphicModel):
name = models.CharField(max_length=100)
description = models.TextField(default="")
guideline = models.TextField(default="", blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
created_by = models.ForeignKey(
User,
on_delete=models.SET_NULL,
null=True,
)
project_type = models.CharField(max_length=30, choices=PROJECT_CHOICES)
random_order = models.BooleanField(default=False)
collaborative_annotation = models.BooleanField(default=False)
single_class_classification = models.BooleanField(default=False)
def add_admin(self):
admin_role = Role.objects.get(name=settings.ROLE_PROJECT_ADMIN)
Member.objects.create(
project=self,
user=self.created_by,
role=admin_role,
)
@property
@abc.abstractmethod
def is_text_project(self) -> bool:
return False
@property
    def can_define_label(self) -> bool:
        """Whether or not the project can define label (ignoring the type of label)."""
return False
@property
def can_define_relation(self) -> bool:
"""Whether or not the project can define relation."""
return False
@property
def can_define_category(self) -> bool:
"""Whether or not the project can define category."""
return False
@property
def can_define_span(self) -> bool:
"""Whether or not the project can define span."""
return False
def __str__(self):
return self.name
class TextClassificationProject(Project):
@property
def is_text_project(self) -> bool:
return True
@property
def can_define_label(self) -> bool:
return True
@property
def can_define_category(self) -> bool:
return True
class SequenceLabelingProject(Project):
allow_overlapping = models.BooleanField(default=False)
grapheme_mode = models.BooleanField(default=False)
use_relation = models.BooleanField(default=False)
@property
def is_text_project(self) -> bool:
return True
@property
def can_define_label(self) -> bool:
return True
@property
def can_define_span(self) -> bool:
return True
class Seq2seqProject(Project):
@property
def is_text_project(self) -> bool:
return True
class IntentDetectionAndSlotFillingProject(Project):
@property
def is_text_project(self) -> bool:
return True
@property
def can_define_label(self) -> bool:
return True
@property
def can_define_category(self) -> bool:
return True
@property
def can_define_span(self) -> bool:
return True
class Speech2textProject(Project):
@property
def is_text_project(self) -> bool:
return False
class ImageClassificationProject(Project):
@property
def is_text_project(self) -> bool:
return False
@property
def can_define_label(self) -> bool:
return True
@property
def can_define_category(self) -> bool:
return True
class Tag(models.Model):
text = models.TextField()
project = models.ForeignKey(to=Project, on_delete=models.CASCADE, related_name="tags")
def __str__(self):
return self.text
class MemberManager(Manager):
def can_update(self, project: int, member_id: int, new_role: str) -> bool:
"""The project needs at least 1 admin.
Args:
project: The project id.
member_id: The member id.
new_role: The new role name.
Returns:
Whether the mapping can be updated or not.
"""
queryset = self.filter(project=project, role__name=settings.ROLE_PROJECT_ADMIN)
if queryset.count() > 1:
return True
else:
admin = queryset.first()
# we can change the role except for the only admin.
return admin.id != member_id or new_role == settings.ROLE_PROJECT_ADMIN
def has_role(self, project_id: int, user: User, role_name: str):
return self.filter(project=project_id, user=user, role__name=role_name).exists()
class Member(models.Model):
user = models.ForeignKey(to=User, on_delete=models.CASCADE, related_name="role_mappings")
project = models.ForeignKey(to=Project, on_delete=models.CASCADE, related_name="role_mappings")
role = models.ForeignKey(to=Role, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects = MemberManager()
def clean(self):
members = self.__class__.objects.exclude(id=self.id)
if members.filter(user=self.user, project=self.project).exists():
message = "This user is already assigned to a role in this project."
raise ValidationError(message)
@property
def username(self):
return self.user.username
class Meta:
unique_together = ("user", "project")
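# Illustrative sketch (not part of the original file): the admin-preservation rule
# enforced by MemberManager.can_update. The ids and the "annotator" role name are
# hypothetical; only the behaviour follows the code above.
#
#     # project 1 has a single admin whose member id is 1
#     Member.objects.can_update(project=1, member_id=1, new_role="annotator")
#     # -> False: the only admin cannot be demoted
#     Member.objects.can_update(project=1, member_id=2, new_role="annotator")
#     # -> True: any other member's role can change freely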
|
tools/perf/page_sets/login_helpers/pinterest_login.py | google-ar/chromium | 2,151 | 12632472 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from page_sets.login_helpers import login_utils
def LoginDesktopAccount(action_runner, credential,
                        credentials_path=login_utils.DEFAULT_CREDENTIAL_PATH):
  """Logs into a Pinterest account.
  This function navigates the tab to Pinterest's login page and logs in a user
  using the credentials stored under |credential| in the |credentials_path| file.
Args:
action_runner: Action runner responsible for running actions on the page.
credential: The credential to retrieve from the credentials file (string).
credentials_path: The path to credential file (string).
Raises:
exceptions.Error: See ExecuteJavaScript()
for a detailed list of possible exceptions.
"""
account_name, password = login_utils.GetAccountNameAndPassword(
credential, credentials_path=credentials_path)
action_runner.Navigate('https://www.pinterest.com/login/')
action_runner.Wait(1) # Error page happens if this wait is not here.
  login_utils.InputWithSelector(
      action_runner, '<EMAIL>' % account_name, 'input[type=email]')
login_utils.InputWithSelector(
action_runner, password, 'input[type=password]')
login_button_function = (
'document.getElementsByClassName("Button Module btn hasText large '
'primary continueButton rounded")[0]')
action_runner.WaitForElement(element_function=login_button_function)
action_runner.ClickElement(element_function=login_button_function)
search_bar_function = (
'document.getElementsByClassName("Input Module field")[0]')
action_runner.WaitForElement(element_function=search_bar_function)
|
dm_control/mjcf/code_for_debugging_test.py | h8907283/dm_control | 2,863 | 12632519 | # Copyright 2018 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Constructs models for debugging_test.py.
The purpose of this file is to provide "golden" source line numbers for test
cases in debugging_test.py. When this module is loaded, it inspects its own
source code to look for lines that begin with `# !!LINE_REF`, and stores the
following line number in a dict. This allows test cases to look up the line
number by name, rather than brittly hard-coding in the line number.
"""
import collections
import os
import re
from dm_control import mjcf
SourceLine = collections.namedtuple(
'SourceLine', ('line_number', 'text'))
LINE_REF = {}
def make_valid_model():
# !!LINE_REF make_valid_model.mjcf_model
mjcf_model = mjcf.RootElement()
# !!LINE_REF make_valid_model.my_body
my_body = mjcf_model.worldbody.add('body', name='my_body')
my_body.add('inertial', mass=1, pos=[0, 0, 0], diaginertia=[1, 1, 1])
# !!LINE_REF make_valid_model.my_joint
my_joint = my_body.add('joint', name='my_joint', type='hinge')
# !!LINE_REF make_valid_model.my_actuator
mjcf_model.actuator.add('velocity', name='my_actuator', joint=my_joint)
return mjcf_model
def make_broken_model():
# !!LINE_REF make_broken_model.mjcf_model
mjcf_model = mjcf.RootElement()
# !!LINE_REF make_broken_model.my_body
my_body = mjcf_model.worldbody.add('body', name='my_body')
my_body.add('inertial', mass=1, pos=[0, 0, 0], diaginertia=[1, 1, 1])
# !!LINE_REF make_broken_model.my_joint
my_body.add('joint', name='my_joint', type='hinge')
# !!LINE_REF make_broken_model.my_actuator
mjcf_model.actuator.add('velocity', name='my_actuator', joint='invalid_joint')
return mjcf_model
def break_valid_model(mjcf_model):
# !!LINE_REF break_valid_model.my_actuator.joint
mjcf_model.find('actuator', 'my_actuator').joint = 'invalid_joint'
return mjcf_model
def _parse_line_refs():
line_ref_pattern = re.compile(r'\s*# !!LINE_REF\s*([^\s]+)')
filename, _ = os.path.splitext(__file__) # __file__ can be `.pyc`.
with open(filename + '.py') as f:
src = f.read()
src_lines = src.split('\n')
for line_number, line in enumerate(src_lines):
match = line_ref_pattern.match(line)
if match:
LINE_REF[match.group(1)] = SourceLine(
line_number + 2, src_lines[line_number + 1].strip())
_parse_line_refs()
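if __name__ == '__main__':
  # Illustrative sketch (not part of the original file): resolving a tagged source
  # line after the module-level _parse_line_refs() call above has run.
  ref = LINE_REF['make_broken_model.my_actuator']
  print(ref.line_number)  # 1-based number of the line following the "# !!LINE_REF" tag
  print(ref.text)         # stripped source text of that line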
|
test_indoor.py | cao-cong/RViDeNet | 147 | 12632521 | from __future__ import division
import os, scipy.io
import re
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import glob
import cv2
import argparse
from PIL import Image
from utils import *
parser = argparse.ArgumentParser(description='Testing')
parser.add_argument('--model', dest='model', type=str, default='finetune', help='model type')
parser.add_argument('--gpu_id', dest='gpu_id', type=int, default=0, help='gpu id')
parser.add_argument('--output_dir', type=str, default='./results/finetune/', help='output path')
parser.add_argument('--vis_data', type=bool, default=False, help='whether to visualize noisy and gt data')
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_id)
isp = torch.load('isp/ISP_CNN.pth').cuda()
model = torch.load('model/finetune.pth').cuda()
iso_list = [1600,3200,6400,12800,25600]
for iso in iso_list:
print('processing iso={}'.format(iso))
if not os.path.isdir(args.output_dir+'ISO{}'.format(iso)):
os.makedirs(args.output_dir+'ISO{}'.format(iso))
f = open('{}_model_test_psnr_and_ssim_on_iso{}.txt'.format(args.model, iso), 'w')
context = 'ISO{}'.format(iso) + '\n'
f.write(context)
scene_avg_raw_psnr = 0
scene_avg_raw_ssim = 0
scene_avg_srgb_psnr = 0
scene_avg_srgb_ssim = 0
for scene_id in range(7,11+1):
context = 'scene{}'.format(scene_id) + '\n'
f.write(context)
frame_avg_raw_psnr = 0
frame_avg_raw_ssim = 0
frame_avg_srgb_psnr = 0
frame_avg_srgb_ssim = 0
for i in range(1,7+1):
frame_list = []
for j in range(-1,2):
if (i+j)<1:
raw = cv2.imread('./data/CRVD_data/scene{}/ISO{}/frame1_noisy0.tiff'.format(scene_id, iso),-1)
input_full = np.expand_dims(pack_gbrg_raw(raw), axis=0)
frame_list.append(input_full)
elif (i+j)>7:
raw = cv2.imread('./data/CRVD_data/scene{}/ISO{}/frame7_noisy0.tiff'.format(scene_id, iso),-1)
input_full = np.expand_dims(pack_gbrg_raw(raw), axis=0)
frame_list.append(input_full)
else:
raw = cv2.imread('./data/CRVD_data/scene{}/ISO{}/frame{}_noisy0.tiff'.format(scene_id, iso, i+j),-1)
input_full = np.expand_dims(pack_gbrg_raw(raw), axis=0)
frame_list.append(input_full)
input_data = np.concatenate(frame_list, axis=3)
test_result = test_big_size_raw(input_data, model, patch_h = 256, patch_w = 256, patch_h_overlap = 64, patch_w_overlap = 64)
test_result = depack_gbrg_raw(test_result)
test_gt = cv2.imread('./data/CRVD_data/scene{}/ISO{}/frame{}_clean_and_slightly_denoised.tiff'.format(scene_id, iso, i),-1).astype(np.float32)
test_gt = (test_gt-240)/(2**12-1-240)
test_raw_psnr = compare_psnr(test_gt,(np.uint16(test_result*(2**12-1-240)+240).astype(np.float32)-240)/(2**12-1-240), data_range=1.0)
test_raw_ssim = compute_ssim_for_packed_raw(test_gt, (np.uint16(test_result*(2**12-1-240)+240).astype(np.float32)-240)/(2**12-1-240))
print('scene {} frame{} test raw psnr : {}, test raw ssim : {} '.format(scene_id, i, test_raw_psnr, test_raw_ssim))
context = 'raw psnr/ssim: {}/{}'.format(test_raw_psnr,test_raw_ssim) + '\n'
f.write(context)
frame_avg_raw_psnr += test_raw_psnr
frame_avg_raw_ssim += test_raw_ssim
output = test_result*(2**12-1-240)+240
save_result = Image.fromarray(np.uint16(output))
save_result.save(args.output_dir+'ISO{}/scene{}_frame{}_denoised_raw.tiff'.format(iso, scene_id, i))
noisy_raw_frame = preprocess(input_data[:,:,:,4:8])
noisy_srgb_frame = postprocess(isp(noisy_raw_frame))[0]
if args.vis_data:
cv2.imwrite(args.output_dir+'ISO{}/scene{}_frame{}_noisy_sRGB.png'.format(iso, scene_id, i), np.uint8(noisy_srgb_frame*255))
denoised_raw_frame = preprocess(np.expand_dims(pack_gbrg_raw(output),axis=0))
denoised_srgb_frame = postprocess(isp(denoised_raw_frame))[0]
cv2.imwrite(args.output_dir+'ISO{}/scene{}_frame{}_denoised_sRGB.png'.format(iso, scene_id, i), np.uint8(denoised_srgb_frame*255))
gt_raw_frame = np.expand_dims(pack_gbrg_raw(test_gt*(2**12-1-240)+240), axis=0)
gt_srgb_frame = postprocess(isp(preprocess(gt_raw_frame)))[0]
if args.vis_data:
cv2.imwrite(args.output_dir+'ISO{}/scene{}_frame{}_gt_sRGB.png'.format(iso, scene_id, i), np.uint8(gt_srgb_frame*255))
test_srgb_psnr = compare_psnr(np.uint8(gt_srgb_frame*255).astype(np.float32)/255, np.uint8(denoised_srgb_frame*255).astype(np.float32)/255, data_range=1.0)
test_srgb_ssim = compare_ssim(np.uint8(gt_srgb_frame*255).astype(np.float32)/255, np.uint8(denoised_srgb_frame*255).astype(np.float32)/255, data_range=1.0, multichannel=True)
print('scene {} frame{} test srgb psnr : {}, test srgb ssim : {} '.format(scene_id, i, test_srgb_psnr, test_srgb_ssim))
context = 'srgb psnr/ssim: {}/{}'.format(test_srgb_psnr,test_srgb_ssim) + '\n'
f.write(context)
frame_avg_srgb_psnr += test_srgb_psnr
frame_avg_srgb_ssim += test_srgb_ssim
frame_avg_raw_psnr = frame_avg_raw_psnr/7
frame_avg_raw_ssim = frame_avg_raw_ssim/7
frame_avg_srgb_psnr = frame_avg_srgb_psnr/7
frame_avg_srgb_ssim = frame_avg_srgb_ssim/7
context = 'frame average raw psnr:{},frame average raw ssim:{}'.format(frame_avg_raw_psnr,frame_avg_raw_ssim) + '\n'
f.write(context)
context = 'frame average srgb psnr:{},frame average srgb ssim:{}'.format(frame_avg_srgb_psnr,frame_avg_srgb_ssim) + '\n'
f.write(context)
scene_avg_raw_psnr += frame_avg_raw_psnr
scene_avg_raw_ssim += frame_avg_raw_ssim
scene_avg_srgb_psnr += frame_avg_srgb_psnr
scene_avg_srgb_ssim += frame_avg_srgb_ssim
scene_avg_raw_psnr = scene_avg_raw_psnr/5
scene_avg_raw_ssim = scene_avg_raw_ssim/5
scene_avg_srgb_psnr = scene_avg_srgb_psnr/5
scene_avg_srgb_ssim = scene_avg_srgb_ssim/5
context = 'scene average raw psnr:{},scene frame average raw ssim:{}'.format(scene_avg_raw_psnr,scene_avg_raw_ssim) + '\n'
f.write(context)
context = 'scene average srgb psnr:{},scene frame average srgb ssim:{}'.format(scene_avg_srgb_psnr,scene_avg_srgb_ssim) + '\n'
f.write(context)
|
app/ReadabiliPy/readabilipy/simplifiers/text.py | Largo/Lurnby | 590 | 12632523 | <filename>app/ReadabiliPy/readabilipy/simplifiers/text.py<gh_stars>100-1000
"""Common text manipulation functions."""
import unicodedata
import regex
matched_punctuation_marks = [('“', '”'), ('‘', '’'), ('(', ')'), ('[', ']'),
('{', '}')]
terminal_punctuation_marks = ['.', ',', '!', ':', ';', '?']
def normalise_unicode(text):
"""Normalise unicode such that things that are visually equivalent
map to the same unicode string where possible."""
normal_form = "NFKC"
text = unicodedata.normalize(normal_form, text)
return text
def normalise_whitespace(text):
"""Replace runs of whitespace characters with a single space as
this is what happens when HTML text is displayed."""
text = regex.sub(r"\s+", " ", text)
# Remove leading and trailing whitespace
text = text.strip()
return text
def normalise_text(text):
"""Normalise unicode and whitespace."""
# Normalise unicode first to try and standardise whitespace characters
# as much as possible before normalising them
text = strip_control_characters(text)
text = normalise_unicode(text)
text = normalise_whitespace(text)
return text
def strip_html_whitespace(text):
"""Simplify HTML by stripping whitespace."""
# Normalise unicode first to try and standardise whitespace characters
# as much as possible before normalising them
text = normalise_text(text)
text = text.replace(" <", "<").replace("> ", ">")
return text
def strip_control_characters(text):
"""Strip out unicode control characters which might
break the parsing."""
# Unicode control characters
# [Cc]: Other, Control [includes new lines]
# [Cf]: Other, Format
# [Cn]: Other, Not Assigned
# [Co]: Other, Private Use
# [Cs]: Other, Surrogate
control_chars = set(['Cc', 'Cf', 'Cn', 'Co', 'Cs'])
retained_chars = ['\t', '\n', '\r', '\f']
# Remove non-printing control characters
return "".join(["" if (unicodedata.category(char) in control_chars) and
(char not in retained_chars) else char for char in text])
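if __name__ == "__main__":
    # Illustrative sketch (not part of the original file): what the normalisers do
    # to a hypothetical messy input string. \ufb01 is the "fi" ligature and \u00a0
    # is a no-break space; NFKC folds both before whitespace is collapsed.
    print(normalise_text("  \ufb01ne\u00a0 text \n"))   # "fine text"
    print(strip_html_whitespace("<p> hi </p>  "))       # "<p>hi</p>"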
|