max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content
stringlengths 4-245 | stringlengths 7-115 | int64 101-368k | stringlengths 2-8 | stringlengths 6-1.03M
---|---|---|---|---|
gengine/app/tests_old/test_groups.py | greck2908/gamification-engine | 347 | 6585 | # -*- coding: utf-8 -*-
from gengine.app.tests.base import BaseDBTest
from gengine.app.tests.helpers import create_user, update_user, delete_user, get_or_create_language
from gengine.metadata import DBSession
from gengine.app.model import AuthUser
class TestUserCreation(BaseDBTest):
def test_user_creation(self):
lang = get_or_create_language("en")
user = create_user(
lat = 12.1,
lng = 12.2,
#country = "RO",
#region = "Transylvania",
#city = "Cluj-Napoca",
timezone = "Europe/Bukarest",
language = "en",
additional_public_data = {
"first_name" : "Rudolf",
"last_name" : "<NAME>"
}
)
self.assertTrue(user.lat == 12.1)
self.assertTrue(user.lng == 12.2)
#self.assertTrue(user.country == "RO")
#self.assertTrue(user.region == "Transylvania")
#self.assertTrue(user.city == "Cluj-Napoca")
self.assertTrue(user.timezone == "Europe/Bukarest")
self.assertTrue(user.language_id == lang.id)
self.assertTrue(user.additional_public_data["first_name"] == "Rudolf")
self.assertTrue(user.additional_public_data["last_name"] == "<NAME>")
def test_user_updation(self):
lang = get_or_create_language("en")
user = create_user()
user = update_user(
user_id = user.id,
lat = 14.2,
lng = 16.3,
#country = "EN",
#region = "Transylvania",
#city = "Cluj-Napoca",
timezone = "Europe/Bukarest",
language = "en",
additional_public_data = {
"first_name" : "Rudolf",
"last_name" : "<NAME>"
}
)
# Correct cases
self.assertTrue(user.lat == 14.2)
self.assertTrue(user.lng == 16.3)
#self.assertTrue(user.country == "EN")
#self.assertTrue(user.region == "Transylvania")
#self.assertTrue(user.city == "Cluj-Napoca")
self.assertTrue(user.timezone == "Europe/Bukarest")
self.assertTrue(user.language_id == lang.id)
def test_user_deletion(self):
user1 = create_user()
# Create Second user
user2 = create_user(
lat=85.59,
lng=65.75,
#country="DE",
#region="Niedersachsen",
#city="Osnabrück",
timezone="Europe/Berlin",
language="de",
additional_public_data={
"first_name": "Michael",
"last_name": "Clarke"
},
friends=[1]
)
remaining_users = delete_user(
user_id = user1.id
)
# Correct cases
self.assertNotIn(user1.id, remaining_users)
self.assertEqual(user2.id, remaining_users[0].id)
def test_verify_password(self):
auth_user = AuthUser()
auth_user.password = "<PASSWORD>"
auth_user.active = True
auth_user.email = "<EMAIL>"
DBSession.add(auth_user)
iscorrect = auth_user.verify_password("<PASSWORD>")
self.assertEqual(iscorrect, True)
def test_create_token(self):
user = create_user()
auth_user = AuthUser()
auth_user.user_id = user.id
auth_user.password = "<PASSWORD>"
auth_user.active = True
auth_user.email = "<EMAIL>"
DBSession.add(auth_user)
if auth_user.verify_password("<PASSWORD>"):
token = auth_user.get_or_create_token()
self.assertNotEqual(token, None)
|
mycli/packages/special/main.py | lyrl/mycli | 10,997 | 6604 |
import logging
from collections import namedtuple
from . import export
log = logging.getLogger(__name__)
NO_QUERY = 0
PARSED_QUERY = 1
RAW_QUERY = 2
SpecialCommand = namedtuple('SpecialCommand',
['handler', 'command', 'shortcut', 'description', 'arg_type', 'hidden',
'case_sensitive'])
COMMANDS = {}
@export
class CommandNotFound(Exception):
pass
@export
def parse_special_command(sql):
command, _, arg = sql.partition(' ')
verbose = '+' in command
command = command.strip().replace('+', '')
return (command, verbose, arg.strip())
@export
def special_command(command, shortcut, description, arg_type=PARSED_QUERY,
hidden=False, case_sensitive=False, aliases=()):
def wrapper(wrapped):
register_special_command(wrapped, command, shortcut, description,
arg_type, hidden, case_sensitive, aliases)
return wrapped
return wrapper
@export
def register_special_command(handler, command, shortcut, description,
arg_type=PARSED_QUERY, hidden=False, case_sensitive=False, aliases=()):
cmd = command.lower() if not case_sensitive else command
COMMANDS[cmd] = SpecialCommand(handler, command, shortcut, description,
arg_type, hidden, case_sensitive)
for alias in aliases:
cmd = alias.lower() if not case_sensitive else alias
COMMANDS[cmd] = SpecialCommand(handler, command, shortcut, description,
arg_type, case_sensitive=case_sensitive,
hidden=True)
@export
def execute(cur, sql):
"""Execute a special command and return the results. If the special command
is not supported a KeyError will be raised.
"""
command, verbose, arg = parse_special_command(sql)
if (command not in COMMANDS) and (command.lower() not in COMMANDS):
raise CommandNotFound
try:
special_cmd = COMMANDS[command]
except KeyError:
special_cmd = COMMANDS[command.lower()]
if special_cmd.case_sensitive:
raise CommandNotFound('Command not found: %s' % command)
# "help <SQL KEYWORD> is a special case. We want built-in help, not
# mycli help here.
if command == 'help' and arg:
return show_keyword_help(cur=cur, arg=arg)
if special_cmd.arg_type == NO_QUERY:
return special_cmd.handler()
elif special_cmd.arg_type == PARSED_QUERY:
return special_cmd.handler(cur=cur, arg=arg, verbose=verbose)
elif special_cmd.arg_type == RAW_QUERY:
return special_cmd.handler(cur=cur, query=sql)
@special_command('help', '\\?', 'Show this help.', arg_type=NO_QUERY, aliases=('\\?', '?'))
def show_help(): # All the parameters are ignored.
headers = ['Command', 'Shortcut', 'Description']
result = []
for _, value in sorted(COMMANDS.items()):
if not value.hidden:
result.append((value.command, value.shortcut, value.description))
return [(None, result, headers, None)]
def show_keyword_help(cur, arg):
"""
Call the built-in "show <command>", to display help for an SQL keyword.
:param cur: cursor
:param arg: string
:return: list
"""
keyword = arg.strip('"').strip("'")
query = "help '{0}'".format(keyword)
log.debug(query)
cur.execute(query)
if cur.description and cur.rowcount > 0:
headers = [x[0] for x in cur.description]
return [(None, cur, headers, '')]
else:
return [(None, None, None, 'No help found for {0}.'.format(keyword))]
@special_command('exit', '\\q', 'Exit.', arg_type=NO_QUERY, aliases=('\\q', ))
@special_command('quit', '\\q', 'Quit.', arg_type=NO_QUERY)
def quit(*_args):
raise EOFError
@special_command('\\e', '\\e', 'Edit command with editor (uses $EDITOR).',
arg_type=NO_QUERY, case_sensitive=True)
@special_command('\\clip', '\\clip', 'Copy query to the system clipboard.',
arg_type=NO_QUERY, case_sensitive=True)
@special_command('\\G', '\\G', 'Display current query results vertically.',
arg_type=NO_QUERY, case_sensitive=True)
def stub():
raise NotImplementedError
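# Illustrative sketch (not part of the original module): registering a custom
# special command with the decorator above and dispatching it through execute().
# The command name, handler and return shape below are made up for the demo;
# only the decorator/execute API comes from this file.
if __name__ == '__main__':
    @special_command('greet', '\\greet', 'Say hello.', arg_type=PARSED_QUERY)
    def greet(cur=None, arg='', verbose=False):
        return [(None, [(arg or 'world',)], ['greeting'], None)]
    print(execute(None, 'greet mycli'))  # -> [(None, [('mycli',)], ['greeting'], None)]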
|
torchdrug/layers/flow.py | wconnell/torchdrug | 772 | 6610 | import torch
from torch import nn
from torch.nn import functional as F
from torchdrug import layers
class ConditionalFlow(nn.Module):
"""
Conditional flow transformation from `Masked Autoregressive Flow for Density Estimation`_.
.. _Masked Autoregressive Flow for Density Estimation:
https://arxiv.org/pdf/1705.07057.pdf
Parameters:
input_dim (int): input & output dimension
condition_dim (int): condition dimension
hidden_dims (list of int, optional): hidden dimensions
activation (str or function, optional): activation function
"""
def __init__(self, input_dim, condition_dim, hidden_dims=None, activation="relu"):
super(ConditionalFlow, self).__init__()
self.input_dim = input_dim
self.output_dim = input_dim
if hidden_dims is None:
hidden_dims = []
self.mlp = layers.MLP(condition_dim, list(hidden_dims) + [input_dim * 2], activation)
self.rescale = nn.Parameter(torch.zeros(1))
def forward(self, input, condition):
"""
Transform data into latent representations.
Parameters:
input (Tensor): input representations
condition (Tensor): conditional representations
Returns:
(Tensor, Tensor): latent representations, log-likelihood of the transformation
"""
scale, bias = self.mlp(condition).chunk(2, dim=-1)
scale = (F.tanh(scale) * self.rescale)
output = (input + bias) * scale.exp()
log_det = scale
return output, log_det
def reverse(self, latent, condition):
"""
Transform latent representations into data.
Parameters:
latent (Tensor): latent representations
condition (Tensor): conditional representations
Returns:
(Tensor, Tensor): input representations, log-likelihood of the transformation
"""
scale, bias = self.mlp(condition).chunk(2, dim=-1)
scale = (F.tanh(scale) * self.rescale)
output = latent / scale.exp() - bias
log_det = scale
        return output, log_det
|
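A minimal round-trip sketch for the ConditionalFlow file above (assumed usage; the import path and constructor signature are read off the file as shown, not checked against a torchdrug release):

import torch
from torchdrug.layers.flow import ConditionalFlow

flow = ConditionalFlow(input_dim=8, condition_dim=16, hidden_dims=[32])
x = torch.randn(4, 8)
cond = torch.randn(4, 16)
z, log_det = flow(x, cond)        # z = (x + bias) * exp(scale)
x_rec, _ = flow.reverse(z, cond)  # inverts the affine map
assert torch.allclose(x, x_rec, atol=1e-5)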
smartnlp/utils/basic_log.py | msgi/nlp-tour | 1,559 | 6624 | import logging as log
class Log:
def __init__(self, level):
self.level = level
log.basicConfig(format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s',
level=level)
self.log = log
def info(self, msg):
self.log.info(msg)
def debug(self, msg):
self.log.debug(msg)
def warn(self, msg):
        self.log.warning(msg)
def error(self, msg):
self.log.error(msg)
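# Usage sketch (assumed, not part of the original file): an INFO-level logger
# that emits timestamp, path, line number, level and message per the format
# configured in __init__.
if __name__ == '__main__':
    logger = Log(log.INFO)
    logger.info('model loaded')
    logger.error('failed to open vocab file')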
|
applications/cli/commands/model/tests/test_export.py | nparkstar/nauta | 390 | 6632 | #
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from click.testing import CliRunner
from cli_text_consts import ModelExportCmdTexts as Texts
from commands.model.common import workflow_description
from commands.model.export import export
from platform_resources.workflow import ArgoWorkflow, QUEUED_PHASE
FEM_NAME = "EXPORT_1"
SEM_NAME = "EXPORT_2"
FEM_PARAMETERS = "PARAMS_1"
SEM_PARAMETERS = "PARAMS_2"
FEM_START_DATE = '2000-01-01'
FEM_NAMESPACE = 'test-namespace'
TEST_AGROWORKFLOW = ArgoWorkflow(name=FEM_NAME, started_at=FEM_START_DATE, finished_at=None,
namespace=FEM_NAMESPACE, phase=None)
TWO_MODEL_OUTPUT = [workflow_description(name=FEM_NAME, parameters=FEM_PARAMETERS),
workflow_description(name=SEM_NAME, parameters=SEM_PARAMETERS)]
def setup_mocks(mocker):
mocker.patch('commands.model.export.get_kubectl_current_context_namespace',
return_value='fake-namespace')
mocker.patch('platform_resources.workflow.ArgoWorkflow.from_yaml',
return_value=mocker.MagicMock())
mocker.patch('platform_resources.workflow.ArgoWorkflow.get',
return_value=TEST_AGROWORKFLOW)
mocker.patch('os.listdir', return_value=['openvino.yaml', 'tensorflow.yaml', 'some_other_file'])
mocker.patch('commands.model.export.NAUTAConfigMap', return_value=mocker.MagicMock(registry='fake-addr'))
mocker.patch('commands.model.export.Config')
mocker.patch('os.path.isdir', return_value=True)
def test_export(mocker):
setup_mocks(mocker)
result = CliRunner().invoke(export, ["/fake/path", "openvino"])
assert result.exit_code == 0
assert "Successfully created export workflow" in result.output
assert QUEUED_PHASE in result.output
assert FEM_NAME in result.output
assert FEM_START_DATE in result.output
assert FEM_NAMESPACE in result.output
def test_export_inexistent_format(mocker):
setup_mocks(mocker)
result = CliRunner().invoke(export, ["/fake/path", "bad"])
assert result.exit_code == 2
assert "Format: bad does not exist. Choose from:" in result.output
def test_export_failure(mocker):
setup_mocks(mocker)
mocker.patch('platform_resources.workflow.ArgoWorkflow.from_yaml',
return_value=mocker.MagicMock(create=lambda: RuntimeError))
result = CliRunner().invoke(export, ["/fake/path", "openvino"])
assert result.exit_code == 1
assert "Failed to create export workflow" in result.output
def test_export_list(mocker):
mocker.patch("commands.model.export.get_list_of_workflows", return_value=TWO_MODEL_OUTPUT)
result = CliRunner().invoke(export, ["formats"])
assert FEM_NAME in result.output
assert SEM_NAME in result.output
assert FEM_PARAMETERS in result.output
assert SEM_PARAMETERS in result.output
def test_export_list_error(mocker):
mocker.patch("commands.model.export.get_list_of_workflows", side_effect=RuntimeError)
result = CliRunner().invoke(export, ["formats"])
assert Texts.EXPORT_LIST_ERROR_MSG in result.output
def test_export_missing_format(mocker):
setup_mocks(mocker)
result = CliRunner().invoke(export, ["wrong-option"])
assert Texts.MISSING_EXPORT_FORMAT.format(formats=["openvino", "tensorflow"]) in result.output
|
var/spack/repos/builtin/packages/py-mdanalysis/package.py | LiamBindle/spack | 2,360 | 6633 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyMdanalysis(PythonPackage):
"""MDAnalysis is a Python toolkit to analyze molecular dynamics
trajectories generated by a wide range of popular simulation
packages including DL_Poly, CHARMM, Amber, NAMD, LAMMPS, and
Gromacs. (See the lists of supported trajectory formats and
topology formats.)"""
homepage = "https://www.mdanalysis.org"
pypi = "MDAnalysis/MDAnalysis-0.19.2.tar.gz"
version('1.0.0', sha256='f45a024aca45e390ff1c45ca90beb2180b78881be377e2a1aa9cd6c109bcfa81')
version('0.20.1', sha256='d04b71b193b9716d2597ffb9938b93f43487fa535da1bb5c1f2baccf356d7df9')
version('0.19.2', sha256='c5395bbafa5efca2e1aee4715d26129844140c47cb8301da0293106cb969de7d')
version('0.19.1', sha256='ff1d694f8598c0833ec340de6a6adb3b5e62b92d0fa94ee6401718ba972db3cc')
version('0.19.0', sha256='248e3b37fc6150e31c609cc18a3927c32aee37b76d29cbfedf635e7e1aa982cf')
version('0.18.0', sha256='a08acea1755112411e7db55e3f282e164b47a59e15794b38744cce6c596f252a')
version('0.17.0', sha256='9bd61760334698cc7b8a57ad26456451e926e9c9e66722594ad8816561348cde')
version('0.16.2', sha256='407d9a9ff1ab8a5e47973714d06fabff220f8d08a28792dee93e88e70e995b0a')
version('0.16.1', sha256='3dc8f5d639ab3a0d152cbd7259ae9372ec8a9bac0f8cb7d3b80ce5adc1e3ee57')
version('0.16.0', sha256='c4824fa1fddd336daa39371436187ebb023366885fb250c2827ed7fce2546bd4')
version('0.15.0', sha256='9088786048b47339cba1f8a586977bbb3bb04ae1bcd0462b59e45bda37e25533')
variant('analysis', default=True,
description='Enable analysis packages: matplotlib, scipy, seaborn')
variant('amber', default=False,
description='Support AMBER netcdf format.')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type='build')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@0.17.0:', type=('build', 'run'))
depends_on('[email protected]:', when='@0.16.0:', type=('build', 'run'))
depends_on('py-mock', when='@0.18.0:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.0.0:', type=('build', 'run'))
depends_on('py-joblib', when='@0.16.0:0.20.1', type=('build', 'run'))
depends_on('[email protected]:', when='@1.0.0:', type=('build', 'run'))
depends_on('[email protected]:', when='@:0.15.0', type=('build', 'run'))
depends_on('[email protected]:', when='@0.16.0:0.19.2', type=('build', 'run'))
depends_on('[email protected]:', when='@0.20.1:', type=('build', 'run'))
depends_on('[email protected]:', when='@:0.17.0', type=('build', 'run'))
depends_on('[email protected]:', when='@0.18.0:', type=('build', 'run'))
depends_on('[email protected]:', when='@:0.16.2', type=('build', 'run'))
depends_on('[email protected]:', when='@0.17.0:', type=('build', 'run'))
depends_on('py-matplotlib', when='@:0.15.0+analysis', type=('build', 'run'))
depends_on('[email protected]:', when='@0.16.0:0.16.1+analysis', type=('build', 'run'))
depends_on('[email protected]:', when='@0.16.2:', type=('build', 'run'))
depends_on('py-scipy', when='@:0.16.1+analysis', type=('build', 'run'))
depends_on('py-scipy', when='@0.16.2:0.17.0', type=('build', 'run'))
depends_on('[email protected]:', when='@0.18.0:', type=('build', 'run'))
depends_on('py-scikit-learn', when='@0.16.0:+analysis', type=('build', 'run'))
depends_on('py-seaborn', when='+analysis', type=('build', 'run'))
depends_on('[email protected]:', when='+amber', type=('build', 'run'))
depends_on('hdf5', when='+amber', type=('run'))
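    # Usage sketch (assumed, not part of the original recipe): the variants above
    # are toggled on the spack command line, e.g.
    #   spack install py-mdanalysis@1.0.0 +analysis +amber
    #   spack install py-mdanalysis ~analysis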
|
lib/cherrypy/cherrypy/test/test_sessionauthenticate.py | MiCHiLU/google_appengine_sdk | 790 | 6642 |
import cherrypy
from cherrypy.test import helper
class SessionAuthenticateTest(helper.CPWebCase):
def setup_server():
def check(username, password):
# Dummy check_username_and_password function
if username != 'test' or password != 'password':
return 'Wrong login/password'
def augment_params():
            # A simple tool that adds entries to request.params.
            # This checks that session_auth can handle request
            # params (ticket #780).
cherrypy.request.params["test"] = "test"
cherrypy.tools.augment_params = cherrypy.Tool('before_handler',
augment_params, None, priority=30)
class Test:
_cp_config = {'tools.sessions.on': True,
'tools.session_auth.on': True,
'tools.session_auth.check_username_and_password': check,
'tools.augment_params.on': True,
}
def index(self, **kwargs):
return "Hi %s, you are logged in" % cherrypy.request.login
index.exposed = True
cherrypy.tree.mount(Test())
setup_server = staticmethod(setup_server)
def testSessionAuthenticate(self):
# request a page and check for login form
self.getPage('/')
self.assertInBody('<form method="post" action="do_login">')
# setup credentials
login_body = 'username=test&password=password&from_page=/'
# attempt a login
self.getPage('/do_login', method='POST', body=login_body)
self.assertStatus((302, 303))
# get the page now that we are logged in
self.getPage('/', self.cookies)
self.assertBody('Hi test, you are logged in')
# do a logout
self.getPage('/do_logout', self.cookies, method='POST')
self.assertStatus((302, 303))
# verify we are logged out
self.getPage('/', self.cookies)
self.assertInBody('<form method="post" action="do_login">')
|
cmake/utils/gen-ninja-deps.py | stamhe/bitcoin-abc | 1,266 | 6660 |
#!/usr/bin/env python3
import argparse
import os
import subprocess
parser = argparse.ArgumentParser(description='Produce a dep file from ninja.')
parser.add_argument(
'--build-dir',
help='The build directory.',
required=True)
parser.add_argument(
'--base-dir',
    help='The directory for which dependencies are rewritten.',
required=True)
parser.add_argument('--ninja', help='The ninja executable to use.')
parser.add_argument(
'base_target',
help="The target from the base's perspective.")
parser.add_argument(
'targets', nargs='+',
help='The target for which dependencies are extracted.')
parser.add_argument(
'--extra-deps', nargs='+',
help='Extra dependencies.')
args = parser.parse_args()
build_dir = os.path.abspath(args.build_dir)
base_dir = os.path.abspath(args.base_dir)
ninja = args.ninja
base_target = args.base_target
targets = args.targets
extra_deps = args.extra_deps
# Make sure we operate in the right folder.
os.chdir(build_dir)
if ninja is None:
ninja = subprocess.check_output(['command', '-v', 'ninja'])[:-1]
# Construct the set of all targets
all_targets = set()
doto_targets = set()
for t in subprocess.check_output([ninja, '-t', 'targets', 'all']).splitlines():
t, r = t.split(b':')
all_targets.add(t)
if r[:13] == b' C_COMPILER__' or r[:15] == b' CXX_COMPILER__':
doto_targets.add(t)
def parse_ninja_query(query):
deps = dict()
lines = query.splitlines()
while len(lines):
line = lines.pop(0)
if line[0] == ord(' '):
continue
# We have a new target
target = line.split(b':')[0]
assert lines.pop(0)[:8] == b' input:'
inputs = set()
while True:
i = lines.pop(0)
if i[:4] != b' ':
break
'''
ninja has 3 types of input:
1. Explicit dependencies, no prefix;
                2. Implicit dependencies, | prefix;
                3. Order-only dependencies, || prefix.
            Order-only dependencies do not require the target to be rebuilt,
            and so we ignore them.
'''
i = i[4:]
if i[0] == ord('|'):
if i[1] == ord('|'):
# We reached the order only dependencies.
break
i = i[2:]
inputs.add(i)
deps[target] = inputs
return deps
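# For illustration (assumed sample, not captured from a real build): given a
# `ninja -t query` block such as
#
#   obj/foo.o:
#     input: CXX_COMPILER__foo
#       src/foo.cpp
#       | src/foo.h
#       || cmake_order_only_target
#     outputs:
#       libfoo.a
#
# parse_ninja_query() returns {b'obj/foo.o': {b'src/foo.cpp', b'src/foo.h'}};
# the order-only dependency after '||' is dropped.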
def extract_deps(workset):
# Recursively extract the dependencies of the target.
deps = dict()
while len(workset) > 0:
query = subprocess.check_output([ninja, '-t', 'query'] + list(workset))
target_deps = parse_ninja_query(query)
deps.update(target_deps)
workset = set()
for d in target_deps.values():
workset.update(t for t in d if t in all_targets and t not in deps)
# Extract build time dependencies.
bt_targets = [t for t in deps if t in doto_targets]
if len(bt_targets) == 0:
return deps
ndeps = subprocess.check_output(
[ninja, '-t', 'deps'] + bt_targets,
stderr=subprocess.DEVNULL)
lines = ndeps.splitlines()
while len(lines) > 0:
line = lines.pop(0)
t, m = line.split(b':')
if m == b' deps not found':
continue
inputs = set()
while True:
i = lines.pop(0)
if i == b'':
break
assert i[:4] == b' '
inputs.add(i[4:])
deps[t] = inputs
return deps
base_dir = base_dir.encode()
def rebase_deps(deps):
rebased = dict()
cache = dict()
def rebase(path):
if path in cache:
return cache[path]
abspath = os.path.abspath(path)
newpath = path if path == abspath else os.path.relpath(
abspath, base_dir)
cache[path] = newpath
return newpath
for t, s in deps.items():
rebased[rebase(t)] = set(rebase(d) for d in s)
return rebased
deps = extract_deps(set(targets))
deps = rebase_deps(deps)
def dump(deps):
for t, d in deps.items():
if len(d) == 0:
continue
str = t.decode() + ": \\\n "
str += " \\\n ".join(sorted(map((lambda x: x.decode()), d)))
print(str)
# Collapse everything under the base target.
basedeps = set() if extra_deps is None else set(d.encode() for d in extra_deps)
for d in deps.values():
basedeps.update(d)
base_target = base_target.encode()
basedeps.discard(base_target)
dump({base_target: basedeps})
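# The emitted dep file is Makefile-style (illustrative, assumed paths):
#
#   libfoo.a: \
#    ../src/foo.cpp \
#    ../src/foo.h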
|
eve/workers/pykmip/bin/run_server.py | mmg-3/cloudserver | 762 | 6668 | #!/usr/bin/env python
# Copyright (c) 2016 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging # noqa: E402
logging.basicConfig(level=logging.DEBUG)
from kmip.services.server import server # noqa: E402
if __name__ == '__main__':
print('Starting PyKMIP server on 0.0.0.0:5696')
server.main()
|
tensorflow_quantum/python/differentiators/__init__.py | PyJedi/quantum | 1,501 | 6670 |
# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module functions for tfq.differentiators.*"""
from tensorflow_quantum.python.differentiators.adjoint import (
Adjoint,)
from tensorflow_quantum.python.differentiators.linear_combination import (
ForwardDifference,
CentralDifference,
LinearCombination,
)
from tensorflow_quantum.python.differentiators.parameter_shift import (
ParameterShift,)
from tensorflow_quantum.python.differentiators.differentiator import (
Differentiator,)
|
harbor/tests/test_unit.py | tdimnet/integrations-core | 663 | 6689 | # (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from mock import MagicMock
from requests import HTTPError
from datadog_checks.base import AgentCheck
from datadog_checks.dev.http import MockResponse
from .common import HARBOR_COMPONENTS, HARBOR_VERSION, VERSION_1_5, VERSION_1_6, VERSION_1_8
@pytest.mark.usefixtures("patch_requests")
def test_check_health(aggregator, harbor_check, harbor_api):
base_tags = ['tag1:val1', 'tag2']
harbor_check._check_health(harbor_api, base_tags)
if harbor_api.harbor_version >= VERSION_1_8:
components = HARBOR_COMPONENTS
for c in components:
aggregator.assert_service_check('harbor.status', AgentCheck.OK, tags=base_tags + ['component:{}'.format(c)])
elif harbor_api.harbor_version >= VERSION_1_6:
aggregator.assert_service_check('harbor.status', AgentCheck.OK, tags=base_tags + ['component:chartmuseum'])
aggregator.assert_service_check('harbor.status', AgentCheck.OK, tags=base_tags)
elif harbor_api.harbor_version >= VERSION_1_5:
aggregator.assert_service_check('harbor.status', AgentCheck.OK, tags=base_tags)
else:
aggregator.assert_service_check('harbor.status', AgentCheck.UNKNOWN, tags=base_tags)
@pytest.mark.usefixtures("patch_requests")
def test_check_registries_health(aggregator, harbor_check, harbor_api):
tags = ['tag1:val1', 'tag2']
harbor_check._check_registries_health(harbor_api, tags)
tags.append('registry:demo')
aggregator.assert_service_check('harbor.registry.status', AgentCheck.OK, tags=tags)
@pytest.mark.usefixtures("patch_requests")
def test_submit_project_metrics(aggregator, harbor_check, harbor_api):
tags = ['tag1:val1', 'tag2']
harbor_check._submit_project_metrics(harbor_api, tags)
aggregator.assert_metric('harbor.projects.count', 2, tags=tags)
@pytest.mark.usefixtures("patch_requests")
def test_submit_disk_metrics(aggregator, harbor_check, harbor_api):
tags = ['tag1:val1', 'tag2']
harbor_check._submit_disk_metrics(harbor_api, tags)
aggregator.assert_metric('harbor.disk.free', 5e5, tags=tags)
aggregator.assert_metric('harbor.disk.total', 1e6, tags=tags)
@pytest.mark.usefixtures("patch_requests")
@pytest.mark.skipif(HARBOR_VERSION < VERSION_1_5, reason="The registry.read_only metric is submitted for Harbor 1.5+")
def test_submit_read_only_status(aggregator, harbor_check, harbor_api):
tags = ['tag1:val1', 'tag2']
harbor_check._submit_read_only_status(harbor_api, tags)
aggregator.assert_metric('harbor.registry.read_only', 0, tags=tags)
def test_api__make_get_request(harbor_api):
harbor_api.http = MagicMock()
harbor_api.http.get = MagicMock(return_value=MockResponse(json_data={'json': True}))
assert harbor_api._make_get_request('{base_url}/api/path') == {"json": True}
harbor_api.http.get = MagicMock(return_value=MockResponse(status_code=500))
with pytest.raises(HTTPError):
harbor_api._make_get_request('{base_url}/api/path')
def test_api__make_paginated_get_request(harbor_api):
expected_result = [{'item': i} for i in range(20)]
paginated_result = [[expected_result[i], expected_result[i + 1]] for i in range(0, len(expected_result) - 1, 2)]
values = []
for r in paginated_result:
values.append(MockResponse(json_data=r, headers={'link': 'Link: <unused_url>; rel=next; type="text/plain"'}))
values[-1].headers.pop('link')
harbor_api.http = MagicMock()
harbor_api.http.get = MagicMock(side_effect=values)
assert harbor_api._make_paginated_get_request('{base_url}/api/path') == expected_result
def test_api__make_post_request(harbor_api):
harbor_api.http = MagicMock()
harbor_api.http.post = MagicMock(return_value=MockResponse(json_data={'json': True}))
assert harbor_api._make_post_request('{base_url}/api/path') == {"json": True}
harbor_api.http.post = MagicMock(return_value=MockResponse(status_code=500))
with pytest.raises(HTTPError):
harbor_api._make_post_request('{base_url}/api/path')
|
source/vsm-dashboard/vsm_dashboard/test/test_data/swift_data.py | ramkrsna/virtual-storage-manager | 172 | 6692 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from vsm_dashboard.api import swift
from .utils import TestDataContainer
def data(TEST):
TEST.containers = TestDataContainer()
TEST.objects = TestDataContainer()
container_1 = swift.Container(dict(name=u"container_one\u6346"))
container_2 = swift.Container(dict(name=u"container_two\u6346"))
TEST.containers.add(container_1, container_2)
object_dict = {"name": u"test_object\u6346",
"content_type": u"text/plain",
"bytes": 128,
"last_modified": None,
"hash": u"object_hash"}
obj_dicts = [object_dict]
obj_data = "Fake Data"
for obj_dict in obj_dicts:
swift_object = swift.StorageObject(obj_dict,
container_1.name,
data=obj_data)
TEST.objects.add(swift_object)
|
examples/multimedia/mmimdb_MFM.py | kapikantzari/MultiBench | 148 | 6714 | import torch
import sys
import os
sys.path.append(os.getcwd())
from utils.helper_modules import Sequential2
from unimodals.common_models import Linear, MLP, MaxOut_MLP
from datasets.imdb.get_data import get_dataloader
from fusions.common_fusions import Concat
from objective_functions.objectives_for_supervised_learning import MFM_objective
from objective_functions.recon import sigmloss1d
from training_structures.Supervised_Learning import train, test
filename = "best_mfm.pt"
traindata, validdata, testdata = get_dataloader(
"../video/multimodal_imdb.hdf5", "../video/mmimdb", vgg=True, batch_size=128)
classes = 23
n_latent = 512
fuse = Sequential2(Concat(), MLP(2*n_latent, n_latent, n_latent//2)).cuda()
encoders = [MaxOut_MLP(512, 512, 300, n_latent, False).cuda(
), MaxOut_MLP(512, 1024, 4096, n_latent, False).cuda()]
head = Linear(n_latent//2, classes).cuda()
decoders = [MLP(n_latent, 600, 300).cuda(), MLP(n_latent, 2048, 4096).cuda()]
intermediates = [MLP(n_latent, n_latent//2, n_latent//2).cuda(),
MLP(n_latent, n_latent//2, n_latent//2).cuda()]
recon_loss = MFM_objective(2.0, [sigmloss1d, sigmloss1d], [
1.0, 1.0], criterion=torch.nn.BCEWithLogitsLoss())
train(encoders, fuse, head, traindata, validdata, 1000, decoders+intermediates, early_stop=True, task="multilabel",
objective_args_dict={"decoders": decoders, "intermediates": intermediates}, save=filename, optimtype=torch.optim.AdamW, lr=5e-3, weight_decay=0.01, objective=recon_loss)
print("Testing:")
model = torch.load(filename).cuda()
test(model, testdata, method_name="MFM", dataset="imdb",
criterion=torch.nn.BCEWithLogitsLoss(), task="multilabel")
|
test/__init__.py | donbowman/rdflib | 1,424 | 6741 | #
import os
TEST_DIR = os.path.abspath(os.path.dirname(__file__))
|
fine-tune/inference_embedding.py | LinHuiqing/nonparaSeq2seqVC_code | 199 | 6754 | import os
import numpy as np
import torch
import argparse
from hparams import create_hparams
from model import lcm
from train import load_model
from torch.utils.data import DataLoader
from reader import TextMelIDLoader, TextMelIDCollate, id2sp
from inference_utils import plot_data
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--checkpoint_path', type=str,
help='directory to save checkpoints')
parser.add_argument('--hparams', type=str,
required=False, help='comma separated name=value pairs')
args = parser.parse_args()
checkpoint_path=args.checkpoint_path
hparams = create_hparams(args.hparams)
model = load_model(hparams)
model.load_state_dict(torch.load(checkpoint_path)['state_dict'], strict=False)
_ = model.eval()
def gen_embedding(speaker):
training_list = hparams.training_list
train_set_A = TextMelIDLoader(training_list, hparams.mel_mean_std, hparams.speaker_A,
hparams.speaker_B,
shuffle=False,pids=[speaker])
collate_fn = TextMelIDCollate(lcm(hparams.n_frames_per_step_encoder,
hparams.n_frames_per_step_decoder))
train_loader_A = DataLoader(train_set_A, num_workers=1, shuffle=False,
sampler=None,
batch_size=1, pin_memory=False,
drop_last=True, collate_fn=collate_fn)
with torch.no_grad():
speaker_embeddings = []
for i,batch in enumerate(train_loader_A):
#print i
x, y = model.parse_batch(batch)
text_input_padded, mel_padded, text_lengths, mel_lengths, speaker_id = x
speaker_id, speaker_embedding = model.speaker_encoder.inference(mel_padded)
speaker_embedding = speaker_embedding.data.cpu().numpy()
speaker_embeddings.append(speaker_embedding)
speaker_embeddings = np.vstack(speaker_embeddings)
print(speaker_embeddings.shape)
if not os.path.exists('outdir/embeddings'):
os.makedirs('outdir/embeddings')
np.save('outdir/embeddings/%s.npy'%speaker, speaker_embeddings)
plot_data([speaker_embeddings],
'outdir/embeddings/%s.pdf'%speaker)
print('Generating embedding of %s ...'%hparams.speaker_A)
gen_embedding(hparams.speaker_A)
print('Generating embedding of %s ...'%hparams.speaker_B)
gen_embedding(hparams.speaker_B)
|
doc/samples/pos.py | m4ta1l/doit | 1,390 | 6821 | def task_pos_args():
def show_params(param1, pos):
print('param1 is: {0}'.format(param1))
for index, pos_arg in enumerate(pos):
print('positional-{0}: {1}'.format(index, pos_arg))
return {'actions':[(show_params,)],
'params':[{'name':'param1',
'short':'p',
'default':'default value'},
],
'pos_arg': 'pos',
'verbosity': 2,
}
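# Invocation sketch (assumed, mirroring the doit docs for pos_arg/params):
#
#   $ doit pos_args -p hello foo bar
#   param1 is: hello
#   positional-0: foo
#   positional-1: bar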
|
tests/test_flash_vl.py | andr1976/thermo | 380 | 6832 |
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2020, <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
import pytest
from fluids.core import C2K
import thermo
from chemicals.utils import *
from thermo import *
from fluids.numerics import *
from math import *
import json
import os
import numpy as np
def test_C2_C5_PR():
T, P = 300, 3e6
constants = ChemicalConstantsPackage(Tcs=[305.32, 469.7], Pcs=[4872000.0, 3370000.0],
omegas=[0.098, 0.251], Tms=[90.3, 143.15],
Tbs=[184.55, 309.21], CASs=['74-84-0', '109-66-0'],
names=['ethane', 'pentane'], MWs=[30.06904, 72.14878])
HeatCapacityGases = [HeatCapacityGas(poly_fit=(50.0, 1000.0, [7.115386645067898e-21, -3.2034776773408394e-17, 5.957592282542187e-14, -5.91169369931607e-11, 3.391209091071677e-08, -1.158730780040934e-05, 0.002409311277400987, -0.18906638711444712, 37.94602410497228])),
HeatCapacityGas(poly_fit=(200.0, 1000.0, [7.537198394065234e-22, -4.946850205122326e-18, 1.4223747507170372e-14, -2.3451318313798008e-11, 2.4271676873997662e-08, -1.6055220805830093e-05, 0.006379734000450042, -1.0360272314628292, 141.84695243411866]))]
correlations = PropertyCorrelationsPackage(constants, HeatCapacityGases=HeatCapacityGases)
zs = ws_to_zs(MWs=constants.MWs, ws=[.5, .5])
eos_kwargs = {'Pcs': constants.Pcs, 'Tcs': constants.Tcs, 'omegas': constants.omegas}
gas = CEOSGas(PRMIX, eos_kwargs, HeatCapacityGases=HeatCapacityGases, T=T, P=P, zs=zs)
liq = CEOSLiquid(PRMIX, eos_kwargs, HeatCapacityGases=HeatCapacityGases, T=T, P=P, zs=zs)
flasher = FlashVL(constants, correlations, liquid=liq, gas=gas)
    # Check there are two phases near the dew point. Don't bother checking the composition most of the time.
    # When this test was written, the case was still valid for a dP of 0.00000001 Pa
# Issue here was that (sum_criteria < 1e-7) was the check in the stability test result interpretation
# Fixed it by decreasing the tolerance 10x (1e-8)
res = flasher.flash(P=5475649.470049857+15, T=123.3+273.15, zs=zs)
assert_close1d(res.betas, [0.9999995457838572, 4.5421614280893863e-07], rtol=1e-4)
assert_close1d(res.gas.zs, [0.7058337751720506, 0.29416622482794935], rtol=1e-4)
assert_close1d(res.liquid0.zs, [0.49517964670906095, 0.504820353290939], rtol=1e-4)
# # In this case, the tolerance had to be decreased 10x more - to 1e-9! Triggered at a dP of 0.5
res = flasher.flash(P=5475649.470049857+0.5, T=123.3+273.15, zs=zs)
assert_close1d(res.betas, [0.999999984859061, 1.5140938947055815e-08], rtol=1e-4)
assert_close1d(res.gas.zs, [0.7058336826506021, 0.29416631734939785])
assert_close1d(res.liquid0.zs, [0.4951780663825745, 0.5048219336174254])
# # This one is too close to the border - the VF from SS is less than 0,
# # but if the tolerance is increased, it is positive (and should be)
res = flasher.flash(P=5475649.470049857+0.001, T=123.3+273.15, zs=zs)
assert_close1d(res.betas, [0.9999999999697144, 3.028555184414472e-11], rtol=3e-3)
assert_close1d(res.gas.zs, [0.7058336794959247, 0.29416632050407526])
assert_close1d(res.liquid0.zs, [0.49517801199759515, 0.5048219880024049])
# This one is presently identified as a LL... just check the number of phases
assert flasher.flash(zs=zs, P=6.615e6, T=386).phase_count == 2
def test_flash_TP_K_composition_idependent_unhappiness():
constants = ChemicalConstantsPackage(Tcs=[508.1, 536.2, 512.5], Pcs=[4700000.0, 5330000.0, 8084000.0], omegas=[0.309, 0.21600000000000003, 0.5589999999999999],
MWs=[58.07914, 119.37764000000001, 32.04186], CASs=['67-64-1', '67-66-3', '67-56-1'], names=['acetone', 'chloroform', 'methanol'])
HeatCapacityGases = [HeatCapacityGas(poly_fit=(200.0, 1000.0, [-1.3320002425347943e-21, 6.4063345232664645e-18, -1.251025808150141e-14, 1.2265314167534311e-11, -5.535306305509636e-09, -4.32538332013644e-08, 0.0010438724775716248, -0.19650919978971002, 63.84239495676709])),
HeatCapacityGas(poly_fit=(200.0, 1000.0, [1.5389278550737367e-21, -8.289631533963465e-18, 1.9149760160518977e-14, -2.470836671137373e-11, 1.9355882067011222e-08, -9.265600540761629e-06, 0.0024825718663005762, -0.21617464276832307, 48.149539665907696])),
HeatCapacityGas(poly_fit=(50.0, 1000.0, [2.3511458696647882e-21, -9.223721411371584e-18, 1.3574178156001128e-14, -8.311274917169928e-12, 4.601738891380102e-10, 1.78316202142183e-06, -0.0007052056417063217, 0.13263597297874355, 28.44324970462924]))]
VolumeLiquids = [VolumeLiquid(poly_fit=(178.51, 498.1, [6.564241965071999e-23, -1.6568522275506375e-19, 1.800261692081815e-16, -1.0988731296761538e-13, 4.118691518070104e-11, -9.701938804617744e-09, 1.4022905458596618e-06, -0.00011362923883050033, 0.0040109650220160956])),
VolumeLiquid(poly_fit=(209.63, 509.5799999999999, [2.034047306563089e-23, -5.45567626310959e-20, 6.331811062990084e-17, -4.149759318710192e-14, 1.6788970104955462e-11, -4.291900093120011e-09, 6.769385838271721e-07, -6.0166473220815445e-05, 0.0023740769479069054])),
VolumeLiquid(poly_fit=(175.7, 502.5, [3.5725079384600736e-23, -9.031033742820083e-20, 9.819637959370411e-17, -5.993173551565636e-14, 2.2442465416964825e-11, -5.27776114586072e-09, 7.610461006178106e-07, -6.148574498547711e-05, 0.00216398089328537])),]
VaporPressures = [VaporPressure(exp_poly_fit=(178.51, 508.09000000000003, [-1.3233111115238975e-19, 4.2217134794609376e-16, -5.861832547132719e-13, 4.6488594950801467e-10, -2.3199079844570237e-07, 7.548290741523459e-05, -0.015966705328994194, 2.093003523977292, -125.39006100979816])),
VaporPressure(exp_poly_fit=(207.15, 536.4, [-8.714046553871422e-20, 2.910491615051279e-16, -4.2588796020294357e-13, 3.580003116042944e-10, -1.902612144361103e-07, 6.614096470077095e-05, -0.01494801055978542, 2.079082613726621, -130.24643185169472])),
VaporPressure(exp_poly_fit=(175.7, 512.49, [-1.446088049406911e-19, 4.565038519454878e-16, -6.278051259204248e-13, 4.935674274379539e-10, -2.443464113936029e-07, 7.893819658700523e-05, -0.016615779444332356, 2.1842496316772264, -134.19766175812708]))]
liquid = GibbsExcessLiquid(VaporPressures=VaporPressures, VolumeLiquids=VolumeLiquids,
HeatCapacityGases=HeatCapacityGases, use_Poynting=True,
use_phis_sat=False)
correlations = PropertyCorrelationsPackage(constants=constants, skip_missing=True, HeatCapacityGases=HeatCapacityGases,
VolumeLiquids=VolumeLiquids, VaporPressures=VaporPressures)
T, P = 350.0, 1e6
zs = [0.2, 0.0, 0.8]
eos_kwargs = {'Pcs': constants.Pcs, 'Tcs': constants.Tcs, 'omegas':constants.omegas}
gas = IdealGas(HeatCapacityGases=HeatCapacityGases, T=T, P=P, zs=zs)
flashN = FlashVLN(constants, correlations, liquids=[liquid], gas=gas)
# Low - all K under zero
res = flashN.flash(T=T, P=P, zs=zs)
assert_close(res.rho_mass(), 733.1047159397776)
assert 1 == res.phase_count
assert res.liquid0 is not None
# High - all K above zero
res = flashN.flash(T=430, P=1e4, zs=zs)
assert 1 == res.phase_count
assert res.gas is not None
assert_close(res.rho_mass(), 0.10418751067559757)
# One K value is under 1, rest are above - but that component has mole frac of zero
res = flashN.flash(T=420, P=1e4, zs=zs)
assert 1 == res.phase_count
assert res.gas is not None
# phis_at for liquids was broken, breaking this calculation
res = flashN.flash(T=285.5, P=1e4, zs=zs)
assert_close1d(res.betas, [0.21860038882559643, 0.7813996111744036])
assert res.phase_count == 2
# Two cases RR was working on Ks less than 1, and coming up with a made up VF
# Need to check Ks first
res = flashN.flash(T=300.0000, P=900000.0000, zs=[0.5, 0.1, 0.4, 0.0],)
assert 1 == res.phase_count
assert res.gas is None
res = flashN.flash(T=300.0000, P=900000.0000, zs=[.5, 0, 0, .5])
assert 1 == res.phase_count
assert res.gas is None
def test_flash_combustion_products():
P = 1e5
T = 794.5305048838037
zs = [0.5939849621247668, 0.112781954982051, 0.0676691730155464, 0.2255639098776358]
constants = ChemicalConstantsPackage(atomss=[{'N': 2}, {'C': 1, 'O': 2}, {'O': 2}, {'H': 2, 'O': 1}], CASs=['7727-37-9', '124-38-9', '7782-44-7', '7732-18-5'], MWs=[28.0134, 44.0095, 31.9988, 18.01528], names=['nitrogen', 'carbon dioxide', 'oxygen', 'water'], omegas=[0.04, 0.2252, 0.021, 0.344], Pcs=[3394387.5, 7376460.0, 5042945.25, 22048320.0], Tbs=[77.355, 194.67, 90.18799999999999, 373.124], Tcs=[126.2, 304.2, 154.58, 647.14], Tms=[63.15, 216.65, 54.36, 273.15])
correlations = PropertyCorrelationsPackage(constants=constants, skip_missing=True,
HeatCapacityGases=[HeatCapacityGas(poly_fit=(50.0, 1000.0, [-6.496329615255804e-23, 2.1505678500404716e-19, -2.2204849352453665e-16, 1.7454757436517406e-14, 9.796496485269412e-11, -4.7671178529502835e-08, 8.384926355629239e-06, -0.0005955479316119903, 29.114778709934264])),
HeatCapacityGas(poly_fit=(50.0, 1000.0, [-3.1115474168865828e-21, 1.39156078498805e-17, -2.5430881416264243e-14, 2.4175307893014295e-11, -1.2437314771044867e-08, 3.1251954264658904e-06, -0.00021220221928610925, 0.000884685506352987, 29.266811602924644])),
HeatCapacityGas(poly_fit=(50.0, 1000.0, [7.682842888382947e-22, -3.3797331490434755e-18, 6.036320672021355e-15, -5.560319277907492e-12, 2.7591871443240986e-09, -7.058034933954475e-07, 9.350023770249747e-05, -0.005794412013028436, 29.229215579932934])),
HeatCapacityGas(poly_fit=(50.0, 1000.0, [5.543665000518528e-22, -2.403756749600872e-18, 4.2166477594350336e-15, -3.7965208514613565e-12, 1.823547122838406e-09, -4.3747690853614695e-07, 5.437938301211039e-05, -0.003220061088723078, 33.32731489750759]))])
kijs = [[0.0, -0.0122, -0.0159, 0.0], [-0.0122, 0.0, 0.0, 0.0952], [-0.0159, 0.0, 0.0, 0.0], [0.0, 0.0952, 0.0, 0.0]]
eos_kwargs = {'Pcs': constants.Pcs, 'Tcs': constants.Tcs, 'omegas': constants.omegas, 'kijs': kijs}
gas = CEOSGas(PRMIX, eos_kwargs, HeatCapacityGases=correlations.HeatCapacityGases, T=T, P=P, zs=zs)
liq = CEOSLiquid(PRMIX, eos_kwargs, HeatCapacityGases=correlations.HeatCapacityGases, T=T, P=P, zs=zs)
flasher = FlashVL(constants, correlations, liquid=liq, gas=gas)
res = flasher.flash(T=T, P=P, zs=zs)
assert res.gas
assert res.phase == 'V'
def test_bubble_T_PR_VL():
# Last point at 8e6 Pa not yet found.
constants = ChemicalConstantsPackage(CASs=['124-38-9', '110-54-3'], MWs=[44.0095, 86.17536], names=['carbon dioxide', 'hexane'], omegas=[0.2252, 0.2975], Pcs=[7376460.0, 3025000.0], Tbs=[194.67, 341.87], Tcs=[304.2, 507.6], Tms=[216.65, 178.075])
correlations = PropertyCorrelationsPackage(constants=constants, skip_missing=True,
HeatCapacityGases=[HeatCapacityGas(poly_fit=(50.0, 1000.0, [-3.1115474168865828e-21, 1.39156078498805e-17, -2.5430881416264243e-14, 2.4175307893014295e-11, -1.2437314771044867e-08, 3.1251954264658904e-06, -0.00021220221928610925, 0.000884685506352987, 29.266811602924644])),
HeatCapacityGas(poly_fit=(200.0, 1000.0, [1.3740654453881647e-21, -8.344496203280677e-18, 2.2354782954548568e-14, -3.4659555330048226e-11, 3.410703030634579e-08, -2.1693611029230923e-05, 0.008373280796376588, -1.356180511425385, 175.67091124888998]))])
zs = [.5, .5]
T = 300.0
P = 1e6
eos_kwargs = {'Pcs': constants.Pcs, 'Tcs': constants.Tcs, 'omegas': constants.omegas}
gas = CEOSGas(PRMIX, eos_kwargs, HeatCapacityGases=correlations.HeatCapacityGases, T=T, P=P, zs=zs)
liq = CEOSLiquid(PRMIX, eos_kwargs, HeatCapacityGases=correlations.HeatCapacityGases, T=T, P=P, zs=zs)
flasher = FlashVL(constants, correlations, liquid=liq, gas=gas)
res = flasher.flash(P=7.93e6, VF=0, zs=zs)
assert_close(res.T, 419.0621213529388, rtol=1e-6)
def test_PR_four_bubble_dew_cases_VL():
zs=[.5, .5]
T=300.0
P=1E6
constants = ChemicalConstantsPackage(CASs=['98-01-1', '98-00-0'], MWs=[96.08406000000001, 98.09994], names=['2-furaldehyde', 'furfuryl alcohol'], omegas=[0.4522, 0.7340000000000001], Pcs=[5510000.0, 5350000.0], Tbs=[434.65, 441.15], Tcs=[670.0, 632.0], Tms=[235.9, 250.35])
correlations = PropertyCorrelationsPackage(constants=constants, skip_missing=True,
HeatCapacityGases=[HeatCapacityGas(poly_fit=(298, 1000, [4.245751608816354e-21, -2.470461837781697e-17, 6.221823690784335e-14, -8.847967216702641e-11, 7.749899297737877e-08, -4.250059888737765e-05, 0.013882452355067994, -2.1404621487165327, 185.84988012691903])),
HeatCapacityGas(poly_fit=(250.35, 632.0, [-9.534610090167143e-20, 3.4583416772306854e-16, -5.304513883184021e-13, 4.410937690059558e-10, -2.0905505018557675e-07, 5.20661895325169e-05, -0.004134468659764938, -0.3746374641720497, 114.90130267531933]))])
eos_kwargs = {'Pcs': constants.Pcs, 'Tcs': constants.Tcs, 'omegas': constants.omegas}
gas = CEOSGas(PRMIX, eos_kwargs, HeatCapacityGases=correlations.HeatCapacityGases, T=T, P=P, zs=zs)
liq = CEOSLiquid(PRMIX, eos_kwargs, HeatCapacityGases=correlations.HeatCapacityGases, T=T, P=P, zs=zs)
flasher = FlashVL(constants, correlations, liquid=liq, gas=gas)
assert_close(flasher.flash(P=1e6, VF=0, zs=zs).T, 539.1838522423529, rtol=1e-6)
assert_close(flasher.flash(P=1e6, VF=1, zs=zs).T, 540.2081697501809, rtol=1e-6)
assert_close(flasher.flash(T=600.0, VF=0, zs=zs).P, 2766476.7473238464, rtol=1e-6)
assert_close(flasher.flash(T=600.0, VF=1, zs=zs).P, 2702616.6490743402, rtol=1e-6)
def test_C1_C10_PT_flash_VL():
IDs = ['methane', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10']
zs=[.1]*10
T=300.0
P=1E5
constants = ChemicalConstantsPackage(CASs=['74-82-8', '74-84-0', '74-98-6', '106-97-8', '109-66-0', '110-54-3', '142-82-5', '111-65-9', '111-84-2', '124-18-5'], MWs=[16.04246, 30.06904, 44.09562, 58.1222, 72.14878, 86.17536, 100.20194000000001, 114.22852, 128.2551, 142.28168], names=['methane', 'ethane', 'propane', 'butane', 'pentane', 'hexane', 'heptane', 'octane', 'nonane', 'decane'], omegas=[0.008, 0.098, 0.152, 0.193, 0.251, 0.2975, 0.3457, 0.39399999999999996, 0.444, 0.49], Pcs=[4599000.0, 4872000.0, 4248000.0, 3796000.0, 3370000.0, 3025000.0, 2740000.0, 2490000.0, 2290000.0, 2110000.0], Tbs=[111.65, 184.55, 231.04, 272.65, 309.21, 341.87, 371.53, 398.77, 423.95, 447.25], Tcs=[190.56400000000002, 305.32, 369.83, 425.12, 469.7, 507.6, 540.2, 568.7, 594.6, 611.7], Tms=[90.75, 90.3, 85.5, 135.05, 143.15, 178.075, 182.15, 216.3, 219.9, 243.225])
correlations = PropertyCorrelationsPackage(constants=constants, skip_missing=True,
HeatCapacityGases=[HeatCapacityGas(poly_fit=(50.0, 1000.0, [6.7703235945157e-22, -2.496905487234175e-18, 3.141019468969792e-15, -8.82689677472949e-13, -1.3709202525543862e-09, 1.232839237674241e-06, -0.0002832018460361874, 0.022944239587055416, 32.67333514157593])),
HeatCapacityGas(poly_fit=(50.0, 1000.0, [7.115386645067898e-21, -3.2034776773408394e-17, 5.957592282542187e-14, -5.91169369931607e-11, 3.391209091071677e-08, -1.158730780040934e-05, 0.002409311277400987, -0.18906638711444712, 37.94602410497228])),
HeatCapacityGas(poly_fit=(50.0, 1000.0, [7.008452174279456e-22, -1.7927920989992578e-18, 1.1218415948991092e-17, 4.23924157032547e-12, -5.279987063309569e-09, 2.5119646468572195e-06, -0.0004080663744697597, 0.1659704314379956, 26.107282495650367])),
HeatCapacityGas(poly_fit=(200.0, 1000.0, [-2.608494166540452e-21, 1.3127902917979555e-17, -2.7500977814441112e-14, 3.0563338307642794e-11, -1.866070373718589e-08, 5.4505831355984375e-06, -0.00024022110003950325, 0.04007078628096955, 55.70646822218319])),
HeatCapacityGas(poly_fit=(200.0, 1000.0, [7.537198394065234e-22, -4.946850205122326e-18, 1.4223747507170372e-14, -2.3451318313798008e-11, 2.4271676873997662e-08, -1.6055220805830093e-05, 0.006379734000450042, -1.0360272314628292, 141.84695243411866])),
HeatCapacityGas(poly_fit=(200.0, 1000.0, [1.3740654453881647e-21, -8.344496203280677e-18, 2.2354782954548568e-14, -3.4659555330048226e-11, 3.410703030634579e-08, -2.1693611029230923e-05, 0.008373280796376588, -1.356180511425385, 175.67091124888998])),
HeatCapacityGas(poly_fit=(200.0, 1000.0, [-1.4046935863496273e-21, 5.8024177500786575e-18, -7.977871529098155e-15, 7.331444047402207e-13, 9.954400606484495e-09, -1.2112107913343475e-05, 0.0062964696142858104, -1.0843106737278825, 173.87692850911935])),
HeatCapacityGas(poly_fit=(200.0, 1000.0, [-1.069661592422583e-22, -1.2992882995593864e-18, 8.808066659263286e-15, -2.1690080247294972e-11, 2.8519221306107026e-08, -2.187775092823544e-05, 0.009432620102532702, -1.5719488702446165, 217.60587499269303])),
HeatCapacityGas(poly_fit=(200.0, 1000.0, [6.513870466670624e-22, -5.318305817618858e-18, 1.8015815307749625e-14, -3.370046452151828e-11, 3.840755097595374e-08, -2.7203677889897072e-05, 0.011224516822410626, -1.842793858054514, 247.3628627781443])),
HeatCapacityGas(poly_fit=(200.0, 1000.0, [-1.702672546011891e-21, 6.6751002084997075e-18, -7.624102919104147e-15, -4.071140876082743e-12, 1.863822577724324e-08, -1.9741705032236747e-05, 0.009781408958916831, -1.6762677829939379, 252.8975930305735]))])
eos_kwargs = {'Pcs': constants.Pcs, 'Tcs': constants.Tcs, 'omegas': constants.omegas}
gas = CEOSGas(PRMIX, eos_kwargs, HeatCapacityGases=correlations.HeatCapacityGases, T=T, P=P, zs=zs)
liq = CEOSLiquid(PRMIX, eos_kwargs, HeatCapacityGases=correlations.HeatCapacityGases, T=T, P=P, zs=zs)
flasher = FlashVL(constants, correlations, liquid=liq, gas=gas)
res = flasher.flash(T=T, P=P, zs=zs)
assert_close(res.VF, 0.3933480634014041, rtol=1e-5)
def test_combustion_products():
from chemicals.combustion import fuel_air_spec_solver
IDs = ['methane', 'carbon dioxide', 'ethane', 'propane',
'isobutane', 'butane', '2-methylbutane', 'pentane',
'hexane', 'nitrogen', 'oxygen', 'water']
T = C2K(15)
P = 1e5
zs_fuel = [0.9652228316853225, 0.0059558310220860665, 0.018185509193506685, 0.004595963476244076,
0.0009769695915451998, 0.001006970610302194, 0.000472984762445398, 0.0003239924667435125,
0.0006639799746946288, 0.002594967217109564, 0.0, 0.0]
zs_fuel = normalize(zs_fuel)
zs_air = [0.0]*9 + [0.79, 0.21] + [0.0]
constants, properties = ChemicalConstantsPackage.from_IDs(IDs)
combustion = fuel_air_spec_solver(zs_air=zs_air, zs_fuel=zs_fuel, CASs=constants.CASs,
atomss=constants.atomss, n_fuel=1.0, O2_excess=0.1)
zs = combustion['zs_out']
eos_kwargs = {'Pcs': constants.Pcs, 'Tcs': constants.Tcs, 'omegas': constants.omegas}
gas = CEOSGas(PRMIX, eos_kwargs, T=T, P=P, zs=zs, HeatCapacityGases=properties.HeatCapacityGases)
liquid = CEOSLiquid(PRMIX, eos_kwargs, T=T, P=P, zs=zs, HeatCapacityGases=properties.HeatCapacityGases)
flasher = FlashVL(constants, properties, liquid=liquid, gas=gas)
res = flasher.flash(T=400.0, P=1e5, zs=zs)
assert res.phase_count == 1
assert res.gas is not None
def test_furfuryl_alcohol_high_TP():
# Legacy bug, don't even remember what the original issue was
constants = ChemicalConstantsPackage(MWs=[98.09994, 18.01528], Tcs=[632.0, 647.14], Pcs=[5350000.0, 22048320.0], omegas=[0.734, 0.344], names=['furfuryl alcohol', 'water'], CASs=['98-00-0', '7732-18-5'])
correlations = PropertyCorrelationsPackage(constants=constants, skip_missing=True,
HeatCapacityGases=[HeatCapacityGas(load_data=False, poly_fit=(250.35, 632.0, [-9.534610090167143e-20, 3.4583416772306854e-16, -5.304513883184021e-13, 4.410937690059558e-10, -2.0905505018557675e-07, 5.20661895325169e-05, -0.004134468659764938, -0.3746374641720497, 114.90130267531933])),
HeatCapacityGas(load_data=False, poly_fit=(50.0, 1000.0, [5.543665000518528e-22, -2.403756749600872e-18, 4.2166477594350336e-15, -3.7965208514613565e-12, 1.823547122838406e-09, -4.3747690853614695e-07, 5.437938301211039e-05, -0.003220061088723078, 33.32731489750759]))])
eos_kwargs = dict(Tcs=constants.Tcs, Pcs=constants.Pcs, omegas=constants.omegas)
zs = [0.4444445555555555, 1-0.4444445555555555]
T, P = 5774.577777777778, 220483199.99999997
gas = CEOSGas(eos_class=PRMIX, eos_kwargs=eos_kwargs, T=T, P=P, zs=zs, HeatCapacityGases=correlations.HeatCapacityGases)
liquid = CEOSLiquid(eos_class=PRMIX, eos_kwargs=eos_kwargs, T=T, P=P, zs=zs, HeatCapacityGases=correlations.HeatCapacityGases)
flasher = FlashVL(constants, correlations, liquid=liquid, gas=gas)
assert_close(flasher.flash(T=T, P=P, zs=zs).rho_mass(), 227.52709151903954)
def test_flash_GibbsExcessLiquid_ideal_Psat():
# Binary water-ethanol
T = 230.0
P = 1e5
zs = [.4, .6]
MWs = [18.01528, 46.06844]
Tcs = [647.086, 514.7]
Pcs = [22048320.0, 6137000.0]
omegas = [0.344, 0.635]
VaporPressures = [VaporPressure(extrapolation='DIPPR101_ABC|DIPPR101_ABC', exp_poly_fit=(273.17, 647.086, [-2.8478502840358144e-21, 1.7295186670575222e-17, -4.034229148562168e-14, 5.0588958391215855e-11, -3.861625996277003e-08, 1.886271475957639e-05, -0.005928371869421494, 1.1494956887882308, -96.74302379151317])),
VaporPressure(extrapolation='DIPPR101_ABC|DIPPR101_ABC', exp_poly_fit=(159.11, 514.7, [-2.3617526481119e-19, 7.318686894378096e-16, -9.835941684445551e-13, 7.518263303343784e-10, -3.598426432676194e-07, 0.00011171481063640762, -0.022458952185007635, 2.802615041941912, -166.43524219017118]))]
HeatCapacityGases = [HeatCapacityGas(poly_fit=(50.0, 1000.0, [5.543665000518528e-22, -2.403756749600872e-18, 4.2166477594350336e-15, -3.7965208514613565e-12, 1.823547122838406e-09, -4.3747690853614695e-07, 5.437938301211039e-05, -0.003220061088723078, 33.32731489750759])),
HeatCapacityGas(poly_fit=(50.0, 1000.0, [-1.162767978165682e-20, 5.4975285700787494e-17, -1.0861242757337942e-13, 1.1582703354362728e-10, -7.160627710867427e-08, 2.5392014654765875e-05, -0.004732593693568646, 0.5072291035198603, 20.037826650765965]))]
VolumeLiquids = [VolumeLiquid(poly_fit=(273.17, 637.096, [9.00307261049824e-24, -3.097008950027417e-20, 4.608271228765265e-17, -3.8726692841874345e-14, 2.0099220218891486e-11, -6.596204729785676e-09, 1.3368112879131157e-06, -0.00015298762503607717, 0.007589247005014652]),
Psat=VaporPressures[0], Tc=Tcs[0], Pc=Pcs[0], omega=omegas[0]),
VolumeLiquid(poly_fit=(159.11, 504.71000000000004, [5.388587987308587e-23, -1.331077476340645e-19, 1.4083880805283782e-16, -8.327187308842775e-14, 3.006387047487587e-11, -6.781931902982022e-09, 9.331209920256822e-07, -7.153268618320437e-05, 0.0023871634205665524]),
Psat=VaporPressures[1], Tc=Tcs[1], Pc=Pcs[1], omega=omegas[1])]
EnthalpyVaporizations = [EnthalpyVaporization(Tc=647.14, poly_fit_ln_tau=(273.17, 647.095, 647.14, [0.010220675607316746, 0.5442323619614213, 11.013674729940819, 110.72478547661254, 591.3170172192005, 1716.4863395285283, 4063.5975524922624, 17960.502354189244, 53916.28280689388])),
EnthalpyVaporization(Tc=514.0, poly_fit_ln_tau=(159.11, 513.9999486, 514.0, [-0.002197958699297133, -0.1583773493009195, -4.716256555877727, -74.79765793302774, -675.8449382004112, -3387.5058752252276, -7531.327682252346, 5111.75264050548, 50774.16034043739]))]
constants = ChemicalConstantsPackage(Tcs=Tcs, Pcs=Pcs, omegas=omegas, MWs=MWs, CASs=['7732-18-5', '64-17-5'])
correlations = PropertyCorrelationsPackage(constants, HeatCapacityGases=HeatCapacityGases, EnthalpyVaporizations=EnthalpyVaporizations,
VolumeLiquids=VolumeLiquids, VaporPressures=VaporPressures, skip_missing=True)
liquid = GibbsExcessLiquid(VaporPressures=VaporPressures,
HeatCapacityGases=HeatCapacityGases,
VolumeLiquids=VolumeLiquids,
EnthalpyVaporizations=EnthalpyVaporizations,
caloric_basis='Psat', equilibrium_basis='Psat',
T=T, P=P, zs=zs)
gas = IdealGas(T=T, P=P, zs=zs, HeatCapacityGases=HeatCapacityGases)
flasher = FlashVL(constants, correlations, liquid=liquid, gas=gas)
# All points were missing because G_dep was missing
res = flasher.flash(T=300, P=1e5, zs=zs)
assert res.liquid_count == 1
# Failing when two K values were under 1e-10
res = flasher.flash(T=100, P=1e5, zs=zs)
assert res.phase_count == 1
assert res.liquid_count == 1
    # Wilson guesses are hard zeros
res = flasher.flash(T=5, P=1e5, zs=zs)
assert res.phase_count == 1
assert res.liquid_count == 1
# Wilson guesses inf, nan, and all zero
res = flasher.flash(T=6.2, P=5e4, zs=zs)
assert res.phase_count == 1
assert res.liquid_count == 1
# One (but not both) fugacity became zero
res = flasher.flash(T=8.4, P=1e-5, zs=zs)
assert res.phase_count == 1
assert res.liquid_count == 1
# Vapor fraction flashes
for VF_value in (0.0, 1e-5, .3, .5, .7, 1-1e-5, 1.0):
VF = flasher.flash(T=T, VF=VF_value, zs=zs)
check = flasher.flash(T=T, P=VF.P, zs=zs)
assert_close(VF.VF, check.VF, rtol=1e-9)
    # Not exactly sure where the numerical challenge is occurring, but this is to be expected.
# The tolerance decays at very small numbers
for VF_value in (1e-7, 1e-8, 1-1e-7, 1-1e-8):
VF = flasher.flash(T=T, VF=VF_value, zs=zs)
check = flasher.flash(T=T, P=VF.P, zs=zs)
assert_close(VF.VF, check.VF, rtol=1e-5)
def test_flash_GibbsExcessLiquid_ideal_PsatPoynting():
# Binary water-ethanol
T = 230.0
P = 1e5
zs = [.4, .6]
MWs = [18.01528, 46.06844]
Tcs = [647.086, 514.7]
Pcs = [22048320.0, 6137000.0]
omegas = [0.344, 0.635]
VaporPressures = [VaporPressure(exp_poly_fit=(273.17, 647.086, [-2.8478502840358144e-21, 1.7295186670575222e-17, -4.034229148562168e-14, 5.0588958391215855e-11, -3.861625996277003e-08, 1.886271475957639e-05, -0.005928371869421494, 1.1494956887882308, -96.74302379151317])),
VaporPressure(exp_poly_fit=(159.11, 514.7, [-2.3617526481119e-19, 7.318686894378096e-16, -9.835941684445551e-13, 7.518263303343784e-10, -3.598426432676194e-07, 0.00011171481063640762, -0.022458952185007635, 2.802615041941912, -166.43524219017118]))]
HeatCapacityGases = [HeatCapacityGas(poly_fit=(50.0, 1000.0, [5.543665000518528e-22, -2.403756749600872e-18, 4.2166477594350336e-15, -3.7965208514613565e-12, 1.823547122838406e-09, -4.3747690853614695e-07, 5.437938301211039e-05, -0.003220061088723078, 33.32731489750759])),
HeatCapacityGas(poly_fit=(50.0, 1000.0, [-1.162767978165682e-20, 5.4975285700787494e-17, -1.0861242757337942e-13, 1.1582703354362728e-10, -7.160627710867427e-08, 2.5392014654765875e-05, -0.004732593693568646, 0.5072291035198603, 20.037826650765965]))]
VolumeLiquids = [VolumeLiquid(poly_fit=(273.17, 637.096, [9.00307261049824e-24, -3.097008950027417e-20, 4.608271228765265e-17, -3.8726692841874345e-14, 2.0099220218891486e-11, -6.596204729785676e-09, 1.3368112879131157e-06, -0.00015298762503607717, 0.007589247005014652]),
Psat=VaporPressures[0], Tc=Tcs[0], Pc=Pcs[0], omega=omegas[0]),
VolumeLiquid(poly_fit=(159.11, 504.71000000000004, [5.388587987308587e-23, -1.331077476340645e-19, 1.4083880805283782e-16, -8.327187308842775e-14, 3.006387047487587e-11, -6.781931902982022e-09, 9.331209920256822e-07, -7.153268618320437e-05, 0.0023871634205665524]),
Psat=VaporPressures[1], Tc=Tcs[1], Pc=Pcs[1], omega=omegas[1])]
EnthalpyVaporizations = [EnthalpyVaporization(Tc=647.14, poly_fit_ln_tau=(273.17, 647.095, 647.14, [0.010220675607316746, 0.5442323619614213, 11.013674729940819, 110.72478547661254, 591.3170172192005, 1716.4863395285283, 4063.5975524922624, 17960.502354189244, 53916.28280689388])),
EnthalpyVaporization(Tc=514.0, poly_fit_ln_tau=(159.11, 513.9999486, 514.0, [-0.002197958699297133, -0.1583773493009195, -4.716256555877727, -74.79765793302774, -675.8449382004112, -3387.5058752252276, -7531.327682252346, 5111.75264050548, 50774.16034043739]))]
constants = ChemicalConstantsPackage(Tcs=Tcs, Pcs=Pcs, omegas=omegas, MWs=MWs, CASs=['7732-18-5', '64-17-5'])
correlations = PropertyCorrelationsPackage(constants, HeatCapacityGases=HeatCapacityGases, EnthalpyVaporizations=EnthalpyVaporizations,
VolumeLiquids=VolumeLiquids, VaporPressures=VaporPressures, skip_missing=True)
eoss = [PR(Tc=Tcs[0], Pc=Pcs[0], omega=omegas[0], T=T, P=P),
PR(Tc=Tcs[1], Pc=Pcs[1], omega=omegas[1], T=T, P=P)]
liquid = GibbsExcessLiquid(VaporPressures=VaporPressures,
HeatCapacityGases=HeatCapacityGases,
VolumeLiquids=VolumeLiquids,
EnthalpyVaporizations=EnthalpyVaporizations,
caloric_basis='PhiSat', equilibrium_basis='PhiSat',
eos_pure_instances=eoss,
T=T, P=P, zs=zs)
gas = IdealGas(T=T, P=P, zs=zs, HeatCapacityGases=HeatCapacityGases)
flasher = FlashVL(constants, correlations, liquid=liquid, gas=gas)
    # This was failing in PyPy for a while but not in CPython
res = flasher.flash(T=15, P=1e5, zs=zs)
assert res.phase_count == 1
assert res.liquid_count == 1
|
tessera-server/tessera/views_api.py | Dimas625/tessera | 379 | 6844 | <reponame>Dimas625/tessera<filename>tessera-server/tessera/views_api.py
# -*- mode:python -*-
import flask
import json
import logging
from datetime import datetime
import inflection
from functools import wraps
from flask import request, url_for
from werkzeug.exceptions import HTTPException
from .client.api.model import *
from . import database
from . import helpers
from .application import db
mgr = database.DatabaseManager(db)
log = logging.getLogger(__name__)
api = flask.Blueprint('api', __name__)
# =============================================================================
# API Helpers
# =============================================================================
def route_api(application, *args, **kwargs):
def decorator(fn):
@application.route(*args, **kwargs)
@wraps(fn)
def wrapper(*args, **kwargs):
headers = None
status_code = 200
try:
value = fn(*args, **kwargs)
except HTTPException as e:
raise helpers.set_exception_response(e)
if isinstance(value, tuple):
if len(value) > 2:
headers = value[2]
status_code = value[1]
value = value[0]
return helpers.jsonify(value, status_code, headers)
return fn
return decorator
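# Illustrative note (added; not part of the original module): endpoints wrapped
# with route_api may return a bare value, a (value, status_code) pair, or a
# (value, status_code, headers) triple, which the wrapper above unpacks before
# calling helpers.jsonify -- the same convention dashboard_create uses below.
# A hypothetical endpoint would look like:
#
#     @route_api(api, '/ping/')
#     def ping():
#         return {'pong': True}, 200, {'X-Example': 'demo'}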
def _dashboard_sort_column():
"""Return a SQLAlchemy column descriptor to sort results by, based on
the 'sort' and 'order' request parameters.
"""
columns = {
'created' : database.DashboardRecord.creation_date,
'modified' : database.DashboardRecord.last_modified_date,
'category' : database.DashboardRecord.category,
'id' : database.DashboardRecord.id,
'title' : database.DashboardRecord.title
}
colname = helpers.get_param('sort', 'created')
order = helpers.get_param('order')
column = database.DashboardRecord.creation_date
if colname in columns:
column = columns[colname]
if order == 'desc' or order == u'desc':
return column.desc()
else:
return column.asc()
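# Illustrative example (added comment): a request such as
# GET /api/dashboard/?sort=title&order=desc sorts by DashboardRecord.title in
# descending order, while an unknown 'sort' value falls back to the creation
# date in ascending order.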
def _set_dashboard_hrefs(dash):
"""Add the various ReSTful hrefs to an outgoing dashboard
    representation. dash should be the dictionary form of the dashboard,
not the model object.
"""
id = dash['id']
dash['href'] = url_for('api.dashboard_get', id=id)
dash['definition_href'] = url_for('api.dashboard_get_definition', id=id)
dash['view_href'] = url_for('ui.dashboard_with_slug',
id=id,
slug=inflection.parameterize(dash['title']))
if 'definition' in dash:
definition = dash['definition']
definition['href'] = url_for('api.dashboard_get_definition', id=id)
return dash
def _dashboards_response(dashboards):
"""Return a Flask response object for a list of dashboards in API
format. dashboards must be a list of dashboard model objects, which
will be converted to their JSON representation.
"""
if not isinstance(dashboards, list):
dashboards = [dashboards]
include_definition = helpers.get_param_boolean('definition', False)
return [ _set_dashboard_hrefs(d.to_json(include_definition=include_definition)) for d in dashboards]
def _set_tag_hrefs(tag):
"""Add ReSTful href attributes to a tag's dictionary
representation.
"""
id = tag['id']
tag['href'] = url_for('api.tag_get', id=id)
return tag
def _tags_response(tags):
"""Return a Flask response object for a list of tags in API
format. tags must be a list of tag model objects, which
will be converted to their JSON representation.
"""
if not isinstance(tags, list):
tags = [tags]
return [_set_tag_hrefs(t.to_json()) for t in tags]
# =============================================================================
# Dashboards
# =============================================================================
@route_api(api, '/dashboard/')
def dashboard_list():
"""Listing for all dashboards. Returns just the metadata, not the
definitions.
"""
imported_from = request.args.get('imported_from')
if imported_from:
query = database.DashboardRecord.query.filter_by(imported_from=imported_from) \
.order_by(_dashboard_sort_column())
else:
query = database.DashboardRecord.query.order_by(_dashboard_sort_column())
dashboards = [d for d in query.all()]
return _dashboards_response(dashboards)
@route_api(api, '/dashboard/tagged/<tag>')
def dashboard_list_tagged(tag):
"""Listing for a set of dashboards with a tag applied. Returns just
the metadata, not the definitions.
"""
tag = database.TagRecord.query.filter_by(name=tag).first()
if not tag:
return _dashboards_response([])
    dashboards = [d for d in tag.dashboards.order_by(_dashboard_sort_column())]
return _dashboards_response(dashboards)
@route_api(api, '/dashboard/category/<category>')
def dashboard_list_dashboards_in_category(category):
"""Listing for a set of dashboards in a specified category. Returns
just the metadata, not the definitions.
"""
dashboards = [d for d in database.DashboardRecord.query
.filter_by(category=category)
.order_by(_dashboard_sort_column()) ]
return _dashboards_response(dashboards)
@route_api(api, '/dashboard/category/')
def dashboard_list_all_dashboard_categories():
result = db.session.query(
database.DashboardRecord.category,
db.func.count(database.DashboardRecord.category)
).group_by(database.DashboardRecord.category).all()
categories = []
for (name, count) in result:
categories.append({
'name' : name,
'count' : count,
})
return categories
@route_api(api, '/dashboard/<id>')
def dashboard_get(id):
"""Get the metadata for a single dashboard.
"""
dashboard = database.DashboardRecord.query.get_or_404(id)
rendering = helpers.get_param('rendering', False)
include_definition = helpers.get_param_boolean('definition', False)
dash = _set_dashboard_hrefs(dashboard.to_json(rendering or include_definition))
if rendering:
dash['preferences'] = helpers.get_preferences()
return dash
@route_api(api, '/dashboard/<id>/for-rendering')
def dashboard_get_for_rendering(id):
"""Get a dashboard with its definition, and current settings necessary
for rendering.
"""
dashboard = database.DashboardRecord.query.get_or_404(id)
dash = _set_dashboard_hrefs(dashboard.to_json(True))
return {
'dashboard' : dash,
'preferences' : helpers.get_preferences()
}
@route_api(api, '/dashboard/', methods=['POST'])
def dashboard_create():
"""Create a new dashboard with an empty definition.
"""
dashboard = database.DashboardRecord.from_json(request.json)
if not dashboard.title:
return {
'error_message': "Missing required field 'title'"
}, 400
if 'definition' in request.json:
dashboard.definition = database.DefinitionRecord(dumps(request.json['definition']))
else:
dashboard.definition = database.DefinitionRecord(dumps(DashboardDefinition()))
mgr.store_dashboard(dashboard)
href = url_for('api.dashboard_get', id=dashboard.id)
return {
'dashboard_href' : href,
'view_href' : url_for('ui.dashboard_with_slug',
id=dashboard.id,
slug=inflection.parameterize(dashboard.title))
}, 201, { 'Location' : href }
@route_api(api, '/dashboard/<id>', methods=['PUT'])
def dashboard_update(id):
"""Update the metadata for an existing dashboard.
"""
body = request.json
dashboard = database.DashboardRecord.query.get_or_404(id)
dashboard.merge_from_json(body)
mgr.store_dashboard(dashboard)
# TODO - return similar to create, above
return {}
@route_api(api, '/dashboard/<id>', methods=['DELETE'])
def dashboard_delete(id):
"""Delete a dashboard. Use with caution.
"""
dashboard = database.DashboardRecord.query.get_or_404(id)
db.session.delete(dashboard)
db.session.commit()
return {}, 204
@route_api(api, '/dashboard/<id>/definition')
def dashboard_get_definition(id):
"""Fetch the definition for a dashboard. This returns the
    representation to use when modifying a dashboard.
"""
definition = database.DashboardRecord.query.get_or_404(id).definition.to_json()
definition['href'] = url_for('api.dashboard_get_definition', id=id)
definition['dashboard_href'] = url_for('api.dashboard_get', id=id)
return definition
@route_api(api, '/dashboard/<id>/definition', methods=['PUT'])
def dashboard_update_definition(id):
"""Update the definition of the dashboard. This should use the
representation returned by /api/dashboard/<id>/definition, and
should NOT have any embedded variables expanded, nor should it
have complete graphite URLs in the queries.
"""
dashboard = database.DashboardRecord.query.get_or_404(id)
# Validate the payload
definition = DashboardDefinition.from_json(json.loads(request.data.decode('utf-8')))
if dashboard.definition:
dashboard.definition.definition = dumps(definition)
else:
        dashboard.definition = database.DefinitionRecord(request.data)
mgr.store_dashboard(dashboard)
return {}
# =============================================================================
# Tags
# =============================================================================
@route_api(api, '/tag/')
def tag_list():
"""Listing for all tags.
"""
tags = db.session.query(database.TagRecord).all()
return _tags_response(tags)
@route_api(api, '/tag/<id>')
def tag_get(id):
tag = database.TagRecord.query.get_or_404(id)
return _tags_response(tag)
# =============================================================================
# Miscellany
# =============================================================================
@route_api(api, '/preferences/')
def preferences_get():
return helpers.get_preferences()
@route_api(api, '/preferences/', methods=['PUT'])
def preferences_put():
helpers.set_preferences(request.json)
return helpers.get_preferences()
|
tests/test_metadata_options.py | Fatal1ty/mashumaro | 394 | 6852 | from dataclasses import dataclass, field
from datetime import date, datetime, time, timezone
from pathlib import Path
from typing import Any, Dict, Optional, Union
import ciso8601
import pytest
from mashumaro import DataClassDictMixin
from mashumaro.exceptions import UnserializableField
from mashumaro.types import SerializationStrategy
from .entities import (
MutableString,
MyList,
ThirdPartyType,
TypedDictRequiredKeys,
)
def test_ciso8601_datetime_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": "ciso8601"})
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05Z"})
assert instance == should_be
def test_ciso8601_date_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: date = field(metadata={"deserialize": "ciso8601"})
should_be = DataClass(x=date(2021, 1, 2))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05Z"})
assert instance == should_be
def test_ciso8601_time_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: time = field(metadata={"deserialize": "ciso8601"})
should_be = DataClass(x=time(3, 4, 5))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05Z"})
assert instance == should_be
def test_pendulum_datetime_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": "pendulum"})
should_be = DataClass(x=datetime(2008, 12, 29, 7, tzinfo=timezone.utc))
instance = DataClass.from_dict({"x": "2009-W01 0700"})
assert instance == should_be
def test_pendulum_date_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: date = field(metadata={"deserialize": "pendulum"})
should_be = DataClass(x=date(2008, 12, 29))
instance = DataClass.from_dict({"x": "2009-W01"})
assert instance == should_be
def test_pendulum_time_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: time = field(metadata={"deserialize": "pendulum"})
should_be = DataClass(x=time(3, 4, 5))
instance = DataClass.from_dict({"x": "2009-W01 030405"})
assert instance == should_be
def test_unsupported_datetime_parser_engine():
with pytest.raises(UnserializableField):
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": "unsupported"})
def test_global_function_datetime_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(
metadata={"deserialize": ciso8601.parse_datetime_as_naive}
)
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05+03:00"})
assert instance == should_be
def test_local_function_datetime_parser():
def parse_dt(s):
return ciso8601.parse_datetime_as_naive(s)
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": parse_dt})
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05+03:00"})
assert instance == should_be
def test_class_method_datetime_parser():
class DateTimeParser:
@classmethod
def parse_dt(cls, s: str) -> datetime:
return datetime.fromisoformat(s)
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": DateTimeParser.parse_dt})
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05"})
assert instance == should_be
def test_class_instance_method_datetime_parser():
class DateTimeParser:
def __call__(self, s: str) -> datetime:
return datetime.fromisoformat(s)
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": DateTimeParser()})
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05"})
assert instance == should_be
def test_callable_class_instance_datetime_parser():
class CallableDateTimeParser:
def __call__(self, s):
return ciso8601.parse_datetime(s)
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": CallableDateTimeParser()})
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05Z"})
assert instance == should_be
def test_lambda_datetime_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(
metadata={"deserialize": lambda s: ciso8601.parse_datetime(s)}
)
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05Z"})
assert instance == should_be
def test_derived_dataclass_metadata_deserialize_option():
@dataclass
class A:
x: datetime = field(metadata={"deserialize": ciso8601.parse_datetime})
@dataclass
class B(A, DataClassDictMixin):
y: datetime = field(metadata={"deserialize": ciso8601.parse_datetime})
should_be = B(
x=datetime(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc),
y=datetime(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc),
)
instance = B.from_dict(
{"x": "2021-01-02T03:04:05Z", "y": "2021-01-02T03:04:05Z"}
)
assert instance == should_be
def test_bytearray_overridden():
@dataclass
class DataClass(DataClassDictMixin):
x: bytearray = field(
metadata={"deserialize": lambda s: s.upper().encode()}
)
should_be = DataClass(x=bytearray(b"ABC"))
instance = DataClass.from_dict({"x": "abc"})
assert instance == should_be
def test_path_like_overridden():
@dataclass
class DataClass(DataClassDictMixin):
x: Path = field(
metadata={"deserialize": lambda s: Path(str(s).upper())}
)
should_be = DataClass(x=Path("/ABC"))
instance = DataClass.from_dict({"x": "/abc"})
assert instance == should_be
def test_datetime_serialize_option():
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(
metadata={"serialize": lambda v: v.strftime("%Y-%m-%d %H:%M:%S")}
)
should_be = {"x": "2021-01-02 03:04:05"}
instance = DataClass(x=datetime(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc))
assert instance.to_dict() == should_be
def test_third_party_type_overridden():
@dataclass
class DataClass(DataClassDictMixin):
x: ThirdPartyType = field(
metadata={
"deserialize": lambda v: ThirdPartyType(v),
"serialize": lambda v: v.value,
}
)
should_be = DataClass(x=ThirdPartyType(123))
instance = DataClass.from_dict({"x": 123})
assert instance == should_be
assert instance.to_dict() == {"x": 123}
def test_serializable_type_overridden():
@dataclass
class DataClass(DataClassDictMixin):
x: MutableString = field(
metadata={
"deserialize": lambda s: MutableString(s.upper()),
"serialize": lambda v: str(v).lower(),
}
)
should_be = DataClass(x=MutableString("ABC"))
instance = DataClass.from_dict({"x": "abc"})
assert instance == should_be
assert instance.to_dict() == {"x": "abc"}
def test_optional_overridden():
@dataclass
class DataClass(DataClassDictMixin):
x: Optional[ThirdPartyType] = field(
metadata={
"deserialize": lambda v: ThirdPartyType(v),
"serialize": lambda v: v.value,
}
)
instance = DataClass.from_dict({"x": 123})
assert instance
assert instance.x.value == 123
dct = instance.to_dict()
assert dct["x"] == 123
def test_union_overridden():
@dataclass
class DataClass(DataClassDictMixin):
x: Union[int, str, float, ThirdPartyType] = field(
metadata={
"deserialize": lambda v: ThirdPartyType(v),
"serialize": lambda v: v.value,
}
)
instance = DataClass.from_dict({"x": 1})
assert instance == DataClass(x=ThirdPartyType(value=1))
assert instance.to_dict() == {"x": 1}
def test_serialization_strategy():
class TestSerializationStrategy(SerializationStrategy):
def serialize(self, value):
return [value]
def deserialize(self, value):
return value[0]
@dataclass
class DataClass(DataClassDictMixin):
x: int = field(
metadata={"serialization_strategy": TestSerializationStrategy()}
)
instance = DataClass(x=123)
assert DataClass.from_dict({"x": [123]}) == instance
assert instance.to_dict() == {"x": [123]}
def test_collection_derived_custom_class():
@dataclass
class DataClass(DataClassDictMixin):
x: MyList = field(
metadata={"serialize": lambda v: v, "deserialize": lambda v: v}
)
instance = DataClass(x=[1, 2, 3])
assert DataClass.from_dict({"x": [1, 2, 3]}) == instance
assert instance.to_dict() == {"x": [1, 2, 3]}
def test_dataclass_with_typed_dict_overridden():
def serialize_x(x: TypedDictRequiredKeys) -> Dict[str, Any]:
return {"int": int(x["int"]), "float": float(x["float"])}
def deserialize_x(x: Dict[str, Any]) -> TypedDictRequiredKeys:
return TypedDictRequiredKeys(int=x["int"], float=x["float"])
@dataclass
class DataClass(DataClassDictMixin):
x: TypedDictRequiredKeys = field(
metadata={"serialize": serialize_x, "deserialize": deserialize_x}
)
obj = DataClass(x=TypedDictRequiredKeys(int=1, float=2.0))
data = {"x": {"int": 1, "float": 2.0}}
assert DataClass.from_dict(data) == obj
assert obj.to_dict() == data
|
tests/test_dump.py | flaeppe/astunparse | 189 | 6854 | import ast
import re
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
import astunparse
from tests.common import AstunparseCommonTestCase
class DumpTestCase(AstunparseCommonTestCase, unittest.TestCase):
def assertASTEqual(self, dump1, dump2):
# undo the pretty-printing
dump1 = re.sub(r"(?<=[\(\[])\n\s+", "", dump1)
dump1 = re.sub(r"\n\s+", " ", dump1)
self.assertEqual(dump1, dump2)
def check_roundtrip(self, code1, filename="internal", mode="exec"):
ast_ = compile(str(code1), filename, mode, ast.PyCF_ONLY_AST)
dump1 = astunparse.dump(ast_)
dump2 = ast.dump(ast_)
self.assertASTEqual(dump1, dump2)
|
test/test_catalog_manager.py | weknowtraining/athena-glue-service-logs | 133 | 6869 | # pylint: skip-file
from athena_glue_service_logs.catalog_manager import BaseCatalogManager
def test_class_init(mocker):
mocker.patch.multiple(BaseCatalogManager, __abstractmethods__=set())
base_catalog = BaseCatalogManager('us-west-2', 'dbname', 'tablename', 's3://somewhere')
assert base_catalog.database_name == 'dbname'
assert base_catalog.s3_location == 's3://somewhere'
assert base_catalog.table_name == 'tablename'
def test_init_with_partitions(mocker):
mocker.patch.multiple(BaseCatalogManager, __abstractmethods__=set())
mocker.patch('athena_glue_service_logs.catalog_manager.BaseCatalogManager.does_database_exist', return_value=True)
mocker.patch('athena_glue_service_logs.catalog_manager.BaseCatalogManager.create_database')
mocker.patch('athena_glue_service_logs.catalog_manager.BaseCatalogManager.create_table')
mocker.patch('athena_glue_service_logs.catalog_manager.BaseCatalogManager.create_partitions')
base_catalog = BaseCatalogManager('us-west-2', 'dbname', 'tablename', 's3://somewhere')
base_catalog.initialize_with_partitions(['a', 'b', 'c'])
assert BaseCatalogManager.create_database.call_count == 0
BaseCatalogManager.create_table.assert_called_once()
BaseCatalogManager.create_partitions.assert_called_once_with(partition_list=['a', 'b', 'c'])
mocker.patch('athena_glue_service_logs.catalog_manager.BaseCatalogManager.does_database_exist', return_value=False)
base_catalog.initialize_with_partitions(['a', 'b', 'c'])
assert BaseCatalogManager.create_database.call_count == 1
|
data/benchmark.py | Gummary/denet | 343 | 6896 | <reponame>Gummary/denet
"""
CutBlur
Copyright 2020-present NAVER corp.
MIT license
"""
import os
import glob
import data
class BenchmarkSR(data.BaseDataset):
def __init__(self, phase, opt):
root = opt.dataset_root
self.scale = opt.scale
dir_HQ, dir_LQ = self.get_subdir()
self.HQ_paths = sorted(glob.glob(os.path.join(root, dir_HQ, "*.png")))
self.LQ_paths = sorted(glob.glob(os.path.join(root, dir_LQ, "*.png")))
super().__init__(phase, opt)
def get_subdir(self):
dir_HQ = "HR"
dir_LQ = "X{}".format(self.scale)
return dir_HQ, dir_LQ
class BenchmarkDN(BenchmarkSR):
def __init__(self, phase, opt):
self.sigma = opt.sigma
super().__init__(phase, opt)
def get_subdir(self):
dir_HQ = "HQ"
dir_LQ = "{}".format(self.sigma)
return dir_HQ, dir_LQ
class BenchmarkJPEG(BenchmarkSR):
def __init__(self, phase, opt):
self.quality = opt.quality
super().__init__(phase, opt)
def get_subdir(self):
dir_HQ = "HQ"
dir_LQ = "{}".format(self.quality)
return dir_HQ, dir_LQ
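# Usage sketch (added for illustration; the option fields shown are assumptions
# inferred from the constructors above, and BaseDataset may require more):
#
#     from types import SimpleNamespace
#     opt = SimpleNamespace(dataset_root="/data/benchmark/Set5", scale=4)
#     testset = BenchmarkSR("test", opt)   # pairs HR/*.png with X4/*.png
#
# BenchmarkDN and BenchmarkJPEG instead pair HQ/ with <sigma>/ or <quality>/.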
|
nn_dataflow/tests/unit_test/test_network.py | Pingziwalk/nn_dataflow | 170 | 6923 | """ $lic$
Copyright (C) 2016-2020 by Tsinghua University and The Board of Trustees of
Stanford University
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
import unittest
from nn_dataflow.core import Network
from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, \
PoolingLayer, EltwiseLayer
class TestNetwork(unittest.TestCase):
''' Tests for Network. '''
# pylint: disable=too-many-public-methods
def setUp(self):
''' Set up. '''
self.network = Network('test_net')
self.network.set_input_layer(InputLayer(3, 224))
self.network.add('c1', ConvLayer(3, 64, 224, 3))
self.network.add('p1', PoolingLayer(64, 7, 32))
self.network.add('f1', FCLayer(64, 1000, 7))
def test_set_input_layer(self):
''' Modifier set_input_layer. '''
network = Network('test_net')
network.set_input_layer(InputLayer(3, 24))
self.assertIsInstance(network.input_layer(), InputLayer)
self.assertEqual(network.input_layer().nofm, 3)
self.assertEqual(network.input_layer().hofm, 24)
self.assertEqual(network.input_layer().wofm, 24)
self.assertEqual(len(network), 0)
def test_set_input_layer_type(self):
''' Modifier set_input_layer type. '''
network = Network('test_net')
with self.assertRaisesRegex(TypeError, 'Network: .*input_layer.*'):
network.set_input_layer(Layer(3, 24))
with self.assertRaisesRegex(TypeError, 'Network: .*input_layer.*'):
network.set_input_layer(ConvLayer(3, 8, 24, 3))
def test_set_input_layer_duplicate(self):
''' Modifier set_input_layer duplicate. '''
network = Network('test_net')
network.set_input_layer(InputLayer(3, 24))
with self.assertRaisesRegex(KeyError, 'Network: .*input.*'):
network.set_input_layer(InputLayer(3, 24))
def test_add(self):
''' Modifier add. '''
self.assertEqual(len(self.network), 3)
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
self.network.add('f3', FCLayer(3000, 1000), prevs=('f1', 'f2'))
self.network.add('e4', EltwiseLayer(1000, 1, 2), prevs=('f1', 'f3'))
self.network.add('f4', FCLayer(1000, 1000), prevs='e4')
self.assertEqual(len(self.network), 7)
def test_add_same_key(self):
''' Modifier add same key. '''
network = Network('test_net')
network.set_input_layer(InputLayer(3, 224))
network.add('c1', ConvLayer(3, 64, 224, 3))
with self.assertRaisesRegex(KeyError, 'Network: .*c1.*'):
network.add('c1', ConvLayer(64, 128, 224, 3))
def test_add_no_input(self):
''' Modifier add no input. '''
network = Network('test_net')
with self.assertRaisesRegex(RuntimeError, 'Network: .*input.*'):
network.add('c1', ConvLayer(3, 64, 224, 3))
def test_add_no_prev(self):
''' Modifier add no prevs. '''
network = Network('test_net')
network.set_input_layer(InputLayer(3, 224))
network.add('c1', ConvLayer(3, 64, 224, 3))
with self.assertRaisesRegex(KeyError, 'Network: .*prev.*p1.*'):
network.add('p1', PoolingLayer(64, 7, 32), prevs='p1')
def test_add_invalid_type(self):
''' Modifier add invalid type. '''
network = Network('test_net')
network.set_input_layer(InputLayer(3, 224))
with self.assertRaisesRegex(TypeError, 'Network: .*Layer.*'):
network.add('c1', (3, 64, 224, 3))
def test_add_unmatch_prev(self):
''' Modifier add unmatch prevs. '''
network = Network('test_net')
network.set_input_layer(InputLayer(3, 224))
network.add('c1', ConvLayer(3, 64, 224, 3))
with self.assertRaisesRegex(ValueError,
'Network: .*c1.*p1.*mismatch fmap.*'):
network.add('p1', PoolingLayer(64, 7, 2))
self.assertEqual(len(network), 1)
with self.assertRaisesRegex(ValueError,
'Network: .*c1.*c2.*mismatch fmap.*'):
network.add('c2', ConvLayer(64, 128, 220, 3))
self.assertEqual(len(network), 1)
with self.assertRaisesRegex(ValueError, 'Network: .*c1.*prev.*p1.*'):
network.add('p1', PoolingLayer(32, 7, 32))
self.assertEqual(len(network), 1)
with self.assertRaisesRegex(ValueError, 'Network: .*c1.*prev.*c2.*'):
network.add('c2', ConvLayer(32, 128, 224, 3))
self.assertEqual(len(network), 1)
network.add('c2', ConvLayer(64, 128, 224, 3))
with self.assertRaisesRegex(ValueError,
r'Network: .*c1 | c2.*prev.*p1.*'):
network.add('p1', PoolingLayer(128, 7, 32), prevs=('c1', 'c2'))
self.assertEqual(len(network), 2)
def test_add_ext(self):
''' Modifier add_ext. '''
self.assertEqual(len(self.network), 3)
self.network.add_ext('e0', InputLayer(3, 24))
self.assertIsInstance(self.network['e0'], InputLayer)
self.assertEqual(self.network['e0'].nofm, 3)
self.assertEqual(self.network['e0'].hofm, 24)
self.assertEqual(self.network['e0'].wofm, 24)
self.network.add_ext('e1', InputLayer(5, (16, 20)))
self.assertIsInstance(self.network['e1'], InputLayer)
self.assertEqual(self.network['e1'].nofm, 5)
self.assertEqual(self.network['e1'].hofm, 16)
self.assertEqual(self.network['e1'].wofm, 20)
self.assertEqual(len(self.network), 3)
def test_add_ext_same_key(self):
''' Modifier add_ext same key. '''
network = Network('test_net')
network.add_ext('e0', InputLayer(3, 24))
with self.assertRaisesRegex(KeyError, 'Network: .*ext.*'):
network.add_ext('e0', InputLayer(3, 24))
def test_add_ext_invalid_type(self):
''' Modifier add_ext invalid type. '''
network = Network('test_net')
with self.assertRaisesRegex(TypeError, 'Network: .*external layer.*'):
network.add_ext('e0', Layer(3, 24))
with self.assertRaisesRegex(TypeError, 'Network: .*external layer.*'):
network.add_ext('e0', ConvLayer(3, 8, 24, 3))
def test_prevs(self):
''' Get prevs. '''
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
self.network.add('f3', FCLayer(3000, 1000), prevs=('f1', 'f2'))
prevs = self.network.prevs('f1')
self.assertTupleEqual(prevs, ('p1',))
prevs = self.network.prevs('f2')
self.assertTupleEqual(prevs, ('p1',))
prevs = self.network.prevs('f3')
self.assertTupleEqual(prevs, ('f1', 'f2'))
def test_prevs_first(self):
''' Get prevs first layer. '''
self.network.add('c2', ConvLayer(3, 3, 224, 1),
prevs=self.network.INPUT_LAYER_KEY)
prevs = self.network.prevs('c1')
self.assertTupleEqual(prevs, (None,))
prevs = self.network.prevs('c2')
self.assertTupleEqual(prevs, (None,))
def test_prevs_input(self):
''' Get prevs input layer. '''
with self.assertRaisesRegex(ValueError, 'Network: .*input.*'):
_ = self.network.prevs(self.network.INPUT_LAYER_KEY)
def test_prevs_ext_next(self):
''' Get prevs next layer of an external layer. '''
self.network.add_ext('e0', InputLayer(3, 224))
self.network.add('n', ConvLayer(6, 3, 224, 1),
prevs=(self.network.INPUT_LAYER_KEY, 'e0'))
prevs = self.network.prevs('n')
self.assertTupleEqual(prevs, (None, 'e0'))
def test_prevs_ext(self):
''' Get prevs external layer. '''
self.network.add_ext('e0', InputLayer(3, 3))
with self.assertRaisesRegex(ValueError, 'Network: .*ext.*'):
_ = self.network.prevs('e0')
def test_nexts(self):
''' Get nexts. '''
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
self.network.add('f3', FCLayer(3000, 1000), prevs=('f1', 'f2'))
self.network.add('e4', EltwiseLayer(1000, 1, 2), prevs=('f1', 'f3'))
self.network.add('f4', FCLayer(1000, 1000), prevs='e4')
nexts = self.network.nexts('p1')
self.assertTupleEqual(nexts, ('f1', 'f2'))
nexts = self.network.nexts('f1')
self.assertTupleEqual(nexts, ('f3', 'e4'))
nexts = self.network.nexts('f2')
self.assertTupleEqual(nexts, ('f3',))
nexts = self.network.nexts('f3')
self.assertTupleEqual(nexts, ('e4',))
def test_nexts_last(self):
''' Get nexts first layer. '''
nexts = self.network.nexts('f1')
self.assertTupleEqual(nexts, (None,))
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
nexts = self.network.nexts('f1')
self.assertTupleEqual(nexts, (None,))
nexts = self.network.nexts('f2')
self.assertTupleEqual(nexts, (None,))
def test_nexts_input(self):
''' Get nexts input layer. '''
nexts = self.network.nexts(self.network.INPUT_LAYER_KEY)
self.assertTupleEqual(nexts, ('c1',))
self.network.add('c2', ConvLayer(3, 3, 224, 1),
prevs=self.network.INPUT_LAYER_KEY)
self.network.add('c3', ConvLayer(6, 4, 224, 1),
prevs=(self.network.INPUT_LAYER_KEY, 'c2'))
nexts = self.network.nexts(self.network.INPUT_LAYER_KEY)
self.assertTupleEqual(nexts, ('c1', 'c2', 'c3'))
def test_firsts(self):
''' Get firsts. '''
firsts = self.network.firsts()
self.assertTupleEqual(firsts, ('c1',))
self.network.add('c2', ConvLayer(3, 3, 224, 1),
prevs=self.network.INPUT_LAYER_KEY)
self.network.add('c3', ConvLayer(6, 4, 224, 1),
prevs=(self.network.INPUT_LAYER_KEY, 'c2'))
firsts = self.network.firsts()
self.assertTupleEqual(firsts, ('c1', 'c2'))
self.assertIn('c1', firsts)
self.assertNotIn('c3', firsts)
def test_firsts_ext(self):
''' Get firsts with external layers. '''
self.network.add_ext('e0', InputLayer(3, 224))
self.network.add('c2', ConvLayer(3, 3, 224, 1), prevs=('e0',))
self.network.add('c3', ConvLayer(67, 3, 224, 1), prevs=('e0', 'c1'))
self.network.add('c4', ConvLayer(6, 3, 224, 1),
prevs=(self.network.INPUT_LAYER_KEY, 'e0',))
firsts = self.network.firsts()
self.assertIn('c2', firsts)
self.assertNotIn('c3', firsts)
self.assertIn('c4', firsts)
def test_lasts(self):
''' Get lasts. '''
lasts = self.network.lasts()
self.assertTupleEqual(lasts, ('f1',))
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
lasts = self.network.lasts()
self.assertTupleEqual(lasts, ('f1', 'f2'))
def test_ext_layers(self):
''' Get external layers. '''
self.assertTupleEqual(self.network.ext_layers(), tuple())
self.network.add_ext('e0', InputLayer(3, 224))
self.assertTupleEqual(self.network.ext_layers(), ('e0',))
self.network.add_ext('e1', InputLayer(3, 224))
self.assertTupleEqual(self.network.ext_layers(), ('e0', 'e1'))
def test_contains(self):
''' Whether contains. '''
self.assertIn('c1', self.network)
self.assertIn('p1', self.network)
self.assertIn('f1', self.network)
self.assertNotIn('f2', self.network)
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
self.assertIn('f2', self.network)
def test_len(self):
''' Accessor len. '''
self.assertEqual(len(self.network), 3)
network = Network('test_net')
self.assertEqual(len(network), 0)
network.set_input_layer(InputLayer(3, 224))
self.assertEqual(len(network), 0)
network.add('c1', ConvLayer(3, 4, 224, 1))
self.assertEqual(len(network), 1)
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
self.assertEqual(len(self.network), 4)
self.network.add('f3', FCLayer(3000, 1000), prevs=('f1', 'f2'))
self.assertEqual(len(self.network), 5)
self.network.add('e4', EltwiseLayer(1000, 1, 2), prevs=('f1', 'f3'))
self.assertEqual(len(self.network), 6)
self.network.add('f4', FCLayer(1000, 1000), prevs='e4')
self.assertEqual(len(self.network), 7)
def test_iter(self):
''' Accessor iter. '''
num = 0
for layer in self.network:
self.assertIn(layer, self.network)
self.assertIsInstance(self.network[layer], Layer)
num += 1
self.assertEqual(len(self.network), num)
network = Network('test_net')
network.set_input_layer(InputLayer(3, 224))
with self.assertRaises(StopIteration):
_ = next(iter(network))
def test_contains_ext(self):
''' Whether contains external layer. '''
self.assertNotIn('e0', self.network)
self.network.add_ext('e0', InputLayer(3, 224))
self.assertIn('e0', self.network)
def test_len_ext(self):
''' Accessor len external layer. '''
self.assertEqual(len(self.network), 3)
self.network.add_ext('e0', InputLayer(3, 224))
self.assertEqual(len(self.network), 3)
def test_iter_ext(self):
''' Accessor iter external layer. '''
self.network.add_ext('e0', InputLayer(3, 224))
for layer in self.network:
self.assertNotEqual(layer, 'e0')
def test_getitem(self):
''' Accessor getitem. '''
self.assertIsInstance(self.network['c1'], ConvLayer)
self.assertIsInstance(self.network['p1'], PoolingLayer)
self.assertIsInstance(self.network['f1'], FCLayer)
def test_getitem_error(self):
''' Accessor getitem. '''
with self.assertRaisesRegex(KeyError, 'Network: .*c2.*'):
_ = self.network['c2']
def test_str(self):
''' Accessor str. '''
string = str(self.network)
for layer in self.network:
self.assertIn(layer, string)
|
sympy/solvers/tests/test_pde.py | nashalex/sympy | 8,323 | 6925 | <gh_stars>1000+
from sympy import (Derivative as D, Eq, exp, sin,
Function, Symbol, symbols, cos, log)
from sympy.core import S
from sympy.solvers.pde import (pde_separate, pde_separate_add, pde_separate_mul,
pdsolve, classify_pde, checkpdesol)
from sympy.testing.pytest import raises
a, b, c, x, y = symbols('a b c x y')
def test_pde_separate_add():
x, y, z, t = symbols("x,y,z,t")
F, T, X, Y, Z, u = map(Function, 'FTXYZu')
eq = Eq(D(u(x, t), x), D(u(x, t), t)*exp(u(x, t)))
res = pde_separate_add(eq, u(x, t), [X(x), T(t)])
assert res == [D(X(x), x)*exp(-X(x)), D(T(t), t)*exp(T(t))]
def test_pde_separate():
x, y, z, t = symbols("x,y,z,t")
F, T, X, Y, Z, u = map(Function, 'FTXYZu')
eq = Eq(D(u(x, t), x), D(u(x, t), t)*exp(u(x, t)))
raises(ValueError, lambda: pde_separate(eq, u(x, t), [X(x), T(t)], 'div'))
def test_pde_separate_mul():
x, y, z, t = symbols("x,y,z,t")
c = Symbol("C", real=True)
Phi = Function('Phi')
F, R, T, X, Y, Z, u = map(Function, 'FRTXYZu')
r, theta, z = symbols('r,theta,z')
# Something simple :)
eq = Eq(D(F(x, y, z), x) + D(F(x, y, z), y) + D(F(x, y, z), z), 0)
# Duplicate arguments in functions
raises(
ValueError, lambda: pde_separate_mul(eq, F(x, y, z), [X(x), u(z, z)]))
# Wrong number of arguments
raises(ValueError, lambda: pde_separate_mul(eq, F(x, y, z), [X(x), Y(y)]))
# Wrong variables: [x, y] -> [x, z]
raises(
ValueError, lambda: pde_separate_mul(eq, F(x, y, z), [X(t), Y(x, y)]))
assert pde_separate_mul(eq, F(x, y, z), [Y(y), u(x, z)]) == \
[D(Y(y), y)/Y(y), -D(u(x, z), x)/u(x, z) - D(u(x, z), z)/u(x, z)]
assert pde_separate_mul(eq, F(x, y, z), [X(x), Y(y), Z(z)]) == \
[D(X(x), x)/X(x), -D(Z(z), z)/Z(z) - D(Y(y), y)/Y(y)]
# wave equation
wave = Eq(D(u(x, t), t, t), c**2*D(u(x, t), x, x))
res = pde_separate_mul(wave, u(x, t), [X(x), T(t)])
assert res == [D(X(x), x, x)/X(x), D(T(t), t, t)/(c**2*T(t))]
# Laplace equation in cylindrical coords
eq = Eq(1/r * D(Phi(r, theta, z), r) + D(Phi(r, theta, z), r, 2) +
1/r**2 * D(Phi(r, theta, z), theta, 2) + D(Phi(r, theta, z), z, 2), 0)
# Separate z
res = pde_separate_mul(eq, Phi(r, theta, z), [Z(z), u(theta, r)])
assert res == [D(Z(z), z, z)/Z(z),
-D(u(theta, r), r, r)/u(theta, r) -
D(u(theta, r), r)/(r*u(theta, r)) -
D(u(theta, r), theta, theta)/(r**2*u(theta, r))]
# Lets use the result to create a new equation...
eq = Eq(res[1], c)
# ...and separate theta...
res = pde_separate_mul(eq, u(theta, r), [T(theta), R(r)])
assert res == [D(T(theta), theta, theta)/T(theta),
-r*D(R(r), r)/R(r) - r**2*D(R(r), r, r)/R(r) - c*r**2]
# ...or r...
res = pde_separate_mul(eq, u(theta, r), [R(r), T(theta)])
assert res == [r*D(R(r), r)/R(r) + r**2*D(R(r), r, r)/R(r) + c*r**2,
-D(T(theta), theta, theta)/T(theta)]
def test_issue_11726():
x, t = symbols("x t")
f = symbols("f", cls=Function)
X, T = symbols("X T", cls=Function)
u = f(x, t)
eq = u.diff(x, 2) - u.diff(t, 2)
res = pde_separate(eq, u, [T(x), X(t)])
assert res == [D(T(x), x, x)/T(x),D(X(t), t, t)/X(t)]
def test_pde_classify():
# When more number of hints are added, add tests for classifying here.
f = Function('f')
eq1 = a*f(x,y) + b*f(x,y).diff(x) + c*f(x,y).diff(y)
eq2 = 3*f(x,y) + 2*f(x,y).diff(x) + f(x,y).diff(y)
eq3 = a*f(x,y) + b*f(x,y).diff(x) + 2*f(x,y).diff(y)
eq4 = x*f(x,y) + f(x,y).diff(x) + 3*f(x,y).diff(y)
eq5 = x**2*f(x,y) + x*f(x,y).diff(x) + x*y*f(x,y).diff(y)
eq6 = y*x**2*f(x,y) + y*f(x,y).diff(x) + f(x,y).diff(y)
for eq in [eq1, eq2, eq3]:
assert classify_pde(eq) == ('1st_linear_constant_coeff_homogeneous',)
for eq in [eq4, eq5, eq6]:
assert classify_pde(eq) == ('1st_linear_variable_coeff',)
def test_checkpdesol():
f, F = map(Function, ['f', 'F'])
eq1 = a*f(x,y) + b*f(x,y).diff(x) + c*f(x,y).diff(y)
eq2 = 3*f(x,y) + 2*f(x,y).diff(x) + f(x,y).diff(y)
eq3 = a*f(x,y) + b*f(x,y).diff(x) + 2*f(x,y).diff(y)
for eq in [eq1, eq2, eq3]:
assert checkpdesol(eq, pdsolve(eq))[0]
eq4 = x*f(x,y) + f(x,y).diff(x) + 3*f(x,y).diff(y)
eq5 = 2*f(x,y) + 1*f(x,y).diff(x) + 3*f(x,y).diff(y)
eq6 = f(x,y) + 1*f(x,y).diff(x) + 3*f(x,y).diff(y)
assert checkpdesol(eq4, [pdsolve(eq5), pdsolve(eq6)]) == [
(False, (x - 2)*F(3*x - y)*exp(-x/S(5) - 3*y/S(5))),
(False, (x - 1)*F(3*x - y)*exp(-x/S(10) - 3*y/S(10)))]
for eq in [eq4, eq5, eq6]:
assert checkpdesol(eq, pdsolve(eq))[0]
sol = pdsolve(eq4)
sol4 = Eq(sol.lhs - sol.rhs, 0)
raises(NotImplementedError, lambda:
checkpdesol(eq4, sol4, solve_for_func=False))
def test_solvefun():
f, F, G, H = map(Function, ['f', 'F', 'G', 'H'])
eq1 = f(x,y) + f(x,y).diff(x) + f(x,y).diff(y)
assert pdsolve(eq1) == Eq(f(x, y), F(x - y)*exp(-x/2 - y/2))
assert pdsolve(eq1, solvefun=G) == Eq(f(x, y), G(x - y)*exp(-x/2 - y/2))
assert pdsolve(eq1, solvefun=H) == Eq(f(x, y), H(x - y)*exp(-x/2 - y/2))
def test_pde_1st_linear_constant_coeff_homogeneous():
f, F = map(Function, ['f', 'F'])
u = f(x, y)
eq = 2*u + u.diff(x) + u.diff(y)
assert classify_pde(eq) == ('1st_linear_constant_coeff_homogeneous',)
sol = pdsolve(eq)
assert sol == Eq(u, F(x - y)*exp(-x - y))
assert checkpdesol(eq, sol)[0]
eq = 4 + (3*u.diff(x)/u) + (2*u.diff(y)/u)
assert classify_pde(eq) == ('1st_linear_constant_coeff_homogeneous',)
sol = pdsolve(eq)
assert sol == Eq(u, F(2*x - 3*y)*exp(-S(12)*x/13 - S(8)*y/13))
assert checkpdesol(eq, sol)[0]
eq = u + (6*u.diff(x)) + (7*u.diff(y))
assert classify_pde(eq) == ('1st_linear_constant_coeff_homogeneous',)
sol = pdsolve(eq)
assert sol == Eq(u, F(7*x - 6*y)*exp(-6*x/S(85) - 7*y/S(85)))
assert checkpdesol(eq, sol)[0]
eq = a*u + b*u.diff(x) + c*u.diff(y)
sol = pdsolve(eq)
assert checkpdesol(eq, sol)[0]
def test_pde_1st_linear_constant_coeff():
f, F = map(Function, ['f', 'F'])
u = f(x,y)
eq = -2*u.diff(x) + 4*u.diff(y) + 5*u - exp(x + 3*y)
sol = pdsolve(eq)
assert sol == Eq(f(x,y),
(F(4*x + 2*y)*exp(x/2) + exp(x + 4*y)/15)*exp(-y))
assert classify_pde(eq) == ('1st_linear_constant_coeff',
'1st_linear_constant_coeff_Integral')
assert checkpdesol(eq, sol)[0]
eq = (u.diff(x)/u) + (u.diff(y)/u) + 1 - (exp(x + y)/u)
sol = pdsolve(eq)
assert sol == Eq(f(x, y), F(x - y)*exp(-x/2 - y/2) + exp(x + y)/3)
assert classify_pde(eq) == ('1st_linear_constant_coeff',
'1st_linear_constant_coeff_Integral')
assert checkpdesol(eq, sol)[0]
eq = 2*u + -u.diff(x) + 3*u.diff(y) + sin(x)
sol = pdsolve(eq)
assert sol == Eq(f(x, y),
F(3*x + y)*exp(x/5 - 3*y/5) - 2*sin(x)/5 - cos(x)/5)
assert classify_pde(eq) == ('1st_linear_constant_coeff',
'1st_linear_constant_coeff_Integral')
assert checkpdesol(eq, sol)[0]
eq = u + u.diff(x) + u.diff(y) + x*y
sol = pdsolve(eq)
assert sol.expand() == Eq(f(x, y),
x + y + (x - y)**2/4 - (x + y)**2/4 + F(x - y)*exp(-x/2 - y/2) - 2).expand()
assert classify_pde(eq) == ('1st_linear_constant_coeff',
'1st_linear_constant_coeff_Integral')
assert checkpdesol(eq, sol)[0]
eq = u + u.diff(x) + u.diff(y) + log(x)
assert classify_pde(eq) == ('1st_linear_constant_coeff',
'1st_linear_constant_coeff_Integral')
def test_pdsolve_all():
f, F = map(Function, ['f', 'F'])
u = f(x,y)
eq = u + u.diff(x) + u.diff(y) + x**2*y
sol = pdsolve(eq, hint = 'all')
keys = ['1st_linear_constant_coeff',
'1st_linear_constant_coeff_Integral', 'default', 'order']
assert sorted(sol.keys()) == keys
assert sol['order'] == 1
assert sol['default'] == '1st_linear_constant_coeff'
assert sol['1st_linear_constant_coeff'].expand() == Eq(f(x, y),
-x**2*y + x**2 + 2*x*y - 4*x - 2*y + F(x - y)*exp(-x/2 - y/2) + 6).expand()
def test_pdsolve_variable_coeff():
f, F = map(Function, ['f', 'F'])
u = f(x, y)
eq = x*(u.diff(x)) - y*(u.diff(y)) + y**2*u - y**2
sol = pdsolve(eq, hint="1st_linear_variable_coeff")
assert sol == Eq(u, F(x*y)*exp(y**2/2) + 1)
assert checkpdesol(eq, sol)[0]
eq = x**2*u + x*u.diff(x) + x*y*u.diff(y)
sol = pdsolve(eq, hint='1st_linear_variable_coeff')
assert sol == Eq(u, F(y*exp(-x))*exp(-x**2/2))
assert checkpdesol(eq, sol)[0]
eq = y*x**2*u + y*u.diff(x) + u.diff(y)
sol = pdsolve(eq, hint='1st_linear_variable_coeff')
assert sol == Eq(u, F(-2*x + y**2)*exp(-x**3/3))
assert checkpdesol(eq, sol)[0]
eq = exp(x)**2*(u.diff(x)) + y
sol = pdsolve(eq, hint='1st_linear_variable_coeff')
assert sol == Eq(u, y*exp(-2*x)/2 + F(y))
assert checkpdesol(eq, sol)[0]
eq = exp(2*x)*(u.diff(y)) + y*u - u
sol = pdsolve(eq, hint='1st_linear_variable_coeff')
assert sol == Eq(u, F(x)*exp(-y*(y - 2)*exp(-2*x)/2))
|
dataapi/AWS/getawsdata.py | gusamarante/Quantequim | 296 | 6932 | """
Author: <NAME>
"""
import numpy as np
import pandas as pd
from datetime import datetime
class TrackerFeeder(object):
"""
Feeder for the trackers of the FinanceHub database.
"""
def __init__(self, db_connect):
"""
Feeder construction
:param db_connect: sql connection engine from sqlalchemy
"""
self.conn = db_connect.connection
def fetch(self, fh_ticker):
"""
grabs trackers from the FH database
:param fh_ticker: str or list with the tickers from the database trackers
:return: pandas DataFrame with tickers on the columns
"""
assert type(fh_ticker) is str or type(fh_ticker) is list or type(fh_ticker) is dict, \
"'tickers' must be a string, list or dict"
sql_query = 'SELECT time_stamp, fh_ticker, value FROM "trackers" WHERE '
if type(fh_ticker) is str:
sql_query = sql_query + "fh_ticker IN ('" + fh_ticker + "')"
elif type(fh_ticker) is list:
sql_query = sql_query + "fh_ticker IN ('" + "', '".join(fh_ticker) + "')"
elif type(fh_ticker) is dict:
sql_query = sql_query + "fh_ticker IN ('" + "', '".join(list(fh_ticker.keys())) + "')"
df = pd.read_sql(sql=sql_query, con=self.conn)
df = df.pivot(index='time_stamp', columns='fh_ticker', values='value')
if type(fh_ticker) is dict:
df = df.rename(fh_ticker, axis=1)
df.index = pd.to_datetime(df.index)
df = df.dropna(how='all')
df = df.sort_index()
return df
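    # Illustrative example (added comment; the ticker below is hypothetical):
    #
    #     feeder.fetch({'fh_ticker_example': 'My Tracker'})
    #
    # selects that tracker and relabels the resulting column 'My Tracker',
    # since dict inputs are used both for selection and renaming.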
def fetch_metadata(self):
"""
        Returns the full metadata table of the FH trackers, which is useful for building custom
        filters and inspecting what is in the database.
        :return: pandas DataFrame
"""
sql_query = 'SELECT * FROM "trackers_description"'
df = pd.read_sql(sql=sql_query, con=self.conn)
return df
def filter_fetch(self, filter_dict, ret='series'):
"""
Grabs the trackers from the FH database that satisfy the criteria given by 'filter_dict'.
:param filter_dict: dict. Keys must be column names from the metadata table. Values must be
either str or list of str
        :param ret: If 'series', returns a dataframe with the tracker series that satisfy the conditions.
                    If 'tickers', returns a list of the tickers that satisfy the conditions.
:return: list or pandas DataFrame
"""
assert type(filter_dict) is dict, "'filter_dict' must be a dict"
assert len(filter_dict) > 0, "'filter_dict' is empty"
        assert ret.lower() in ['series', 'tickers'], "'ret' must be either 'series' or 'tickers'"
desc_query = 'SELECT fh_ticker FROM trackers_description WHERE '
for col in filter_dict.keys():
if type(filter_dict[col]) is list:
desc_query = desc_query + col + " IN ('" + "', '".join(filter_dict[col]) + "')"
else:
desc_query = desc_query + col + f" IN ('{filter_dict[col]}')"
desc_query = desc_query + ' and '
desc_query = desc_query[:-5]
df = pd.read_sql(sql=desc_query, con=self.conn)
tickers = df.values.flatten().tolist()
if ret == 'tickers':
return tickers
df = self.fetch(tickers)
return df
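    # Illustrative example (added comment; the column names are hypothetical --
    # see filter_parameters below for the real ones):
    #
    #     feeder.filter_fetch({'country': 'Brazil',
    #                          'asset_class': ['equity', 'fx']})
    #
    # joins the conditions with AND and returns the matching tracker series;
    # pass ret='tickers' to get just the list of tickers instead.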
def filter_parameters(self):
"""
Grabs the possible columns and their respective unique values from the metadata table.
:return: dict. Keys are the column names, values are list of unique values of the column.
"""
df = self.fetch_metadata()
param_dict = {}
for col in df.columns:
param_dict[col] = df[col].unique().tolist()
return param_dict
def fetch_everything(self):
sql_query = 'SELECT time_stamp, fh_ticker, value FROM "trackers"'
df = pd.read_sql(sql=sql_query, con=self.conn)
df = df.pivot(index='time_stamp', columns='fh_ticker', values='value')
df.index = pd.to_datetime(df.index)
df = df.dropna(how='all')
df = df.sort_index()
return df
class FocusFeeder(object):
def __init__(self, db_connect):
"""
Feeder construction
:param db_connect: sql connection engine from sqlalchemy
"""
self.conn = db_connect.connection
def fetch(self, index='ipca', frequency='yearly', prediction_scope=None,
dt_ini=None, dt_end=None):
"""
        Grabs data from the database and pivots the results into a dataframe. To ensure consistency, the function can
        only take one index and one frequency at a time. Only 'prediction_scope' can be a list.
If no prediction scope is passed, all available prediction scopes are returned.
:param index: String containing the name of the index.
:param frequency: String. 'yearly', 'monthly' or 'quarterly' (availability depends on the index)
:param prediction_scope: string, float or list. Years that the forecasts are for.
:param dt_ini: string. Initial date for the series
:param dt_end: string. End date for the series
:return: pandas DataFrame with the pivoted data.
"""
# Error Checking
self._basic_assertions(index, frequency, prediction_scope)
# Handle formats
index, frequency, prediction_scope, dt_ini, dt_end, pivot \
= self._map_inputs(index, frequency, prediction_scope, dt_ini, dt_end)
# build sql query
sql_query = self._build_sql_query(index, frequency, prediction_scope, dt_ini, dt_end)
# get data
df = pd.read_sql(sql=sql_query, con=self.conn)
df = df.drop_duplicates()
# pivoting
df = df.pivot(index='date', columns=pivot, values='value')
df.index = pd.to_datetime(df.index)
return df
def years_ahead(self, index='IPCA', years=1, dt_ini=None, dt_end=None):
"""
        Builds a series of forecasts a fixed number of years ahead by day-weighting the two surrounding
        calendar-year projections. The 'mean' metric is used by default because further projections change smoothly.
"""
# Error checking
self._basic_assertions_years_ahead(index, years)
# Handle formats
index, dt_ini, dt_end = self._map_inputs_years_ahead(index, dt_ini, dt_end)
# grabs the index for all available years for each date
df = self.fetch(index=index, frequency='yearly', prediction_scope=None,
dt_ini=dt_ini, dt_end=dt_end)
# creates the new dataframe
df_weighted = pd.DataFrame(index=df.index)
df_weighted[index + ' ' + str(years) + ' year ahead'] = np.nan
# days until year end
df_weighted['D2YE'] = ((df_weighted.index + pd.offsets.YearEnd()) -
pd.to_datetime(df_weighted.index.tolist())).days
for ind in df_weighted.index:
if ind.day == 31 and ind.month == 12:
df_weighted.loc[ind, 'D2YE'] = 0
# loops on each date
for date in df_weighted.index:
df_weighted.loc[date, index + ' ' + str(years) + ' year ahead'] = \
(df.loc[date, str(date.year + years - 1)] * df_weighted.loc[date, 'D2YE'] +
df.loc[date, str(date.year + years)] * (365 - df_weighted.loc[date, 'D2YE'])) / 365
df = df_weighted[[index + ' ' + str(years) + ' year ahead']].interpolate()
df.index = pd.to_datetime(df.index)
return df
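    # Added explanatory note: the loop above produces a constant-horizon series
    # by day-weighting the two surrounding calendar-year forecasts,
    #
    #     value = (F[y + years - 1] * D2YE + F[y + years] * (365 - D2YE)) / 365
    #
    # where D2YE is the number of days left until year end, so on 31 December
    # all of the weight falls on F[y + years].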
@staticmethod
def _basic_assertions(index, frequency, prediction_scope):
"""Check basic assertions"""
assert type(index) is str, 'index must be a string'
assert type(frequency) is str, 'frequency must be a string'
@staticmethod
def _map_inputs(index, frequency, prediction_scope, dt_ini, dt_end):
"""Handle formats of the inputs"""
# index
if type(index) is str:
index = index.lower()
elif type(index) is list:
index = [x.lower() for x in index]
# frequency
frequency = frequency.lower()
# prediction_scope
if type(prediction_scope) is str:
prediction_scope = prediction_scope.lower()
elif type(prediction_scope) is list:
prediction_scope = [str(x).lower() for x in prediction_scope]
elif prediction_scope is None:
prediction_scope = None
else:
prediction_scope = str(prediction_scope).lower()
# dates
if dt_ini is None:
dt_ini = '1900-01-01'
if dt_end is None:
dt_end = datetime.now().strftime('%Y-%m-%d')
        # pivot variable (while we have no metrics, it's always the prediction scope)
pivot = 'prediction_scope'
return index, frequency, prediction_scope, dt_ini, dt_end, pivot
@staticmethod
def _build_sql_query(index, frequency, prediction_scope, dt_ini, dt_end):
sql_query = 'SELECT DATE, VALUE, PREDICTION_SCOPE FROM "focus_survey" WHERE '
# index (must not be None)
if type(index) is str:
sql_query = sql_query + "lower(INDEX) IN ('" + index + "')"
elif type(index) is list:
sql_query = sql_query + "lower(INDEX) IN ('" + "', '".join(index) + "')"
# frequency
if type(frequency) is str:
sql_query = sql_query + " AND lower(FREQUENCY) IN ('" + frequency + "')"
elif type(frequency) is list:
sql_query = sql_query + " AND lower(FREQUENCY) IN ('" + "', '".join(frequency) + "')"
# prediction scope
if type(prediction_scope) is str:
sql_query = sql_query + " AND lower(PREDICTION_SCOPE) IN ('" + prediction_scope + "')"
elif type(prediction_scope) is list:
sql_query = sql_query + " AND lower(PREDICTION_SCOPE) IN ('" + "', '".join(prediction_scope) + "')"
sql_query = sql_query + " AND DATE BETWEEN '" + dt_ini + "' AND '" + dt_end + "'"
sql_query = sql_query + ' ORDER BY DATE;'
return sql_query
@staticmethod
def _basic_assertions_years_ahead(index, years):
"""Check basic assertions"""
assert type(index) is str, 'index must be a string'
        assert (type(years) is int) and (1 <= years <= 4), 'number of years must be an integer between 1 and 4'
@staticmethod
def _map_inputs_years_ahead(index, dt_ini, dt_end):
"""Handles the format of the inputs of the years_ahead method"""
index = index.lower()
# dates
if dt_ini is None:
dt_ini = '1900-01-01'
if dt_end is None:
dt_end = datetime.now().strftime('%Y-%m-%d')
return index, dt_ini, dt_end
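if __name__ == '__main__':
    # Minimal usage sketch added for illustration only. The connection string,
    # database name and tracker metadata are placeholders / assumptions; the
    # FinanceHub schema must already exist for this to run.
    from sqlalchemy import create_engine

    engine = create_engine('postgresql+psycopg2://user:password@localhost/financehub')
    db_connect = engine.connect()  # exposes the raw DBAPI connection via .connection
    trackers = TrackerFeeder(db_connect)
    print(trackers.filter_parameters())

    focus = FocusFeeder(db_connect)
    print(focus.years_ahead(index='IPCA', years=1, dt_ini='2015-01-01').tail())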
|
diagrams/outscale/__init__.py | analyticsftw/diagrams | 17,037 | 6959 | <reponame>analyticsftw/diagrams<filename>diagrams/outscale/__init__.py
from diagrams import Node
class _Outscale(Node):
_provider = "outscale"
_icon_dir = "resources/outscale"
fontcolor = "#ffffff"
|
deep_qa/layers/wrappers/output_mask.py | richarajpal/deep_qa | 459 | 6962 | <reponame>richarajpal/deep_qa
from overrides import overrides
from ..masked_layer import MaskedLayer
class OutputMask(MaskedLayer):
"""
This Layer is purely for debugging. You can wrap this on a layer's output to get the mask
output by that layer as a model output, for easier visualization of what the model is actually
doing.
Don't try to use this in an actual model.
"""
@overrides
def compute_mask(self, inputs, mask=None):
return None
@overrides
def call(self, inputs, mask=None): # pylint: disable=unused-argument
return mask
|
regipy/exceptions.py | kamnon/regipy | 190 | 6997 | class RegipyException(Exception):
"""
This is the parent exception for all regipy exceptions
"""
pass
class RegipyGeneralException(RegipyException):
"""
General exception
"""
pass
class RegistryValueNotFoundException(RegipyException):
pass
class NoRegistrySubkeysException(RegipyException):
pass
class NoRegistryValuesException(RegipyException):
pass
class RegistryKeyNotFoundException(RegipyException):
pass
class UnidentifiedHiveException(RegipyException):
pass
class RegistryRecoveryException(RegipyException):
pass
class RegistryParsingException(RegipyException):
"""
Raised when there is a parsing error, most probably a corrupted hive
"""
pass
class NtSidDecodingException(RegipyException):
"""
Raised when the binary Windows NT SID representation can not be decoded
"""
|
histoGAN.py | mahmoudnafifi/HistoGAN | 169 | 7005 | <filename>histoGAN.py
"""
If you find this code useful, please cite our paper:
<NAME>, <NAME>, and <NAME>. "HistoGAN:
Controlling Colors of GAN-Generated and Real Images via Color Histograms."
In CVPR, 2021.
@inproceedings{afifi2021histogan,
title={Histo{GAN}: Controlling Colors of {GAN}-Generated and Real Images via
Color Histograms},
author={<NAME> Brubaker, <NAME>. and Brown, <NAME>.},
booktitle={CVPR},
year={2021}
}
"""
from tqdm import tqdm
from histoGAN import Trainer, NanException
from histogram_classes.RGBuvHistBlock import RGBuvHistBlock
from datetime import datetime
import torch
import argparse
from retry.api import retry_call
import os
from PIL import Image
from torchvision import transforms
import numpy as np
SCALE = 1 / np.sqrt(2.0)
def train_from_folder(
data='./dataset/',
results_dir='./results',
models_dir='./models',
name='test',
new=False,
load_from=-1,
image_size=128,
network_capacity=16,
transparent=False,
batch_size=2,
gradient_accumulate_every=8,
num_train_steps=150000,
learning_rate=2e-4,
num_workers=None,
save_every=1000,
generate=False,
save_noise_latent=False,
target_noise_file=None,
target_latent_file=None,
num_image_tiles=8,
trunc_psi=0.75,
fp16=False,
fq_layers=[],
fq_dict_size=256,
attn_layers=[],
hist_method='inverse-quadratic',
hist_resizing='sampling',
hist_sigma=0.02,
hist_bin=64,
hist_insz=150,
alpha=2,
target_hist=None,
aug_prob=0.0,
dataset_aug_prob=0.0,
aug_types=None):
model = Trainer(
name,
results_dir,
models_dir,
batch_size=batch_size,
gradient_accumulate_every=gradient_accumulate_every,
image_size=image_size,
network_capacity=network_capacity,
transparent=transparent,
lr=learning_rate,
num_workers=num_workers,
save_every=save_every,
trunc_psi=trunc_psi,
fp16=fp16,
fq_layers=fq_layers,
fq_dict_size=fq_dict_size,
attn_layers=attn_layers,
hist_insz=hist_insz,
hist_bin=hist_bin,
hist_sigma=hist_sigma,
hist_resizing=hist_resizing,
hist_method=hist_method,
aug_prob=aug_prob,
dataset_aug_prob=dataset_aug_prob,
aug_types=aug_types
)
if not new:
model.load(load_from)
else:
model.clear()
if generate:
now = datetime.now()
timestamp = now.strftime("%m-%d-%Y_%H-%M-%S")
if save_noise_latent and not os.path.exists('temp'):
os.mkdir('./temp')
if save_noise_latent and not os.path.exists(f'./temp/{name}'):
os.mkdir(f'./temp/{name}')
if target_hist is None:
raise Exception('No target histogram or image is given')
extension = os.path.splitext(target_hist)[1]
if extension == '.npy':
hist = np.load(target_hist)
h = torch.from_numpy(hist).to(device=torch.cuda.current_device())
if num_image_tiles > 1:
num_image_tiles = num_image_tiles - num_image_tiles % 2
for i in range(int(np.log2(num_image_tiles))):
h = torch.cat((h, h), dim=0)
samples_name = ('generated-' +
f'{os.path.basename(os.path.splitext(target_hist)[0])}'
f'-{timestamp}')
model.evaluate(samples_name, hist_batch=h,
num_image_tiles=num_image_tiles,
save_noise_latent=save_noise_latent,
load_noise_file=target_noise_file,
load_latent_file=target_latent_file)
print(f'sample images generated at {results_dir}/{name}/{samples_name}')
elif str.lower(extension) == '.jpg' or str.lower(extension) == '.png':
histblock = RGBuvHistBlock(insz=hist_insz, h=hist_bin,
resizing=hist_resizing, method=hist_method,
sigma=hist_sigma,
device=torch.cuda.current_device())
transform = transforms.Compose([transforms.ToTensor()])
img = Image.open(target_hist)
img = torch.unsqueeze(transform(img), dim=0).to(
device=torch.cuda.current_device())
h = histblock(img)
if num_image_tiles > 1:
num_image_tiles = num_image_tiles - num_image_tiles % 2
for i in range(int(np.log2(num_image_tiles))):
h = torch.cat((h, h), dim=0)
samples_name = ('generated-' +
f'{os.path.basename(os.path.splitext(target_hist)[0])}'
f'-{timestamp}')
model.evaluate(samples_name, hist_batch=h,
num_image_tiles=num_image_tiles,
save_noise_latent=save_noise_latent,
load_noise_file=target_noise_file,
load_latent_file=target_latent_file)
print(f'sample images generated at {results_dir}/{name}/{samples_name}')
elif extension == '':
files = [os.path.join(target_hist, f) for f in os.listdir(target_hist) if
os.path.isfile(os.path.join(target_hist, f))]
histblock = RGBuvHistBlock(insz=hist_insz, h=hist_bin,
resizing=hist_resizing, method=hist_method,
sigma=hist_sigma,
device=torch.cuda.current_device())
transform = transforms.Compose([transforms.ToTensor()])
for f in files:
extension = os.path.splitext(f)[1]
if extension == '.npy':
hist = np.load(f)
h = torch.from_numpy(hist).to(device=torch.cuda.current_device())
        elif (str.lower(extension) == '.jpg' or
              str.lower(extension) == '.png'):
img = Image.open(f)
img = torch.unsqueeze(transform(img), dim=0).to(
device=torch.cuda.current_device())
h = histblock(img)
else:
print(f'Warning: File extension of {f} is not supported.')
continue
if num_image_tiles > 1:
num_image_tiles = num_image_tiles - num_image_tiles % 2
for i in range(int(np.log2(num_image_tiles))):
h = torch.cat((h, h), dim=0)
samples_name = ('generated-' +
f'{os.path.basename(os.path.splitext(f)[0])}'
f'-{timestamp}')
model.evaluate(samples_name, hist_batch=h,
num_image_tiles=num_image_tiles,
save_noise_latent=save_noise_latent,
load_noise_file=target_noise_file,
load_latent_file=target_latent_file)
print(f'sample images generated at {results_dir}/{name}/'
f'{samples_name}')
else:
print('The file extension of target image is not supported.')
raise NotImplementedError
return
print('\nStart training....\n')
print(f'Alpha = {alpha}')
model.set_data_src(data)
for _ in tqdm(range(num_train_steps - model.steps), mininterval=10.,
desc=f'{name}<{data}>'):
retry_call(model.train, fargs=[alpha], tries=3, exceptions=NanException)
if _ % 50 == 0:
model.print_log()
def get_args():
parser = argparse.ArgumentParser(description='Train/Test HistoGAN.')
parser.add_argument('--data', dest='data', default='./dataset/')
parser.add_argument('--results_dir', dest='results_dir',
default='./results_HistoGAN')
parser.add_argument('--models_dir', dest='models_dir', default='./models')
parser.add_argument('--target_hist', dest='target_hist', default=None)
parser.add_argument('--name', dest='name', default='histoGAN_model')
parser.add_argument('--new', dest='new', default=False)
parser.add_argument('--load_from', dest='load_from', default=-1)
parser.add_argument('--image_size', dest='image_size', default=256, type=int)
parser.add_argument('--network_capacity', dest='network_capacity', default=16,
type=int)
parser.add_argument('--transparent', dest='transparent', default=False)
parser.add_argument('--batch_size', dest='batch_size', default=2, type=int)
parser.add_argument('--gradient_accumulate_every',
dest='gradient_accumulate_every', default=8, type=int)
parser.add_argument('--num_train_steps', dest='num_train_steps',
default=1500000, type=int)
parser.add_argument('--learning_rate', dest='learning_rate', default=2e-4,
type=float)
parser.add_argument('--num_workers', dest='num_workers', default=None)
parser.add_argument('--save_every', dest='save_every', default=5000,
type=int)
parser.add_argument('--generate', dest='generate', default=False)
parser.add_argument('--save_noise_latent', dest='save_n_l', default=False)
parser.add_argument('--target_noise_file', dest='target_n', default=None)
parser.add_argument('--target_latent_file', dest='target_l', default=None)
parser.add_argument('--num_image_tiles', dest='num_image_tiles',
default=16, type=int)
parser.add_argument('--trunc_psi', dest='trunc_psi', default=0.75,
type=float)
  parser.add_argument('--fp16', dest='fp16', default=False)
parser.add_argument('--fq_layers', dest='fq_layers', default=[])
parser.add_argument('--fq_dict_size', dest='fq_dict_size', default=256,
type=int)
parser.add_argument('--attn_layers', dest='attn_layers', default=[])
parser.add_argument('--gpu', dest='gpu', default=0, type=int)
parser.add_argument('--hist_bin', dest='hist_bin', default=64, type=int)
parser.add_argument('--hist_insz', dest='hist_insz', default=150, type=int)
parser.add_argument('--hist_method', dest='hist_method',
default='inverse-quadratic')
parser.add_argument('--hist_resizing', dest='hist_resizing',
default='interpolation')
parser.add_argument('--hist_sigma', dest='hist_sigma', default=0.02,
type=float)
parser.add_argument('--alpha', dest='alpha', default=2, type=float)
parser.add_argument('--aug_prob', dest='aug_prob', default=0.0, type=float,
help='Probability of discriminator augmentation. It '
'applies operations specified in --aug_types.')
parser.add_argument('--dataset_aug_prob', dest='dataset_aug_prob',
default=0.0, type=float,
help='Probability of dataset augmentation. It applies '
'random cropping')
parser.add_argument('--aug_types', dest='aug_types',
default=['translation', 'cutout'], nargs='+',
help='Options include: translation, cutout, and color')
return parser.parse_args()
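# Illustrative command-line usage (paths and file names below are placeholders, not from the original docs):
#   Training:   python histoGAN.py --name histoGAN_model --data ./dataset/ --image_size 256
#   Generation: python histoGAN.py --name histoGAN_model --generate True --target_hist ./target_image.jpg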
if __name__ == "__main__":
args = get_args()
torch.cuda.set_device(args.gpu)
train_from_folder(
data=args.data,
results_dir=args.results_dir,
models_dir=args.models_dir,
name=args.name,
new=args.new,
load_from=args.load_from,
image_size=args.image_size,
network_capacity=args.network_capacity,
transparent=args.transparent,
batch_size=args.batch_size,
gradient_accumulate_every=args.gradient_accumulate_every,
num_train_steps=args.num_train_steps,
learning_rate=args.learning_rate,
num_workers=args.num_workers,
save_every=args.save_every,
generate=args.generate,
save_noise_latent=args.save_n_l,
target_noise_file=args.target_n,
target_latent_file=args.target_l,
num_image_tiles=args.num_image_tiles,
trunc_psi=args.trunc_psi,
fp16=args.fp16,
fq_layers=args.fq_layers,
fq_dict_size=args.fq_dict_size,
attn_layers=args.attn_layers,
hist_method=args.hist_method,
hist_resizing=args.hist_resizing,
hist_sigma=args.hist_sigma,
hist_bin=args.hist_bin,
hist_insz=args.hist_insz,
target_hist=args.target_hist,
alpha=args.alpha,
aug_prob=args.aug_prob,
dataset_aug_prob=args.dataset_aug_prob,
aug_types=args.aug_types
)
|
benchmark/python/ffi/benchmark_ffi.py | grygielski/incubator-mxnet | 211 | 7007 | <filename>benchmark/python/ffi/benchmark_ffi.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import timeit
import itertools
import argparse
import os
class OpArgMngr(object):
"""Operator argument manager for storing operator workloads."""
args = {}
@staticmethod
def add_workload(funcname, *args, **kwargs):
if "_specifier" not in kwargs:
_specifier = funcname
else:
_specifier = kwargs["_specififer"]
del kwargs["_specififer"]
if _specifier in OpArgMngr.args:
raise ValueError("duplicate {}".format(_specifier))
OpArgMngr.args[_specifier] = {'args': args, 'kwargs': kwargs, 'funcname': funcname}
def generate_workloads():
array_pool = {}
shapes = []
for ndim in range(4):
shapes.extend(list(itertools.product(range(4), repeat=ndim)))
for shape in shapes:
name = 'x'.join(str(i) for i in shape)
if name in array_pool:
raise ValueError("duplicate array {}".format(name))
array_pool[name] = dnp.ones(shape)
return array_pool
def prepare_workloads():
pool = generate_workloads()
OpArgMngr.add_workload("zeros", (2, 2))
OpArgMngr.add_workload("full", (2, 2), 10)
OpArgMngr.add_workload("identity", 3)
OpArgMngr.add_workload("ones", (2, 2))
OpArgMngr.add_workload("einsum", "ii", pool['2x2'], optimize=False)
OpArgMngr.add_workload("unique", pool['1'], return_index=True, return_inverse=True, return_counts=True, axis=-1)
OpArgMngr.add_workload("dstack", (pool['2x1'], pool['2x1'], pool['2x1'], pool['2x1']))
OpArgMngr.add_workload("polyval", dnp.arange(10), pool['2x2'])
OpArgMngr.add_workload("ediff1d", pool['2x2'], pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("nan_to_num", pool['2x2'])
OpArgMngr.add_workload("tri", 2, 3, 4)
OpArgMngr.add_workload("tensordot", pool['2x2'], pool['2x2'], ((1, 0), (0, 1)))
OpArgMngr.add_workload("cumsum", pool['3x2'], axis=0, out=pool['3x2'])
OpArgMngr.add_workload("random.shuffle", pool['3'])
OpArgMngr.add_workload("equal", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("not_equal", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("less", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("greater_equal", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("less_equal", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("maximum", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("minimum", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("sum", pool['2x2'], axis=0, keepdims=True, out=pool['1x2'])
OpArgMngr.add_workload("std", pool['2x2'], axis=0, ddof=0, keepdims=True, out=pool['1x2'])
OpArgMngr.add_workload("var", pool['2x2'], axis=0, ddof=1, keepdims=True, out=pool['1x2'])
OpArgMngr.add_workload("average", pool['2x2'], weights=pool['2'], axis=1, returned=True)
OpArgMngr.add_workload("histogram", pool['2x2'], bins=10, range=(0.0, 10.0))
OpArgMngr.add_workload("add", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("cross", pool['2'], pool['2'])
OpArgMngr.add_workload("linalg.eig", pool['3x3'])
OpArgMngr.add_workload("linalg.eigh", pool['3x3'])
OpArgMngr.add_workload("linalg.det", pool['3x3'])
OpArgMngr.add_workload("linalg.slogdet", pool['3x3'])
OpArgMngr.add_workload("linalg.matrix_rank", pool['3x3'], pool['1'], hermitian=False)
OpArgMngr.add_workload("linalg.svd", pool['3x3'])
OpArgMngr.add_workload("linalg.cholesky", pool['1x1'])
OpArgMngr.add_workload("linalg.qr", pool['3x3'])
OpArgMngr.add_workload("linalg.lstsq", pool['2x1'], pool['2'], rcond=None)
OpArgMngr.add_workload("linalg.eigvals", pool['1x1'])
OpArgMngr.add_workload("linalg.eigvalsh", pool['1x1'], UPLO='L')
OpArgMngr.add_workload("linalg.inv", pool['1x1'])
OpArgMngr.add_workload("linalg.pinv", pool['2x3x3'], pool['1'], hermitian=False)
OpArgMngr.add_workload("linalg.solve", pool['1x1'], pool['1'])
OpArgMngr.add_workload("linalg.tensorinv", pool['1x1'], ind=2)
OpArgMngr.add_workload("linalg.norm", pool['3x3'])
OpArgMngr.add_workload("linalg.tensorsolve", pool['1x1x1'], pool['1x1x1'], (2, 0, 1))
OpArgMngr.add_workload("tile", pool['2x2'], 1)
OpArgMngr.add_workload("trace", pool['2x2'])
OpArgMngr.add_workload("transpose", pool['2x2'])
OpArgMngr.add_workload("split", pool['3x3'], (0, 1, 2), axis=1)
OpArgMngr.add_workload("vstack", (pool['3x3'], pool['3x3'], pool['3x3']))
OpArgMngr.add_workload("argmax", pool['3x2'], axis=-1)
OpArgMngr.add_workload("argmin", pool['3x2'], axis=-1)
OpArgMngr.add_workload("atleast_1d", pool['2'], pool['2x2'])
OpArgMngr.add_workload("atleast_2d", pool['2'], pool['2x2'])
OpArgMngr.add_workload("atleast_3d", pool['2'], pool['2x2'])
OpArgMngr.add_workload("argsort", pool['3x2'], axis=-1)
OpArgMngr.add_workload("sort", pool['3x2'], axis=-1)
OpArgMngr.add_workload("indices", dimensions=(1, 2, 3))
OpArgMngr.add_workload("subtract", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("multiply", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("mod", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("remainder", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("divide", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("true_divide", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("power", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("lcm", pool['2x2'].astype('int32'), pool['2x2'].astype('int32'))
OpArgMngr.add_workload("diff", pool['2x2'], n=1, axis=-1)
OpArgMngr.add_workload("inner", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("random.multinomial", n=2, pvals=[1/6.]*6, size=(2,2))
OpArgMngr.add_workload("random.rand", 3, 2)
OpArgMngr.add_workload("random.randn", 2, 2)
OpArgMngr.add_workload("nonzero", pool['2x2'])
OpArgMngr.add_workload("tril", pool['2x2'], k=0)
OpArgMngr.add_workload("random.choice", pool['2'], size=(2, 2))
OpArgMngr.add_workload("take", pool['2'], dnp.array([1,0], dtype='int64'))
OpArgMngr.add_workload("clip", pool['2x2'], 0, 1)
OpArgMngr.add_workload("expand_dims", pool['2x2'], axis=0)
OpArgMngr.add_workload("broadcast_to", pool['2x2'], (2, 2, 2))
OpArgMngr.add_workload("full_like", pool['2x2'], 2)
OpArgMngr.add_workload("zeros_like", pool['2x2'])
OpArgMngr.add_workload("ones_like", pool['2x2'])
OpArgMngr.add_workload("bitwise_and", pool['2x2'].astype(int), pool['2x2'].astype(int))
OpArgMngr.add_workload("bitwise_xor", pool['2x2'].astype(int), pool['2x2'].astype(int))
OpArgMngr.add_workload("bitwise_or", pool['2x2'].astype(int), pool['2x2'].astype(int))
OpArgMngr.add_workload("copysign", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("arctan2", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("hypot", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("ldexp", pool['2x2'].astype(int), pool['2x2'].astype(int))
OpArgMngr.add_workload("logical_and", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("logical_or", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("logical_xor", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("random.uniform", low=0, high=1, size=1)
OpArgMngr.add_workload("random.exponential", scale=2, size=(2,2))
OpArgMngr.add_workload("random.rayleigh", scale=2, size=(2,2))
OpArgMngr.add_workload("random.weibull", a=2, size=(2,2))
OpArgMngr.add_workload("random.pareto", a=2, size=(2,2))
OpArgMngr.add_workload("random.power", a=2, size=(2,2))
OpArgMngr.add_workload("random.logistic", loc=2, scale=2, size=(2,2))
OpArgMngr.add_workload("random.gumbel", loc=2, scale=2, size=(2,2))
OpArgMngr.add_workload("where", pool['2x3'], pool['2x3'], pool['2x1'])
OpArgMngr.add_workload("may_share_memory", pool['2x3'][:0], pool['2x3'][:1])
OpArgMngr.add_workload('squeeze', pool['2x2'], axis=None)
OpArgMngr.add_workload("pad", pool['2x2'], pad_width=((1,2),(1,2)), mode="constant")
OpArgMngr.add_workload("prod", pool['2x2'], axis=1, dtype="float64", keepdims=False)
OpArgMngr.add_workload("around", pool['2x2'], decimals=0)
OpArgMngr.add_workload("round", pool['2x2'], decimals=1)
OpArgMngr.add_workload("repeat", pool['2x2'], repeats=1, axis=None)
OpArgMngr.add_workload("diagflat", pool['2x2'], k=1)
OpArgMngr.add_workload("diag", pool['2x2'], k=1)
OpArgMngr.add_workload("diagonal", pool['2x2x2'], offset=-1, axis1=0, axis2=1)
OpArgMngr.add_workload("diag_indices_from", pool['2x2'])
OpArgMngr.add_workload("bincount", dnp.arange(3, dtype=int), pool['3'], minlength=4)
OpArgMngr.add_workload("percentile", pool['2x2x2'], 80, axis=0, out=pool['2x2'],\
interpolation='midpoint')
OpArgMngr.add_workload("quantile", pool['2x2x2'], 0.8, axis=0, out=pool['2x2'],\
interpolation='midpoint')
OpArgMngr.add_workload("all", pool['2x2x2'], axis=(0, 1),\
out=dnp.array([False, False], dtype=bool), keepdims=False)
OpArgMngr.add_workload("any", pool['2x2x2'], axis=(0, 1),\
out=dnp.array([False, False], dtype=bool), keepdims=False)
OpArgMngr.add_workload("roll", pool["2x2"], 1, axis=0)
OpArgMngr.add_workload("rot90", pool["2x2"], 2)
OpArgMngr.add_workload("column_stack", (pool['3x3'], pool['3x3'], pool['3x3']))
OpArgMngr.add_workload("hstack", (pool['3x3'], pool['3x3'], pool['3x3']))
OpArgMngr.add_workload("triu", pool['3x3'])
OpArgMngr.add_workload("array_split", pool['2x2'], 2, axis=1)
OpArgMngr.add_workload("vsplit", pool['2x2'], 2)
OpArgMngr.add_workload("hsplit", pool['2x2'], 2)
OpArgMngr.add_workload("dsplit", pool['2x2x2'], 2)
OpArgMngr.add_workload("arange", 10)
OpArgMngr.add_workload("concatenate", (pool['1x2'], pool['1x2'], pool['1x2']), axis=0)
OpArgMngr.add_workload("append", pool['2x2'], pool['1x2'], axis=0)
OpArgMngr.add_workload("insert", pool['3x2'], 1, pool['1x1'], axis=0)
OpArgMngr.add_workload("delete", pool['3x2'], 1, axis=0)
OpArgMngr.add_workload("blackman", 12)
OpArgMngr.add_workload("eye", 5)
OpArgMngr.add_workload("hamming", 12)
OpArgMngr.add_workload("hanning", 12)
OpArgMngr.add_workload("linspace", 0, 10, 8, endpoint=False)
OpArgMngr.add_workload("logspace", 2.0, 3.0, num=4, base=2.0, dtype=onp.float32)
OpArgMngr.add_workload("matmul", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("mean", pool['2x2'], axis=0, keepdims=True)
OpArgMngr.add_workload("random.gamma", 1, size=(2, 3))
OpArgMngr.add_workload("random.normal", 1, size=(2, 3))
OpArgMngr.add_workload("max", pool["2x2"], axis=0, out=pool['2'], keepdims=False)
OpArgMngr.add_workload("min", pool["2x2"], axis=0, out=pool['2'], keepdims=False)
OpArgMngr.add_workload("amax", pool["2x2"], axis=1, out=pool['2'], keepdims=False)
OpArgMngr.add_workload("amin", pool["2x2"], axis=1, out=pool['2'], keepdims=False)
unary_ops = ['negative', 'reciprocal', 'abs', 'sign', 'rint', 'ceil', 'floor',
'bitwise_not', 'trunc', 'fix', 'square', 'sqrt', 'cbrt', 'exp',
'log', 'log10', 'log2', 'log1p', 'expm1', 'logical_not', 'isnan',
'isinf', 'isposinf', 'isneginf', 'isfinite', 'sin', 'cos', 'tan',
'arcsin', 'arccos', 'arctan', 'degrees', 'radians', 'sinh', 'cosh',
'tanh', 'arcsinh', 'arccosh', 'arctanh'] # 'rad2deg', 'deg2rad' cannot run without tvm
for unary_op in unary_ops:
if unary_op == "bitwise_not":
OpArgMngr.add_workload(unary_op, dnp.ones((2, 2), dtype=int))
else:
OpArgMngr.add_workload(unary_op, pool['2x2'])
def benchmark_helper(f, *args, **kwargs):
number = 10000
return timeit.timeit(lambda: f(*args, **kwargs), number=number) / number
def get_op(module, funcname):
funcname = funcname.split(".")
for fname in funcname:
module = getattr(module, fname)
return module
def run_benchmark(packages):
results = {}
for (k, v) in OpArgMngr.args.items():
result = {}
for (name, package) in packages.items():
print('{}.{} running...'.format(name, k))
op = get_op(package["module"], v["funcname"])
args = [package["data"](arg) for arg in v["args"]]
kwargs = {k: package["data"](v) for (k, v) in v["kwargs"].items()}
benchmark = benchmark_helper(op, *args, **kwargs)
result[name] = benchmark
results[k] = result
return results
def show_results(results):
print("{:>24}{:>24}{:>24}".format("name", "package", "time(us)"))
for (specifier, d) in results.items():
for (k, v) in d.items():
print("{:>24}{:>24}{:>24}".format(specifier, k, v * 10 ** 6))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('ffi_type')
parsed = parser.parse_args()
if parsed.ffi_type == "cython":
os.environ['MXNET_ENABLE_CYTHON'] = '1'
os.environ['MXNET_ENFORCE_CYTHON'] = '1'
elif parsed.ffi_type == "ctypes":
os.environ['MXNET_ENABLE_CYTHON'] = '0'
else:
raise ValueError("unknown ffi_type {}",format(parsed.ffi_type))
os.environ["MXNET_ENGINE_TYPE"] = "NaiveEngine"
import mxnet as mx
import numpy as onp
from mxnet import np as dnp
mx.npx.set_np(dtype=False)
packages = {
"onp": {
"module": onp,
"data": lambda arr: arr.asnumpy() if isinstance(arr, dnp.ndarray) else arr
},
"dnp": {
"module": dnp,
"data": lambda arr: arr
}
}
prepare_workloads()
results = run_benchmark(packages)
show_results(results)
|
decatt/model.py | achyudh/castor | 132 | 7043 | import sys
import math
import numpy as np
from datetime import datetime
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class DecAtt(nn.Module):
def __init__(self, num_units, num_classes, embedding_size, dropout, device=0,
training=True, project_input=True,
use_intra_attention=False, distance_biases=10, max_sentence_length=30):
"""
Create the model based on MLP networks.
:param num_units: size of the networks
:param num_classes: number of classes in the problem
:param embedding_size: size of each word embedding
:param use_intra_attention: whether to use intra-attention model
:param training: whether to create training tensors (optimizer)
        :param project_input: whether to project input embeddings to a
        different dimensionality
:param distance_biases: number of different distances with biases used
in the intra-attention model
"""
super().__init__()
self.arch = "DecAtt"
self.num_units = num_units
self.num_classes = num_classes
self.project_input = project_input
self.embedding_size = embedding_size
self.distance_biases = distance_biases
self.intra_attention = False
self.max_sentence_length = max_sentence_length
self.device = device
self.bias_embedding = nn.Embedding(max_sentence_length,1)
self.linear_layer_project = nn.Linear(embedding_size, num_units, bias=False)
#self.linear_layer_intra = nn.Sequential(nn.Linear(num_units, num_units), nn.ReLU(), nn.Linear(num_units, num_units), nn.ReLU())
self.linear_layer_attend = nn.Sequential(nn.Dropout(p=dropout), nn.Linear(num_units, num_units), nn.ReLU(),
nn.Dropout(p=dropout), nn.Linear(num_units, num_units), nn.ReLU())
self.linear_layer_compare = nn.Sequential(nn.Dropout(p=dropout), nn.Linear(num_units*2, num_units), nn.ReLU(),
nn.Dropout(p=dropout), nn.Linear(num_units, num_units), nn.ReLU())
self.linear_layer_aggregate = nn.Sequential(nn.Dropout(p=dropout), nn.Linear(num_units*2, num_units), nn.ReLU(),
nn.Dropout(p=dropout), nn.Linear(num_units, num_units), nn.ReLU(),
nn.Linear(num_units, num_classes), nn.LogSoftmax())
self.init_weight()
def init_weight(self):
self.linear_layer_project.weight.data.normal_(0, 0.01)
self.linear_layer_attend[1].weight.data.normal_(0, 0.01)
self.linear_layer_attend[1].bias.data.fill_(0)
self.linear_layer_attend[4].weight.data.normal_(0, 0.01)
self.linear_layer_attend[4].bias.data.fill_(0)
self.linear_layer_compare[1].weight.data.normal_(0, 0.01)
self.linear_layer_compare[1].bias.data.fill_(0)
self.linear_layer_compare[4].weight.data.normal_(0, 0.01)
self.linear_layer_compare[4].bias.data.fill_(0)
self.linear_layer_aggregate[1].weight.data.normal_(0, 0.01)
self.linear_layer_aggregate[1].bias.data.fill_(0)
self.linear_layer_aggregate[4].weight.data.normal_(0, 0.01)
self.linear_layer_aggregate[4].bias.data.fill_(0)
#self.word_embedding.weight.data.copy_(torch.from_numpy(self.pretrained_emb))
def attention_softmax3d(self, raw_attentions):
reshaped_attentions = raw_attentions.view(-1, raw_attentions.size(2))
out = nn.functional.softmax(reshaped_attentions, dim=1)
return out.view(raw_attentions.size(0),raw_attentions.size(1),raw_attentions.size(2))
def _transformation_input(self, embed_sent):
embed_sent = self.linear_layer_project(embed_sent)
result = embed_sent
if self.intra_attention:
f_intra = self.linear_layer_intra(embed_sent)
f_intra_t = torch.transpose(f_intra, 1, 2)
raw_attentions = torch.matmul(f_intra, f_intra_t)
time_steps = embed_sent.size(1)
r = torch.arange(0, time_steps)
r_matrix = r.view(1,-1).expand(time_steps,time_steps)
raw_index = r_matrix-r.view(-1,1)
clipped_index = torch.clamp(raw_index,0,self.distance_biases-1)
clipped_index = Variable(clipped_index.long())
if torch.cuda.is_available():
clipped_index = clipped_index.to(self.device)
bias = self.bias_embedding(clipped_index)
bias = torch.squeeze(bias)
raw_attentions += bias
attentions = self.attention_softmax3d(raw_attentions)
attended = torch.matmul(attentions, embed_sent)
result = torch.cat([embed_sent,attended],2)
return result
def attend(self, sent1, sent2, lsize_list, rsize_list):
"""
Compute inter-sentence attention. This is step 1 (attend) in the paper
:param sent1: tensor in shape (batch, time_steps, num_units),
the projected sentence 1
:param sent2: tensor in shape (batch, time_steps, num_units)
        :return: a tuple of 3-d tensors, alpha and beta.
"""
repr1 = self.linear_layer_attend(sent1)
repr2 = self.linear_layer_attend(sent2)
repr2 = torch.transpose(repr2,1,2)
raw_attentions = torch.matmul(repr1, repr2)
#self.mask = generate_mask(lsize_list, rsize_list)
# masked = mask(self.raw_attentions, rsize_list)
#masked = raw_attentions * self.mask
att_sent1 = self.attention_softmax3d(raw_attentions)
beta = torch.matmul(att_sent1, sent2) #input2_soft
raw_attentions_t = torch.transpose(raw_attentions,1,2).contiguous()
#self.mask_t = torch.transpose(self.mask, 1, 2).contiguous()
# masked = mask(raw_attentions_t, lsize_list)
#masked = raw_attentions_t * self.mask_t
att_sent2 = self.attention_softmax3d(raw_attentions_t)
alpha = torch.matmul(att_sent2,sent1) #input1_soft
return alpha, beta
def compare(self, sentence, soft_alignment):
"""
        Apply a feed-forward network to compare one sentence to its
soft alignment with the other.
:param sentence: embedded and projected sentence,
shape (batch, time_steps, num_units)
:param soft_alignment: tensor with shape (batch, time_steps, num_units)
:return: a tensor (batch, time_steps, num_units)
"""
sent_alignment = torch.cat([sentence, soft_alignment],2)
out = self.linear_layer_compare(sent_alignment)
#out, (state, _) = self.lstm_compare(out)
return out
def aggregate(self, v1, v2):
"""
Aggregate the representations induced from both sentences and their
representations
:param v1: tensor with shape (batch, time_steps, num_units)
:param v2: tensor with shape (batch, time_steps, num_units)
:return: logits over classes, shape (batch, num_classes)
"""
v1_sum = torch.sum(v1,1)
v2_sum = torch.sum(v2,1)
out = self.linear_layer_aggregate(torch.cat([v1_sum,v2_sum],1))
return out
def forward(self, sent1, sent2, ext_feats=None, word_to_doc_count=None, raw_sent1=None, raw_sent2=None):
lsize_list = [len(s.split(" ")) for s in raw_sent1]
rsize_list = [len(s.split(" ")) for s in raw_sent2]
sent1 = sent1.permute(0, 2, 1)
sent2 = sent2.permute(0, 2, 1)
sent1 = self._transformation_input(sent1)
sent2 = self._transformation_input(sent2)
alpha, beta = self.attend(sent1, sent2, lsize_list, rsize_list)
v1 = self.compare(sent1, beta)
v2 = self.compare(sent2, alpha)
logits = self.aggregate(v1, v2)
return logits
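# Minimal usage sketch (not part of the original file; hyperparameter values are placeholders):
#   model = DecAtt(num_units=200, num_classes=2, embedding_size=300, dropout=0.2)
#   logits = model(sent1_embed, sent2_embed, raw_sent1=raw1, raw_sent2=raw2)
# where sent1_embed / sent2_embed are (batch, embedding_size, time_steps) tensors and
# raw_sent1 / raw_sent2 are the corresponding whitespace-tokenized sentence strings.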
|
basic_code/networks.py | J-asy/Emotion-FAN | 275 | 7044 | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
import torch
import numpy as np
import cv2
import pdb
def sigmoid(x):
return 1 / (1 + math.exp(-x))
def norm_angle(angle):
norm_angle = sigmoid(10 * (abs(angle) / 0.7853975 - 1))
return norm_angle
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU()
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU()
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out = out + residual
out = self.relu(out)
return out
###''' self-attention; relation-attention '''
class ResNet_AT(nn.Module):
def __init__(self, block, layers, num_classes=1000, end2end=True, at_type=''):
self.inplanes = 64
self.end2end = end2end
super(ResNet_AT, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU()
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.dropout = nn.Dropout(0.5)
self.dropout2 = nn.Dropout(0.6)
self.alpha = nn.Sequential(nn.Linear(512, 1),
nn.Sigmoid())
self.beta = nn.Sequential(nn.Linear(1024, 1),
nn.Sigmoid())
self.pred_fc1 = nn.Linear(512, 7)
self.pred_fc2 = nn.Linear(1024, 7)
self.at_type = at_type
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x='', phrase='train', AT_level='first_level',vectors='',vm='',alphas_from1='',index_matrix=''):
vs = []
alphas = []
assert phrase == 'train' or phrase == 'eval'
assert AT_level == 'first_level' or AT_level == 'second_level' or AT_level == 'pred'
if phrase == 'train':
num_pair = 3
for i in range(num_pair):
f = x[:, :, :, :, i] # x[128,3,224,224]
f = self.conv1(f)
f = self.bn1(f)
f = self.relu(f)
f = self.maxpool(f)
f = self.layer1(f)
f = self.layer2(f)
f = self.layer3(f)
f = self.layer4(f)
f = self.avgpool(f)
f = f.squeeze(3).squeeze(2) # f[1, 512, 1, 1] ---> f[1, 512]
# MN_MODEL(first Level)
vs.append(f)
alphas.append(self.alpha(self.dropout(f)))
vs_stack = torch.stack(vs, dim=2)
alphas_stack = torch.stack(alphas, dim=2)
if self.at_type == 'self-attention':
vm1 = vs_stack.mul(alphas_stack).sum(2).div(alphas_stack.sum(2))
if self.at_type == 'self_relation-attention':
vm1 = vs_stack.mul(alphas_stack).sum(2).div(alphas_stack.sum(2))
betas = []
for i in range(len(vs)):
vs[i] = torch.cat([vs[i], vm1], dim=1)
betas.append(self.beta(self.dropout(vs[i])))
cascadeVs_stack = torch.stack(vs, dim=2)
betas_stack = torch.stack(betas, dim=2)
output = cascadeVs_stack.mul(betas_stack * alphas_stack).sum(2).div((betas_stack * alphas_stack).sum(2))
if self.at_type == 'self-attention':
vm1 = self.dropout(vm1)
pred_score = self.pred_fc1(vm1)
if self.at_type == 'self_relation-attention':
output = self.dropout2(output)
pred_score = self.pred_fc2(output)
return pred_score
if phrase == 'eval':
if AT_level == 'first_level':
f = self.conv1(x)
f = self.bn1(f)
f = self.relu(f)
f = self.maxpool(f)
f = self.layer1(f)
f = self.layer2(f)
f = self.layer3(f)
f = self.layer4(f)
f = self.avgpool(f)
f = f.squeeze(3).squeeze(2) # f[1, 512, 1, 1] ---> f[1, 512]
# MN_MODEL(first Level)
alphas = self.alpha(self.dropout(f))
return f, alphas
if AT_level == 'second_level':
assert self.at_type == 'self_relation-attention'
vms = index_matrix.permute(1, 0).mm(vm) # [381, 21783] -> [21783,381] * [381,512] --> [21783, 512]
vs_cate = torch.cat([vectors, vms], dim=1)
betas = self.beta(self.dropout(vs_cate))
''' keywords: mean_fc ; weight_sourcefc; sum_alpha; weightmean_sourcefc '''
''' alpha * beta '''
weight_catefc = vs_cate.mul(alphas_from1) # [21570,512] * [21570,1] --->[21570,512]
alpha_beta = alphas_from1.mul(betas)
sum_alphabetas = index_matrix.mm(alpha_beta) # [380,21570] * [21570,1] -> [380,1]
weightmean_catefc = index_matrix.mm(weight_catefc).div(sum_alphabetas)
weightmean_catefc = self.dropout2(weightmean_catefc)
pred_score = self.pred_fc2(weightmean_catefc)
return pred_score
if AT_level == 'pred':
if self.at_type == 'self-attention':
pred_score = self.pred_fc1(self.dropout(vm))
return pred_score
''' self-attention; relation-attention '''
def resnet18_at(pretrained=False, **kwargs):
    # Constructs a base ResNet-18 model.
model = ResNet_AT(BasicBlock, [2, 2, 2, 2], **kwargs)
return model
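# Minimal usage sketch (illustrative only; input shape follows the comments inside forward()):
#   model = resnet18_at(at_type='self_relation-attention')
#   scores = model(clip, phrase='train')   # clip: [batch, 3, 224, 224, 3] (three stacked frames)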
|
keras_cv_attention_models/resnest/resnest.py | dcleres/keras_cv_attention_models | 140 | 7051 | <reponame>dcleres/keras_cv_attention_models
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
from keras_cv_attention_models.aotnet import AotNet
from keras_cv_attention_models.download_and_load import reload_model_weights
from keras_cv_attention_models.attention_layers import batchnorm_with_activation, conv2d_no_bias
PRETRAINED_DICT = {
"resnest101": {"imagenet": "63f9ebdcd32529cbc4b4fbbec3d1bb2f"},
"resnest200": {"imagenet": "8e211dcb089b588e18d36ba7cdf92ef0"},
"resnest269": {"imagenet": "4309ed1b0a8ae92f2b1143dc3512c5c7"},
"resnest50": {"imagenet": "eee7b20a229821f730ab205b6afeb369"},
}
def rsoftmax(inputs, groups):
if groups > 1:
nn = tf.reshape(inputs, [-1, 1, groups, inputs.shape[-1] // groups])
# nn = tf.transpose(nn, [0, 2, 1, 3])
nn = tf.nn.softmax(nn, axis=2)
nn = tf.reshape(nn, [-1, 1, 1, inputs.shape[-1]])
else:
nn = keras.layers.Activation("sigmoid")(inputs)
return nn
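# Shape sketch for rsoftmax (illustrative): with groups=2 and an input of shape [batch, 1, 1, 2*filters],
# the logits are reshaped to [batch, 1, 2, filters], softmax is applied over the group axis (axis=2),
# and the result is flattened back to [batch, 1, 1, 2*filters]; with groups=1 a plain sigmoid is used instead.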
def split_attention_conv2d(inputs, filters, kernel_size=3, strides=1, downsample_first=False, groups=2, activation="relu", name=""):
h_axis, w_axis = [2, 3] if K.image_data_format() == "channels_first" else [1, 2]
in_channels = inputs.shape[-1]
conv_strides = strides if downsample_first else 1
if groups == 1:
logits = conv2d_no_bias(inputs, filters, kernel_size, strides=conv_strides, padding="same", name=name and name + "1_")
else:
# Using groups=2 is slow in `mixed_float16` policy
# logits = conv2d_no_bias(inputs, filters * groups, kernel_size, padding="same", groups=groups, name=name and name + "1_")
logits = []
splitted_inputs = tf.split(inputs, groups, axis=-1)
for ii in range(groups):
conv_name = name and name + "1_g{}_".format(ii + 1)
logits.append(conv2d_no_bias(splitted_inputs[ii], filters, kernel_size, strides=conv_strides, padding="same", name=conv_name))
logits = tf.concat(logits, axis=-1)
logits = batchnorm_with_activation(logits, activation=activation, name=name and name + "1_")
if groups > 1:
splited = tf.split(logits, groups, axis=-1)
gap = tf.reduce_sum(splited, axis=0)
else:
gap = logits
gap = tf.reduce_mean(gap, [h_axis, w_axis], keepdims=True)
reduction_factor = 4
inter_channels = max(in_channels * groups // reduction_factor, 32)
atten = keras.layers.Conv2D(inter_channels, kernel_size=1, name=name and name + "2_conv")(gap)
atten = batchnorm_with_activation(atten, activation=activation, name=name and name + "2_")
atten = keras.layers.Conv2D(filters * groups, kernel_size=1, name=name and name + "3_conv")(atten)
atten = rsoftmax(atten, groups)
out = keras.layers.Multiply()([atten, logits])
if groups > 1:
out = tf.split(out, groups, axis=-1)
out = tf.reduce_sum(out, axis=0)
if not downsample_first and strides > 1:
out = keras.layers.ZeroPadding2D(padding=1, name=name and name + "pool_pad")(out)
out = keras.layers.AveragePooling2D(3, strides=2, name=name and name + "pool")(out)
return out
def ResNest(input_shape=(224, 224, 3), stem_type="deep", attn_types="sa", bn_after_attn=False, shortcut_type="avg", pretrained="imagenet", **kwargs):
kwargs.pop("kwargs", None)
model = AotNet(**locals(), **kwargs)
reload_model_weights(model, pretrained_dict=PRETRAINED_DICT, sub_release="resnest", pretrained=pretrained)
return model
def ResNest50(input_shape=(224, 224, 3), num_classes=1000, activation="relu", classifier_activation="softmax", pretrained="imagenet", groups=2, **kwargs):
return ResNest(num_blocks=[3, 4, 6, 3], stem_width=64, model_name="resnest50", **locals(), **kwargs)
def ResNest101(input_shape=(256, 256, 3), num_classes=1000, activation="relu", classifier_activation="softmax", pretrained="imagenet", groups=2, **kwargs):
return ResNest(num_blocks=[3, 4, 23, 3], stem_width=128, model_name="resnest101", **locals(), **kwargs)
def ResNest200(input_shape=(320, 320, 3), num_classes=1000, activation="relu", classifier_activation="softmax", pretrained="imagenet", groups=2, **kwargs):
return ResNest(num_blocks=[3, 24, 36, 3], stem_width=128, model_name="resnest200", **locals(), **kwargs)
def ResNest269(input_shape=(416, 416, 3), num_classes=1000, activation="relu", classifier_activation="softmax", pretrained="imagenet", groups=2, **kwargs):
return ResNest(num_blocks=[3, 30, 48, 8], stem_width=128, model_name="resnest269", **locals(), **kwargs)
|
continuum/datasets/dtd.py | oleksost/continuum | 282 | 7055 | <filename>continuum/datasets/dtd.py
import os
from typing import List
import numpy as np
from torchvision import datasets as torchdata
from continuum.datasets import ImageFolderDataset
from continuum import download
from continuum.tasks import TaskType
class DTD(ImageFolderDataset):
"""Describable Textures Dataset (DTD)
Reference:
* Describing Textures in the Wild
<NAME> and <NAME> and <NAME> and <NAME> and and <NAME>
CVPR 2014
"""
url = "https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz"
def __init__(self, data_path: str, train: bool = True, download: bool = True, split: int = 1):
super().__init__(data_path=data_path, train=train, download=download, data_type=TaskType.IMAGE_PATH)
if not (1 <= int(split) <= 10):
raise ValueError(f"Available splits are [1, ..., 10], not {split}")
self.split = split
def _download(self):
archive_path = os.path.join(self.data_path, "dtd-r1.0.1.tar.gz")
if not os.path.exists(archive_path):
print("Downloading DTD dataset...")
download.download(self.url, self.data_path)
if not os.path.exists(os.path.join(self.data_path, "dtd")):
print("Uncompressing images...")
download.untar(archive_path)
def get_data(self):
x, y, t = self._format(torchdata.ImageFolder(os.path.join(self.data_path, "dtd", "images")).imgs)
if self.train:
index_files = [
os.path.join(self.data_path, "dtd", "labels", f"train{str(self.split)}.txt"),
os.path.join(self.data_path, "dtd", "labels", f"val{str(self.split)}.txt")
]
else:
index_files = [
os.path.join(self.data_path, "dtd", "labels", f"test{str(self.split)}.txt")
]
valid_paths = set()
for index_file in index_files:
with open(index_file) as f:
valid_paths.update(
map(lambda p: os.path.join(self.data_path, "dtd", "images", p.strip()),
f.readlines()
)
)
valid_paths = np.array(list(valid_paths))
indexes = np.isin(x, valid_paths)
return x[indexes], y[indexes], None
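# Minimal usage sketch (illustrative only; the data path is a placeholder):
#   train_split = DTD("/path/to/data", train=True, split=1)
#   x, y, _ = train_split.get_data()   # x: image paths, y: class ids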
|
algorithms/maths/chinese_remainder_theorem.py | hbqdev/algorithms | 22,426 | 7072 | <reponame>hbqdev/algorithms<filename>algorithms/maths/chinese_remainder_theorem.py
from algorithms.maths.gcd import gcd
from typing import List
def solve_chinese_remainder(num : List[int], rem : List[int]):
"""
Computes the smallest x that satisfies the chinese remainder theorem
for a system of equations.
The system of equations has the form:
x % num[0] = rem[0]
x % num[1] = rem[1]
...
x % num[k - 1] = rem[k - 1]
Where k is the number of elements in num and rem, k > 0.
    All numbers in num need to be pairwise coprime, otherwise an exception is raised
returns x: the smallest value for x that satisfies the system of equations
"""
if not len(num) == len(rem):
raise Exception("num and rem should have equal length")
if not len(num) > 0:
raise Exception("Lists num and rem need to contain at least one element")
for n in num:
if not n > 1:
raise Exception("All numbers in num needs to be > 1")
if not _check_coprime(num):
raise Exception("All pairs of numbers in num are not coprime")
k = len(num)
x = 1
while True:
i = 0
while i < k:
if x % num[i] != rem[i]:
break
i += 1
if i == k:
return x
else:
x += 1
def _check_coprime(l : List[int]):
for i in range(len(l)):
for j in range(len(l)):
if i == j:
continue
if gcd(l[i], l[j]) != 1:
return False
return True
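# Minimal usage sketch (added for illustration; not part of the original module):
if __name__ == "__main__":
    # Smallest x with x % 3 == 2, x % 5 == 3 and x % 7 == 2 is 23.
    print(solve_chinese_remainder([3, 5, 7], [2, 3, 2]))  # -> 23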
|
exercises/ja/exc_03_16_01.py | Jette16/spacy-course | 2,085 | 7073 | <reponame>Jette16/spacy-course<gh_stars>1000+
import spacy
nlp = spacy.load("ja_core_news_sm")
text = (
"チックフィレイはジョージア州カレッジパークに本社を置く、"
"チキンサンドを専門とするアメリカのファストフードレストランチェーンです。"
)
# Only perform tokenization
doc = nlp(text)
print([token.text for token in doc])
|
webium/controls/select.py | kejkz/webium | 152 | 7104 | from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.remote.webelement import WebElement
class Select(WebElement):
"""
Implements logic to work with Web List UI elements
"""
@property
def is_multiple(self):
value = self.get_attribute('multiple')
return value is not None and not value == 'false'
def select_option(self, option):
"""
Performs selection of provided item from Web List
@params option - string item name
"""
items_list = self.get_options()
for item in items_list:
if item.get_attribute("value") == option:
item.click()
break
def get_options(self):
"""
Performs search for provided item in Web List
"""
return self.find_elements_by_tag_name('option')
def get_attribute_selected(self, attribute):
"""
Performs search of selected item from Web List
Return attribute of selected item
@params attribute - string attribute name
"""
items_list = self.get_options()
return next(iter([item.get_attribute(attribute) for item in items_list if item.is_selected()]), None)
def get_value_selected(self):
"""
Performs search of selected item from Web List
Return value of selected item
"""
return self.get_attribute_selected('value')
def get_text_selected(self):
"""
Performs search of selected item from Web List
Return text of selected item
"""
return self.get_attribute_selected('text')
def select_by_visible_text(self, text):
"""
Performs search of selected item from Web List
@params text - string visible text
"""
xpath = './/option[normalize-space(.) = {0}]'.format(self._escape_string(text))
opts = self.find_elements_by_xpath(xpath)
matched = False
for opt in opts:
self._set_selected(opt)
if not self.is_multiple:
return
matched = True
# in case the target option isn't found by xpath
# attempt to find it by direct comparison among options which contain at least the longest token from the text
if len(opts) == 0 and ' ' in text:
sub_string_without_space = self._get_longest_token(text)
if sub_string_without_space == "":
candidates = self.get_options()
else:
xpath = ".//option[contains(.,{0})]".format(self._escape_string(sub_string_without_space))
candidates = self.find_elements_by_xpath(xpath)
for candidate in candidates:
if text == candidate.text:
self._set_selected(candidate)
if not self.is_multiple:
return
matched = True
if not matched:
raise NoSuchElementException("Could not locate element with visible text: " + str(text))
@staticmethod
def _escape_string(value):
if '"' in value and "'" in value:
substrings = value.split('"')
result = ['concat(']
for substring in substrings:
result.append('"{0}"'.format(substring))
result.append(', \'"\', ')
result.pop()
if value.endswith('"'):
result.append(', \'"\'')
return ''.join(result) + ')'
if '"' in value:
return "'{0}'".format(value)
return '"{0}"'.format(value)
@staticmethod
def _get_longest_token(value):
items = value.split(' ')
longest = ''
for item in items:
if len(item) > len(longest):
longest = item
return longest
@staticmethod
def _set_selected(option):
if not option.is_selected():
option.click()
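# Illustrative usage sketch (assumes `country_select` is an instance of this Select class already
# bound to a <select> element on the page; option values/texts below are placeholders):
#   country_select.select_option('US')                      # select by the option's value attribute
#   country_select.select_by_visible_text('United Kingdom')
#   assert country_select.get_text_selected() == 'United Kingdom'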
|
mindarmour/utils/logger.py | hboshnak/mindarmour | 139 | 7125 | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Util for log module. """
import logging
_LOGGER = logging.getLogger('MA')
def _find_caller():
"""
Bind findCaller() method, which is used to find the stack frame of the
caller so that we can note the source file name, line number and
function name.
"""
return _LOGGER.findCaller()
class LogUtil:
"""
Logging module.
Raises:
        SyntaxError: If this class is instantiated directly.
"""
_instance = None
_logger = None
_extra_fmt = ' [%s] [%s] '
def __init__(self):
        raise SyntaxError('cannot be instantiated directly, please use get_instance.')
@staticmethod
def get_instance():
"""
Get instance of class `LogUtil`.
Returns:
Object, instance of class `LogUtil`.
"""
if LogUtil._instance is None:
LogUtil._instance = object.__new__(LogUtil)
LogUtil._logger = _LOGGER
LogUtil._init_logger()
return LogUtil._instance
@staticmethod
def _init_logger():
"""
Initialize logger.
"""
LogUtil._logger.setLevel(logging.WARNING)
log_fmt = '[%(levelname)s] %(name)s(%(process)d:%(thread)d,' \
'%(processName)s):%(asctime)s%(message)s'
log_fmt = logging.Formatter(log_fmt)
# create console handler with a higher log level
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_fmt)
# add the handlers to the logger
LogUtil._logger.handlers = []
LogUtil._logger.addHandler(console_handler)
LogUtil._logger.propagate = False
def set_level(self, level):
"""
Set the logging level of this logger, level must be an integer or a
string. Supported levels are 'NOTSET'(integer: 0), 'ERROR'(integer: 1-40),
'WARNING'('WARN', integer: 1-30), 'INFO'(integer: 1-20) and 'DEBUG'(integer: 1-10).
For example, if logger.set_level('WARNING') or logger.set_level(21), then
logger.warn() and logger.error() in scripts would be printed while running,
while logger.info() or logger.debug() would not be printed.
Args:
level (Union[int, str]): Level of logger.
"""
self._logger.setLevel(level)
def add_handler(self, handler):
"""
Add other handler supported by logging module.
Args:
handler (logging.Handler): Other handler supported by logging module.
Raises:
ValueError: If handler is not an instance of logging.Handler.
"""
if isinstance(handler, logging.Handler):
self._logger.addHandler(handler)
else:
raise ValueError('handler must be an instance of logging.Handler,'
' but got {}'.format(type(handler)))
def debug(self, tag, msg, *args):
"""
Log '[tag] msg % args' with severity 'DEBUG'.
Args:
tag (str): Logger tag.
msg (str): Logger message.
args (Any): Auxiliary value.
"""
caller_info = _find_caller()
file_info = ':'.join([caller_info[0], str(caller_info[1])])
self._logger.debug(self._extra_fmt + msg, file_info, tag, *args)
def info(self, tag, msg, *args):
"""
Log '[tag] msg % args' with severity 'INFO'.
Args:
tag (str): Logger tag.
msg (str): Logger message.
args (Any): Auxiliary value.
"""
caller_info = _find_caller()
file_info = ':'.join([caller_info[0], str(caller_info[1])])
self._logger.info(self._extra_fmt + msg, file_info, tag, *args)
def warn(self, tag, msg, *args):
"""
Log '[tag] msg % args' with severity 'WARNING'.
Args:
tag (str): Logger tag.
msg (str): Logger message.
args (Any): Auxiliary value.
"""
caller_info = _find_caller()
file_info = ':'.join([caller_info[0], str(caller_info[1])])
self._logger.warning(self._extra_fmt + msg, file_info, tag, *args)
def error(self, tag, msg, *args):
"""
Log '[tag] msg % args' with severity 'ERROR'.
Args:
tag (str): Logger tag.
msg (str): Logger message.
args (Any): Auxiliary value.
"""
caller_info = _find_caller()
file_info = ':'.join([caller_info[0], str(caller_info[1])])
self._logger.error(self._extra_fmt + msg, file_info, tag, *args)
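# Minimal usage sketch (illustrative only):
#   LOGGER = LogUtil.get_instance()
#   LOGGER.set_level('INFO')
#   LOGGER.info('DemoTag', 'accuracy is %s', 0.93)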
|
src/wormhole/__main__.py | dmgolembiowski/magic-wormhole | 2,801 | 7139 | from __future__ import absolute_import, print_function, unicode_literals
if __name__ == "__main__":
from .cli import cli
cli.wormhole()
else:
# raise ImportError('this module should not be imported')
pass
|
apex/contrib/multihead_attn/self_multihead_attn_func.py | Muflhi01/apex | 6,523 | 7148 | import torch
import torch.nn.functional as F
class SelfAttnFunc(torch.autograd.Function):
@staticmethod
def forward(
ctx,
use_time_mask,
is_training,
heads,
scale,
inputs,
input_weights,
output_weights,
input_biases,
output_biases,
mask,
is_additive_mask,
dropout_prob,
):
use_biases_t = torch.tensor([input_biases is not None])
heads_t = torch.tensor([heads])
scale_t = torch.tensor([scale])
dropout_prob_t = torch.tensor([dropout_prob])
null_tensor = torch.tensor([])
head_dim = inputs.size(2) // heads
# Input Linear GEMM
# input1: (activations) [seql_q, seqs, embed_dim(1024)]
# input2: (weights) [embed_dim*3 (3072), embed_dim (1024)] (transpose [0,1])
# output: [seql_q, seqs, embed_dim*3]
# GEMM: ( (seql_q*seqs) x embed_dim ) x ( embed_dim x embed_dim*3 ) = (seql_q*seqs x embed_dim*3)
if use_biases_t[0]:
input_lin_results = torch.addmm(
input_biases,
inputs.view(inputs.size(0) * inputs.size(1), inputs.size(2)),
input_weights.transpose(0, 1),
beta=1.0,
alpha=1.0,
)
else:
input_lin_results = torch.mm(
inputs.view(inputs.size(0) * inputs.size(1), inputs.size(2)), input_weights.transpose(0, 1)
)
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1), input_weights.size(0))
        # Slice out q,k,v from one big Input Linear output (should only impact metadata, no copies!)
# Sequences and heads are combined to make the batch of the Batched GEMM
# input_lin_results: [seql_q, seqs, heads(16), 3, head_dim(64)]
# input_lin_results: [seql_q, batches=seqs*heads, 3, head_dim]
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1) * heads, 3, head_dim)
queries = input_lin_results[:, :, 0, :]
keys = input_lin_results[:, :, 1, :]
values = input_lin_results[:, :, 2, :]
# Matmul1 Batched GEMMs
# The output tensor is specified prior to the Batch GEMM because baddbmm requires its specification
# baddbmm is used to apply the scale parameter via the Batched GEMM's alpha parameter instead of
# a separate elementwise operation.
        # Input1: (Queries) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (Keys) [seql_k, seqs*heads, head_dim] transpose(0,1)
# output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
matmul1_results = torch.empty(
(queries.size(1), queries.size(0), keys.size(0)), dtype=queries.dtype, device=torch.device("cuda")
)
matmul1_results = torch.baddbmm(
matmul1_results,
queries.transpose(0, 1),
keys.transpose(0, 1).transpose(1, 2),
out=matmul1_results,
beta=0.0,
alpha=scale_t[0],
)
if mask is not None:
# Self Attention Time Mask
if use_time_mask:
assert len(mask.size()) == 2, "Timing mask is not 2D!"
assert mask.size(0) == mask.size(1), "Sequence length should match!"
mask = mask.to(torch.bool)
matmul1_results = matmul1_results.masked_fill_(mask, float("-inf"))
# Key Padding Mask
else:
batches, seql_q, seql_k = matmul1_results.size()
seqs = int(batches / heads)
matmul1_results = matmul1_results.view(seqs, heads, seql_q, seql_k)
if is_additive_mask:
matmul1_results = matmul1_results + mask.unsqueeze(1).unsqueeze(2)
else:
mask = mask.to(torch.bool)
matmul1_results = matmul1_results.masked_fill_(mask.unsqueeze(1).unsqueeze(2), float("-inf"))
matmul1_results = matmul1_results.view(seqs * heads, seql_q, seql_k)
softmax_results = F.softmax(matmul1_results, dim=-1)
# Dropout - is not executed for inference
if is_training:
dropout_results, dropout_mask = torch._fused_dropout(softmax_results, p=(1.0 - dropout_prob_t[0]))
else:
dropout_results = softmax_results
dropout_mask = null_tensor
# Matmul2 Batched GEMMs
        # The output tensor is specified here because of its non-standard layout.
        # Since PyTorch cannot currently perform autograd when an output tensor is specified,
        # a custom backward pass has to be provided.
# Input1: from_softmax [seqs*heads, seql_q, seql_k]
# Input2: (values) [seql_v, seqs*heads, head_dim] transpose(0,1)
# Output: [seql_q, seqs*heads, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = (seql_q x head_dim)
matmul2_results = torch.empty(
(dropout_results.size(1), dropout_results.size(0), values.size(2)),
dtype=dropout_results.dtype,
device=torch.device("cuda"),
).transpose(1, 0)
matmul2_results = torch.bmm(dropout_results, values.transpose(0, 1), out=matmul2_results)
matmul2_results = (
matmul2_results.transpose(0, 1).contiguous().view(inputs.size(0), inputs.size(1), inputs.size(2))
)
# Output Linear GEMM
# Input1: (activations) [seql_q, seqs, embed_dim=heads*head_dim]
# Input2: (weights) [ embed_dim, embed_dim ] transpose(0,1)
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( seql_q*seqs x embed_dim ) x ( embed_dim x embed_dim ) = ( seql_q*seqs x embed_dim )
if use_biases_t[0]:
outputs = torch.addmm(
output_biases,
matmul2_results.view(inputs.size(0) * inputs.size(1), inputs.size(2)),
output_weights.transpose(0, 1),
beta=1.0,
alpha=1.0,
)
else:
outputs = torch.mm(
matmul2_results.view(inputs.size(0) * inputs.size(1), inputs.size(2)), output_weights.transpose(0, 1)
)
outputs = outputs.view(inputs.size(0), inputs.size(1), output_weights.size(0))
ctx.save_for_backward(
use_biases_t,
heads_t,
scale_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_results,
inputs,
input_weights,
output_weights,
dropout_mask,
dropout_prob_t,
)
return outputs.detach()
@staticmethod
def backward(ctx, output_grads):
(
use_biases_t,
heads_t,
scale_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_results,
inputs,
input_weights,
output_weights,
dropout_mask,
dropout_prob_t,
) = ctx.saved_tensors
head_dim = inputs.size(2) // heads_t[0]
        # Slice out q,k,v from one big Input Linear output (should only impact metadata, no copies!)
# Sequences and heads are combined to make the batch of the Batched GEMM
# input_lin_results: [seql_q, seqs, heads(16), 3, head_dim(64)]
# input_lin_results: [seql_q, batches=seqs*heads, 3, head_dim]
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1) * heads_t[0], 3, head_dim)
queries = input_lin_results[:, :, 0, :]
keys = input_lin_results[:, :, 1, :]
values = input_lin_results[:, :, 2, :]
        # Slice out q,k,v from one big set of gradients entering the input linear's bprop (should only impact metadata, no copies!)
        # The gradients are identical in size to the Input Linear outputs.
        # The tensor is declared beforehand to properly slice out query, key, and value grads.
input_lin_results_grads = torch.empty_like(input_lin_results)
queries_grads = input_lin_results_grads[:, :, 0, :]
keys_grads = input_lin_results_grads[:, :, 1, :]
values_grads = input_lin_results_grads[:, :, 2, :]
# Output Linear GEMM - DGRAD
# Input1: (data grads) [seql_q, seqs, embed_dim=heads*head_dim]
# Input2: (weights) [ embed_dim, embed_dim ]
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( seql_q*seqs x embed_dim ) x ( embed_dim x embed_dim ) = ( seql_q*seqs x embed_dim )
output_lin_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), output_weights
)
output_lin_grads = output_lin_grads.view(output_grads.size(0), output_grads.size(1), output_weights.size(1))
# Output Linear GEMM - WGRAD
# Input1: (data grads) [seql_q*seqs, embed_dim=heads*head_dim] transpose(0,1)
# Input2: (activations) [seql_q*seqs, embed_dim ]
        # Output:               [ embed_dim, embed_dim ]
# GEMM: ( embed_dim x seql_q*seqs ) x ( seql_q*seqs x embed_dim ) = ( embed_dim x embed_dim )
output_weight_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)).transpose(0, 1),
matmul2_results.view(matmul2_results.size(0) * matmul2_results.size(1), matmul2_results.size(2)),
)
output_lin_grads = output_lin_grads.view(inputs.size(0), inputs.size(1) * heads_t[0], head_dim).transpose(0, 1)
if use_biases_t[0]:
output_bias_grads = torch.sum(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), 0
)
else:
output_bias_grads = None
# Matmul2 - DGRAD1
# Input1: (data grads) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1).transpose(1,2)
# Output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
matmul2_dgrad1 = torch.bmm(output_lin_grads, values.transpose(0, 1).transpose(1, 2))
        # Matmul2 - DGRAD2
        # Input1: (activations) [seqs*heads, seql_q, seql_k] transpose(1,2) (dropout/softmax results)
        # Input2: (data grads)  [seql_q, seqs*heads, head_dim] transpose(0,1)
        # Output:               [seql_k, seqs*heads, head_dim] transpose(0,1)
        # GEMM: Per batch: ( seql_k x seql_q ) x ( seql_q x head_dim ) = ( seql_k x head_dim )
values_grads = torch.bmm(dropout_results.transpose(1, 2), output_lin_grads, out=values_grads.transpose(0, 1))
        # Mask and Scaling for Dropout (not a publicly documented op)
dropout_grads = torch._masked_scale(matmul2_dgrad1, dropout_mask, 1.0 / (1.0 - dropout_prob_t[0]))
        # Softmax Grad (not a publicly documented op)
softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1, softmax_results)
# Matmul1 - DGRAD1
# Input1: (data grads) [seqs*heads, seql_q, seql_k]
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_q, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = ( seql_q x head_dim )
queries_grads = torch.baddbmm(
queries_grads.transpose(0, 1),
softmax_grads,
keys.transpose(0, 1),
out=queries_grads.transpose(0, 1),
beta=0.0,
alpha=scale_t[0],
)
# Matmul1 - DGRAD2
# Input1: (data grads) [seqs*heads, seql_q, seql_k] transpose(1,2)
# Input2: (activations) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_k, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_k x seql_q ) x ( seql_q x head_dim ) = ( seql_k x head_dim )
keys_grads = torch.baddbmm(
keys_grads.transpose(0, 1),
softmax_grads.transpose(1, 2),
queries.transpose(0, 1),
out=keys_grads.transpose(0, 1),
beta=0.0,
alpha=scale_t[0],
)
# Input Linear GEMM - DGRAD
# input1: (data grads) [seql_q, seqs, 3*embed_dim(3072)]
# input2: (weights) [embed_dim*3 (3072), embed_dim (1024)]
# output: [seql_q, seqs, embed_dim]
# GEMM: ( (seql_q*seqs) x 3*embed_dim ) x ( 3*embed_dim x embed_dim ) = (seql_q*seqs x embed_dim)
input_lin_results_grads = input_lin_results_grads.view(
inputs.size(0) * inputs.size(1), heads_t[0] * 3 * head_dim
)
input_grads = torch.mm(input_lin_results_grads, input_weights)
input_grads = input_grads.view(inputs.size(0), inputs.size(1), inputs.size(2))
# Input Linear GEMM - WGRAD
# input1: (data grads) [seql_q*seqs, 3*embed_dim(3072)]
# input2: (activations) [seql_q*seqs, embed_dim(1024)]
# output: [3*embed_dim, embed_dim]
# GEMM: ( 3*embed_dim x seql_q*seqs ) x ( seql_q*seqs x embed_dim ) = (3*embed_dim x embed_dim)
input_weight_grads = torch.mm(
input_lin_results_grads.transpose(0, 1), inputs.view(inputs.size(0) * inputs.size(1), inputs.size(2))
)
if use_biases_t[0]:
input_bias_grads = torch.sum(input_lin_results_grads, 0)
else:
input_bias_grads = None
return (
None,
None,
None,
None,
input_grads,
input_weight_grads,
output_weight_grads,
input_bias_grads,
output_bias_grads,
None,
None,
)
self_attn_func = SelfAttnFunc.apply
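

# A minimal usage sketch of self_attn_func. Shapes follow the comments in forward():
# inputs are [seql, batch, embed_dim], the fused QKV projection weight is
# [3*embed_dim, embed_dim] and the output projection weight is [embed_dim, embed_dim].
# A CUDA device is assumed because forward() allocates its intermediate buffers on
# "cuda"; all sizes and tensor values below are made up for illustration.
if __name__ == "__main__" and torch.cuda.is_available():
    seql, batch, heads, head_dim = 4, 2, 16, 64
    embed_dim = heads * head_dim
    inputs = torch.randn(seql, batch, embed_dim, device="cuda")
    qkv_weight = torch.randn(3 * embed_dim, embed_dim, device="cuda")
    out_weight = torch.randn(embed_dim, embed_dim, device="cuda")
    qkv_bias = torch.zeros(3 * embed_dim, device="cuda")
    out_bias = torch.zeros(embed_dim, device="cuda")
    outputs = self_attn_func(
        False,                   # use_time_mask
        False,                   # is_training (skips the fused dropout path)
        heads,
        1.0 / head_dim ** 0.5,   # scale applied inside the first batched GEMM
        inputs,
        qkv_weight,
        out_weight,
        qkv_bias,
        out_bias,
        None,                    # mask
        False,                   # is_additive_mask
        0.1,                     # dropout_prob (ignored when is_training is False)
    )
    print(outputs.shape)  # torch.Size([4, 2, 1024])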
|
tests/test_app/library/loans/admin.py | Pijuli/django-jazzmin | 972 | 7154 | <gh_stars>100-1000
from django.contrib import admin
from django.urls import path
from .models import BookLoan, Library
from .views import CustomView
class BookLoanInline(admin.StackedInline):
model = BookLoan
extra = 1
readonly_fields = ("id", "duration")
fields = (
"book",
"imprint",
"status",
"due_back",
"borrower",
"loan_start",
"duration",
)
@admin.register(BookLoan)
class BookLoanAdmin(admin.ModelAdmin):
list_display = ("book", "status", "borrower", "due_back", "id")
list_filter = ("status", "due_back")
autocomplete_fields = ("borrower",)
search_fields = ("book__title",)
readonly_fields = ("id",)
fieldsets = (
(None, {"fields": ("book", "imprint", "id")}),
("Availability", {"fields": ("status", "due_back", "duration", "borrower")}),
)
def get_urls(self):
"""
        Add in a custom view to demonstrate adding extra URLs to a ModelAdmin.
"""
urls = super().get_urls()
return urls + [path("custom_view", CustomView.as_view(), name="custom_view")]
def response_change(self, request, obj):
ret = super().response_change(request, obj)
if "reserve" in request.POST:
obj.status = "r"
obj.save()
return ret
@admin.register(Library)
class LibraryAdmin(admin.ModelAdmin):
list_display = ("name", "address", "librarian")
|
test/sanity_import_vpp_papi.py | amithbraj/vpp | 751 | 7162 | #!/usr/bin/env python3
""" sanity check script """
import vpp_papi
|
src/trusted/validator_arm/dgen_decoder_output.py | cohortfsllc/cohort-cocl2-sandbox | 2,151 | 7191 | <filename>src/trusted/validator_arm/dgen_decoder_output.py
#!/usr/bin/python
#
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""
Responsible for generating the decoder based on parsed
table representations.
"""
import dgen_opt
import dgen_output
import dgen_actuals
# This file generates the class decoder Decoder as defined by the
# decoder tables. The code is specifically written to minimize the
# number of decoder classes needed to parse valid ARM
# instructions. Many rows in the table use the same decoder class. In
# addition, we optimize tables by merging, so long as the same decoder
# class is built.
#
# The following files are generated:
#
# decoder.h
# decoder.cc
#
# decoder.h declares the generated decoder parser class while
# decoder.cc contains the implementation of that decoder class.
#
# For testing purposes (see dgen_test_output.py) different rules are
# applied. Note: It may be worth reading dgen_test_output.py preamble
# to get a better understanding of decoder actions, and why we need
# the "action_filter" methods.
"""The current command line arguments to use"""
_cl_args = {}
NEWLINE_STR="""
"""
COMMENTED_NEWLINE_STR="""
//"""
# Defines the header for decoder.h
H_HEADER="""%(FILE_HEADER)s
#ifndef %(IFDEF_NAME)s
#define %(IFDEF_NAME)s
#include "native_client/src/trusted/validator_arm/decode.h"
#include "%(FILENAME_BASE)s_actuals.h"
namespace nacl_arm_dec {
"""
DECODER_DECLARE_HEADER="""
// Defines a decoder class selector for instructions.
class %(decoder_name)s : DecoderState {
public:
explicit %(decoder_name)s();
// Parses the given instruction, returning the decoder to use.
virtual const ClassDecoder& decode(const Instruction) const;
// Returns the class decoder to use to process the fictitious instruction
// that is inserted before the first instruction in the code block by
// the validator.
const ClassDecoder &fictitious_decoder() const {
return %(fictitious_decoder)s_instance_;
}
private:
"""
DECODER_DECLARE_METHOD_COMMENTS="""
// The following list of methods correspond to each decoder table,
// and implements the pattern matching of the corresponding bit
// patterns. After matching the corresponding bit patterns, they
// either call other methods in this list (corresponding to another
// decoder table), or they return the instance field that implements
// the class decoder that should be used to decode the particular
// instruction.
"""
DECODER_DECLARE_METHOD="""
inline const ClassDecoder& decode_%(table_name)s(
const Instruction inst) const;
"""
DECODER_DECLARE_FIELD_COMMENTS="""
// The following fields define the set of class decoders
// that can be returned by the API function "decode". They
// are created once as instance fields, and then returned
// by the table methods above. This speeds up the code since
// the class decoders need to only be built once (and reused
// for each call to "decode")."""
DECODER_DECLARE_FIELD="""
const %(decoder)s %(decoder)s_instance_;"""
DECODER_DECLARE_FOOTER="""
};
"""
H_FOOTER="""
} // namespace nacl_arm_dec
#endif // %(IFDEF_NAME)s
"""
def generate_h(decoder, decoder_name, filename, out, cl_args):
"""Entry point to the decoder for .h file.
Args:
decoder: The decoder defined by the list of Table objects to
process.
decoder_name: The name of the decoder state to build.
filename: The (localized) name for the .h file.
out: a COutput object to write to.
cl_args: A dictionary of additional command line arguments.
"""
global _cl_args
assert filename.endswith('.h')
_cl_args = cl_args
# Before starting, remove all testing information from the parsed tables.
decoder = decoder.action_filter(['actual'])
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'IFDEF_NAME': dgen_output.ifdef_name(filename),
'FILENAME_BASE': filename[:-len('.h')],
'decoder_name': decoder_name,
}
out.write(H_HEADER % values)
values['fictitious_decoder'] = (
decoder.get_value('FictitiousFirst').actual())
out.write(DECODER_DECLARE_HEADER % values)
out.write(DECODER_DECLARE_METHOD_COMMENTS)
for table in decoder.tables():
values['table_name'] = table.name
out.write(DECODER_DECLARE_METHOD % values)
out.write(DECODER_DECLARE_FIELD_COMMENTS)
for action in decoder.action_filter(['actual']).decoders():
values['decoder'] = action.actual()
out.write(DECODER_DECLARE_FIELD % values)
out.write(DECODER_DECLARE_FOOTER % values)
out.write(H_FOOTER % values)
# Defines the header for DECODER.h
CC_HEADER="""%(FILE_HEADER)s
#include "%(header_filename)s"
namespace nacl_arm_dec {
"""
CONSTRUCTOR_HEADER="""
%(decoder_name)s::%(decoder_name)s() : DecoderState()"""
CONSTRUCTOR_FIELD_INIT="""
, %(decoder)s_instance_()"""
CONSTRUCTOR_FOOTER="""
{}
"""
METHOD_HEADER="""
// Implementation of table: %(table_name)s.
// Specified by: %(citation)s
const ClassDecoder& %(decoder_name)s::decode_%(table_name)s(
const Instruction inst) const
{"""
METHOD_HEADER_TRACE="""
fprintf(stderr, "decode %(table_name)s\\n");
"""
METHOD_DISPATCH_BEGIN="""
if (%s"""
METHOD_DISPATCH_CONTINUE=""" &&
%s"""
METHOD_DISPATCH_END=") {"""
METHOD_DISPATCH_TRACE="""
fprintf(stderr, "count = %s\\n");"""
METHOD_DISPATCH_CLASS_DECODER="""
return %(decoder)s_instance_;"""
METHOD_DISPATCH_SUBMETHOD="""
return decode_%(subtable_name)s(inst);"""
METHOD_DISPATCH_CLOSE="""
}
"""
METHOD_FOOTER="""
  // Catch any attempt to fall through ...
return %(not_implemented)s_instance_;
}
"""
DECODER_METHOD_HEADER="""
const ClassDecoder& %(decoder_name)s::decode(const Instruction inst) const {"""
DECODER_METHOD_TRACE="""
fprintf(stderr, "Parsing %%08x\\n", inst.Bits());"""
DECODER_METHOD_FOOTER="""
return decode_%(entry_table_name)s(inst);
}
"""
CC_FOOTER="""
} // namespace nacl_arm_dec
"""
def generate_cc(decoder, decoder_name, filename, out, cl_args):
"""Implementation of the decoder in .cc file
Args:
decoder: The decoder defined by the list of Table objects to
process.
decoder_name: The name of the decoder state to build.
    filename: The (localized) name for the .cc file.
out: a COutput object to write to.
cl_args: A dictionary of additional command line arguments.
"""
global _cl_args
assert filename.endswith('.cc')
_cl_args = cl_args
# Before starting, remove all testing information from the parsed
# tables.
decoder = decoder.action_filter(['actual'])
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'header_filename': filename[:-2] + 'h',
'decoder_name': decoder_name,
'entry_table_name': decoder.primary.name,
}
out.write(CC_HEADER % values)
_generate_constructors(decoder, values, out)
_generate_methods(decoder, values, out)
out.write(DECODER_METHOD_HEADER % values)
if _cl_args.get('trace') == 'True':
out.write(DECODER_METHOD_TRACE % values)
out.write(DECODER_METHOD_FOOTER % values)
out.write(CC_FOOTER % values)
def _generate_constructors(decoder, values, out):
out.write(CONSTRUCTOR_HEADER % values)
for decoder in decoder.action_filter(['actual']).decoders():
values['decoder'] = decoder.actual()
out.write(CONSTRUCTOR_FIELD_INIT % values)
out.write(CONSTRUCTOR_FOOTER % values)
def _generate_methods(decoder, values, out):
global _cl_args
for table in decoder.tables():
# Add the default row as the last in the optimized row, so that
# it is applied if all other rows do not.
opt_rows = sorted(dgen_opt.optimize_rows(table.rows(False)))
if table.default_row:
opt_rows.append(table.default_row)
opt_rows = table.add_column_to_rows(opt_rows)
print ("Table %s: %d rows minimized to %d"
% (table.name, len(table.rows()), len(opt_rows)))
values['table_name'] = table.name
values['citation'] = table.citation
out.write(METHOD_HEADER % values)
if _cl_args.get('trace') == 'True':
out.write(METHOD_HEADER_TRACE % values)
# Add message to stop compilation warnings if this table
# doesn't require subtables to select a class decoder.
if not table.methods():
out.write("\n UNREFERENCED_PARAMETER(inst);")
count = 0
for row in opt_rows:
count = count + 1
# Each row consists of a set of bit patterns defining if the row
# is applicable. Convert this into a sequence of anded C test
# expressions. For example, convert the following pair of bit
# patterns:
#
# xxxx1010xxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxxxxxxxxxxxxxxx0101
#
      # Each instruction is masked to get the bits, and then
# tested against the corresponding expected bits. Hence, the
# above example is converted to:
#
# ((inst & 0x0F000000) != 0x0C000000) &&
# ((inst & 0x0000000F) != 0x00000005)
out.write(METHOD_DISPATCH_BEGIN %
row.patterns[0].to_commented_bool())
for p in row.patterns[1:]:
out.write(METHOD_DISPATCH_CONTINUE % p.to_commented_bool())
out.write(METHOD_DISPATCH_END)
if _cl_args.get('trace') == 'True':
out.write(METHOD_DISPATCH_TRACE % count)
if row.action.__class__.__name__ == 'DecoderAction':
values['decoder'] = row.action.actual()
out.write(METHOD_DISPATCH_CLASS_DECODER % values)
elif row.action.__class__.__name__ == 'DecoderMethod':
values['subtable_name'] = row.action.name
out.write(METHOD_DISPATCH_SUBMETHOD % values)
else:
raise Exception('Bad table action: %s' % repr(row.action))
out.write(METHOD_DISPATCH_CLOSE % values)
values['not_implemented'] = decoder.get_value('NotImplemented').actual()
out.write(METHOD_FOOTER % values)
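

# Standalone illustration of the mask/value arithmetic behind the pattern-to-C-test
# conversion described in _generate_methods above. The generator itself relies on each
# row pattern's to_commented_bool(); this sketch only shows how a 32-character pattern
# string such as "xxxx1010..." maps to a mask and an expected value (the generated
# tests may compare with == or != depending on the rule).
def _pattern_to_mask_and_value(pattern):
  """Returns (mask, value) for a pattern made of '0', '1' and 'x' characters."""
  mask = 0
  value = 0
  for bit in pattern:        # leftmost character is the most significant bit
    mask <<= 1
    value <<= 1
    if bit in ('0', '1'):
      mask |= 1
      value |= int(bit)
  return mask, value


def _pattern_to_c_test(pattern):
  """E.g. 'xxxx1010' + 'x' * 24  ->  '((inst & 0x0F000000) == 0x0A000000)'."""
  mask, value = _pattern_to_mask_and_value(pattern)
  return '((inst & 0x%08X) == 0x%08X)' % (mask, value)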
|
logistic-regression/plot_binary_losses.py | eliben/deep-learning-samples | 183 | 7203 | # Helper code to plot binary losses.
#
# <NAME> (http://eli.thegreenplace.net)
# This code is in the public domain
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
fig, ax = plt.subplots()
fig.set_tight_layout(True)
xs = np.linspace(-2, 2, 500)
# plot L0/1 loss
ax.plot(xs, np.where(xs < 0, np.ones_like(xs), np.zeros_like(xs)),
color='r', linewidth=2.0, label='$L_{01}$')
# plot square loss
ax.plot(xs, (xs - 1) ** 2, linestyle='-.', label='$L_2$')
# plot hinge loss
ax.plot(xs, np.maximum(np.zeros_like(xs), 1 - xs),
color='g', linewidth=2.0, label='$L_h$')
ax.grid(True)
plt.ylim((-1, 4))
ax.legend()
fig.savefig('loss.png', dpi=80)
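
    # Quick numeric check of the three plotted losses at an arbitrary margin of 0.5,
    # mirroring the formulas used above.
    margin = 0.5
    print('L01(%.1f) = %d' % (margin, 1 if margin < 0 else 0))
    print('L2(%.1f)  = %.2f' % (margin, (margin - 1) ** 2))
    print('Lh(%.1f)  = %.2f' % (margin, max(0.0, 1 - margin)))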
plt.show()
|
shapeshifter/tests/conftest.py | martinogden/django-shapeshifter | 164 | 7206 | <filename>shapeshifter/tests/conftest.py
from pytest_djangoapp import configure_djangoapp_plugin
pytest_plugins = configure_djangoapp_plugin(
extend_INSTALLED_APPS=[
'django.contrib.sessions',
'django.contrib.messages',
],
extend_MIDDLEWARE=[
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
)
|
tests/zoo/tree.py | dynalz/odmantic | 486 | 7220 | <reponame>dynalz/odmantic
import enum
from typing import Dict, List
from odmantic.field import Field
from odmantic.model import Model
class TreeKind(str, enum.Enum):
BIG = "big"
SMALL = "small"
class TreeModel(Model):
name: str = Field(primary_key=True, default="<NAME> montagnes")
average_size: float = Field(mongo_name="size")
discovery_year: int
kind: TreeKind
genesis_continents: List[str]
per_continent_density: Dict[str, float]
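

# A minimal usage sketch with made-up field values. Constructing the document model
# does not require a MongoDB connection (an engine is only needed to persist it),
# and the default value of the "name" primary key is used when it is not supplied.
if __name__ == "__main__":
    oak = TreeModel(
        average_size=21.5,
        discovery_year=1862,
        kind=TreeKind.BIG,
        genesis_continents=["Europe", "Asia"],
        per_continent_density={"Europe": 0.35, "Asia": 0.12},
    )
    print(oak.name, oak.kind.value)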
|
examples/model_zoo/build_binaries.py | Embracing/unrealcv | 1,617 | 7226 | import subprocess, os
ue4_win = r"C:\Program Files\Epic Games\UE_4.16"
ue4_linux = "/home/qiuwch/workspace/UE416"
ue4_mac = '/Users/Shared/Epic Games/UE_4.16'
win_uprojects = [
r'C:\qiuwch\workspace\uprojects\UE4RealisticRendering\RealisticRendering.uproject',
r'C:\qiuwch\workspace\uprojects\UE4ArchinteriorsVol2Scene1\ArchinteriorsVol2Scene1.uproject',
r'C:\qiuwch\workspace\uprojects\UE4ArchinteriorsVol2Scene2\ArchinteriorsVol2Scene2.uproject',
r'C:\qiuwch\workspace\uprojects\UE4ArchinteriorsVol2Scene3\ArchinteriorsVol2Scene3.uproject',
r'C:\qiuwch\workspace\uprojects\UE4UrbanCity\UrbanCity.uproject',
r'D:\workspace\uprojects\Matinee\Matinee.uproject',
r'D:\workspace\uprojects\PhotorealisticCharacter\PhotorealisticCharacter2.uproject',
]
linux_uprojects = [
os.path.expanduser('~/workspace/uprojects/UE4RealisticRendering/RealisticRendering.uproject'),
os.path.expanduser('~/workspace/uprojects/UE4ArchinteriorsVol2Scene1/ArchinteriorsVol2Scene1.uproject'),
os.path.expanduser('~/workspace/uprojects/UE4ArchinteriorsVol2Scene2/ArchinteriorsVol2Scene2.uproject'),
os.path.expanduser('~/workspace/uprojects/UE4ArchinteriorsVol2Scene3/ArchinteriorsVol2Scene3.uproject'),
os.path.expanduser("~/workspace/uprojects/UE4UrbanCity/UrbanCity.uproject"),
]
mac_uprojects = [
os.path.expanduser('~/workspace/UnrealEngine/Templates/FP_FirstPerson/FP_FirstPerson.uproject'),
os.path.expanduser('~/uprojects/RealisticRendering/RealisticRendering.uproject'),
os.path.expanduser('~/uprojects/UE4ArchinteriorsVol2Scene1/ArchinteriorsVol2Scene1.uproject'),
os.path.expanduser('~/uprojects/UE4ArchinteriorsVol2Scene2/ArchinteriorsVol2Scene2.uproject'),
os.path.expanduser('~/uprojects/UE4ArchinteriorsVol2Scene3/ArchinteriorsVol2Scene3.uproject'),
os.path.expanduser('~/uprojects/UE4UrbanCity/UrbanCity.uproject'),
]
uprojects = []
for uproject_path in win_uprojects:
uproject_name = os.path.basename(uproject_path).split('.')[0]
uprojects.append(
dict(
uproject_path = uproject_path,
ue4_path = ue4_win,
log_file = 'log/win_%s.log' % uproject_name
),
)
for uproject_path in linux_uprojects:
uproject_name = os.path.basename(uproject_path).split('.')[0]
uprojects.append(
dict(
uproject_path = uproject_path,
ue4_path = ue4_linux,
log_file = 'log/linux_%s.log' % uproject_name
),
)
for uproject_path in mac_uprojects:
uproject_name = os.path.basename(uproject_path).split('.')[0]
uprojects.append(
dict(
uproject_path = uproject_path,
ue4_path = ue4_mac,
log_file = 'log/mac_%s.log' % uproject_name
),
)
if __name__ == '__main__':
for uproject in uprojects:
uproject_path = uproject['uproject_path']
if not os.path.isfile(uproject_path):
print("Can not find uproject file %s, skip this project" % uproject_path)
continue
cmd = [
'python', 'build.py',
'--UE4', uproject['ue4_path'],
# '--output', uproject['output_folder'],
uproject['uproject_path']
]
print(cmd)
subprocess.call(cmd,
stdout = open(uproject['log_file'], 'w'))
with open(uproject['log_file']) as f:
lines = f.readlines()
print(''.join(lines[-10:])) # Print the last few lines
|
rigl/experimental/jax/pruning/pruning.py | vishalbelsare/rigl | 276 | 7229 | <filename>rigl/experimental/jax/pruning/pruning.py
# coding=utf-8
# Copyright 2021 RigL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Functions for pruning FLAX masked models."""
import collections.abc
from typing import Any, Callable, Mapping, Optional, Union
import flax
import jax.numpy as jnp
from rigl.experimental.jax.pruning import masked
def weight_magnitude(weights):
"""Creates weight magnitude-based saliencies, given a weight matrix."""
return jnp.absolute(weights)
def prune(
model,
pruning_rate,
saliency_fn = weight_magnitude,
mask = None,
compare_fn = jnp.greater):
"""Returns a mask for a model where the params in each layer are pruned using a saliency function.
Args:
model: The model to create a pruning mask for.
pruning_rate: The fraction of lowest magnitude saliency weights that are
pruned. If a float, the same rate is used for all layers, otherwise if it
is a mapping, it must contain a rate for all masked layers in the model.
saliency_fn: A function that returns a float number used to rank
the importance of individual weights in the layer.
mask: If the model has an existing mask, the mask will be applied before
pruning the model.
compare_fn: A pairwise operator to compare saliency with threshold, and
return True if the saliency indicates the value should not be masked.
Returns:
A pruned mask for the given model.
"""
if not mask:
mask = masked.simple_mask(model, jnp.ones, masked.WEIGHT_PARAM_NAMES)
  if not isinstance(pruning_rate, collections.abc.Mapping):
pruning_rate_dict = {}
for param_name, _ in masked.iterate_mask(mask):
# Get the layer name from the parameter's full name/path.
layer_name = param_name.split('/')[-2]
pruning_rate_dict[layer_name] = pruning_rate
pruning_rate = pruning_rate_dict
for param_path, param_mask in masked.iterate_mask(mask):
split_param_path = param_path.split('/')
layer_name = split_param_path[-2]
param_name = split_param_path[-1]
# If we don't have a pruning rate for the given layer, don't mask it.
if layer_name in pruning_rate and mask[layer_name][param_name] is not None:
param_value = model.params[layer_name][
masked.MaskedModule.UNMASKED][param_name]
# Here any existing mask is first applied to weight matrix.
# Note: need to check explicitly is not None for np array.
if param_mask is not None:
saliencies = saliency_fn(param_mask * param_value)
else:
saliencies = saliency_fn(param_value)
# TODO: Use partition here (partial sort) instead of sort,
# since it's O(N), not O(N log N), however JAX doesn't support it.
sorted_param = jnp.sort(jnp.abs(saliencies.flatten()))
# Figure out the weight magnitude threshold.
threshold_index = jnp.round(pruning_rate[layer_name] *
sorted_param.size).astype(jnp.int32)
threshold = sorted_param[threshold_index]
mask[layer_name][param_name] = jnp.array(
compare_fn(saliencies, threshold), dtype=jnp.int32)
return mask
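

# Standalone sketch (not used by prune above) of the core magnitude-thresholding step
# on a plain array: sort the saliencies, read the threshold off at the pruning rate,
# and keep only entries that strictly exceed it. The sample array and the 0.5 rate in
# the comment below are made up.
# _example_magnitude_mask(jnp.array([0.1, -2.0, 0.3, 1.5]), 0.5) -> [0, 1, 0, 0]
def _example_magnitude_mask(weights, rate):
  saliencies = weight_magnitude(weights)
  sorted_saliencies = jnp.sort(saliencies.flatten())
  threshold_index = jnp.round(rate * sorted_saliencies.size).astype(jnp.int32)
  threshold = sorted_saliencies[threshold_index]
  return jnp.array(jnp.greater(saliencies, threshold), dtype=jnp.int32)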
|
snoopy/server/transforms/Maltego.py | aiddenkeli/Snoopy | 432 | 7254 | #!/usr/bin/python
#
# This might be horrible code...
# ...but it works
# Feel free to re-write in a better way
# And if you want to - send it to us, we'll update ;)
# <EMAIL> (2010/10/18)
#
import sys
from xml.dom import minidom
class MaltegoEntity(object):
value = "";
weight = 100;
displayInformation = "";
additionalFields = [];
iconURL = "";
entityType = "Phrase"
def __init__(self,eT=None,v=None):
if (eT is not None):
self.entityType = eT;
if (v is not None):
self.value = v;
self.additionalFields = None;
self.additionalFields = [];
self.weight = 100;
self.displayInformation = "";
self.iconURL = "";
def setType(self,eT=None):
if (eT is not None):
self.entityType = eT;
def setValue(self,eV=None):
if (eV is not None):
self.value = eV;
def setWeight(self,w=None):
if (w is not None):
self.weight = w;
def setDisplayInformation(self,di=None):
if (di is not None):
self.displayInformation = di;
def addAdditionalFields(self,fieldName=None,displayName=None,matchingRule=False,value=None):
self.additionalFields.append([fieldName,displayName,matchingRule,value]);
def setIconURL(self,iU=None):
if (iU is not None):
self.iconURL = iU;
def returnEntity(self):
print "<Entity Type=\"" + str(self.entityType) + "\">";
print "<Value>" + str(self.value) + "</Value>";
print "<Weight>" + str(self.weight) + "</Weight>";
if (self.displayInformation is not None):
print "<DisplayInformation><Label Name=\"\" Type=\"text/html\"><![CDATA[" + str(self.displayInformation) + "]]></Label></DisplayInformation>";
if (len(self.additionalFields) > 0):
print "<AdditionalFields>";
for i in range(len(self.additionalFields)):
if (str(self.additionalFields[i][2]) <> "strict"):
print "<Field Name=\"" + str(self.additionalFields[i][0]) + "\" DisplayName=\"" + str(self.additionalFields[i][1]) + "\">" + str(self.additionalFields[i][3]) + "</Field>";
else:
print "<Field MatchingRule=\"" + str(self.additionalFields[i][2]) + "\" Name=\"" + str(self.additionalFields[i][0]) + "\" DisplayName=\"" + str(self.additionalFields[i][1]) + "\">" + str(self.additionalFields[i][3]) + "</Field>";
print "</AdditionalFields>";
if (len(self.iconURL) > 0):
print "<IconURL>" + self.iconURL + "</IconURL>";
print "</Entity>";
class MaltegoTransform(object):
entities = []
exceptions = []
UIMessages = []
#def __init__(self):
#empty.
def addEntity(self,enType,enValue):
me = MaltegoEntity(enType,enValue);
self.addEntityToMessage(me);
return self.entities[len(self.entities)-1];
def addEntityToMessage(self,maltegoEntity):
self.entities.append(maltegoEntity);
def addUIMessage(self,message,messageType="Inform"):
self.UIMessages.append([messageType,message]);
def addException(self,exceptionString):
self.exceptions.append(exceptionString);
def throwExceptions(self):
print "<MaltegoMessage>";
print "<MaltegoTransformExceptionMessage>";
print "<Exceptions>"
for i in range(len(self.exceptions)):
print "<Exception>" + self.exceptions[i] + "</Exceptions>";
print "</Exceptions>"
print "</MaltegoTransformExceptionMessage>";
print "</MaltegoMessage>";
def returnOutput(self):
print "<MaltegoMessage>";
print "<MaltegoTransformResponseMessage>";
print "<Entities>"
for i in range(len(self.entities)):
self.entities[i].returnEntity();
print "</Entities>"
print "<UIMessages>"
for i in range(len(self.UIMessages)):
print "<UIMessage MessageType=\"" + self.UIMessages[i][0] + "\">" + self.UIMessages[i][1] + "</UIMessage>";
print "</UIMessages>"
print "</MaltegoTransformResponseMessage>";
print "</MaltegoMessage>";
def writeSTDERR(self,msg):
sys.stderr.write(str(msg));
def heartbeat(self):
self.writeSTDERR("+");
def progress(self,percent):
self.writeSTDERR("%" + str(percent));
def debug(self,msg):
self.writeSTDERR("D:" + str(msg));
class MaltegoMsg:
def __init__(self,MaltegoXML=""):
xmldoc = minidom.parseString(MaltegoXML)
#read the easy stuff like value, limits etc
self.Value = self.i_getNodeValue(xmldoc,"Value")
self.Weight = self.i_getNodeValue(xmldoc,"Weight")
self.Slider = self.i_getNodeAttributeValue(xmldoc,"Limits","SoftLimit")
self.Type = self.i_getNodeAttributeValue(xmldoc,"Entity","Type")
#read additional fields
AdditionalFields = {}
try:
AFNodes= xmldoc.getElementsByTagName("AdditionalFields")[0]
Settings = AFNodes.getElementsByTagName("Field")
for node in Settings:
AFName = node.attributes["Name"].value;
AFValue = self.i_getText(node.childNodes);
AdditionalFields[AFName] = AFValue
except:
#sure this is not the right way...;)
dontcare=1
#parse transform settings
TransformSettings = {}
try:
TSNodes= xmldoc.getElementsByTagName("TransformFields")[0]
Settings = TSNodes.getElementsByTagName("Field")
for node in Settings:
TSName = node.attributes["Name"].value;
TSValue = self.i_getText(node.childNodes);
TransformSettings[TSName] = TSValue
except:
dontcare=1
#load back into object
self.AdditionalFields = AdditionalFields
self.TransformSettings = TransformSettings
def i_getText(self,nodelist):
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
def i_getNodeValue(self,node,Tag):
return self.i_getText(node.getElementsByTagName(Tag)[0].childNodes)
def i_getNodeAttributeValue(self,node,Tag,Attribute):
return node.getElementsByTagName(Tag)[0].attributes[Attribute].value;
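

# A minimal usage sketch of the transform helpers above; the entity type and values
# are made up. Running this file directly prints a MaltegoTransformResponseMessage
# XML document to stdout, which is what a local transform hands back to Maltego.
if __name__ == '__main__':
	trans = MaltegoTransform()
	ent = trans.addEntity("Phrase", "hello from snoopy")
	ent.setWeight(80)
	trans.addUIMessage("transform completed")
	trans.returnOutput()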
|
tests/conftest.py | bbhunter/fuzz-lightyear | 169 | 7277 | import pytest
from fuzz_lightyear.datastore import _ALL_POST_FUZZ_HOOKS_BY_OPERATION
from fuzz_lightyear.datastore import _ALL_POST_FUZZ_HOOKS_BY_TAG
from fuzz_lightyear.datastore import _RERUN_POST_FUZZ_HOOKS_BY_OPERATION
from fuzz_lightyear.datastore import _RERUN_POST_FUZZ_HOOKS_BY_TAG
from fuzz_lightyear.datastore import get_excluded_operations
from fuzz_lightyear.datastore import get_included_tags
from fuzz_lightyear.datastore import get_non_vulnerable_operations
from fuzz_lightyear.datastore import get_user_defined_mapping
from fuzz_lightyear.plugins import get_enabled_plugins
from fuzz_lightyear.request import get_victim_session_factory
from fuzz_lightyear.supplements.abstraction import get_abstraction
@pytest.fixture(autouse=True)
def clear_caches():
get_abstraction.cache_clear()
get_user_defined_mapping.cache_clear()
get_enabled_plugins.cache_clear()
get_victim_session_factory.cache_clear()
get_excluded_operations.cache_clear()
get_non_vulnerable_operations.cache_clear()
get_included_tags.cache_clear()
_ALL_POST_FUZZ_HOOKS_BY_OPERATION.clear()
_ALL_POST_FUZZ_HOOKS_BY_TAG.clear()
_RERUN_POST_FUZZ_HOOKS_BY_OPERATION.clear()
_RERUN_POST_FUZZ_HOOKS_BY_TAG.clear()
@pytest.fixture(autouse=True)
def ignore_hypothesis_non_interactive_example_warning():
"""In theory we're not supposed to use hypothesis'
strategy.example(), but fuzz-lightyear isn't using
hypothesis in a normal way.
"""
import warnings
from hypothesis.errors import NonInteractiveExampleWarning
warnings.filterwarnings(
'ignore',
category=NonInteractiveExampleWarning,
)
|
src/tests/test_stop_at_task.py | francesco-p/FACIL | 243 | 7289 | <filename>src/tests/test_stop_at_task.py
from tests import run_main_and_assert
FAST_LOCAL_TEST_ARGS = "--exp-name local_test --datasets mnist" \
" --network LeNet --num-tasks 5 --seed 1 --batch-size 32" \
" --nepochs 2 --num-workers 0 --stop-at-task 3"
def test_finetuning_stop_at_task():
args_line = FAST_LOCAL_TEST_ARGS
args_line += " --approach finetuning"
run_main_and_assert(args_line)
|
Python/contains-duplicate.py | shreyventure/LeetCode-Solutions | 388 | 7290 | <gh_stars>100-1000
# Author: <NAME> (@optider)
# Github Profile: https://github.com/Optider/
# Problem Link: https://leetcode.com/problems/contains-duplicate/

from typing import List
class Solution:
def containsDuplicate(self, nums: List[int]) -> bool:
        # Track each value as it is seen; hitting a value twice means a duplicate.
        count = {}
for n in nums :
if count.get(n) != None :
return True
count[n] = 1
return False
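

# Quick self-check with made-up inputs (LeetCode normally instantiates Solution itself).
if __name__ == "__main__":
    solution = Solution()
    print(solution.containsDuplicate([1, 2, 3, 1]))  # True
    print(solution.containsDuplicate([1, 2, 3, 4]))  # False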
|
build/android/gyp/dex.py | google-ar/chromium | 2,151 | 7291 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import optparse
import os
import sys
import tempfile
import zipfile
from util import build_utils
def _CheckFilePathEndsWithJar(parser, file_path):
if not file_path.endswith(".jar"):
# dx ignores non .jar files.
parser.error("%s does not end in .jar" % file_path)
def _CheckFilePathsEndWithJar(parser, file_paths):
for file_path in file_paths:
_CheckFilePathEndsWithJar(parser, file_path)
def _RemoveUnwantedFilesFromZip(dex_path):
iz = zipfile.ZipFile(dex_path, 'r')
tmp_dex_path = '%s.tmp.zip' % dex_path
oz = zipfile.ZipFile(tmp_dex_path, 'w', zipfile.ZIP_DEFLATED)
for i in iz.namelist():
if i.endswith('.dex'):
oz.writestr(i, iz.read(i))
os.remove(dex_path)
os.rename(tmp_dex_path, dex_path)
def _ParseArgs(args):
args = build_utils.ExpandFileArgs(args)
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option('--android-sdk-tools',
help='Android sdk build tools directory.')
parser.add_option('--output-directory',
default=os.getcwd(),
help='Path to the output build directory.')
parser.add_option('--dex-path', help='Dex output path.')
parser.add_option('--configuration-name',
help='The build CONFIGURATION_NAME.')
parser.add_option('--proguard-enabled',
help='"true" if proguard is enabled.')
parser.add_option('--debug-build-proguard-enabled',
help='"true" if proguard is enabled for debug build.')
parser.add_option('--proguard-enabled-input-path',
help=('Path to dex in Release mode when proguard '
'is enabled.'))
parser.add_option('--no-locals', default='0',
help='Exclude locals list from the dex file.')
parser.add_option('--incremental',
action='store_true',
help='Enable incremental builds when possible.')
parser.add_option('--inputs', help='A list of additional input paths.')
parser.add_option('--excluded-paths',
help='A list of paths to exclude from the dex file.')
parser.add_option('--main-dex-list-path',
help='A file containing a list of the classes to '
'include in the main dex.')
parser.add_option('--multidex-configuration-path',
help='A JSON file containing multidex build configuration.')
parser.add_option('--multi-dex', default=False, action='store_true',
help='Generate multiple dex files.')
options, paths = parser.parse_args(args)
required_options = ('android_sdk_tools',)
build_utils.CheckOptions(options, parser, required=required_options)
if options.multidex_configuration_path:
with open(options.multidex_configuration_path) as multidex_config_file:
multidex_config = json.loads(multidex_config_file.read())
options.multi_dex = multidex_config.get('enabled', False)
if options.multi_dex and not options.main_dex_list_path:
logging.warning('multidex cannot be enabled without --main-dex-list-path')
options.multi_dex = False
elif options.main_dex_list_path and not options.multi_dex:
logging.warning('--main-dex-list-path is unused if multidex is not enabled')
if options.inputs:
options.inputs = build_utils.ParseGnList(options.inputs)
_CheckFilePathsEndWithJar(parser, options.inputs)
if options.excluded_paths:
options.excluded_paths = build_utils.ParseGnList(options.excluded_paths)
if options.proguard_enabled_input_path:
_CheckFilePathEndsWithJar(parser, options.proguard_enabled_input_path)
_CheckFilePathsEndWithJar(parser, paths)
return options, paths
def _AllSubpathsAreClassFiles(paths, changes):
for path in paths:
if any(not p.endswith('.class') for p in changes.IterChangedSubpaths(path)):
return False
return True
def _DexWasEmpty(paths, changes):
for path in paths:
if any(p.endswith('.class')
for p in changes.old_metadata.IterSubpaths(path)):
return False
return True
def _IterAllClassFiles(changes):
for path in changes.IterAllPaths():
for subpath in changes.IterAllSubpaths(path):
if subpath.endswith('.class'):
yield path
def _MightHitDxBug(changes):
# We've seen dx --incremental fail for small libraries. It's unlikely a
  # speed-up anyway in this case.
num_classes = sum(1 for x in _IterAllClassFiles(changes))
if num_classes < 10:
return True
# We've also been able to consistently produce a failure by adding an empty
# line to the top of the first .java file of a library.
# https://crbug.com/617935
first_file = next(_IterAllClassFiles(changes))
for path in changes.IterChangedPaths():
for subpath in changes.IterChangedSubpaths(path):
if first_file == subpath:
return True
return False
def _RunDx(changes, options, dex_cmd, paths):
with build_utils.TempDir() as classes_temp_dir:
# --multi-dex is incompatible with --incremental.
if options.multi_dex:
dex_cmd.append('--main-dex-list=%s' % options.main_dex_list_path)
else:
# --incremental tells dx to merge all newly dex'ed .class files with
# what that already exist in the output dex file (existing classes are
# replaced).
# Use --incremental when .class files are added or modified, but not when
# any are removed (since it won't know to remove them).
if (options.incremental
and not _MightHitDxBug(changes)
and changes.AddedOrModifiedOnly()):
changed_inputs = set(changes.IterChangedPaths())
changed_paths = [p for p in paths if p in changed_inputs]
if not changed_paths:
return
# When merging in other dex files, there's no easy way to know if
# classes were removed from them.
if (_AllSubpathsAreClassFiles(changed_paths, changes)
and not _DexWasEmpty(changed_paths, changes)):
dex_cmd.append('--incremental')
for path in changed_paths:
changed_subpaths = set(changes.IterChangedSubpaths(path))
# Note: |changed_subpaths| may be empty if nothing changed.
if changed_subpaths:
build_utils.ExtractAll(path, path=classes_temp_dir,
predicate=lambda p: p in changed_subpaths)
paths = [classes_temp_dir]
dex_cmd += paths
build_utils.CheckOutput(dex_cmd, print_stderr=False)
if options.dex_path.endswith('.zip'):
_RemoveUnwantedFilesFromZip(options.dex_path)
def _OnStaleMd5(changes, options, dex_cmd, paths):
_RunDx(changes, options, dex_cmd, paths)
build_utils.WriteJson(
[os.path.relpath(p, options.output_directory) for p in paths],
options.dex_path + '.inputs')
def main(args):
options, paths = _ParseArgs(args)
if ((options.proguard_enabled == 'true'
and options.configuration_name == 'Release')
or (options.debug_build_proguard_enabled == 'true'
and options.configuration_name == 'Debug')):
paths = [options.proguard_enabled_input_path]
if options.inputs:
paths += options.inputs
if options.excluded_paths:
# Excluded paths are relative to the output directory.
exclude_paths = options.excluded_paths
paths = [p for p in paths if not
os.path.relpath(p, options.output_directory) in exclude_paths]
input_paths = list(paths)
dx_binary = os.path.join(options.android_sdk_tools, 'dx')
# See http://crbug.com/272064 for context on --force-jumbo.
# See https://github.com/android/platform_dalvik/commit/dd140a22d for
# --num-threads.
# See http://crbug.com/658782 for why -JXmx2G was added.
dex_cmd = [dx_binary, '-JXmx2G', '--num-threads=8', '--dex', '--force-jumbo',
'--output', options.dex_path]
if options.no_locals != '0':
dex_cmd.append('--no-locals')
if options.multi_dex:
input_paths.append(options.main_dex_list_path)
dex_cmd += [
'--multi-dex',
'--minimal-main-dex',
]
output_paths = [
options.dex_path,
options.dex_path + '.inputs',
]
# An escape hatch to be able to check if incremental dexing is causing
# problems.
force = int(os.environ.get('DISABLE_INCREMENTAL_DX', 0))
build_utils.CallAndWriteDepfileIfStale(
lambda changes: _OnStaleMd5(changes, options, dex_cmd, paths),
options,
input_paths=input_paths,
input_strings=dex_cmd,
output_paths=output_paths,
force=force,
pass_changes=True)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
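
# Example invocation (paths are illustrative; every flag below is defined in
# _ParseArgs above):
#   dex.py --android-sdk-tools out/sdk/build-tools \
#          --output-directory out/Debug \
#          --dex-path out/Debug/obj/foo/classes.dex \
#          --configuration-name Debug \
#          --no-locals 1 \
#          path/to/foo.jar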
|
tests/test_utils_log.py | FingerCrunch/scrapy | 41,267 | 7298 | import sys
import logging
import unittest
from testfixtures import LogCapture
from twisted.python.failure import Failure
from scrapy.utils.log import (failure_to_exc_info, TopLevelFormatter,
LogCounterHandler, StreamLogger)
from scrapy.utils.test import get_crawler
from scrapy.extensions import telnet
class FailureToExcInfoTest(unittest.TestCase):
def test_failure(self):
try:
0 / 0
except ZeroDivisionError:
exc_info = sys.exc_info()
failure = Failure()
self.assertTupleEqual(exc_info, failure_to_exc_info(failure))
def test_non_failure(self):
self.assertIsNone(failure_to_exc_info('test'))
class TopLevelFormatterTest(unittest.TestCase):
def setUp(self):
self.handler = LogCapture()
self.handler.addFilter(TopLevelFormatter(['test']))
def test_top_level_logger(self):
logger = logging.getLogger('test')
with self.handler as log:
logger.warning('test log msg')
log.check(('test', 'WARNING', 'test log msg'))
def test_children_logger(self):
logger = logging.getLogger('test.test1')
with self.handler as log:
logger.warning('test log msg')
log.check(('test', 'WARNING', 'test log msg'))
def test_overlapping_name_logger(self):
logger = logging.getLogger('test2')
with self.handler as log:
logger.warning('test log msg')
log.check(('test2', 'WARNING', 'test log msg'))
def test_different_name_logger(self):
logger = logging.getLogger('different')
with self.handler as log:
logger.warning('test log msg')
log.check(('different', 'WARNING', 'test log msg'))
class LogCounterHandlerTest(unittest.TestCase):
def setUp(self):
settings = {'LOG_LEVEL': 'WARNING'}
if not telnet.TWISTED_CONCH_AVAILABLE:
# disable it to avoid the extra warning
settings['TELNETCONSOLE_ENABLED'] = False
self.logger = logging.getLogger('test')
self.logger.setLevel(logging.NOTSET)
self.logger.propagate = False
self.crawler = get_crawler(settings_dict=settings)
self.handler = LogCounterHandler(self.crawler)
self.logger.addHandler(self.handler)
def tearDown(self):
self.logger.propagate = True
self.logger.removeHandler(self.handler)
def test_init(self):
self.assertIsNone(self.crawler.stats.get_value('log_count/DEBUG'))
self.assertIsNone(self.crawler.stats.get_value('log_count/INFO'))
self.assertIsNone(self.crawler.stats.get_value('log_count/WARNING'))
self.assertIsNone(self.crawler.stats.get_value('log_count/ERROR'))
self.assertIsNone(self.crawler.stats.get_value('log_count/CRITICAL'))
def test_accepted_level(self):
self.logger.error('test log msg')
self.assertEqual(self.crawler.stats.get_value('log_count/ERROR'), 1)
def test_filtered_out_level(self):
self.logger.debug('test log msg')
self.assertIsNone(self.crawler.stats.get_value('log_count/INFO'))
class StreamLoggerTest(unittest.TestCase):
def setUp(self):
self.stdout = sys.stdout
logger = logging.getLogger('test')
logger.setLevel(logging.WARNING)
sys.stdout = StreamLogger(logger, logging.ERROR)
def tearDown(self):
sys.stdout = self.stdout
def test_redirect(self):
with LogCapture() as log:
print('test log msg')
log.check(('test', 'ERROR', 'test log msg'))
|
demoproject/demoproject/urls.py | alvnary18/django-nvd3 | 302 | 7305 | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'^piechart/', views.demo_piechart, name='demo_piechart'),
url(r'^linechart/', views.demo_linechart, name='demo_linechart'),
url(r'^linechart_without_date/', views.demo_linechart_without_date, name='demo_linechart_without_date'),
url(r'^linewithfocuschart/', views.demo_linewithfocuschart, name='demo_linewithfocuschart'),
url(r'^multibarchart/', views.demo_multibarchart, name='demo_multibarchart'),
url(r'^stackedareachart/', views.demo_stackedareachart, name='demo_stackedareachart'),
url(r'^multibarhorizontalchart/', views.demo_multibarhorizontalchart, name='demo_multibarhorizontalchart'),
url(r'^lineplusbarchart/', views.demo_lineplusbarchart, name='demo_lineplusbarchart'),
url(r'^cumulativelinechart/', views.demo_cumulativelinechart, name='demo_cumulativelinechart'),
url(r'^discretebarchart/', views.demo_discretebarchart, name='demo_discretebarchart'),
url(r'^discretebarchart_with_date/', views.demo_discretebarchart_with_date, name='demo_discretebarchart_date'),
url(r'^scatterchart/', views.demo_scatterchart, name='demo_scatterchart'),
url(r'^linechart_with_ampm/', views.demo_linechart_with_ampm, name='demo_linechart_with_ampm'),
# url(r'^demoproject/', include('demoproject.foo.urls')),
]
|
src/gui/tcltk/tcl/tests/langbench/proc.py | gspu/bitkeeper | 342 | 7344 | <reponame>gspu/bitkeeper<filename>src/gui/tcltk/tcl/tests/langbench/proc.py
#!/usr/bin/python
def a(val):
return b(val)
def b(val):
return c(val)
def c(val):
return d(val)
def d(val):
return e(val)
def e(val):
return f(val)
def f(val):
return g(val, 2)
def g(v1, v2):
return h(v1, v2, 3)
def h(v1, v2, v3):
return i(v1, v2, v3, 4)
def i(v1, v2, v3, v4):
return j(v1, v2, v3, v4, 5)
def j(v1, v2, v3, v4, v5):
return v1 + v2 + v3 + v4 + v5
n = 100000
while n > 0:
x = a(n)
n = n - 1
print "x=%d" % x
|
azure-devops/azext_devops/test/common/test_format.py | doggy8088/azure-devops-cli-extension | 326 | 7349 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from azext_devops.dev.common.format import trim_for_display, date_time_to_only_date
class TestFormatMethods(unittest.TestCase):
def test_trim_for_display(self):
input = 'Gallery extensions for Portal Extension'
output = trim_for_display(input, 20)
self.assertEqual(output, 'Gallery extensions f...')
input = 'Aex platform'
output = trim_for_display(input, 20)
self.assertEqual(output, input)
input = ''
output = trim_for_display(input, 20)
self.assertEqual(output, input)
input = None
output = trim_for_display(input, 20)
self.assertEqual(output, input)
def test_date_time_to_only_date(self):
input = '2019-02-24T02:45:41.277000+00:00'
output = date_time_to_only_date(input)
self.assertEqual(output, '2019-02-24')
input = 'Aex platform'
output = date_time_to_only_date(input)
self.assertEqual(output, input)
if __name__ == '__main__':
unittest.main() |
tests/attr/test_kernel_shap.py | trsvchn/captum | 3,140 | 7398 | <filename>tests/attr/test_kernel_shap.py
#!/usr/bin/env python3
import io
import unittest
import unittest.mock
from typing import Any, Callable, List, Tuple, Union
import torch
from captum._utils.typing import BaselineType, TensorOrTupleOfTensorsGeneric
from captum.attr._core.kernel_shap import KernelShap
from tests.helpers.basic import (
BaseTest,
assertTensorAlmostEqual,
assertTensorTuplesAlmostEqual,
)
from tests.helpers.basic_models import (
BasicModel_MultiLayer,
BasicModel_MultiLayer_MultiInput,
)
class Test(BaseTest):
def setUp(self) -> None:
super().setUp()
try:
import sklearn # noqa: F401
assert (
sklearn.__version__ >= "0.23.0"
), "Must have sklearn version 0.23.0 or higher"
except (ImportError, AssertionError):
raise unittest.SkipTest("Skipping KernelShap tests, sklearn not available.")
def test_linear_kernel_shap(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
baseline = torch.tensor([[10.0, 20.0, 10.0]], requires_grad=True)
self._kernel_shap_test_assert(
net,
inp,
[40.0, 120.0, 80.0],
n_samples=500,
baselines=baseline,
expected_coefs=[40.0, 120.0, 80.0],
)
def test_simple_kernel_shap(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._kernel_shap_test_assert(
net,
inp,
[76.66666, 196.66666, 116.66666],
perturbations_per_eval=(1, 2, 3),
n_samples=500,
)
def test_simple_kernel_shap_with_mask(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
self._kernel_shap_test_assert(
net,
inp,
[275.0, 275.0, 115.0],
feature_mask=torch.tensor([[0, 0, 1]]),
perturbations_per_eval=(1, 2, 3),
expected_coefs=[275.0, 115.0],
)
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_simple_kernel_shap_with_show_progress(self, mock_stderr) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]], requires_grad=True)
# test progress output for each batch size
for bsz in (1, 2, 3):
self._kernel_shap_test_assert(
net,
inp,
[76.66666, 196.66666, 116.66666],
perturbations_per_eval=(bsz,),
n_samples=500,
show_progress=True,
)
output = mock_stderr.getvalue()
# to test if progress calculation aligns with the actual iteration
# all perturbations_per_eval should reach progress of 100%
assert (
"Kernel Shap attribution: 100%" in output
), f"Error progress output: {repr(output)}"
mock_stderr.seek(0)
mock_stderr.truncate(0)
def test_simple_kernel_shap_with_baselines(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[20.0, 50.0, 30.0]])
self._kernel_shap_test_assert(
net,
inp,
[248.0, 248.0, 104.0],
feature_mask=torch.tensor([[0, 0, 1]]),
baselines=4,
perturbations_per_eval=(1, 2, 3),
)
def test_simple_batch_kernel_shap(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
self._kernel_shap_test_assert(
net,
inp,
[[7.0, 32.5, 10.5], [76.66666, 196.66666, 116.66666]],
perturbations_per_eval=(1, 2, 3),
n_samples=20000,
)
def test_simple_batch_kernel_shap_with_mask(self) -> None:
net = BasicModel_MultiLayer()
inp = torch.tensor([[2.0, 10.0, 3.0], [20.0, 50.0, 30.0]], requires_grad=True)
self._kernel_shap_test_assert(
net,
inp,
[[39.5, 39.5, 10.5], [275.0, 275.0, 115.0]],
feature_mask=torch.tensor([[0, 0, 1], [1, 1, 0]]),
perturbations_per_eval=(1, 2, 3),
n_samples=100,
expected_coefs=[[39.5, 10.5], [115.0, 275.0]],
)
def test_multi_input_kernel_shap_without_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 0.0, 0.0]])
inp2 = torch.tensor([[20.0, 0.0, 50.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0]])
expected = (
[[90, 0, 0]],
[[78, 0, 198]],
[[0, 398, 38]],
)
self._kernel_shap_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
n_samples=2000,
)
def test_multi_input_kernel_shap_with_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[20.0, 50.0, 30.0]])
inp2 = torch.tensor([[0.0, 100.0, 0.0]])
inp3 = torch.tensor([[2.0, 10.0, 3.0]])
mask1 = torch.tensor([[0, 1, 0]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 0, 0]])
expected = (
[[255.0, 595.0, 255.0]],
[[255.0, 595.0, 0.0]],
[[255.0, 255.0, 255.0]],
)
self._kernel_shap_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
)
expected_with_baseline = (
[[184, 580.0, 184]],
[[184, 580.0, -12.0]],
[[184, 184, 184]],
)
self._kernel_shap_test_assert(
net,
(inp1, inp2, inp3),
expected_with_baseline,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
baselines=(2, 3.0, 4),
perturbations_per_eval=(1, 2, 3),
)
def test_multi_input_batch_kernel_shap_without_mask(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 0.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 0.0, 50.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [0.0, 10.0, 0.0]])
expected = (
[[90, 0, 0], [78.0, 198.0, 118.0]],
[[78, 0, 198], [0.0, 398.0, 0.0]],
[[0, 398, 38], [0.0, 38.0, 0.0]],
)
self._kernel_shap_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
n_samples=2500,
expected_coefs=[
[90.0, 0, 0, 78, 0, 198, 0, 398, 38],
[78.0, 198.0, 118.0, 0.0, 398.0, 0.0, 0.0, 38.0, 0.0],
],
)
def test_multi_input_batch_kernel_shap(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [2.0, 10.0, 3.0]])
mask1 = torch.tensor([[1, 1, 1], [0, 1, 0]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 1, 2], [0, 0, 0]])
expected = (
[[1088.6666, 1088.6666, 1088.6666], [255.0, 595.0, 255.0]],
[[76.6666, 1088.6666, 156.6666], [255.0, 595.0, 0.0]],
[[76.6666, 1088.6666, 156.6666], [255.0, 255.0, 255.0]],
)
self._kernel_shap_test_assert(
net,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
n_samples=300,
)
expected_with_baseline = (
[[1040, 1040, 1040], [184, 580.0, 184]],
[[52, 1040, 132], [184, 580.0, -12.0]],
[[52, 1040, 132], [184, 184, 184]],
)
self._kernel_shap_test_assert(
net,
(inp1, inp2, inp3),
expected_with_baseline,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
baselines=(2, 3.0, 4),
perturbations_per_eval=(1, 2, 3),
)
# Remaining tests are for cases where forward function returns a scalar
# as either a float, integer, 0d tensor or 1d tensor.
def test_single_kernel_shap_scalar_float(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_scalar_kernel_shap_assert(
lambda inp: torch.sum(net(inp)).item()
)
def test_single_kernel_shap_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_scalar_kernel_shap_assert(lambda inp: torch.sum(net(inp)))
def test_single_kernel_shap_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_scalar_kernel_shap_assert(
lambda inp: torch.sum(net(inp)).reshape(1)
)
def test_single_kernel_shap_scalar_int(self) -> None:
net = BasicModel_MultiLayer()
self._single_input_scalar_kernel_shap_assert(
lambda inp: int(torch.sum(net(inp)).item())
)
def _single_input_scalar_kernel_shap_assert(self, func: Callable) -> None:
inp = torch.tensor([[2.0, 10.0, 3.0]], requires_grad=True)
mask = torch.tensor([[0, 0, 1]])
self._kernel_shap_test_assert(
func,
inp,
[[79.0, 79.0, 21.0]],
feature_mask=mask,
perturbations_per_eval=(1,),
target=None,
)
def test_multi_inp_kernel_shap_scalar_tensor_0d(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_scalar_kernel_shap_assert(lambda *inp: torch.sum(net(*inp)))
def test_multi_inp_kernel_shap_scalar_tensor_1d(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_scalar_kernel_shap_assert(
lambda *inp: torch.sum(net(*inp)).reshape(1)
)
def test_multi_inp_kernel_shap_scalar_tensor_int(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_scalar_kernel_shap_assert(
lambda *inp: int(torch.sum(net(*inp)).item())
)
def test_multi_inp_kernel_shap_scalar_float(self) -> None:
net = BasicModel_MultiLayer_MultiInput()
self._multi_input_scalar_kernel_shap_assert(
lambda *inp: torch.sum(net(*inp)).item()
)
def _multi_input_scalar_kernel_shap_assert(self, func: Callable) -> None:
inp1 = torch.tensor([[23.0, 100.0, 0.0], [20.0, 50.0, 30.0]])
inp2 = torch.tensor([[20.0, 50.0, 30.0], [0.0, 100.0, 0.0]])
inp3 = torch.tensor([[0.0, 100.0, 10.0], [20.0, 10.0, 13.0]])
mask1 = torch.tensor([[1, 1, 1]])
mask2 = torch.tensor([[0, 1, 2]])
mask3 = torch.tensor([[0, 1, 2]])
expected = (
[[3850.6666, 3850.6666, 3850.6666]],
[[306.6666, 3850.6666, 410.6666]],
[[306.6666, 3850.6666, 410.6666]],
)
self._kernel_shap_test_assert(
func,
(inp1, inp2, inp3),
expected,
additional_input=(1,),
feature_mask=(mask1, mask2, mask3),
perturbations_per_eval=(1,),
target=None,
n_samples=1500,
)
def _kernel_shap_test_assert(
self,
model: Callable,
test_input: TensorOrTupleOfTensorsGeneric,
expected_attr,
feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None,
additional_input: Any = None,
perturbations_per_eval: Tuple[int, ...] = (1,),
baselines: BaselineType = None,
target: Union[None, int] = 0,
n_samples: int = 100,
delta: float = 1.0,
expected_coefs: Union[None, List[float], List[List[float]]] = None,
show_progress: bool = False,
) -> None:
for batch_size in perturbations_per_eval:
kernel_shap = KernelShap(model)
attributions = kernel_shap.attribute(
test_input,
target=target,
feature_mask=feature_mask,
additional_forward_args=additional_input,
baselines=baselines,
perturbations_per_eval=batch_size,
n_samples=n_samples,
show_progress=show_progress,
)
assertTensorTuplesAlmostEqual(
self, attributions, expected_attr, delta=delta, mode="max"
)
if expected_coefs is not None:
# Test with return_input_shape = False
attributions = kernel_shap.attribute(
test_input,
target=target,
feature_mask=feature_mask,
additional_forward_args=additional_input,
baselines=baselines,
perturbations_per_eval=batch_size,
n_samples=n_samples,
return_input_shape=False,
show_progress=show_progress,
)
assertTensorAlmostEqual(
self, attributions, expected_coefs, delta=delta, mode="max"
)
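# --- Illustrative sketch (added for clarity; not part of the original test suite) ---
# The helper above checks KernelShap twice: with return_input_shape=True the
# attributions come back shaped like the inputs, and with return_input_shape=False
# it returns one coefficient per feature group defined by the feature mask.
# The hypothetical function below sketches a direct call outside the test harness,
# reusing BasicModel_MultiLayer, KernelShap and torch already imported in this
# module; it is never invoked by the tests.
def _kernel_shap_usage_sketch():
    net = BasicModel_MultiLayer()
    inp = torch.tensor([[20.0, 50.0, 30.0]])
    # Features 0 and 1 share group 0 and feature 2 is group 1, so KernelShap
    # perturbs each group jointly and fits two underlying coefficients.
    mask = torch.tensor([[0, 0, 1]])
    kernel_shap = KernelShap(net)
    return kernel_shap.attribute(inp, target=0, feature_mask=mask, n_samples=200)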
if __name__ == "__main__":
unittest.main()
|
tests/test_utils_obj_value.py | ZSD-tim/dayu_widgets | 157 | 7412 | <reponame>ZSD-tim/dayu_widgets
"""
Test get_obj_value set_obj_value has_obj_value
"""
import pytest
from dayu_widgets import utils
class _HasNameAgeObject(object):
def __init__(self, name, age):
super(_HasNameAgeObject, self).__init__()
self.name = name
self.age = age
@pytest.mark.parametrize('obj', (
{'name': 'xiaoming', 'age': 18},
_HasNameAgeObject('xiaoming', 18)
))
class TestObjValue(object):
"""Test get_obj_value has_obj_value set_obj_value collection."""
@pytest.mark.parametrize('attr, default, result', (
('name', 'hhh', 'xiaoming'),
('age', 0, 18),
('score', 0, 0)
))
def test_get_obj_value(self, obj, attr, default, result):
"""Test get_obj_value with dict/object as arg. """
assert utils.get_obj_value(obj, attr, default) == result
@pytest.mark.parametrize('attr, result', (
('name', True),
('age', True),
('sex', False),
))
def test_has_obj_value(self, obj, attr, result):
"""Test has_obj_value with dict/object as arg. """
assert utils.has_obj_value(obj, attr) == result
@pytest.mark.parametrize('attr, value', (
('name', 'xiaohua'),
('age', 30),
('id', 80),
))
def test_set_obj_value(self, obj, attr, value):
"""Test set_obj_value with dict/object as arg. """
utils.set_obj_value(obj, attr, value)
assert utils.get_obj_value(obj, attr) == value
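# --- Illustrative sketch (added for clarity; not part of the original test file) ---
# The utils helpers exercised above treat dicts and plain objects uniformly.
# The hypothetical function below repeats the same calls outside the pytest
# parametrization; its name starts with an underscore so pytest never collects it.
def _obj_value_usage_sketch():
    data = {'name': 'xiaoming', 'age': 18}
    obj = _HasNameAgeObject('xiaoming', 18)
    assert utils.get_obj_value(data, 'score', 0) == 0  # missing key falls back to default
    assert utils.has_obj_value(obj, 'name')            # attribute lookup on objects
    utils.set_obj_value(obj, 'age', 19)                # works for attributes and keys alike
    return utils.get_obj_value(obj, 'age')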
|
desktop/core/ext-py/PyYAML-3.12/tests/lib3/test_all.py | kokosing/hue | 5,079 | 7413 |
import sys, yaml, test_appliance
def main(args=None):
collections = []
import test_yaml
collections.append(test_yaml)
if yaml.__with_libyaml__:
import test_yaml_ext
collections.append(test_yaml_ext)
return test_appliance.run(collections, args)
if __name__ == '__main__':
main()
|
pajbot/apiwrappers/authentication/access_token.py | JoachimFlottorp/pajbot | 128 | 7447 | <gh_stars>100-1000
import datetime
from abc import ABC, abstractmethod
import pajbot
class AccessToken(ABC):
SHOULD_REFRESH_THRESHOLD = 0.9
"""Fraction between 0 and 1 indicating what fraction/percentage of the specified full validity period
should actually be utilized. E.g. if this is set to 0.9, the implementation will refresh the token
once at least 90% of the full validity period (expires_in) is over."""
def __init__(self, access_token, created_at, expires_in, token_type, refresh_token, scope):
self.access_token = access_token
self.created_at = created_at
# can both be None
self.expires_in = expires_in
if self.expires_in is not None:
self.expires_at = self.created_at + self.expires_in
else:
self.expires_at = None
self.token_type = token_type
# can be None
self.refresh_token = refresh_token
# always a list, can be empty list
self.scope = scope
@abstractmethod
def can_refresh(self):
pass
def should_refresh(self):
"""Returns True if less than 10% of the token's lifetime remains, False otherwise"""
if not self.can_refresh():
return False
# intended lifetime of the token
if self.expires_at is not None:
expires_after = self.expires_at - self.created_at
else:
# this is a token that never expires
# because we don't want any issues, refresh it anyways
expires_after = datetime.timedelta(hours=1)
# how much time has passed since token creation
token_age = pajbot.utils.now() - self.created_at
# maximum token age before token should be refreshed (90% of the total token lifetime)
max_token_age = expires_after * self.SHOULD_REFRESH_THRESHOLD
        # has the refresh threshold been reached?
return token_age >= max_token_age
def jsonify(self):
"""serialize for storage"""
if self.expires_in is None:
expires_in_milliseconds = None
else:
expires_in_milliseconds = self.expires_in.total_seconds() * 1000
return {
"access_token": self.access_token,
"created_at": self.created_at.timestamp() * 1000,
"expires_in": expires_in_milliseconds,
"token_type": self.token_type,
"refresh_token": self.refresh_token,
"scope": self.scope,
}
@classmethod
def from_json(cls, json_data):
"""deserialize json produced by jsonify()"""
if json_data["expires_in"] is None:
expires_in = None
else:
expires_in = datetime.timedelta(milliseconds=json_data["expires_in"])
return cls(
access_token=json_data["access_token"],
created_at=pajbot.utils.datetime_from_utc_milliseconds(json_data["created_at"]),
expires_in=expires_in,
token_type=json_data["token_type"],
refresh_token=json_data["refresh_token"],
scope=json_data["scope"],
)
@classmethod
def from_api_response(cls, response):
"""Construct new object from twitch response json data"""
# expires_in is only missing for old Client-IDs to which twitch will respond with
# infinitely-lived tokens (the "expires_in" field is absent in that case).
expires_in_seconds = response.get("expires_in", None)
if expires_in_seconds is None:
expires_in = None
else:
expires_in = datetime.timedelta(seconds=expires_in_seconds)
return cls(
access_token=response["access_token"],
created_at=pajbot.utils.now(),
expires_in=expires_in,
token_type=response["token_type"],
refresh_token=response.get("refresh_token", None),
scope=response.get("scope", []),
)
@abstractmethod
def refresh(self, api):
pass
class UserAccessToken(AccessToken):
def can_refresh(self):
return self.refresh_token is not None
def refresh(self, api):
if not self.can_refresh():
raise ValueError("This user access token cannot be refreshed, it has no refresh token")
return api.refresh_user_access_token(self.refresh_token)
@staticmethod
def from_implicit_auth_flow_token(access_token):
return UserAccessToken(
access_token=access_token,
created_at=None,
expires_in=None,
token_type="bearer",
refresh_token=None,
scope=[],
)
class AppAccessToken(AccessToken):
def can_refresh(self):
return True
def refresh(self, api):
return api.get_app_access_token(self.scope)
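# --- Illustrative sketch (added for clarity; not part of the original module) ---
# A minimal example of the refresh-threshold logic documented above. It assumes
# pajbot.utils.now() returns the current UTC time, as the methods above already do.
# With expires_in of 60 minutes and SHOULD_REFRESH_THRESHOLD of 0.9, should_refresh()
# starts returning True once roughly 54 minutes have elapsed.
def _refresh_threshold_sketch():
    token = UserAccessToken(
        access_token="example-access-token",  # hypothetical placeholder values
        created_at=pajbot.utils.now() - datetime.timedelta(minutes=55),
        expires_in=datetime.timedelta(minutes=60),
        token_type="bearer",
        refresh_token="example-refresh-token",
        scope=[],
    )
    # 55 minutes elapsed >= 0.9 * 60 minutes = 54 minutes, so a refresh is due.
    return token.can_refresh() and token.should_refresh()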
|
matchzoo/metrics/precision.py | ChrisRBXiong/MatchZoo-py | 468 | 7467 | <filename>matchzoo/metrics/precision.py
"""Precision for ranking."""
import numpy as np
from matchzoo.engine.base_metric import (
BaseMetric, sort_and_couple, RankingMetric
)
class Precision(RankingMetric):
"""Precision metric."""
ALIAS = 'precision'
def __init__(self, k: int = 1, threshold: float = 0.):
"""
:class:`PrecisionMetric` constructor.
:param k: Number of results to consider.
:param threshold: the label threshold of relevance degree.
"""
self._k = k
self._threshold = threshold
def __repr__(self) -> str:
""":return: Formated string representation of the metric."""
return f"{self.ALIAS}@{self._k}({self._threshold})"
def __call__(self, y_true: np.array, y_pred: np.array) -> float:
"""
Calculate precision@k.
Example:
>>> y_true = [0, 0, 0, 1]
>>> y_pred = [0.2, 0.4, 0.3, 0.1]
>>> Precision(k=1)(y_true, y_pred)
0.0
>>> Precision(k=2)(y_true, y_pred)
0.0
>>> Precision(k=4)(y_true, y_pred)
0.25
>>> Precision(k=5)(y_true, y_pred)
0.2
:param y_true: The ground true label of each document.
:param y_pred: The predicted scores of each document.
:return: Precision @ k
        :raises: ValueError: k must be greater than 0.
"""
if self._k <= 0:
raise ValueError(f"k must be greater than 0."
f"{self._k} received.")
coupled_pair = sort_and_couple(y_true, y_pred)
precision = 0.0
for idx, (label, score) in enumerate(coupled_pair):
if idx >= self._k:
break
if label > self._threshold:
precision += 1.
return precision / self._k
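# --- Illustrative sketch (added for clarity; not part of the original module) ---
# A worked example of the loop above, assuming sort_and_couple ranks documents by
# descending predicted score (as the doctest implies): with y_true = [1, 0, 1] and
# y_pred = [0.9, 0.8, 0.1], the top-2 labels are [1, 0], so Precision(k=2) is 1 / 2 = 0.5.
def _precision_usage_sketch():
    y_true = [1, 0, 1]
    y_pred = [0.9, 0.8, 0.1]
    return Precision(k=2)(y_true, y_pred)  # expected: 0.5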
|
tests/test_table/test_pivot.py | andriyor/agate | 663 | 7503 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf8 -*-
import sys
try:
from cdecimal import Decimal
except ImportError: # pragma: no cover
from decimal import Decimal
from agate import Table
from agate.aggregations import Sum
from agate.computations import Percent
from agate.data_types import Number, Text
from agate.testcase import AgateTestCase
class TestPivot(AgateTestCase):
def setUp(self):
self.rows = (
('joe', 'white', 'male', 20, 'blue'),
('jane', 'white', 'female', 20, 'blue'),
('josh', 'black', 'male', 20, 'blue'),
('jim', 'latino', 'male', 25, 'blue'),
('julia', 'white', 'female', 25, 'green'),
('joan', 'asian', 'female', 25, 'green')
)
self.number_type = Number()
self.text_type = Text()
self.column_names = ['name', 'race', 'gender', 'age', 'color']
self.column_types = [self.text_type, self.text_type, self.text_type, self.number_type, self.text_type]
def test_pivot(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot('race', 'gender')
pivot_rows = (
('white', 1, 2),
('black', 1, 0),
('latino', 1, 0),
('asian', 0, 1)
)
self.assertColumnNames(pivot_table, ['race', 'male', 'female'])
self.assertRowNames(pivot_table, ['white', 'black', 'latino', 'asian'])
self.assertColumnTypes(pivot_table, [Text, Number, Number])
self.assertRows(pivot_table, pivot_rows)
def test_pivot_by_lambda(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot(lambda r: r['gender'])
pivot_rows = (
('male', 3),
('female', 3)
)
self.assertColumnNames(pivot_table, ['group', 'Count'])
self.assertRowNames(pivot_table, ['male', 'female'])
self.assertColumnTypes(pivot_table, [Text, Number])
self.assertRows(pivot_table, pivot_rows)
def test_pivot_by_lambda_group_name(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot(lambda r: r['gender'], key_name='gender')
pivot_rows = (
('male', 3),
('female', 3)
)
self.assertColumnNames(pivot_table, ['gender', 'Count'])
self.assertRowNames(pivot_table, ['male', 'female'])
self.assertColumnTypes(pivot_table, [Text, Number])
self.assertRows(pivot_table, pivot_rows)
def test_pivot_by_lambda_group_name_sequence_invalid(self):
table = Table(self.rows, self.column_names, self.column_types)
with self.assertRaises(ValueError):
table.pivot(['race', 'gender'], key_name='foo')
def test_pivot_no_key(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot(pivot='gender')
pivot_rows = (
(3, 3),
)
self.assertColumnNames(pivot_table, ['male', 'female'])
self.assertColumnTypes(pivot_table, [Number, Number])
self.assertRows(pivot_table, pivot_rows)
def test_pivot_no_pivot(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot('race')
pivot_rows = (
('white', 3),
('black', 1),
('latino', 1),
('asian', 1)
)
self.assertColumnNames(pivot_table, ['race', 'Count'])
self.assertColumnTypes(pivot_table, [Text, Number])
self.assertRows(pivot_table, pivot_rows)
def test_pivot_sum(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot('race', 'gender', Sum('age'))
pivot_rows = (
('white', 20, 45),
('black', 20, 0),
('latino', 25, 0),
('asian', 0, 25)
)
self.assertColumnNames(pivot_table, ['race', 'male', 'female'])
self.assertColumnTypes(pivot_table, [Text, Number, Number])
self.assertRows(pivot_table, pivot_rows)
def test_pivot_multiple_keys(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot(['race', 'gender'], 'age')
pivot_rows = (
('white', 'male', 1, 0),
('white', 'female', 1, 1),
('black', 'male', 1, 0),
('latino', 'male', 0, 1),
('asian', 'female', 0, 1),
)
self.assertRows(pivot_table, pivot_rows)
self.assertColumnNames(pivot_table, ['race', 'gender', '20', '25'])
self.assertRowNames(pivot_table, [
('white', 'male'),
('white', 'female'),
('black', 'male'),
('latino', 'male'),
('asian', 'female'),
])
self.assertColumnTypes(pivot_table, [Text, Text, Number, Number])
def test_pivot_multiple_keys_no_pivot(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot(['race', 'gender'])
pivot_rows = (
('white', 'male', 1),
('white', 'female', 2),
('black', 'male', 1),
('latino', 'male', 1),
('asian', 'female', 1),
)
self.assertRows(pivot_table, pivot_rows)
self.assertColumnNames(pivot_table, ['race', 'gender', 'Count'])
self.assertColumnTypes(pivot_table, [Text, Text, Number])
def test_pivot_default_value(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot('race', 'gender', default_value=None)
pivot_rows = (
('white', 1, 2),
('black', 1, None),
('latino', 1, None),
('asian', None, 1)
)
self.assertColumnNames(pivot_table, ['race', 'male', 'female'])
self.assertColumnTypes(pivot_table, [Text, Number, Number])
self.assertRows(pivot_table, pivot_rows)
def test_pivot_compute(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot('gender', computation=Percent('Count'))
pivot_table.print_table(output=sys.stdout)
pivot_rows = (
('male', Decimal(50)),
('female', Decimal(50)),
)
self.assertColumnNames(pivot_table, ['gender', 'Percent'])
self.assertColumnTypes(pivot_table, [Text, Number])
self.assertRows(pivot_table, pivot_rows)
def test_pivot_compute_pivots(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot('gender', 'color', computation=Percent('Count'))
pivot_table.print_table(output=sys.stdout)
pivot_rows = (
('male', Decimal(50), 0),
('female', Decimal(1) / Decimal(6) * Decimal(100), Decimal(1) / Decimal(3) * Decimal(100)),
)
self.assertColumnNames(pivot_table, ['gender', 'blue', 'green'])
self.assertColumnTypes(pivot_table, [Text, Number, Number])
self.assertRows(pivot_table, pivot_rows)
def test_pivot_compute_kwargs(self):
table = Table(self.rows, self.column_names, self.column_types)
pivot_table = table.pivot('gender', 'color', computation=Percent('Count', total=8))
pivot_table.print_table(output=sys.stdout)
pivot_rows = (
('male', Decimal(3) / Decimal(8) * Decimal(100), 0),
('female', Decimal(1) / Decimal(8) * Decimal(100), Decimal(2) / Decimal(8) * Decimal(100)),
)
self.assertColumnNames(pivot_table, ['gender', 'blue', 'green'])
self.assertColumnTypes(pivot_table, [Text, Number, Number])
self.assertRows(pivot_table, pivot_rows)
|
utils/data/dataset_catalog.py | rs9899/Parsing-R-CNN | 289 | 7538 | <gh_stars>100-1000
import os.path as osp
# Root directory of project
ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
# Path to data dir
_DATA_DIR = osp.abspath(osp.join(ROOT_DIR, 'data'))
# Required dataset entry keys
_IM_DIR = 'image_directory'
_ANN_FN = 'annotation_file'
# Available datasets
COMMON_DATASETS = {
'coco_2017_train': {
_IM_DIR:
_DATA_DIR + '/coco/images/train2017',
_ANN_FN:
_DATA_DIR + '/coco/annotations/instances_train2017.json',
},
'coco_2017_val': {
_IM_DIR:
_DATA_DIR + '/coco/images/val2017',
_ANN_FN:
_DATA_DIR + '/coco/annotations/instances_val2017.json',
},
'coco_2017_test': {
_IM_DIR:
_DATA_DIR + '/coco/images/test2017',
_ANN_FN:
_DATA_DIR + '/coco/annotations/image_info_test2017.json',
},
'coco_2017_test-dev': {
_IM_DIR:
_DATA_DIR + '/coco/images/test2017',
_ANN_FN:
_DATA_DIR + '/coco/annotations/image_info_test-dev2017.json',
},
'keypoints_coco_2017_train': {
_IM_DIR:
_DATA_DIR + '/coco/images/train2017',
_ANN_FN:
_DATA_DIR + '/coco/annotations/person_keypoints_train2017.json'
},
'keypoints_coco_2017_val': {
_IM_DIR:
_DATA_DIR + '/coco/images/val2017',
_ANN_FN:
_DATA_DIR + '/coco/annotations/person_keypoints_val2017.json'
},
'keypoints_coco_2017_test': {
_IM_DIR:
_DATA_DIR + '/coco/images/test2017',
_ANN_FN:
_DATA_DIR + '/coco/annotations/image_info_test2017.json'
},
'keypoints_coco_2017_test-dev': {
_IM_DIR:
_DATA_DIR + '/coco/images/test2017',
_ANN_FN:
_DATA_DIR + '/coco/annotations/image_info_test-dev2017.json',
},
'dense_coco_2017_train': {
_IM_DIR:
_DATA_DIR + '/coco/images/train2017',
_ANN_FN:
_DATA_DIR + '/coco/annotations/DensePoseData/densepose_coco_train2017.json',
},
'dense_coco_2017_val': {
_IM_DIR:
_DATA_DIR + '/coco/images/val2017',
_ANN_FN:
_DATA_DIR + '/coco/annotations/DensePoseData/densepose_coco_val2017.json',
},
'dense_coco_2017_test': {
_IM_DIR:
_DATA_DIR + '/coco/images/test2017',
_ANN_FN:
_DATA_DIR + '/coco/annotations/DensePoseData/densepose_coco_test.json',
},
'CIHP_train': { # new addition by wzh
_IM_DIR:
_DATA_DIR + '/CIHP/train_img',
_ANN_FN:
_DATA_DIR + '/CIHP/annotations/CIHP_train.json',
},
'CIHP_val': { # new addition by wzh
_IM_DIR:
_DATA_DIR + '/CIHP/val_img',
_ANN_FN:
_DATA_DIR + '/CIHP/annotations/CIHP_val.json',
},
'CIHP_test': { # new addition by wzh
_IM_DIR:
_DATA_DIR + '/CIHP/test_img',
_ANN_FN:
_DATA_DIR + '/CIHP/annotations/CIHP_test.json',
},
'MHP-v2_train': { # new addition by wzh
_IM_DIR:
_DATA_DIR + '/MHP-v2/train_img',
_ANN_FN:
_DATA_DIR + '/MHP-v2/annotations/MHP-v2_train.json',
},
'MHP-v2_val': { # new addition by wzh
_IM_DIR:
_DATA_DIR + '/MHP-v2/val_img',
_ANN_FN:
_DATA_DIR + '/MHP-v2/annotations/MHP-v2_val.json',
},
'MHP-v2_test': { # new addition by wzh
_IM_DIR:
_DATA_DIR + '/MHP-v2/test_img',
_ANN_FN:
_DATA_DIR + '/MHP-v2/annotations/MHP-v2_test_all.json',
},
'MHP-v2_test_inter_top10': { # new addition by wzh
_IM_DIR:
_DATA_DIR + '/MHP-v2/test_img',
_ANN_FN:
_DATA_DIR + '/MHP-v2/annotations/MHP-v2_test_inter_top10.json',
},
'MHP-v2_test_inter_top20': { # new addition by wzh
_IM_DIR:
_DATA_DIR + '/MHP-v2/test_img',
_ANN_FN:
_DATA_DIR + '/MHP-v2/annotations/MHP-v2_test_inter_top20.json',
},
'PASCAL-Person-Part_train': { # new addition by soeaver
_IM_DIR:
_DATA_DIR + '/PASCAL-Person-Part/train_img',
_ANN_FN:
_DATA_DIR + '/PASCAL-Person-Part/annotations/pascal_person_part_train.json',
},
'PASCAL-Person-Part_test': { # new addition by soeaver
_IM_DIR:
_DATA_DIR + '/PASCAL-Person-Part/test_img',
_ANN_FN:
_DATA_DIR + '/PASCAL-Person-Part/annotations/pascal_person_part_test.json',
}
}
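# --- Illustrative sketch (added for clarity; not part of the original module) ---
# COMMON_DATASETS maps a dataset name to its image directory and annotation file.
# A hypothetical lookup helper showing how an entry is typically consumed:
def _dataset_paths_sketch(name='coco_2017_train'):
    entry = COMMON_DATASETS[name]
    return entry[_IM_DIR], entry[_ANN_FN]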
|
sdk/python/pulumi_gcp/accesscontextmanager/service_perimeter.py | sisisin/pulumi-gcp | 121 | 7549 | <gh_stars>100-1000
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ServicePerimeterArgs', 'ServicePerimeter']
@pulumi.input_type
class ServicePerimeterArgs:
def __init__(__self__, *,
parent: pulumi.Input[str],
title: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
perimeter_type: Optional[pulumi.Input[str]] = None,
spec: Optional[pulumi.Input['ServicePerimeterSpecArgs']] = None,
status: Optional[pulumi.Input['ServicePerimeterStatusArgs']] = None,
use_explicit_dry_run_spec: Optional[pulumi.Input[bool]] = None):
"""
The set of arguments for constructing a ServicePerimeter resource.
:param pulumi.Input[str] parent: The AccessPolicy this ServicePerimeter lives in.
Format: accessPolicies/{policy_id}
:param pulumi.Input[str] title: Human readable title. Must be unique within the Policy.
:param pulumi.Input[str] description: Description of the ServicePerimeter and its use. Does not affect
behavior.
:param pulumi.Input[str] name: Resource name for the ServicePerimeter. The short_name component must
begin with a letter and only include alphanumeric and '_'.
Format: accessPolicies/{policy_id}/servicePerimeters/{short_name}
:param pulumi.Input[str] perimeter_type: Specifies the type of the Perimeter. There are two types: regular and
bridge. Regular Service Perimeter contains resources, access levels,
and restricted services. Every resource can be in at most
ONE regular Service Perimeter.
In addition to being in a regular service perimeter, a resource can also
be in zero or more perimeter bridges. A perimeter bridge only contains
               resources. Cross-project operations are permitted if all affected
resources share some perimeter (whether bridge or regular). Perimeter
Bridge does not contain access levels or services: those are governed
entirely by the regular perimeter that resource is in.
Perimeter Bridges are typically useful when building more complex
topologies with many independent perimeters that need to share some data
with a common perimeter, but should not be able to share data among
themselves.
Default value is `PERIMETER_TYPE_REGULAR`.
Possible values are `PERIMETER_TYPE_REGULAR` and `PERIMETER_TYPE_BRIDGE`.
:param pulumi.Input['ServicePerimeterSpecArgs'] spec: Proposed (or dry run) ServicePerimeter configuration.
               This configuration allows you to specify and test ServicePerimeter configuration
without enforcing actual access restrictions. Only allowed to be set when
the `useExplicitDryRunSpec` flag is set.
Structure is documented below.
:param pulumi.Input['ServicePerimeterStatusArgs'] status: ServicePerimeter configuration. Specifies sets of resources,
restricted services and access levels that determine
perimeter content and boundaries.
Structure is documented below.
:param pulumi.Input[bool] use_explicit_dry_run_spec: Use explicit dry run spec flag. Ordinarily, a dry-run spec implicitly exists
for all Service Perimeters, and that spec is identical to the status for those
Service Perimeters. When this flag is set, it inhibits the generation of the
implicit spec, thereby allowing the user to explicitly provide a
configuration ("spec") to use in a dry-run version of the Service Perimeter.
This allows the user to test changes to the enforced config ("status") without
actually enforcing them. This testing is done through analyzing the differences
between currently enforced and suggested restrictions. useExplicitDryRunSpec must
               be set to True if any of the fields in the spec are set to non-default values.
"""
pulumi.set(__self__, "parent", parent)
pulumi.set(__self__, "title", title)
if description is not None:
pulumi.set(__self__, "description", description)
if name is not None:
pulumi.set(__self__, "name", name)
if perimeter_type is not None:
pulumi.set(__self__, "perimeter_type", perimeter_type)
if spec is not None:
pulumi.set(__self__, "spec", spec)
if status is not None:
pulumi.set(__self__, "status", status)
if use_explicit_dry_run_spec is not None:
pulumi.set(__self__, "use_explicit_dry_run_spec", use_explicit_dry_run_spec)
@property
@pulumi.getter
def parent(self) -> pulumi.Input[str]:
"""
The AccessPolicy this ServicePerimeter lives in.
Format: accessPolicies/{policy_id}
"""
return pulumi.get(self, "parent")
@parent.setter
def parent(self, value: pulumi.Input[str]):
pulumi.set(self, "parent", value)
@property
@pulumi.getter
def title(self) -> pulumi.Input[str]:
"""
Human readable title. Must be unique within the Policy.
"""
return pulumi.get(self, "title")
@title.setter
def title(self, value: pulumi.Input[str]):
pulumi.set(self, "title", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of the ServicePerimeter and its use. Does not affect
behavior.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Resource name for the ServicePerimeter. The short_name component must
begin with a letter and only include alphanumeric and '_'.
Format: accessPolicies/{policy_id}/servicePerimeters/{short_name}
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="perimeterType")
def perimeter_type(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the type of the Perimeter. There are two types: regular and
bridge. Regular Service Perimeter contains resources, access levels,
and restricted services. Every resource can be in at most
ONE regular Service Perimeter.
In addition to being in a regular service perimeter, a resource can also
be in zero or more perimeter bridges. A perimeter bridge only contains
        resources. Cross-project operations are permitted if all affected
resources share some perimeter (whether bridge or regular). Perimeter
Bridge does not contain access levels or services: those are governed
entirely by the regular perimeter that resource is in.
Perimeter Bridges are typically useful when building more complex
topologies with many independent perimeters that need to share some data
with a common perimeter, but should not be able to share data among
themselves.
Default value is `PERIMETER_TYPE_REGULAR`.
Possible values are `PERIMETER_TYPE_REGULAR` and `PERIMETER_TYPE_BRIDGE`.
"""
return pulumi.get(self, "perimeter_type")
@perimeter_type.setter
def perimeter_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "perimeter_type", value)
@property
@pulumi.getter
def spec(self) -> Optional[pulumi.Input['ServicePerimeterSpecArgs']]:
"""
Proposed (or dry run) ServicePerimeter configuration.
        This configuration allows you to specify and test ServicePerimeter configuration
without enforcing actual access restrictions. Only allowed to be set when
the `useExplicitDryRunSpec` flag is set.
Structure is documented below.
"""
return pulumi.get(self, "spec")
@spec.setter
def spec(self, value: Optional[pulumi.Input['ServicePerimeterSpecArgs']]):
pulumi.set(self, "spec", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input['ServicePerimeterStatusArgs']]:
"""
ServicePerimeter configuration. Specifies sets of resources,
restricted services and access levels that determine
perimeter content and boundaries.
Structure is documented below.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input['ServicePerimeterStatusArgs']]):
pulumi.set(self, "status", value)
@property
@pulumi.getter(name="useExplicitDryRunSpec")
def use_explicit_dry_run_spec(self) -> Optional[pulumi.Input[bool]]:
"""
Use explicit dry run spec flag. Ordinarily, a dry-run spec implicitly exists
for all Service Perimeters, and that spec is identical to the status for those
Service Perimeters. When this flag is set, it inhibits the generation of the
implicit spec, thereby allowing the user to explicitly provide a
configuration ("spec") to use in a dry-run version of the Service Perimeter.
This allows the user to test changes to the enforced config ("status") without
actually enforcing them. This testing is done through analyzing the differences
between currently enforced and suggested restrictions. useExplicitDryRunSpec must
        be set to True if any of the fields in the spec are set to non-default values.
"""
return pulumi.get(self, "use_explicit_dry_run_spec")
@use_explicit_dry_run_spec.setter
def use_explicit_dry_run_spec(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "use_explicit_dry_run_spec", value)
@pulumi.input_type
class _ServicePerimeterState:
def __init__(__self__, *,
create_time: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
parent: Optional[pulumi.Input[str]] = None,
perimeter_type: Optional[pulumi.Input[str]] = None,
spec: Optional[pulumi.Input['ServicePerimeterSpecArgs']] = None,
status: Optional[pulumi.Input['ServicePerimeterStatusArgs']] = None,
title: Optional[pulumi.Input[str]] = None,
update_time: Optional[pulumi.Input[str]] = None,
use_explicit_dry_run_spec: Optional[pulumi.Input[bool]] = None):
"""
Input properties used for looking up and filtering ServicePerimeter resources.
:param pulumi.Input[str] create_time: Time the AccessPolicy was created in UTC.
:param pulumi.Input[str] description: Description of the ServicePerimeter and its use. Does not affect
behavior.
:param pulumi.Input[str] name: Resource name for the ServicePerimeter. The short_name component must
begin with a letter and only include alphanumeric and '_'.
Format: accessPolicies/{policy_id}/servicePerimeters/{short_name}
:param pulumi.Input[str] parent: The AccessPolicy this ServicePerimeter lives in.
Format: accessPolicies/{policy_id}
:param pulumi.Input[str] perimeter_type: Specifies the type of the Perimeter. There are two types: regular and
bridge. Regular Service Perimeter contains resources, access levels,
and restricted services. Every resource can be in at most
ONE regular Service Perimeter.
In addition to being in a regular service perimeter, a resource can also
be in zero or more perimeter bridges. A perimeter bridge only contains
               resources. Cross-project operations are permitted if all affected
resources share some perimeter (whether bridge or regular). Perimeter
Bridge does not contain access levels or services: those are governed
entirely by the regular perimeter that resource is in.
Perimeter Bridges are typically useful when building more complex
topologies with many independent perimeters that need to share some data
with a common perimeter, but should not be able to share data among
themselves.
Default value is `PERIMETER_TYPE_REGULAR`.
Possible values are `PERIMETER_TYPE_REGULAR` and `PERIMETER_TYPE_BRIDGE`.
:param pulumi.Input['ServicePerimeterSpecArgs'] spec: Proposed (or dry run) ServicePerimeter configuration.
               This configuration allows you to specify and test ServicePerimeter configuration
without enforcing actual access restrictions. Only allowed to be set when
the `useExplicitDryRunSpec` flag is set.
Structure is documented below.
:param pulumi.Input['ServicePerimeterStatusArgs'] status: ServicePerimeter configuration. Specifies sets of resources,
restricted services and access levels that determine
perimeter content and boundaries.
Structure is documented below.
:param pulumi.Input[str] title: Human readable title. Must be unique within the Policy.
:param pulumi.Input[str] update_time: Time the AccessPolicy was updated in UTC.
:param pulumi.Input[bool] use_explicit_dry_run_spec: Use explicit dry run spec flag. Ordinarily, a dry-run spec implicitly exists
for all Service Perimeters, and that spec is identical to the status for those
Service Perimeters. When this flag is set, it inhibits the generation of the
implicit spec, thereby allowing the user to explicitly provide a
configuration ("spec") to use in a dry-run version of the Service Perimeter.
This allows the user to test changes to the enforced config ("status") without
actually enforcing them. This testing is done through analyzing the differences
between currently enforced and suggested restrictions. useExplicitDryRunSpec must
               be set to True if any of the fields in the spec are set to non-default values.
"""
if create_time is not None:
pulumi.set(__self__, "create_time", create_time)
if description is not None:
pulumi.set(__self__, "description", description)
if name is not None:
pulumi.set(__self__, "name", name)
if parent is not None:
pulumi.set(__self__, "parent", parent)
if perimeter_type is not None:
pulumi.set(__self__, "perimeter_type", perimeter_type)
if spec is not None:
pulumi.set(__self__, "spec", spec)
if status is not None:
pulumi.set(__self__, "status", status)
if title is not None:
pulumi.set(__self__, "title", title)
if update_time is not None:
pulumi.set(__self__, "update_time", update_time)
if use_explicit_dry_run_spec is not None:
pulumi.set(__self__, "use_explicit_dry_run_spec", use_explicit_dry_run_spec)
@property
@pulumi.getter(name="createTime")
def create_time(self) -> Optional[pulumi.Input[str]]:
"""
Time the AccessPolicy was created in UTC.
"""
return pulumi.get(self, "create_time")
@create_time.setter
def create_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "create_time", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of the ServicePerimeter and its use. Does not affect
behavior.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Resource name for the ServicePerimeter. The short_name component must
begin with a letter and only include alphanumeric and '_'.
Format: accessPolicies/{policy_id}/servicePerimeters/{short_name}
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def parent(self) -> Optional[pulumi.Input[str]]:
"""
The AccessPolicy this ServicePerimeter lives in.
Format: accessPolicies/{policy_id}
"""
return pulumi.get(self, "parent")
@parent.setter
def parent(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "parent", value)
@property
@pulumi.getter(name="perimeterType")
def perimeter_type(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the type of the Perimeter. There are two types: regular and
bridge. Regular Service Perimeter contains resources, access levels,
and restricted services. Every resource can be in at most
ONE regular Service Perimeter.
In addition to being in a regular service perimeter, a resource can also
be in zero or more perimeter bridges. A perimeter bridge only contains
        resources. Cross-project operations are permitted if all affected
resources share some perimeter (whether bridge or regular). Perimeter
Bridge does not contain access levels or services: those are governed
entirely by the regular perimeter that resource is in.
Perimeter Bridges are typically useful when building more complex
topologies with many independent perimeters that need to share some data
with a common perimeter, but should not be able to share data among
themselves.
Default value is `PERIMETER_TYPE_REGULAR`.
Possible values are `PERIMETER_TYPE_REGULAR` and `PERIMETER_TYPE_BRIDGE`.
"""
return pulumi.get(self, "perimeter_type")
@perimeter_type.setter
def perimeter_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "perimeter_type", value)
@property
@pulumi.getter
def spec(self) -> Optional[pulumi.Input['ServicePerimeterSpecArgs']]:
"""
Proposed (or dry run) ServicePerimeter configuration.
        This configuration allows you to specify and test ServicePerimeter configuration
without enforcing actual access restrictions. Only allowed to be set when
the `useExplicitDryRunSpec` flag is set.
Structure is documented below.
"""
return pulumi.get(self, "spec")
@spec.setter
def spec(self, value: Optional[pulumi.Input['ServicePerimeterSpecArgs']]):
pulumi.set(self, "spec", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input['ServicePerimeterStatusArgs']]:
"""
ServicePerimeter configuration. Specifies sets of resources,
restricted services and access levels that determine
perimeter content and boundaries.
Structure is documented below.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input['ServicePerimeterStatusArgs']]):
pulumi.set(self, "status", value)
@property
@pulumi.getter
def title(self) -> Optional[pulumi.Input[str]]:
"""
Human readable title. Must be unique within the Policy.
"""
return pulumi.get(self, "title")
@title.setter
def title(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "title", value)
@property
@pulumi.getter(name="updateTime")
def update_time(self) -> Optional[pulumi.Input[str]]:
"""
Time the AccessPolicy was updated in UTC.
"""
return pulumi.get(self, "update_time")
@update_time.setter
def update_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "update_time", value)
@property
@pulumi.getter(name="useExplicitDryRunSpec")
def use_explicit_dry_run_spec(self) -> Optional[pulumi.Input[bool]]:
"""
Use explicit dry run spec flag. Ordinarily, a dry-run spec implicitly exists
for all Service Perimeters, and that spec is identical to the status for those
Service Perimeters. When this flag is set, it inhibits the generation of the
implicit spec, thereby allowing the user to explicitly provide a
configuration ("spec") to use in a dry-run version of the Service Perimeter.
This allows the user to test changes to the enforced config ("status") without
actually enforcing them. This testing is done through analyzing the differences
between currently enforced and suggested restrictions. useExplicitDryRunSpec must
        be set to True if any of the fields in the spec are set to non-default values.
"""
return pulumi.get(self, "use_explicit_dry_run_spec")
@use_explicit_dry_run_spec.setter
def use_explicit_dry_run_spec(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "use_explicit_dry_run_spec", value)
class ServicePerimeter(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
parent: Optional[pulumi.Input[str]] = None,
perimeter_type: Optional[pulumi.Input[str]] = None,
spec: Optional[pulumi.Input[pulumi.InputType['ServicePerimeterSpecArgs']]] = None,
status: Optional[pulumi.Input[pulumi.InputType['ServicePerimeterStatusArgs']]] = None,
title: Optional[pulumi.Input[str]] = None,
use_explicit_dry_run_spec: Optional[pulumi.Input[bool]] = None,
__props__=None):
"""
ServicePerimeter describes a set of GCP resources which can freely import
and export data amongst themselves, but not export outside of the
ServicePerimeter. If a request with a source within this ServicePerimeter
has a target outside of the ServicePerimeter, the request will be blocked.
Otherwise the request is allowed. There are two types of Service Perimeter
- Regular and Bridge. Regular Service Perimeters cannot overlap, a single
GCP project can only belong to a single regular Service Perimeter. Service
Perimeter Bridges can contain only GCP projects as members, a single GCP
project may belong to multiple Service Perimeter Bridges.
To get more information about ServicePerimeter, see:
* [API documentation](https://cloud.google.com/access-context-manager/docs/reference/rest/v1/accessPolicies.servicePerimeters)
* How-to Guides
* [Service Perimeter Quickstart](https://cloud.google.com/vpc-service-controls/docs/quickstart)
> **Warning:** If you are using User ADCs (Application Default Credentials) with this resource,
you must specify a `billing_project` and set `user_project_override` to true
in the provider configuration. Otherwise the ACM API will return a 403 error.
Your account must have the `serviceusage.services.use` permission on the
`billing_project` you defined.
## Example Usage
### Access Context Manager Service Perimeter Basic
```python
import pulumi
import pulumi_gcp as gcp
access_policy = gcp.accesscontextmanager.AccessPolicy("access-policy",
parent="organizations/123456789",
title="my policy")
service_perimeter = gcp.accesscontextmanager.ServicePerimeter("service-perimeter",
parent=access_policy.name.apply(lambda name: f"accessPolicies/{name}"),
status=gcp.accesscontextmanager.ServicePerimeterStatusArgs(
restricted_services=["storage.googleapis.com"],
),
title="restrict_storage")
access_level = gcp.accesscontextmanager.AccessLevel("access-level",
basic=gcp.accesscontextmanager.AccessLevelBasicArgs(
conditions=[gcp.accesscontextmanager.AccessLevelBasicConditionArgs(
device_policy=gcp.accesscontextmanager.AccessLevelBasicConditionDevicePolicyArgs(
os_constraints=[gcp.accesscontextmanager.AccessLevelBasicConditionDevicePolicyOsConstraintArgs(
os_type="DESKTOP_CHROME_OS",
)],
require_screen_lock=False,
),
regions=[
"CH",
"IT",
"US",
],
)],
),
parent=access_policy.name.apply(lambda name: f"accessPolicies/{name}"),
title="chromeos_no_lock")
```
### Access Context Manager Service Perimeter Secure Data Exchange
```python
import pulumi
import pulumi_gcp as gcp
access_policy = gcp.accesscontextmanager.AccessPolicy("access-policy",
parent="organizations/123456789",
title="my policy")
secure_data_exchange = gcp.accesscontextmanager.ServicePerimeters("secure-data-exchange",
parent=access_policy.name.apply(lambda name: f"accessPolicies/{name}"),
service_perimeters=[
gcp.accesscontextmanager.ServicePerimetersServicePerimeterArgs(
name=access_policy.name.apply(lambda name: f"accessPolicies/{name}/servicePerimeters/"),
title="",
status=gcp.accesscontextmanager.ServicePerimetersServicePerimeterStatusArgs(
restricted_services=["storage.googleapis.com"],
),
),
gcp.accesscontextmanager.ServicePerimetersServicePerimeterArgs(
name=access_policy.name.apply(lambda name: f"accessPolicies/{name}/servicePerimeters/"),
title="",
status=gcp.accesscontextmanager.ServicePerimetersServicePerimeterStatusArgs(
restricted_services=["bigtable.googleapis.com"],
vpc_accessible_services=gcp.accesscontextmanager.ServicePerimetersServicePerimeterStatusVpcAccessibleServicesArgs(
enable_restriction=True,
allowed_services=["bigquery.googleapis.com"],
),
),
),
])
access_level = gcp.accesscontextmanager.AccessLevel("access-level",
parent=access_policy.name.apply(lambda name: f"accessPolicies/{name}"),
title="secure_data_exchange",
basic=gcp.accesscontextmanager.AccessLevelBasicArgs(
conditions=[gcp.accesscontextmanager.AccessLevelBasicConditionArgs(
device_policy=gcp.accesscontextmanager.AccessLevelBasicConditionDevicePolicyArgs(
require_screen_lock=False,
os_constraints=[gcp.accesscontextmanager.AccessLevelBasicConditionDevicePolicyOsConstraintArgs(
os_type="DESKTOP_CHROME_OS",
)],
),
regions=[
"CH",
"IT",
"US",
],
)],
))
test_access = gcp.accesscontextmanager.ServicePerimeter("test-access",
parent=f"accessPolicies/{google_access_context_manager_access_policy['test-access']['name']}",
title="%s",
perimeter_type="PERIMETER_TYPE_REGULAR",
status=gcp.accesscontextmanager.ServicePerimeterStatusArgs(
restricted_services=[
"bigquery.googleapis.com",
"storage.googleapis.com",
],
access_levels=[access_level.name],
vpc_accessible_services=gcp.accesscontextmanager.ServicePerimeterStatusVpcAccessibleServicesArgs(
enable_restriction=True,
allowed_services=[
"bigquery.googleapis.com",
"storage.googleapis.com",
],
),
ingress_policies=[gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyArgs(
ingress_from=gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressFromArgs(
sources=[gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressFromSourceArgs(
access_level=google_access_context_manager_access_level["test-access"]["name"],
)],
identity_type="ANY_IDENTITY",
),
ingress_to=gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToArgs(
resources=["*"],
operations=[
gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToOperationArgs(
service_name="bigquery.googleapis.com",
method_selectors=[
gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToOperationMethodSelectorArgs(
method="BigQueryStorage.ReadRows",
),
gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToOperationMethodSelectorArgs(
method="TableService.ListTables",
),
gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToOperationMethodSelectorArgs(
permission="bigquery.jobs.get",
),
],
),
gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToOperationArgs(
service_name="storage.googleapis.com",
method_selectors=[gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToOperationMethodSelectorArgs(
method="google.storage.objects.create",
)],
),
],
),
)],
egress_policies=[gcp.accesscontextmanager.ServicePerimeterStatusEgressPolicyArgs(
egress_from=gcp.accesscontextmanager.ServicePerimeterStatusEgressPolicyEgressFromArgs(
identity_type="ANY_USER_ACCOUNT",
),
)],
))
```
### Access Context Manager Service Perimeter Dry Run
```python
import pulumi
import pulumi_gcp as gcp
access_policy = gcp.accesscontextmanager.AccessPolicy("access-policy",
parent="organizations/123456789",
title="my policy")
service_perimeter = gcp.accesscontextmanager.ServicePerimeter("service-perimeter",
parent=access_policy.name.apply(lambda name: f"accessPolicies/{name}"),
spec=gcp.accesscontextmanager.ServicePerimeterSpecArgs(
restricted_services=["storage.googleapis.com"],
),
status=gcp.accesscontextmanager.ServicePerimeterStatusArgs(
restricted_services=["bigquery.googleapis.com"],
),
title="restrict_bigquery_dryrun_storage",
use_explicit_dry_run_spec=True)
```
## Import
ServicePerimeter can be imported using any of these accepted formats
```sh
$ pulumi import gcp:accesscontextmanager/servicePerimeter:ServicePerimeter default {{name}}
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: Description of the ServicePerimeter and its use. Does not affect
behavior.
:param pulumi.Input[str] name: Resource name for the ServicePerimeter. The short_name component must
begin with a letter and only include alphanumeric and '_'.
Format: accessPolicies/{policy_id}/servicePerimeters/{short_name}
:param pulumi.Input[str] parent: The AccessPolicy this ServicePerimeter lives in.
Format: accessPolicies/{policy_id}
:param pulumi.Input[str] perimeter_type: Specifies the type of the Perimeter. There are two types: regular and
bridge. Regular Service Perimeter contains resources, access levels,
and restricted services. Every resource can be in at most
ONE regular Service Perimeter.
In addition to being in a regular service perimeter, a resource can also
be in zero or more perimeter bridges. A perimeter bridge only contains
               resources. Cross-project operations are permitted if all affected
resources share some perimeter (whether bridge or regular). Perimeter
Bridge does not contain access levels or services: those are governed
entirely by the regular perimeter that resource is in.
Perimeter Bridges are typically useful when building more complex
topologies with many independent perimeters that need to share some data
with a common perimeter, but should not be able to share data among
themselves.
Default value is `PERIMETER_TYPE_REGULAR`.
Possible values are `PERIMETER_TYPE_REGULAR` and `PERIMETER_TYPE_BRIDGE`.
:param pulumi.Input[pulumi.InputType['ServicePerimeterSpecArgs']] spec: Proposed (or dry run) ServicePerimeter configuration.
               This configuration allows you to specify and test ServicePerimeter configuration
without enforcing actual access restrictions. Only allowed to be set when
the `useExplicitDryRunSpec` flag is set.
Structure is documented below.
:param pulumi.Input[pulumi.InputType['ServicePerimeterStatusArgs']] status: ServicePerimeter configuration. Specifies sets of resources,
restricted services and access levels that determine
perimeter content and boundaries.
Structure is documented below.
:param pulumi.Input[str] title: Human readable title. Must be unique within the Policy.
:param pulumi.Input[bool] use_explicit_dry_run_spec: Use explicit dry run spec flag. Ordinarily, a dry-run spec implicitly exists
for all Service Perimeters, and that spec is identical to the status for those
Service Perimeters. When this flag is set, it inhibits the generation of the
implicit spec, thereby allowing the user to explicitly provide a
configuration ("spec") to use in a dry-run version of the Service Perimeter.
This allows the user to test changes to the enforced config ("status") without
actually enforcing them. This testing is done through analyzing the differences
between currently enforced and suggested restrictions. useExplicitDryRunSpec must
               be set to True if any of the fields in the spec are set to non-default values.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ServicePerimeterArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
ServicePerimeter describes a set of GCP resources which can freely import
and export data amongst themselves, but not export outside of the
ServicePerimeter. If a request with a source within this ServicePerimeter
has a target outside of the ServicePerimeter, the request will be blocked.
Otherwise the request is allowed. There are two types of Service Perimeter
- Regular and Bridge. Regular Service Perimeters cannot overlap, a single
GCP project can only belong to a single regular Service Perimeter. Service
Perimeter Bridges can contain only GCP projects as members, a single GCP
project may belong to multiple Service Perimeter Bridges.
To get more information about ServicePerimeter, see:
* [API documentation](https://cloud.google.com/access-context-manager/docs/reference/rest/v1/accessPolicies.servicePerimeters)
* How-to Guides
* [Service Perimeter Quickstart](https://cloud.google.com/vpc-service-controls/docs/quickstart)
> **Warning:** If you are using User ADCs (Application Default Credentials) with this resource,
you must specify a `billing_project` and set `user_project_override` to true
in the provider configuration. Otherwise the ACM API will return a 403 error.
Your account must have the `serviceusage.services.use` permission on the
`billing_project` you defined.
## Example Usage
### Access Context Manager Service Perimeter Basic
```python
import pulumi
import pulumi_gcp as gcp
access_policy = gcp.accesscontextmanager.AccessPolicy("access-policy",
parent="organizations/123456789",
title="my policy")
service_perimeter = gcp.accesscontextmanager.ServicePerimeter("service-perimeter",
parent=access_policy.name.apply(lambda name: f"accessPolicies/{name}"),
status=gcp.accesscontextmanager.ServicePerimeterStatusArgs(
restricted_services=["storage.googleapis.com"],
),
title="restrict_storage")
access_level = gcp.accesscontextmanager.AccessLevel("access-level",
basic=gcp.accesscontextmanager.AccessLevelBasicArgs(
conditions=[gcp.accesscontextmanager.AccessLevelBasicConditionArgs(
device_policy=gcp.accesscontextmanager.AccessLevelBasicConditionDevicePolicyArgs(
os_constraints=[gcp.accesscontextmanager.AccessLevelBasicConditionDevicePolicyOsConstraintArgs(
os_type="DESKTOP_CHROME_OS",
)],
require_screen_lock=False,
),
regions=[
"CH",
"IT",
"US",
],
)],
),
parent=access_policy.name.apply(lambda name: f"accessPolicies/{name}"),
title="chromeos_no_lock")
```
### Access Context Manager Service Perimeter Secure Data Exchange
```python
import pulumi
import pulumi_gcp as gcp
access_policy = gcp.accesscontextmanager.AccessPolicy("access-policy",
parent="organizations/123456789",
title="my policy")
secure_data_exchange = gcp.accesscontextmanager.ServicePerimeters("secure-data-exchange",
parent=access_policy.name.apply(lambda name: f"accessPolicies/{name}"),
service_perimeters=[
gcp.accesscontextmanager.ServicePerimetersServicePerimeterArgs(
name=access_policy.name.apply(lambda name: f"accessPolicies/{name}/servicePerimeters/"),
title="",
status=gcp.accesscontextmanager.ServicePerimetersServicePerimeterStatusArgs(
restricted_services=["storage.googleapis.com"],
),
),
gcp.accesscontextmanager.ServicePerimetersServicePerimeterArgs(
name=access_policy.name.apply(lambda name: f"accessPolicies/{name}/servicePerimeters/"),
title="",
status=gcp.accesscontextmanager.ServicePerimetersServicePerimeterStatusArgs(
restricted_services=["bigtable.googleapis.com"],
vpc_accessible_services=gcp.accesscontextmanager.ServicePerimetersServicePerimeterStatusVpcAccessibleServicesArgs(
enable_restriction=True,
allowed_services=["bigquery.googleapis.com"],
),
),
),
])
access_level = gcp.accesscontextmanager.AccessLevel("access-level",
parent=access_policy.name.apply(lambda name: f"accessPolicies/{name}"),
title="secure_data_exchange",
basic=gcp.accesscontextmanager.AccessLevelBasicArgs(
conditions=[gcp.accesscontextmanager.AccessLevelBasicConditionArgs(
device_policy=gcp.accesscontextmanager.AccessLevelBasicConditionDevicePolicyArgs(
require_screen_lock=False,
os_constraints=[gcp.accesscontextmanager.AccessLevelBasicConditionDevicePolicyOsConstraintArgs(
os_type="DESKTOP_CHROME_OS",
)],
),
regions=[
"CH",
"IT",
"US",
],
)],
))
test_access = gcp.accesscontextmanager.ServicePerimeter("test-access",
parent=f"accessPolicies/{google_access_context_manager_access_policy['test-access']['name']}",
title="%s",
perimeter_type="PERIMETER_TYPE_REGULAR",
status=gcp.accesscontextmanager.ServicePerimeterStatusArgs(
restricted_services=[
"bigquery.googleapis.com",
"storage.googleapis.com",
],
access_levels=[access_level.name],
vpc_accessible_services=gcp.accesscontextmanager.ServicePerimeterStatusVpcAccessibleServicesArgs(
enable_restriction=True,
allowed_services=[
"bigquery.googleapis.com",
"storage.googleapis.com",
],
),
ingress_policies=[gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyArgs(
ingress_from=gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressFromArgs(
sources=[gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressFromSourceArgs(
access_level=google_access_context_manager_access_level["test-access"]["name"],
)],
identity_type="ANY_IDENTITY",
),
ingress_to=gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToArgs(
resources=["*"],
operations=[
gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToOperationArgs(
service_name="bigquery.googleapis.com",
method_selectors=[
gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToOperationMethodSelectorArgs(
method="BigQueryStorage.ReadRows",
),
gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToOperationMethodSelectorArgs(
method="TableService.ListTables",
),
gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToOperationMethodSelectorArgs(
permission="bigquery.jobs.get",
),
],
),
gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToOperationArgs(
service_name="storage.googleapis.com",
method_selectors=[gcp.accesscontextmanager.ServicePerimeterStatusIngressPolicyIngressToOperationMethodSelectorArgs(
method="google.storage.objects.create",
)],
),
],
),
)],
egress_policies=[gcp.accesscontextmanager.ServicePerimeterStatusEgressPolicyArgs(
egress_from=gcp.accesscontextmanager.ServicePerimeterStatusEgressPolicyEgressFromArgs(
identity_type="ANY_USER_ACCOUNT",
),
)],
))
```
### Access Context Manager Service Perimeter Dry Run
```python
import pulumi
import pulumi_gcp as gcp
access_policy = gcp.accesscontextmanager.AccessPolicy("access-policy",
parent="organizations/123456789",
title="my policy")
service_perimeter = gcp.accesscontextmanager.ServicePerimeter("service-perimeter",
parent=access_policy.name.apply(lambda name: f"accessPolicies/{name}"),
spec=gcp.accesscontextmanager.ServicePerimeterSpecArgs(
restricted_services=["storage.googleapis.com"],
),
status=gcp.accesscontextmanager.ServicePerimeterStatusArgs(
restricted_services=["bigquery.googleapis.com"],
),
title="restrict_bigquery_dryrun_storage",
use_explicit_dry_run_spec=True)
```
## Import
ServicePerimeter can be imported using any of these accepted formats
```sh
$ pulumi import gcp:accesscontextmanager/servicePerimeter:ServicePerimeter default {{name}}
```
:param str resource_name: The name of the resource.
:param ServicePerimeterArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ServicePerimeterArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
parent: Optional[pulumi.Input[str]] = None,
perimeter_type: Optional[pulumi.Input[str]] = None,
spec: Optional[pulumi.Input[pulumi.InputType['ServicePerimeterSpecArgs']]] = None,
status: Optional[pulumi.Input[pulumi.InputType['ServicePerimeterStatusArgs']]] = None,
title: Optional[pulumi.Input[str]] = None,
use_explicit_dry_run_spec: Optional[pulumi.Input[bool]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ServicePerimeterArgs.__new__(ServicePerimeterArgs)
__props__.__dict__["description"] = description
__props__.__dict__["name"] = name
if parent is None and not opts.urn:
raise TypeError("Missing required property 'parent'")
__props__.__dict__["parent"] = parent
__props__.__dict__["perimeter_type"] = perimeter_type
__props__.__dict__["spec"] = spec
__props__.__dict__["status"] = status
if title is None and not opts.urn:
raise TypeError("Missing required property 'title'")
__props__.__dict__["title"] = title
__props__.__dict__["use_explicit_dry_run_spec"] = use_explicit_dry_run_spec
__props__.__dict__["create_time"] = None
__props__.__dict__["update_time"] = None
super(ServicePerimeter, __self__).__init__(
'gcp:accesscontextmanager/servicePerimeter:ServicePerimeter',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
create_time: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
parent: Optional[pulumi.Input[str]] = None,
perimeter_type: Optional[pulumi.Input[str]] = None,
spec: Optional[pulumi.Input[pulumi.InputType['ServicePerimeterSpecArgs']]] = None,
status: Optional[pulumi.Input[pulumi.InputType['ServicePerimeterStatusArgs']]] = None,
title: Optional[pulumi.Input[str]] = None,
update_time: Optional[pulumi.Input[str]] = None,
use_explicit_dry_run_spec: Optional[pulumi.Input[bool]] = None) -> 'ServicePerimeter':
"""
Get an existing ServicePerimeter resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] create_time: Time the AccessPolicy was created in UTC.
:param pulumi.Input[str] description: Description of the ServicePerimeter and its use. Does not affect
behavior.
:param pulumi.Input[str] name: Resource name for the ServicePerimeter. The short_name component must
begin with a letter and only include alphanumeric and '_'.
Format: accessPolicies/{policy_id}/servicePerimeters/{short_name}
:param pulumi.Input[str] parent: The AccessPolicy this ServicePerimeter lives in.
Format: accessPolicies/{policy_id}
:param pulumi.Input[str] perimeter_type: Specifies the type of the Perimeter. There are two types: regular and
bridge. Regular Service Perimeter contains resources, access levels,
and restricted services. Every resource can be in at most
ONE regular Service Perimeter.
In addition to being in a regular service perimeter, a resource can also
be in zero or more perimeter bridges. A perimeter bridge only contains
               resources. Cross-project operations are permitted if all affected
resources share some perimeter (whether bridge or regular). Perimeter
Bridge does not contain access levels or services: those are governed
entirely by the regular perimeter that resource is in.
Perimeter Bridges are typically useful when building more complex
topologies with many independent perimeters that need to share some data
with a common perimeter, but should not be able to share data among
themselves.
Default value is `PERIMETER_TYPE_REGULAR`.
Possible values are `PERIMETER_TYPE_REGULAR` and `PERIMETER_TYPE_BRIDGE`.
:param pulumi.Input[pulumi.InputType['ServicePerimeterSpecArgs']] spec: Proposed (or dry run) ServicePerimeter configuration.
               This configuration allows one to specify and test ServicePerimeter configuration
without enforcing actual access restrictions. Only allowed to be set when
the `useExplicitDryRunSpec` flag is set.
Structure is documented below.
:param pulumi.Input[pulumi.InputType['ServicePerimeterStatusArgs']] status: ServicePerimeter configuration. Specifies sets of resources,
restricted services and access levels that determine
perimeter content and boundaries.
Structure is documented below.
:param pulumi.Input[str] title: Human readable title. Must be unique within the Policy.
:param pulumi.Input[str] update_time: Time the AccessPolicy was updated in UTC.
:param pulumi.Input[bool] use_explicit_dry_run_spec: Use explicit dry run spec flag. Ordinarily, a dry-run spec implicitly exists
for all Service Perimeters, and that spec is identical to the status for those
Service Perimeters. When this flag is set, it inhibits the generation of the
implicit spec, thereby allowing the user to explicitly provide a
configuration ("spec") to use in a dry-run version of the Service Perimeter.
This allows the user to test changes to the enforced config ("status") without
actually enforcing them. This testing is done through analyzing the differences
between currently enforced and suggested restrictions. useExplicitDryRunSpec must
               be set to True if any of the fields in the spec are set to non-default values.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ServicePerimeterState.__new__(_ServicePerimeterState)
__props__.__dict__["create_time"] = create_time
__props__.__dict__["description"] = description
__props__.__dict__["name"] = name
__props__.__dict__["parent"] = parent
__props__.__dict__["perimeter_type"] = perimeter_type
__props__.__dict__["spec"] = spec
__props__.__dict__["status"] = status
__props__.__dict__["title"] = title
__props__.__dict__["update_time"] = update_time
__props__.__dict__["use_explicit_dry_run_spec"] = use_explicit_dry_run_spec
return ServicePerimeter(resource_name, opts=opts, __props__=__props__)
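    # A hedged usage sketch for the lookup above: adopting an already-created
    # perimeter by its full resource name. The policy id and short_name below are
    # placeholders, not values taken from this module.
    #
    #     existing = ServicePerimeter.get(
    #         "existing-perimeter",
    #         "accessPolicies/123456789/servicePerimeters/restrict_storage")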
@property
@pulumi.getter(name="createTime")
def create_time(self) -> pulumi.Output[str]:
"""
Time the AccessPolicy was created in UTC.
"""
return pulumi.get(self, "create_time")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description of the ServicePerimeter and its use. Does not affect
behavior.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name for the ServicePerimeter. The short_name component must
begin with a letter and only include alphanumeric and '_'.
Format: accessPolicies/{policy_id}/servicePerimeters/{short_name}
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def parent(self) -> pulumi.Output[str]:
"""
The AccessPolicy this ServicePerimeter lives in.
Format: accessPolicies/{policy_id}
"""
return pulumi.get(self, "parent")
@property
@pulumi.getter(name="perimeterType")
def perimeter_type(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the type of the Perimeter. There are two types: regular and
bridge. Regular Service Perimeter contains resources, access levels,
and restricted services. Every resource can be in at most
ONE regular Service Perimeter.
In addition to being in a regular service perimeter, a resource can also
be in zero or more perimeter bridges. A perimeter bridge only contains
        resources. Cross-project operations are permitted if all affected
resources share some perimeter (whether bridge or regular). Perimeter
Bridge does not contain access levels or services: those are governed
entirely by the regular perimeter that resource is in.
Perimeter Bridges are typically useful when building more complex
topologies with many independent perimeters that need to share some data
with a common perimeter, but should not be able to share data among
themselves.
Default value is `PERIMETER_TYPE_REGULAR`.
Possible values are `PERIMETER_TYPE_REGULAR` and `PERIMETER_TYPE_BRIDGE`.
"""
return pulumi.get(self, "perimeter_type")
@property
@pulumi.getter
def spec(self) -> pulumi.Output[Optional['outputs.ServicePerimeterSpec']]:
"""
Proposed (or dry run) ServicePerimeter configuration.
        This configuration allows one to specify and test ServicePerimeter configuration
without enforcing actual access restrictions. Only allowed to be set when
the `useExplicitDryRunSpec` flag is set.
Structure is documented below.
"""
return pulumi.get(self, "spec")
@property
@pulumi.getter
def status(self) -> pulumi.Output[Optional['outputs.ServicePerimeterStatus']]:
"""
ServicePerimeter configuration. Specifies sets of resources,
restricted services and access levels that determine
perimeter content and boundaries.
Structure is documented below.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def title(self) -> pulumi.Output[str]:
"""
Human readable title. Must be unique within the Policy.
"""
return pulumi.get(self, "title")
@property
@pulumi.getter(name="updateTime")
def update_time(self) -> pulumi.Output[str]:
"""
Time the AccessPolicy was updated in UTC.
"""
return pulumi.get(self, "update_time")
@property
@pulumi.getter(name="useExplicitDryRunSpec")
def use_explicit_dry_run_spec(self) -> pulumi.Output[Optional[bool]]:
"""
Use explicit dry run spec flag. Ordinarily, a dry-run spec implicitly exists
for all Service Perimeters, and that spec is identical to the status for those
Service Perimeters. When this flag is set, it inhibits the generation of the
implicit spec, thereby allowing the user to explicitly provide a
configuration ("spec") to use in a dry-run version of the Service Perimeter.
This allows the user to test changes to the enforced config ("status") without
actually enforcing them. This testing is done through analyzing the differences
between currently enforced and suggested restrictions. useExplicitDryRunSpec must
        be set to True if any of the fields in the spec are set to non-default values.
"""
return pulumi.get(self, "use_explicit_dry_run_spec")
|
selfdrive/boardd/tests/test_boardd_api.py | 919bot/Tessa | 114 | 7563 | <reponame>919bot/Tessa
import random
import numpy as np
import selfdrive.boardd.tests.boardd_old as boardd_old
import selfdrive.boardd.boardd as boardd
from common.realtime import sec_since_boot
from cereal import log
import unittest
def generate_random_can_data_list():
can_list = []
cnt = random.randint(1, 64)
for j in range(cnt):
can_data = np.random.bytes(random.randint(1, 8))
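        # Each generated entry is [address, busTime, dat, src] -- the same four
        # fields the assertions below compare between the old and new capnp APIs.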
can_list.append([random.randint(0, 128), random.randint(0, 128), can_data, random.randint(0, 128)])
return can_list, cnt
class TestBoarddApiMethods(unittest.TestCase):
def test_correctness(self):
for i in range(1000):
can_list, _ = generate_random_can_data_list()
# Sendcan
# Old API
m_old = boardd_old.can_list_to_can_capnp(can_list, 'sendcan').to_bytes()
# new API
m = boardd.can_list_to_can_capnp(can_list, 'sendcan')
ev_old = log.Event.from_bytes(m_old)
ev = log.Event.from_bytes(m)
self.assertEqual(ev_old.which(), ev.which())
self.assertEqual(len(ev.sendcan), len(ev_old.sendcan))
for i in range(len(ev.sendcan)):
attrs = ['address', 'busTime', 'dat', 'src']
for attr in attrs:
self.assertEqual(getattr(ev.sendcan[i], attr, 'new'), getattr(ev_old.sendcan[i], attr, 'old'))
# Can
m_old = boardd_old.can_list_to_can_capnp(can_list, 'can').to_bytes()
# new API
m = boardd.can_list_to_can_capnp(can_list, 'can')
ev_old = log.Event.from_bytes(m_old)
ev = log.Event.from_bytes(m)
self.assertEqual(ev_old.which(), ev.which())
self.assertEqual(len(ev.can), len(ev_old.can))
for i in range(len(ev.can)):
attrs = ['address', 'busTime', 'dat', 'src']
for attr in attrs:
self.assertEqual(getattr(ev.can[i], attr, 'new'), getattr(ev_old.can[i], attr, 'old'))
def test_performance(self):
can_list, cnt = generate_random_can_data_list()
recursions = 1000
n1 = sec_since_boot()
for i in range(recursions):
boardd_old.can_list_to_can_capnp(can_list, 'sendcan').to_bytes()
n2 = sec_since_boot()
elapsed_old = n2 - n1
# print('Old API, elapsed time: {} secs'.format(elapsed_old))
n1 = sec_since_boot()
for i in range(recursions):
boardd.can_list_to_can_capnp(can_list)
n2 = sec_since_boot()
elapsed_new = n2 - n1
# print('New API, elapsed time: {} secs'.format(elapsed_new))
self.assertTrue(elapsed_new < elapsed_old / 2)
if __name__ == '__main__':
unittest.main()
|
saleor/product/migrations/0141_update_descritpion_fields.py | fairhopeweb/saleor | 15,337 | 7586 | # Generated by Django 3.1.5 on 2021-02-17 11:04
from django.db import migrations
import saleor.core.db.fields
import saleor.core.utils.editorjs
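# Data-migration helper: rows whose description is an empty JSON object ({}) are
# rewritten to NULL so the nullable SanitizedJSONFields altered below treat
# "empty" uniformly.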
def update_empty_description_field(apps, schema_editor):
Category = apps.get_model("product", "Category")
CategoryTranslation = apps.get_model("product", "CategoryTranslation")
Collection = apps.get_model("product", "Collection")
CollectionTranslation = apps.get_model("product", "CollectionTranslation")
Product = apps.get_model("product", "Product")
ProductTranslation = apps.get_model("product", "ProductTranslation")
models = [
Category,
CategoryTranslation,
Collection,
CollectionTranslation,
Product,
ProductTranslation,
]
for model in models:
model.objects.filter(description={}).update(description=None)
class Migration(migrations.Migration):
dependencies = [
("product", "0140_auto_20210125_0905"),
]
operations = [
migrations.AlterField(
model_name="category",
name="description",
field=saleor.core.db.fields.SanitizedJSONField(
blank=True,
null=True,
sanitizer=saleor.core.utils.editorjs.clean_editor_js,
),
),
migrations.AlterField(
model_name="categorytranslation",
name="description",
field=saleor.core.db.fields.SanitizedJSONField(
blank=True,
null=True,
sanitizer=saleor.core.utils.editorjs.clean_editor_js,
),
),
migrations.AlterField(
model_name="collection",
name="description",
field=saleor.core.db.fields.SanitizedJSONField(
blank=True,
null=True,
sanitizer=saleor.core.utils.editorjs.clean_editor_js,
),
),
migrations.AlterField(
model_name="collectiontranslation",
name="description",
field=saleor.core.db.fields.SanitizedJSONField(
blank=True,
null=True,
sanitizer=saleor.core.utils.editorjs.clean_editor_js,
),
),
migrations.AlterField(
model_name="product",
name="description",
field=saleor.core.db.fields.SanitizedJSONField(
blank=True,
null=True,
sanitizer=saleor.core.utils.editorjs.clean_editor_js,
),
),
migrations.AlterField(
model_name="producttranslation",
name="description",
field=saleor.core.db.fields.SanitizedJSONField(
blank=True,
null=True,
sanitizer=saleor.core.utils.editorjs.clean_editor_js,
),
),
migrations.RunPython(
update_empty_description_field,
migrations.RunPython.noop,
),
]
|
torch/_VF.py | Hacky-DH/pytorch | 60,067 | 7588 | """
This makes the functions in torch._C._VariableFunctions available as
torch._VF.<funcname>
without mypy being able to find them.
A subset of those functions are mapped to ATen functions in
torch/jit/_builtins.py
See https://github.com/pytorch/pytorch/issues/21478 for the reason for
introducing torch._VF
"""
import torch
import sys
import types
class VFModule(types.ModuleType):
vf: types.ModuleType
def __init__(self, name):
super(VFModule, self).__init__(name)
self.vf = torch._C._VariableFunctions
def __getattr__(self, attr):
return getattr(self.vf, attr)
sys.modules[__name__] = VFModule(__name__)
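# A minimal usage sketch, kept as comments so nothing executes at import time.
# It assumes a standard torch build in which `norm` is among the ops exposed by
# torch._C._VariableFunctions; the attribute lookup falls through
# VFModule.__getattr__ defined above.
#
#     import torch
#     x = torch.arange(4.)
#     torch._VF.norm(x, 2)   # ~= tensor(3.7417), same result as torch.norm(x, 2)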
|
transformers/tests/tokenization_xlnet_test.py | deepbluesea/transformers | 270 | 7616 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import unittest
from transformers.tokenization_xlnet import (XLNetTokenizer, SPIECE_UNDERLINE)
from .tokenization_tests_commons import CommonTestCases
SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'fixtures/test_sentencepiece.model')
class XLNetTokenizationTest(CommonTestCases.CommonTokenizerTester):
tokenizer_class = XLNetTokenizer
def setUp(self):
super(XLNetTokenizationTest, self).setUp()
# We have a SentencePiece fixture for testing
tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname)
def get_tokenizer(self, **kwargs):
return XLNetTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self):
input_text = u"This is a test"
output_text = u"This is a test"
return input_text, output_text
def test_full_tokenizer(self):
tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokens = tokenizer.tokenize(u'This is a test')
self.assertListEqual(tokens, [u'▁This', u'▁is', u'▁a', u'▁t', u'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
tokens = tokenizer.tokenize(u"I was born in 92000, and this is falsé.")
self.assertListEqual(tokens, [SPIECE_UNDERLINE + u'I', SPIECE_UNDERLINE + u'was', SPIECE_UNDERLINE + u'b',
u'or', u'n', SPIECE_UNDERLINE + u'in', SPIECE_UNDERLINE + u'',
u'9', u'2', u'0', u'0', u'0', u',', SPIECE_UNDERLINE + u'and', SPIECE_UNDERLINE + u'this',
SPIECE_UNDERLINE + u'is', SPIECE_UNDERLINE + u'f', u'al', u's', u'é', u'.'])
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(
ids, [8, 21, 84, 55, 24, 19, 7, 0,
602, 347, 347, 347, 3, 12, 66,
46, 72, 80, 6, 0, 4])
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(back_tokens, [SPIECE_UNDERLINE + u'I', SPIECE_UNDERLINE + u'was', SPIECE_UNDERLINE + u'b',
u'or', u'n', SPIECE_UNDERLINE + u'in',
SPIECE_UNDERLINE + u'', u'<unk>', u'2', u'0', u'0', u'0', u',',
SPIECE_UNDERLINE + u'and', SPIECE_UNDERLINE + u'this',
SPIECE_UNDERLINE + u'is', SPIECE_UNDERLINE + u'f', u'al', u's',
u'<unk>', u'.'])
def test_tokenizer_lower(self):
tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
tokens = tokenizer.tokenize(u"I was born in 92000, and this is falsé.")
self.assertListEqual(tokens, [SPIECE_UNDERLINE + u'', u'i', SPIECE_UNDERLINE + u'was', SPIECE_UNDERLINE + u'b',
u'or', u'n', SPIECE_UNDERLINE + u'in', SPIECE_UNDERLINE + u'',
u'9', u'2', u'0', u'0', u'0', u',', SPIECE_UNDERLINE + u'and', SPIECE_UNDERLINE + u'this',
SPIECE_UNDERLINE + u'is', SPIECE_UNDERLINE + u'f', u'al', u'se', u'.'])
self.assertListEqual(tokenizer.tokenize(u"H\u00E9llo"), [u"▁he", u"ll", u"o"])
def test_tokenizer_no_lower(self):
tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
tokens = tokenizer.tokenize(u"I was born in 92000, and this is falsé.")
self.assertListEqual(tokens, [SPIECE_UNDERLINE + u'I', SPIECE_UNDERLINE + u'was', SPIECE_UNDERLINE + u'b', u'or',
u'n', SPIECE_UNDERLINE + u'in', SPIECE_UNDERLINE + u'',
u'9', u'2', u'0', u'0', u'0', u',', SPIECE_UNDERLINE + u'and', SPIECE_UNDERLINE + u'this',
SPIECE_UNDERLINE + u'is', SPIECE_UNDERLINE + u'f', u'al', u'se', u'.'])
def test_sequence_builders(self):
tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
text = tokenizer.encode("sequence builders")
text_2 = tokenizer.encode("multi-sequence build")
encoded_sentence = tokenizer.add_special_tokens_single_sequence(text)
encoded_pair = tokenizer.add_special_tokens_sequence_pair(text, text_2)
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_2 + [4, 3]
if __name__ == '__main__':
unittest.main()
|
docker_squash/version.py | pombredanne/docker-scripts | 513 | 7622 | <reponame>pombredanne/docker-scripts
version = "1.0.10.dev0"
|
Algo and DSA/LeetCode-Solutions-master/Python/smallest-greater-multiple-made-of-two-digits.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 7656 | # Time: sum(O(l * 2^l) for l in range(1, 11)) = O(20 * 2^10) = O(1)
# Space: O(1)
class Solution(object):
def findInteger(self, k, digit1, digit2):
"""
:type k: int
:type digit1: int
:type digit2: int
:rtype: int
"""
MAX_NUM_OF_DIGITS = 10
INT_MAX = 2**31-1
if digit1 < digit2:
digit1, digit2 = digit2, digit1
total = 2
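        # total == 2**l while scanning lengths l = 1..MAX_NUM_OF_DIGITS: each l-bit
        # mask picks, position by position, which of the two digits to emit, so all
        # l-digit candidates are tried in non-decreasing value before l grows.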
for l in xrange(1, MAX_NUM_OF_DIGITS+1):
for mask in xrange(total):
curr, bit = 0, total>>1
while bit:
curr = curr*10 + (digit1 if mask&bit else digit2)
bit >>= 1
if k < curr <= INT_MAX and curr%k == 0:
return curr
total <<= 1
return -1
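        # Illustrative check (not part of the original solution): with k=2 and the
        # digits {0, 2}, the smallest multiple of 2 greater than 2 built only from
        # those digits is 20, which the enumeration above returns.
        #
        #     Solution().findInteger(2, 0, 2)  # -> 20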
|
turbo_transformers/python/tests/__init__.py | xcnick/TurboTransformers | 1,147 | 7658 | <gh_stars>1000+
# Copyright (C) 2020 THL A29 Limited, a Tencent company.
# All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may
# not use this file except in compliance with the License. You may
# obtain a copy of the License at
# https://opensource.org/licenses/BSD-3-Clause
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# See the AUTHORS file for names of contributors.
|
lmdb/cffi.py | hirnimeshrampuresoftware/py-lmdb | 185 | 7683 | <reponame>hirnimeshrampuresoftware/py-lmdb
#
# Copyright 2013 The py-lmdb authors, all rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted only as authorized by the OpenLDAP
# Public License.
#
# A copy of this license is available in the file LICENSE in the
# top-level directory of the distribution or, alternatively, at
# <http://www.OpenLDAP.org/license.html>.
#
# OpenLDAP is a registered trademark of the OpenLDAP Foundation.
#
# Individual files and/or contributed packages may be copyright by
# other parties and/or subject to additional restrictions.
#
# This work also contains materials derived from public sources.
#
# Additional information about OpenLDAP can be obtained at
# <http://www.openldap.org/>.
#
"""
CPython/CFFI wrapper for OpenLDAP's "Lightning" MDB database.
Please see https://lmdb.readthedocs.io/
"""
from __future__ import absolute_import
from __future__ import with_statement
import errno
import inspect
import os
import sys
import threading
is_win32 = sys.platform == 'win32'
if is_win32:
import msvcrt
try:
import __builtin__
except ImportError:
import builtins as __builtin__ # type: ignore
import lmdb
try:
from lmdb import _config
except ImportError:
_config = None # type: ignore
__all__ = [
'Cursor',
'Environment',
'Transaction',
'_Database',
'enable_drop_gil',
'version',
]
__all__ += [
'BadDbiError',
'BadRslotError',
'BadTxnError',
'BadValsizeError',
'CorruptedError',
'CursorFullError',
'DbsFullError',
'DiskError',
'Error',
'IncompatibleError',
'InvalidError',
'InvalidParameterError',
'KeyExistsError',
'LockError',
'MapFullError',
'MapResizedError',
'MemoryError',
'NotFoundError',
'PageFullError',
'PageNotFoundError',
'PanicError',
'ReadersFullError',
'ReadonlyError',
'TlsFullError',
'TxnFullError',
'VersionMismatchError',
]
# Handle moronic Python 3 mess.
UnicodeType = getattr(__builtin__, 'unicode', str)
BytesType = getattr(__builtin__, 'bytes', str)
O_0755 = int('0755', 8)
O_0111 = int('0111', 8)
EMPTY_BYTES = UnicodeType().encode()
# Used to track context across CFFI callbacks.
_callbacks = threading.local()
_CFFI_CDEF = '''
typedef int mode_t;
typedef ... MDB_env;
typedef struct MDB_txn MDB_txn;
typedef struct MDB_cursor MDB_cursor;
typedef unsigned int MDB_dbi;
enum MDB_cursor_op {
MDB_FIRST,
MDB_FIRST_DUP,
MDB_GET_BOTH,
MDB_GET_BOTH_RANGE,
MDB_GET_CURRENT,
MDB_GET_MULTIPLE,
MDB_LAST,
MDB_LAST_DUP,
MDB_NEXT,
MDB_NEXT_DUP,
MDB_NEXT_MULTIPLE,
MDB_NEXT_NODUP,
MDB_PREV,
MDB_PREV_DUP,
MDB_PREV_NODUP,
MDB_SET,
MDB_SET_KEY,
MDB_SET_RANGE,
...
};
typedef enum MDB_cursor_op MDB_cursor_op;
struct MDB_val {
size_t mv_size;
void *mv_data;
...;
};
typedef struct MDB_val MDB_val;
struct MDB_stat {
unsigned int ms_psize;
unsigned int ms_depth;
size_t ms_branch_pages;
size_t ms_leaf_pages;
size_t ms_overflow_pages;
size_t ms_entries;
...;
};
typedef struct MDB_stat MDB_stat;
struct MDB_envinfo {
void *me_mapaddr;
size_t me_mapsize;
size_t me_last_pgno;
size_t me_last_txnid;
unsigned int me_maxreaders;
unsigned int me_numreaders;
...;
};
typedef struct MDB_envinfo MDB_envinfo;
typedef int (*MDB_cmp_func)(const MDB_val *a, const MDB_val *b);
typedef void (*MDB_rel_func)(MDB_val *item, void *oldptr, void *newptr,
void *relctx);
char *mdb_strerror(int err);
int mdb_env_create(MDB_env **env);
int mdb_env_open(MDB_env *env, const char *path, unsigned int flags,
mode_t mode);
int mdb_env_copy2(MDB_env *env, const char *path, int flags);
int mdb_env_copyfd2(MDB_env *env, int fd, int flags);
int mdb_env_stat(MDB_env *env, MDB_stat *stat);
int mdb_env_info(MDB_env *env, MDB_envinfo *stat);
int mdb_env_get_maxkeysize(MDB_env *env);
int mdb_env_sync(MDB_env *env, int force);
void mdb_env_close(MDB_env *env);
int mdb_env_set_flags(MDB_env *env, unsigned int flags, int onoff);
int mdb_env_get_flags(MDB_env *env, unsigned int *flags);
int mdb_env_get_path(MDB_env *env, const char **path);
int mdb_env_set_mapsize(MDB_env *env, size_t size);
int mdb_env_set_maxreaders(MDB_env *env, unsigned int readers);
int mdb_env_get_maxreaders(MDB_env *env, unsigned int *readers);
int mdb_env_set_maxdbs(MDB_env *env, MDB_dbi dbs);
int mdb_txn_begin(MDB_env *env, MDB_txn *parent, unsigned int flags,
MDB_txn **txn);
int mdb_txn_commit(MDB_txn *txn);
void mdb_txn_reset(MDB_txn *txn);
int mdb_txn_renew(MDB_txn *txn);
void mdb_txn_abort(MDB_txn *txn);
size_t mdb_txn_id(MDB_txn *txn);
int mdb_dbi_open(MDB_txn *txn, const char *name, unsigned int flags,
MDB_dbi *dbi);
int mdb_stat(MDB_txn *txn, MDB_dbi dbi, MDB_stat *stat);
int mdb_drop(MDB_txn *txn, MDB_dbi dbi, int del_);
int mdb_get(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data);
int mdb_cursor_open(MDB_txn *txn, MDB_dbi dbi, MDB_cursor **cursor);
void mdb_cursor_close(MDB_cursor *cursor);
int mdb_cursor_del(MDB_cursor *cursor, unsigned int flags);
int mdb_cursor_count(MDB_cursor *cursor, size_t *countp);
int mdb_cursor_get(MDB_cursor *cursor, MDB_val *key, MDB_val*data, int op);
typedef int (MDB_msg_func)(const char *msg, void *ctx);
int mdb_reader_list(MDB_env *env, MDB_msg_func *func, void *ctx);
int mdb_reader_check(MDB_env *env, int *dead);
int mdb_dbi_flags(MDB_txn *txn, MDB_dbi dbi, unsigned int *flags);
#define MDB_VERSION_MAJOR ...
#define MDB_VERSION_MINOR ...
#define MDB_VERSION_PATCH ...
#define EACCES ...
#define EAGAIN ...
#define EINVAL ...
#define ENOMEM ...
#define ENOSPC ...
#define MDB_BAD_RSLOT ...
#define MDB_BAD_DBI ...
#define MDB_BAD_TXN ...
#define MDB_BAD_VALSIZE ...
#define MDB_CORRUPTED ...
#define MDB_CURSOR_FULL ...
#define MDB_DBS_FULL ...
#define MDB_INCOMPATIBLE ...
#define MDB_INVALID ...
#define MDB_KEYEXIST ...
#define MDB_MAP_FULL ...
#define MDB_MAP_RESIZED ...
#define MDB_NOTFOUND ...
#define MDB_PAGE_FULL ...
#define MDB_PAGE_NOTFOUND ...
#define MDB_PANIC ...
#define MDB_READERS_FULL ...
#define MDB_TLS_FULL ...
#define MDB_TXN_FULL ...
#define MDB_VERSION_MISMATCH ...
#define MDB_APPEND ...
#define MDB_APPENDDUP ...
#define MDB_CP_COMPACT ...
#define MDB_CREATE ...
#define MDB_DUPFIXED ...
#define MDB_DUPSORT ...
#define MDB_INTEGERDUP ...
#define MDB_INTEGERKEY ...
#define MDB_MAPASYNC ...
#define MDB_NODUPDATA ...
#define MDB_NOLOCK ...
#define MDB_NOMEMINIT ...
#define MDB_NOMETASYNC ...
#define MDB_NOOVERWRITE ...
#define MDB_NORDAHEAD ...
#define MDB_NOSUBDIR ...
#define MDB_NOSYNC ...
#define MDB_NOTLS ...
#define MDB_RDONLY ...
#define MDB_REVERSEKEY ...
#define MDB_WRITEMAP ...
// Helpers below inline MDB_vals. Avoids key alloc/dup on CPython, where
// CFFI will use PyString_AS_STRING when passed as an argument.
static int pymdb_del(MDB_txn *txn, MDB_dbi dbi,
char *key_s, size_t keylen,
char *val_s, size_t vallen);
static int pymdb_put(MDB_txn *txn, MDB_dbi dbi,
char *key_s, size_t keylen,
char *val_s, size_t vallen,
unsigned int flags);
static int pymdb_get(MDB_txn *txn, MDB_dbi dbi,
char *key_s, size_t keylen,
MDB_val *val_out);
static int pymdb_cursor_get(MDB_cursor *cursor,
char *key_s, size_t key_len,
char *data_s, size_t data_len,
MDB_val *key, MDB_val *data, int op);
static int pymdb_cursor_put(MDB_cursor *cursor,
char *key_s, size_t keylen,
char *val_s, size_t vallen, int flags);
// Prefaults a range
static void preload(int rc, void *x, size_t size);
'''
_CFFI_CDEF_PATCHED = '''
int mdb_env_copy3(MDB_env *env, const char *path, unsigned int flags, MDB_txn *txn);
int mdb_env_copyfd3(MDB_env *env, int fd, unsigned int flags, MDB_txn *txn);
'''
_CFFI_VERIFY = '''
#include <sys/stat.h>
#include "lmdb.h"
#include "preload.h"
// Helpers below inline MDB_vals. Avoids key alloc/dup on CPython, where
// CFFI will use PyString_AS_STRING when passed as an argument.
static int pymdb_get(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen,
MDB_val *val_out)
{
MDB_val key = {keylen, key_s};
int rc = mdb_get(txn, dbi, &key, val_out);
return rc;
}
static int pymdb_put(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen,
char *val_s, size_t vallen, unsigned int flags)
{
MDB_val key = {keylen, key_s};
MDB_val val = {vallen, val_s};
return mdb_put(txn, dbi, &key, &val, flags);
}
static int pymdb_del(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen,
char *val_s, size_t vallen)
{
MDB_val key = {keylen, key_s};
MDB_val val = {vallen, val_s};
MDB_val *valptr;
if(vallen == 0) {
valptr = NULL;
} else {
valptr = &val;
}
return mdb_del(txn, dbi, &key, valptr);
}
static int pymdb_cursor_get(MDB_cursor *cursor,
char *key_s, size_t key_len,
char *data_s, size_t data_len,
MDB_val *key, MDB_val *data, int op)
{
MDB_val tmp_key = {key_len, key_s};
MDB_val tmp_data = {data_len, data_s};
int rc = mdb_cursor_get(cursor, &tmp_key, &tmp_data, op);
if(! rc) {
*key = tmp_key;
*data = tmp_data;
}
return rc;
}
static int pymdb_cursor_put(MDB_cursor *cursor, char *key_s, size_t keylen,
char *val_s, size_t vallen, int flags)
{
MDB_val tmpkey = {keylen, key_s};
MDB_val tmpval = {vallen, val_s};
return mdb_cursor_put(cursor, &tmpkey, &tmpval, flags);
}
'''
if not lmdb._reading_docs():
import cffi
# Try to use distutils-bundled CFFI configuration to avoid a recompile and
# potential compile errors during first module import.
_config_vars = _config.CONFIG if _config else {
'extra_compile_args': ['-w'],
'extra_sources': ['lib/mdb.c', 'lib/midl.c'],
'extra_include_dirs': ['lib'],
'extra_library_dirs': [],
'libraries': []
}
_have_patched_lmdb = '-DHAVE_PATCHED_LMDB=1' in _config.CONFIG['extra_compile_args'] # type: ignore
if _have_patched_lmdb:
_CFFI_CDEF += _CFFI_CDEF_PATCHED
_ffi = cffi.FFI()
_ffi.cdef(_CFFI_CDEF)
_lib = _ffi.verify(_CFFI_VERIFY,
modulename='lmdb_cffi',
ext_package='lmdb',
sources=_config_vars['extra_sources'],
extra_compile_args=_config_vars['extra_compile_args'],
include_dirs=_config_vars['extra_include_dirs'],
libraries=_config_vars['libraries'],
library_dirs=_config_vars['extra_library_dirs'])
@_ffi.callback("int(char *, void *)")
def _msg_func(s, _):
"""mdb_msg_func() callback. Appends `s` to _callbacks.msg_func list.
"""
_callbacks.msg_func.append(_ffi.string(s).decode())
return 0
class Error(Exception):
"""Raised when an LMDB-related error occurs, and no more specific
:py:class:`lmdb.Error` subclass exists."""
def __init__(self, what, code=0):
self.what = what
self.code = code
self.reason = _ffi.string(_lib.mdb_strerror(code))
msg = what
if code:
msg = '%s: %s' % (what, self.reason)
hint = getattr(self, 'MDB_HINT', None)
if hint:
msg += ' (%s)' % (hint,)
Exception.__init__(self, msg)
class KeyExistsError(Error):
"""Key/data pair already exists."""
MDB_NAME = 'MDB_KEYEXIST'
class NotFoundError(Error):
"""No matching key/data pair found.
Normally py-lmdb indicates a missing key by returning ``None``, or a
user-supplied default value, however LMDB may return this error where
py-lmdb does not know to convert it into a non-exceptional return.
"""
MDB_NAME = 'MDB_NOTFOUND'
class PageNotFoundError(Error):
"""Request page not found."""
MDB_NAME = 'MDB_PAGE_NOTFOUND'
class CorruptedError(Error):
"""Located page was of the wrong type."""
MDB_NAME = 'MDB_CORRUPTED'
class PanicError(Error):
"""Update of meta page failed."""
MDB_NAME = 'MDB_PANIC'
class VersionMismatchError(Error):
"""Database environment version mismatch."""
MDB_NAME = 'MDB_VERSION_MISMATCH'
class InvalidError(Error):
"""File is not an MDB file."""
MDB_NAME = 'MDB_INVALID'
class MapFullError(Error):
"""Environment map_size= limit reached."""
MDB_NAME = 'MDB_MAP_FULL'
MDB_HINT = 'Please use a larger Environment(map_size=) parameter'
class DbsFullError(Error):
"""Environment max_dbs= limit reached."""
MDB_NAME = 'MDB_DBS_FULL'
MDB_HINT = 'Please use a larger Environment(max_dbs=) parameter'
class ReadersFullError(Error):
"""Environment max_readers= limit reached."""
MDB_NAME = 'MDB_READERS_FULL'
MDB_HINT = 'Please use a larger Environment(max_readers=) parameter'
class TlsFullError(Error):
"""Thread-local storage keys full - too many environments open."""
MDB_NAME = 'MDB_TLS_FULL'
class TxnFullError(Error):
"""Transaciton has too many dirty pages - transaction too big."""
MDB_NAME = 'MDB_TXN_FULL'
MDB_HINT = 'Please do less work within your transaction'
class CursorFullError(Error):
"""Internal error - cursor stack limit reached."""
MDB_NAME = 'MDB_CURSOR_FULL'
class PageFullError(Error):
"""Internal error - page has no more space."""
MDB_NAME = 'MDB_PAGE_FULL'
class MapResizedError(Error):
"""Database contents grew beyond environment map_size=."""
MDB_NAME = 'MDB_MAP_RESIZED'
class IncompatibleError(Error):
"""Operation and DB incompatible, or DB flags changed."""
MDB_NAME = 'MDB_INCOMPATIBLE'
class BadRslotError(Error):
"""Invalid reuse of reader locktable slot."""
MDB_NAME = 'MDB_BAD_RSLOT'
class BadDbiError(Error):
"""The specified DBI was changed unexpectedly."""
MDB_NAME = 'MDB_BAD_DBI'
class BadTxnError(Error):
"""Transaction cannot recover - it must be aborted."""
MDB_NAME = 'MDB_BAD_TXN'
class BadValsizeError(Error):
"""Too big key/data, key is empty, or wrong DUPFIXED size."""
MDB_NAME = 'MDB_BAD_VALSIZE'
class ReadonlyError(Error):
"""An attempt was made to modify a read-only database."""
MDB_NAME = 'EACCES'
class InvalidParameterError(Error):
"""An invalid parameter was specified."""
MDB_NAME = 'EINVAL'
class LockError(Error):
"""The environment was locked by another process."""
MDB_NAME = 'EAGAIN'
class MemoryError(Error):
"""Out of memory."""
MDB_NAME = 'ENOMEM'
class DiskError(Error):
"""No more disk space."""
MDB_NAME = 'ENOSPC'
# Prepare _error_map, a mapping of integer MDB_ERROR_CODE to exception class.
if not lmdb._reading_docs():
_error_map = {}
for obj in list(globals().values()):
if inspect.isclass(obj) and issubclass(obj, Error) and obj is not Error:
_error_map[getattr(_lib, obj.MDB_NAME)] = obj
del obj
def _error(what, rc):
"""Lookup and instantiate the correct exception class for the error code
`rc`, using :py:class:`Error` if no better class exists."""
return _error_map.get(rc, Error)(what, rc)
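# Illustrative note (nothing here is executed): _error('mdb_get', _lib.MDB_NOTFOUND)
# returns a NotFoundError whose message combines the caller-supplied context with
# mdb_strerror()'s text for that error code.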
class Some_LMDB_Resource_That_Was_Deleted_Or_Closed(object):
"""We need this because CFFI on PyPy treats None as cffi.NULL, instead of
throwing an exception it feeds LMDB null pointers. That means simply
replacing native handles with None during _invalidate() will cause NULL
pointer dereferences. Instead use this class, and its weird name to cause a
TypeError, with a very obvious string in the exception text.
The only alternatives to this are inserting a check around every single use
of a native handle to ensure the handle is still valid prior to calling
LMDB, or doing no crash-safety checking at all.
"""
def __nonzero__(self):
return 0
def __bool__(self):
return False
def __repr__(self):
return "<This used to be a LMDB resource but it was deleted or closed>"
_invalid = Some_LMDB_Resource_That_Was_Deleted_Or_Closed()
def _mvbuf(mv):
"""Convert a MDB_val cdata to a CFFI buffer object."""
return _ffi.buffer(mv.mv_data, mv.mv_size)
def _mvstr(mv):
"""Convert a MDB_val cdata to Python bytes."""
return _ffi.buffer(mv.mv_data, mv.mv_size)[:]
def preload(mv):
_lib.preload(0, mv.mv_data, mv.mv_size)
def enable_drop_gil():
"""Deprecated."""
def version(subpatch=False):
"""
Return a tuple of integers `(major, minor, patch)` describing the LMDB
library version that the binding is linked against. The version of the
binding itself is available from ``lmdb.__version__``.
`subpatch`:
If true, returns a 4 integer tuple consisting of the same plus
an extra integer that represents any patches applied by py-lmdb
itself (0 representing no patches).
"""
if subpatch:
return (_lib.MDB_VERSION_MAJOR,
_lib.MDB_VERSION_MINOR,
_lib.MDB_VERSION_PATCH,
1 if _have_patched_lmdb else 0)
return (_lib.MDB_VERSION_MAJOR,
_lib.MDB_VERSION_MINOR,
_lib.MDB_VERSION_PATCH)
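# A small illustrative sketch, kept as a comment: distinguishing the linked C
# library version from the binding's own release string.
#
#     >>> import lmdb
#     >>> lmdb.version()       # e.g. (0, 9, 29) -- the LMDB C library
#     >>> lmdb.__version__     # the py-lmdb package version string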
class Environment(object):
"""
Structure for a database environment. An environment may contain multiple
databases, all residing in the same shared-memory map and underlying disk
file.
To write to the environment a :py:class:`Transaction` must be created. One
simultaneous write transaction is allowed, however there is no limit on the
number of read transactions even when a write transaction exists.
This class is aliased to `lmdb.open`.
    It is a serious error to have the same LMDB file open more than once in the
    same process at the same time. Failure to heed this may lead to data corruption and
interpreter crash.
Equivalent to `mdb_env_open()
<http://lmdb.tech/doc/group__mdb.html#ga1fe2740e25b1689dc412e7b9faadba1b>`_
`path`:
Location of directory (if `subdir=True`) or file prefix to store
the database.
`map_size`:
Maximum size database may grow to; used to size the memory mapping.
If database grows larger than ``map_size``, an exception will be
raised and the user must close and reopen :py:class:`Environment`.
On 64-bit there is no penalty for making this huge (say 1TB). Must
be <2GB on 32-bit.
.. note::
**The default map size is set low to encourage a crash**, so
users can figure out a good value before learning about this
option too late.
`subdir`:
If ``True``, `path` refers to a subdirectory to store the data and
lock files in, otherwise it refers to a filename prefix.
`readonly`:
If ``True``, disallow any write operations. Note the lock file is
still modified. If specified, the ``write`` flag to
:py:meth:`begin` or :py:class:`Transaction` is ignored.
`metasync`:
If ``False``, flush system buffers to disk only once per
transaction, omit the metadata flush. Defer that until the system
flushes files to disk, or next commit or :py:meth:`sync`.
This optimization maintains database integrity, but a system crash
may undo the last committed transaction. I.e. it preserves the ACI
(atomicity, consistency, isolation) but not D (durability) database
property.
`sync`:
If ``False``, don't flush system buffers to disk when committing a
transaction. This optimization means a system crash can corrupt the
database or lose the last transactions if buffers are not yet
flushed to disk.
The risk is governed by how often the system flushes dirty buffers
to disk and how often :py:meth:`sync` is called. However, if the
filesystem preserves write order and `writemap=False`, transactions
exhibit ACI (atomicity, consistency, isolation) properties and only
lose D (durability). I.e. database integrity is maintained, but a
system crash may undo the final transactions.
Note that `sync=False, writemap=True` leaves the system with no
hint for when to write transactions to disk, unless :py:meth:`sync`
is called. `map_async=True, writemap=True` may be preferable.
`mode`:
File creation mode.
`create`:
If ``False``, do not create the directory `path` if it is missing.
`readahead`:
If ``False``, LMDB will disable the OS filesystem readahead
mechanism, which may improve random read performance when a
database is larger than RAM.
`writemap`:
If ``True``, use a writeable memory map unless `readonly=True`.
This is faster and uses fewer mallocs, but loses protection from
application bugs like wild pointer writes and other bad updates
into the database. Incompatible with nested transactions.
Processes with and without `writemap` on the same environment do
not cooperate well.
`meminit`:
If ``False`` LMDB will not zero-initialize buffers prior to writing
them to disk. This improves performance but may cause old heap data
        to be saved in the unused portion of the buffer. Do not use
this option if your application manipulates confidential data (e.g.
plaintext passwords) in memory. This option is only meaningful when
`writemap=False`; new pages are always zero-initialized when
`writemap=True`.
`map_async`:
When ``writemap=True``, use asynchronous flushes to disk. As with
``sync=False``, a system crash can then corrupt the database or
lose the last transactions. Calling :py:meth:`sync` ensures
on-disk database integrity until next commit.
`max_readers`:
Maximum number of simultaneous read transactions. Can only be set
by the first process to open an environment, as it affects the size
of the lock file and shared memory area. Attempts to simultaneously
start more than this many *read* transactions will fail.
`max_dbs`:
Maximum number of databases available. If 0, assume environment
will be used as a single database.
`max_spare_txns`:
Read-only transactions to cache after becoming unused. Caching
transactions avoids two allocations, one lock and linear scan
of the shared environment per invocation of :py:meth:`begin`,
:py:class:`Transaction`, :py:meth:`get`, :py:meth:`gets`, or
:py:meth:`cursor`. Should match the process's maximum expected
concurrent transactions (e.g. thread count).
`lock`:
If ``False``, don't do any locking. If concurrent access is
anticipated, the caller must manage all concurrency itself. For
proper operation the caller must enforce single-writer semantics,
and must ensure that no readers are using old transactions while a
writer is active. The simplest approach is to use an exclusive lock
so that no readers may be active at all when a writer begins.
"""
def __init__(self, path, map_size=10485760, subdir=True,
readonly=False, metasync=True, sync=True, map_async=False,
mode=O_0755, create=True, readahead=True, writemap=False,
meminit=True, max_readers=126, max_dbs=0, max_spare_txns=1,
lock=True):
self._max_spare_txns = max_spare_txns
self._spare_txns = []
envpp = _ffi.new('MDB_env **')
rc = _lib.mdb_env_create(envpp)
if rc:
raise _error("mdb_env_create", rc)
self._env = envpp[0]
self._deps = set()
self._creating_db_in_readonly = False
self.set_mapsize(map_size)
rc = _lib.mdb_env_set_maxreaders(self._env, max_readers)
if rc:
raise _error("mdb_env_set_maxreaders", rc)
rc = _lib.mdb_env_set_maxdbs(self._env, max_dbs)
if rc:
raise _error("mdb_env_set_maxdbs", rc)
if create and subdir and not readonly:
try:
os.mkdir(path, mode)
except EnvironmentError as e:
if e.errno != errno.EEXIST:
raise
flags = _lib.MDB_NOTLS
if not subdir:
flags |= _lib.MDB_NOSUBDIR
if readonly:
flags |= _lib.MDB_RDONLY
self.readonly = readonly
if not metasync:
flags |= _lib.MDB_NOMETASYNC
if not sync:
flags |= _lib.MDB_NOSYNC
if map_async:
flags |= _lib.MDB_MAPASYNC
if not readahead:
flags |= _lib.MDB_NORDAHEAD
if writemap:
flags |= _lib.MDB_WRITEMAP
if not meminit:
flags |= _lib.MDB_NOMEMINIT
if not lock:
flags |= _lib.MDB_NOLOCK
if isinstance(path, UnicodeType):
path = path.encode(sys.getfilesystemencoding())
rc = _lib.mdb_env_open(self._env, path, flags, mode & ~O_0111)
if rc:
raise _error(path, rc)
with self.begin(db=object()) as txn:
self._db = _Database(
env=self,
txn=txn,
name=None,
reverse_key=False,
dupsort=False,
create=True,
integerkey=False,
integerdup=False,
dupfixed=False
)
self._dbs = {None: self._db}
def __enter__(self):
return self
def __exit__(self, _1, _2, _3):
self.close()
def __del__(self):
self.close()
_env = None
_deps = None
_spare_txns = None
_dbs = None
def set_mapsize(self, map_size):
"""Change the maximum size of the map file. This function will fail if
any transactions are active in the current process.
`map_size`:
The new size in bytes.
Equivalent to `mdb_env_set_mapsize()
<http://lmdb.tech/doc/group__mdb.html#gaa2506ec8dab3d969b0e609cd82e619e5>`_
Warning:
There's a data race in the underlying library that may cause
catastrophic loss of data if you use this method.
You are safe if one of the following are true:
* Only one process accessing a particular LMDB file ever calls
this method.
* You use locking external to this library to ensure that only one
process accessing the current LMDB file can be inside this function.
"""
rc = _lib.mdb_env_set_mapsize(self._env, map_size)
if rc:
raise _error("mdb_env_set_mapsize", rc)
def close(self):
"""Close the environment, invalidating any open iterators, cursors, and
transactions. Repeat calls to :py:meth:`close` have no effect.
Equivalent to `mdb_env_close()
<http://lmdb.tech/doc/group__mdb.html#ga4366c43ada8874588b6a62fbda2d1e95>`_
"""
if self._env:
if self._deps:
while self._deps:
self._deps.pop()._invalidate()
self._deps = None
if self._spare_txns:
while self._spare_txns:
_lib.mdb_txn_abort(self._spare_txns.pop())
self._spare_txns = None
if self._dbs:
self._dbs.clear()
self._dbs = None
self._db = None
_lib.mdb_env_close(self._env)
self._env = _invalid
def path(self):
"""Directory path or file name prefix where this environment is
stored.
Equivalent to `mdb_env_get_path()
<http://lmdb.tech/doc/group__mdb.html#gac699fdd8c4f8013577cb933fb6a757fe>`_
"""
path = _ffi.new('char **')
rc = _lib.mdb_env_get_path(self._env, path)
if rc:
raise _error("mdb_env_get_path", rc)
return _ffi.string(path[0]).decode(sys.getfilesystemencoding())
def copy(self, path, compact=False, txn=None):
"""Make a consistent copy of the environment in the given destination
directory.
`compact`:
If ``True``, perform compaction while copying: omit free pages and
sequentially renumber all pages in output. This option consumes
more CPU and runs more slowly than the default, but may produce a
smaller output database.
`txn`:
If provided, the backup will be taken from the database with
respect to that transaction, otherwise a temporary read-only
transaction will be created. Note: this parameter being non-None
is not available if the module was built with LMDB_PURE. Note:
this parameter may be set only if compact=True.
Equivalent to `mdb_env_copy2() or mdb_env_copy3()
<http://lmdb.tech/doc/group__mdb.html#ga5d51d6130325f7353db0955dbedbc378>`_
"""
flags = _lib.MDB_CP_COMPACT if compact else 0
if txn and not _have_patched_lmdb:
raise TypeError("Non-patched LMDB doesn't support transaction with env.copy")
if txn and not flags:
raise TypeError("txn argument only compatible with compact=True")
encoded = path.encode(sys.getfilesystemencoding())
if _have_patched_lmdb:
rc = _lib.mdb_env_copy3(self._env, encoded, flags, txn._txn if txn else _ffi.NULL)
if rc:
raise _error("mdb_env_copy3", rc)
else:
rc = _lib.mdb_env_copy2(self._env, encoded, flags)
if rc:
raise _error("mdb_env_copy2", rc)
def copyfd(self, fd, compact=False, txn=None):
"""Copy a consistent version of the environment to file descriptor
`fd`.
`compact`:
If ``True``, perform compaction while copying: omit free pages and
sequentially renumber all pages in output. This option consumes
more CPU and runs more slowly than the default, but may produce a
smaller output database.
`txn`:
If provided, the backup will be taken from the database with
respect to that transaction, otherwise a temporary read-only
transaction will be created. Note: this parameter being non-None
is not available if the module was built with LMDB_PURE.
Equivalent to `mdb_env_copyfd2() or mdb_env_copyfd3
<http://lmdb.tech/doc/group__mdb.html#ga5d51d6130325f7353db0955dbedbc378>`_
"""
if txn and not _have_patched_lmdb:
raise TypeError("Non-patched LMDB doesn't support transaction with env.copy")
if is_win32:
# Convert C library handle to kernel handle.
fd = msvcrt.get_osfhandle(fd)
flags = _lib.MDB_CP_COMPACT if compact else 0
if txn and not flags:
raise TypeError("txn argument only compatible with compact=True")
if _have_patched_lmdb:
rc = _lib.mdb_env_copyfd3(self._env, fd, flags, txn._txn if txn else _ffi.NULL)
if rc:
raise _error("mdb_env_copyfd3", rc)
else:
rc = _lib.mdb_env_copyfd2(self._env, fd, flags)
if rc:
raise _error("mdb_env_copyfd2", rc)
def sync(self, force=False):
"""Flush the data buffers to disk.
Equivalent to `mdb_env_sync()
<http://lmdb.tech/doc/group__mdb.html#ga85e61f05aa68b520cc6c3b981dba5037>`_
Data is always written to disk when :py:meth:`Transaction.commit` is
called, but the operating system may keep it buffered. MDB always
flushes the OS buffers upon commit as well, unless the environment was
opened with `sync=False` or `metasync=False`.
`force`:
If ``True``, force a synchronous flush. Otherwise if the
environment was opened with `sync=False` the flushes will be
omitted, and with `map_async=True` they will be asynchronous.
"""
rc = _lib.mdb_env_sync(self._env, force)
if rc:
raise _error("mdb_env_sync", rc)
def _convert_stat(self, st):
"""Convert a MDB_stat to a dict.
"""
return {
"psize": st.ms_psize,
"depth": st.ms_depth,
"branch_pages": st.ms_branch_pages,
"leaf_pages": st.ms_leaf_pages,
"overflow_pages": st.ms_overflow_pages,
"entries": st.ms_entries
}
def stat(self):
"""stat()
Return some environment statistics for the default database as a dict:
+--------------------+---------------------------------------+
| ``psize`` | Size of a database page in bytes. |
+--------------------+---------------------------------------+
| ``depth`` | Height of the B-tree. |
+--------------------+---------------------------------------+
| ``branch_pages`` | Number of internal (non-leaf) pages. |
+--------------------+---------------------------------------+
| ``leaf_pages`` | Number of leaf pages. |
+--------------------+---------------------------------------+
| ``overflow_pages`` | Number of overflow pages. |
+--------------------+---------------------------------------+
| ``entries`` | Number of data items. |
+--------------------+---------------------------------------+
Equivalent to `mdb_env_stat()
<http://lmdb.tech/doc/group__mdb.html#gaf881dca452050efbd434cd16e4bae255>`_
"""
st = _ffi.new('MDB_stat *')
rc = _lib.mdb_env_stat(self._env, st)
if rc:
raise _error("mdb_env_stat", rc)
return self._convert_stat(st)
def info(self):
"""Return some nice environment information as a dict:
+--------------------+---------------------------------------------+
| ``map_addr`` | Address of database map in RAM. |
+--------------------+---------------------------------------------+
| ``map_size`` | Size of database map in RAM. |
+--------------------+---------------------------------------------+
| ``last_pgno`` | ID of last used page. |
+--------------------+---------------------------------------------+
| ``last_txnid`` | ID of last committed transaction. |
+--------------------+---------------------------------------------+
| ``max_readers`` | Number of reader slots allocated in the |
| | lock file. Equivalent to the value of |
| | `maxreaders=` specified by the first |
| | process opening the Environment. |
+--------------------+---------------------------------------------+
| ``num_readers`` | Maximum number of reader slots in |
| | simultaneous use since the lock file was |
| | initialized. |
+--------------------+---------------------------------------------+
Equivalent to `mdb_env_info()
<http://lmdb.tech/doc/group__mdb.html#ga18769362c7e7d6cf91889a028a5c5947>`_
"""
info = _ffi.new('MDB_envinfo *')
rc = _lib.mdb_env_info(self._env, info)
if rc:
raise _error("mdb_env_info", rc)
return {
"map_addr": int(_ffi.cast('long', info.me_mapaddr)),
"map_size": info.me_mapsize,
"last_pgno": info.me_last_pgno,
"last_txnid": info.me_last_txnid,
"max_readers": info.me_maxreaders,
"num_readers": info.me_numreaders
}
def flags(self):
"""Return a dict describing Environment constructor flags used to
instantiate this environment."""
flags_ = _ffi.new('unsigned int[]', 1)
rc = _lib.mdb_env_get_flags(self._env, flags_)
if rc:
raise _error("mdb_env_get_flags", rc)
flags = flags_[0]
return {
'subdir': not (flags & _lib.MDB_NOSUBDIR),
'readonly': bool(flags & _lib.MDB_RDONLY),
'metasync': not (flags & _lib.MDB_NOMETASYNC),
'sync': not (flags & _lib.MDB_NOSYNC),
'map_async': bool(flags & _lib.MDB_MAPASYNC),
'readahead': not (flags & _lib.MDB_NORDAHEAD),
'writemap': bool(flags & _lib.MDB_WRITEMAP),
'meminit': not (flags & _lib.MDB_NOMEMINIT),
'lock': not (flags & _lib.MDB_NOLOCK),
}
def max_key_size(self):
"""Return the maximum size in bytes of a record's key part. This
matches the ``MDB_MAXKEYSIZE`` constant set at compile time."""
return _lib.mdb_env_get_maxkeysize(self._env)
def max_readers(self):
"""Return the maximum number of readers specified during open of the
environment by the first process. This is the same as `max_readers=`
specified to the constructor if this process was the first to open the
environment."""
readers_ = _ffi.new('unsigned int[]', 1)
rc = _lib.mdb_env_get_maxreaders(self._env, readers_)
if rc:
raise _error("mdb_env_get_maxreaders", rc)
return readers_[0]
def readers(self):
"""Return a multi line Unicode string describing the current state of
the reader lock table."""
_callbacks.msg_func = []
try:
rc = _lib.mdb_reader_list(self._env, _msg_func, _ffi.NULL)
if rc:
raise _error("mdb_reader_list", rc)
return UnicodeType().join(_callbacks.msg_func)
finally:
del _callbacks.msg_func
def reader_check(self):
"""Search the reader lock table for stale entries, for example due to a
crashed process. Returns the number of stale entries that were cleared.
"""
reaped = _ffi.new('int[]', 1)
rc = _lib.mdb_reader_check(self._env, reaped)
if rc:
raise _error('mdb_reader_check', rc)
return reaped[0]
def open_db(self, key=None, txn=None, reverse_key=False, dupsort=False,
create=True, integerkey=False, integerdup=False,
dupfixed=False):
"""
Open a database, returning an instance of :py:class:`_Database`. Repeat
:py:meth:`Environment.open_db` calls for the same name will return the
same handle. As a special case, the main database is always open.
Equivalent to `mdb_dbi_open()
<http://lmdb.tech/doc/group__mdb.html#gac08cad5b096925642ca359a6d6f0562a>`_
Named databases are implemented by *storing a special descriptor in the
main database*. All databases in an environment *share the same file*.
Because the descriptor is present in the main database, attempts to
create a named database will fail if a key matching the database's name
already exists. Furthermore *the key is visible to lookups and
enumerations*. If your main database keyspace conflicts with the names
you use for named databases, then move the contents of your main
database to another named database.
::
>>> env = lmdb.open('/tmp/test', max_dbs=2)
>>> with env.begin(write=True) as txn
... txn.put('somename', 'somedata')
>>> # Error: database cannot share name of existing key!
>>> subdb = env.open_db('somename')
A newly created database will not exist if the transaction that created
it aborted, nor if another process deleted it. The handle resides in
the shared environment, it is not owned by the current transaction or
process. Only one thread should call this function; it is not
mutex-protected in a read-only transaction.
The `dupsort`, `integerkey`, `integerdup`, and `dupfixed` parameters are
ignored if the database already exists. The state of those settings are
persistent and immutable per database. See :py:meth:`_Database.flags`
to view the state of those options for an opened database. A consequence
of the immutability of these flags is that the default non-named database
will never have these flags set.
Preexisting transactions, other than the current transaction and any
parents, must not use the new handle, nor must their children.
`key`:
Bytestring database name. If ``None``, indicates the main
database should be returned, otherwise indicates a named
database should be created inside the main database.
In other words, *a key representing the database will be
visible in the main database, and the database name cannot
conflict with any existing key.*
`txn`:
Transaction used to create the database if it does not exist.
            If unspecified, a temporary write transaction is used. Do not
call :py:meth:`open_db` from inside an existing transaction
without supplying it here. Note the passed transaction must
have `write=True`.
`reverse_key`:
If ``True``, keys are compared from right to left (e.g. DNS
names).
`dupsort`:
Duplicate keys may be used in the database. (Or, from another
perspective, keys may have multiple data items, stored in
sorted order.) By default keys must be unique and may have only
a single data item.
`create`:
If ``True``, create the database if it doesn't exist, otherwise
raise an exception.
`integerkey`:
If ``True``, indicates keys in the database are C unsigned
or ``size_t`` integers encoded in native byte order. Keys must
all be either unsigned or ``size_t``, they cannot be mixed in a
single database.
`integerdup`:
If ``True``, values in the
            database are C unsigned or ``size_t`` integers encoded in
native byte order. Implies `dupsort` and `dupfixed` are
``True``.
`dupfixed`:
If ``True``, values for each key
in database are of fixed size, allowing each additional
duplicate value for a key to be stored without a header
indicating its size. Implies `dupsort` is ``True``.
"""
if isinstance(key, UnicodeType):
raise TypeError('key must be bytes')
if key is None and (reverse_key or dupsort or integerkey or integerdup
or dupfixed):
raise ValueError('May not set flags on the main database')
db = self._dbs.get(key)
if db:
return db
if integerdup:
dupfixed = True
if dupfixed:
dupsort = True
if txn:
db = _Database(self, txn, key, reverse_key, dupsort, create,
integerkey, integerdup, dupfixed)
else:
try:
self._creating_db_in_readonly = True
with self.begin(write=not self.readonly) as txn:
db = _Database(self, txn, key, reverse_key, dupsort, create,
integerkey, integerdup, dupfixed)
finally:
self._creating_db_in_readonly = False
self._dbs[key] = db
return db
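    # Editor's sketch (not part of the original module): a hedged example of the
    # open_db() flow documented above; the path, database name, and key/value
    # bytes are illustrative assumptions only.
    #
    #   env = lmdb.open('/tmp/example-env', max_dbs=2)
    #   index_db = env.open_db(b'index', dupsort=True)
    #   with env.begin(write=True, db=index_db) as txn:
    #       txn.put(b'word', b'doc-1')
    #       txn.put(b'word', b'doc-2')   # second value for the same key (dupsort)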
def begin(self, db=None, parent=None, write=False, buffers=False):
"""Shortcut for :py:class:`lmdb.Transaction`"""
return Transaction(self, db, parent, write, buffers)
class _Database(object):
"""
Internal database handle. This class is opaque, save a single method.
Should not be constructed directly. Use :py:meth:`Environment.open_db`
instead.
"""
def __init__(self, env, txn, name, reverse_key, dupsort, create,
integerkey, integerdup, dupfixed):
env._deps.add(self)
self._deps = set()
self._name = name
flags = 0
if reverse_key:
flags |= _lib.MDB_REVERSEKEY
if dupsort:
flags |= _lib.MDB_DUPSORT
if create:
flags |= _lib.MDB_CREATE
if integerkey:
flags |= _lib.MDB_INTEGERKEY
if integerdup:
flags |= _lib.MDB_INTEGERDUP
if dupfixed:
flags |= _lib.MDB_DUPFIXED
dbipp = _ffi.new('MDB_dbi *')
self._dbi = None
rc = _lib.mdb_dbi_open(txn._txn, name or _ffi.NULL, flags, dbipp)
if rc:
raise _error("mdb_dbi_open", rc)
self._dbi = dbipp[0]
self._load_flags(txn)
def _load_flags(self, txn):
"""Load MDB's notion of the database flags."""
flags_ = _ffi.new('unsigned int[]', 1)
rc = _lib.mdb_dbi_flags(txn._txn, self._dbi, flags_)
if rc:
raise _error("mdb_dbi_flags", rc)
self._flags = flags_[0]
def flags(self, *args):
"""Return the database's associated flags as a dict of _Database
constructor kwargs."""
if len(args) > 1:
raise TypeError('flags takes 0 or 1 arguments')
return {
'reverse_key': bool(self._flags & _lib.MDB_REVERSEKEY),
'dupsort': bool(self._flags & _lib.MDB_DUPSORT),
'integerkey': bool(self._flags & _lib.MDB_INTEGERKEY),
'integerdup': bool(self._flags & _lib.MDB_INTEGERDUP),
'dupfixed': bool(self._flags & _lib.MDB_DUPFIXED),
}
def _invalidate(self):
self._dbi = _invalid
open = Environment
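# Editor's note (hedged sketch, not in the original source): `open` above is the
# conventional module-level entry point, e.g.
#
#   env = lmdb.open('/tmp/mydb', map_size=2 ** 20)
#   with env.begin(write=True) as txn:
#       txn.put(b'key', b'value')
#
# The path and map_size values are illustrative assumptions only.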
class Transaction(object):
"""
A transaction object. All operations require a transaction handle,
transactions may be read-only or read-write. Write transactions may not
span threads. Transaction objects implement the context manager protocol,
so that reliable release of the transaction happens even in the face of
unhandled exceptions:
.. code-block:: python
# Transaction aborts correctly:
with env.begin(write=True) as txn:
crash()
# Transaction commits automatically:
with env.begin(write=True) as txn:
txn.put('a', 'b')
Equivalent to `mdb_txn_begin()
<http://lmdb.tech/doc/group__mdb.html#gad7ea55da06b77513609efebd44b26920>`_
`env`:
Environment the transaction should be on.
`db`:
Default named database to operate on. If unspecified, defaults to
the environment's main database. Can be overridden on a per-call
basis below.
`parent`:
``None``, or a parent transaction (see lmdb.h).
`write`:
Transactions are read-only by default. To modify the database, you
must pass `write=True`. This flag is ignored if
:py:class:`Environment` was opened with ``readonly=True``.
`buffers`:
If ``True``, indicates :py:func:`buffer` objects should be yielded
instead of bytestrings. This setting applies to the
:py:class:`Transaction` instance itself and any :py:class:`Cursors
<Cursor>` created within the transaction.
This feature significantly improves performance, since MDB has a
zero-copy design, but it requires care when manipulating the
returned buffer objects. The benefit of this facility is diminished
when using small keys and values.
"""
# If constructor fails, then __del__ will attempt to access these
# attributes.
_env = _invalid
_txn = _invalid
_parent = None
_write = False
# Mutations occurred since transaction start. Required to know when Cursor
# key/value must be refreshed.
_mutations = 0
def __init__(self, env, db=None, parent=None, write=False, buffers=False):
env._deps.add(self)
self.env = env # hold ref
self._db = db or env._db
self._env = env._env
self._key = _ffi.new('MDB_val *')
self._val = _ffi.new('MDB_val *')
self._to_py = _mvbuf if buffers else _mvstr
self._deps = set()
if parent:
self._parent = parent
parent_txn = parent._txn
parent._deps.add(self)
else:
parent_txn = _ffi.NULL
if write:
if env.readonly:
msg = 'Cannot start write transaction with read-only env'
raise _error(msg, _lib.EACCES)
txnpp = _ffi.new('MDB_txn **')
rc = _lib.mdb_txn_begin(self._env, parent_txn, 0, txnpp)
if rc:
raise _error("mdb_txn_begin", rc)
self._txn = txnpp[0]
self._write = True
else:
try: # Exception catch in order to avoid racy 'if txns:' test
if env._creating_db_in_readonly: # Don't use spare txns for creating a DB when read-only
raise IndexError
self._txn = env._spare_txns.pop()
env._max_spare_txns += 1
rc = _lib.mdb_txn_renew(self._txn)
if rc:
while self._deps:
self._deps.pop()._invalidate()
_lib.mdb_txn_abort(self._txn)
self._txn = _invalid
self._invalidate()
raise _error("mdb_txn_renew", rc)
except IndexError:
txnpp = _ffi.new('MDB_txn **')
flags = _lib.MDB_RDONLY
rc = _lib.mdb_txn_begin(self._env, parent_txn, flags, txnpp)
if rc:
raise _error("mdb_txn_begin", rc)
self._txn = txnpp[0]
def _invalidate(self):
if self._txn:
self.abort()
self.env._deps.discard(self)
self._parent = None
self._env = _invalid
def __del__(self):
self.abort()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type:
self.abort()
else:
self.commit()
def id(self):
"""id()
Return the transaction's ID.
This returns the identifier associated with this transaction. For a
read-only transaction, this corresponds to the snapshot being read;
concurrent readers will frequently have the same transaction ID.
"""
return _lib.mdb_txn_id(self._txn)
def stat(self, db):
"""stat(db)
Return statistics like :py:meth:`Environment.stat`, except for a single
DBI. `db` must be a database handle returned by :py:meth:`open_db`.
"""
st = _ffi.new('MDB_stat *')
rc = _lib.mdb_stat(self._txn, db._dbi, st)
if rc:
raise _error('mdb_stat', rc)
return self.env._convert_stat(st)
def drop(self, db, delete=True):
"""Delete all keys in a named database and optionally delete the named
database itself. Deleting the named database causes it to become
unavailable, and invalidates existing cursors.
Equivalent to `mdb_drop()
<http://lmdb.tech/doc/group__mdb.html#gab966fab3840fc54a6571dfb32b00f2db>`_
"""
while db._deps:
db._deps.pop()._invalidate()
rc = _lib.mdb_drop(self._txn, db._dbi, delete)
self._mutations += 1
if rc:
raise _error("mdb_drop", rc)
if db._name in self.env._dbs:
del self.env._dbs[db._name]
def _cache_spare(self):
# In order to avoid taking and maintaining a lock, a race is allowed
# below which may result in more spare txns than desired. It seems
# unlikely the race could ever result in a large amount of spare txns,
# and in any case a correctly configured program should not be opening
# more read-only transactions than there are configured spares.
if self.env._max_spare_txns > 0:
_lib.mdb_txn_reset(self._txn)
self.env._spare_txns.append(self._txn)
self.env._max_spare_txns -= 1
self._txn = _invalid
self._invalidate()
return True
return False
def commit(self):
"""Commit the pending transaction.
Equivalent to `mdb_txn_commit()
<http://lmdb.tech/doc/group__mdb.html#ga846fbd6f46105617ac9f4d76476f6597>`_
"""
while self._deps:
self._deps.pop()._invalidate()
if self._write or not self._cache_spare():
rc = _lib.mdb_txn_commit(self._txn)
self._txn = _invalid
if rc:
raise _error("mdb_txn_commit", rc)
self._invalidate()
def abort(self):
"""Abort the pending transaction. Repeat calls to :py:meth:`abort` have
no effect after a previously successful :py:meth:`commit` or
:py:meth:`abort`, or after the associated :py:class:`Environment` has
been closed.
Equivalent to `mdb_txn_abort()
<http://lmdb.tech/doc/group__mdb.html#ga73a5938ae4c3239ee11efa07eb22b882>`_
"""
if self._txn:
while self._deps:
self._deps.pop()._invalidate()
if self._write or not self._cache_spare():
rc = _lib.mdb_txn_abort(self._txn)
self._txn = _invalid
if rc:
raise _error("mdb_txn_abort", rc)
self._invalidate()
def get(self, key, default=None, db=None):
"""Fetch the first value matching `key`, returning `default` if `key`
does not exist. A cursor must be used to fetch all values for a key in
a `dupsort=True` database.
Equivalent to `mdb_get()
<http://lmdb.tech/doc/group__mdb.html#ga8bf10cd91d3f3a83a34d04ce6b07992d>`_
"""
rc = _lib.pymdb_get(self._txn, (db or self._db)._dbi,
key, len(key), self._val)
if rc:
if rc == _lib.MDB_NOTFOUND:
return default
raise _error("mdb_cursor_get", rc)
preload(self._val)
return self._to_py(self._val)
def put(self, key, value, dupdata=True, overwrite=True, append=False,
db=None):
"""Store a record, returning ``True`` if it was written, or ``False``
to indicate the key was already present and `overwrite=False`.
On success, the cursor is positioned on the new record.
Equivalent to `mdb_put()
<http://lmdb.tech/doc/group__mdb.html#ga4fa8573d9236d54687c61827ebf8cac0>`_
`key`:
Bytestring key to store.
`value`:
Bytestring value to store.
`dupdata`:
If ``False`` and database was opened with `dupsort=True`, will return
``False`` if the key already has that value. In other words, this only
affects the return value.
`overwrite`:
If ``False``, do not overwrite any existing matching key. If
False and writing to a dupsort=True database, this will not add a value
to the key and this function will return ``False``.
`append`:
If ``True``, append the pair to the end of the database without
comparing its order first. Appending a key that is not greater
than the highest existing key will fail and return ``False``.
`db`:
Named database to operate on. If unspecified, defaults to the
database given to the :py:class:`Transaction` constructor.
"""
flags = 0
if not dupdata:
flags |= _lib.MDB_NODUPDATA
if not overwrite:
flags |= _lib.MDB_NOOVERWRITE
if append:
flags |= _lib.MDB_APPEND
rc = _lib.pymdb_put(self._txn, (db or self._db)._dbi,
key, len(key), value, len(value), flags)
self._mutations += 1
if rc:
if rc == _lib.MDB_KEYEXIST:
return False
raise _error("mdb_put", rc)
return True
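    # Editor's sketch (assumption, not original code): how `overwrite` and
    # `dupdata` interact for a transaction `txn` on a dupsort=True database.
    #
    #   txn.put(b'k', b'v1')                    # True  - key/value pair is new
    #   txn.put(b'k', b'v1', dupdata=False)     # False - (key, value) already present
    #   txn.put(b'k', b'v2', overwrite=False)   # False - key already present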
def replace(self, key, value, db=None):
"""Use a temporary cursor to invoke :py:meth:`Cursor.replace`.
`db`:
Named database to operate on. If unspecified, defaults to the
database given to the :py:class:`Transaction` constructor.
"""
with Cursor(db or self._db, self) as curs:
return curs.replace(key, value)
def pop(self, key, db=None):
"""Use a temporary cursor to invoke :py:meth:`Cursor.pop`.
`db`:
Named database to operate on. If unspecified, defaults to the
database given to the :py:class:`Transaction` constructor.
"""
with Cursor(db or self._db, self) as curs:
return curs.pop(key)
def delete(self, key, value=EMPTY_BYTES, db=None):
"""Delete a key from the database.
Equivalent to `mdb_del()
<http://lmdb.tech/doc/group__mdb.html#gab8182f9360ea69ac0afd4a4eaab1ddb0>`_
`key`:
The key to delete.
value:
If the database was opened with dupsort=True and value is not
the empty bytestring, then delete elements matching only this
`(key, value)` pair, otherwise all values for key are deleted.
Returns True if at least one key was deleted.
"""
if value is None: # for bug-compatibility with cpython impl
value = EMPTY_BYTES
rc = _lib.pymdb_del(self._txn, (db or self._db)._dbi,
key, len(key), value, len(value))
self._mutations += 1
if rc:
if rc == _lib.MDB_NOTFOUND:
return False
raise _error("mdb_del", rc)
return True
def cursor(self, db=None):
"""Shortcut for ``lmdb.Cursor(db, self)``"""
return Cursor(db or self._db, self)
class Cursor(object):
"""
Structure for navigating a database.
Equivalent to `mdb_cursor_open()
<http://lmdb.tech/doc/group__mdb.html#ga9ff5d7bd42557fd5ee235dc1d62613aa>`_
`db`:
:py:class:`_Database` to navigate.
`txn`:
:py:class:`Transaction` to navigate.
As a convenience, :py:meth:`Transaction.cursor` can be used to quickly
return a cursor:
::
>>> env = lmdb.open('/tmp/foo')
>>> child_db = env.open_db('child_db')
>>> with env.begin() as txn:
... cursor = txn.cursor() # Cursor on main database.
... cursor2 = txn.cursor(child_db) # Cursor on child database.
Cursors start in an unpositioned state. If :py:meth:`iternext` or
:py:meth:`iterprev` are used in this state, iteration proceeds from the
start or end respectively. Iterators directly position using the cursor,
meaning strange behavior results when multiple iterators exist on the same
cursor.
.. note::
From the perspective of the Python binding, cursors return to an
'unpositioned' state once any scanning or seeking method (e.g.
:py:meth:`next`, :py:meth:`prev_nodup`, :py:meth:`set_range`) returns
``False`` or raises an exception. This is primarily to ensure safe,
consistent semantics in the face of any error condition.
When the Cursor returns to an unpositioned state, its :py:meth:`key`
and :py:meth:`value` return empty strings to indicate there is no
active position, although internally the LMDB cursor may still have a
valid position.
This may lead to slightly surprising behaviour when iterating the
values for a `dupsort=True` database's keys, since methods such as
:py:meth:`iternext_dup` will cause Cursor to appear unpositioned,
despite it returning ``False`` only to indicate there are no more
values for the current key. In that case, simply calling
:py:meth:`next` would cause iteration to resume at the next available
key.
This behaviour may change in future.
Iterator methods such as :py:meth:`iternext` and :py:meth:`iterprev` accept
`keys` and `values` arguments. If both are ``True``, then the value of
:py:meth:`item` is yielded on each iteration. If only `keys` is ``True``,
:py:meth:`key` is yielded, otherwise only :py:meth:`value` is yielded.
Prior to iteration, a cursor can be positioned anywhere in the database:
::
>>> with env.begin() as txn:
... cursor = txn.cursor()
... if not cursor.set_range('5'): # Position at first key >= '5'.
... print('Not found!')
... else:
... for key, value in cursor: # Iterate from first key >= '5'.
... print((key, value))
Iteration is not required to navigate, and sometimes results in ugly or
inefficient code. In cases where the iteration order is not obvious, or is
related to the data being read, use of :py:meth:`set_key`,
:py:meth:`set_range`, :py:meth:`key`, :py:meth:`value`, and :py:meth:`item`
may be preferable:
::
>>> # Record the path from a child to the root of a tree.
>>> path = ['child14123']
>>> while path[-1] != 'root':
... assert cursor.set_key(path[-1]), \\
... 'Tree is broken! Path: %s' % (path,)
... path.append(cursor.value())
"""
def __init__(self, db, txn):
db._deps.add(self)
txn._deps.add(self)
self.db = db # hold ref
self.txn = txn # hold ref
self._dbi = db._dbi
self._txn = txn._txn
self._key = _ffi.new('MDB_val *')
self._val = _ffi.new('MDB_val *')
self._valid = False
self._to_py = txn._to_py
curpp = _ffi.new('MDB_cursor **')
self._cur = None
rc = _lib.mdb_cursor_open(self._txn, self._dbi, curpp)
if rc:
raise _error("mdb_cursor_open", rc)
self._cur = curpp[0]
# If Transaction.mutations!=last_mutation, must MDB_GET_CURRENT to
# refresh `key' and `val'.
self._last_mutation = txn._mutations
def _invalidate(self):
if self._cur:
_lib.mdb_cursor_close(self._cur)
self.db._deps.discard(self)
self.txn._deps.discard(self)
self._cur = _invalid
self._dbi = _invalid
self._txn = _invalid
def __del__(self):
self._invalidate()
def close(self):
"""Close the cursor, freeing its associated resources."""
self._invalidate()
def __enter__(self):
return self
def __exit__(self, _1, _2, _3):
self._invalidate()
def key(self):
"""Return the current key."""
# Must refresh `key` and `val` following mutation.
if self._last_mutation != self.txn._mutations:
self._cursor_get(_lib.MDB_GET_CURRENT)
return self._to_py(self._key)
def value(self):
"""Return the current value."""
# Must refresh `key` and `val` following mutation.
if self._last_mutation != self.txn._mutations:
self._cursor_get(_lib.MDB_GET_CURRENT)
preload(self._val)
return self._to_py(self._val)
def item(self):
"""Return the current `(key, value)` pair."""
# Must refresh `key` and `val` following mutation.
if self._last_mutation != self.txn._mutations:
self._cursor_get(_lib.MDB_GET_CURRENT)
preload(self._val)
return self._to_py(self._key), self._to_py(self._val)
def _iter(self, op, keys, values):
if not values:
get = self.key
elif not keys:
get = self.value
else:
get = self.item
cur = self._cur
key = self._key
val = self._val
rc = 0
while self._valid:
yield get()
rc = _lib.mdb_cursor_get(cur, key, val, op)
self._valid = not rc
if rc:
self._key.mv_size = 0
self._val.mv_size = 0
if rc != _lib.MDB_NOTFOUND:
raise _error("mdb_cursor_get", rc)
def iternext(self, keys=True, values=True):
"""Return a forward iterator that yields the current element before
calling :py:meth:`next`, repeating until the end of the database is
reached. As a convenience, :py:class:`Cursor` implements the iterator
protocol by automatically returning a forward iterator when invoked:
::
>>> # Equivalent:
>>> it = iter(cursor)
>>> it = cursor.iternext(keys=True, values=True)
If the cursor is not yet positioned, it is moved to the first key in
the database, otherwise iteration proceeds from the current position.
"""
if not self._valid:
self.first()
return self._iter(_lib.MDB_NEXT, keys, values)
__iter__ = iternext
def iternext_dup(self, keys=False, values=True):
"""Return a forward iterator that yields the current value
("duplicate") of the current key before calling :py:meth:`next_dup`,
repeating until the last value of the current key is reached.
Only meaningful for databases opened with `dupsort=True`.
.. code-block:: python
if not cursor.set_key("foo"):
print("No values found for 'foo'")
else:
for idx, data in enumerate(cursor.iternext_dup()):
print("%d'th value for 'foo': %s" % (idx, data))
"""
return self._iter(_lib.MDB_NEXT_DUP, keys, values)
def iternext_nodup(self, keys=True, values=False):
"""Return a forward iterator that yields the current value
("duplicate") of the current key before calling :py:meth:`next_nodup`,
repeating until the end of the database is reached.
Only meaningful for databases opened with `dupsort=True`.
If the cursor is not yet positioned, it is moved to the first key in
the database, otherwise iteration proceeds from the current position.
.. code-block:: python
for key in cursor.iternext_nodup():
print("Key '%s' has %d values" % (key, cursor.count()))
"""
if not self._valid:
self.first()
return self._iter(_lib.MDB_NEXT_NODUP, keys, values)
def iterprev(self, keys=True, values=True):
"""Return a reverse iterator that yields the current element before
calling :py:meth:`prev`, until the start of the database is reached.
If the cursor is not yet positioned, it is moved to the last key in
the database, otherwise iteration proceeds from the current position.
::
>>> with env.begin() as txn:
... for i, (key, value) in enumerate(txn.cursor().iterprev()):
... print('%dth last item is (%r, %r)' % (1+i, key, value))
"""
if not self._valid:
self.last()
return self._iter(_lib.MDB_PREV, keys, values)
def iterprev_dup(self, keys=False, values=True):
"""Return a reverse iterator that yields the current value
("duplicate") of the current key before calling :py:meth:`prev_dup`,
repeating until the first value of the current key is reached.
Only meaningful for databases opened with `dupsort=True`.
"""
return self._iter(_lib.MDB_PREV_DUP, keys, values)
def iterprev_nodup(self, keys=True, values=False):
"""Return a reverse iterator that yields the current value
("duplicate") of the current key before calling :py:meth:`prev_nodup`,
repeating until the start of the database is reached.
If the cursor is not yet positioned, it is moved to the last key in
the database, otherwise iteration proceeds from the current position.
Only meaningful for databases opened with `dupsort=True`.
"""
if not self._valid:
self.last()
return self._iter(_lib.MDB_PREV_NODUP, keys, values)
def _cursor_get(self, op):
rc = _lib.mdb_cursor_get(self._cur, self._key, self._val, op)
self._valid = v = not rc
self._last_mutation = self.txn._mutations
if rc:
self._key.mv_size = 0
self._val.mv_size = 0
if rc != _lib.MDB_NOTFOUND:
if not (rc == _lib.EINVAL and op == _lib.MDB_GET_CURRENT):
raise _error("mdb_cursor_get", rc)
return v
def _cursor_get_kv(self, op, k, v):
rc = _lib.pymdb_cursor_get(self._cur, k, len(k), v, len(v),
self._key, self._val, op)
self._valid = v = not rc
if rc:
self._key.mv_size = 0
self._val.mv_size = 0
if rc != _lib.MDB_NOTFOUND:
if not (rc == _lib.EINVAL and op == _lib.MDB_GET_CURRENT):
raise _error("mdb_cursor_get", rc)
return v
def first(self):
"""Move to the first key in the database, returning ``True`` on success
or ``False`` if the database is empty.
If the database was opened with `dupsort=True` and the key contains
duplicates, the cursor is positioned on the first value ("duplicate").
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_FIRST
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_FIRST)
def first_dup(self):
"""Move to the first value ("duplicate") for the current key, returning
``True`` on success or ``False`` if the database is empty.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_FIRST_DUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_FIRST_DUP)
def last(self):
"""Move to the last key in the database, returning ``True`` on success
or ``False`` if the database is empty.
If the database was opened with `dupsort=True` and the key contains
duplicates, the cursor is positioned on the last value ("duplicate").
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_LAST
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_LAST)
def last_dup(self):
"""Move to the last value ("duplicate") for the current key, returning
``True`` on success or ``False`` if the database is empty.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_LAST_DUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_LAST_DUP)
def prev(self):
"""Move to the previous element, returning ``True`` on success or
``False`` if there is no previous item.
For databases opened with `dupsort=True`, moves to the previous data
item ("duplicate") for the current key if one exists, otherwise moves
to the previous key.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_PREV
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_PREV)
def prev_dup(self):
"""Move to the previous value ("duplicate") of the current key,
returning ``True`` on success or ``False`` if there is no previous
value.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_PREV_DUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_PREV_DUP)
def prev_nodup(self):
"""Move to the last value ("duplicate") of the previous key, returning
``True`` on success or ``False`` if there is no previous key.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_PREV_NODUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_PREV_NODUP)
def next(self):
"""Move to the next element, returning ``True`` on success or ``False``
if there is no next element.
For databases opened with `dupsort=True`, moves to the next value
("duplicate") for the current key if one exists, otherwise moves to the
first value of the next key.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_NEXT
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_NEXT)
def next_dup(self):
"""Move to the next value ("duplicate") of the current key, returning
``True`` on success or ``False`` if there is no next value.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_NEXT_DUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_NEXT_DUP)
def next_nodup(self):
"""Move to the first value ("duplicate") of the next key, returning
``True`` on success or ``False`` if there is no next key.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_NEXT_NODUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_NEXT_NODUP)
def set_key(self, key):
"""Seek exactly to `key`, returning ``True`` on success or ``False`` if
the exact key was not found. It is an error to :py:meth:`set_key` the
empty bytestring.
For databases opened with `dupsort=True`, moves to the first value
("duplicate") for the key.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_SET_KEY
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get_kv(_lib.MDB_SET_KEY, key, EMPTY_BYTES)
def set_key_dup(self, key, value):
"""Seek exactly to `(key, value)`, returning ``True`` on success or
``False`` if the exact key and value was not found. It is an error
to :py:meth:`set_key` the empty bytestring.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_GET_BOTH
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get_kv(_lib.MDB_GET_BOTH, key, value)
def get(self, key, default=None):
"""Equivalent to :py:meth:`set_key()`, except :py:meth:`value` is
returned when `key` is found, otherwise `default`.
"""
if self._cursor_get_kv(_lib.MDB_SET_KEY, key, EMPTY_BYTES):
return self.value()
return default
def getmulti(self, keys, dupdata=False, dupfixed_bytes=None, keyfixed=False):
"""Returns an iterable of `(key, value)` 2-tuples containing results
for each key in the iterable `keys`.
`keys`:
Iterable to read keys from.
`dupdata`:
If ``True`` and database was opened with `dupsort=True`, read
all duplicate values for each matching key.
`dupfixed_bytes`:
If database was opened with `dupsort=True` and `dupfixed=True`,
accepts the size of each value, in bytes, and applies an
optimization reducing the number of database lookups.
`keyfixed`:
If `dupfixed_bytes` is set and database key size is fixed,
setting keyfixed=True will result in this function returning
a memoryview to the results as a structured array of bytes.
The structured array can be instantiated by passing the
memoryview buffer to NumPy:
.. code-block:: python
key_bytes, val_bytes = 4, 8
                    dtype = np.dtype([('key', f'S{key_bytes}'), ('value', f'S{val_bytes}')])
                    arr = np.frombuffer(
                        cur.getmulti(keys, dupdata=True, dupfixed_bytes=val_bytes, keyfixed=True),
                        dtype=dtype
)
"""
if dupfixed_bytes and dupfixed_bytes < 0:
raise _error("dupfixed_bytes must be a positive integer.")
elif (dupfixed_bytes or keyfixed) and not dupdata:
raise _error("dupdata is required for dupfixed_bytes/key_bytes.")
elif keyfixed and not dupfixed_bytes:
raise _error("dupfixed_bytes is required for key_bytes.")
if dupfixed_bytes:
get_op = _lib.MDB_GET_MULTIPLE
next_op = _lib.MDB_NEXT_MULTIPLE
else:
get_op = _lib.MDB_GET_CURRENT
next_op = _lib.MDB_NEXT_DUP
a = bytearray()
lst = list()
for key in keys:
if self.set_key(key):
while self._valid:
self._cursor_get(get_op)
preload(self._val)
key = self._to_py(self._key)
val = self._to_py(self._val)
if dupfixed_bytes:
gen = (
(key, val[i:i + dupfixed_bytes])
for i in range(0, len(val), dupfixed_bytes))
if keyfixed:
for k, v in gen:
a.extend(k + v)
else:
for k, v in gen:
lst.append((k, v))
else:
lst.append((key, val))
if dupdata:
self._cursor_get(next_op)
else:
break
if keyfixed:
return memoryview(a)
else:
return lst
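    # Editor's sketch (hedged, not original code): typical getmulti() use against
    # a dupsort=True, dupfixed=True database; key names and sizes are assumptions.
    #
    #   pairs = cur.getmulti([b'k1', b'k2'], dupdata=True)          # list of (key, value) tuples
    #   packed = cur.getmulti([b'k1'], dupdata=True,
    #                         dupfixed_bytes=8, keyfixed=True)      # memoryview of fixed-size records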
def set_range(self, key):
"""Seek to the first key greater than or equal to `key`, returning
``True`` on success, or ``False`` to indicate key was past end of
database. Behaves like :py:meth:`first` if `key` is the empty
bytestring.
For databases opened with `dupsort=True`, moves to the first value
("duplicate") for the key.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_SET_RANGE
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
if not key:
return self.first()
return self._cursor_get_kv(_lib.MDB_SET_RANGE, key, EMPTY_BYTES)
def set_range_dup(self, key, value):
"""Seek to the first key/value pair greater than or equal to `key`,
returning ``True`` on success, or ``False`` to indicate that `value` was past the
        last value of `key` or that `(key, value)` was past the end of the database.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_GET_BOTH_RANGE
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
rc = self._cursor_get_kv(_lib.MDB_GET_BOTH_RANGE, key, value)
# issue #126: MDB_GET_BOTH_RANGE does not satisfy its documentation,
# and fails to update `key` and `value` on success. Therefore
# explicitly call MDB_GET_CURRENT after MDB_GET_BOTH_RANGE.
self._cursor_get(_lib.MDB_GET_CURRENT)
return rc
def delete(self, dupdata=False):
"""Delete the current element and move to the next, returning ``True``
on success or ``False`` if the database was empty.
If `dupdata` is ``True``, delete all values ("duplicates") for the
current key, otherwise delete only the currently positioned value. Only
meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_del()
<http://lmdb.tech/doc/group__mdb.html#ga26a52d3efcfd72e5bf6bd6960bf75f95>`_
"""
v = self._valid
if v:
flags = _lib.MDB_NODUPDATA if dupdata else 0
rc = _lib.mdb_cursor_del(self._cur, flags)
self.txn._mutations += 1
if rc:
raise _error("mdb_cursor_del", rc)
self._cursor_get(_lib.MDB_GET_CURRENT)
v = rc == 0
return v
def count(self):
"""Return the number of values ("duplicates") for the current key.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_count()
<http://lmdb.tech/doc/group__mdb.html#ga4041fd1e1862c6b7d5f10590b86ffbe2>`_
"""
countp = _ffi.new('size_t *')
rc = _lib.mdb_cursor_count(self._cur, countp)
if rc:
raise _error("mdb_cursor_count", rc)
return countp[0]
def put(self, key, val, dupdata=True, overwrite=True, append=False):
"""Store a record, returning ``True`` if it was written, or ``False``
to indicate the key was already present and `overwrite=False`. On
success, the cursor is positioned on the key.
Equivalent to `mdb_cursor_put()
<http://lmdb.tech/doc/group__mdb.html#ga1f83ccb40011837ff37cc32be01ad91e>`_
`key`:
Bytestring key to store.
`val`:
Bytestring value to store.
`dupdata`:
If ``False`` and database was opened with `dupsort=True`, will return
``False`` if the key already has that value. In other words, this only
affects the return value.
`overwrite`:
If ``False``, do not overwrite the value for the key if it
exists, just return ``False``. For databases opened with
`dupsort=True`, ``False`` will always be returned if a
duplicate key/value pair is inserted, regardless of the setting
for `overwrite`.
`append`:
If ``True``, append the pair to the end of the database without
comparing its order first. Appending a key that is not greater
than the highest existing key will fail and return ``False``.
"""
flags = 0
if not dupdata:
flags |= _lib.MDB_NODUPDATA
if not overwrite:
flags |= _lib.MDB_NOOVERWRITE
if append:
if self.txn._db._flags & _lib.MDB_DUPSORT:
flags |= _lib.MDB_APPENDDUP
else:
flags |= _lib.MDB_APPEND
rc = _lib.pymdb_cursor_put(self._cur, key, len(key), val, len(val), flags)
self.txn._mutations += 1
if rc:
if rc == _lib.MDB_KEYEXIST:
return False
raise _error("mdb_cursor_put", rc)
self._cursor_get(_lib.MDB_GET_CURRENT)
return True
def putmulti(self, items, dupdata=True, overwrite=True, append=False):
"""Invoke :py:meth:`put` for each `(key, value)` 2-tuple from the
iterable `items`. Elements must be exactly 2-tuples, they may not be of
any other type, or tuple subclass.
Returns a tuple `(consumed, added)`, where `consumed` is the number of
elements read from the iterable, and `added` is the number of new
entries added to the database. `added` may be less than `consumed` when
`overwrite=False`.
`items`:
Iterable to read records from.
`dupdata`:
If ``True`` and database was opened with `dupsort=True`, add
pair as a duplicate if the given key already exists. Otherwise
overwrite any existing matching key.
`overwrite`:
If ``False``, do not overwrite the value for the key if it
exists, just return ``False``. For databases opened with
`dupsort=True`, ``False`` will always be returned if a
duplicate key/value pair is inserted, regardless of the setting
for `overwrite`.
`append`:
If ``True``, append records to the end of the database without
comparing their order first. Appending a key that is not
greater than the highest existing key will cause corruption.
"""
flags = 0
if not dupdata:
flags |= _lib.MDB_NODUPDATA
if not overwrite:
flags |= _lib.MDB_NOOVERWRITE
if append:
if self.txn._db._flags & _lib.MDB_DUPSORT:
flags |= _lib.MDB_APPENDDUP
else:
flags |= _lib.MDB_APPEND
added = 0
skipped = 0
for key, value in items:
rc = _lib.pymdb_cursor_put(self._cur, key, len(key),
value, len(value), flags)
self.txn._mutations += 1
added += 1
if rc:
if rc == _lib.MDB_KEYEXIST:
skipped += 1
else:
raise _error("mdb_cursor_put", rc)
self._cursor_get(_lib.MDB_GET_CURRENT)
return added, added - skipped
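    # Editor's sketch (hedged, not original code): bulk-loading with putmulti().
    # The item values below are illustrative only.
    #
    #   items = [(b'k1', b'v1'), (b'k2', b'v2'), (b'k3', b'v3')]
    #   consumed, added = cur.putmulti(items, overwrite=False)
    #   # consumed == 3; added < consumed if some keys already existed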
def replace(self, key, val):
"""Store a record, returning its previous value if one existed. Returns
``None`` if no previous value existed. This uses the best available
mechanism to minimize the cost of a `set-and-return-previous`
operation.
For databases opened with `dupsort=True`, only the first data element
("duplicate") is returned if it existed, all data elements are removed
and the new `(key, data)` pair is inserted.
`key`:
Bytestring key to store.
`value`:
Bytestring value to store.
"""
if self.db._flags & _lib.MDB_DUPSORT:
if self._cursor_get_kv(_lib.MDB_SET_KEY, key, EMPTY_BYTES):
preload(self._val)
old = _mvstr(self._val)
self.delete(True)
else:
old = None
self.put(key, val)
return old
flags = _lib.MDB_NOOVERWRITE
keylen = len(key)
rc = _lib.pymdb_cursor_put(self._cur, key, keylen, val, len(val), flags)
self.txn._mutations += 1
if not rc:
return
if rc != _lib.MDB_KEYEXIST:
raise _error("mdb_cursor_put", rc)
self._cursor_get(_lib.MDB_GET_CURRENT)
preload(self._val)
old = _mvstr(self._val)
rc = _lib.pymdb_cursor_put(self._cur, key, keylen, val, len(val), 0)
self.txn._mutations += 1
if rc:
raise _error("mdb_cursor_put", rc)
self._cursor_get(_lib.MDB_GET_CURRENT)
return old
def pop(self, key):
"""Fetch a record's value then delete it. Returns ``None`` if no
previous value existed. This uses the best available mechanism to
minimize the cost of a `delete-and-return-previous` operation.
For databases opened with `dupsort=True`, the first data element
("duplicate") for the key will be popped.
`key`:
Bytestring key to delete.
"""
if self._cursor_get_kv(_lib.MDB_SET_KEY, key, EMPTY_BYTES):
preload(self._val)
old = _mvstr(self._val)
rc = _lib.mdb_cursor_del(self._cur, 0)
self.txn._mutations += 1
if rc:
raise _error("mdb_cursor_del", rc)
self._cursor_get(_lib.MDB_GET_CURRENT)
return old
def _iter_from(self, k, reverse):
"""Helper for centidb. Please do not rely on this interface, it may be
removed in future.
"""
if not k and not reverse:
found = self.first()
else:
found = self.set_range(k)
if reverse:
if not found:
self.last()
return self.iterprev()
else:
if not found:
return iter(())
return self.iternext()
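# Editor's addition: a minimal smoke test of the API sketched above. It is an
# illustrative example only (not part of the original module) and is guarded so
# importing the module is unaffected; the temporary directory is an assumption.
if __name__ == '__main__':
    import tempfile
    _demo_dir = tempfile.mkdtemp(prefix='lmdb-demo-')
    _env = open(_demo_dir)                      # module-level alias for Environment
    with _env.begin(write=True) as _txn:
        _txn.put(b'a', b'1')
        _txn.put(b'b', b'2')
    with _env.begin() as _txn:
        with _txn.cursor() as _cur:
            for _k, _v in _cur:                 # iterate all key/value pairs
                print(_k, _v)
    _env.close()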
|
test_scripts/pyfora2/containerTests.py | ufora/ufora | 571 | 7686 | # Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pyfora
import ufora.config.Setup as Setup
import ufora.FORA.python.PurePython.DictTestCases as DictTestCases
import ufora.FORA.python.PurePython.ListTestCases as ListTestCases
import ufora.FORA.python.PurePython.TupleTestCases as TupleTestCases
import ufora.FORA.python.PurePython.ExecutorTestCommon as ExecutorTestCommon
import ufora.test.ClusterSimulation as ClusterSimulation
class ExecutorSimulationTest(
unittest.TestCase,
ExecutorTestCommon.ExecutorTestCommon,
DictTestCases.DictTestCases,
ListTestCases.ListTestCases,
TupleTestCases.TupleTestCases):
@classmethod
def setUpClass(cls):
cls.config = Setup.config()
cls.executor = None
cls.simulation = ClusterSimulation.Simulator.createGlobalSimulator()
cls.simulation.startService()
cls.simulation.getDesirePublisher().desireNumberOfWorkers(1)
@classmethod
def tearDownClass(cls):
cls.simulation.stopService()
@classmethod
def create_executor(cls, allowCached=True):
if not allowCached:
return pyfora.connect('http://localhost:30000')
if cls.executor is None:
cls.executor = pyfora.connect('http://localhost:30000')
cls.executor.stayOpenOnExit = True
return cls.executor
if __name__ == '__main__':
import ufora.config.Mainline as Mainline
Mainline.UnitTestMainline()
|
tests/functional/controllers/test_group_controller_superuser.py | roscisz/TensorHive | 129 | 7762 |
from tensorhive.models.Group import Group
from fixtures.controllers import API_URI as BASE_URI, HEADERS
from http import HTTPStatus
from importlib import reload
import json
import auth_patcher
ENDPOINT = BASE_URI + '/groups'
def setup_module(_):
auth_patches = auth_patcher.get_patches(superuser=True)
for auth_patch in auth_patches:
auth_patch.start()
for module in auth_patcher.CONTROLLER_MODULES:
reload(module)
for auth_patch in auth_patches:
auth_patch.stop()
# POST /groups
def test_create_group(tables, client):
group_name = 'TestGroup'
data = {'name': group_name}
resp = client.post(ENDPOINT, headers=HEADERS, data=json.dumps(data))
resp_json = json.loads(resp.data.decode('utf-8'))
assert resp.status_code == HTTPStatus.CREATED
assert resp_json['group']['id'] is not None
assert resp_json['group']['name'] == group_name
assert Group.get(int(resp_json['group']['id'])) is not None
# PUT /groups/{id}
def test_update_group(tables, client, new_group):
new_group.save()
new_group_name = new_group.name + '111'
resp = client.put(ENDPOINT + '/' + str(new_group.id), headers=HEADERS, data=json.dumps({'name': new_group_name}))
resp_json = json.loads(resp.data.decode('utf-8'))
assert resp.status_code == HTTPStatus.OK
assert resp_json['group']['name'] == new_group_name
assert Group.get(new_group.id).name == new_group_name
# PUT /groups/{id} - nonexistent id
def test_update_group_that_doesnt_exist(tables, client):
non_existent_id = '777'
resp = client.put(ENDPOINT + '/' + non_existent_id, headers=HEADERS, data=json.dumps({'name': 'test'}))
assert resp.status_code == HTTPStatus.NOT_FOUND
# DELETE /groups/{id}
def test_delete_group(tables, client, new_group):
new_group.save()
resp = client.delete(ENDPOINT + '/' + str(new_group.id), headers=HEADERS)
assert resp.status_code == HTTPStatus.OK
# Let's get all groups to verify
resp = client.get(ENDPOINT, headers=HEADERS)
resp_json = json.loads(resp.data.decode('utf-8'))
assert len(resp_json) == 0
# DELETE /groups/{id} - nonexistent id
def test_delete_group_that_doesnt_exist(tables, client):
non_existent_id = '777'
resp = client.delete(ENDPOINT + '/' + non_existent_id, headers=HEADERS)
assert resp.status_code == HTTPStatus.NOT_FOUND
# PUT /groups/{id}/users/{id}
def test_add_user_to_a_group(tables, client, new_group, new_user):
new_group.save()
new_user.save()
resp = client.put(ENDPOINT + '/{}/users/{}'.format(new_group.id, new_user.id), headers=HEADERS)
assert resp.status_code == HTTPStatus.OK
assert new_group in new_user.groups
assert new_user in new_group.users
# DELETE /groups/{id}/users/{id}
def test_remove_user_from_a_group(tables, client, new_group_with_member):
new_group_with_member.save()
user = new_group_with_member.users[0]
resp = client.delete(ENDPOINT + '/{}/users/{}'.format(new_group_with_member.id, user.id), headers=HEADERS)
assert resp.status_code == HTTPStatus.OK
assert new_group_with_member not in user.groups
assert user not in new_group_with_member.users
# PUT /groups/{id}/users/{id} - nonexistent user id
def test_add_nonexistent_user_to_a_group(tables, client, new_group):
new_group.save()
nonexistent_user_id = '777'
resp = client.put(ENDPOINT + '/{}/users/{}'.format(new_group.id, nonexistent_user_id), headers=HEADERS)
assert resp.status_code == HTTPStatus.NOT_FOUND
# PUT /groups/{id}/users/{id} - nonexistent group id
def test_add_user_to_nonexistent_group(tables, client, new_user):
new_user.save()
nonexistent_group_id = '777'
resp = client.put(ENDPOINT + '/{}/users/{}'.format(nonexistent_group_id, new_user.id), headers=HEADERS)
assert resp.status_code == HTTPStatus.NOT_FOUND
# DELETE /groups/{id}/users/{id} - nonexistent user id
def test_remove_nonexistent_user_from_a_group(tables, client, new_group):
new_group.save()
nonexistent_user_id = '777'
resp = client.delete(ENDPOINT + '/{}/users/{}'.format(new_group.id, nonexistent_user_id), headers=HEADERS)
assert resp.status_code == HTTPStatus.NOT_FOUND
# DELETE /groups/{id}/users/{id} - nonexistent group id
def test_remove_user_from_a_nonexistent_group(tables, client, new_user):
new_user.save()
nonexistent_group_id = '777'
resp = client.delete(ENDPOINT + '/{}/users/{}'.format(nonexistent_group_id, new_user.id), headers=HEADERS)
assert resp.status_code == HTTPStatus.NOT_FOUND
# PUT /groups/{id}
def test_set_group_as_a_default(tables, client, new_group):
new_group.save()
resp = client.put(ENDPOINT + '/{}'.format(new_group.id), data=json.dumps({'isDefault': True}), headers=HEADERS)
assert resp.status_code == HTTPStatus.OK
assert Group.get(new_group.id).is_default
# PUT /groups/{id}
def test_mark_default_group_as_non_default(tables, client, new_group):
new_group.is_default = True
new_group.save()
resp = client.put(ENDPOINT + '/{}'.format(new_group.id), data=json.dumps({'isDefault': False}),
headers=HEADERS)
assert resp.status_code == HTTPStatus.OK
assert Group.get(new_group.id).is_default is False
|
test/mitmproxy/addons/test_proxyserver.py | KarlParkinson/mitmproxy | 24,939 | 7769 |
import asyncio
from contextlib import asynccontextmanager
import pytest
from mitmproxy import exceptions
from mitmproxy.addons.proxyserver import Proxyserver
from mitmproxy.connection import Address
from mitmproxy.proxy import layers, server_hooks
from mitmproxy.proxy.layers.http import HTTPMode
from mitmproxy.test import taddons, tflow
from mitmproxy.test.tflow import tclient_conn, tserver_conn
class HelperAddon:
def __init__(self):
self.flows = []
self.layers = [
lambda ctx: layers.modes.HttpProxy(ctx),
lambda ctx: layers.HttpLayer(ctx, HTTPMode.regular),
lambda ctx: layers.TCPLayer(ctx),
]
def request(self, f):
self.flows.append(f)
def tcp_start(self, f):
self.flows.append(f)
def next_layer(self, nl):
nl.layer = self.layers.pop(0)(nl.context)
@asynccontextmanager
async def tcp_server(handle_conn) -> Address:
server = await asyncio.start_server(handle_conn, '127.0.0.1', 0)
await server.start_serving()
try:
yield server.sockets[0].getsockname()
finally:
server.close()
@pytest.mark.asyncio
async def test_start_stop():
async def server_handler(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
assert await reader.readuntil(b"\r\n\r\n") == b"GET /hello HTTP/1.1\r\n\r\n"
writer.write(b"HTTP/1.1 204 No Content\r\n\r\n")
await writer.drain()
writer.close()
ps = Proxyserver()
with taddons.context(ps) as tctx:
state = HelperAddon()
tctx.master.addons.add(state)
async with tcp_server(server_handler) as addr:
tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
assert not ps.server
ps.running()
await tctx.master.await_log("Proxy server listening", level="info")
assert ps.server
proxy_addr = ps.server.sockets[0].getsockname()[:2]
reader, writer = await asyncio.open_connection(*proxy_addr)
req = f"GET http://{addr[0]}:{addr[1]}/hello HTTP/1.1\r\n\r\n"
writer.write(req.encode())
assert await reader.readuntil(b"\r\n\r\n") == b"HTTP/1.1 204 No Content\r\n\r\n"
assert repr(ps) == "ProxyServer(running, 1 active conns)"
tctx.configure(ps, server=False)
await tctx.master.await_log("Stopping server", level="info")
assert not ps.server
assert state.flows
assert state.flows[0].request.path == "/hello"
assert state.flows[0].response.status_code == 204
# Waiting here until everything is really torn down... takes some effort.
conn_handler = list(ps._connections.values())[0]
client_handler = conn_handler.transports[conn_handler.client].handler
writer.close()
await writer.wait_closed()
try:
await client_handler
except asyncio.CancelledError:
pass
for _ in range(5):
# Get all other scheduled coroutines to run.
await asyncio.sleep(0)
assert repr(ps) == "ProxyServer(stopped, 0 active conns)"
@pytest.mark.asyncio
async def test_inject() -> None:
async def server_handler(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
while s := await reader.read(1):
writer.write(s.upper())
ps = Proxyserver()
with taddons.context(ps) as tctx:
state = HelperAddon()
tctx.master.addons.add(state)
async with tcp_server(server_handler) as addr:
tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
ps.running()
await tctx.master.await_log("Proxy server listening", level="info")
proxy_addr = ps.server.sockets[0].getsockname()[:2]
reader, writer = await asyncio.open_connection(*proxy_addr)
req = f"CONNECT {addr[0]}:{addr[1]} HTTP/1.1\r\n\r\n"
writer.write(req.encode())
assert await reader.readuntil(b"\r\n\r\n") == b"HTTP/1.1 200 Connection established\r\n\r\n"
writer.write(b"a")
assert await reader.read(1) == b"A"
ps.inject_tcp(state.flows[0], False, b"b")
assert await reader.read(1) == b"B"
ps.inject_tcp(state.flows[0], True, b"c")
assert await reader.read(1) == b"c"
@pytest.mark.asyncio
async def test_inject_fail() -> None:
ps = Proxyserver()
with taddons.context(ps) as tctx:
ps.inject_websocket(
tflow.tflow(),
True,
b"test"
)
await tctx.master.await_log("Cannot inject WebSocket messages into non-WebSocket flows.", level="warn")
ps.inject_tcp(
tflow.tflow(),
True,
b"test"
)
await tctx.master.await_log("Cannot inject TCP messages into non-TCP flows.", level="warn")
ps.inject_websocket(
tflow.twebsocketflow(),
True,
b"test"
)
await tctx.master.await_log("Flow is not from a live connection.", level="warn")
ps.inject_websocket(
tflow.ttcpflow(),
True,
b"test"
)
await tctx.master.await_log("Flow is not from a live connection.", level="warn")
@pytest.mark.asyncio
async def test_warn_no_nextlayer():
"""
Test that we log an error if the proxy server is started without NextLayer addon.
That is a mean trap to fall into when writing end-to-end tests.
"""
ps = Proxyserver()
with taddons.context(ps) as tctx:
tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
ps.running()
await tctx.master.await_log("Proxy server listening at", level="info")
assert tctx.master.has_log("Warning: Running proxyserver without nextlayer addon!", level="warn")
await ps.shutdown_server()
def test_self_connect():
server = tserver_conn()
client = tclient_conn()
server.address = ("localhost", 8080)
ps = Proxyserver()
with taddons.context(ps) as tctx:
# not calling .running() here to avoid unnecessary socket
ps.options = tctx.options
ps.server_connect(
server_hooks.ServerConnectionHookData(server, client)
)
assert server.error == "Stopped mitmproxy from recursively connecting to itself."
def test_options():
ps = Proxyserver()
with taddons.context(ps) as tctx:
with pytest.raises(exceptions.OptionsError):
tctx.configure(ps, body_size_limit="invalid")
tctx.configure(ps, body_size_limit="1m")
with pytest.raises(exceptions.OptionsError):
tctx.configure(ps, stream_large_bodies="invalid")
tctx.configure(ps, stream_large_bodies="1m")
|
evennia/scripts/migrations/0013_auto_20191025_0831.py | Jaykingamez/evennia | 1,544 | 7773 | # Generated by Django 2.2.6 on 2019-10-25 12:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("scripts", "0012_auto_20190128_1820")]
operations = [
migrations.AlterField(
model_name="scriptdb",
name="db_typeclass_path",
field=models.CharField(
db_index=True,
help_text="this defines what 'type' of entity this is. This variable holds a Python path to a module with a valid Evennia Typeclass.",
max_length=255,
null=True,
verbose_name="typeclass",
),
)
]
|
tests/test_pyqrcodeng_issue13.py | dbajar/segno | 254 | 7774 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - 2020 -- <NAME>
# All rights reserved.
#
# License: BSD License
#
"""\
Test against issue <https://github.com/pyqrcode/pyqrcodeNG/pull/13/>.
The initial test was created by Mathieu <https://github.com/albatros69>,
see the above mentioned pull request.
Adapted for Segno to check if it suffers from the same problem.
"""
from __future__ import absolute_import, unicode_literals
import segno
def test_autodetect():
data = 'Émetteur'
qr = segno.make(data)
assert qr.mode == 'byte'
def test_encoding():
encoding = 'iso-8859-15'
data = 'Émetteur'
qr = segno.make(data.encode(encoding))
assert qr.mode == 'byte'
qr2 = segno.make(data, encoding=encoding)
assert qr2 == qr
if __name__ == '__main__':
import pytest
pytest.main([__file__])
|
osp/test/corpus/syllabus/test_text.py | davidmcclure/open-syllabus-project | 220 | 7787 |
from osp.corpus.syllabus import Syllabus
from osp.test.utils import requires_tika
def test_empty(mock_osp):
"""
Should return None if the file is empty.
"""
path = mock_osp.add_file(content='', ftype='plain')
syllabus = Syllabus(path)
assert syllabus.text == None
def test_plaintext(mock_osp):
"""
Should extract text from vanilla text files.
"""
path = mock_osp.add_file(content='text', ftype='plain')
syllabus = Syllabus(path)
assert syllabus.text == 'text'
def test_html(mock_osp):
"""
Should extract text from HTML files.
"""
path = mock_osp.add_file(content='<p>text</p>', ftype='html')
syllabus = Syllabus(path)
assert syllabus.text == 'text'
def test_pdf(mock_osp):
"""
Should extract text from PDF files.
"""
path = mock_osp.add_file(content='text', ftype='pdf')
syllabus = Syllabus(path)
assert syllabus.text.strip() == 'text'
@requires_tika
def test_office(mock_osp):
"""
Should extract text from office files.
"""
path = mock_osp.add_file(content='text', ftype='docx')
syllabus = Syllabus(path)
assert syllabus.text.strip() == 'text'
|
examples/hello-pt/custom/cifar10validator.py | ArnovanHilten/NVFlare | 155 | 7830 | # Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10
from torchvision.transforms import Compose, ToTensor, Normalize
from nvflare.apis.dxo import from_shareable, DataKind, DXO
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants
from simple_network import SimpleNetwork
class Cifar10Validator(Executor):
def __init__(self, validate_task_name=AppConstants.TASK_VALIDATION):
super(Cifar10Validator, self).__init__()
self._validate_task_name = validate_task_name
# Setup the model
self.model = SimpleNetwork()
self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
self.model.to(self.device)
# Preparing the dataset for testing.
transforms = Compose([
ToTensor(),
Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
self.test_data = CIFAR10(root='~/data', train=False, transform=transforms)
self.test_loader = DataLoader(self.test_data, batch_size=4, shuffle=False)
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
if task_name == self._validate_task_name:
model_owner = "?"
try:
try:
dxo = from_shareable(shareable)
except:
self.log_error(fl_ctx, "Error in extracting dxo from shareable.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Ensure data_kind is weights.
if not dxo.data_kind == DataKind.WEIGHTS:
self.log_exception(fl_ctx, f"DXO is of type {dxo.data_kind} but expected type WEIGHTS.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Extract weights and ensure they are tensor.
model_owner = shareable.get_header(AppConstants.MODEL_OWNER, "?")
weights = {k: torch.as_tensor(v, device=self.device) for k, v in dxo.data.items()}
# Get validation accuracy
val_accuracy = self.do_validation(weights, abort_signal)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_info(fl_ctx, f"Accuracy when validating {model_owner}'s model on"
f" {fl_ctx.get_identity_name()}"f's data: {val_accuracy}')
dxo = DXO(data_kind=DataKind.METRICS, data={'val_acc': val_accuracy})
return dxo.to_shareable()
except:
self.log_exception(fl_ctx, f"Exception in validating model from {model_owner}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
else:
return make_reply(ReturnCode.TASK_UNKNOWN)
def do_validation(self, weights, abort_signal):
self.model.load_state_dict(weights)
self.model.eval()
correct = 0
total = 0
with torch.no_grad():
for i, (images, labels) in enumerate(self.test_loader):
if abort_signal.triggered:
return 0
images, labels = images.to(self.device), labels.to(self.device)
output = self.model(images)
_, pred_label = torch.max(output, 1)
correct += (pred_label == labels).sum().item()
total += images.size()[0]
metric = correct/float(total)
return metric
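# Editor's sketch (assumption, not part of the NVFlare example): the validator can
# be exercised locally, outside a federation, by building a weights dict and
# calling do_validation() directly with a fresh Signal as the abort handle.
#
#   validator = Cifar10Validator()
#   weights = {k: v for k, v in SimpleNetwork().state_dict().items()}
#   accuracy = validator.do_validation(weights, Signal())
#   print(f"local CIFAR-10 accuracy: {accuracy:.4f}")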
|
api-reference-examples/python/te-tag-query/api-example-update.py | b-bold/ThreatExchange | 997 | 7862 | #!/usr/bin/env python
# ================================================================
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ================================================================
import sys
import json
import TE
TE.Net.setAppTokenFromEnvName("TX_ACCESS_TOKEN")
postParams = {
"descriptor_id": "4036655176350945", # ID of the descriptor to be updated
"reactions": "INGESTED,IN_REVIEW",
}
showURLs = False
dryRun = False
validationErrorMessage, serverSideError, responseBody = TE.Net.updateThreatDescriptor(
postParams, showURLs, dryRun
)
if validationErrorMessage is not None:
sys.stderr.write(validationErrorMessage + "\n")
sys.exit(1)
if serverSideError is not None:
sys.stderr.write(str(serverSideError) + "\n")
sys.stderr.write(json.dumps(responseBody) + "\n")
sys.exit(1)
print(json.dumps(responseBody))
|
aliyun-python-sdk-ehpc/aliyunsdkehpc/request/v20180412/EditJobTemplateRequest.py | yndu13/aliyun-openapi-python-sdk | 1,001 | 7866 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkehpc.endpoint import endpoint_data
class EditJobTemplateRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'EHPC', '2018-04-12', 'EditJobTemplate')
self.set_method('GET')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_StderrRedirectPath(self):
return self.get_query_params().get('StderrRedirectPath')
def set_StderrRedirectPath(self,StderrRedirectPath):
self.add_query_param('StderrRedirectPath',StderrRedirectPath)
def get_ClockTime(self):
return self.get_query_params().get('ClockTime')
def set_ClockTime(self,ClockTime):
self.add_query_param('ClockTime',ClockTime)
def get_CommandLine(self):
return self.get_query_params().get('CommandLine')
def set_CommandLine(self,CommandLine):
self.add_query_param('CommandLine',CommandLine)
def get_ArrayRequest(self):
return self.get_query_params().get('ArrayRequest')
def set_ArrayRequest(self,ArrayRequest):
self.add_query_param('ArrayRequest',ArrayRequest)
def get_PackagePath(self):
return self.get_query_params().get('PackagePath')
def set_PackagePath(self,PackagePath):
self.add_query_param('PackagePath',PackagePath)
def get_Mem(self):
return self.get_query_params().get('Mem')
def set_Mem(self,Mem):
self.add_query_param('Mem',Mem)
def get_StdoutRedirectPath(self):
return self.get_query_params().get('StdoutRedirectPath')
def set_StdoutRedirectPath(self,StdoutRedirectPath):
self.add_query_param('StdoutRedirectPath',StdoutRedirectPath)
def get_Variables(self):
return self.get_query_params().get('Variables')
def set_Variables(self,Variables):
self.add_query_param('Variables',Variables)
def get_RunasUser(self):
return self.get_query_params().get('RunasUser')
def set_RunasUser(self,RunasUser):
self.add_query_param('RunasUser',RunasUser)
def get_ReRunable(self):
return self.get_query_params().get('ReRunable')
def set_ReRunable(self,ReRunable):
self.add_query_param('ReRunable',ReRunable)
def get_Thread(self):
return self.get_query_params().get('Thread')
def set_Thread(self,Thread):
self.add_query_param('Thread',Thread)
def get_TemplateId(self):
return self.get_query_params().get('TemplateId')
def set_TemplateId(self,TemplateId):
self.add_query_param('TemplateId',TemplateId)
def get_Priority(self):
return self.get_query_params().get('Priority')
def set_Priority(self,Priority):
self.add_query_param('Priority',Priority)
def get_Gpu(self):
return self.get_query_params().get('Gpu')
def set_Gpu(self,Gpu):
self.add_query_param('Gpu',Gpu)
def get_Node(self):
return self.get_query_params().get('Node')
def set_Node(self,Node):
self.add_query_param('Node',Node)
def get_Task(self):
return self.get_query_params().get('Task')
def set_Task(self,Task):
self.add_query_param('Task',Task)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name)
def get_Queue(self):
return self.get_query_params().get('Queue')
def set_Queue(self,Queue):
self.add_query_param('Queue',Queue) |
machine_learning/deep_reinforcement_learning_grasping/drlgrasp/drlgrasp/pybullet_envs/kuka_reach_with_visual.py | Hinson-A/guyueclass | 227 | 7885 | import pybullet as p
import pybullet_data
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
from math import sqrt
import random
import time
import math
import cv2
import torch
import os
def random_crop(imgs, out):
"""
args:
imgs: shape (B,C,H,W)
out: output size (e.g. 84)
"""
n, c, h, w = imgs.shape
crop_max = h - out + 1
w1 = np.random.randint(0, crop_max, n)
h1 = np.random.randint(0, crop_max, n)
cropped = np.empty((n, c, out, out), dtype=imgs.dtype)
for i, (img, w11, h11) in enumerate(zip(imgs, w1, h1)):
cropped[i] = img[:, h11:h11 + out, w11:w11 + out]
return cropped
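
# Illustrative shape check for random_crop (sizes are arbitrary examples):
#   batch = np.zeros((8, 1, 96, 96), dtype=np.float32)
#   assert random_crop(batch, 84).shape == (8, 1, 84, 84)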
class KukaReachVisualEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 50
}
kMaxEpisodeSteps = 700
kImageSize = {'width': 96, 'height': 96}
kFinalImageSize = {'width': 84, 'height': 84}
def __init__(self, is_render=False, is_good_view=False):
self.is_render = is_render
self.is_good_view = is_good_view
if self.is_render:
p.connect(p.GUI)
else:
p.connect(p.DIRECT)
self.x_low_obs = 0.2
self.x_high_obs = 0.7
self.y_low_obs = -0.3
self.y_high_obs = 0.3
self.z_low_obs = 0
self.z_high_obs = 0.55
self.x_low_action = -0.4
self.x_high_action = 0.4
self.y_low_action = -0.4
self.y_high_action = 0.4
self.z_low_action = -0.6
self.z_high_action = 0.3
self.step_counter = 0
self.urdf_root_path = pybullet_data.getDataPath()
# lower limits for null space
self.lower_limits = [-.967, -2, -2.96, 0.19, -2.96, -2.09, -3.05]
# upper limits for null space
self.upper_limits = [.967, 2, 2.96, 2.29, 2.96, 2.09, 3.05]
# joint ranges for null space
self.joint_ranges = [5.8, 4, 5.8, 4, 5.8, 4, 6]
# restposes for null space
self.rest_poses = [0, 0, 0, 0.5 * math.pi, 0, -math.pi * 0.5 * 0.66, 0]
# joint damping coefficents
self.joint_damping = [
0.00001, 0.00001, 0.00001, 0.00001, 0.00001, 0.00001, 0.00001
]
self.init_joint_positions = [
0.006418, 0.413184, -0.011401, -1.589317, 0.005379, 1.137684,
-0.006539
]
self.orientation = p.getQuaternionFromEuler(
[0., -math.pi, math.pi / 2.])
self.camera_parameters = {
'width': 960.,
'height': 720,
'fov': 60,
'near': 0.1,
'far': 100.,
'eye_position': [0.59, 0, 0.8],
'target_position': [0.55, 0, 0.05],
'camera_up_vector':
[1, 0, 0], # I really do not know the parameter's effect.
'light_direction': [
0.5, 0, 1
], # the direction is from the light source position to the origin of the world frame.
}
self.view_matrix = p.computeViewMatrixFromYawPitchRoll(
cameraTargetPosition=[0.55, 0, 0.05],
distance=.7,
yaw=90,
pitch=-70,
roll=0,
upAxisIndex=2)
self.projection_matrix = p.computeProjectionMatrixFOV(
fov=self.camera_parameters['fov'],
aspect=self.camera_parameters['width'] /
self.camera_parameters['height'],
nearVal=self.camera_parameters['near'],
farVal=self.camera_parameters['far'])
p.configureDebugVisualizer(lightPosition=[5, 0, 5])
p.resetDebugVisualizerCamera(cameraDistance=1.5,
cameraYaw=0,
cameraPitch=-40,
cameraTargetPosition=[0.55, -0.35, 0.2])
self.action_space = spaces.Box(low=np.array(
[self.x_low_action, self.y_low_action, self.z_low_action]),
high=np.array([
self.x_high_action,
self.y_high_action,
self.z_high_action
]),
dtype=np.float32)
self.observation_space = spaces.Box(low=0, high=1,
shape=(1, self.kFinalImageSize['width'], self.kFinalImageSize['height']))
self.seed()
self.reset()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self):
self.step_counter = 0
p.resetSimulation()
# p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0)
self.terminated = False
p.setGravity(0, 0, -10)
        # These are the surrounding white debug lines, used to check whether the obs boundary is exceeded
p.addUserDebugLine(
lineFromXYZ=[self.x_low_obs, self.y_low_obs, 0],
lineToXYZ=[self.x_low_obs, self.y_low_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_low_obs, self.y_high_obs, 0],
lineToXYZ=[self.x_low_obs, self.y_high_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_high_obs, self.y_low_obs, 0],
lineToXYZ=[self.x_high_obs, self.y_low_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_high_obs, self.y_high_obs, 0],
lineToXYZ=[self.x_high_obs, self.y_high_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_low_obs, self.y_low_obs, self.z_high_obs],
lineToXYZ=[self.x_high_obs, self.y_low_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_low_obs, self.y_high_obs, self.z_high_obs],
lineToXYZ=[self.x_high_obs, self.y_high_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_low_obs, self.y_low_obs, self.z_high_obs],
lineToXYZ=[self.x_low_obs, self.y_high_obs, self.z_high_obs])
p.addUserDebugLine(
lineFromXYZ=[self.x_high_obs, self.y_low_obs, self.z_high_obs],
lineToXYZ=[self.x_high_obs, self.y_high_obs, self.z_high_obs])
p.loadURDF(os.path.join(self.urdf_root_path, "plane.urdf"),
basePosition=[0, 0, -0.65])
self.kuka_id = p.loadURDF(os.path.join(self.urdf_root_path,
"kuka_iiwa/model.urdf"),
useFixedBase=True)
table_uid = p.loadURDF(os.path.join(self.urdf_root_path,
"table/table.urdf"),
basePosition=[0.5, 0, -0.65])
p.changeVisualShape(table_uid, -1, rgbaColor=[1, 1, 1, 1])
self.object_id = p.loadURDF(os.path.join(self.urdf_root_path,
"random_urdfs/000/000.urdf"),
basePosition=[
random.uniform(self.x_low_obs,
self.x_high_obs),
random.uniform(self.y_low_obs,
self.y_high_obs), 0.01
])
self.num_joints = p.getNumJoints(self.kuka_id)
for i in range(self.num_joints):
p.resetJointState(
bodyUniqueId=self.kuka_id,
jointIndex=i,
targetValue=self.init_joint_positions[i],
)
self.robot_pos_obs = p.getLinkState(self.kuka_id,
self.num_joints - 1)[4]
p.stepSimulation()
(_, _, px, _,
_) = p.getCameraImage(width=960,
height=960,
viewMatrix=self.view_matrix,
projectionMatrix=self.projection_matrix,
renderer=p.ER_BULLET_HARDWARE_OPENGL)
self.images = px
p.enableJointForceTorqueSensor(bodyUniqueId=self.kuka_id,
jointIndex=self.num_joints - 1,
enableSensor=True)
self.object_pos = p.getBasePositionAndOrientation(self.object_id)[0]
        self.images = self.images[:, :, :3]  # the 4th channel is the alpha channel, we do not need it.
return self._process_image(self.images)
def _process_image(self, image):
"""Convert the RGB pic to gray pic and add a channel 1
Args:
image ([type]): [description]
"""
if image is not None:
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
image = cv2.resize(image, (self.kImageSize['width'], self.kImageSize['height']))[None, :, :] / 255.
return image
else:
return np.zeros((1, self.kImageSize['width'], self.kImageSize['height']))
def step(self, action):
dv = 0.005
dx = action[0] * dv
dy = action[1] * dv
dz = action[2] * dv
self.current_pos = p.getLinkState(self.kuka_id, self.num_joints - 1)[4]
self.new_robot_pos = [
self.current_pos[0] + dx, self.current_pos[1] + dy,
self.current_pos[2] + dz
]
self.robot_joint_positions = p.calculateInverseKinematics(
bodyUniqueId=self.kuka_id,
endEffectorLinkIndex=self.num_joints - 1,
targetPosition=[
self.new_robot_pos[0], self.new_robot_pos[1],
self.new_robot_pos[2]
],
targetOrientation=self.orientation,
jointDamping=self.joint_damping,
)
for i in range(self.num_joints):
p.resetJointState(
bodyUniqueId=self.kuka_id,
jointIndex=i,
targetValue=self.robot_joint_positions[i],
)
p.stepSimulation()
        # If is_good_view was set at the start of the code, the robot arm moves more slowly, which makes it easier to observe
if self.is_good_view:
time.sleep(0.05)
self.step_counter += 1
return self._reward()
def _reward(self):
        # Note: be sure to take the value at index 4; see the pybullet manual for the description of this function's return values
self.robot_state = p.getLinkState(self.kuka_id, self.num_joints - 1)[4]
self.object_state = np.array(
p.getBasePositionAndOrientation(self.object_id)[0]).astype(
np.float32)
square_dx = (self.robot_state[0] - self.object_state[0]) ** 2
square_dy = (self.robot_state[1] - self.object_state[1]) ** 2
square_dz = (self.robot_state[2] - self.object_state[2]) ** 2
        # Use the distance between the robot arm's end effector and the object as the basis of the reward function
self.distance = sqrt(square_dx + square_dy + square_dz)
# print(self.distance)
x = self.robot_state[0]
y = self.robot_state[1]
z = self.robot_state[2]
        # If the end effector moves outside the obs space, the episode is also considered done and a small penalty is given
terminated = bool(x < self.x_low_obs or x > self.x_high_obs
or y < self.y_low_obs or y > self.y_high_obs
or z < self.z_low_obs or z > self.z_high_obs)
if terminated:
reward = -0.1
self.terminated = True
        # If the arm keeps idling and cannot touch the object within the maximum number of steps, a penalty is also given
elif self.step_counter > self.kMaxEpisodeSteps:
reward = -0.1
self.terminated = True
elif self.distance < 0.1:
reward = 1
self.terminated = True
else:
reward = 0
self.terminated = False
        info = {'distance': self.distance}
(_, _, px, _,
_) = p.getCameraImage(width=960,
height=960,
viewMatrix=self.view_matrix,
projectionMatrix=self.projection_matrix,
renderer=p.ER_BULLET_HARDWARE_OPENGL)
self.images = px
self.processed_image = self._process_image(self.images)
# self.observation=self.robot_state
self.observation = self.object_state
return self.processed_image, reward, self.terminated, info
def close(self):
p.disconnect()
def _get_force_sensor_value(self):
force_sensor_value = p.getJointState(bodyUniqueId=self.kuka_id,
jointIndex=self.num_joints -
1)[2][2]
# the first 2 stands for jointReactionForces, the second 2 stands for Fz,
# the pybullet methods' return is a tuple,so can not
# index it with str like dict. I think it can be improved
# that return value is a dict rather than tuple.
return force_sensor_value
class CustomSkipFrame(gym.Wrapper):
""" Make a 4 frame skip, so the observation space will change to (4,84,84) from (1,84,84)
Args:
gym ([type]): [description]
"""
def __init__(self, env, skip=4):
super(CustomSkipFrame, self).__init__(env)
self.observation_space = spaces.Box(low=0,
high=1,
shape=(skip, self.kFinalImageSize['width'], self.kFinalImageSize['height']))
self.skip = skip
def step(self, action):
total_reward = 0
states = []
state, reward, done, info = self.env.step(action)
for i in range(self.skip):
if not done:
state, reward, done, info = self.env.step(action)
total_reward += reward
states.append(state)
else:
states.append(state)
states = np.concatenate(states, 0)[None, :, :, :]
return random_crop(states.astype(np.float32), self.kFinalImageSize['width']), reward, done, info
def reset(self):
state = self.env.reset()
states = np.concatenate([state for _ in range(self.skip)],
0)[None, :, :, :]
return random_crop(states.astype(np.float32), self.kFinalImageSize['width'])
if __name__ == '__main__':
    # This part is a baseline: let the robot arm pick random actions and see what score it can get
import matplotlib.pyplot as plt
env = KukaReachVisualEnv(is_render=False)
env = CustomSkipFrame(env)
print(env.observation_space.shape)
print(env.action_space.shape)
    print(env.action_space)
# for _ in range(20):
# action=env.action_space.sample()
# print(action)
# env.step(action)
#
# state = env.reset()
# print(state.shape)
# img = state[0][0]
# plt.imshow(img, cmap='gray')
# plt.show()
|
mne/io/cnt/tests/test_cnt.py | stevemats/mne-python | 1,953 | 7899 |
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD-3-Clause
import os.path as op
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from mne import pick_types
from mne.datasets import testing
from mne.io.tests.test_raw import _test_raw_reader
from mne.io.cnt import read_raw_cnt
from mne.annotations import read_annotations
data_path = testing.data_path(download=False)
fname = op.join(data_path, 'CNT', 'scan41_short.cnt')
@testing.requires_testing_data
def test_data():
"""Test reading raw cnt files."""
with pytest.warns(RuntimeWarning, match='number of bytes'):
raw = _test_raw_reader(read_raw_cnt, input_fname=fname,
eog='auto', misc=['NA1', 'LEFT_EAR'])
# make sure we use annotations event if we synthesized stim
assert len(raw.annotations) == 6
eog_chs = pick_types(raw.info, eog=True, exclude=[])
assert len(eog_chs) == 2 # test eog='auto'
assert raw.info['bads'] == ['LEFT_EAR', 'VEOGR'] # test bads
# the data has "05/10/200 17:35:31" so it is set to None
assert raw.info['meas_date'] is None
@testing.requires_testing_data
def test_compare_events_and_annotations():
"""Test comparing annotations and events."""
with pytest.warns(RuntimeWarning, match='Could not parse meas date'):
raw = read_raw_cnt(fname)
events = np.array([[333, 0, 7],
[1010, 0, 7],
[1664, 0, 109],
[2324, 0, 7],
[2984, 0, 109]])
annot = read_annotations(fname)
assert len(annot) == 6
assert_array_equal(annot.onset[:-1], events[:, 0] / raw.info['sfreq'])
assert 'STI 014' not in raw.info['ch_names']
|
packages/pyright-internal/src/tests/samples/unnecessaryCast1.py | sasano8/pyright | 4,391 | 7933 | # This sample tests the type checker's reportUnnecessaryCast feature.
from typing import cast, Union
def foo(a: int):
# This should generate an error if
# reportUnnecessaryCast is enabled.
b = cast(int, a)
c: Union[int, str] = "hello"
d = cast(int, c)
|
tests/ut/python/parallel/test_manual_gatherv2.py | PowerOlive/mindspore | 3,200 | 7938 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore as ms
from mindspore import context, Tensor, Parameter
from mindspore.common.api import _cell_graph_executor
from mindspore.nn import Cell, TrainOneStepCell, Momentum
from mindspore.ops import operations as P
from mindspore.common.initializer import initializer
class Net(Cell):
def __init__(self,
strategy1=None,
strategy2=None,
strategy3=None,
axis=0,
init_flag=True,
split_tuple=(4, 4),
split_string="manual_split",
param_shape=(8, 8)):
super().__init__()
self.gatherv2 = P.Gather().shard(strategy1)
self.gatherv2.add_prim_attr(split_string, split_tuple)
self.mul = P.Mul().shard(strategy2)
self.reshape = P.Reshape()
self.matmul = P.MatMul().shard(strategy3)
self.matmul.add_prim_attr("forward_reduce_scatter", True)
if init_flag:
self.param = Parameter(initializer("ones", param_shape, ms.float32), name="gatherv2_param")
else:
self.param = Parameter(Tensor(np.ones(param_shape), dtype=ms.float32), name="gatherv2_param")
self.mul_weight = Parameter(initializer("ones", (8, 8, 8), ms.float32), name="mul_weight")
self.matmul_weight = Parameter(initializer("ones", (64, 16), ms.float32), name="matmul_weight")
self.axis = axis
def construct(self, x, b):
out = self.gatherv2(self.param, x, self.axis)
out = self.mul(out, self.mul_weight)
out = self.reshape(out, (8, 64))
out = self.matmul(out, self.matmul_weight)
return out
_x = Tensor(np.ones([8, 8]), dtype=ms.int32)
_b = Tensor(np.ones([64, 8]), dtype=ms.float32)
def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_cell_graph_executor.compile(train_net, _x, _b, auto_parallel_mode=True)
context.reset_auto_parallel_context()
def test_normal_split():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
compile_net(net)
def test_normal_split2():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=4, global_rank=0)
strategy1 = ((4, 1), (1, 4))
strategy2 = ((1, 4, 1), (1, 4, 1))
strategy3 = ((1, 4), (4, 1))
net = Net(strategy1, strategy2, strategy3, split_tuple=(10, 20, 30, 4), param_shape=(64, 8))
compile_net(net)
def test_normal_split3():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=32, global_rank=17)
strategy1 = ((4, 8), (1, 4))
strategy2 = ((1, 4, 8), (1, 4, 8))
strategy3 = ((1, 32), (32, 1))
net = Net(strategy1, strategy2, strategy3, split_tuple=(10, 20, 30, 4), param_shape=(64, 8))
compile_net(net)
def test_normal_split_with_offset():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3, split_string="manual_split_with_offset", split_tuple=((4, 0), (4, 4)))
compile_net(net)
def test_auto_parallel_error():
context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=2, global_rank=0)
net = Net()
with pytest.raises(RuntimeError):
compile_net(net)
def test_axis_error():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3, axis=1)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
strategy1 = ((4, 1), (8, 1))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error2():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
strategy1 = ((4, 1), (1, 8))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error3():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error4():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 8), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error5():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=4, global_rank=0)
strategy1 = ((4, 1), (1, 4))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_split_tuple_error():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3, split_tuple=((5, 0), (5, 5)))
with pytest.raises(RuntimeError):
compile_net(net)
def test_parameter_use_tensor_error():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3, init_flag=False)
with pytest.raises(RuntimeError):
compile_net(net)
|
ClemBot.Bot/bot/api/tag_route.py | makayla-moster/ClemBot | 121 | 7939 |
from bot.api.api_client import ApiClient
from bot.api.base_route import BaseRoute
import typing as t
from bot.models import Tag
class TagRoute(BaseRoute):
def __init__(self, api_client: ApiClient):
super().__init__(api_client)
async def create_tag(self, name: str, content: str, guild_id: int, user_id: int, **kwargs) -> t.Optional[Tag]:
json = {
'Name': name,
'Content': content,
'GuildId': guild_id,
'UserId': user_id,
}
tag_dict = await self._client.post('tags', data=json, **kwargs)
if not tag_dict:
return None
return Tag.from_dict(tag_dict)
async def edit_tag_content(self, guild_id: int, name: str, content: str, **kwargs) -> t.Optional[Tag]:
json = {
'GuildId': guild_id,
'Name': name,
'Content': content
}
tag_dict = await self._client.patch('bot/tags', data=json, **kwargs)
if not tag_dict:
return None
return Tag.from_dict(tag_dict)
async def edit_tag_owner(self, guild_id: int, name: str, user_id: int, **kwargs) -> t.Optional[Tag]:
json = {
'GuildId': guild_id,
'Name': name,
'UserId': user_id
}
tag_dict = await self._client.patch('bot/tags', data=json, **kwargs)
if not tag_dict:
return None
return Tag.from_dict(tag_dict)
async def get_tag(self, guild_id: int, name: str) -> t.Optional[Tag]:
json = {
'GuildId': guild_id,
'Name': name,
}
tag_dict = await self._client.get('bot/tags', data=json)
if not tag_dict:
return None
return Tag.from_dict(tag_dict)
async def get_tag_content(self, guild_id: int, name: str) -> t.Optional[str]:
json = {
'GuildId': guild_id,
'Name': name,
}
resp = await self._client.get('bot/tags', data=json)
return None if resp is None else resp['content']
async def delete_tag(self, guild_id: int, name: str, **kwargs):
"""
Makes a call to the API to delete a tag w/ the given GuildId and Name.
If successful, the API will return a dict with the given values:
- name The name of the tag.
- content The content of the tag.
- guildId The guild id the tag was in.
"""
json = {
'GuildId': guild_id,
'Name': name,
}
return await self._client.delete('bot/tags', data=json, **kwargs)
async def add_tag_use(self, guild_id: int, name: str, channel_id: int, user_id: int):
"""
Makes a call to the API to say a tag w/ the given Name was used.
If successful, the API will return a dict with the given values:
- name The name of the tag.
- guildId The guild id the tag is in.
"""
json = {
'GuildId': guild_id,
'Name': name,
'ChannelId': channel_id,
'UserId': user_id
}
return await self._client.post('bot/tags/invoke', data=json)
async def get_guilds_tags(self, guild_id: int) -> t.Iterator[Tag]:
resp = await self._client.get(f'guilds/{guild_id}/tags')
if not resp:
return []
return [Tag.from_dict(i) for i in resp['tags']]
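
# Illustrative usage sketch (ids are placeholders; in the bot these coroutines are
# awaited from cogs holding a TagRoute instance):
#
#   tag = await tag_route.create_tag('greeting', 'Hello!', guild_id, user_id)
#   content = await tag_route.get_tag_content(guild_id, 'greeting')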
|
openfermioncirq/variational/ansatzes/swap_network_trotter_hubbard_test.py | unpilbaek/OpenFermion-Cirq | 278 | 7955 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from openfermioncirq.variational.ansatzes import SwapNetworkTrotterHubbardAnsatz
def test_swap_network_trotter_hubbard_ansatz_param_bounds():
ansatz = SwapNetworkTrotterHubbardAnsatz(3, 1, 1.0, 4.0, periodic=False)
assert list(symbol.name for symbol in ansatz.params()) == [
'Th_0', 'V_0',]
assert ansatz.param_bounds() == [
(-2.0, 2.0), (-1.0, 1.0)]
ansatz = SwapNetworkTrotterHubbardAnsatz(1, 4, 1.0, 4.0, periodic=False)
assert list(symbol.name for symbol in ansatz.params()) == [
'Tv_0', 'V_0',]
assert ansatz.param_bounds() == [
(-2.0, 2.0), (-1.0, 1.0)]
ansatz = SwapNetworkTrotterHubbardAnsatz(3, 2, 1.0, 4.0)
assert list(symbol.name for symbol in ansatz.params()) == [
'Th_0', 'Tv_0', 'V_0',]
assert ansatz.param_bounds() == [
(-2.0, 2.0), (-2.0, 2.0), (-1.0, 1.0)]
|
targets/baremetal-sdk/curie-bsp/setup.py | ideas-detoxes/jerryscript | 4,324 | 7984 | #!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import fnmatch
import os
def build_soft_links(project_path, jerry_path):
""" Creates soft links into the @project_path. """
if not os.path.exists(project_path):
os.makedirs(project_path)
links = [
{ # arc
'src': os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'jerry_app', 'arc'),
'link_name': 'arc'
},
{ # include
'src': os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'jerry_app', 'include'),
'link_name': 'include'
},
{ # quark
'src': os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'jerry_app', 'quark'),
'link_name': 'quark'
},
{ # quark/jerryscript
'src': jerry_path,
'link_name': os.path.join('quark', 'jerryscript')
}
]
for link in links:
src = os.path.join(jerry_path, link['src'])
link_name = os.path.join(project_path, link['link_name'])
if not os.path.islink(link_name):
os.symlink(src, link_name)
print("Created symlink '{link_name}' -> '{src}'".format(src=src, link_name=link_name))
def find_sources(root_dir, sub_dir):
"""
Find .c and .S files inside the @root_dir/@sub_dir directory.
Note: the returned paths will be relative to the @root_dir directory.
"""
src_dir = os.path.join(root_dir, sub_dir)
matches = []
for root, dirnames, filenames in os.walk(src_dir):
for filename in fnmatch.filter(filenames, '*.[c|S]'):
file_path = os.path.join(root, filename)
relative_path = os.path.relpath(file_path, root_dir)
matches.append(relative_path)
return matches
def build_jerry_data(jerry_path):
"""
Build up a dictionary which contains the following items:
- sources: list of JerryScript sources which should be built.
- dirs: list of JerryScript dirs used.
- cflags: CFLAGS for the build.
"""
jerry_sources = []
jerry_dirs = set()
for sub_dir in ['jerry-core', 'jerry-math', os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'source')]:
for file in find_sources(os.path.normpath(jerry_path), sub_dir):
path = os.path.join('jerryscript', file)
jerry_sources.append(path)
jerry_dirs.add(os.path.split(path)[0])
jerry_cflags = [
'-DJERRY_GLOBAL_HEAP_SIZE=10',
'-DJERRY_NDEBUG',
'-DJERRY_DISABLE_HEAVY_DEBUG',
'-DJERRY_BUILTIN_NUMBER=0',
'-DJERRY_BUILTIN_STRING=0',
'-DJERRY_BUILTIN_BOOLEAN=0',
#'-DJERRY_BUILTIN_ERRORS=0',
'-DJERRY_BUILTIN_ARRAY=0',
'-DJERRY_BUILTIN_MATH=0',
'-DJERRY_BUILTIN_JSON=0',
'-DJERRY_BUILTIN_DATE=0',
'-DJERRY_BUILTIN_REGEXP=0',
'-DJERRY_BUILTIN_ANNEXB=0',
'-DJERRY_ESNEXT=0',
'-DJERRY_LCACHE=0',
'-DJERRY_PROPERTY_HASHMAP=0',
]
return {
'sources': jerry_sources,
'dirs': jerry_dirs,
'cflags': jerry_cflags,
}
def write_file(path, content):
""" Writes @content into the file at specified by the @path. """
norm_path = os.path.normpath(path)
with open(norm_path, "w+") as f:
f.write(content)
print("Wrote file '{0}'".format(norm_path))
def build_obj_y(source_list):
"""
Build obj-y additions from the @source_list.
Note: the input sources should have their file extensions.
"""
return '\n'.join(['obj-y += {0}.o'.format(os.path.splitext(fname)[0]) for fname in source_list])
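
# For example (illustrative), build_obj_y(['main.c', 'arc/boot.S']) returns:
#   obj-y += main.o
#   obj-y += arc/boot.o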
def build_cflags_y(cflags_list):
"""
Build cflags-y additions from the @cflags_list.
Note: the input sources should have their file extensions.
"""
return '\n'.join(['cflags-y += {0}'.format(cflag) for cflag in cflags_list])
def build_mkdir(dir_list):
""" Build mkdir calls for each dir in the @dir_list. """
return '\n'.join(['\t$(AT)mkdir -p {0}'.format(os.path.join('$(OUT_SRC)', path)) for path in dir_list])
def create_root_kbuild(project_path):
""" Creates @project_path/Kbuild.mk file. """
root_kbuild_path = os.path.join(project_path, 'Kbuild.mk')
root_kbuild_content = '''
obj-$(CONFIG_QUARK_SE_ARC) += arc/
obj-$(CONFIG_QUARK_SE_QUARK) += quark/
'''
write_file(root_kbuild_path, root_kbuild_content)
def create_root_makefile(project_path):
""" Creates @project_path/Makefile file. """
root_makefile_path = os.path.join(project_path, 'Makefile')
root_makefile_content = '''
THIS_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))
T := $(abspath $(THIS_DIR)/../..)
PROJECT := {project_name}
BOARD := curie_101
ifeq ($(filter curie_101, $(BOARD)),)
$(error The curie jerry sample application can only run on the curie_101 Board)
endif
BUILDVARIANT ?= debug
quark_DEFCONFIG = $(PROJECT_PATH)/quark/defconfig
arc_DEFCONFIG = $(PROJECT_PATH)/arc/defconfig
# Optional: set the default version
VERSION_MAJOR := 1
VERSION_MINOR := 0
VERSION_PATCH := 0
include $(T)/build/project.mk
'''.format(project_name=project_name)
write_file(root_makefile_path, root_makefile_content)
def create_arc_kbuild(project_path):
""" Creates @project_path/arc/Kbuild.mk file. """
arc_path = os.path.join(project_path, 'arc')
arc_kbuild_path = os.path.join(arc_path, 'Kbuild.mk')
arc_sources = find_sources(arc_path, '.')
arc_kbuild_content = build_obj_y(arc_sources)
write_file(arc_kbuild_path, arc_kbuild_content)
def create_quark_kbuild(project_path, jerry_path):
""" Creates @project_path/quark/Kbuild.mk file. """
quark_kbuild_path = os.path.join(project_path, 'quark', 'Kbuild.mk')
# Extract a few JerryScript related data
jerry_data = build_jerry_data(jerry_path)
jerry_objects = build_obj_y(jerry_data['sources'])
jerry_defines = jerry_data['cflags']
jerry_build_dirs = build_mkdir(jerry_data['dirs'])
quark_include_paths = [
'include',
'jerryscript',
os.path.join('jerryscript', 'jerry-math', 'include'),
os.path.join('jerryscript', 'targets', 'baremetal-sdk', 'curie-bsp', 'include')
] + list(jerry_data['dirs'])
quark_includes = [
'-Wno-error',
] + ['-I%s' % os.path.join(project_path, 'quark', path) for path in quark_include_paths]
quark_cflags = build_cflags_y(jerry_defines + quark_includes)
quark_kbuild_content = '''
{cflags}
obj-y += main.o
{objects}
build_dirs:
{dirs}
$(OUT_SRC): build_dirs
'''.format(objects=jerry_objects, cflags=quark_cflags, dirs=jerry_build_dirs)
write_file(quark_kbuild_path, quark_kbuild_content)
def main(curie_path, project_name, jerry_path):
project_path = os.path.join(curie_path, 'wearable_device_sw', 'projects', project_name)
build_soft_links(project_path, jerry_path)
create_root_kbuild(project_path)
create_root_makefile(project_path)
create_arc_kbuild(project_path)
create_quark_kbuild(project_path, jerry_path)
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
print('Usage:')
print('{script_name} [full or relative path of Curie_BSP]'.format(script_name=sys.argv[0]))
sys.exit(1)
project_name = 'curie_bsp_jerry'
file_dir = os.path.dirname(os.path.abspath(__file__))
jerry_path = os.path.join(file_dir, "..", "..", "..")
curie_path = os.path.join(os.getcwd(), sys.argv[1])
main(curie_path, project_name, jerry_path)
|
example/dec/dec.py | TheBurningCrusade/A_mxnet | 159 | 7991 | # pylint: skip-file
import sys
import os
# code to automatically download dataset
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path = [os.path.join(curr_path, "../autoencoder")] + sys.path
import mxnet as mx
import numpy as np
import data
from scipy.spatial.distance import cdist
from sklearn.cluster import KMeans
import model
from autoencoder import AutoEncoderModel
from solver import Solver, Monitor
import logging
def cluster_acc(Y_pred, Y):
from sklearn.utils.linear_assignment_ import linear_assignment
assert Y_pred.size == Y.size
D = max(Y_pred.max(), Y.max())+1
w = np.zeros((D,D), dtype=np.int64)
for i in range(Y_pred.size):
w[Y_pred[i], Y[i]] += 1
ind = linear_assignment(w.max() - w)
return sum([w[i,j] for i,j in ind])*1.0/Y_pred.size, w
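
# Illustrative example: a clustering that matches the labels up to permutation scores 1.0,
#   cluster_acc(np.array([1, 1, 0, 0]), np.array([0, 0, 1, 1]))[0] == 1.0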
class DECModel(model.MXModel):
class DECLoss(mx.operator.NumpyOp):
def __init__(self, num_centers, alpha):
super(DECModel.DECLoss, self).__init__(need_top_grad=False)
self.num_centers = num_centers
self.alpha = alpha
def forward(self, in_data, out_data):
z = in_data[0]
mu = in_data[1]
q = out_data[0]
self.mask = 1.0/(1.0+cdist(z, mu)**2/self.alpha)
q[:] = self.mask**((self.alpha+1.0)/2.0)
q[:] = (q.T/q.sum(axis=1)).T
def backward(self, out_grad, in_data, out_data, in_grad):
q = out_data[0]
z = in_data[0]
mu = in_data[1]
p = in_data[2]
dz = in_grad[0]
dmu = in_grad[1]
self.mask *= (self.alpha+1.0)/self.alpha*(p-q)
dz[:] = (z.T*self.mask.sum(axis=1)).T - self.mask.dot(mu)
dmu[:] = (mu.T*self.mask.sum(axis=0)).T - self.mask.T.dot(z)
def infer_shape(self, in_shape):
assert len(in_shape) == 3
assert len(in_shape[0]) == 2
input_shape = in_shape[0]
label_shape = (input_shape[0], self.num_centers)
mu_shape = (self.num_centers, input_shape[1])
out_shape = (input_shape[0], self.num_centers)
return [input_shape, mu_shape, label_shape], [out_shape]
def list_arguments(self):
return ['data', 'mu', 'label']
def setup(self, X, num_centers, alpha, save_to='dec_model'):
        sep = X.shape[0]*9//10
X_train = X[:sep]
X_val = X[sep:]
ae_model = AutoEncoderModel(self.xpu, [X.shape[1],500,500,2000,10], pt_dropout=0.2)
if not os.path.exists(save_to+'_pt.arg'):
ae_model.layerwise_pretrain(X_train, 256, 50000, 'sgd', l_rate=0.1, decay=0.0,
lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
ae_model.finetune(X_train, 256, 100000, 'sgd', l_rate=0.1, decay=0.0,
lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
ae_model.save(save_to+'_pt.arg')
logging.log(logging.INFO, "Autoencoder Training error: %f"%ae_model.eval(X_train))
logging.log(logging.INFO, "Autoencoder Validation error: %f"%ae_model.eval(X_val))
else:
ae_model.load(save_to+'_pt.arg')
self.ae_model = ae_model
self.dec_op = DECModel.DECLoss(num_centers, alpha)
label = mx.sym.Variable('label')
self.feature = self.ae_model.encoder
self.loss = self.dec_op(data=self.ae_model.encoder, label=label, name='dec')
self.args.update({k:v for k,v in self.ae_model.args.items() if k in self.ae_model.encoder.list_arguments()})
self.args['dec_mu'] = mx.nd.empty((num_centers, self.ae_model.dims[-1]), ctx=self.xpu)
self.args_grad.update({k: mx.nd.empty(v.shape, ctx=self.xpu) for k,v in self.args.items()})
self.args_mult.update({k: k.endswith('bias') and 2.0 or 1.0 for k in self.args})
self.num_centers = num_centers
def cluster(self, X, y=None, update_interval=None):
N = X.shape[0]
if not update_interval:
update_interval = N
batch_size = 256
test_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=False,
last_batch_handle='pad')
args = {k: mx.nd.array(v.asnumpy(), ctx=self.xpu) for k, v in self.args.items()}
        z = list(model.extract_feature(self.feature, args, test_iter, N, self.xpu).values())[0]
kmeans = KMeans(self.num_centers, n_init=20)
kmeans.fit(z)
args['dec_mu'][:] = kmeans.cluster_centers_
solver = Solver('sgd', momentum=0.9, wd=0.0, learning_rate=0.01)
def ce(label, pred):
return np.sum(label*np.log(label/(pred+0.000001)))/label.shape[0]
solver.set_metric(mx.metric.CustomMetric(ce))
label_buff = np.zeros((X.shape[0], self.num_centers))
train_iter = mx.io.NDArrayIter({'data': X}, {'label': label_buff}, batch_size=batch_size,
shuffle=False, last_batch_handle='roll_over')
self.y_pred = np.zeros((X.shape[0]))
def refresh(i):
if i%update_interval == 0:
                z = list(model.extract_feature(self.feature, args, test_iter, N, self.xpu).values())[0]
p = np.zeros((z.shape[0], self.num_centers))
self.dec_op.forward([z, args['dec_mu'].asnumpy()], [p])
y_pred = p.argmax(axis=1)
                print(np.std(np.bincount(y_pred)), np.bincount(y_pred))
                print(np.std(np.bincount(y.astype(int))), np.bincount(y.astype(int)))
if y is not None:
print(cluster_acc(y_pred, y)[0])
weight = 1.0/p.sum(axis=0)
weight *= self.num_centers/weight.sum()
p = (p**2)*weight
train_iter.data_list[1][:] = (p.T/p.sum(axis=1)).T
                print(np.sum(y_pred != self.y_pred), 0.001*y_pred.shape[0])
if np.sum(y_pred != self.y_pred) < 0.001*y_pred.shape[0]:
self.y_pred = y_pred
return True
self.y_pred = y_pred
solver.set_iter_start_callback(refresh)
solver.set_monitor(Monitor(50))
solver.solve(self.xpu, self.loss, args, self.args_grad,
train_iter, 0, 1000000000, {}, False)
self.end_args = args
if y is not None:
return cluster_acc(self.y_pred, y)[0]
else:
return -1
def mnist_exp(xpu):
X, Y = data.get_mnist()
dec_model = DECModel(xpu, X, 10, 1.0, 'data/mnist')
acc = []
for i in [10*(2**j) for j in range(9)]:
acc.append(dec_model.cluster(X, Y, i))
logging.log(logging.INFO, 'Clustering Acc: %f at update interval: %d'%(acc[-1], i))
logging.info(str(acc))
logging.info('Best Clustering ACC: %f at update_interval: %d'%(np.max(acc), 10*(2**np.argmax(acc))))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
mnist_exp(mx.gpu(0))
|
telemetry/telemetry/testing/internal/fake_gpu_info.py | tingshao/catapult | 2,151 | 8008 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This dictionary of GPU information was captured from a run of
# Telemetry on a Linux workstation with NVIDIA GPU. It helps test
# telemetry.internal.platform's GPUInfo class, and specifically the
# attributes it expects to find in the dictionary; if the code changes
# in an incompatible way, tests using this fake GPU info will begin
# failing, indicating this fake data must be updated.
#
# To regenerate it, import pdb in
# telemetry/internal/platform/gpu_info.py and add a call to
# pdb.set_trace() in GPUInfo.FromDict before the return statement.
# Print the attrs dictionary in the debugger and copy/paste the result
# on the right-hand side of this assignment. Then run:
#
# pyformat [this file name] | sed -e "s/'/'/g"
#
# and put the output into this file.
FAKE_GPU_INFO = {
'feature_status':
{
'flash_stage3d': 'enabled',
'gpu_compositing': 'enabled',
'video_decode': 'unavailable_software',
'flash_3d': 'enabled',
'webgl': 'enabled',
'video_encode': 'enabled',
'multiple_raster_threads': 'enabled_on',
'2d_canvas': 'unavailable_software',
'rasterization': 'disabled_software',
'flash_stage3d_baseline': 'enabled'
},
'aux_attributes':
{
'optimus': False,
'sandboxed': True,
'basic_info_state': 1,
'adapter_luid': 0.0,
'driver_version': '331.79',
'direct_rendering': True,
'amd_switchable': False,
'context_info_state': 1,
'process_crash_count': 0,
'pixel_shader_version': '4.40',
'gl_ws_version': '1.4',
'can_lose_context': False,
'driver_vendor': 'NVIDIA',
'max_msaa_samples': '64',
'software_rendering': False,
'gl_version': '4.4.0 NVIDIA 331.79',
'gl_ws_vendor': 'NVIDIA Corporation',
'vertex_shader_version': '4.40',
'initialization_time': 1.284043,
'gl_reset_notification_strategy': 33362,
'gl_ws_extensions':
'GLX_EXT_visual_info GLX_EXT_visual_rating GLX_SGIX_fbconfig '
'GLX_SGIX_pbuffer GLX_SGI_video_sync GLX_SGI_swap_control '
'GLX_EXT_swap_control GLX_EXT_swap_control_tear '
'GLX_EXT_texture_from_pixmap GLX_EXT_buffer_age '
'GLX_ARB_create_context GLX_ARB_create_context_profile '
'GLX_EXT_create_context_es_profile '
'GLX_EXT_create_context_es2_profile '
'GLX_ARB_create_context_robustness GLX_ARB_multisample '
'GLX_NV_float_buffer GLX_ARB_fbconfig_float GLX_NV_swap_group'
' GLX_EXT_framebuffer_sRGB GLX_NV_multisample_coverage '
'GLX_NV_copy_image GLX_NV_video_capture ',
'gl_renderer': 'Quadro 600/PCIe/SSE2',
'driver_date': '',
'gl_vendor': 'NVIDIA Corporation',
'gl_extensions':
'GL_AMD_multi_draw_indirect GL_ARB_arrays_of_arrays '
'GL_ARB_base_instance GL_ARB_blend_func_extended '
'GL_ARB_buffer_storage GL_ARB_clear_buffer_object '
'GL_ARB_clear_texture GL_ARB_color_buffer_float '
'GL_ARB_compatibility GL_ARB_compressed_texture_pixel_storage'
' GL_ARB_conservative_depth GL_ARB_compute_shader '
'GL_ARB_compute_variable_group_size GL_ARB_copy_buffer '
'GL_ARB_copy_image GL_ARB_debug_output '
'GL_ARB_depth_buffer_float GL_ARB_depth_clamp '
'GL_ARB_depth_texture GL_ARB_draw_buffers '
'GL_ARB_draw_buffers_blend GL_ARB_draw_indirect '
'GL_ARB_draw_elements_base_vertex GL_ARB_draw_instanced '
'GL_ARB_enhanced_layouts GL_ARB_ES2_compatibility '
'GL_ARB_ES3_compatibility GL_ARB_explicit_attrib_location '
'GL_ARB_explicit_uniform_location '
'GL_ARB_fragment_coord_conventions '
'GL_ARB_fragment_layer_viewport GL_ARB_fragment_program '
'GL_ARB_fragment_program_shadow GL_ARB_fragment_shader '
'GL_ARB_framebuffer_no_attachments GL_ARB_framebuffer_object '
'GL_ARB_framebuffer_sRGB GL_ARB_geometry_shader4 '
'GL_ARB_get_program_binary GL_ARB_gpu_shader5 '
'GL_ARB_gpu_shader_fp64 GL_ARB_half_float_pixel '
'GL_ARB_half_float_vertex GL_ARB_imaging '
'GL_ARB_indirect_parameters GL_ARB_instanced_arrays '
'GL_ARB_internalformat_query GL_ARB_internalformat_query2 '
'GL_ARB_invalidate_subdata GL_ARB_map_buffer_alignment '
'GL_ARB_map_buffer_range GL_ARB_multi_bind '
'GL_ARB_multi_draw_indirect GL_ARB_multisample '
'GL_ARB_multitexture GL_ARB_occlusion_query '
'GL_ARB_occlusion_query2 GL_ARB_pixel_buffer_object '
'GL_ARB_point_parameters GL_ARB_point_sprite '
'GL_ARB_program_interface_query GL_ARB_provoking_vertex '
'GL_ARB_robust_buffer_access_behavior GL_ARB_robustness '
'GL_ARB_sample_shading GL_ARB_sampler_objects '
'GL_ARB_seamless_cube_map GL_ARB_separate_shader_objects '
'GL_ARB_shader_atomic_counters GL_ARB_shader_bit_encoding '
'GL_ARB_shader_draw_parameters GL_ARB_shader_group_vote '
'GL_ARB_shader_image_load_store GL_ARB_shader_image_size '
'GL_ARB_shader_objects GL_ARB_shader_precision '
'GL_ARB_query_buffer_object '
'GL_ARB_shader_storage_buffer_object GL_ARB_shader_subroutine'
' GL_ARB_shader_texture_lod GL_ARB_shading_language_100 '
'GL_ARB_shading_language_420pack '
'GL_ARB_shading_language_include '
'GL_ARB_shading_language_packing GL_ARB_shadow '
'GL_ARB_stencil_texturing GL_ARB_sync '
'GL_ARB_tessellation_shader GL_ARB_texture_border_clamp '
'GL_ARB_texture_buffer_object '
'GL_ARB_texture_buffer_object_rgb32 '
'GL_ARB_texture_buffer_range GL_ARB_texture_compression '
'GL_ARB_texture_compression_bptc '
'GL_ARB_texture_compression_rgtc GL_ARB_texture_cube_map '
'GL_ARB_texture_cube_map_array GL_ARB_texture_env_add '
'GL_ARB_texture_env_combine GL_ARB_texture_env_crossbar '
'GL_ARB_texture_env_dot3 GL_ARB_texture_float '
'GL_ARB_texture_gather GL_ARB_texture_mirror_clamp_to_edge '
'GL_ARB_texture_mirrored_repeat GL_ARB_texture_multisample '
'GL_ARB_texture_non_power_of_two GL_ARB_texture_query_levels '
'GL_ARB_texture_query_lod GL_ARB_texture_rectangle '
'GL_ARB_texture_rg GL_ARB_texture_rgb10_a2ui '
'GL_ARB_texture_stencil8 GL_ARB_texture_storage '
'GL_ARB_texture_storage_multisample GL_ARB_texture_swizzle '
'GL_ARB_texture_view GL_ARB_timer_query '
'GL_ARB_transform_feedback2 GL_ARB_transform_feedback3 '
'GL_ARB_transform_feedback_instanced GL_ARB_transpose_matrix '
'GL_ARB_uniform_buffer_object GL_ARB_vertex_array_bgra '
'GL_ARB_vertex_array_object GL_ARB_vertex_attrib_64bit '
'GL_ARB_vertex_attrib_binding GL_ARB_vertex_buffer_object '
'GL_ARB_vertex_program GL_ARB_vertex_shader '
'GL_ARB_vertex_type_10f_11f_11f_rev '
'GL_ARB_vertex_type_2_10_10_10_rev GL_ARB_viewport_array '
'GL_ARB_window_pos GL_ATI_draw_buffers GL_ATI_texture_float '
'GL_ATI_texture_mirror_once GL_S3_s3tc GL_EXT_texture_env_add'
' GL_EXT_abgr GL_EXT_bgra GL_EXT_bindable_uniform '
'GL_EXT_blend_color GL_EXT_blend_equation_separate '
'GL_EXT_blend_func_separate GL_EXT_blend_minmax '
'GL_EXT_blend_subtract GL_EXT_compiled_vertex_array '
'GL_EXT_Cg_shader GL_EXT_depth_bounds_test '
'GL_EXT_direct_state_access GL_EXT_draw_buffers2 '
'GL_EXT_draw_instanced GL_EXT_draw_range_elements '
'GL_EXT_fog_coord GL_EXT_framebuffer_blit '
'GL_EXT_framebuffer_multisample '
'GL_EXTX_framebuffer_mixed_formats '
'GL_EXT_framebuffer_multisample_blit_scaled '
'GL_EXT_framebuffer_object GL_EXT_framebuffer_sRGB '
'GL_EXT_geometry_shader4 GL_EXT_gpu_program_parameters '
'GL_EXT_gpu_shader4 GL_EXT_multi_draw_arrays '
'GL_EXT_packed_depth_stencil GL_EXT_packed_float '
'GL_EXT_packed_pixels GL_EXT_pixel_buffer_object '
'GL_EXT_point_parameters GL_EXT_provoking_vertex '
'GL_EXT_rescale_normal GL_EXT_secondary_color '
'GL_EXT_separate_shader_objects '
'GL_EXT_separate_specular_color '
'GL_EXT_shader_image_load_store GL_EXT_shadow_funcs '
'GL_EXT_stencil_two_side GL_EXT_stencil_wrap GL_EXT_texture3D'
' GL_EXT_texture_array GL_EXT_texture_buffer_object '
'GL_EXT_texture_compression_dxt1 '
'GL_EXT_texture_compression_latc '
'GL_EXT_texture_compression_rgtc '
'GL_EXT_texture_compression_s3tc GL_EXT_texture_cube_map '
'GL_EXT_texture_edge_clamp GL_EXT_texture_env_combine '
'GL_EXT_texture_env_dot3 GL_EXT_texture_filter_anisotropic '
'GL_EXT_texture_integer GL_EXT_texture_lod '
'GL_EXT_texture_lod_bias GL_EXT_texture_mirror_clamp '
'GL_EXT_texture_object GL_EXT_texture_shared_exponent '
'GL_EXT_texture_sRGB GL_EXT_texture_sRGB_decode '
'GL_EXT_texture_storage GL_EXT_texture_swizzle '
'GL_EXT_timer_query GL_EXT_transform_feedback2 '
'GL_EXT_vertex_array GL_EXT_vertex_array_bgra '
'GL_EXT_vertex_attrib_64bit GL_EXT_x11_sync_object '
'GL_EXT_import_sync_object GL_IBM_rasterpos_clip '
'GL_IBM_texture_mirrored_repeat GL_KHR_debug '
'GL_KTX_buffer_region GL_NV_bindless_multi_draw_indirect '
'GL_NV_blend_equation_advanced GL_NV_blend_square '
'GL_NV_compute_program5 GL_NV_conditional_render '
'GL_NV_copy_depth_to_color GL_NV_copy_image '
'GL_NV_depth_buffer_float GL_NV_depth_clamp '
'GL_NV_draw_texture GL_NV_ES1_1_compatibility '
'GL_NV_explicit_multisample GL_NV_fence GL_NV_float_buffer '
'GL_NV_fog_distance GL_NV_fragment_program '
'GL_NV_fragment_program_option GL_NV_fragment_program2 '
'GL_NV_framebuffer_multisample_coverage '
'GL_NV_geometry_shader4 GL_NV_gpu_program4 '
'GL_NV_gpu_program4_1 GL_NV_gpu_program5 '
'GL_NV_gpu_program5_mem_extended GL_NV_gpu_program_fp64 '
'GL_NV_gpu_shader5 GL_NV_half_float GL_NV_light_max_exponent '
'GL_NV_multisample_coverage GL_NV_multisample_filter_hint '
'GL_NV_occlusion_query GL_NV_packed_depth_stencil '
'GL_NV_parameter_buffer_object GL_NV_parameter_buffer_object2'
' GL_NV_path_rendering GL_NV_pixel_data_range '
'GL_NV_point_sprite GL_NV_primitive_restart '
'GL_NV_register_combiners GL_NV_register_combiners2 '
'GL_NV_shader_atomic_counters GL_NV_shader_atomic_float '
'GL_NV_shader_buffer_load GL_NV_shader_storage_buffer_object '
'GL_ARB_sparse_texture GL_NV_texgen_reflection '
'GL_NV_texture_barrier GL_NV_texture_compression_vtc '
'GL_NV_texture_env_combine4 GL_NV_texture_expand_normal '
'GL_NV_texture_multisample GL_NV_texture_rectangle '
'GL_NV_texture_shader GL_NV_texture_shader2 '
'GL_NV_texture_shader3 GL_NV_transform_feedback '
'GL_NV_transform_feedback2 GL_NV_vdpau_interop '
'GL_NV_vertex_array_range GL_NV_vertex_array_range2 '
'GL_NV_vertex_attrib_integer_64bit '
'GL_NV_vertex_buffer_unified_memory GL_NV_vertex_program '
'GL_NV_vertex_program1_1 GL_NV_vertex_program2 '
'GL_NV_vertex_program2_option GL_NV_vertex_program3 '
'GL_NVX_conditional_render GL_NVX_gpu_memory_info '
'GL_SGIS_generate_mipmap GL_SGIS_texture_lod '
'GL_SGIX_depth_texture GL_SGIX_shadow GL_SUN_slice_accum '
},
'devices':
[
{
'device_string': '',
'vendor_id': 4318.0,
'device_id': 3576.0,
'vendor_string': ''
}],
'driver_bug_workarounds':
['clear_uniforms_before_first_program_use',
'disable_gl_path_rendering',
'init_gl_position_in_vertex_shader',
'init_vertex_attributes',
'remove_pow_with_constant_exponent',
'scalarize_vec_and_mat_constructor_args',
'use_current_program_after_successful_link',
'use_virtualized_gl_contexts']
}
|
src/vulnix/nvd.py | dermetfan/vulnix | 217 | 8012 |
from BTrees import OOBTree
from datetime import datetime, date, timedelta
from persistent import Persistent
from .vulnerability import Vulnerability
import fcntl
import glob
import gzip
import json
import logging
import os
import os.path as p
import requests
import transaction
import ZODB
import ZODB.FileStorage
DEFAULT_MIRROR = 'https://nvd.nist.gov/feeds/json/cve/1.1/'
DEFAULT_CACHE_DIR = '~/.cache/vulnix'
_log = logging.getLogger(__name__)
class NVD(object):
"""Access to the National Vulnerability Database.
https://nvd.nist.gov/
"""
def __init__(self, mirror=DEFAULT_MIRROR, cache_dir=DEFAULT_CACHE_DIR):
self.mirror = mirror.rstrip('/') + '/'
self.cache_dir = p.expanduser(cache_dir)
current = date.today().year
self.available_archives = [y for y in range(current-5, current+1)]
def lock(self):
self._lock = open(p.join(self.cache_dir, 'lock'), 'a')
try:
fcntl.lockf(self._lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
except OSError:
_log.info('Waiting for NVD lock...')
fcntl.lockf(self._lock, fcntl.LOCK_EX)
def __enter__(self):
"""Keeps database connection open while in this context."""
_log.debug('Opening database in %s', self.cache_dir)
os.makedirs(self.cache_dir, exist_ok=True)
self.lock()
self._db = ZODB.DB(ZODB.FileStorage.FileStorage(
p.join(self.cache_dir, 'Data.fs')))
self._connection = self._db.open()
self._root = self._connection.root()
try:
self._root.setdefault('advisory', OOBTree.OOBTree())
self._root.setdefault('by_product', OOBTree.OOBTree())
self._root.setdefault('meta', Meta())
# may trigger exceptions if the database is inconsistent
list(self._root['by_product'].keys())
if 'archives' in self._root:
_log.warn('Pre-1.9.0 database found - rebuilding')
self.reinit()
except (TypeError, EOFError):
_log.warn('Incompatible objects found in database - rebuilding DB')
self.reinit()
return self
def __exit__(self, exc_type=None, exc_value=None, exc_tb=None):
if exc_type is None:
if self.meta.should_pack():
_log.debug('Packing database')
self._db.pack()
transaction.commit()
else:
transaction.abort()
self._connection.close()
self._db.close()
self._lock = None
def reinit(self):
"""Remove old DB and rebuild it from scratch."""
self._root = None
transaction.abort()
self._connection.close()
self._db = None
for f in glob.glob(p.join(self.cache_dir, "Data.fs*")):
os.unlink(f)
self._db = ZODB.DB(ZODB.FileStorage.FileStorage(
p.join(self.cache_dir, 'Data.fs')))
self._connection = self._db.open()
self._root = self._connection.root()
self._root['advisory'] = OOBTree.OOBTree()
self._root['by_product'] = OOBTree.OOBTree()
self._root['meta'] = Meta()
@property
def meta(self):
return self._root['meta']
def relevant_archives(self):
"""Returns list of NVD archives to check.
If there was an update within the last two hours, nothing is
done. If the last update was recent enough to be covered by
the 'modified' feed, only that is checked. Else, all feeds
are checked.
"""
last_update = self.meta.last_update
if last_update > datetime.now() - timedelta(hours=2):
return []
# the "modified" feed is sufficient if used frequently enough
if last_update > datetime.now() - timedelta(days=7):
return ['modified']
return self.available_archives
def update(self):
"""Download archives (if changed) and add CVEs to database."""
changed = []
for a in self.relevant_archives():
arch = Archive(a)
changed.append(arch.download(self.mirror, self.meta))
self.add(arch)
if any(changed):
self.meta.last_update = datetime.now()
self.reindex()
def add(self, archive):
advisories = self._root['advisory']
for (cve_id, adv) in archive.items():
advisories[cve_id] = adv
def reindex(self):
"""Regenerate product index."""
_log.info('Reindexing database')
del self._root['by_product']
bp = OOBTree.OOBTree()
for vuln in self._root['advisory'].values():
if vuln.nodes:
for prod in (n.product for n in vuln.nodes):
bp.setdefault(prod, [])
bp[prod].append(vuln)
self._root['by_product'] = bp
transaction.commit()
def by_id(self, cve_id):
"""Returns vuln or raises KeyError."""
return self._root['advisory'][cve_id]
def by_product(self, product):
"""Returns list of matching vulns or empty list."""
try:
return self._root['by_product'][product]
except KeyError:
return []
def affected(self, pname, version):
"""Returns list of matching vulnerabilities."""
res = set()
for vuln in self.by_product(pname):
if vuln.match(pname, version):
res.add(vuln)
return res
class Archive:
"""Single JSON data structure from NIST NVD."""
def __init__(self, name):
"""Creates JSON feed object.
`name` consists of a year or "modified".
"""
self.name = name
self.download_uri = 'nvdcve-1.1-{}.json.gz'.format(name)
self.advisories = {}
def download(self, mirror, meta):
"""Fetches compressed JSON data from NIST.
Nothing is done if we have already seen the same version of
the feed before.
Returns True if anything has been loaded successfully.
"""
url = mirror + self.download_uri
_log.info('Loading %s', url)
r = requests.get(url, headers=meta.headers_for(url))
r.raise_for_status()
if r.status_code == 200:
_log.debug('Loading JSON feed "%s"', self.name)
self.parse(gzip.decompress(r.content))
meta.update_headers_for(url, r.headers)
return True
else:
_log.debug('Skipping JSON feed "%s" (%s)', self.name, r.reason)
return False
def parse(self, nvd_json):
added = 0
raw = json.loads(nvd_json)
for item in raw['CVE_Items']:
try:
vuln = Vulnerability.parse(item)
self.advisories[vuln.cve_id] = vuln
added += 1
except ValueError:
_log.debug('Failed to parse NVD item: %s', item)
_log.debug("Added %s vulnerabilities", added)
def items(self):
return self.advisories.items()
class Meta(Persistent):
"""Metadate for database maintenance control"""
pack_counter = 0
last_update = datetime(1970, 1, 1)
etag = None
def should_pack(self):
self.pack_counter += 1
if self.pack_counter > 25:
self.pack_counter = 0
return True
return False
def headers_for(self, url):
"""Returns dict of additional request headers."""
if self.etag and url in self.etag:
return {'If-None-Match': self.etag[url]}
return {}
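# Example flow (the ETag value is an assumption): after a 200 response that
# carried ETag '"abc123"', the next request for the same URL sends
# {'If-None-Match': '"abc123"'}; NIST then typically answers 304 Not Modified,
# so Archive.download() skips re-parsing the feed.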
def update_headers_for(self, url, resp_headers):
"""Updates self from HTTP response headers."""
if 'ETag' in resp_headers:
if self.etag is None:
self.etag = OOBTree.OOBTree()
self.etag[url] = resp_headers['ETag']
|
autoindent_code_JASS_war3map_j.py | gil9red/SimplePyScripts | 117 | 8016 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import re
DEBUG = False
def merge_str_literal(text: str) -> str:
def _on_match(m: re.Match):
return m.group().replace('"+"', '')
return re.sub(r'".+?"(\+".+?")+ ', _on_match, text)
lines = """
function II1I1_II takes real II1I1__I returns nothing
local real II1I1_1I
local real st=TimerGetElapsed(II1I___I)
if st<=0 then
set II1I___I=CreateTimer()
call TimerStart(II1I___I,1000000,false,null)
endif
if(II1I1__I>0)then
loop
set II1I1_1I=II1I1__I-TimerGetElapsed(II1I___I)+st
exitwhen II1I1_1I<=0
if(II1I1_1I>bj_POLLED_WAIT_SKIP_THRESHOLD)then
call TriggerSleepAction(0.1*II1I1_1I)
else
call TriggerSleepAction(bj_POLLED_WAIT_INTERVAL)
endif
endloop
endif
endfunction
""".strip().splitlines()
stack = []
items = []
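# Indentation is derived from a stack of block keywords: openers such as
# 'globals'/'function'/'loop'/'if' are pushed and kept, 'elseif'/'else'
# replace the current 'if', 'end*' keywords pop their opener, and any other
# statement is pushed only for this iteration (popped again at the bottom of
# the loop), so len(stack) - 1 gives the nesting depth for each line.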
for line in lines:
if line.startswith('globals'):
stack.append('globals')
elif line.startswith('endglobals'):
stack.pop(-1)
stack.append('endglobals')
elif line.startswith('function'):
stack.append('function')
elif line.startswith('endfunction'):
stack.pop(-1)
stack.append('endfunction')
elif line.startswith('loop'):
stack.append('loop')
elif line.startswith('endloop'):
stack.pop(-1)
stack.append('endloop')
elif line.startswith('if'):
stack.append('if')
elif line.startswith('elseif'):
stack.pop(-1)
stack.append('elseif')
elif line.startswith('else'):
stack.pop(-1)
stack.append('else')
elif line.startswith('endif'):
stack.pop(-1)
stack.append('endif')
else:
stack.append(line[:8] + '...')
indent = len(stack) - 1
line = merge_str_literal(line)
items.append(' ' * indent + line)
DEBUG and print(f'{indent}. {line!r}', stack)
# Add empty line after endglobals and endfunction
if line.startswith('endglobals') or line.startswith('endfunction'):
items.append('')
if stack[-1] not in ['globals', 'function', 'loop', 'if', 'elseif', 'else']:
stack.pop(-1)
new_text = '\n'.join(items).strip()
print(new_text)
"""
function II1I1_II takes real II1I1__I returns nothing
local real II1I1_1I
local real st=TimerGetElapsed(II1I___I)
if st<=0 then
set II1I___I=CreateTimer()
call TimerStart(II1I___I,1000000,false,null)
endif
if(II1I1__I>0)then
loop
set II1I1_1I=II1I1__I-TimerGetElapsed(II1I___I)+st
exitwhen II1I1_1I<=0
if(II1I1_1I>bj_POLLED_WAIT_SKIP_THRESHOLD)then
call TriggerSleepAction(0.1*II1I1_1I)
else
call TriggerSleepAction(bj_POLLED_WAIT_INTERVAL)
endif
endloop
endif
endfunction
"""
|
fmpy/cswrapper/__init__.py | CSchulzeTLK/FMPy | 225 | 8023 | <reponame>CSchulzeTLK/FMPy
def add_cswrapper(filename, outfilename=None):
from fmpy import read_model_description, extract, sharedLibraryExtension, platform, __version__
from lxml import etree
import os
from shutil import copyfile, rmtree
if outfilename is None:
outfilename = filename
model_description = read_model_description(filename)
if model_description.fmiVersion != '2.0':
raise Exception("%s is not an FMI 2.0 FMU." % filename)
if model_description.modelExchange is None:
raise Exception("%s does not support Model Exchange." % filename)
unzipdir = extract(filename)
xml = os.path.join(unzipdir, 'modelDescription.xml')
tree = etree.parse(xml)
root = tree.getroot()
# update description
generation_tool = root.attrib.get('generationTool', 'Unknown') + " with FMPy %s Co-Simulation wrapper" % __version__
root.attrib['generationTool'] = generation_tool
# remove any existing <CoSimulation> element
for e in root.findall('CoSimulation'):
root.remove(e)
for i, child in enumerate(root):
if child.tag == 'ModelExchange':
break
model_identifier = '%s_%s_%s' % (model_description.modelExchange.modelIdentifier,
model_description.numberOfContinuousStates,
model_description.numberOfEventIndicators)
e = etree.Element("CoSimulation")
e.attrib['modelIdentifier'] = model_identifier
root.insert(i + 1, e)
tree.write(xml, pretty_print=True, encoding='utf-8')
shared_library = os.path.join(os.path.dirname(__file__), 'cswrapper' + sharedLibraryExtension)
license_file = os.path.join(os.path.dirname(__file__), 'license.txt')
licenses_dir = os.path.join(unzipdir, 'documentation', 'licenses')
if not os.path.isdir(licenses_dir):
os.mkdir(licenses_dir)
copyfile(src=shared_library, dst=os.path.join(unzipdir, 'binaries', platform, model_identifier + sharedLibraryExtension))
copyfile(license_file, os.path.join(unzipdir, 'documentation', 'licenses', 'fmpy-cswrapper.txt'))
create_zip_archive(outfilename, unzipdir)
rmtree(unzipdir, ignore_errors=True)
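# Hypothetical usage sketch (file names are assumptions):
#   add_cswrapper('BouncingBall.fmu', outfilename='BouncingBall_cs.fmu')
# adds a <CoSimulation> element plus the wrapper binary so that
# co-simulation-only tools can import the Model Exchange FMU.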
def create_zip_archive(filename, source_dir):
import zipfile
import os
with zipfile.ZipFile(filename, 'w', zipfile.ZIP_DEFLATED) as zf:
base_path = os.path.normpath(source_dir)
for dirpath, dirnames, filenames in os.walk(source_dir):
for name in sorted(dirnames):
path = os.path.normpath(os.path.join(dirpath, name))
zf.write(path, os.path.relpath(path, base_path))
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zf.write(path, os.path.relpath(path, base_path))
|
test/dict_parameter_test.py | shouldsee/luigi | 14,755 | 8024 | <gh_stars>1000+
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from helpers import unittest, in_parse
import luigi
import luigi.interface
import json
import collections
class DictParameterTask(luigi.Task):
param = luigi.DictParameter()
class DictParameterTest(unittest.TestCase):
_dict = collections.OrderedDict([('username', 'me'), ('password', '<PASSWORD>')])
def test_parse(self):
d = luigi.DictParameter().parse(json.dumps(DictParameterTest._dict))
self.assertEqual(d, DictParameterTest._dict)
def test_serialize(self):
d = luigi.DictParameter().serialize(DictParameterTest._dict)
self.assertEqual(d, '{"username": "me", "password": "<PASSWORD>"}')
def test_parse_and_serialize(self):
inputs = ['{"username": "me", "password": "<PASSWORD>"}', '{"password": "<PASSWORD>", "username": "me"}']
for json_input in inputs:
_dict = luigi.DictParameter().parse(json_input)
self.assertEqual(json_input, luigi.DictParameter().serialize(_dict))
def test_parse_interface(self):
in_parse(["DictParameterTask", "--param", '{"username": "me", "password": "<PASSWORD>"}'],
lambda task: self.assertEqual(task.param, DictParameterTest._dict))
def test_serialize_task(self):
t = DictParameterTask(DictParameterTest._dict)
self.assertEqual(str(t), 'DictParameterTask(param={"username": "me", "password": "<PASSWORD>"})')
def test_parse_invalid_input(self):
self.assertRaises(ValueError, lambda: luigi.DictParameter().parse('{"invalid"}'))
def test_hash_normalize(self):
self.assertRaises(TypeError, lambda: hash(luigi.DictParameter().parse('{"a": {"b": []}}')))
a = luigi.DictParameter().normalize({"a": [{"b": []}]})
b = luigi.DictParameter().normalize({"a": [{"b": []}]})
self.assertEqual(hash(a), hash(b))
|
tests/test_sentiments.py | rajeshkumargp/TextBlob | 6,608 | 8073 | <reponame>rajeshkumargp/TextBlob
from __future__ import unicode_literals
import unittest
from nose.tools import * # PEP8 asserts
from nose.plugins.attrib import attr
from textblob.sentiments import PatternAnalyzer, NaiveBayesAnalyzer, DISCRETE, CONTINUOUS
class TestPatternSentiment(unittest.TestCase):
def setUp(self):
self.analyzer = PatternAnalyzer()
def test_kind(self):
assert_equal(self.analyzer.kind, CONTINUOUS)
def test_analyze(self):
p1 = "I feel great this morning."
n1 = "This is a terrible car."
p1_result = self.analyzer.analyze(p1)
n1_result = self.analyzer.analyze(n1)
assert_true(p1_result[0] > 0)
assert_true(n1_result[0] < 0)
assert_equal(p1_result.polarity, p1_result[0])
assert_equal(p1_result.subjectivity, p1_result[1])
def test_analyze_assessments(self):
p1 = "I feel great this morning."
n1 = "This is a terrible car."
p1_result = self.analyzer.analyze(p1,keep_assessments=True)
n1_result = self.analyzer.analyze(n1,keep_assessments=True)
p1_assessment = p1_result.assessments[0]
n1_assessment = n1_result.assessments[0]
assert_true(p1_assessment[1] > 0)
assert_true(n1_assessment[1] < 0)
assert_equal(p1_result.polarity, p1_assessment[1])
assert_equal(p1_result.subjectivity, p1_assessment[2])
class TestNaiveBayesAnalyzer(unittest.TestCase):
def setUp(self):
self.analyzer = NaiveBayesAnalyzer()
def test_kind(self):
assert_equal(self.analyzer.kind, DISCRETE)
@attr('slow')
def test_analyze(self):
p1 = 'I feel great this morning.'
n1 = 'This is a terrible car.'
p1_result = self.analyzer.analyze(p1)
assert_equal(p1_result[0], 'pos')
assert_equal(self.analyzer.analyze(n1)[0], 'neg')
# The 2nd item should be the probability that it is positive
assert_true(isinstance(p1_result[1], float))
# 3rd item is probability that it is negative
assert_true(isinstance(p1_result[2], float))
assert_about_equal(p1_result[1] + p1_result[2], 1)
assert_equal(p1_result.classification, p1_result[0])
assert_equal(p1_result.p_pos, p1_result[1])
assert_equal(p1_result.p_neg, p1_result[2])
def assert_about_equal(first, second, places=4):
return assert_equal(round(first, places), second)
if __name__ == '__main__':
unittest.main()
|
tests/scripts/thread-cert/test_network_layer.py | AdityaHPatwardhan/openthread | 2,962 | 8076 | <gh_stars>1000+
#!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import io
import random
import struct
import unittest
import common
import network_layer
def any_eid():
return bytearray([random.getrandbits(8) for _ in range(16)])
def any_mac_extended_address():
return bytearray([random.getrandbits(8) for _ in range(8)])
def any_rloc16():
return random.getrandbits(16)
def any_ml_eid():
return bytearray([random.getrandbits(8) for _ in range(8)])
def any_status():
return random.getrandbits(1)
def any_seconds():
return random.getrandbits(32)
def any_id_sequence():
return random.getrandbits(8)
def any_router_id_mask():
return random.getrandbits(64)
def any_options(count=None):
count = count if count is not None else random.randint(0, 255)
return [random.getrandbits(8) for _ in range(count)]
def any_tlv_data(length=None):
_type = random.getrandbits(8)
length = length if length is not None else random.getrandbits(8)
value = bytearray([random.getrandbits(8) for _ in range(length)])
return bytearray([_type, length]) + value
def any_tlvs_data(count=None):
count = count if count is not None else random.randint(0, 16)
data = bytearray()
for _ in range(count):
data += any_tlv_data(random.randint(1, 15))
return data
class TestTargetEid(unittest.TestCase):
def test_should_return_eid_value_when_eid_property_is_called(self):
# GIVEN
eid = any_eid()
target_eid = network_layer.TargetEid(eid)
# WHEN
actual_eid = target_eid.eid
# THEN
self.assertEqual(eid, actual_eid)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
eid = any_eid()
target_eid = network_layer.TargetEid(eid)
# THEN
self.assertEqual(target_eid, network_layer.TargetEid(eid))
class TestTargetEidFactory(unittest.TestCase):
def test_should_create_TargetEid_from_bytearray_when_parse_method_is_called(self):
# GIVEN
eid = any_eid()
factory = network_layer.TargetEidFactory()
# WHEN
target_eid = factory.parse(io.BytesIO(eid), common.MessageInfo())
# THEN
self.assertTrue(isinstance(target_eid, network_layer.TargetEid))
self.assertEqual(eid, target_eid.eid)
class TestMacExtendedAddress(unittest.TestCase):
def test_should_return_mac_address_value_when_mac_address_property_is_called(self):
# GIVEN
mac_address = any_mac_extended_address()
mac_extended_address = network_layer.MacExtendedAddress(mac_address)
# WHEN
actual_mac_address = mac_extended_address.mac_address
# THEN
self.assertEqual(mac_address, actual_mac_address)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
mac_address = any_mac_extended_address()
mac_extended_address = network_layer.MacExtendedAddress(mac_address)
# THEN
self.assertEqual(mac_extended_address, network_layer.MacExtendedAddress(mac_address))
class TestMacExtendedAddressFactory(unittest.TestCase):
def test_should_create_MacExtendedAddress_from_bytearray_when_parse_method_is_called(self):
# GIVEN
mac_address = any_mac_extended_address()
factory = network_layer.MacExtendedAddressFactory()
# WHEN
mac_extended_address = factory.parse(io.BytesIO(mac_address), common.MessageInfo())
# THEN
self.assertTrue(isinstance(mac_extended_address, network_layer.MacExtendedAddress))
self.assertEqual(mac_address, mac_extended_address.mac_address)
class TestRloc16(unittest.TestCase):
def test_should_return_rloc16_value_when_rloc16_property_is_called(self):
# GIVEN
rloc16 = any_rloc16()
rloc16_obj = network_layer.Rloc16(rloc16)
# WHEN
actual_rloc16 = rloc16_obj.rloc16
# THEN
self.assertEqual(rloc16, actual_rloc16)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
rloc16 = any_rloc16()
rloc16_obj = network_layer.Rloc16(rloc16)
# THEN
self.assertEqual(rloc16_obj, network_layer.Rloc16(rloc16))
class TestRloc16Factory(unittest.TestCase):
def test_should_create_Rloc16_from_bytearray_when_parse_method_is_called(self):
# GIVEN
rloc16 = any_rloc16()
factory = network_layer.Rloc16Factory()
data = bytearray(struct.pack(">H", rloc16))
# WHEN
rloc16_obj = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(rloc16_obj, network_layer.Rloc16))
self.assertEqual(rloc16, rloc16_obj.rloc16)
class TestMlEid(unittest.TestCase):
def test_should_return_ml_eid_value_when_ml_eid_property_is_called(self):
# GIVEN
ml_eid = any_ml_eid()
ml_eid_obj = network_layer.MlEid(ml_eid)
# WHEN
actual_ml_eid = ml_eid_obj.ml_eid
# THEN
self.assertEqual(ml_eid, actual_ml_eid)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
ml_eid = any_ml_eid()
ml_eid_obj = network_layer.MlEid(ml_eid)
# THEN
self.assertEqual(ml_eid_obj, network_layer.MlEid(ml_eid))
class TestMlEidFactory(unittest.TestCase):
def test_should_create_MlEid_from_bytearray_when_parse_method_is_called(self):
# GIVEN
ml_eid = any_ml_eid()
factory = network_layer.MlEidFactory()
# WHEN
ml_eid_obj = factory.parse(io.BytesIO(ml_eid), common.MessageInfo())
# THEN
self.assertTrue(isinstance(ml_eid_obj, network_layer.MlEid))
self.assertEqual(ml_eid, ml_eid_obj.ml_eid)
class TestStatus(unittest.TestCase):
def test_should_return_status_value_when_status_property_is_called(self):
# GIVEN
status = any_status()
status_obj = network_layer.Status(status)
# WHEN
actual_status = status_obj.status
# THEN
self.assertEqual(status, actual_status)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
status = any_status()
status_obj = network_layer.Status(status)
# THEN
self.assertEqual(status_obj, network_layer.Status(status))
class TestStatusFactory(unittest.TestCase):
def test_should_create_Status_from_bytearray_when_parse_method_is_called(self):
# GIVEN
status = any_status()
factory = network_layer.StatusFactory()
data = bytearray([status])
# WHEN
status_obj = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(status_obj, network_layer.Status))
self.assertEqual(status, status_obj.status)
class TestTimeSinceLastTransaction(unittest.TestCase):
def test_should_return_seconds_value_when_seconds_property_is_called(self):
# GIVEN
seconds = any_seconds()
time_since_last_transaction = network_layer.TimeSinceLastTransaction(seconds)
# WHEN
actual_seconds = time_since_last_transaction.seconds
# THEN
self.assertEqual(seconds, actual_seconds)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
seconds = any_seconds()
time_since_last_transaction = network_layer.TimeSinceLastTransaction(seconds)
# THEN
self.assertEqual(
time_since_last_transaction,
network_layer.TimeSinceLastTransaction(seconds),
)
class TestTimeSinceLastTransactionFactory(unittest.TestCase):
def test_should_create_TimeSinceLastTransaction_from_bytearray_when_parse_method_is_called(self):
# GIVEN
seconds = any_seconds()
factory = network_layer.TimeSinceLastTransactionFactory()
data = bytearray(struct.pack(">L", seconds))
# WHEN
time_since_last_transaction = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(
time_since_last_transaction,
network_layer.TimeSinceLastTransaction,
))
self.assertEqual(seconds, time_since_last_transaction.seconds)
class TestRouterMask(unittest.TestCase):
def test_should_return_id_sequence_value_when_id_sequence_property_is_called(self):
# GIVEN
id_sequence = any_id_sequence()
router_mask = network_layer.RouterMask(id_sequence, any_router_id_mask())
# WHEN
actual_id_sequence = router_mask.id_sequence
# THEN
self.assertEqual(id_sequence, actual_id_sequence)
def test_should_return_router_id_mask_value_when_router_id_mask_property_is_called(self):
# GIVEN
router_id_mask = any_router_id_mask()
router_mask = network_layer.RouterMask(any_id_sequence(), router_id_mask)
# WHEN
actual_router_id_mask = router_mask.router_id_mask
# THEN
self.assertEqual(router_id_mask, actual_router_id_mask)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
id_sequence = any_id_sequence()
router_id_mask = any_router_id_mask()
router_mask = network_layer.RouterMask(id_sequence, router_id_mask)
# THEN
self.assertEqual(router_mask, network_layer.RouterMask(id_sequence, router_id_mask))
class TestRouterMaskFactory(unittest.TestCase):
def test_should_create_RouterMask_from_bytearray_when_parse_method_is_called(self):
# GIVEN
id_sequence = any_id_sequence()
router_id_mask = any_router_id_mask()
factory = network_layer.RouterMaskFactory()
data = bytearray([id_sequence]) + struct.pack(">Q", router_id_mask)
# WHEN
router_mask = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(router_mask, network_layer.RouterMask))
self.assertEqual(id_sequence, router_mask.id_sequence)
self.assertEqual(router_id_mask, router_mask.router_id_mask)
class TestNdOption(unittest.TestCase):
def test_should_return_options_value_when_options_property_is_called(self):
# GIVEN
options = any_options()
nd_option = network_layer.NdOption(options)
# WHEN
actual_options = nd_option.options
# THEN
self.assertEqual(options, actual_options)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
options = any_options()
nd_option = network_layer.NdOption(options)
# THEN
self.assertEqual(nd_option, network_layer.NdOption(options))
class TestNdOptionFactory(unittest.TestCase):
def test_should_create_NdOption_from_bytearray_when_parse_method_is_called(self):
# GIVEN
options = any_options()
factory = network_layer.NdOptionFactory()
data = bytearray(options)
# WHEN
nd_option = factory.parse(io.BytesIO(data), common.MessageInfo())
# THEN
self.assertTrue(isinstance(nd_option, network_layer.NdOption))
self.assertEqual(options, nd_option.options)
class TestThreadNetworkData(unittest.TestCase):
def test_should_return_options_value_when_options_property_is_called(self):
# GIVEN
tlvs = any_tlvs_data()
thread_network_data = network_layer.ThreadNetworkData(tlvs)
# WHEN
actual_tlvs = thread_network_data.tlvs
# THEN
self.assertEqual(tlvs, actual_tlvs)
def test_should_return_True_when_try_to_equal_two_the_same_type_objects_with_the_same_values(self):
# GIVEN
tlvs = any_tlvs_data()
thread_network_data = network_layer.ThreadNetworkData(tlvs)
# THEN
self.assertEqual(thread_network_data, network_layer.ThreadNetworkData(tlvs))
class TestThreadNetworkDataFactory(unittest.TestCase):
def test_should_create_ThreadNetworkData_from_bytearray_when_parse_method_is_called(self):
# GIVEN
tlvs = any_tlvs_data()
class DummyNetworkDataTlvsFactory:
def parse(self, data, message_info):
return bytearray(data.read())
factory = network_layer.ThreadNetworkDataFactory(DummyNetworkDataTlvsFactory())
# WHEN
thread_network_data = factory.parse(io.BytesIO(tlvs), common.MessageInfo())
# THEN
self.assertTrue(isinstance(thread_network_data, network_layer.ThreadNetworkData))
self.assertEqual(tlvs, thread_network_data.tlvs)
if __name__ == "__main__":
unittest.main()
|
salt/modules/kernelpkg_linux_apt.py | markgras/salt | 9,425 | 8077 | <filename>salt/modules/kernelpkg_linux_apt.py
"""
Manage Linux kernel packages on APT-based systems
"""
import functools
import logging
import re
try:
from salt.utils.versions import LooseVersion as _LooseVersion
from salt.exceptions import CommandExecutionError
HAS_REQUIRED_LIBS = True
except ImportError:
HAS_REQUIRED_LIBS = False
log = logging.getLogger(__name__)
__virtualname__ = "kernelpkg"
def __virtual__():
"""
Load this module on Debian-based systems only
"""
if not HAS_REQUIRED_LIBS:
return (False, "Required library could not be imported")
if __grains__.get("os_family", "") in ("Kali", "Debian"):
return __virtualname__
elif __grains__.get("os_family", "") == "Cumulus":
return __virtualname__
return (False, "Module kernelpkg_linux_apt: no APT based system detected")
def active():
"""
Return the version of the running kernel.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.active
"""
if "pkg.normalize_name" in __salt__:
return __salt__["pkg.normalize_name"](__grains__["kernelrelease"])
return __grains__["kernelrelease"]
def list_installed():
"""
Return a list of all installed kernels.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.list_installed
"""
pkg_re = re.compile(r"^{}-[\d.-]+-{}$".format(_package_prefix(), _kernel_type()))
pkgs = __salt__["pkg.list_pkgs"](versions_as_list=True)
if pkgs is None:
pkgs = []
result = list(filter(pkg_re.match, pkgs))
if result is None:
return []
prefix_len = len(_package_prefix()) + 1
return sorted(
[pkg[prefix_len:] for pkg in result], key=functools.cmp_to_key(_cmp_version)
)
def latest_available():
"""
Return the version of the latest kernel from the package repositories.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.latest_available
"""
result = __salt__["pkg.latest_version"](
"{}-{}".format(_package_prefix(), _kernel_type())
)
if result == "":
return latest_installed()
version = re.match(r"^(\d+\.\d+\.\d+)\.(\d+)", result)
return "{}-{}-{}".format(version.group(1), version.group(2), _kernel_type())
def latest_installed():
"""
Return the version of the latest installed kernel.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.latest_installed
.. note::
This function may not return the same value as
:py:func:`~salt.modules.kernelpkg_linux_apt.active` if a new kernel
has been installed and the system has not yet been rebooted.
The :py:func:`~salt.modules.kernelpkg_linux_apt.needs_reboot` function
exists to detect this condition.
"""
pkgs = list_installed()
if pkgs:
return pkgs[-1]
return None
def needs_reboot():
"""
Detect if a new kernel version has been installed but is not running.
Returns True if a new kernel is installed, False otherwise.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.needs_reboot
"""
return _LooseVersion(active()) < _LooseVersion(latest_installed())
def upgrade(reboot=False, at_time=None):
"""
Upgrade the kernel and optionally reboot the system.
reboot : False
Request a reboot if a new kernel is available.
at_time : immediate
Schedule the reboot at some point in the future. This argument
is ignored if ``reboot=False``. See
:py:func:`~salt.modules.system.reboot` for more details
on this argument.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.upgrade
salt '*' kernelpkg.upgrade reboot=True at_time=1
.. note::
An immediate reboot often shuts down the system before the minion has a
chance to return, resulting in errors. A minimal delay (1 minute) is
useful to ensure the result is delivered to the master.
"""
result = __salt__["pkg.install"](
name="{}-{}".format(_package_prefix(), latest_available())
)
_needs_reboot = needs_reboot()
ret = {
"upgrades": result,
"active": active(),
"latest_installed": latest_installed(),
"reboot_requested": reboot,
"reboot_required": _needs_reboot,
}
if reboot and _needs_reboot:
log.warning("Rebooting system due to kernel upgrade")
__salt__["system.reboot"](at_time=at_time)
return ret
def upgrade_available():
"""
Detect if a new kernel version is available in the repositories.
Returns True if a new kernel is available, False otherwise.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.upgrade_available
"""
return _LooseVersion(latest_available()) > _LooseVersion(latest_installed())
def remove(release):
"""
Remove a specific version of the kernel.
release
The release number of an installed kernel. This must be the entire release
number as returned by :py:func:`~salt.modules.kernelpkg_linux_apt.list_installed`,
not the package name.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.remove 4.4.0-70-generic
"""
if release not in list_installed():
raise CommandExecutionError(
"Kernel release '{}' is not installed".format(release)
)
if release == active():
raise CommandExecutionError("Active kernel cannot be removed")
target = "{}-{}".format(_package_prefix(), release)
log.info("Removing kernel package %s", target)
__salt__["pkg.purge"](target)
return {"removed": [target]}
def cleanup(keep_latest=True):
"""
Remove all unused kernel packages from the system.
keep_latest : True
In the event that the active kernel is not the latest one installed, setting this to True
will retain the latest kernel package, in addition to the active one. If False, all kernel
packages other than the active one will be removed.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.cleanup
"""
removed = []
# Loop over all installed kernel packages
for kernel in list_installed():
# Keep the active kernel package
if kernel == active():
continue
# Optionally keep the latest kernel package
if keep_latest and kernel == latest_installed():
continue
# Remove the kernel package
removed.extend(remove(kernel)["removed"])
return {"removed": removed}
def _package_prefix():
"""
Return static string for the package prefix
"""
return "linux-image"
def _kernel_type():
"""
Parse the kernel name and return its type
"""
return re.match(r"^[\d.-]+-(.+)$", active()).group(1)
def _cmp_version(item1, item2):
"""
Compare function for package version sorting
"""
vers1 = _LooseVersion(item1)
vers2 = _LooseVersion(item2)
if vers1 < vers2:
return -1
if vers1 > vers2:
return 1
return 0
|
src/hammer-vlsi/technology/sky130/sram_compiler/__init__.py | httpsgithu/hammer | 138 | 8084 | <reponame>httpsgithu/hammer
import os, tempfile, subprocess
from hammer_vlsi import MMMCCorner, MMMCCornerType, HammerTool, HammerToolStep, HammerSRAMGeneratorTool, SRAMParameters
from hammer_vlsi.units import VoltageValue, TemperatureValue
from hammer_tech import Library, ExtraLibrary
from typing import NamedTuple, Dict, Any, List
from abc import ABCMeta, abstractmethod
class SKY130SRAMGenerator(HammerSRAMGeneratorTool):
def tool_config_prefix(self) -> str:
return "sram_generator.sky130"
def version_number(self, version: str) -> int:
return 0
# Run generator for a single sram and corner
def generate_sram(self, params: SRAMParameters, corner: MMMCCorner) -> ExtraLibrary:
tech_cache_dir = os.path.abspath(self.technology.cache_dir)
#TODO: this is really an abuse of the corner stuff
if corner.type == MMMCCornerType.Setup:
speed_name = "slow"
speed = "SS"
elif corner.type == MMMCCornerType.Hold:
speed_name = "fast"
speed = "FF"
elif corner.type == MMMCCornerType.Extra:
speed_name = "typical"
speed = "TT"
# Different target memories based on port count
# if params.family == "1rw":
# self.logger.info("Compiling 1rw memories to DFFRAM instances")
# base_dir = self.get_setting("technology.sky130.dffram_lib")
# fam_code = params.family
# sram_name = "RAM{d}x{w}".format(
# d=params.depth,
# w=params.width)
# #TODO: need real libs (perhaps run Liberate here?)
# #For now, use the dummy lib for all corners
# corner_str = "" #
# lib_path = "{b}/{n}.lib".format(
# b=base_dir,
# n=sram_name)
# if not os.path.exists(lib_path):
# self.logger.error("SKY130 1rw1r SRAM cache does not support corner: {c}".format(c=corner_str))
# return ExtraLibrary(prefix=None, library=Library(
# name=sram_name,
# nldm_liberty_file=lib_path,
# lef_file="{b}/{n}/{n}.lef".format(b=base_dir,n=sram_name),
# #TODO: GDS not generated. Unclear which DEF to use?
# #gds_file="{b}/{n}/{n}.gds".format(b=base_dir,n=sram_name),
# spice_file="{b}/{n}/{n}.spice".format(b=base_dir,n=sram_name),
# #TODO: Will not work as-is for behav. sim (this is a structural netlist referencing std. cells)
# #Need to add std cell behavioral Verilog to sim.inputs.input_files
# verilog_sim="{b}/{n}/{n}.nl.v".format(b=base_dir,n=sram_name),
# corner={'nmos': speed_name, 'pmos': speed_name, 'temperature': str(corner.temp.value_in_units("C")) + " C"},
# supplies={'VDD': str(corner.voltage.value_in_units("V")) + " V", 'GND': "0 V"},
# provides=[{'lib_type': "sram", 'vt': params.vt}]))
# elif params.family == "1rw1r":
if params.family == "1rw":
self.logger.info("Compiling 1rw1r memories to OpenRAM instances")
base_dir = self.get_setting("technology.sky130.openram_lib")
fam_code = params.family
s=round(round(params.width*params.depth/8, -3)/1000) # size in kB (bytes rounded to the nearest 1000)
w=params.width
d=params.depth
m=8
sram_name = f"sky130_sram_{s}kbyte_1rw1r_{w}x{d}_{m}"
print(f"SRAM_NAME: {sram_name}")
#TODO: Hammer SRAMParameters doesn't have this info
#TODO: replace this if OpenRAM characterization done for other corners
#For now, use typical lib for all corners
corner_str = "TT_1p8V_25C"
#corner_str = "{speed}_{volt}V_{temp}C".format(
# speed = speed,
# volt = str(corner.voltage.value_in_units("V")).replace(".","p"),
# temp = str(int(corner.temp.value_in_units("C"))).replace(".","p"))
lib_path = "{b}/{n}/{n}_{c}.lib".format(
b=base_dir,
n=sram_name,
c=corner_str)
if not os.path.exists(lib_path):
self.logger.error("SKY130 1rw1r SRAM cache does not support corner: {c}".format(c=corner_str))
return ExtraLibrary(prefix=None, library=Library(
name=sram_name,
nldm_liberty_file=lib_path,
lef_file="{b}/{n}/{n}.lef".format(b=base_dir,n=sram_name),
gds_file="{b}/{n}/{n}.gds".format(b=base_dir,n=sram_name),
spice_file="{b}/{n}/{n}.lvs.sp".format(b=base_dir,n=sram_name),
verilog_sim="{b}/{n}/{n}.v".format(b=base_dir,n=sram_name),
corner={'nmos': speed_name, 'pmos': speed_name, 'temperature': str(corner.temp.value_in_units("C")) + " C"},
supplies={'VDD': str(corner.voltage.value_in_units("V")) + " V", 'GND': "0 V"},
provides=[{'lib_type': "sram", 'vt': params.vt}]))
else:
self.logger.error("SKY130 SRAM cache does not support family:{f}".format(f=params.family))
return ExtraLibrary(prefix=None, library=None)
tool=SKY130SRAMGenerator
|
nemo/collections/tts/torch/data.py | MalikIdreesHasanKhan/NeMo | 4,145 | 8088 | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pickle
from pathlib import Path
from typing import Callable, Dict, List, Optional, Union
import librosa
import torch
from nemo_text_processing.text_normalization.normalize import Normalizer
from tqdm import tqdm
from nemo.collections.asr.parts.preprocessing.features import WaveformFeaturizer
from nemo.collections.tts.torch.helpers import (
BetaBinomialInterpolator,
beta_binomial_prior_distribution,
general_padding,
)
from nemo.collections.tts.torch.tts_data_types import (
DATA_STR2DATA_CLASS,
MAIN_DATA_TYPES,
VALID_SUPPLEMENTARY_DATA_TYPES,
DurationPrior,
Durations,
Energy,
LMTokens,
LogMel,
Pitch,
SpeakerID,
WithLens,
)
from nemo.collections.tts.torch.tts_tokenizers import BaseTokenizer, EnglishCharsTokenizer, EnglishPhonemesTokenizer
from nemo.core.classes import Dataset
from nemo.utils import logging
class TTSDataset(Dataset):
def __init__(
self,
manifest_filepath: str,
sample_rate: int,
text_tokenizer: Union[BaseTokenizer, Callable[[str], List[int]]],
tokens: Optional[List[str]] = None,
text_normalizer: Optional[Union[Normalizer, Callable[[str], str]]] = None,
text_normalizer_call_args: Optional[Dict] = None,
text_tokenizer_pad_id: Optional[int] = None,
sup_data_types: Optional[List[str]] = None,
sup_data_path: Optional[Union[Path, str]] = None,
max_duration: Optional[float] = None,
min_duration: Optional[float] = None,
ignore_file: Optional[str] = None,
trim: bool = False,
n_fft=1024,
win_length=None,
hop_length=None,
window="hann",
n_mels=80,
lowfreq=0,
highfreq=None,
**kwargs,
):
"""Dataset that loads main data types (audio and text) and specified supplementary data types (e.g. log mel, durations, pitch).
Most supplementary data types are computed on the fly and cached under sup_data_path if they do not exist yet.
Arguments for the supplementary data types are also passed to this class and read from kwargs (see the Keyword Args section).
Args:
manifest_filepath (str, Path, List[str, Path]): Path(s) to the .json manifests containing information on the
dataset. Each line in the .json file should be valid json. Note: the .json file itself is not valid
json. Each line should contain the following:
"audio_filepath": <PATH_TO_WAV>
"mel_filepath": <PATH_TO_LOG_MEL_PT> (Optional)
"duration": <Duration of audio clip in seconds> (Optional)
"text": <THE_TRANSCRIPT> (Optional)
sample_rate (int): The sample rate of the audio. Or the sample rate that we will resample all files to.
text_tokenizer (Optional[Union[BaseTokenizer, Callable[[str], List[int]]]]): BaseTokenizer or callable which represents text tokenizer.
tokens (Optional[List[str]]): Tokens from text_tokenizer. Should be specified if text_tokenizer is not BaseTokenizer.
text_normalizer (Optional[Union[Normalizer, Callable[[str], str]]]): Normalizer or callable which represents text normalizer.
text_normalizer_call_args (Optional[Dict]): Additional arguments for text_normalizer function.
text_tokenizer_pad_id (Optional[int]): Index of padding. Should be specified if text_tokenizer is not BaseTokenizer.
sup_data_types (Optional[List[str]]): List of supplementary data types.
sup_data_path (Optional[Union[Path, str]]): A folder that contains or will contain supplementary data (e.g. pitch).
max_duration (Optional[float]): Max duration of audio clips in seconds. All samples exceeding this will be
pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load
audio to compute duration. Defaults to None which does not prune.
min_duration (Optional[float]): Min duration of audio clips in seconds. All samples lower than this will be
pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load
audio to compute duration. Defaults to None which does not prune.
ignore_file (Optional[str, Path]): The location of a pickle-saved list of audio_ids (the stem of the audio
files) that will be pruned prior to training. Defaults to None which does not prune.
trim (Optional[bool]): Whether to apply librosa.effects.trim to the audio file. Defaults to False.
n_fft (Optional[int]): The number of fft samples. Defaults to 1024
win_length (Optional[int]): The length of the stft windows. Defaults to None which uses n_fft.
hop_length (Optional[int]): The hop length between fft computations. Defaults to None which uses n_fft//4.
window (Optional[str]): One of 'hann', 'hamming', 'blackman','bartlett', 'none'. Which corresponds to the
equivalent torch window function.
n_mels (Optional[int]): The number of mel filters. Defaults to 80.
lowfreq (Optional[int]): The lowfreq input to the mel filter calculation. Defaults to 0.
highfreq (Optional[int]): The highfreq input to the mel filter calculation. Defaults to None.
Keyword Args:
durs_file (Optional[str]): String path to pickled durations location.
durs_type (Optional[str]): Type of durations. Currently only "aligner-based" is supported.
use_beta_binomial_interpolator (Optional[bool]): Whether to use beta-binomial interpolator. Defaults to False.
pitch_fmin (Optional[float]): The fmin input to librosa.pyin. Defaults to librosa.note_to_hz('C2').
pitch_fmax (Optional[float]): The fmax input to librosa.pyin. Defaults to librosa.note_to_hz('C7').
pitch_avg (Optional[float]): The mean that we use to normalize the pitch.
pitch_std (Optional[float]): The std that we use to normalize the pitch.
pitch_norm (Optional[bool]): Whether to normalize pitch (via pitch_avg and pitch_std) or not.
"""
super().__init__()
self.text_normalizer = text_normalizer
self.text_normalizer_call = (
self.text_normalizer.normalize if isinstance(self.text_normalizer, Normalizer) else self.text_normalizer
)
self.text_normalizer_call_args = text_normalizer_call_args if text_normalizer_call_args is not None else {}
self.text_tokenizer = text_tokenizer
if isinstance(self.text_tokenizer, BaseTokenizer):
self.text_tokenizer_pad_id = text_tokenizer.pad
self.tokens = text_tokenizer.tokens
else:
if text_tokenizer_pad_id is None:
raise ValueError(f"text_tokenizer_pad_id must be specified if text_tokenizer is not BaseTokenizer")
if tokens is None:
raise ValueError(f"tokens must be specified if text_tokenizer is not BaseTokenizer")
self.text_tokenizer_pad_id = text_tokenizer_pad_id
self.tokens = tokens
if isinstance(manifest_filepath, str):
manifest_filepath = [manifest_filepath]
self.manifest_filepath = manifest_filepath
if sup_data_path is not None:
Path(sup_data_path).mkdir(parents=True, exist_ok=True)
self.sup_data_path = sup_data_path
self.sup_data_types = (
[DATA_STR2DATA_CLASS[d_as_str] for d_as_str in sup_data_types] if sup_data_types is not None else []
)
self.sup_data_types_set = set(self.sup_data_types)
self.data = []
audio_files = []
total_duration = 0
for manifest_file in self.manifest_filepath:
with open(Path(manifest_file).expanduser(), 'r') as f:
logging.info(f"Loading dataset from {manifest_file}.")
for line in tqdm(f):
item = json.loads(line)
file_info = {
"audio_filepath": item["audio_filepath"],
"mel_filepath": item["mel_filepath"] if "mel_filepath" in item else None,
"duration": item["duration"] if "duration" in item else None,
"text_tokens": None,
"speaker_id": item["speaker"] if "speaker" in item else None,
}
if "text" in item:
text = item["text"]
if self.text_normalizer is not None:
text = self.text_normalizer_call(text, **self.text_normalizer_call_args)
text_tokens = self.text_tokenizer(text)
file_info["raw_text"] = item["text"]
file_info["text_tokens"] = text_tokens
audio_files.append(file_info)
if file_info["duration"] is None:
logging.info(
"Not all audio files have duration information. Duration logging will be disabled."
)
total_duration = None
if total_duration is not None:
total_duration += item["duration"]
logging.info(f"Loaded dataset with {len(audio_files)} files.")
if total_duration is not None:
logging.info(f"Dataset contains {total_duration / 3600:.2f} hours.")
if ignore_file:
logging.info(f"using {ignore_file} to prune dataset.")
with open(Path(ignore_file).expanduser(), "rb") as f:
wavs_to_ignore = set(pickle.load(f))
pruned_duration = 0 if total_duration is not None else None
pruned_items = 0
for item in audio_files:
audio_path = item['audio_filepath']
audio_id = Path(audio_path).stem
# Prune data according to min/max_duration & the ignore file
if total_duration is not None:
if (min_duration and item["duration"] < min_duration) or (
max_duration and item["duration"] > max_duration
):
pruned_duration += item["duration"]
pruned_items += 1
continue
if ignore_file and (audio_id in wavs_to_ignore):
pruned_items += 1
pruned_duration += item["duration"]
wavs_to_ignore.remove(audio_id)
continue
self.data.append(item)
logging.info(f"Pruned {pruned_items} files. Final dataset contains {len(self.data)} files")
if pruned_duration is not None:
logging.info(
f"Pruned {pruned_duration / 3600:.2f} hours. Final dataset contains "
f"{(total_duration - pruned_duration) / 3600:.2f} hours."
)
self.sample_rate = sample_rate
self.featurizer = WaveformFeaturizer(sample_rate=self.sample_rate)
self.trim = trim
self.n_fft = n_fft
self.n_mels = n_mels
self.lowfreq = lowfreq
self.highfreq = highfreq
self.window = window
self.win_length = win_length or self.n_fft
self.hop_length = hop_length
self.hop_len = self.hop_length or self.n_fft // 4
self.fb = torch.tensor(
librosa.filters.mel(
self.sample_rate, self.n_fft, n_mels=self.n_mels, fmin=self.lowfreq, fmax=self.highfreq
),
dtype=torch.float,
).unsqueeze(0)
window_fn = {
'hann': torch.hann_window,
'hamming': torch.hamming_window,
'blackman': torch.blackman_window,
'bartlett': torch.bartlett_window,
'none': None,
}.get(self.window, None)
self.stft = lambda x: torch.stft(
input=x,
n_fft=self.n_fft,
hop_length=self.hop_len,
win_length=self.win_length,
window=window_fn(self.win_length, periodic=False).to(torch.float) if window_fn else None,
)
for data_type in self.sup_data_types:
if data_type not in VALID_SUPPLEMENTARY_DATA_TYPES:
raise NotImplementedError(f"Current implementation of TTSDataset doesn't support {data_type} type.")
getattr(self, f"add_{data_type.name}")(**kwargs)
def add_log_mel(self, **kwargs):
pass
def add_durations(self, **kwargs):
durs_file = kwargs.pop('durs_file')
durs_type = kwargs.pop('durs_type')
audio_stem2durs = torch.load(durs_file)
self.durs = []
for tag in [Path(d["audio_filepath"]).stem for d in self.data]:
durs = audio_stem2durs[tag]
if durs_type == "aligner-based":
self.durs.append(durs)
else:
raise NotImplementedError(
f"{durs_type} duration type is not supported. Only align-based is supported at this moment."
)
def add_duration_prior(self, **kwargs):
self.use_beta_binomial_interpolator = kwargs.pop('use_beta_binomial_interpolator', False)
if self.use_beta_binomial_interpolator:
self.beta_binomial_interpolator = BetaBinomialInterpolator()
def add_pitch(self, **kwargs):
self.pitch_fmin = kwargs.pop("pitch_fmin", librosa.note_to_hz('C2'))
self.pitch_fmax = kwargs.pop("pitch_fmax", librosa.note_to_hz('C7'))
self.pitch_avg = kwargs.pop("pitch_avg", None)
self.pitch_std = kwargs.pop("pitch_std", None)
self.pitch_norm = kwargs.pop("pitch_norm", False)
def add_energy(self, **kwargs):
pass
def add_speaker_id(self, **kwargs):
pass
def get_spec(self, audio):
with torch.cuda.amp.autocast(enabled=False):
spec = self.stft(audio)
if spec.dtype in [torch.cfloat, torch.cdouble]:
spec = torch.view_as_real(spec)
spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-9)
return spec
def get_log_mel(self, audio):
with torch.cuda.amp.autocast(enabled=False):
spec = self.get_spec(audio)
mel = torch.matmul(self.fb.to(spec.dtype), spec)
log_mel = torch.log(torch.clamp(mel, min=torch.finfo(mel.dtype).tiny))
return log_mel
def __getitem__(self, index):
sample = self.data[index]
audio_stem = Path(sample["audio_filepath"]).stem
features = self.featurizer.process(sample["audio_filepath"], trim=self.trim)
audio, audio_length = features, torch.tensor(features.shape[0]).long()
text = torch.tensor(sample["text_tokens"]).long()
text_length = torch.tensor(len(sample["text_tokens"])).long()
log_mel, log_mel_length = None, None
if LogMel in self.sup_data_types_set:
mel_path = sample["mel_filepath"]
if mel_path is not None and Path(mel_path).exists():
log_mel = torch.load(mel_path)
else:
mel_path = Path(self.sup_data_path) / f"mel_{audio_stem}.pt"
if mel_path.exists():
log_mel = torch.load(mel_path)
else:
log_mel = self.get_log_mel(audio)
torch.save(log_mel, mel_path)
log_mel = log_mel.squeeze(0)
log_mel_length = torch.tensor(log_mel.shape[1]).long()
durations = None
if Durations in self.sup_data_types_set:
durations = self.durs[index]
duration_prior = None
if DurationPrior in self.sup_data_types_set:
if self.use_beta_binomial_interpolator:
mel_len = self.get_log_mel(audio).shape[2]
duration_prior = torch.from_numpy(self.beta_binomial_interpolator(mel_len, text_length.item()))
else:
prior_path = Path(self.sup_data_path) / f"pr_{audio_stem}.pt"
if prior_path.exists():
duration_prior = torch.load(prior_path)
else:
mel_len = self.get_log_mel(audio).shape[2]
duration_prior = beta_binomial_prior_distribution(text_length, mel_len)
duration_prior = torch.from_numpy(duration_prior)
torch.save(duration_prior, prior_path)
pitch, pitch_length = None, None
if Pitch in self.sup_data_types_set:
pitch_name = (
f"{audio_stem}_pitch_pyin_"
f"fmin{self.pitch_fmin}_fmax{self.pitch_fmax}_"
f"fl{self.win_length}_hs{self.hop_len}.pt"
)
pitch_path = Path(self.sup_data_path) / pitch_name
if pitch_path.exists():
pitch = torch.load(pitch_path).float()
else:
pitch, _, _ = librosa.pyin(
audio.numpy(),
fmin=self.pitch_fmin,
fmax=self.pitch_fmax,
frame_length=self.win_length,
sr=self.sample_rate,
fill_na=0.0,
)
pitch = torch.from_numpy(pitch).float()
torch.save(pitch, pitch_path)
if self.pitch_avg is not None and self.pitch_std is not None and self.pitch_norm:
pitch -= self.pitch_avg
pitch[pitch == -self.pitch_avg] = 0.0 # Zero out values that were previously zero
pitch /= self.pitch_std
pitch_length = torch.tensor(len(pitch)).long()
energy, energy_length = None, None
if Energy in self.sup_data_types_set:
energy_path = Path(self.sup_data_path) / f"{audio_stem}_energy_wl{self.win_length}_hs{self.hop_len}.pt"
if energy_path.exists():
energy = torch.load(energy_path).float()
else:
spec = self.get_spec(audio)
energy = torch.linalg.norm(spec.squeeze(0), axis=0).float()
torch.save(energy, energy_path)
energy_length = torch.tensor(len(energy)).long()
speaker_id = None
if SpeakerID in self.sup_data_types_set:
speaker_id = torch.tensor(sample["speaker_id"]).long()
return (
audio,
audio_length,
text,
text_length,
log_mel,
log_mel_length,
durations,
duration_prior,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
)
def __len__(self):
return len(self.data)
def join_data(self, data_dict):
result = []
for data_type in MAIN_DATA_TYPES + self.sup_data_types:
result.append(data_dict[data_type.name])
if issubclass(data_type, WithLens):
result.append(data_dict[f"{data_type.name}_lens"])
return tuple(result)
def general_collate_fn(self, batch):
(
_,
audio_lengths,
_,
tokens_lengths,
_,
log_mel_lengths,
durations_list,
duration_priors_list,
pitches,
pitches_lengths,
energies,
energies_lengths,
_,
) = zip(*batch)
max_audio_len = max(audio_lengths).item()
max_tokens_len = max(tokens_lengths).item()
max_log_mel_len = max(log_mel_lengths) if LogMel in self.sup_data_types_set else None
max_durations_len = max([len(i) for i in durations_list]) if Durations in self.sup_data_types_set else None
max_pitches_len = max(pitches_lengths).item() if Pitch in self.sup_data_types_set else None
max_energies_len = max(energies_lengths).item() if Energy in self.sup_data_types_set else None
if LogMel in self.sup_data_types_set:
log_mel_pad = torch.finfo(batch[0][2].dtype).tiny
duration_priors = (
torch.zeros(
len(duration_priors_list),
max([prior_i.shape[0] for prior_i in duration_priors_list]),
max([prior_i.shape[1] for prior_i in duration_priors_list]),
)
if DurationPrior in self.sup_data_types_set
else []
)
audios, tokens, log_mels, durations_list, pitches, energies, speaker_ids = [], [], [], [], [], [], []
for i, sample_tuple in enumerate(batch):
(
audio,
audio_len,
token,
token_len,
log_mel,
log_mel_len,
durations,
duration_prior,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
) = sample_tuple
audio = general_padding(audio, audio_len.item(), max_audio_len)
audios.append(audio)
token = general_padding(token, token_len.item(), max_tokens_len, pad_value=self.text_tokenizer_pad_id)
tokens.append(token)
if LogMel in self.sup_data_types_set:
log_mels.append(general_padding(log_mel, log_mel_len, max_log_mel_len, pad_value=log_mel_pad))
if Durations in self.sup_data_types_set:
durations_list.append(general_padding(durations, len(durations), max_durations_len))
if DurationPrior in self.sup_data_types_set:
duration_priors[i, : duration_prior.shape[0], : duration_prior.shape[1]] = duration_prior
if Pitch in self.sup_data_types_set:
pitches.append(general_padding(pitch, pitch_length.item(), max_pitches_len))
if Energy in self.sup_data_types_set:
energies.append(general_padding(energy, energy_length.item(), max_energies_len))
if SpeakerID in self.sup_data_types_set:
speaker_ids.append(speaker_id)
data_dict = {
"audio": torch.stack(audios),
"audio_lens": torch.stack(audio_lengths),
"text": torch.stack(tokens),
"text_lens": torch.stack(tokens_lengths),
"log_mel": torch.stack(log_mels) if LogMel in self.sup_data_types_set else None,
"log_mel_lens": torch.stack(log_mel_lengths) if LogMel in self.sup_data_types_set else None,
"durations": torch.stack(durations_list) if Durations in self.sup_data_types_set else None,
"duration_prior": duration_priors if DurationPrior in self.sup_data_types_set else None,
"pitch": torch.stack(pitches) if Pitch in self.sup_data_types_set else None,
"pitch_lens": torch.stack(pitches_lengths) if Pitch in self.sup_data_types_set else None,
"energy": torch.stack(energies) if Energy in self.sup_data_types_set else None,
"energy_lens": torch.stack(energies_lengths) if Energy in self.sup_data_types_set else None,
"speaker_id": torch.stack(speaker_ids) if SpeakerID in self.sup_data_types_set else None,
}
return data_dict
def _collate_fn(self, batch):
data_dict = self.general_collate_fn(batch)
joined_data = self.join_data(data_dict)
return joined_data
class MixerTTSDataset(TTSDataset):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def _albert(self):
from transformers import AlbertTokenizer # noqa pylint: disable=import-outside-toplevel
self.lm_model_tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
self.lm_padding_value = self.lm_model_tokenizer._convert_token_to_id('<pad>')
space_value = self.lm_model_tokenizer._convert_token_to_id('▁')
self.id2lm_tokens = {}
for i, d in enumerate(self.data):
raw_text = d["raw_text"]
assert isinstance(self.text_tokenizer, EnglishPhonemesTokenizer) or isinstance(
self.text_tokenizer, EnglishCharsTokenizer
)
preprocess_text_as_tts_input = self.text_tokenizer.text_preprocessing_func(raw_text)
lm_tokens_as_ids = self.lm_model_tokenizer.encode(preprocess_text_as_tts_input, add_special_tokens=False)
if self.text_tokenizer.pad_with_space:
lm_tokens_as_ids = [space_value] + lm_tokens_as_ids + [space_value]
self.id2lm_tokens[i] = lm_tokens_as_ids
def add_lm_tokens(self, **kwargs):
lm_model = kwargs.pop('lm_model')
if lm_model == "albert":
self._albert()
else:
raise NotImplementedError(
f"{lm_model} lm model is not supported. Only albert is supported at this moment."
)
def __getitem__(self, index):
(
audio,
audio_length,
text,
text_length,
log_mel,
log_mel_length,
durations,
duration_prior,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
) = super().__getitem__(index)
lm_tokens = None
if LMTokens in self.sup_data_types_set:
lm_tokens = torch.tensor(self.id2lm_tokens[index]).long()
return (
audio,
audio_length,
text,
text_length,
log_mel,
log_mel_length,
durations,
duration_prior,
pitch,
pitch_length,
energy,
energy_length,
speaker_id,
lm_tokens,
)
def _collate_fn(self, batch):
batch = list(zip(*batch))
data_dict = self.general_collate_fn(list(zip(*batch[:13])))
lm_tokens_list = batch[13]
if LMTokens in self.sup_data_types_set:
lm_tokens = torch.full(
(len(lm_tokens_list), max([lm_tokens.shape[0] for lm_tokens in lm_tokens_list])),
fill_value=self.lm_padding_value,
)
for i, lm_tokens_i in enumerate(lm_tokens_list):
lm_tokens[i, : lm_tokens_i.shape[0]] = lm_tokens_i
data_dict[LMTokens.name] = lm_tokens
joined_data = self.join_data(data_dict)
return joined_data
|