max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars) |
---|---|---|---|---|
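Each row below pairs a source file's path, repository, and star count with the file's full text in the `content` column. As a minimal sketch of reading such a row (assuming the table is hosted as a Hugging Face dataset; the identifier `org/code-sample` is a placeholder, not the real name):

```python
# Hedged sketch: inspect one row of a dataset with the schema above.
from datasets import load_dataset  # Hugging Face `datasets` library

ds = load_dataset("org/code-sample", split="train")  # placeholder dataset id
row = ds[0]
print(row["max_stars_repo_path"], row["max_stars_repo_name"], row["max_stars_count"], row["id"])
print(row["content"][:200])  # first 200 characters of the stored source file
```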
shenfun/laguerre/matrices.py | spectralDNS/shenfun | 138 | 12754031 | <filename>shenfun/laguerre/matrices.py
import functools
from shenfun.matrixbase import SpectralMatrix
from shenfun.la import TDMA_O
from . import bases
SD = bases.ShenDirichlet
L = bases.Orthogonal
class BLLmat(SpectralMatrix):
r"""Mass matrix for inner product
.. math::
B_{kj} = (L_j, L_k)_w
where
.. math::
j = 0, 1, ..., N \text{ and } k = 0, 1, ..., N
and :math:`L_k` is the Laguerre function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], L)
assert isinstance(trial[0], L)
SpectralMatrix.__init__(self, {0:1}, test, trial, scale=scale, measure=measure)
def solve(self, b, u=None, axis=0, constraints=()):
if u is not None:
u[:] = b
u /= (self.scale*self[0])
return u
else:
b /= (self.scale*self[0])
return b
def matvec(self, v, c, format=None, axis=0):
c[:] = v
self.scale_array(c, self.scale*self[0])
return c
class BSDSDmat(SpectralMatrix):
r"""Mass matrix for inner product
.. math::
B_{kj} = (\phi_j, \phi_k)_w
where
.. math::
j = 0, 1, ..., N-1 \text{ and } k = 0, 1, ..., N-1
and :math:`\phi_k` is the Laguerre (function) Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], SD)
assert isinstance(trial[0], SD)
d = {0:2., 1: -1., -1:-1.}
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
def get_solver(self):
return TDMA_O
class ASDSDmat(SpectralMatrix):
r"""Mass matrix for inner product
.. math::
A_{kj} = (\phi'_j, \phi'_k)_w
where
.. math::
j = 0, 1, ..., N-1 \text{ and } k = 0, 1, ..., N-1
and :math:`\phi_k` is the Laguerre (function) Dirichlet basis function.
"""
def __init__(self, test, trial, scale=1, measure=1):
assert isinstance(test[0], SD)
assert isinstance(trial[0], SD)
d = {0: 0.5,
1: 0.25,
-1: 0.25}
SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
def get_solver(self):
return TDMA_O
class _Lagmatrix(SpectralMatrix):
def __init__(self, test, trial, measure=1):
SpectralMatrix.__init__(self, {}, test, trial, measure=measure)
class _LagMatDict(dict):
"""Dictionary of inner product matrices
Matrices that are missing keys are generated from Vandermonde type
computations.
"""
def __missing__(self, key):
measure = 1 if len(key) == 2 else key[3]
c = functools.partial(_Lagmatrix, measure=measure)
self[key] = c
return c
def __getitem__(self, key):
matrix = dict.__getitem__(self, key)
return matrix
mat = _LagMatDict({
((SD, 0), (SD, 0)): BSDSDmat,
((SD, 1), (SD, 1)): ASDSDmat,
((L, 0), (L, 0)): BLLmat
})
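# --- Hedged usage sketch (not part of the original module) ---
# These matrices are normally assembled through shenfun's ``inner`` product rather
# than instantiated directly; the exact FunctionSpace arguments below are an
# assumption and may differ between shenfun versions.
#
#   from shenfun import FunctionSpace, TestFunction, TrialFunction, inner
#   SD = FunctionSpace(40, 'Laguerre', bc=(0,))   # Shen-Dirichlet Laguerre basis
#   u, v = TrialFunction(SD), TestFunction(SD)
#   B = inner(v, u)   # looks up ((SD, 0), (SD, 0)) in ``mat`` and returns a BSDSDmat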
|
all/experiments/experiment.py | drozzy/autonomous-learning-library | 584 | 12754060 | from abc import ABC, abstractmethod
import numpy as np
from scipy import stats
import torch
class Experiment(ABC):
'''
An Experiment manages the basic train/test loop and logs results.
Args:
writer (:torch.logging.writer:): A Writer object used for logging.
quiet (bool): If False, the Experiment will print information about
episode returns to standard out.
'''
def __init__(self, writer, quiet):
self._writer = writer
self._quiet = quiet
self._best_returns = -np.inf
self._returns100 = []
@abstractmethod
def train(self, frames=np.inf, episodes=np.inf):
'''
Train the agent for a certain number of frames or episodes.
If both frames and episodes are specified, then the training loop will exit
when either condition is satisfied.
Args:
frames (int): The maximum number of training frames.
episodes (int): The maximum number of training episodes.
'''
@abstractmethod
def test(self, episodes=100):
'''
Test the agent in eval mode for a certain number of episodes.
Args:
episodes (int): The number of test episodes.
Returns:
list(float): A list of all returns received during testing.
'''
@property
@abstractmethod
def frame(self):
'''The index of the current training frame.'''
@property
@abstractmethod
def episode(self):
'''The index of the current training episode'''
def _log_training_episode(self, returns, fps):
if not self._quiet:
print('episode: {}, frame: {}, fps: {}, returns: {}'.format(self.episode, self.frame, int(fps), returns))
if returns > self._best_returns:
self._best_returns = returns
self._returns100.append(returns)
if len(self._returns100) == 100:
mean = np.mean(self._returns100)
std = np.std(self._returns100)
self._writer.add_summary('returns100', mean, std, step="frame")
self._returns100 = []
self._writer.add_evaluation('returns/episode', returns, step="episode")
self._writer.add_evaluation('returns/frame', returns, step="frame")
self._writer.add_evaluation("returns/max", self._best_returns, step="frame")
self._writer.add_scalar('fps', fps, step="frame")
def _log_test_episode(self, episode, returns):
if not self._quiet:
print('test episode: {}, returns: {}'.format(episode, returns))
def _log_test(self, returns):
if not self._quiet:
print('test returns (mean ± sem): {} ± {}'.format(np.mean(returns), stats.sem(returns)))
self._writer.add_summary('returns-test', np.mean(returns), np.std(returns))
def save(self):
return self._preset.save('{}/preset.pt'.format(self._writer.log_dir))
def close(self):
self._writer.close()
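# --- Hedged sketch (not part of the original file): a minimal concrete Experiment ---
# Only the abstract API defined above is assumed; the environment interaction is
# stubbed out and the returns/fps values are placeholders.
#
#   class MinimalExperiment(Experiment):
#       def __init__(self, writer, quiet=False):
#           super().__init__(writer, quiet)
#           self._frame = 0
#           self._episode = 0
#       def train(self, frames=np.inf, episodes=np.inf):
#           while self._frame < frames and self._episode < episodes:
#               returns, fps = 0.0, 0.0   # run one episode here and measure these
#               self._frame += 1
#               self._episode += 1
#               self._log_training_episode(returns, fps)
#       def test(self, episodes=100):
#           return [0.0 for _ in range(episodes)]
#       @property
#       def frame(self):
#           return self._frame
#       @property
#       def episode(self):
#           return self._episode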
|
xar/utils.py | darjeeling/xar | 1,477 | 12754062 | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""Handy helper utils to work with XARs at runtime"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
def get_runtime_path():
# type: (...) -> str
"""Return the location of the runtime files directory"""
runtime_path = os.getenv("XAR_RUNTIME_FILES")
if runtime_path:
if not os.access(runtime_path, os.R_OK):
raise ValueError("XAR_RUNTIME_FILES is invalid: %s" % runtime_path)
return runtime_path
raise ValueError("Cannot determine runtime files path.")
|
tests/configure/test_config.py | testruction/databricks-cli | 252 | 12754066 | # Databricks CLI
# Copyright 2017 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"), except
# that the use of services to which certain application programming
# interfaces (each, an "API") connect requires that the user first obtain
# a license for the use of the APIs from Databricks, Inc. ("Databricks"),
# by creating an account at www.databricks.com and agreeing to either (a)
# the Community Edition Terms of Service, (b) the Databricks Terms of
# Service, or (c) another written agreement between Licensee and Databricks
# for the use of the APIs.
#
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint:disable=protected-access
import json
import mock
import click
from click.testing import CliRunner
import databricks_cli.configure.config as config
from databricks_cli.utils import InvalidConfigurationError, eat_exceptions
from databricks_cli.configure.provider import DatabricksConfig
from databricks_cli.click_types import ContextObject
from tests.utils import provide_conf
@provide_conf
def test_debug_option():
# Test that the context object's debug_mode property changes when the --debug flag is passed.
@click.command()
@click.option('--debug-fed', type=bool)
@config.debug_option
def test_debug(debug_fed): # noqa
ctx = click.get_current_context()
context_object = ctx.ensure_object(ContextObject)
assert context_object.debug_mode is debug_fed
result = CliRunner().invoke(test_debug, ['--debug', '--debug-fed', True])
assert result.exit_code == 0
result = CliRunner().invoke(test_debug, ['--debug-fed', False])
assert result.exit_code == 0
# Test that with eat_exceptions wrapper, 'Traceback' appears or doesn't appear depending on
# whether --debug flag is given.
@click.command()
@config.debug_option
@eat_exceptions
def test_debug_traceback(): # noqa
assert False
result = CliRunner().invoke(test_debug_traceback, ['--debug'])
assert result.exit_code == 1
assert 'Traceback' in result.output
result = CliRunner().invoke(test_debug_traceback)
assert result.exit_code == 1
assert 'Traceback' not in result.output
@provide_conf
def test_provide_api_client():
@click.command()
@click.option('--x', required=True)
@config.profile_option
@config.provide_api_client
def test_command(api_client, x): # noqa
click.echo(x)
result = CliRunner().invoke(test_command, ['--x', '1'])
assert result.exit_code == 0
assert result.output == '1\n'
def test_provide_api_client_invalid():
@click.command()
@click.option('--x', required=True)
@config.profile_option
@config.provide_api_client
def test_command(api_client, x): # noqa
click.echo(x)
result = CliRunner().invoke(test_command, ['--x', '1'])
assert result.exit_code == 1
assert isinstance(result.exception, InvalidConfigurationError)
TEST_PROFILE_1 = 'test-profile-1'
TEST_PROFILE_2 = 'test-profile-2'
def test_provide_profile_twice():
@click.group()
@config.profile_option
def test_group():
pass
@click.command()
@config.profile_option
def test_command(): # noqa
pass
test_group.add_command(test_command, 'test')
result = CliRunner().invoke(test_group, ['--profile', TEST_PROFILE_1, 'test', '--profile',
TEST_PROFILE_2])
assert '--profile can only be provided once. The profiles [{}, {}] were provided.'.format(
TEST_PROFILE_1, TEST_PROFILE_2) in result.output
TEST_HOST = 'https://test.cloud.databricks.com'
TEST_TOKEN = '<PASSWORD>'
def test_command_headers():
@click.group()
@config.profile_option
def outer_test_group():
pass
@click.group()
@config.profile_option
def inner_test_group():
pass
@click.command()
@click.option('--x', required=True)
@config.profile_option
@config.provide_api_client
def test_command(api_client, x): # noqa
click.echo(json.dumps(api_client.default_headers))
with mock.patch("databricks_cli.configure.config.get_config") as config_mock:
with mock.patch("uuid.uuid1") as uuid_mock:
config_mock.return_value = DatabricksConfig.from_token(TEST_HOST, TEST_TOKEN)
uuid_mock.return_value = '1234'
inner_test_group.add_command(test_command, 'subcommand')
outer_test_group.add_command(inner_test_group, 'command')
result = CliRunner().invoke(outer_test_group, ['command', 'subcommand', '--x', '12'])
assert result.exception is None
default_headers = json.loads(result.output)
assert 'user-agent' in default_headers
assert "command-subcommand-1234" in default_headers['user-agent']
|
pgmpy/estimators/base.py | akleinau/pgmpy | 2,144 | 12754091 | #!/usr/bin/env python
from functools import lru_cache
import pandas as pd
from pgmpy.utils.decorators import convert_args_tuple
class BaseEstimator(object):
def __init__(self, data=None, state_names=None, complete_samples_only=True):
"""
Base class for estimators in pgmpy; `ParameterEstimator`,
`StructureEstimator` and `StructureScore` derive from this class.
Parameters
----------
data: pandas DataFrame object where each column represents one variable.
(If some values in the data are missing the data cells should be set to `numpy.NaN`.
Note that pandas converts each column containing `numpy.NaN`s to dtype `float`.)
state_names: dict (optional)
A dict indicating, for each variable, the discrete set of states (or values)
that the variable can take. If unspecified, the observed values in the data set
are taken to be the only possible states.
complete_samples_only: bool (optional, default `True`)
Specifies how to deal with missing data, if present. If set to `True` all rows
that contain `np.NaN` somewhere are ignored. If `False` then, for each variable,
every row where neither the variable nor its parents are `np.NaN` is used.
This sets the behavior of the `state_count`-method.
"""
self.data = data
# data can be None in the case when learning structure from
# independence conditions. Look into PC.py.
if self.data is not None:
self.complete_samples_only = complete_samples_only
self.variables = list(data.columns.values)
if not isinstance(state_names, dict):
self.state_names = {
var: self._collect_state_names(var) for var in self.variables
}
else:
self.state_names = dict()
for var in self.variables:
if var in state_names:
if not set(self._collect_state_names(var)) <= set(
state_names[var]
):
raise ValueError(
f"Data contains unexpected states for variable: {var}."
)
self.state_names[var] = state_names[var]
else:
self.state_names[var] = self._collect_state_names(var)
def _collect_state_names(self, variable):
"Return a list of states that the variable takes in the data."
states = sorted(list(self.data.loc[:, variable].dropna().unique()))
return states
@convert_args_tuple
@lru_cache(maxsize=2048)
def state_counts(
self, variable, parents=[], complete_samples_only=None, weighted=False
):
"""
Return counts of how often each state of 'variable' occurred in the data.
If a list of parents is provided, counting is done conditionally
for each state configuration of the parents.
Parameters
----------
variable: string
Name of the variable for which the state count is to be done.
parents: list
Optional list of variable parents, if conditional counting is desired.
Order of parents in list is reflected in the returned DataFrame
complete_samples_only: bool
Specifies how to deal with missing data, if present. If set to `True` all rows
that contain `np.NaN` somewhere are ignored. If `False` then
every row where neither the variable nor its parents are `np.NaN` is used.
Desired default behavior can be passed to the class constructor.
weighted: bool
If True, data must have a `_weight` column specifying the weight of the
datapoint (row). If False, each datapoint has a weight of `1`.
Returns
-------
state_counts: pandas.DataFrame
Table with state counts for 'variable'
Examples
--------
>>> import pandas as pd
>>> from pgmpy.estimators import BaseEstimator
>>> data = pd.DataFrame(data={'A': ['a1', 'a1', 'a2'],
'B': ['b1', 'b2', 'b1'],
'C': ['c1', 'c1', 'c2']})
>>> estimator = BaseEstimator(data)
>>> estimator.state_counts('A')
A
a1 2
a2 1
>>> estimator.state_counts('C', parents=['A', 'B'])
A a1 a2
B b1 b2 b1 b2
C
c1 1 1 0 0
c2 0 0 1 0
>>> estimator.state_counts('C', parents=['A'])
A a1 a2
C
c1 2.0 0.0
c2 0.0 1.0
"""
parents = list(parents)
# default for how to deal with missing data can be set in class constructor
if complete_samples_only is None:
complete_samples_only = self.complete_samples_only
# ignores either any row containing NaN, or only those where the variable or its parents is NaN
data = (
self.data.dropna()
if complete_samples_only
else self.data.dropna(subset=[variable] + parents)
)
if weighted and ("_weight" not in self.data.columns):
raise ValueError("data must contain a `_weight` column if weighted=True")
if not parents:
# count how often each state of 'variable' occurred
if weighted:
state_count_data = data.groupby([variable]).sum()["_weight"]
else:
state_count_data = data.loc[:, variable].value_counts()
state_counts = (
state_count_data.reindex(self.state_names[variable])
.fillna(0)
.to_frame()
)
else:
parents_states = [self.state_names[parent] for parent in parents]
# count how often each state of 'variable' occurred, conditional on parents' states
if weighted:
state_count_data = (
data.groupby([variable] + parents).sum()["_weight"].unstack(parents)
)
else:
state_count_data = (
data.groupby([variable] + parents).size().unstack(parents)
)
if not isinstance(state_count_data.columns, pd.MultiIndex):
state_count_data.columns = pd.MultiIndex.from_arrays(
[state_count_data.columns]
)
# reindex rows & columns to sort them and to add missing ones
# missing row = some state of 'variable' did not occur in data
# missing column = some state configuration of current 'variable's parents
# did not occur in data
row_index = self.state_names[variable]
column_index = pd.MultiIndex.from_product(parents_states, names=parents)
state_counts = state_count_data.reindex(
index=row_index, columns=column_index
).fillna(0)
return state_counts
class ParameterEstimator(BaseEstimator):
def __init__(self, model, data, **kwargs):
"""
Base class for parameter estimators in pgmpy.
Parameters
----------
model: pgmpy.models.BayesianNetwork or pgmpy.models.MarkovNetwork or pgmpy.models.NoisyOrModel model
for which parameter estimation is to be done.
data: pandas DataFrame object
dataframe object with column names identical to the variable names of the model.
(If some values in the data are missing the data cells should be set to `numpy.NaN`.
Note that pandas converts each column containing `numpy.NaN`s to dtype `float`.)
state_names: dict (optional)
A dict indicating, for each variable, the discrete set of states (or values)
that the variable can take. If unspecified, the observed values in the data set
are taken to be the only possible states.
complete_samples_only: bool (optional, default `True`)
Specifies how to deal with missing data, if present. If set to `True` all rows
that contain `np.NaN` somewhere are ignored. If `False` then, for each variable,
every row where neither the variable nor its parents are `np.NaN` is used.
This sets the behavior of the `state_count`-method.
"""
if not (set(model.nodes()) - model.latents) <= set(data.columns.values):
raise ValueError(
"variable names of the model must be identical to column names in data"
)
self.model = model
super(ParameterEstimator, self).__init__(data, **kwargs)
def state_counts(self, variable, weighted=False, **kwargs):
"""
Return counts of how often each state of 'variable' occurred in the data.
If the variable has parents, counting is done conditionally
for each state configuration of the parents.
Parameters
----------
variable: string
Name of the variable for which the state count is to be done.
complete_samples_only: bool
Specifies how to deal with missing data, if present. If set to `True` all rows
that contain `np.NaN` somewhere are ignored. If `False` then
every row where neither the variable nor its parents are `np.NaN` is used.
Desired default behavior can be passed to the class constructor.
Returns
-------
state_counts: pandas.DataFrame
Table with state counts for 'variable'
Examples
--------
>>> import pandas as pd
>>> from pgmpy.models import BayesianNetwork
>>> from pgmpy.estimators import ParameterEstimator
>>> model = BayesianNetwork([('A', 'C'), ('B', 'C')])
>>> data = pd.DataFrame(data={'A': ['a1', 'a1', 'a2'],
'B': ['b1', 'b2', 'b1'],
'C': ['c1', 'c1', 'c2']})
>>> estimator = ParameterEstimator(model, data)
>>> estimator.state_counts('A')
A
a1 2
a2 1
>>> estimator.state_counts('C')
A a1 a2
B b1 b2 b1 b2
C
c1 1 1 0 0
c2 0 0 1 0
"""
parents = sorted(self.model.get_parents(variable))
return super(ParameterEstimator, self).state_counts(
variable, parents=parents, weighted=weighted, **kwargs
)
class StructureEstimator(BaseEstimator):
def __init__(self, data=None, independencies=None, **kwargs):
"""
Base class for structure estimators in pgmpy.
Parameters
----------
data: pandas DataFrame object
dataframe object where each column represents one variable.
(If some values in the data are missing the data cells should be set to `numpy.NaN`.
Note that pandas converts each column containing `numpy.NaN`s to dtype `float`.)
state_names: dict (optional)
A dict indicating, for each variable, the discrete set of states (or values)
that the variable can take. If unspecified, the observed values in the data set
are taken to be the only possible states.
complete_samples_only: bool (optional, default `True`)
Specifies how to deal with missing data, if present. If set to `True` all rows
that contain `np.NaN` somewhere are ignored. If `False` then, for each variable,
every row where neither the variable nor its parents are `np.NaN` is used.
This sets the behavior of the `state_count`-method.
"""
self.independencies = independencies
if self.independencies is not None:
self.variables = self.independencies.get_all_variables()
super(StructureEstimator, self).__init__(data=data, **kwargs)
def estimate(self):
pass
|
autobahn/wamp/gen/wamp/proto/AuthMethod.py | rapyuta-robotics/autobahn-python | 1,670 | 12754097 | <filename>autobahn/wamp/gen/wamp/proto/AuthMethod.py
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: proto
class AuthMethod(object):
ANONYMOUS = 0
COOKIE = 1
TLS = 2
TICKET = 3
CRA = 4
SCRAM = 5
CRYPTOSIGN = 6
|
pogom/pgoapi/protos/POGOProtos/Networking/Requests/Messages/SetPlayerTeamMessage_pb2.py | tier4fusion/pogom-updated | 2,557 | 12754115 | <filename>pogom/pgoapi/protos/POGOProtos/Networking/Requests/Messages/SetPlayerTeamMessage_pb2.py
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Networking/Requests/Messages/SetPlayerTeamMessage.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from POGOProtos.Enums import TeamColor_pb2 as POGOProtos_dot_Enums_dot_TeamColor__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Networking/Requests/Messages/SetPlayerTeamMessage.proto',
package='POGOProtos.Networking.Requests.Messages',
syntax='proto3',
serialized_pb=_b('\nBPOGOProtos/Networking/Requests/Messages/SetPlayerTeamMessage.proto\x12\'POGOProtos.Networking.Requests.Messages\x1a POGOProtos/Enums/TeamColor.proto\"A\n\x14SetPlayerTeamMessage\x12)\n\x04team\x18\x01 \x01(\x0e\x32\x1b.POGOProtos.Enums.TeamColorb\x06proto3')
,
dependencies=[POGOProtos_dot_Enums_dot_TeamColor__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_SETPLAYERTEAMMESSAGE = _descriptor.Descriptor(
name='SetPlayerTeamMessage',
full_name='POGOProtos.Networking.Requests.Messages.SetPlayerTeamMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='team', full_name='POGOProtos.Networking.Requests.Messages.SetPlayerTeamMessage.team', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=145,
serialized_end=210,
)
_SETPLAYERTEAMMESSAGE.fields_by_name['team'].enum_type = POGOProtos_dot_Enums_dot_TeamColor__pb2._TEAMCOLOR
DESCRIPTOR.message_types_by_name['SetPlayerTeamMessage'] = _SETPLAYERTEAMMESSAGE
SetPlayerTeamMessage = _reflection.GeneratedProtocolMessageType('SetPlayerTeamMessage', (_message.Message,), dict(
DESCRIPTOR = _SETPLAYERTEAMMESSAGE,
__module__ = 'POGOProtos.Networking.Requests.Messages.SetPlayerTeamMessage_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Networking.Requests.Messages.SetPlayerTeamMessage)
))
_sym_db.RegisterMessage(SetPlayerTeamMessage)
# @@protoc_insertion_point(module_scope)
|
tools/rosunit/test/dotname_cases.py | mcx/ros | 2,051 | 12754130 | import unittest
class CaseA(unittest.TestCase):
def runTest(self):
self.assertTrue(True)
class CaseB(unittest.TestCase):
def runTest(self):
self.assertTrue(True)
class DotnameLoadingSuite(unittest.TestSuite):
def __init__(self):
super(DotnameLoadingSuite, self).__init__()
self.addTest(CaseA())
self.addTest(CaseB())
class DotnameLoadingTest(unittest.TestCase):
def test_a(self):
self.assertTrue(True)
def test_b(self):
self.assertTrue(True)
class NotTestCase():
def not_test(self):
pass
|
client/verta/verta/_swagger/_public/uac/model/UacResources.py | stefan-petrov-toptal/modeldb | 835 | 12754133 | # THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class UacResources(BaseType):
def __init__(self, service=None, resource_ids=None, role_service_resource_type=None, authz_service_resource_type=None, modeldb_service_resource_type=None):
required = {
"service": False,
"resource_ids": False,
"role_service_resource_type": False,
"authz_service_resource_type": False,
"modeldb_service_resource_type": False,
}
self.service = service
self.resource_ids = resource_ids
self.role_service_resource_type = role_service_resource_type
self.authz_service_resource_type = authz_service_resource_type
self.modeldb_service_resource_type = modeldb_service_resource_type
for k, v in required.items():
if self[k] is None and v:
raise ValueError('attribute {} is required'.format(k))
@staticmethod
def from_json(d):
from .ServiceEnumService import ServiceEnumService
from .RoleResourceEnumRoleServiceResourceTypes import RoleResourceEnumRoleServiceResourceTypes
from .AuthzResourceEnumAuthzServiceResourceTypes import AuthzResourceEnumAuthzServiceResourceTypes
from .ModelResourceEnumModelDBServiceResourceTypes import ModelResourceEnumModelDBServiceResourceTypes
tmp = d.get('service', None)
if tmp is not None:
d['service'] = ServiceEnumService.from_json(tmp)
tmp = d.get('resource_ids', None)
if tmp is not None:
d['resource_ids'] = [tmp for tmp in tmp]
tmp = d.get('role_service_resource_type', None)
if tmp is not None:
d['role_service_resource_type'] = RoleResourceEnumRoleServiceResourceTypes.from_json(tmp)
tmp = d.get('authz_service_resource_type', None)
if tmp is not None:
d['authz_service_resource_type'] = AuthzResourceEnumAuthzServiceResourceTypes.from_json(tmp)
tmp = d.get('modeldb_service_resource_type', None)
if tmp is not None:
d['modeldb_service_resource_type'] = ModelResourceEnumModelDBServiceResourceTypes.from_json(tmp)
return UacResources(**d)
|
uiBasicIO.py | xcgoo/uiKLine | 232 | 12754142 | <reponame>xcgoo/uiKLine
# -*- coding: utf-8 -*-
# PyQt
from qtpy.QtGui import *
from qtpy.QtWidgets import *
from qtpy.QtCore import *
from qtpy import QtGui,QtCore
# Others
import os
import imp
import sys
import json
import glob
from functools import partial
from collections import OrderedDict
# Import button functions
#---------------------------------------------------------------------------------------
ALL_FUNC_BUTTON = []
funcBtnPath = os.getcwd() + '/func-button/'
allPath = glob.glob(funcBtnPath+r'*.py')
for path in allPath:
fileName = path.split("\\")[-1]
modelName = fileName.split(".")[0]
ALL_FUNC_BUTTON.append(modelName)
imp.load_source('ctaFuncButttons',path)
BUTTON_FUNC = {}
from ctaFuncButttons import *
for func_bt in ALL_FUNC_BUTTON:
fn_obj = getattr(sys.modules['ctaFuncButttons'], func_bt)
BUTTON_FUNC[func_bt] = fn_obj
# String conversion
#---------------------------------------------------------------------------------------
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
########################################################################
class uiBasicIO(QWidget):
"""通过json文件,自动生成输入框和按钮的元类"""
#----------------------------------------------------------------------
def __init__(self,parent=None,inpFile='',btnFile=''):
"""初始化函数"""
super(uiBasicIO,self).__init__(parent)
# Input field data
self.classDict = OrderedDict()
self.labelDict = {}
self.widthDict = {}
self.typeDict = {}
self.evalDict = {}
self.editDict = {}
# Button data
self.bClassDict = OrderedDict()
self.bWidthDict = {}
self.bFunDict = {}
self.buttonDict = {}
# Input fields and buttons
self.groupInput = None
self.groupProcess = None
# Configuration files for input fields and buttons
self.inpFile = inpFile
self.btnFile = btnFile
self.loadInputSetting()
self.loadButtonSetting()
self.initBasicUi()
#----------------------------------------------------------------------
def getInputParamByName(self,name):
"""获得输入框参数值"""
typeName = self.typeDict[name]
editCell = self.editDict[name]
val = str(editCell.currentText()) if typeName == 'List' else str(editCell.text())
try:
return (eval(val) if self.evalDict[name] else val)
except:
return val
#----------------------------------------------------------------------
def loadInputSetting(self):
"""载入输入框界面配置"""
settingFile = self.inpFile
with open(settingFile) as f:
for setting in json.load(f):
name = setting['name']
label = setting['label']
typeName = setting['type']
evalType = setting['eval']
width = setting['width']
className = setting['class']
default = setting['default']
# Label
self.labelDict[name] = QLabel(label)
self.labelDict[name].setAlignment(QtCore.Qt.AlignCenter)
# Width
self.widthDict[name] = width
# Input field type
self.typeDict[name] = typeName
self.evalDict[name] = evalType
# Category
if className in self.classDict:
self.classDict[className].append(name)
else:
self.classDict[className] = [name]
# Input field widget
if typeName == 'Edit':
self.editDict[name] = QLineEdit()
self.editDict[name].setText(default)
elif typeName == 'List':
self.editDict[name] = QComboBox()
self.editDict[name].addItems(eval(setting['ListVar']))
#----------------------------------------------------------------------
def loadButtonSetting(self):
"""载入按钮界面配置"""
settingFile = self.btnFile
with open(settingFile) as f:
for setting in json.load(f):
label = setting['label']
func = setting['func']
width = setting['width']
className = setting['class']
style = setting['style']
# Button
self.buttonDict[func] = QPushButton(label)
self.buttonDict[func].setObjectName(_fromUtf8(style))
self.buttonDict[func].clicked.connect(partial(BUTTON_FUNC[func],self))
# Width
self.bWidthDict[func] = width
# Category
if className in self.bClassDict:
self.bClassDict[className].append(func)
else:
self.bClassDict[className] = [func]
#----------------------------------------------------------------------
def initBasicUi(self):
"""初始化界面"""
# Generate the input field UI from the configuration file
self.groupInput = QGroupBox()
self.groupInput.setTitle(u'')
gridup = QGridLayout()
i = 0
for className in self.classDict:
classIndex = i
# Labels and input fields
for name in self.classDict[className]:
width = self.widthDict[name]
qLabel = self.labelDict[name]
qEdit = self.editDict[name]
gridup.addWidget(qLabel, 1, i)
gridup.addWidget(qEdit, 2, i)
gridup.setColumnStretch(i, width)
i+=1
# Category title
qcLabel = QLabel(className)
qcLabel.setAlignment(QtCore.Qt.AlignCenter)
qcLabel.setFont(QtGui.QFont("Roman times",10,QtGui.QFont.Bold))
gridup.addWidget(qcLabel, 0, classIndex,1,i-classIndex)
# Separator
for j in xrange(0,3):
qcSplit = QLabel(u'|')
qcSplit.setAlignment(QtCore.Qt.AlignCenter)
gridup.addWidget(qcSplit, j, i)
i+=1
self.groupInput.setLayout(gridup)
# Generate the button UI from the configuration file
self.groupProcess = QGroupBox()
self.groupProcess.setTitle(u'')
griddown = QGridLayout()
i = 0
for className in self.bClassDict:
classIndex = i
# Buttons
for name in self.bClassDict[className]:
width = self.bWidthDict[name]
qButton = self.buttonDict[name]
griddown.addWidget(qButton, 1, i)
griddown.setColumnStretch(i, width)
i+=1
# Category title
qcLabel = QLabel(className)
qcLabel.setAlignment(QtCore.Qt.AlignCenter)
qcLabel.setFont(QFont("Roman times",10,QtGui.QFont.Bold))
griddown.addWidget(qcLabel, 0, classIndex,1,i-classIndex)
# Separator
for j in xrange(0,2):
qcSplit = QLabel(u'|')
qcSplit.setAlignment(QtCore.Qt.AlignCenter)
griddown.addWidget(qcSplit, j, i)
i+=1
self.groupProcess.setLayout(griddown)
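# --- Hedged sketch (not part of the original file): the JSON shape that the
# loaders above expect, inferred from the keys they read; all values are
# placeholders, not real configuration. ---
#
#   input config (inpFile), read by loadInputSetting():
#   [
#     {"name": "symbol", "label": "Symbol", "type": "Edit", "eval": false,
#      "width": 2, "class": "Input", "default": "rb0000"},
#     {"name": "period", "label": "Period", "type": "List", "eval": false,
#      "width": 1, "class": "Input", "default": "1m", "ListVar": "['1m','5m']"}
#   ]
#
#   button config (btnFile), read by loadButtonSetting():
#   [
#     {"label": "Load", "func": "loadData", "width": 1,
#      "class": "Actions", "style": "blueButton"}
#   ]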
|
src/Python/Visualization/InteractorStyleTrackballActor.py | cvandijck/VTKExamples | 309 | 12754155 | <reponame>cvandijck/VTKExamples<filename>src/Python/Visualization/InteractorStyleTrackballActor.py
import vtk
def main():
colors = vtk.vtkNamedColors()
# create a rendering window and renderer
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
# create a render window interactor
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
style = vtk.vtkInteractorStyleTrackballActor()
iren.SetInteractorStyle(style)
# create source
sphereSource = vtk.vtkSphereSource()
# mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(sphereSource.GetOutputPort())
# actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(colors.GetColor3d('Chartreuse'))
# assign actor to the renderer
ren.AddActor(actor)
ren.SetBackground(colors.GetColor3d('PaleGoldenrod'))
# enable user interface interactor
iren.Initialize()
renWin.Render()
iren.Start()
if __name__ == '__main__':
main()
|
ufora/FORA/python/PurePython/testModules/same_line_number/A.py | ufora/ufora | 571 | 12754177 | from ufora.FORA.python.PurePython.testModules.same_line_number.B import B
class A(object):
def __init__(self, m):
self.m = m
def foo(self):
return B(self.m)
|
Yank/commands/analyze.py | lilyminium/yank | 136 | 12754187 | <gh_stars>100-1000
#!/usr/local/bin/env python
# =============================================================================================
# MODULE DOCSTRING
# =============================================================================================
"""
Analyze YANK output file.
"""
# =============================================================================================
# MODULE IMPORTS
# =============================================================================================
# Module imports handled in individual functions since CLI should be faster to boot up
# =============================================================================================
# COMMAND-LINE INTERFACE
# =============================================================================================
usage = """
YANK analyze
Usage:
yank analyze ((-s STORE | --store=STORE) | (-y YAML | --yaml=YAML)) [-e SERIAL | --serial=SERIAL] [--skipunbiasing] [--distcutoff=DISTANCE] [--energycutoff=ENERGY] [-v | --verbose] [--fulltraj]
yank analyze report ((-s STORE | --store=STORE) | (-y YAML | --yaml=YAML)) (-o OUTPUT | --output=OUTPUT) [--format=FORMAT] [-e SERIAL | --serial=SERIAL] [--skipunbiasing] [--distcutoff=DISTANCE] [--energycutoff=ENERGY] [-v | --verbose] [--fulltraj]
yank analyze extract-trajectory --netcdf=FILEPATH [--checkpoint=FILEPATH ] (--state=STATE | --replica=REPLICA) --trajectory=FILEPATH [--start=START_FRAME] [--skip=SKIP_FRAME] [--end=END_FRAME] [--nosolvent] [--discardequil] [--imagemol] [-v | --verbose]
Description:
Analyze the data to compute Free Energies OR extract the trajectory from the NetCDF file into a common format.
yank analyze report generates a Jupyter (IPython) notebook of the report instead of writing it to standard output
Free Energy Required Arguments:
-s STORE, --store=STORE Storage directory for NetCDF data files.
EXCLUSIVE with -y and --yaml
-y YAML, --yaml=YAML Target YAML file which setup and ran the experiment(s) being analyzed.
This slightly changes the optional -o|--output flag.
EXCLUSIVE with -s and --store
YANK Analysis Output Arguments:
-e SERIAL, --serial=SERIAL Save data in Pickle serialized output. This behaves differently in report mode.
In normal mode, this is a SINGULAR output file in Pickle format
In report mode, this is the base name of the individual serial files. If not provided,
then the name is inferred from the storage (-s) or the yaml (-y) file
report Toggles output to be of the Jupyter Notebook analysis as a rendered notebook or as
a static file. Can use a path + name as well. File format is set by the --format flag
-o=REPORT, --output=REPORT Name of the health report Jupyter notebook or static file, can use a path + name as well
If the filename ends in .pdf or .html, the notebook is auto run and converted to a
static PDF or HTML file respectively
PDF requires xelatex binary in OS path, often provided by LaTeX packages
MODIFIED BY -y|--yaml: This becomes the DIRECTORY of the output. The names are inferred
from the input YAML file
--format=FORMAT File format of the notebook. If the filename ends in .pdf or .html, the notebook is run
and converted to a static PDF or HTML file respectively. If --format is NOT set, it
defaults to '.ipynb'
Free Energy Optional Arguments:
--skipunbiasing Skip the radially-symmetric restraint unbiasing. This can be an expensive step.
If this flag is not specified, and no cutoff is given, a distance cutoff is
automatically determined as the 99.9-percentile of the restraint distance distribution
in the bound state.
--distcutoff=DISTANCE The restraint distance cutoff (in angstroms) to be used to unbias the restraint.
When the restraint is unbiased, the analyzer discards all the samples for which the
distance between the restrained atoms is above this cutoff. Effectively, this is
equivalent to placing a hard wall potential at a restraint distance "distcutoff".
--energycutoff=ENERGY The restraint unitless potential energy cutoff (i.e. in kT) to be used to unbias the
restraint. When the restraint is unbiased, the analyzer discards all the samples for
which the restrain potential energy (in kT) is above this cutoff. Effectively, this is
equivalent to placing a hard wall potential at a restraint distance such that the
restraint potential energy is equal to "energycutoff".
Extract Trajectory Required Arguments:
--netcdf=FILEPATH Path to the NetCDF file.
--checkpoint=FILEPATH Path to the NetCDF checkpoint file if not the default name inferred from the "netcdf" option
--state=STATE_IDX Index of the alchemical state for which to extract the trajectory
--replica=REPLICA_IDX Index of the replica for which to extract the trajectory
--trajectory=FILEPATH Path to the trajectory file to create (extension determines the format)
Extract Trajectory Options:
--start=START_FRAME Index of the first frame to keep
--end=END_FRAME Index of the last frame to keep
--skip=SKIP_FRAME Extract one frame every SKIP_FRAME
--nosolvent Do not extract solvent
--discardequil Detect and discard equilibration frames
--imagemol Reprocess trajectory to enforce periodic boundary conditions to molecules positions
General Options:
-v, --verbose Print verbose output
--fulltraj Force ALL analysis run from this command to rely on the full trajectory and not do any
automatic equilibration detection or decorrelation subsampling. Although the
equilibration and correlation times will still be computed, no calculation depending on
them will use this information.
"""
# =============================================================================================
# COMMAND DISPATCH
# =============================================================================================
def dispatch(args):
import os
import pickle
from .. import utils, analyze
utils.config_root_logger(args['--verbose'])
if args['report']:
if not args['--format']:
args['--format'] = '.ipynb'
elif args['--format'][0] != '.':
# Ensure format is not double dotted
args['--format'] = '.' + args['--format']
if args['--yaml'] is not None and args['--output']:
# Ensure the last output is treated as a directory in all cases
os.makedirs(args['--output'], exist_ok=True)
base, last_item = os.path.split(args['--output'])
if last_item != '':
args['--output'] = os.path.join(base, last_item, '')
return dispatch_report(args)
if args['extract-trajectory']:
return dispatch_extract_trajectory(args)
# Configure analyzer keyword arguments.
analyzer_kwargs = extract_analyzer_kwargs(args)
do_serialize = True if args['--serial'] is not None else False
if args['--yaml']:
multi_analyzer = analyze.MultiExperimentAnalyzer(args['--yaml'])
output = multi_analyzer.run_all_analysis(serial_data_path=args['--serial'], serialize_data=do_serialize,
**analyzer_kwargs)
for exp_name, data in output.items():
analyze.print_analysis_data(data, header="######## EXPERIMENT: {} ########".format(exp_name))
else:
import mpiplus
@mpiplus.on_single_node(0)
def single_run():
# Helper to handle the case where someone runs MPI on a single directory
output = analyze.analyze_directory(args['--store'], **analyzer_kwargs)
if do_serialize:
with open(args['--serial'], 'wb') as f:
pickle.dump(output, f)
print("Results have been serialized to {}".format(args['--serial']))
single_run()
return True
def extract_analyzer_kwargs(args, quantities_as_strings=False):
"""Return a dictionary with the keyword arguments to pass to the analyzer."""
import simtk.unit as unit
analyzer_kwargs = {}
if args['--skipunbiasing']:
analyzer_kwargs['unbias_restraint'] = False
if args['--energycutoff']:
analyzer_kwargs['restraint_energy_cutoff'] = float(args['--energycutoff'])
if args['--distcutoff']:
if quantities_as_strings:
distcutoff = args['--distcutoff'] + '*angstroms'
else:
distcutoff = float(args['--distcutoff']) * unit.angstroms
analyzer_kwargs['restraint_distance_cutoff'] = distcutoff
if args['--fulltraj']:
analyzer_kwargs['use_full_trajectory'] = True
return analyzer_kwargs
def dispatch_extract_trajectory(args):
import os
from .. import analyze
# Paths
output_path = args['--trajectory']
nc_path = args['--netcdf']
# Get keyword arguments to pass to extract_trajectory()
kwargs = {}
if args['--state']:
kwargs['state_index'] = int(args['--state'])
else:
kwargs['replica_index'] = int(args['--replica'])
if args['--start']:
kwargs['start_frame'] = int(args['--start'])
if args['--skip']:
kwargs['skip_frame'] = int(args['--skip'])
if args['--end']:
kwargs['end_frame'] = int(args['--end'])
if args['--nosolvent']:
kwargs['keep_solvent'] = False
if args['--discardequil']:
kwargs['discard_equilibration'] = True
if args['--imagemol']:
kwargs['image_molecules'] = True
if args['--checkpoint']:
kwargs["nc_checkpoint_file"] = args['--checkpoint']
# Extract trajectory
trajectory = analyze.extract_trajectory(nc_path, **kwargs)
# Create output directory and save trajectory
output_dir = os.path.dirname(output_path)
if output_dir != '' and not os.path.isdir(output_dir):
os.makedirs(output_dir)
trajectory.save(output_path)
return True
def dispatch_report(args):
import io
import os
import re
import pkg_resources
from .. import analyze
# Check modules for render
store = args['--store']
yaml_input = args['--yaml']
output = args['--output']
do_serialize = True if args['--serial'] is not None else False
analyzer_kwargs = extract_analyzer_kwargs(args, quantities_as_strings=True)
file_extension = args['--format']
# requires_prerendering = file_extension.lower() in {'.pdf', '.html', '.ipynb'}
try:
import seaborn
import matplotlib
import jupyter
except ImportError:
error_msg = ("Rendering this notebook requires the following packages:\n"
" - seaborn\n"
" - matplotlib\n"
" - jupyter\n"
"Rendering as {} is not possible without the packages!".format(file_extension))
raise ImportError(error_msg)
def run_notebook(source_path, output_file, serial_file, **analyzer_kwargs):
template_path = pkg_resources.resource_filename('yank', 'reports/YANK_Health_Report_Template.ipynb')
with open(template_path, 'r') as template:
notebook_text = re.sub('STOREDIRBLANK', source_path, template.read())
notebook_text = re.sub('ANALYZERKWARGSBLANK', str(analyzer_kwargs), notebook_text)
if serial_file is not None:
# Uncomment the line: the regex captures the '#' and the rest separately, and keeps only the rest
notebook_text = re.sub(r"(#)(report\.dump_serial_data\('SERIALOUTPUT'\))", r'\2', notebook_text)
notebook_text = re.sub('SERIALOUTPUT', serial_file, notebook_text)
# Cast to static output
print("Rendering notebook as a {} file...".format(file_extension))
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
import nbconvert.exporters
# Categorize exporters based on extension, requires exporter object and data type output
# 'b' = byte types output, e.g. PDF
# 't' = text based output, e.g. HTML or even raw notebook, human-readable-like
exporters = {
".pdf": {'exporter': nbconvert.exporters.PDFExporter, 'write_type': 'b'},
".html": {'exporter': nbconvert.exporters.HTMLExporter, 'write_type': 't'},
".ipynb": {'exporter': nbconvert.exporters.NotebookExporter, 'write_type': 't'}
}
# Load the notebook through Jupyter.
loaded_notebook = nbformat.read(io.StringIO(notebook_text), as_version=4)
# Process the notebook.
ep = ExecutePreprocessor(timeout=None)
# Sometimes the default startup timeout exceed the default of 60 seconds.
ep.startup_timeout = 180
# Set the title name, does not appear in all exporters
_, output_file_name = os.path.split(output_file)
resource_data = {'metadata': {'name': 'YANK Simulation Report: {}'.format(output_file_name)}}
print("Processing notebook now, this may take a while...")
processed_notebook, resources = ep.preprocess(loaded_notebook, resource_data)
# Retrieve exporter
exporter_data = exporters[file_extension.lower()]
# Determine exporter and data output type
exporter = exporter_data['exporter']
write_type = exporter_data['write_type']
with open(output_file, 'w{}'.format(write_type)) as notebook:
exported_notebook, _ = nbconvert.exporters.export(exporter, processed_notebook, resources=resources)
notebook.write(exported_notebook)
def cast_notebook_serial_path(relative_notebook_path):
if args['--serial'] is None:
serial_file = None
else:
serial_file = os.path.splitext(relative_notebook_path)[0] + '_' + args['--serial']
return serial_file
class NotebookMultiExperimentAnalyzer(analyze.MultiExperimentAnalyzer):
"""Custom Multi Experiment Analyzer for notebooks"""
@staticmethod
def _run_specific_analysis(path, **analyzer_kwargs):
_, exp_name = os.path.split(path)
single_output_file = os.path.join(output, exp_name + args['--format'])
single_serial_file = cast_notebook_serial_path(single_output_file)
run_notebook(path, single_output_file, single_serial_file, **analyzer_kwargs)
return
@staticmethod
def _serialize(serial_path, payload):
"""The notebooks do not have a general serial dump"""
pass
if yaml_input is not None:
multi_notebook = NotebookMultiExperimentAnalyzer(yaml_input)
_ = multi_notebook.run_all_analysis(serialize_data=do_serialize,
serial_data_path=args['--serial'],
**analyzer_kwargs)
else:
notebook_serial_file = cast_notebook_serial_path(output)
run_notebook(store, output, notebook_serial_file, **analyzer_kwargs)
return True
|
openmc/data/angle_distribution.py | norberto-schmidt/openmc | 262 | 12754191 | <filename>openmc/data/angle_distribution.py
from collections.abc import Iterable
from io import StringIO
from numbers import Real
from warnings import warn
import numpy as np
import openmc.checkvalue as cv
from openmc.mixin import EqualityMixin
from openmc.stats import Univariate, Tabular, Uniform, Legendre
from .function import INTERPOLATION_SCHEME
from .data import EV_PER_MEV
from .endf import get_head_record, get_cont_record, get_tab1_record, \
get_list_record, get_tab2_record
class AngleDistribution(EqualityMixin):
"""Angle distribution as a function of incoming energy
Parameters
----------
energy : Iterable of float
Incoming energies in eV at which distributions exist
mu : Iterable of openmc.stats.Univariate
Distribution of scattering cosines corresponding to each incoming energy
Attributes
----------
energy : Iterable of float
Incoming energies in eV at which distributions exist
mu : Iterable of openmc.stats.Univariate
Distribution of scattering cosines corresponding to each incoming energy
"""
def __init__(self, energy, mu):
super().__init__()
self.energy = energy
self.mu = mu
@property
def energy(self):
return self._energy
@property
def mu(self):
return self._mu
@energy.setter
def energy(self, energy):
cv.check_type('angle distribution incoming energy', energy,
Iterable, Real)
self._energy = energy
@mu.setter
def mu(self, mu):
cv.check_type('angle distribution scattering cosines', mu,
Iterable, Univariate)
self._mu = mu
def to_hdf5(self, group):
"""Write angle distribution to an HDF5 group
Parameters
----------
group : h5py.Group
HDF5 group to write to
"""
dset = group.create_dataset('energy', data=self.energy)
# Make sure all data is tabular
mu_tabular = [mu_i if isinstance(mu_i, Tabular) else
mu_i.to_tabular() for mu_i in self.mu]
# Determine total number of (mu,p) pairs and create array
n_pairs = sum([len(mu_i.x) for mu_i in mu_tabular])
pairs = np.empty((3, n_pairs))
# Create array for offsets
offsets = np.empty(len(mu_tabular), dtype=int)
interpolation = np.empty(len(mu_tabular), dtype=int)
j = 0
# Populate offsets and pairs array
for i, mu_i in enumerate(mu_tabular):
n = len(mu_i.x)
offsets[i] = j
interpolation[i] = 1 if mu_i.interpolation == 'histogram' else 2
pairs[0, j:j+n] = mu_i.x
pairs[1, j:j+n] = mu_i.p
pairs[2, j:j+n] = mu_i.c
j += n
# Create dataset for distributions
dset = group.create_dataset('mu', data=pairs)
# Write interpolation as attribute
dset.attrs['offsets'] = offsets
dset.attrs['interpolation'] = interpolation
@classmethod
def from_hdf5(cls, group):
"""Generate angular distribution from HDF5 data
Parameters
----------
group : h5py.Group
HDF5 group to read from
Returns
-------
openmc.data.AngleDistribution
Angular distribution
"""
energy = group['energy'][()]
data = group['mu']
offsets = data.attrs['offsets']
interpolation = data.attrs['interpolation']
mu = []
n_energy = len(energy)
for i in range(n_energy):
# Determine length of outgoing energy distribution and number of
# discrete lines
j = offsets[i]
if i < n_energy - 1:
n = offsets[i+1] - j
else:
n = data.shape[1] - j
interp = INTERPOLATION_SCHEME[interpolation[i]]
mu_i = Tabular(data[0, j:j+n], data[1, j:j+n], interp)
mu_i.c = data[2, j:j+n]
mu.append(mu_i)
return cls(energy, mu)
@classmethod
def from_ace(cls, ace, location_dist, location_start):
"""Generate an angular distribution from ACE data
Parameters
----------
ace : openmc.data.ace.Table
ACE table to read from
location_dist : int
Index in the XSS array corresponding to the start of a block,
e.g. JXS(9).
location_start : int
Index in the XSS array corresponding to the start of an angle
distribution array
Returns
-------
openmc.data.AngleDistribution
Angular distribution
"""
# Set starting index for angle distribution
idx = location_dist + location_start - 1
# Number of energies at which angular distributions are tabulated
n_energies = int(ace.xss[idx])
idx += 1
# Incoming energy grid
energy = ace.xss[idx:idx + n_energies]*EV_PER_MEV
idx += n_energies
# Read locations for angular distributions
lc = ace.xss[idx:idx + n_energies].astype(int)
idx += n_energies
mu = []
for i in range(n_energies):
if lc[i] > 0:
# Equiprobable 32 bin distribution
n_bins = 32
idx = location_dist + abs(lc[i]) - 1
cos = ace.xss[idx:idx + n_bins + 1]
pdf = np.zeros(n_bins + 1)
pdf[:n_bins] = 1.0/(n_bins*np.diff(cos))
cdf = np.linspace(0.0, 1.0, n_bins + 1)
mu_i = Tabular(cos, pdf, 'histogram', ignore_negative=True)
mu_i.c = cdf
elif lc[i] < 0:
# Tabular angular distribution
idx = location_dist + abs(lc[i]) - 1
intt = int(ace.xss[idx])
n_points = int(ace.xss[idx + 1])
# Data is given as rows of (values, PDF, CDF)
data = ace.xss[idx + 2:idx + 2 + 3*n_points]
data.shape = (3, n_points)
mu_i = Tabular(data[0], data[1], INTERPOLATION_SCHEME[intt])
mu_i.c = data[2]
else:
# Isotropic angular distribution
mu_i = Uniform(-1., 1.)
mu.append(mu_i)
return cls(energy, mu)
@classmethod
def from_endf(cls, ev, mt):
"""Generate an angular distribution from an ENDF evaluation
Parameters
----------
ev : openmc.data.endf.Evaluation
ENDF evaluation
mt : int
The MT value of the reaction to get angular distributions for
Returns
-------
openmc.data.AngleDistribution
Angular distribution
"""
file_obj = StringIO(ev.section[4, mt])
# Read HEAD record
items = get_head_record(file_obj)
lvt = items[2]
ltt = items[3]
# Read CONT record
items = get_cont_record(file_obj)
li = items[2]
nk = items[4]
center_of_mass = (items[3] == 2)
# Check for obsolete energy transformation matrix. If present, just skip
# it and keep reading
if lvt > 0:
warn('Obsolete energy transformation matrix in MF=4 angular '
'distribution.')
for _ in range((nk + 5)//6):
file_obj.readline()
if ltt == 0 and li == 1:
# Purely isotropic
energy = np.array([0., ev.info['energy_max']])
mu = [Uniform(-1., 1.), Uniform(-1., 1.)]
elif ltt == 1 and li == 0:
# Legendre polynomial coefficients
params, tab2 = get_tab2_record(file_obj)
n_energy = params[5]
energy = np.zeros(n_energy)
mu = []
for i in range(n_energy):
items, al = get_list_record(file_obj)
temperature = items[0]
energy[i] = items[1]
coefficients = np.asarray([1.0] + al)
mu.append(Legendre(coefficients))
elif ltt == 2 and li == 0:
# Tabulated probability distribution
params, tab2 = get_tab2_record(file_obj)
n_energy = params[5]
energy = np.zeros(n_energy)
mu = []
for i in range(n_energy):
params, f = get_tab1_record(file_obj)
temperature = params[0]
energy[i] = params[1]
if f.n_regions > 1:
raise NotImplementedError('Angular distribution with multiple '
'interpolation regions not supported.')
mu.append(Tabular(f.x, f.y, INTERPOLATION_SCHEME[f.interpolation[0]]))
elif ltt == 3 and li == 0:
# Legendre for low energies / tabulated for high energies
params, tab2 = get_tab2_record(file_obj)
n_energy_legendre = params[5]
energy_legendre = np.zeros(n_energy_legendre)
mu = []
for i in range(n_energy_legendre):
items, al = get_list_record(file_obj)
temperature = items[0]
energy_legendre[i] = items[1]
coefficients = np.asarray([1.0] + al)
mu.append(Legendre(coefficients))
params, tab2 = get_tab2_record(file_obj)
n_energy_tabulated = params[5]
energy_tabulated = np.zeros(n_energy_tabulated)
for i in range(n_energy_tabulated):
params, f = get_tab1_record(file_obj)
temperature = params[0]
energy_tabulated[i] = params[1]
if f.n_regions > 1:
raise NotImplementedError('Angular distribution with multiple '
'interpolation regions not supported.')
mu.append(Tabular(f.x, f.y, INTERPOLATION_SCHEME[f.interpolation[0]]))
energy = np.concatenate((energy_legendre, energy_tabulated))
return AngleDistribution(energy, mu)
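# --- Hedged usage sketch (not part of the original module) ---
# Building a purely isotropic distribution directly, mirroring what from_endf()
# does above for the ltt == 0 case; the energy bounds are placeholders.
#
#   import numpy as np
#   from openmc.stats import Uniform
#   energy = np.array([1.0e-5, 2.0e7])            # incoming energies in eV
#   mu = [Uniform(-1., 1.), Uniform(-1., 1.)]     # isotropic at both energies
#   dist = AngleDistribution(energy, mu)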
|
tests/utils/test_system.py | PyCN/pulsar | 1,410 | 12754200 | <reponame>PyCN/pulsar
'''Tests the tools and utilities in pulsar.utils.'''
import unittest
from pulsar.utils.system import platform, get_maxfd
class TestSystem(unittest.TestCase):
@unittest.skipUnless(platform.is_posix, 'Posix platform required')
def testPlatform(self):
self.assertFalse(platform.is_windows)
def test_maxfd(self):
m = get_maxfd()
self.assertTrue(m)
|
supriya/commands/server.py | butayama/supriya | 191 | 12754203 | <gh_stars>100-1000
import supriya.osc
from supriya.enums import RequestId
from .bases import Request, Response
class ClearScheduleRequest(Request):
### CLASS VARIABLES ###
request_id = RequestId.CLEAR_SCHEDULE
### PUBLIC METHODS ###
def to_osc(self, *, with_placeholders=False):
request_id = self.request_name
contents = [request_id]
message = supriya.osc.OscMessage(*contents)
return message
class DoneResponse(Response):
### INITIALIZER ###
def __init__(self, action=None):
self._action = action
### PUBLIC METHODS ###
@classmethod
def from_osc_message(cls, osc_message):
arguments = osc_message.contents
response = cls(action=tuple(arguments))
return response
### PUBLIC PROPERTIES ###
@property
def action(self):
return self._action
class DumpOscRequest(Request):
"""
A /dumpOSC request.
::
>>> import supriya.commands
>>> request = supriya.commands.DumpOscRequest(1)
>>> request
DumpOscRequest(
osc_status=1,
)
::
>>> request.to_osc()
OscMessage('/dumpOSC', 1)
"""
### CLASS VARIABLES ###
request_id = RequestId.DUMP_OSC
### INITIALIZER ###
def __init__(self, osc_status=None):
Request.__init__(self)
self._osc_status = int(osc_status)
### PUBLIC METHODS ###
def to_osc(self, *, with_placeholders=False):
request_id = self.request_name
osc_status = int(self.osc_status)
assert 0 <= osc_status <= 4
message = supriya.osc.OscMessage(request_id, osc_status)
return message
### PUBLIC PROPERTIES ###
@property
def osc_status(self):
return self._osc_status
class FailResponse(Response):
### INITIALIZER ###
def __init__(self, failed_command=None, failure_reason=None):
self._failed_command = failed_command
self._failure_reason = failure_reason
### PUBLIC METHODS ###
@classmethod
def from_osc_message(cls, osc_message):
failed_command = osc_message.contents[0]
failure_reason = osc_message.contents[1:]
if failure_reason:
failure_reason = tuple(failure_reason)
response = cls(failed_command=failed_command, failure_reason=failure_reason)
return response
### PUBLIC PROPERTIES ###
@property
def failed_command(self):
return self._failed_command
@property
def failure_reason(self):
return self._failure_reason
class NothingRequest(Request):
### CLASS VARIABLES ###
request_id = RequestId.NOTHING
### PUBLIC METHODS ###
def to_osc(self, *, with_placeholders=False):
return supriya.osc.OscMessage(0)
class NotifyRequest(Request):
"""
A /notify message.
::
>>> import supriya.commands
>>> request = supriya.commands.NotifyRequest(notify_status=True,)
>>> request
NotifyRequest(
notify_status=True,
)
::
>>> request.to_osc()
OscMessage('/notify', 1)
"""
### CLASS VARIABLES ###
request_id = RequestId.NOTIFY
### INITIALIZER ###
def __init__(self, notify_status=None):
Request.__init__(self)
self._notify_status = bool(notify_status)
### PUBLIC METHODS ###
def to_osc(self, *, with_placeholders=False):
request_id = self.request_name
notify_status = int(self.notify_status)
message = supriya.osc.OscMessage(request_id, notify_status)
return message
### PUBLIC PROPERTIES ###
@property
def notify_status(self):
return self._notify_status
@property
def response_patterns(self):
return ["/done", "/notify"], ["/fail", "/notify"]
class QuitRequest(Request):
### CLASS VARIABLES ###
request_id = RequestId.QUIT
### INITIALIZER ###
def __init__(self):
Request.__init__(self)
### PUBLIC METHODS ###
def to_osc(self, *, with_placeholders=False):
request_id = self.request_name
message = supriya.osc.OscMessage(request_id)
return message
### PUBLIC PROPERTIES ###
@property
def response_patterns(self):
return ["/done", "/quit"], None
class StatusRequest(Request):
"""
A /status request.
::
>>> import supriya.commands
>>> request = supriya.commands.StatusRequest()
>>> request
StatusRequest()
::
>>> request.to_osc()
OscMessage('/status')
"""
### CLASS VARIABLES ###
request_id = RequestId.STATUS
### PUBLIC METHODS ###
def to_osc(self, *, with_placeholders=False):
request_id = self.request_name
message = supriya.osc.OscMessage(request_id)
return message
### PUBLIC PROPERTIES ###
@property
def response_patterns(self):
return ["/status.reply"], None
class StatusResponse(Response):
### INITIALIZER ###
def __init__(
self,
actual_sample_rate=None,
average_cpu_usage=None,
group_count=None,
peak_cpu_usage=None,
synth_count=None,
synthdef_count=None,
target_sample_rate=None,
ugen_count=None,
):
self._actual_sample_rate = actual_sample_rate
self._average_cpu_usage = average_cpu_usage
self._group_count = group_count
self._peak_cpu_usage = peak_cpu_usage
self._synth_count = synth_count
self._synthdef_count = synthdef_count
self._target_sample_rate = target_sample_rate
self._ugen_count = ugen_count
### PUBLIC METHODS ###
@classmethod
def from_osc_message(cls, osc_message):
"""
Create response from OSC message.
::
>>> message = supriya.osc.OscMessage(
... "/status.reply",
... 1,
... 0,
... 0,
... 2,
... 4,
... 0.040679048746824265,
... 0.15118031203746796,
... 44100.0,
... 44100.00077873274,
... )
>>> supriya.commands.StatusResponse.from_osc_message(message)
StatusResponse(
actual_sample_rate=44100.00077873274,
average_cpu_usage=0.040679048746824265,
group_count=2,
peak_cpu_usage=0.15118031203746796,
synth_count=0,
synthdef_count=4,
target_sample_rate=44100.0,
ugen_count=0,
)
"""
arguments = osc_message.contents[1:]
(
ugen_count,
synth_count,
group_count,
synthdef_count,
average_cpu_usage,
peak_cpu_usage,
target_sample_rate,
actual_sample_rate,
) = arguments
response = cls(
actual_sample_rate=actual_sample_rate,
average_cpu_usage=average_cpu_usage,
group_count=group_count,
peak_cpu_usage=peak_cpu_usage,
synth_count=synth_count,
synthdef_count=synthdef_count,
target_sample_rate=target_sample_rate,
ugen_count=ugen_count,
)
return response
def to_dict(self):
"""
        Convert StatusResponse to JSON-serializable dictionary.
::
>>> status_response = supriya.commands.StatusResponse(
... actual_sample_rate=44100.05692801021,
... average_cpu_usage=8.151924133300781,
... group_count=6,
... peak_cpu_usage=15.151398658752441,
... synth_count=19,
... synthdef_count=42,
... target_sample_rate=44100.0,
... ugen_count=685,
... )
::
>>> import json
>>> result = status_response.to_dict()
>>> result = json.dumps(result, indent=4, separators=(",", ": "), sort_keys=True,)
>>> print(result)
{
"server_status": {
"actual_sample_rate": 44100.05692801021,
"average_cpu_usage": 8.151924133300781,
"group_count": 6,
"peak_cpu_usage": 15.151398658752441,
"synth_count": 19,
"synthdef_count": 42,
"target_sample_rate": 44100.0,
"ugen_count": 685
}
}
"""
result = {
"server_status": {
"actual_sample_rate": self.actual_sample_rate,
"average_cpu_usage": self.average_cpu_usage,
"group_count": self.group_count,
"peak_cpu_usage": self.peak_cpu_usage,
"synth_count": self.synth_count,
"synthdef_count": self.synthdef_count,
"target_sample_rate": self.target_sample_rate,
"ugen_count": self.ugen_count,
}
}
return result
### PUBLIC PROPERTIES ###
@property
def actual_sample_rate(self):
return self._actual_sample_rate
@property
def average_cpu_usage(self):
return self._average_cpu_usage
@property
def group_count(self):
return self._group_count
@property
def peak_cpu_usage(self):
return self._peak_cpu_usage
@property
def synth_count(self):
return self._synth_count
@property
def synthdef_count(self):
return self._synthdef_count
@property
def target_sample_rate(self):
return self._target_sample_rate
@property
def ugen_count(self):
return self._ugen_count
class SyncedResponse(Response):
### INITIALIZER ###
def __init__(self, sync_id=None):
self._sync_id = sync_id
### PUBLIC METHODS ###
@classmethod
def from_osc_message(cls, osc_message):
arguments = osc_message.contents
response = cls(*arguments)
return response
### PUBLIC PROPERTIES ###
@property
def sync_id(self):
return self._sync_id
class SyncRequest(Request):
"""
A /sync request.
::
>>> import supriya.commands
>>> request = supriya.commands.SyncRequest(sync_id=1999,)
>>> request
SyncRequest(
sync_id=1999,
)
::
>>> request.to_osc()
OscMessage('/sync', 1999)
"""
### CLASS VARIABLES ###
request_id = RequestId.SYNC
### INITIALIZER ###
def __init__(self, sync_id=None):
Request.__init__(self)
self._sync_id = int(sync_id)
### PUBLIC METHODS ###
def to_osc(self, *, with_placeholders=False):
request_id = self.request_name
sync_id = int(self.sync_id)
message = supriya.osc.OscMessage(request_id, sync_id)
return message
### PUBLIC PROPERTIES ###
@property
def response_patterns(self):
return ["/synced", self.sync_id], None
@property
def sync_id(self):
return self._sync_id
|
hack/def.bzl | akinfermo/elcarro-oracle-operator | 185 | 12754212 | load("@bazel_skylib//lib:shell.bzl", "shell")
def kubebuilder_manifests(name, srcs, config_root, **kwargs):
native.genrule(
name = name,
srcs = srcs,
outs = [name + ".yaml"],
cmd = """
tmp=$$(mktemp --directory)
cp -aL "%s/." "$$tmp"
$(location @io_k8s_sigs_kustomize_kustomize_v4//:v4) build "$$tmp/default" > $@
rm -r "$$tmp"
""" % config_root,
tools = [
"@io_k8s_sigs_kustomize_kustomize_v4//:v4",
],
**kwargs
)
def _ginkgo_test_impl(ctx):
wrapper = ctx.actions.declare_file(ctx.label.name)
ctx.actions.write(
output = wrapper,
content = """#!/usr/bin/env bash
set -e
exec {ginkgo} {ginkgo_args} {go_test} -- "$@"
""".format(
ginkgo = shell.quote(ctx.executable._ginkgo.short_path),
ginkgo_args = " ".join([shell.quote(arg) for arg in ctx.attr.ginkgo_args]),
# Ginkgo requires the precompiled binary end with ".test".
go_test = shell.quote(ctx.executable.go_test.short_path + ".test"),
),
is_executable = True,
)
return [DefaultInfo(
executable = wrapper,
runfiles = ctx.runfiles(
files = ctx.files.data,
symlinks = {ctx.executable.go_test.short_path + ".test": ctx.executable.go_test},
transitive_files = depset([], transitive = [ctx.attr._ginkgo.default_runfiles.files, ctx.attr.go_test.default_runfiles.files]),
),
)]
ginkgo_test = rule(
implementation = _ginkgo_test_impl,
attrs = {
"data": attr.label_list(allow_files = True),
"go_test": attr.label(executable = True, cfg = "target"),
"ginkgo_args": attr.string_list(),
"_ginkgo": attr.label(default = "@com_github_onsi_ginkgo//ginkgo", executable = True, cfg = "target"),
},
executable = True,
test = True,
)
|
code/chapter-4/uretprobes/example.py | dddddai/linux-observability-with-bpf | 643 | 12754220 | from bcc import BPF
bpf_source = """
BPF_HASH(cache, u64, u64);
int trace_start_time(struct pt_regs *ctx) {
u64 pid = bpf_get_current_pid_tgid();
u64 start_time_ns = bpf_ktime_get_ns();
cache.update(&pid, &start_time_ns);
return 0;
}
"""
bpf_source += """
int print_duration(struct pt_regs *ctx) {
u64 pid = bpf_get_current_pid_tgid();
u64 *start_time_ns = cache.lookup(&pid);
if (start_time_ns == 0) {
return 0;
}
u64 duration_ns = bpf_ktime_get_ns() - *start_time_ns;
bpf_trace_printk("Function call duration: %d\\n", duration_ns);
return 0;
}
"""
bpf = BPF(text = bpf_source)
bpf.attach_uprobe(name = "./hello-bpf", sym = "main.main", fn_name = "trace_start_time")
bpf.attach_uretprobe(name = "./hello-bpf", sym = "main.main", fn_name = "print_duration")
bpf.trace_print()
|
notebook/numpy_1d_to_2d.py | vhn0912/python-snippets | 174 | 12754240 | import numpy as np
a = np.arange(6)
print(a)
# [0 1 2 3 4 5]
print(a.reshape(2, 3))
# [[0 1 2]
# [3 4 5]]
print(a.reshape(-1, 3))
# [[0 1 2]
# [3 4 5]]
print(a.reshape(2, -1))
# [[0 1 2]
# [3 4 5]]
# print(a.reshape(3, 4))
# ValueError: cannot reshape array of size 6 into shape (3,4)
# print(a.reshape(-1, 4))
# ValueError: cannot reshape array of size 6 into shape (4)
l = [0, 1, 2, 3, 4, 5]
print(np.array(l).reshape(-1, 3).tolist())
# [[0, 1, 2], [3, 4, 5]]
print(np.array(l).reshape(3, -1).tolist())
# [[0, 1], [2, 3], [4, 5]]
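# Not in the original snippet: for a contiguous array such as `a`, reshape
# returns a view of the same buffer, so writes through the reshaped array are
# visible in `a` as well.
b = a.reshape(2, 3)
print(np.shares_memory(a, b))
# True
b[0, 0] = 100
print(a)
# [100   1   2   3   4   5]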
|
py/testdir_single_jvm/test_summary2_uniform.py | gigliovale/h2o | 882 | 12754255 | import unittest, time, sys, random, math, getpass
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i, h2o_util, h2o_print as h2p, h2o_summ
DO_MEDIAN = False
MAX_QBINS = 1000
ROWS = 500000
def write_syn_dataset(csvPathname, rowCount, colCount, expectedMin, expectedMax, SEED):
r1 = random.Random(SEED)
dsf = open(csvPathname, "w+")
expectedRange = (expectedMax - expectedMin)
actualMin = None
actualMax = None
for i in range(rowCount):
rowData = []
ri = expectedMin + (random.uniform(0,1) * expectedRange)
for j in range(colCount):
rowData.append(ri)
if not actualMin or ri < actualMin:
actualMin = ri
if not actualMax or ri > actualMax:
actualMax = ri
rowDataCsv = ",".join(map(str,rowData))
dsf.write(rowDataCsv + "\n")
dsf.close()
return (actualMax, actualMin)
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_summary2_uniform(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
tryList = [
# colname, (min, 25th, 50th, 75th, max)
(ROWS, 1, 'x.hex', 0.0, 20000.0, ['C1', 0, 5000.0, 10000.0, 15000.0, 20000.0]),
(ROWS, 1, 'x.hex', -5000.0, 0.0, ['C1', -5000.0, -3750.0, -2500.0, -1250.0, 0.0]),
(ROWS, 1, 'x.hex', -100000.0, 100000.0, ['C1', -100000.0, -50000.0, 0.0, 50000.0, 100000.0]),
(ROWS, 1, 'x.hex', -1.0, 1.0, ['C1', -1.0, -0.50, 0.0, 0.50, 1.0]),
(ROWS, 1, 'A.hex', 1.0, 100.0, ['C1', 1.0, 26.0, 51.0, 76.0, 100.0]),
(ROWS, 1, 'A.hex', -99.0, 99.0, ['C1', -99.0, -50.0, 0.0, 50.0, 99.0]),
(ROWS, 1, 'B.hex', 1.0, 10000.0, ['C1', 1.0, 2501.0, 5001.0, 7501.0, 10000.0]),
(ROWS, 1, 'B.hex', -100.0, 100.0, ['C1', -100.0, -50.0, 0.0, 50.0, 100.0]),
(ROWS, 1, 'C.hex', 1.0, 100000.0, ['C1', 1.0, 25001.0, 50001.0, 75001.0, 100000.0]),
(ROWS, 1, 'C.hex', -100.0, 100.0, ['C1', -100.0, -50.0, 0.0, 50.0, 100.0]),
]
timeoutSecs = 10
trial = 1
n = h2o.nodes[0]
lenNodes = len(h2o.nodes)
x = 0
timeoutSecs = 60
for (rowCount, colCount, hex_key, expectedMin, expectedMax, expected) in tryList:
SEEDPERFILE = random.randint(0, sys.maxint)
x += 1
csvFilename = 'syn_' + "binary" + "_" + str(rowCount) + 'x' + str(colCount) + '.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
print "Creating random", csvPathname
(actualMax, actualMin) = write_syn_dataset(csvPathname, rowCount, colCount,
expectedMin, expectedMax, SEEDPERFILE)
# adjust the min/max depending on what the min/max actually was!
# the expected 25%/50%/75% will still be off
expected[1] = actualMin
expected[5] = actualMax
# max error = half the bin size?
# use this for comparing to sklearn/sort
expectedRange = expectedMax - expectedMin
            # because of floor and ceil effects, we potentially lose 2 bins (worst case)
            # the extra bin for the max value is an extra bin...ignore it
expectedBin = expectedRange/(MAX_QBINS-2)
maxDelta = 1 * expectedBin
# how much error do we get in the random distribution gen? pain. It's a probability issue
# smaller error likely with larger # of values.
# the maxDelta used for the scipy/sort compare can be tighter, since it's looking
# at actual data
# this is way too coarse. can't get the distribution tight?
maxDeltaPlusDistVariance = 10 * maxDelta
# allow some fuzz in the comparison to scipy/sort
maxDelta = 1.1 * maxDelta
csvPathnameFull = h2i.find_folder_and_filename(None, csvPathname, returnFullPath=True)
parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key,
timeoutSecs=30, doSummary=False)
print "Parse result['destination_key']:", parseResult['destination_key']
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\n" + csvFilename
numRows = inspect["numRows"]
numCols = inspect["numCols"]
summaryResult = h2o_cmd.runSummary(key=hex_key, max_qbins=MAX_QBINS)
h2o.verboseprint("summaryResult:", h2o.dump_json(summaryResult))
# only one column
column = summaryResult['summaries'][0]
colname = column['colname']
self.assertEqual(colname, expected[0])
quantile = 0.5 if DO_MEDIAN else .999
# get both answers since we feed both below for checking
q = h2o.nodes[0].quantiles(source_key=hex_key, column=column['colname'],
quantile=quantile, max_qbins=MAX_QBINS, multiple_pass=2, interpolation_type=7) # linear
qresult = q['result']
qresult_single = q['result_single']
h2p.blue_print("h2o quantiles result:", qresult)
h2p.blue_print("h2o quantiles result_single:", qresult_single)
h2p.blue_print("h2o quantiles iterations:", q['iterations'])
h2p.blue_print("h2o quantiles interpolated:", q['interpolated'])
print h2o.dump_json(q)
coltype = column['type']
nacnt = column['nacnt']
stats = column['stats']
stattype= stats['type']
# FIX! we should compare mean and sd to expected?
mean = stats['mean']
sd = stats['sd']
print "colname:", colname, "mean (2 places):", h2o_util.twoDecimals(mean)
print "colname:", colname, "std dev. (2 places):", h2o_util.twoDecimals(sd)
zeros = stats['zeros']
mins = stats['mins']
# these should match exactly except for fp compare error?
h2o_util.assertApproxEqual(mins[0], expected[1], rel=.00001, msg='min is not expected')
maxs = stats['maxs']
h2o_util.assertApproxEqual(maxs[0], expected[5], rel=.00001, msg='max is not expected')
pct = stats['pct']
# the thresholds h2o used, should match what we expected
expectedPct= [0.01, 0.05, 0.1, 0.25, 0.33, 0.5, 0.66, 0.75, 0.9, 0.95, 0.99]
pctile = stats['pctile']
h2o_util.assertApproxEqual(pctile[3], expected[2], tol=maxDeltaPlusDistVariance,
msg='25th percentile is not approx. expected for generated uniform range %s %s' %\
(expectedMin, expectedMax))
h2o_util.assertApproxEqual(pctile[5], expected[3], tol=maxDeltaPlusDistVariance,
msg='50th percentile is not approx. expected for generated uniform range %s %s' %\
(expectedMin, expectedMax))
h2o_util.assertApproxEqual(pctile[7], expected[4], tol=maxDeltaPlusDistVariance,
msg='75th percentile is not approx. expected for generated uniform range %s %s' %\
(expectedMin, expectedMax))
hstart = column['hstart']
hstep = column['hstep']
hbrk = column['hbrk']
hcnt = column['hcnt']
print "pct:", pct
print "hcnt:", hcnt
print "len(hcnt)", len(hcnt)
# don't check the last bin
# too hard to estimate when there are ints now, due to floor/ceil int alignment?
# don't check the last two bins
for b in hcnt[1:(-2 if len(hcnt)>2 else -1)]:
# should we be able to check for a uniform distribution in the files?
e = numRows/len(hcnt)
self.assertAlmostEqual(b, rowCount/len(hcnt), delta=.01*rowCount,
msg="Bins not right. b: %s e: %s" % (b, e))
pt = h2o_util.twoDecimals(pctile)
mx = h2o_util.twoDecimals(maxs)
mn = h2o_util.twoDecimals(mins)
print "colname:", colname, "pctile (2 places):", pt
print "colname:", colname, "maxs: (2 places):", mx
print "colname:", colname, "mins: (2 places):", mn
# FIX! we should do an exec and compare using the exec quantile too
compareActual = mn[0], pt[3], pt[5], pt[7], mx[0]
h2p.green_print("min/25/50/75/max colname:", colname, "(2 places):", compareActual)
print "maxs colname:", colname, "(2 places):", mx
print "mins colname:", colname, "(2 places):", mn
trial += 1
# don't check if colname is empty..means it's a string and scipy doesn't parse right?
if colname!='':
# don't do for enums
# also get the median with a sort (h2o_summ.percentileOnSortedlist()
h2o_summ.quantile_comparisons(
csvPathnameFull,
col=0, # what col to extract from the csv
datatype='float',
quantile=0.5 if DO_MEDIAN else 0.999,
h2oSummary2=pctile[5 if DO_MEDIAN else 10],
h2oQuantilesApprox=qresult_single,
h2oQuantilesExact=qresult,
h2oSummary2MaxErr=maxDelta,
)
h2o.nodes[0].remove_all_keys()
if __name__ == '__main__':
h2o.unit_main()
|
pymdp/envs/env.py | Arun-Niranjan/pymdp | 108 | 12754257 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Environment Base Class
__author__: <NAME>, <NAME>, <NAME>
"""
class Env(object):
def reset(self, state=None):
raise NotImplementedError
def step(self, action):
raise NotImplementedError
def render(self):
pass
def sample_action(self):
pass
def get_likelihood_dist(self):
raise ValueError(
"<{}> does not provide a model specification".format(type(self).__name__)
)
def get_transition_dist(self):
raise ValueError(
"<{}> does not provide a model specification".format(type(self).__name__)
)
def get_uniform_posterior(self):
raise ValueError(
"<{}> does not provide a model specification".format(type(self).__name__)
)
def get_rand_likelihood_dist(self):
raise ValueError(
"<{}> does not provide a model specification".format(type(self).__name__)
)
def get_rand_transition_dist(self):
raise ValueError(
"<{}> does not provide a model specification".format(type(self).__name__)
)
def __str__(self):
return "<{} instance>".format(type(self).__name__)
|
src/garage/examples/sim_policy.py | blacksph3re/garage | 1,500 | 12754288 | #!/usr/bin/env python3
"""Simulates pre-learned policy."""
import argparse
import sys
import cloudpickle
import tensorflow as tf
from garage import rollout
def query_yes_no(question, default='yes'):
"""Ask a yes/no question via raw_input() and return their answer.
Args:
question (str): Printed to user.
default (str or None): Default if user just hits enter.
Raises:
ValueError: If the provided default is invalid.
Returns:
bool: True for "yes"y answers, False for "no".
"""
valid = {'yes': True, 'y': True, 'ye': True, 'no': False, 'n': False}
if default is None:
prompt = ' [y/n] '
elif default == 'yes':
prompt = ' [Y/n] '
elif default == 'no':
prompt = ' [y/N] '
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('file', type=str, help='path to the snapshot file')
parser.add_argument('--max_episode_length',
type=int,
default=1000,
help='Max length of episode')
args = parser.parse_args()
    # If the snapshot file uses tensorflow, do:
# import tensorflow as tf
# with tf.compat.v1.Session():
# [rest of the code]
with tf.compat.v1.Session() as sess:
        with open(args.file, 'rb') as f:
            data = cloudpickle.load(f)
policy = data['algo'].policy
env = data['env']
while True:
path = rollout(env,
policy,
max_episode_length=args.max_episode_length,
animated=True)
if not query_yes_no('Continue simulation?'):
break
|
dev/dev.py | emadehsan/climetlab | 182 | 12754307 | import climetlab as cml
url = "https://www.cpc.ncep.noaa.gov/products/precip/CWlink/daily_ao_index/monthly.ao.index.b50.current.ascii"
s = cml.load_source("url", url, reader="fix_width_format")
print(s.to_pandas())
|
lib/tests/streamlit/report_context_test.py | AnOctopus/streamlit | 19,099 | 12754315 | # Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from streamlit.errors import StreamlitAPIException
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.report_thread import ReportContext
from streamlit.state.session_state import SessionState
from streamlit.uploaded_file_manager import UploadedFileManager
class ReportContextTest(unittest.TestCase):
def test_set_page_config_immutable(self):
"""st.set_page_config must be called at most once"""
fake_enqueue = lambda msg: None
ctx = ReportContext(
"TestSessionID",
fake_enqueue,
"",
SessionState(),
UploadedFileManager(),
)
msg = ForwardMsg()
msg.page_config_changed.title = "foo"
ctx.enqueue(msg)
with self.assertRaises(StreamlitAPIException):
ctx.enqueue(msg)
def test_set_page_config_first(self):
"""st.set_page_config must be called before other st commands
when the script has been marked as started"""
fake_enqueue = lambda msg: None
ctx = ReportContext(
"TestSessionID",
fake_enqueue,
"",
SessionState(),
UploadedFileManager(),
)
ctx.on_script_start()
markdown_msg = ForwardMsg()
markdown_msg.delta.new_element.markdown.body = "foo"
msg = ForwardMsg()
msg.page_config_changed.title = "foo"
ctx.enqueue(markdown_msg)
with self.assertRaises(StreamlitAPIException):
ctx.enqueue(msg)
def test_disallow_set_page_config_twice(self):
"""st.set_page_config cannot be called twice"""
fake_enqueue = lambda msg: None
ctx = ReportContext(
"TestSessionID",
fake_enqueue,
"",
SessionState(),
UploadedFileManager(),
)
ctx.on_script_start()
msg = ForwardMsg()
msg.page_config_changed.title = "foo"
ctx.enqueue(msg)
with self.assertRaises(StreamlitAPIException):
same_msg = ForwardMsg()
same_msg.page_config_changed.title = "bar"
ctx.enqueue(same_msg)
def test_set_page_config_reset(self):
"""st.set_page_config should be allowed after a rerun"""
fake_enqueue = lambda msg: None
ctx = ReportContext(
"TestSessionID",
fake_enqueue,
"",
SessionState(),
UploadedFileManager(),
)
ctx.on_script_start()
msg = ForwardMsg()
msg.page_config_changed.title = "foo"
ctx.enqueue(msg)
ctx.reset()
try:
ctx.on_script_start()
ctx.enqueue(msg)
except StreamlitAPIException:
self.fail("set_page_config should have succeeded after reset!")
|
brainstorm/layers/recurrent_layer.py | PyCN/brainstorm | 1,473 | 12754327 | #!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
from collections import OrderedDict
from brainstorm.layers.base_layer import Layer
from brainstorm.structure.buffer_structure import (BufferStructure,
StructureTemplate)
from brainstorm.structure.construction import ConstructionWrapper
from brainstorm.utils import LayerValidationError, flatten_time, \
flatten_time_and_features
def Recurrent(size, activation='tanh', name=None):
"""Create a Simple Recurrent layer."""
return ConstructionWrapper.create(RecurrentLayerImpl, size=size,
name=name, activation=activation)
class RecurrentLayerImpl(Layer):
expected_inputs = {'default': StructureTemplate('T', 'B', '...')}
expected_kwargs = {'size', 'activation'}
def setup(self, kwargs, in_shapes):
self.activation = kwargs.get('activation', 'tanh')
self.size = kwargs.get('size', self.in_shapes['default'].feature_size)
if not isinstance(self.size, int):
raise LayerValidationError('size must be int but was {}'.
format(self.size))
in_size = self.in_shapes['default'].feature_size
outputs = OrderedDict()
outputs['default'] = BufferStructure('T', 'B', self.size,
context_size=1)
parameters = OrderedDict()
parameters['W'] = BufferStructure(self.size, in_size)
parameters['R'] = BufferStructure(self.size, self.size)
parameters['bias'] = BufferStructure(self.size)
internals = OrderedDict()
internals['Ha'] = BufferStructure('T', 'B', self.size, context_size=1)
internals['dHa'] = BufferStructure('T', 'B', self.size, context_size=1,
is_backward_only=True)
internals['dHb'] = BufferStructure('T', 'B', self.size, context_size=1,
is_backward_only=True)
return outputs, parameters, internals
def forward_pass(self, buffers, training_pass=True):
# prepare
_h = self.handler
W, R, bias = buffers.parameters
inputs = buffers.inputs.default
outputs = buffers.outputs.default
Ha = buffers.internals.Ha
flat_inputs = flatten_time_and_features(inputs)
flat_H = flatten_time(Ha[:-1])
_h.dot_mm(flat_inputs, W, flat_H, transb=True)
_h.add_mv(flat_H, bias.reshape((1, self.size)), flat_H)
for t in range(inputs.shape[0]):
_h.dot_add_mm(outputs[t - 1], R, Ha[t], transb=True)
_h.act_func[self.activation](Ha[t], outputs[t])
def backward_pass(self, buffers):
# prepare
_h = self.handler
W, R, bias = buffers.parameters
dW, dR, dbias = buffers.gradients
inputs = buffers.inputs.default
outputs = buffers.outputs.default
dinputs = buffers.input_deltas.default
doutputs = buffers.output_deltas.default
Ha, dHa, dHb = buffers.internals
_h.copy_to(doutputs, dHb)
T = inputs.shape[0] - 1
_h.act_func_deriv[self.activation](Ha[T], outputs[T], dHb[T], dHa[T])
for t in range(T - 1, -1, -1):
_h.dot_add_mm(dHa[t + 1], R, dHb[t])
_h.act_func_deriv[self.activation](Ha[t], outputs[t],
dHb[t], dHa[t])
flat_inputs = flatten_time_and_features(inputs)
flat_dinputs = flatten_time_and_features(dinputs)
flat_dHa = flatten_time(dHa[:-1])
# calculate in_deltas and gradients
_h.dot_add_mm(flat_dHa, W, flat_dinputs)
_h.dot_add_mm(flat_dHa, flat_inputs, dW, transa=True)
dbias_tmp = _h.allocate(dbias.shape)
_h.sum_t(flat_dHa, axis=0, out=dbias_tmp)
_h.add_tt(dbias, dbias_tmp, dbias)
flat_outputs = flatten_time(outputs[:-2])
flat_dHa = flatten_time(dHa[1:-1])
_h.dot_add_mm(flat_dHa, flat_outputs, dR, transa=True)
_h.dot_add_mm(dHa[0], outputs[-1], dR, transa=True)
|
release/stubs.min/System/Windows/Media/Animation_parts/KeySpline.py | htlcnn/ironpython-stubs | 182 | 12754392 | class KeySpline(Freezable,ISealable,IFormattable):
"""
This class is used by a spline key frame to define animation progress.
KeySpline(controlPoint1: Point,controlPoint2: Point)
KeySpline()
KeySpline(x1: float,y1: float,x2: float,y2: float)
"""
def CloneCore(self,*args):
"""
CloneCore(self: KeySpline,sourceFreezable: Freezable)
Makes this instance a deep copy of the specified System.Windows.Media.Animation.KeySpline. When
copying dependency properties,this method copies resource references and data bindings (but
they might no longer resolve) but not animations or their current values.
sourceFreezable: The System.Windows.Media.Animation.KeySpline to clone.
"""
pass
def CloneCurrentValueCore(self,*args):
"""
CloneCurrentValueCore(self: KeySpline,sourceFreezable: Freezable)
Makes this instance a modifiable deep copy of the specified
System.Windows.Media.Animation.KeySpline using current property values. Resource references,
data bindings,and animations are not copied,but their current values are.
sourceFreezable: The System.Windows.Media.Animation.KeySpline to clone.
"""
pass
def CreateInstance(self,*args):
"""
CreateInstance(self: Freezable) -> Freezable
Initializes a new instance of the System.Windows.Freezable class.
Returns: The new instance.
"""
pass
def CreateInstanceCore(self,*args):
"""
CreateInstanceCore(self: KeySpline) -> Freezable
Creates a new instance of System.Windows.Media.Animation.KeySpline.
Returns: A new instance of System.Windows.Media.Animation.KeySpline.
"""
pass
def FreezeCore(self,*args):
"""
FreezeCore(self: Freezable,isChecking: bool) -> bool
Makes the System.Windows.Freezable object unmodifiable or tests whether it can be made
unmodifiable.
isChecking: true to return an indication of whether the object can be frozen (without actually freezing it);
false to actually freeze the object.
Returns: If isChecking is true,this method returns true if the System.Windows.Freezable can be made
unmodifiable,or false if it cannot be made unmodifiable. If isChecking is false,this method
returns true if the if the specified System.Windows.Freezable is now unmodifiable,or false if
it cannot be made unmodifiable.
"""
pass
def GetAsFrozenCore(self,*args):
"""
GetAsFrozenCore(self: KeySpline,sourceFreezable: Freezable)
Makes this instance a clone of the specified System.Windows.Media.Animation.KeySpline object.
sourceFreezable: The System.Windows.Media.Animation.KeySpline object to clone.
"""
pass
def GetCurrentValueAsFrozenCore(self,*args):
"""
GetCurrentValueAsFrozenCore(self: KeySpline,sourceFreezable: Freezable)
Makes this instance a frozen clone of the specified System.Windows.Media.Animation.KeySpline.
Resource references,data bindings,and animations are not copied,but their current values are.
sourceFreezable: The System.Windows.Media.Animation.KeySpline to copy and freeze.
"""
pass
def GetSplineProgress(self,linearProgress):
"""
GetSplineProgress(self: KeySpline,linearProgress: float) -> float
Calculates spline progress from a supplied linear progress.
linearProgress: The linear progress to evaluate.
Returns: The calculated spline progress.
"""
pass
def OnChanged(self,*args):
"""
OnChanged(self: KeySpline)
Called when the current System.Windows.Media.Animation.KeySpline object is modified.
"""
pass
def OnFreezablePropertyChanged(self,*args):
"""
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject,property: DependencyProperty)
This member supports the Windows Presentation Foundation (WPF) infrastructure and is not
intended to be used directly from your code.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
property: The property that changed.
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject)
Ensures that appropriate context pointers are established for a
System.Windows.DependencyObjectType data member that has just been set.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
"""
pass
def OnPropertyChanged(self,*args):
"""
OnPropertyChanged(self: Freezable,e: DependencyPropertyChangedEventArgs)
Overrides the System.Windows.DependencyObject implementation of
System.Windows.DependencyObject.OnPropertyChanged(System.Windows.DependencyPropertyChangedEventAr
gs) to also invoke any System.Windows.Freezable.Changed handlers in response to a changing
dependency property of type System.Windows.Freezable.
e: Event data that contains information about which property changed,and its old and new values.
"""
pass
def ReadPreamble(self,*args):
"""
ReadPreamble(self: Freezable)
Ensures that the System.Windows.Freezable is being accessed from a valid thread. Inheritors of
System.Windows.Freezable must call this method at the beginning of any API that reads data
members that are not dependency properties.
"""
pass
def ShouldSerializeProperty(self,*args):
"""
ShouldSerializeProperty(self: DependencyObject,dp: DependencyProperty) -> bool
Returns a value that indicates whether serialization processes should serialize the value for
the provided dependency property.
dp: The identifier for the dependency property that should be serialized.
Returns: true if the dependency property that is supplied should be value-serialized; otherwise,false.
"""
pass
def ToString(self,formatProvider=None):
"""
ToString(self: KeySpline,formatProvider: IFormatProvider) -> str
Creates a string representation of this System.Windows.Media.Animation.KeySpline based on the
supplied System.IFormatProvider.
formatProvider: The format provider to use. If provider is null,the current culture is used.
Returns: A string representation of this instance of System.Windows.Media.Animation.KeySpline.
ToString(self: KeySpline) -> str
Creates a string representation of this instance of System.Windows.Media.Animation.KeySpline
based on the current culture.
Returns: A string representation of this System.Windows.Media.Animation.KeySpline.
"""
pass
def WritePostscript(self,*args):
"""
WritePostscript(self: Freezable)
Raises the System.Windows.Freezable.Changed event for the System.Windows.Freezable and invokes
its System.Windows.Freezable.OnChanged method. Classes that derive from System.Windows.Freezable
should call this method at the end of any API that modifies class members that are not stored as
dependency properties.
"""
pass
def WritePreamble(self,*args):
"""
WritePreamble(self: Freezable)
Verifies that the System.Windows.Freezable is not frozen and that it is being accessed from a
valid threading context. System.Windows.Freezable inheritors should call this method at the
beginning of any API that writes to data members that are not dependency properties.
"""
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,*__args):
"""
__new__(cls: type)
__new__(cls: type,x1: float,y1: float,x2: float,y2: float)
__new__(cls: type,controlPoint1: Point,controlPoint2: Point)
"""
pass
def __str__(self,*args):
pass
ControlPoint1=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The first control point used to define a Bezier curve that describes a System.Windows.Media.Animation.KeySpline.
Get: ControlPoint1(self: KeySpline) -> Point
Set: ControlPoint1(self: KeySpline)=value
"""
ControlPoint2=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The second control point used to define a Bezier curve that describes a System.Windows.Media.Animation.KeySpline.
Get: ControlPoint2(self: KeySpline) -> Point
Set: ControlPoint2(self: KeySpline)=value
"""
|
examples/rl/environments/capturegame/pente.py | sveilleux1/pybrain | 2,208 | 12754407 | from __future__ import print_function
#!/usr/bin/env python
""" A little script illustrating how to use a (randomly initialized)
convolutional network to play a game of Pente. """
__author__ = '<NAME>, <EMAIL>'
from pybrain.rl.environments.twoplayergames.pente import PenteGame
from pybrain.rl.environments.twoplayergames.gomokuplayers.randomplayer import RandomGomokuPlayer
from pybrain.rl.environments.twoplayergames.gomokuplayers.moduledecision import ModuleDecidingPlayer
from pybrain.structure.networks.custom.convboard import ConvolutionalBoardNetwork
dim = 7
g = PenteGame((dim, dim))
print(g)
n = ConvolutionalBoardNetwork(dim, 5, 3)
p1 = ModuleDecidingPlayer(n, g)
p2 = RandomGomokuPlayer(g)
p2.color = g.WHITE
g.playToTheEnd(p1, p2)
print(g)
|
examples/database/discovery_rate.py | tachyontraveler/qmpy | 103 | 12754414 | # from qmpy import *
from matplotlib import rc
rc("font", **{"family": "serif", "serif": ["Century"]})
params = {"font.size": 48}
import matplotlib
matplotlib.rcParams.update(params)
import matplotlib.pylab as plt
import pickle
import sys
import numpy as np
def get_data():
if not "new" in sys.argv:
return pickle.loads(open("dates.txt").read())
forms = Formation.objects.filter(
fit="diff_refs",
hull_distance__lte=0.025,
entry__ntypes__gt=1,
entry__icsd__coll_code__gt=0,
).select_related()
dates = []
for form in forms:
try:
struct = form.entry.input
sdates = struct.similar.values_list("entry__reference__year", flat=True)
dates.append(min(sdates))
except:
continue
result = open("dates.txt", "w")
result.write(pickle.dumps(dates))
result.close()
dates = np.array(get_data())
plt.hist(dates, bins=max(dates) - min(dates), cumulative=True)
plt.xlabel("Year")
plt.ylabel("# of Stable Structures")
plt.savefig("test.eps", bbox_inches="tight")
|
tensorflow/python/ops/initializers_ns.py | EricRemmerswaal/tensorflow | 190,993 | 12754416 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Public API for tf.initializer namespace."""
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variables as _variables
# variable initializers
zeros = init_ops.zeros_initializer
ones = init_ops.ones_initializer
constant = init_ops.constant_initializer
random_uniform = init_ops.random_uniform_initializer
random_normal = init_ops.random_normal_initializer
truncated_normal = init_ops.truncated_normal_initializer
uniform_unit_scaling = init_ops.uniform_unit_scaling_initializer
variance_scaling = init_ops.variance_scaling_initializer
orthogonal = init_ops.orthogonal_initializer
identity = init_ops.identity_initializer
# variable initializer ops
variables = _variables.variables_initializer
global_variables = _variables.global_variables_initializer
local_variables = _variables.local_variables_initializer
# Seal API.
del init_ops
del _variables
|
tests/uri_encoding.py | naokazuterada/MarkdownTOC | 299 | 12754424 | # coding:utf-8
from base import TestBase
class TestUriEncoding(TestBase):
"""Test for attributes \'uri_encoding\'"""
# for debug
# def tearDown(self):
# pass
uri_encoding_text = """
<!-- MarkdownTOC autolink="true" lowercase="only_ascii" {0} -->
<!-- /MarkdownTOC -->
# Camión, último
# España
# こんにちわ 世界
# Пример Example
# 一个标题
"""
# default: uri_encoding=true
def test_uri_encoding_default(self):
toc = self.init_update(self.uri_encoding_text.format(""))["toc"]
self.assert_In("- [Camión, último](#cami%C3%B3n-%C3%BAltimo)", toc)
self.assert_In("- [España](#espa%C3%B1a)", toc)
self.assert_In(
"- [こんにちわ 世界](#%E3%81%93%E3%82%93%E3%81%AB%E3%81%A1%E3%82%8F-%E4%B8%96%E7%95%8C)",
toc,
)
self.assert_In(
"- [Пример Example](#%D0%9F%D1%80%D0%B8%D0%BC%D0%B5%D1%80-example)", toc
)
self.assert_In("- [一个标题](#%E4%B8%80%E4%B8%AA%E6%A0%87%E9%A2%98)", toc)
def test_uri_encoding_true(self):
toc = self.init_update(self.uri_encoding_text.format("uri_encoding=true"))[
"toc"
]
self.assert_In("- [Camión, último](#cami%C3%B3n-%C3%BAltimo)", toc)
self.assert_In("- [España](#espa%C3%B1a)", toc)
self.assert_In(
"- [こんにちわ 世界](#%E3%81%93%E3%82%93%E3%81%AB%E3%81%A1%E3%82%8F-%E4%B8%96%E7%95%8C)",
toc,
)
self.assert_In(
"- [Пример Example](#%D0%9F%D1%80%D0%B8%D0%BC%D0%B5%D1%80-example)", toc
)
self.assert_In("- [一个标题](#%E4%B8%80%E4%B8%AA%E6%A0%87%E9%A2%98)", toc)
def test_uri_encoding_false(self):
toc = self.init_update(self.uri_encoding_text.format("uri_encoding=false"))[
"toc"
]
self.assert_In("- [Camión, último](#camión-último)", toc)
self.assert_In("- [España](#españa)", toc)
self.assert_In("- [こんにちわ 世界](#こんにちわ-世界)", toc)
self.assert_In("- [Пример Example](#Пример-example)", toc)
self.assert_In("- [一个标题](#一个标题)", toc)
|
desktop/core/ext-py/docutils-0.14/test/test_parsers/test_get_parser_class.py | kokosing/hue | 5,079 | 12754425 | #! /usr/bin/env python
# $Id: test_get_parser_class.py 7504 2012-08-27 07:55:20Z grubert $
# Author: <NAME>
# Maintainer: <EMAIL>
# Copyright: This module has been placed in the public domain.
"""
test get_parser_class
"""
from __init__ import DocutilsTestSupport
from docutils.parsers import get_parser_class
class GetParserClassTestCase(DocutilsTestSupport.StandardTestCase):
def test_registered_parser(self):
rdr = get_parser_class('rst')
# raises ImportError on failure
def test_bogus_parser(self):
self.assertRaises(ImportError,
get_parser_class, 'nope')
def test_local_parser(self):
# requires local-parser.py in test directory (testroot)
wr = get_parser_class('local-parser')
if __name__ == '__main__':
import unittest
unittest.main()
|
web/search/esmodels.py | ChiChou/wiggle | 110 | 12754460 | from datetime import datetime
from elasticsearch_dsl import *
from elasticsearch_dsl.connections import connections
# Define a default Elasticsearch client
connection = connections.create_connection(hosts=['es', 'localhost'])
path_analyzer = analysis.analyzer('path', tokenizer='path_hierarchy')
entitlement_key_analyzer = analysis.analyzer('entitlement_key',
tokenizer='char_group',
tokenize_on_chars=['-', '.']
)
class Import(InnerDoc):
name = Text()
demname = Text()
flagname = Text()
ordinal = Long()
bind = Keyword()
size = Long()
type = Keyword()
vaddr = Long()
paddr = Long()
class Export(InnerDoc):
name = Text()
ordinal = Long()
bind = Keyword()
type = Keyword()
plt = Long()
class Segment(InnerDoc):
name = Text()
flags = Text()
class Executable(Document):
raw_path = Text(analyzer=path_analyzer)
path = Text()
strings = Text()
info = Object()
libraries = Text(analyzer=path_analyzer, multi=True)
imports = Nested(Import)
exports = Nested(Export)
segments = Nested(Segment)
class Index:
name = 'executable-*'
class Method(InnerDoc):
name = Text()
addr = Long()
class Field(InnerDoc):
name = Text()
addr = Long()
class Clazz(InnerDoc):
classname = Text()
methods = Nested(Method)
fields = Nested(Field)
index = Long()
addr = Long()
class RPath(InnerDoc):
prefix = Keyword()
path = Text(analyzer=path_analyzer)
class MachO(Executable):
classdump = Text()
classes = Nested(Clazz)
rpaths = Nested(RPath)
# code signature
ent = Text() # json
ent_str = Text() # xml
ent_keys = Text(analyzer=entitlement_key_analyzer,
multi=True, fields={'raw': Keyword()})
cs_flags = Long()
cs_flags_str = Keyword(multi=True)
lv = Boolean()
signed = Boolean()
apple = Boolean()
codesign = Text()
info_plist = Text() # json
info_plist_str = Text() # xml
class Index:
name = 'macho-*'
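# Illustrative helper (not part of the original module): one way a MachO
# document could be indexed and searched with elasticsearch-dsl. The index
# name and field values are assumptions for demonstration only.
def _example_usage(index_name='macho-10.14.2'):
    doc = MachO(raw_path='/bin/ls', path='/bin/ls', signed=True, apple=True)
    doc.save(index=index_name)
    return MachO.search(index=index_name).query('match', path='ls').execute()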
if __name__ == "__main__":
i = Index('macho-10.14.2')
i.delete()
# i.save()
index = MachO._index.as_template('macho-test')
index.save()
|
plugins/cipher_plugin/plugin.py | MrT3acher/Android-Malware-Sandbox | 218 | 12754462 | from sqlalchemy.orm import sessionmaker, scoped_session
from lib.model.database.Database import Database
from .lib.Cipher import Cipher
import logging
import os
import binascii
import base64
import filetype
current_path = os.path.dirname(os.path.realpath(__file__))
def onload():
logging.debug("Cipher:loaded()")
def onunload():
logging.debug("Cipher:unloaded()")
def parse(module, message, data):
if(message['plugin'] == "cipher"):
# Create a thread local session
engine = Database.get_engine()
session_factory = sessionmaker(bind=engine)
Session = scoped_session(session_factory)
session = Session()
Session.remove()
# Fetch application for this session ( could not use self.application
# because the usage must be thread local )
key_value = binascii.unhexlify(message["key"]).hex()
iv = binascii.unhexlify(message["iv"]).hex()
# TODO have link from base64
kind_input = filetype.guess(binascii.unhexlify(message["arg"]))
kind_result = filetype.guess(binascii.unhexlify(message["result"]))
# print('File extension: %s' % kind.extension)
# print('File MIME type: %s' % kind.mime)
if message["opmode"] == 1:
opmode_info = "ENCRYPT_MODE"
try:
input_value = binascii.unhexlify(message["arg"]).decode('utf8')
except:
input_value = base64.b64encode(binascii.unhexlify(message["arg"])).decode('utf8')
output_value = binascii.unhexlify(message["result"]).hex()
elif message["opmode"] == 2:
opmode_info = "DECRYPT_MODE"
try:
output_value = binascii.unhexlify(message["result"]).decode('utf8')
except:
output_value = base64.b64encode(binascii.unhexlify(message["result"])).decode('utf8')
input_value = binascii.unhexlify(message["arg"]).hex()
cipher = Cipher(message["algo"], key_value, iv, opmode_info, input_value, output_value, message["stack"])
cipher.application_id = module.application.id
logging.debug(repr(cipher))
query = session.query(Cipher).filter(Cipher.application_id==cipher.application_id).filter(Cipher.algorithm==cipher.algorithm).filter(Cipher.key==cipher.key).filter(Cipher.iv==cipher.iv).filter(Cipher.opmode==cipher.opmode).filter(Cipher.input_value==cipher.input_value).filter(Cipher.output_value==cipher.output_value)
resultQuery = query.all()
# Prevent duplicates in DB
if len(resultQuery) == 0:
session.add(cipher)
session.commit()
def get_frida_script():
logging.debug("Cipher:get_frida_script()")
with open(f"{current_path}/frida.js") as f:
return ("Cipher", f.read()) |
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_manageability_object_tracking_datatypes.py | CiscoDevNet/ydk-py | 177 | 12754477 | """ Cisco_IOS_XR_manageability_object_tracking_datatypes
This module contains a collection of generally useful
derived YANG data types.
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
import sys
from collections import OrderedDict
from ydk.types import Entity as _Entity_
from ydk.types import EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class ObjectTrackingBooleanSign(Enum):
"""
ObjectTrackingBooleanSign (Enum Class)
Object tracking boolean sign
.. data:: without_not = 0
Object without not
.. data:: with_not = 1
Object with not
"""
without_not = Enum.YLeaf(0, "without-not")
with_not = Enum.YLeaf(1, "with-not")
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_manageability_object_tracking_datatypes as meta
return meta._meta_table['ObjectTrackingBooleanSign']
|
python/scripts/generic-pytest/test_zap.py | eas5/zaproxy | 10,016 | 12754482 | # Zed Attack Proxy (ZAP) and its related class files.
#
# ZAP is an HTTP/HTTPS proxy for assessing web application security.
#
# Copyright 2012 ZAP Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a generic pytest (http://pytest.org/) script that can be used
# for controlling and integrating ZAP with existing tests.
# The script is configured via the config file (default name: test_zap.config)
# the default file has plenty of comments to explain whats what.
# You can use this script for a standalone security test - it can start ZAP,
# run the spider and scanner against specified URLs, check for any alerts
# raised and finally stop ZAP.
# However its more effective if you start ZAP, then proxy existing functional
# test via ZAP before running the spider and scanner.
# That means you might need to start ZAP in one test, run your functional tests
# and then run the spider and scanner, etc. in another (sequential) test.
import ast
import copy
import os
import platform
import re
import time
from ConfigParser import SafeConfigParser
from zap import ZAP
def element_to_str(alert, element):
return "'" + element + "':'" + re.escape(alert.get(element)) + "'"
def alert_to_str(alert):
return "{" + \
element_to_str(alert, "alert") + "," + \
element_to_str(alert, "risk") + "," + \
element_to_str(alert, "reliability") + "," + \
element_to_str(alert, "url") + "," + \
element_to_str(alert, "param") + "}"
def match_alert_pattern (alert, pattern, element):
#print "Alert = " + alert + " Pattern = " + pattern + " Element = " + element
if (pattern.get(element)):
return re.search(pattern.get(element), alert.get(element))
return True # No such pattern matches all
def match_alerts (alert, pattern):
if ( not match_alert_pattern (alert, pattern, "alert")):
return False
if ( not match_alert_pattern (alert, pattern, "url")):
return False
if ( not match_alert_pattern (alert, pattern, "reliability")):
return False
if ( not match_alert_pattern (alert, pattern, "risk")):
return False
if ( not match_alert_pattern (alert, pattern, "param")):
return False
return True
# Returns a list of the alerts which dont match the 'ignoreAlerts' - a dictionary of regex patterns
def strip_alerts (alerts, ignoreAlerts):
stripped = []
for alert in alerts:
include = True
for ignore in ignoreAlerts:
if ( match_alerts(alert, ignore)):
include = False
break
if (include):
stripped.append(alert)
return stripped
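# Illustrative example (not part of the original script) of the matching
# semantics above: a pattern that only supplies an "alert" regex matches any
# alert whose name matches it, so strip_alerts() would drop that alert.
_example_alert = {"alert": "XSS", "risk": "High", "reliability": "Warning",
                  "url": "http://example", "param": "q"}
# strip_alerts([_example_alert], [{"alert": "XSS"}]) -> []
# strip_alerts([_example_alert], [{"alert": "SQL"}]) -> [_example_alert]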
def test_zap(zapconfig):
parser = SafeConfigParser()
parser.read(zapconfig)
zapUrl = parser.get("Proxy", "url");
zap = ZAP(proxies={'http': zapUrl, 'https': zapUrl})
if (parser.getboolean("Actions", "start")):
# print "platform=" + platform.system()
if (platform.system() == "Windows"):
zapScript = "start /b zap.bat"
else:
zapScript = "zap.sh"
zapInstall = parser.get("Proxy", "install");
if (len(zapInstall) == 0):
if (platform.system() == "Windows"):
# Win 7 default path
zapInstall = "C:\Program Files (x86)\OWASP\Zed Attack Proxy";
if ( not os.path.exists(zapInstall)):
# Win XP default path
zapInstall = "C:\Program Files\OWASP\Zed Attack Proxy";
else:
# No default path for Mac OS or Linux
print "Installation directory must be set in " + zapconfig
if (len(parser.get("Proxy", "home")) > 0):
zapScript = zapScript + " -d " + parser.get("Proxy", "home")
os.chdir(zapInstall);
os.system(zapScript);
time.sleep(20);
spiderUrls = parser.get("Actions", "spider");
if (len(spiderUrls) > 0):
for spiderUrl in spiderUrls.split(','):
zap.urlopen(spiderUrl)
# Give the sites tree a chance to get updated
time.sleep(2)
print 'Spidering %s' % spiderUrl
zap.start_spider(spiderUrl)
# Give the Spider a chance to start
time.sleep(2)
while (int(zap.spider_status[0]) < 100):
#print 'Spider progress %: ' + zap.spider_status[0]
time.sleep(5)
print 'Finished spidering %s' % spiderUrl
print 'Spider completed'
# Give the passive scanner a chance to finish
time.sleep(5)
scanUrls = parser.get("Actions", "scan");
if (len(scanUrls) > 0):
for scanUrl in scanUrls.split(','):
print 'Scanning %s' % scanUrl
zap.start_scan(scanUrl)
while (int(zap.scan_status[0]) < 100):
#print 'Scan progress %: ' + zap.scan_status[0]
time.sleep(5)
print 'Finished scanning %s' % scanUrl
print 'Scanner completed'
saveSession = parser.get("Actions", "savesession");
if (len(saveSession) > 0):
time.sleep(5) # Will this help??
zap.save_session(saveSession)
#zapAlerts = zap.alerts # Save for later, in case ZAP is stopped..
zapAlerts = copy.deepcopy(zap.alerts) # Save for later, in case ZAP is stopped..
if (parser.getboolean("Actions", "stop")):
# TODO: this is causing problems right now :(
zap.shutdown()
requireAlertsStr = parser.get("Alerts", "require")
if (len(requireAlertsStr) > 0):
for requireAlertStr in requireAlertsStr.split("\n"):
requireAlert = ast.literal_eval(requireAlertStr)
# Check at least one match found in the alerts
found = False
for alert in zapAlerts:
if ( match_alerts(alert, requireAlert)):
found = True
break
if (not found):
# No match, fail the test
print "Required alert not present: " + requireAlertStr
assert 0
ignoreAlertsStr = parser.get("Alerts", "ignore")
ignoreAlerts = []
if (len(ignoreAlertsStr) > 0):
for ignoreAlertStr in ignoreAlertsStr.split("\n"):
ignoreAlerts.append(ast.literal_eval(ignoreAlertStr))
strippedAlerts = strip_alerts(zapAlerts, ignoreAlerts)
saveAlerts = parser.get("Alerts", "savealerts")
if (len(saveAlerts) > 0):
alertsFile = open(saveAlerts, 'w')
for alert in strippedAlerts:
alertsFile.write(alert_to_str(alert))
alertsFile.write("\n")
alertsFile.close()
assert len(strippedAlerts) == 0
|
tests/test_optionmenu.py | larryw3i/pygubu | 1,716 | 12754501 | <reponame>larryw3i/pygubu
# encoding: utf8
import support
import pygubu
import os
import sys
import unittest
try:
import tkinter as tk
import tkinter.ttk as ttk
except:
import Tkinter as tk
import ttk
pygubu_basedir = os.path.abspath(os.path.dirname(
os.path.dirname(os.path.realpath(sys.argv[0]))))
if pygubu_basedir not in sys.path:
sys.path.insert(0, pygubu_basedir)
class TestOptionMenu(unittest.TestCase):
def setUp(self):
support.root_deiconify()
xmldata = 'test_optionmenu.ui'
self.builder = builder = pygubu.Builder()
builder.add_from_file(xmldata)
self.widget = builder.get_object('mainwindow')
def tearDown(self):
support.root_withdraw()
def test_class(self):
optionmenu = self.builder.get_object('optionmenu1')
self.assertIsInstance(optionmenu, tk.OptionMenu)
self.widget.destroy()
def test_no_variable_defined(self):
optionmenu2 = self.builder.get_object('optionmenu2')
self.assertIsInstance(optionmenu2, tk.OptionMenu)
self.widget.destroy()
|
vel/api/base/scheduler.py | tigerwlin/vel | 273 | 12754550 | from .callback import Callback
class SchedulerFactory:
""" Factory class for various schedulers """
def instantiate(self, optimizer, last_epoch=-1) -> Callback:
raise NotImplementedError
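# Hypothetical sketch (not part of vel): the simplest possible factory, assuming
# the base Callback is instantiable as a no-op. A real factory would wrap e.g. a
# torch.optim.lr_scheduler and step it from the appropriate callback hooks.
class NoOpSchedulerFactory(SchedulerFactory):
    """ Factory returning a callback that performs no scheduling """
    def instantiate(self, optimizer, last_epoch=-1) -> Callback:
        return Callback()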
|
DQM/TrackingMonitor/python/tracksDQMMiniAOD_cff.py | ckamtsikis/cmssw | 852 | 12754553 | import FWCore.ParameterSet.Config as cms
from DQM.TrackingMonitor.packedCandidateTrackValidator_cfi import *
packedCandidateTrackValidatorLostTracks = packedCandidateTrackValidator.clone(
trackToPackedCandidateAssociation = "lostTracks",
rootFolder = "Tracking/PackedCandidate/lostTracks"
)
tracksDQMMiniAOD = cms.Sequence(
packedCandidateTrackValidator +
packedCandidateTrackValidatorLostTracks
)
|
superset/db_engine_specs/elasticsearch.py | delorenzosoftware/superset | 18,621 | 12754560 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from datetime import datetime
from distutils.version import StrictVersion
from typing import Any, Dict, Optional, Type
from superset.db_engine_specs.base import BaseEngineSpec
from superset.db_engine_specs.exceptions import (
SupersetDBAPIDatabaseError,
SupersetDBAPIOperationalError,
SupersetDBAPIProgrammingError,
)
from superset.utils import core as utils
logger = logging.getLogger()
class ElasticSearchEngineSpec(BaseEngineSpec): # pylint: disable=abstract-method
engine = "elasticsearch"
engine_name = "ElasticSearch (SQL API)"
time_groupby_inline = True
time_secondary_columns = True
allows_joins = False
allows_subqueries = True
allows_sql_comments = False
_time_grain_expressions = {
None: "{col}",
"PT1S": "HISTOGRAM({col}, INTERVAL 1 SECOND)",
"PT1M": "HISTOGRAM({col}, INTERVAL 1 MINUTE)",
"PT1H": "HISTOGRAM({col}, INTERVAL 1 HOUR)",
"P1D": "HISTOGRAM({col}, INTERVAL 1 DAY)",
"P1M": "HISTOGRAM({col}, INTERVAL 1 MONTH)",
"P1Y": "HISTOGRAM({col}, INTERVAL 1 YEAR)",
}
type_code_map: Dict[int, str] = {} # loaded from get_datatype only if needed
@classmethod
def get_dbapi_exception_mapping(cls) -> Dict[Type[Exception], Type[Exception]]:
# pylint: disable=import-error,import-outside-toplevel
import es.exceptions as es_exceptions
return {
es_exceptions.DatabaseError: SupersetDBAPIDatabaseError,
es_exceptions.OperationalError: SupersetDBAPIOperationalError,
es_exceptions.ProgrammingError: SupersetDBAPIProgrammingError,
}
@classmethod
def convert_dttm(
cls, target_type: str, dttm: datetime, db_extra: Optional[Dict[str, Any]] = None
) -> Optional[str]:
db_extra = db_extra or {}
if target_type.upper() == utils.TemporalType.DATETIME:
es_version = db_extra.get("version")
# The elasticsearch CAST function does not take effect for the time zone
# setting. In elasticsearch7.8 and above, we can use the DATETIME_PARSE
# function to solve this problem.
supports_dttm_parse = False
try:
if es_version:
supports_dttm_parse = StrictVersion(es_version) >= StrictVersion(
"7.8"
)
except Exception as ex: # pylint: disable=broad-except
logger.error("Unexpected error while convert es_version", exc_info=True)
logger.exception(ex)
if supports_dttm_parse:
datetime_formatted = dttm.isoformat(sep=" ", timespec="seconds")
return (
f"""DATETIME_PARSE('{datetime_formatted}', 'yyyy-MM-dd HH:mm:ss')"""
)
return f"""CAST('{dttm.isoformat(timespec="seconds")}' AS DATETIME)"""
return None
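# Minimal sketch of how the version check in convert_dttm above switches the
# generated SQL; the "7.7"/"7.8" version strings and the sample timestamp are
# illustrative assumptions only.
def _convert_dttm_sketch():
    sample = datetime(2021, 1, 2, 3, 4, 5)
    pre_78 = ElasticSearchEngineSpec.convert_dttm(
        "DATETIME", sample, db_extra={"version": "7.7"}
    )  # -> "CAST('2021-01-02T03:04:05' AS DATETIME)"
    post_78 = ElasticSearchEngineSpec.convert_dttm(
        "DATETIME", sample, db_extra={"version": "7.8"}
    )  # -> "DATETIME_PARSE('2021-01-02 03:04:05', 'yyyy-MM-dd HH:mm:ss')"
    return pre_78, post_78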
class OpenDistroEngineSpec(BaseEngineSpec): # pylint: disable=abstract-method
time_groupby_inline = True
time_secondary_columns = True
allows_joins = False
allows_subqueries = True
allows_sql_comments = False
_time_grain_expressions = {
None: "{col}",
"PT1S": "date_format({col}, 'yyyy-MM-dd HH:mm:ss.000')",
"PT1M": "date_format({col}, 'yyyy-MM-dd HH:mm:00.000')",
"PT1H": "date_format({col}, 'yyyy-MM-dd HH:00:00.000')",
"P1D": "date_format({col}, 'yyyy-MM-dd 00:00:00.000')",
"P1M": "date_format({col}, 'yyyy-MM-01 00:00:00.000')",
"P1Y": "date_format({col}, 'yyyy-01-01 00:00:00.000')",
}
engine = "odelasticsearch"
engine_name = "ElasticSearch (OpenDistro SQL)"
@classmethod
def convert_dttm(
cls, target_type: str, dttm: datetime, db_extra: Optional[Dict[str, Any]] = None
) -> Optional[str]:
if target_type.upper() == utils.TemporalType.DATETIME:
return f"""'{dttm.isoformat(timespec="seconds")}'"""
return None
@staticmethod
def _mutate_label(label: str) -> str:
return label.replace(".", "_")
|
language/labs/exemplar_decoding/models/linear.py | naveenjafer/language | 1,199 | 12754562 | <gh_stars>1000+
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A linear layer for output projection.
This is based on code in tf.contrib.seq2seq.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from language.labs.exemplar_decoding.models.common import dimension_value
import tensorflow.compat.v1 as tf
_BIAS_VARIABLE_NAME = "bias"
_WEIGHTS_VARIABLE_NAME = "kernel"
__all__ = [
"Linear",
"HyperDense",
]
class Linear(object):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
    args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of weight variable.
weights: (optional) a specified tensor.
dtype: data type for variables.
build_bias: boolean, whether to build a bias variable.
bias_initializer: starting value to initialize the bias
(default is all zeros).
kernel_initializer: starting value to initialize the weight.
Raises:
ValueError: if inputs_shape is wrong.
"""
def __init__(self,
args,
output_size,
build_bias,
weights=None,
weight_initializer=None,
bias_initializer=None):
self._build_bias = build_bias
if args is None or (tf.contrib.framework.nest.is_sequence(args) and
not args):
raise ValueError("`args` must be specified")
if not tf.contrib.framework.nest.is_sequence(args):
args = [args]
self._is_sequence = False
else:
self._is_sequence = True
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape() for a in args]
for shape in shapes:
if shape.ndims != 2:
raise ValueError("linear is expecting 2D arguments: %s" % shapes)
if shape[1].value is None:
raise ValueError("linear expects shape[1] to be provided for shape %s, "
"but saw %s" % (shape, shape[1]))
else:
total_arg_size += shape[1].value
dtype = [a.dtype for a in args][0]
scope = tf.get_variable_scope()
with tf.variable_scope(scope) as outer_scope:
if weights is None:
self._weights = tf.get_variable(
_WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size],
dtype=dtype,
initializer=weight_initializer)
else:
self._weights = weights
if build_bias:
with tf.variable_scope(outer_scope) as inner_scope:
inner_scope.set_partitioner(None)
if bias_initializer is None:
bias_initializer = tf.constant_initializer(0.0, dtype=dtype)
self._biases = tf.get_variable(
_BIAS_VARIABLE_NAME, [output_size],
dtype=dtype,
initializer=bias_initializer)
def __call__(self, args):
if not self._is_sequence:
args = [args]
if len(args) == 1:
res = tf.matmul(args[0], self._weights)
else:
# Explicitly creating a one for a minor performance improvement.
one = tf.constant(1, dtype=tf.int32)
res = tf.matmul(tf.concat(args, one), self._weights)
if self._build_bias:
res = tf.nn.bias_add(res, self._biases)
return res
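# Minimal usage sketch for Linear (assumed shapes): two [batch, d] inputs are
# concatenated along the feature axis and projected by a single
# [d1 + d2, output_size] kernel plus a bias.
def _linear_usage_sketch():
  a = tf.zeros([4, 3])
  b = tf.zeros([4, 5])
  with tf.variable_scope("linear_sketch"):
    layer = Linear([a, b], output_size=7, build_bias=True)
  out = layer([a, b])  # shape [4, 7]
  return out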
class HyperDense(tf.keras.layers.Layer):
"""Just your regular densely-connected NN layer.
`Dense` implements the operation:
`output = activation(dot(input, kernel) + bias)`
where `activation` is the element-wise activation function
passed as the `activation` argument, `kernel` is a weights matrix
created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`).
Note: if the input to the layer has a rank greater than 2, then
it is flattened prior to the initial dot product with `kernel`.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
nD tensor with shape: `(batch_size, ..., input_dim)`.
The most common situation would be
a 2D input with shape `(batch_size, input_dim)`.
Output shape:
nD tensor with shape: `(batch_size, ..., units)`.
For instance, for a 2D input with shape `(batch_size, input_dim)`,
the output would have shape `(batch_size, units)`.
"""
def __init__(self,
units,
mem_input,
hps,
use_beam=False,
activation=None,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
if "input_shape" not in kwargs and "input_dim" in kwargs:
kwargs["input_shape"] = (kwargs.pop("input_dim"),)
super(HyperDense, self).__init__(
activity_regularizer=tf.keras.regularizers.get(activity_regularizer),
**kwargs)
self.units = int(units)
self.activation = tf.keras.activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self.bias_initializer = tf.keras.initializers.get(bias_initializer)
self.kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self.bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self.kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self.bias_constraint = tf.keras.constraints.get(bias_constraint)
self._mem_input = mem_input
self.supports_masking = True
self.input_spec = tf.keras.layers.InputSpec(min_ndim=2)
self._can_use_graph_functions = True
self._decoder_dim = hps.decoder_dim
self._rank = hps.rank
self._tau = hps.tau
self._sigma_norm = hps.sigma_norm
self._beam_width = hps.beam_width
self._use_beam = use_beam
def build(self, input_shape):
input_shape = tf.TensorShape(input_shape)
if dimension_value(input_shape[-1]) is None:
raise ValueError("The last dimension of the inputs to `Dense` "
"should be defined. Found `None`.")
last_dim = dimension_value(input_shape[-1])
self.input_spec = tf.keras.layers.InputSpec(min_ndim=2, axes={-1: last_dim})
self._c = tf.get_variable(
"c", [self._decoder_dim, self._rank],
initializer=tf.contrib.layers.xavier_initializer(),
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
dtype=self.dtype,
trainable=True)
sigma = tf.matmul(self._mem_input, self._c)
if self._sigma_norm > 0.:
sigma = tf.nn.l2_normalize(sigma, axis=1) * self._sigma_norm
elif self._sigma_norm == -1.:
sigma = tf.nn.softmax(sigma / self._tau, axis=1)
sigma_diag = tf.matrix_diag(sigma)
self._u = tf.get_variable(
"u", [last_dim, self._rank],
initializer=tf.contrib.layers.xavier_initializer(),
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
dtype=self.dtype,
trainable=True)
self._v = tf.get_variable(
"v", [self._rank, self.units],
initializer=tf.contrib.layers.xavier_initializer(),
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
dtype=self.dtype,
trainable=True)
self.kernel = tf.einsum("ij,ajk,kl->ail", self._u, sigma_diag, self._v)
if self._use_beam and self._beam_width:
self.kernel = tf.contrib.seq2seq.tile_batch(
self.kernel, multiplier=self._beam_width)
if self.use_bias:
self._b = self.add_weight(
"b",
shape=[self.units, self._rank],
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
dtype=self.dtype,
trainable=True)
self.bias = tf.einsum("ij,aj->ai", self._b, sigma)
if self._use_beam and self._beam_width:
self.bias = tf.contrib.seq2seq.tile_batch(
self.bias, multiplier=self._beam_width)
else:
self.bias = None
self.built = True
def call(self, inputs):
inputs = tf.convert_to_tensor(inputs)
rank = tf.rank(inputs)
if rank > 2:
outputs = tf.einsum("aki,aij->akj", inputs, self.kernel)
# Reshape the output back to the original ndim of the input.
if not tf.executing_eagerly():
shape = inputs.get_shape().as_list()
output_shape = shape[:-1] + [self.units]
outputs.set_shape(output_shape)
else:
assert False
      # outputs = tf.matmul(inputs, self.kernel)
if self.use_bias:
outputs = tf.nn.bias_add(outputs, self.bias)
if self.activation is not None:
return self.activation(outputs) # pylint: disable=not-callable
return outputs
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape)
input_shape = input_shape.with_rank_at_least(2)
if dimension_value(input_shape[-1]) is None:
raise ValueError(
"The innermost dimension of input_shape must be defined, but saw: %s"
% input_shape)
return input_shape[:-1].concatenate(self.units)
def get_config(self):
config = {
"units":
self.units,
"activation":
tf.keras.activations.serialize(self.activation),
"use_bias":
self.use_bias,
"kernel_initializer":
tf.keras.initializers.serialize(self.kernel_initializer),
"bias_initializer":
tf.keras.initializers.serialize(self.bias_initializer),
"kernel_regularizer":
tf.keras.regularizers.serialize(self.kernel_regularizer),
"bias_regularizer":
tf.keras.regularizers.serialize(self.bias_regularizer),
"activity_regularizer":
tf.keras.regularizers.serialize(self.activity_regularizer),
"kernel_constraint":
tf.keras.constraints.serialize(self.kernel_constraint),
"bias_constraint":
tf.keras.constraints.serialize(self.bias_constraint)
}
base_config = super(HyperDense, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
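# Shape sketch of the factorized kernel built in HyperDense.build above, with
# assumed toy sizes: u [last_dim, rank] and v [rank, units] are shared, while
# sigma [batch, rank] is derived from mem_input, yielding one low-rank weight
# matrix per example.
def _hyperdense_kernel_shape_sketch():
  import numpy as np
  batch, last_dim, rank, units = 2, 8, 4, 16
  u = np.random.randn(last_dim, rank)
  sigma = np.random.randn(batch, rank)
  v = np.random.randn(rank, units)
  sigma_diag = np.stack([np.diag(s) for s in sigma])
  kernel = np.einsum("ij,ajk,kl->ail", u, sigma_diag, v)
  assert kernel.shape == (batch, last_dim, units)
  return kernel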
|
src/logger/neptune/neptune_logger.py | Sarajvega/kaggle-birdsong-recognition | 137 | 12754568 | from ignite.engine import Events
from ignite.contrib.handlers.tqdm_logger import ProgressBar
from ignite.handlers import Checkpoint, DiskSaver, global_step_from_engine
from logger.base.base_logger import BaseLogger
from logger.base.utils import *
from logger.neptune.neptune_utils import *
from ignite.contrib.handlers.neptune_logger import *
import numpy as np
import os
class MyNeptuneLogger(BaseLogger):
def __init__(self, log_every=5, **kwargs):
self.writer = NeptuneLogger(api_token=os.getenv('NEPTUNE_API_TOKEN'),
project_name=kwargs["project_name"],
name=kwargs["name"],
params=kwargs["params"],
tags=kwargs["tags"])
super().__init__(log_every=log_every)
def _add_train_events(self, model = None, optimizer=None, scheduler=None, metrics={}):
# self.writer.attach(self.trainer,
# log_handler=WeightsScalarHandler(model),
# event_name=Events.ITERATION_COMPLETED(every=100))
# self.writer.attach(self.trainer,
# log_handler=GradsScalarHandler(model),
# event_name=Events.ITERATION_COMPLETED(every=100))
iteration_events = [
training_iteration(self.writer),
lr_iteration(optimizer, self.writer)
]
completion_events = [
train_metrics_completion(self.writer)
]
self._add_train_handlers(
**{
"iteration_events": iteration_events,
"completion_events": completion_events
}
)
def _add_eval_events(self, model = None, optimizer=None, scheduler=None, metrics={}):
iteration_events = []
completion_events = [
validation_metrics_completion(self.trainer, self.writer),
]
self._add_evaluation_handlers(
**{
"iteration_events": iteration_events,
"completion_events": completion_events
}
)
def _end_of_training(self):
self.writer.experiment.stop() |
tasks/multi_length_sequences.py | evanharwin/keras-tcn | 1,473 | 12754576 | import numpy as np
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
from tcn import TCN
# if you increase the sequence length make sure the receptive field of the TCN is big enough.
MAX_TIME_STEP = 30
"""
Input: sequence of length 7
Input: sequence of length 25
Input: sequence of length 29
Input: sequence of length 21
Input: sequence of length 20
Input: sequence of length 13
Input: sequence of length 9
Input: sequence of length 7
Input: sequence of length 4
Input: sequence of length 14
Input: sequence of length 10
Input: sequence of length 11
...
"""
def get_x_y(max_time_steps):
for k in range(int(1e9)):
time_steps = np.random.choice(range(1, max_time_steps), size=1)[0]
if k % 2 == 0:
x_train = np.expand_dims([np.insert(np.zeros(shape=(time_steps, 1)), 0, 1)], axis=-1)
y_train = [1]
else:
x_train = np.array([np.zeros(shape=(time_steps, 1))])
y_train = [0]
if k % 100 == 0:
print(f'({k}) Input: sequence of length {time_steps}.')
yield x_train, np.expand_dims(y_train, axis=-1)
m = Sequential([
TCN(input_shape=(None, 1)),
Dense(1, activation='sigmoid')
])
m.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
gen = get_x_y(max_time_steps=MAX_TIME_STEP)
m.fit(gen, epochs=1, steps_per_epoch=1000, max_queue_size=1, verbose=2)
|
tests/test_chi_police_retirement.py | MAYANK25402/city-scrapers | 255 | 12754679 | from datetime import datetime
from os.path import dirname, join
import pytest
from city_scrapers_core.utils import file_response
from freezegun import freeze_time
from city_scrapers.spiders.chi_police_retirement import ChiPoliceRetirementSpider
test_response = file_response(
join(dirname(__file__), "files", "chi_police_retirement.html"),
url="http://www.chipabf.org/ChicagoPolicePension/MonthlyMeetings.html",
)
spider = ChiPoliceRetirementSpider()
freezer = freeze_time("2019-05-05")
freezer.start()
parsed_items = [item for item in spider.parse(test_response)]
freezer.stop()
def test_title():
assert parsed_items[0]["title"] == "Retirement Board"
def test_description():
assert parsed_items[0]["description"] == ""
def test_start():
assert parsed_items[0]["start"] == datetime(2019, 1, 31, 9, 0)
def test_id():
assert (
parsed_items[0]["id"] == "chi_police_retirement/201901310900/x/"
"retirement_board"
)
def test_status():
assert parsed_items[0]["status"] == "passed"
def test_location():
assert parsed_items[0]["location"] == {
"name": "Policemen's Annuity and Benefit Fund",
"address": "221 North LaSalle Street, Suite 1626, Chicago, "
"Illinois 60601-1203",
}
def test_source():
assert (
parsed_items[0]["source"]
== "http://www.chipabf.org/ChicagoPolicePension/MonthlyMeetings.html"
)
def test_links():
assert parsed_items[0]["links"] == [
{
"href": "http://www.chipabf.org/ChicagoPolicePension/PDF/Agenda/2019/2019AGENDA01.pdf", # noqa
"title": "Agenda",
},
{
"href": "http://www.chipabf.org/ChicagoPolicePension/PDF/Minutes/2019/2019MINUTES01.pdf", # noqa
"title": "Minutes",
},
]
def test_classification():
assert parsed_items[0]["classification"] == "Board"
@pytest.mark.parametrize("item", parsed_items)
def test_all_day(item):
assert item["all_day"] is False
|
rpython/jit/metainterp/test/test_call.py | nanjekyejoannah/pypy | 381 | 12754707 |
from rpython.jit.metainterp.test.support import LLJitMixin, noConst
from rpython.rlib import jit
class CallTest(object):
def test_indirect_call(self):
@jit.dont_look_inside
def f1(x):
return x + 1
@jit.dont_look_inside
def f2(x):
return x + 2
@jit.dont_look_inside
def choice(i):
if i:
return f1
return f2
def f(i):
func = choice(i)
return func(i)
res = self.interp_operations(f, [3])
assert res == f(3)
def test_cond_call(self):
def f(l, n):
l.append(n)
def main(n):
l = []
jit.conditional_call(n == 10, f, l, n)
return len(l)
assert self.interp_operations(main, [10]) == 1
assert self.interp_operations(main, [5]) == 0
def test_cond_call_disappears(self):
driver = jit.JitDriver(greens = [], reds = ['n'])
def f(n):
raise ValueError
def main(n):
while n > 0:
driver.jit_merge_point(n=n)
jit.conditional_call(False, f, 10)
n -= 1
return 42
assert self.meta_interp(main, [10]) == 42
self.check_resops(guard_no_exception=0)
def test_cond_call_i(self):
def f(n):
return n * 200
def main(n, m):
return jit.conditional_call_elidable(n, f, m)
assert self.interp_operations(main, [0, 10]) == 2000
assert self.interp_operations(main, [15, 42]) == 15
def test_cond_call_r(self):
def f(n):
return [n]
def main(n):
if n == 10:
l = []
else:
l = None
l = jit.conditional_call_elidable(l, f, n)
return len(l)
assert main(10) == 0
assert main(5) == 1
assert self.interp_operations(main, [10]) == 0
assert self.interp_operations(main, [5]) == 1
def test_cond_call_constant_in_pyjitpl(self):
def f(a, b):
return a + b
def main(n):
# this is completely constant-folded because the arguments
# to f() are constants.
return jit.conditional_call_elidable(n, f, 40, 2)
assert main(12) == 12
assert main(0) == 42
assert self.interp_operations(main, [12]) == 12
self.check_operations_history({'finish': 1}) # empty history
assert self.interp_operations(main, [0]) == 42
self.check_operations_history({'finish': 1}) # empty history
def test_cond_call_constant_in_optimizer(self):
myjitdriver = jit.JitDriver(greens = ['m'], reds = ['n', 'p'])
def externfn(x):
return x - 3
class V:
def __init__(self, value):
self.value = value
def f(n, m, p):
while n > 0:
myjitdriver.can_enter_jit(n=n, p=p, m=m)
myjitdriver.jit_merge_point(n=n, p=p, m=m)
m1 = noConst(m)
n -= jit.conditional_call_elidable(p, externfn, m1)
return n
res = self.meta_interp(f, [21, 5, 0])
assert res == -1
# the COND_CALL_VALUE is constant-folded away by optimizeopt.py
self.check_resops({'int_sub': 2, 'int_gt': 2, 'guard_true': 2,
'jump': 1})
def test_cond_call_constant_in_optimizer_1(self):
# same as test_cond_call_constant_in_optimizer, but the 'value'
# argument changes
myjitdriver = jit.JitDriver(greens = ['m'], reds = ['n', 'p'])
def externfn(x):
return x - 3
class V:
def __init__(self, value):
self.value = value
def f(n, m, p):
while n > 0:
myjitdriver.can_enter_jit(n=n, p=p, m=m)
myjitdriver.jit_merge_point(n=n, p=p, m=m)
m1 = noConst(m)
n -= jit.conditional_call_elidable(p, externfn, m1)
return n
assert f(21, 5, 0) == -1
res = self.meta_interp(f, [21, 5, 0])
assert res == -1
# the COND_CALL_VALUE is constant-folded away by optimizeopt.py
self.check_resops({'int_sub': 2, 'int_gt': 2, 'guard_true': 2,
'jump': 1})
def test_cond_call_constant_in_optimizer_2(self):
myjitdriver = jit.JitDriver(greens = ['m'], reds = ['n', 'p'])
def externfn(x):
return 2
def f(n, m, p):
while n > 0:
myjitdriver.can_enter_jit(n=n, p=p, m=m)
myjitdriver.jit_merge_point(n=n, p=p, m=m)
assert p > -1
assert p < 1
n -= jit.conditional_call_elidable(p, externfn, n)
return n
res = self.meta_interp(f, [21, 5, 0])
assert res == -1
# optimizer: the COND_CALL_VALUE is turned into a regular
# CALL_PURE, which itself becomes a CALL
self.check_resops(call_pure_i=0, cond_call_value_i=0, call_i=2,
int_sub=2)
def test_cond_call_constant_in_optimizer_3(self):
myjitdriver = jit.JitDriver(greens = ['m'], reds = ['n', 'p'])
def externfn(x):
return 1
def f(n, m, p):
while n > 0:
myjitdriver.can_enter_jit(n=n, p=p, m=m)
myjitdriver.jit_merge_point(n=n, p=p, m=m)
assert p > -1
assert p < 1
n0 = n
n -= jit.conditional_call_elidable(p, externfn, n0)
n -= jit.conditional_call_elidable(p, externfn, n0)
return n
res = self.meta_interp(f, [21, 5, 0])
assert res == -1
# same as test_cond_call_constant_in_optimizer_2, but the two
# intermediate CALL_PUREs are replaced with only one, because
# they are called with the same arguments
self.check_resops(call_pure_i=0, cond_call_value_i=0, call_i=2,
int_sub=4)
def test_cond_call_constant_in_optimizer_4(self):
class X:
def __init__(self, value):
self.value = value
self.triple = 0
def _compute_triple(self):
self.triple = self.value * 3
return self.triple
def get_triple(self):
return jit.conditional_call_elidable(self.triple,
X._compute_triple, self)
myjitdriver = jit.JitDriver(greens = [], reds = 'auto')
def main(n):
total = 0
while n > 1:
myjitdriver.jit_merge_point()
x = X(n)
total += x.get_triple() + x.get_triple() + x.get_triple()
n -= 10
return total
res = self.meta_interp(main, [100])
assert res == main(100)
# remaining: only the first call to get_triple(), as a call_i
# because we know that x.triple == 0 here. The remaining calls
# are removed because equal to the first one.
self.check_resops(call_i=2, cond_call_value_i=0,
new_with_vtable=2) # escapes: _compute_triple(self)
def test_cond_call_constant_in_optimizer_5(self):
def _compute_triple(value):
return value * 3
class X:
def __init__(self, value):
self.value = value
self.triple = 0
def get_triple(self):
res = jit.conditional_call_elidable(self.triple,
_compute_triple, self.value)
self.triple = res
return res
myjitdriver = jit.JitDriver(greens = [], reds = 'auto')
def main(n):
total = 0
while n > 1:
myjitdriver.jit_merge_point()
x = X(n)
total += x.get_triple() + x.get_triple() + x.get_triple()
n -= 10
return total
res = self.meta_interp(main, [100])
assert res == main(100)
# remaining: only the first call to get_triple(), as a call_i
# because we know that x.triple == 0 here. The remaining calls
# are removed because equal to the first one.
self.check_resops(call_i=2, cond_call_value_i=0,
new_with_vtable=0) # all virtual
def test_cond_call_multiple_in_optimizer_1(self):
# test called several times with the same arguments, but
# the condition is not available to the short preamble.
# This means that the second cond_call_value after unrolling
# can't be removed.
myjitdriver = jit.JitDriver(greens = [], reds = ['n', 'p', 'm'])
def externfn(x):
return 2000 # never actually called
@jit.dont_look_inside
def randomish(p):
return p + 1
def f(n, m, p):
while n > 0:
myjitdriver.can_enter_jit(n=n, p=p, m=m)
myjitdriver.jit_merge_point(n=n, p=p, m=m)
n -= jit.conditional_call_elidable(randomish(p), externfn, m)
return n
assert f(21, 5, 1) == -1
res = self.meta_interp(f, [21, 5, 1])
assert res == -1
self.check_resops(call_pure_i=0, cond_call_value_i=2,
call_i=2, # randomish()
int_sub=2)
def test_cond_call_multiple_in_optimizer_2(self):
# test called several times with the same arguments. Ideally
# we would like them to be consolidated into one call even if
# the 'value' are different but available from the short
# preamble. We don't do it so far---it's a mess, because the
# short preamble is supposed to depend only on loop-invariant
# things, and 'value' is (most of the time) not loop-invariant.
myjitdriver = jit.JitDriver(greens = [], reds = ['n', 'p', 'm'])
def externfn(x):
return 2 # called only the first time
def f(n, m, p):
while n > 0:
myjitdriver.can_enter_jit(n=n, p=p, m=m)
myjitdriver.jit_merge_point(n=n, p=p, m=m)
p = jit.conditional_call_elidable(p, externfn, m)
n -= p
return n
assert f(21, 5, 0) == -1
res = self.meta_interp(f, [21, 5, 0])
assert res == -1
self.check_resops(call_pure_i=0,
cond_call_value_i=2, # ideally 1, but see above
int_sub=2)
def test_cond_call_in_blackhole(self):
myjitdriver = jit.JitDriver(greens = [], reds = ['n', 'p', 'm'])
def externfn(x):
return 2
def f(n, m, p):
while n > 0:
myjitdriver.can_enter_jit(n=n, p=p, m=m)
myjitdriver.jit_merge_point(n=n, p=p, m=m)
if n > 6: # will fail and finish in the blackhole
pass
if jit.we_are_jitted(): # manually inline here
p = jit._jit_conditional_call_value(p, externfn, m)
else:
p = jit.conditional_call_elidable(p, externfn, m)
n -= p
return n
assert f(21, 5, 0) == -1
res = self.meta_interp(f, [21, 5, 0])
assert res == -1
def test_cond_call_raises(self):
myjitdriver = jit.JitDriver(greens = [], reds = ['n', 'p', 'm'])
def externfn(x, m):
if m == 1 or m == 1008:
raise ValueError
return x + m
def f(n, m, p):
while n > 0:
myjitdriver.can_enter_jit(n=n, p=p, m=m)
myjitdriver.jit_merge_point(n=n, p=p, m=m)
try:
p = jit.conditional_call_elidable(p, externfn, n, m)
p -= (n + m) # => zero again
except ValueError:
m += 1000
m += 1
n -= 2
return n * m
assert f(21, 0, 0) == -2011
res = self.meta_interp(f, [21, 0, 0])
assert res == -2011
class TestCall(LLJitMixin, CallTest):
pass
|
server/workers/persistence/src/example_settings.py | chreman/Headstart | 111 | 12754744 | BEHIND_PROXY = True
SWAGGER_BASEPATH = ""
DEFAULT_DATABASE = "dev"
DATABASES = ["test"]
ENV = "development"
DEBUG = True
|
tools/src/deleteGroup.py | kevinsigwart/ArcREST | 208 | 12754749 | <reponame>kevinsigwart/ArcREST
"""
@author: ArcREST Team
@contact: www.github.com/Esri/ArcREST
@company: Esri
@version: 1.0.0
@description: deletes a group on the AGOL site.
@requirements: Python 2.7.x, ArcGIS 10.2.2, ArcREST 2.0
@copyright: Esri, 2015
"""
import os
from arcpy import env
from arcpy import mapping
from arcpy import da
import arcpy
import ConfigParser
import arcrest
#--------------------------------------------------------------------------
class FunctionError(Exception):
""" raised when a function fails to run """
pass
#--------------------------------------------------------------------------
def trace():
"""
trace finds the line, the filename
and error message and returns it
to the user
"""
import traceback
import sys
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# script name + line number
line = tbinfo.split(", ")[1]
# Get Python syntax error
#
synerror = traceback.format_exc().splitlines()[-1]
return line, __file__, synerror
#--------------------------------------------------------------------------
def main(*argv):
""" main driver of program """
try:
# Inputs
#
adminUsername = argv[0]
adminPassword = argv[1]
siteURL = argv[2]
groupName = argv[3]
# Logic
#
sh = arcrest.AGOLTokenSecurityHandler(adminUsername, adminPassword)
admin = arcrest.manageorg.Administration(securityHandler=sh)
community = admin.community
g = community.getGroupIDs(groupNames=[groupName])
if len(g) == 0:
arcpy.AddWarning("No Group Exists with That Name %s" % groupName)
arcpy.SetParameterAsText(4, False)
elif len(g) == 1:
groups = community.groups
groups.deleteGroup(groupId=g[0])
arcpy.AddWarning("%s was erased." % groupName)
arcpy.SetParameterAsText(4, True)
else:
arcpy.AddError("Multiple group names found, please manually delete!")
arcpy.SetParameterAsText(4, False)
except arcpy.ExecuteError:
line, filename, synerror = trace()
arcpy.AddError("error on line: %s" % line)
arcpy.AddError("error in file name: %s" % filename)
arcpy.AddError("with error message: %s" % synerror)
arcpy.AddError("ArcPy Error Message: %s" % arcpy.GetMessages(2))
except FunctionError, f_e:
messages = f_e.args[0]
arcpy.AddError("error in function: %s" % messages["function"])
arcpy.AddError("error on line: %s" % messages["line"])
arcpy.AddError("error in file name: %s" % messages["filename"])
arcpy.AddError("with error message: %s" % messages["synerror"])
arcpy.AddError("ArcPy Error Message: %s" % messages["arc"])
except:
line, filename, synerror = trace()
arcpy.AddError("error on line: %s" % line)
arcpy.AddError("error in file name: %s" % filename)
arcpy.AddError("with error message: %s" % synerror)
#--------------------------------------------------------------------------
if __name__ == "__main__":
env.overwriteOutput = True
argv = tuple(str(arcpy.GetParameterAsText(i))
for i in xrange(arcpy.GetArgumentCount()))
main(*argv) |
alipay/aop/api/response/AlipayCommerceIotMdeviceprodQueryResponse.py | snowxmas/alipay-sdk-python-all | 213 | 12754821 | <reponame>snowxmas/alipay-sdk-python-all<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.MerchantModel import MerchantModel
from alipay.aop.api.domain.MerchantModel import MerchantModel
class AlipayCommerceIotMdeviceprodQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayCommerceIotMdeviceprodQueryResponse, self).__init__()
self._activate_time = None
self._addr_info = None
self._biz_type = None
self._device_id = None
self._device_name = None
self._device_sn = None
self._img_url = None
self._isv = None
self._merchant = None
self._shop_address = None
self._shop_id = None
self._shop_name = None
self._status = None
self._status_desc = None
self._supplier_name = None
@property
def activate_time(self):
return self._activate_time
@activate_time.setter
def activate_time(self, value):
self._activate_time = value
@property
def addr_info(self):
return self._addr_info
@addr_info.setter
def addr_info(self, value):
self._addr_info = value
@property
def biz_type(self):
return self._biz_type
@biz_type.setter
def biz_type(self, value):
self._biz_type = value
@property
def device_id(self):
return self._device_id
@device_id.setter
def device_id(self, value):
self._device_id = value
@property
def device_name(self):
return self._device_name
@device_name.setter
def device_name(self, value):
self._device_name = value
@property
def device_sn(self):
return self._device_sn
@device_sn.setter
def device_sn(self, value):
self._device_sn = value
@property
def img_url(self):
return self._img_url
@img_url.setter
def img_url(self, value):
self._img_url = value
@property
def isv(self):
return self._isv
@isv.setter
def isv(self, value):
if isinstance(value, MerchantModel):
self._isv = value
else:
self._isv = MerchantModel.from_alipay_dict(value)
@property
def merchant(self):
return self._merchant
@merchant.setter
def merchant(self, value):
if isinstance(value, MerchantModel):
self._merchant = value
else:
self._merchant = MerchantModel.from_alipay_dict(value)
@property
def shop_address(self):
return self._shop_address
@shop_address.setter
def shop_address(self, value):
self._shop_address = value
@property
def shop_id(self):
return self._shop_id
@shop_id.setter
def shop_id(self, value):
self._shop_id = value
@property
def shop_name(self):
return self._shop_name
@shop_name.setter
def shop_name(self, value):
self._shop_name = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def status_desc(self):
return self._status_desc
@status_desc.setter
def status_desc(self, value):
self._status_desc = value
@property
def supplier_name(self):
return self._supplier_name
@supplier_name.setter
def supplier_name(self, value):
self._supplier_name = value
def parse_response_content(self, response_content):
response = super(AlipayCommerceIotMdeviceprodQueryResponse, self).parse_response_content(response_content)
if 'activate_time' in response:
self.activate_time = response['activate_time']
if 'addr_info' in response:
self.addr_info = response['addr_info']
if 'biz_type' in response:
self.biz_type = response['biz_type']
if 'device_id' in response:
self.device_id = response['device_id']
if 'device_name' in response:
self.device_name = response['device_name']
if 'device_sn' in response:
self.device_sn = response['device_sn']
if 'img_url' in response:
self.img_url = response['img_url']
if 'isv' in response:
self.isv = response['isv']
if 'merchant' in response:
self.merchant = response['merchant']
if 'shop_address' in response:
self.shop_address = response['shop_address']
if 'shop_id' in response:
self.shop_id = response['shop_id']
if 'shop_name' in response:
self.shop_name = response['shop_name']
if 'status' in response:
self.status = response['status']
if 'status_desc' in response:
self.status_desc = response['status_desc']
if 'supplier_name' in response:
self.supplier_name = response['supplier_name']
|
test/python/classical_function_compiler/examples.py | Roshan-Thomas/qiskit-terra | 1,599 | 12754834 | <filename>test/python/classical_function_compiler/examples.py
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=missing-function-docstring
"""These examples should be handle by the classicalfunction compiler"""
from qiskit.circuit import Int1
def identity(a: Int1) -> Int1:
return a
def bit_and(a: Int1, b: Int1) -> Int1:
return a & b
def bit_or(a: Int1, b: Int1) -> Int1:
return a | b
def bool_or(a: Int1, b: Int1) -> Int1:
return a or b
def bool_not(a: Int1) -> Int1:
return not a
def and_and(a: Int1, b: Int1, c: Int1) -> Int1:
return a and b and c
def multiple_binop(a: Int1, b: Int1) -> Int1:
return (a or b) | (b & a) and (a & b)
def id_assing(a: Int1) -> Int1:
b = a
return b
def example1(a: Int1, b: Int1) -> Int1:
c = a & b
d = b | a
return c ^ a | d
def grover_oracle(a: Int1, b: Int1, c: Int1, d: Int1) -> Int1:
return not a and b and not c and d
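# Hedged sketch of feeding one of the examples above to the classical function
# compiler; assumes the `classical_function` entry point exported by
# qiskit.circuit and its `synth()` method.
def _synthesis_sketch():
    from qiskit.circuit import classical_function
    oracle = classical_function(grover_oracle)
    return oracle.synth()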
|
libs/csv.py | bgizdov/revolut-stocks | 206 | 12754836 | <gh_stars>100-1000
from libs.utils import humanize_date
from libs import NAP_DATE_FORMAT, NAP_DIGIT_PRECISION
import os
import csv
import decimal
decimal.getcontext().rounding = decimal.ROUND_HALF_UP
def export_to_csv(list_object, csv_file, fieldnames):
csv_list_object = humanize_date(list_object)
with open(csv_file, "w") as fd:
writer = csv.DictWriter(
fd,
fieldnames=fieldnames,
quotechar='"',
quoting=csv.QUOTE_ALL,
)
header = {fieldname: fieldname.replace("_", " ").title() for fieldname in fieldnames}
writer.writerow(header)
for elements in csv_list_object:
writer.writerow(elements)
def export_statements(file_path, statements):
export_to_csv(
statements,
file_path,
[
"trade_date",
"settle_date",
"currency",
"activity_type",
"company",
"symbol_description",
"symbol",
"quantity",
"price",
"amount",
],
)
def export_app8_part1(file_path, purchases):
export_purchases = []
for stock_symbol, stock_queue in purchases.items():
for purchase in stock_queue:
count = purchase["quantity"].quantize(decimal.Decimal(NAP_DIGIT_PRECISION))
if count > 0:
export_purchases.append(
{
**{
"stock_symbol": stock_symbol,
"count": str(count),
"acquire_date": purchase["trade_date"].strftime(NAP_DATE_FORMAT),
"purchase_price_in_currency": purchase["price_in_currency"],
"purchase_price_in_lev": purchase["price"],
},
}
)
export_to_csv(
export_purchases,
file_path,
["stock_symbol", "count", "acquire_date", "purchase_price_in_currency", "purchase_price_in_lev"],
)
def export_app5_table2(file_path, sales):
sales = [
{
**{
k: v
for k, v in sale.items()
if k not in ["symbol", "avg_purchase_price", "sell_exchange_rate", "profit_in_currency", "loss_in_currency"]
},
**{"code": 508},
}
for sale in sales
]
export_to_csv(
sales,
file_path,
["code", "trade_date", "sell_price", "purchase_price", "profit", "loss"],
)
def export_app8_part4_1(file_path, dividend_taxes):
dividends = [
{
**{k: v for k, v in dividend_tax.items() if k not in ["symbol"]},
**{"profit_code": 8141, "tax_code": 1},
**{
"gross_profit_amount": dividend_tax["gross_profit_amount"].quantize(decimal.Decimal(NAP_DIGIT_PRECISION)),
"paid_tax_amount": dividend_tax["paid_tax_amount"].quantize(decimal.Decimal(NAP_DIGIT_PRECISION)),
"owe_tax": dividend_tax["owe_tax"].quantize(decimal.Decimal(NAP_DIGIT_PRECISION)),
},
}
for dividend_tax in dividend_taxes
]
export_to_csv(
dividends,
file_path,
["stock_symbol", "company", "profit_code", "tax_code", "gross_profit_amount", "paid_tax_amount", "owe_tax"],
)
|
src/python/grpcio_tests/tests_aio/unit/done_callback_test.py | echo80313/grpc | 36,552 | 12754855 | <gh_stars>1000+
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing the done callbacks mechanism."""
import asyncio
import logging
import unittest
import grpc
from grpc.experimental import aio
from src.proto.grpc.testing import messages_pb2
from src.proto.grpc.testing import test_pb2_grpc
from tests_aio.unit._common import inject_callbacks
from tests_aio.unit._test_base import AioTestBase
from tests_aio.unit._test_server import start_test_server
_NUM_STREAM_RESPONSES = 5
_REQUEST_PAYLOAD_SIZE = 7
_RESPONSE_PAYLOAD_SIZE = 42
_REQUEST = b'\x01\x02\x03'
_RESPONSE = b'\x04\x05\x06'
_TEST_METHOD = '/test/Test'
_FAKE_METHOD = '/test/Fake'
class TestClientSideDoneCallback(AioTestBase):
async def setUp(self):
address, self._server = await start_test_server()
self._channel = aio.insecure_channel(address)
self._stub = test_pb2_grpc.TestServiceStub(self._channel)
async def tearDown(self):
await self._channel.close()
await self._server.stop(None)
async def test_add_after_done(self):
call = self._stub.UnaryCall(messages_pb2.SimpleRequest())
self.assertEqual(grpc.StatusCode.OK, await call.code())
validation = inject_callbacks(call)
await validation
async def test_unary_unary(self):
call = self._stub.UnaryCall(messages_pb2.SimpleRequest())
validation = inject_callbacks(call)
self.assertEqual(grpc.StatusCode.OK, await call.code())
await validation
async def test_unary_stream(self):
request = messages_pb2.StreamingOutputCallRequest()
for _ in range(_NUM_STREAM_RESPONSES):
request.response_parameters.append(
messages_pb2.ResponseParameters(size=_RESPONSE_PAYLOAD_SIZE))
call = self._stub.StreamingOutputCall(request)
validation = inject_callbacks(call)
response_cnt = 0
async for response in call:
response_cnt += 1
self.assertIsInstance(response,
messages_pb2.StreamingOutputCallResponse)
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
self.assertEqual(_NUM_STREAM_RESPONSES, response_cnt)
self.assertEqual(grpc.StatusCode.OK, await call.code())
await validation
async def test_stream_unary(self):
payload = messages_pb2.Payload(body=b'\0' * _REQUEST_PAYLOAD_SIZE)
request = messages_pb2.StreamingInputCallRequest(payload=payload)
async def gen():
for _ in range(_NUM_STREAM_RESPONSES):
yield request
call = self._stub.StreamingInputCall(gen())
validation = inject_callbacks(call)
response = await call
self.assertIsInstance(response, messages_pb2.StreamingInputCallResponse)
self.assertEqual(_NUM_STREAM_RESPONSES * _REQUEST_PAYLOAD_SIZE,
response.aggregated_payload_size)
self.assertEqual(grpc.StatusCode.OK, await call.code())
await validation
async def test_stream_stream(self):
call = self._stub.FullDuplexCall()
validation = inject_callbacks(call)
request = messages_pb2.StreamingOutputCallRequest()
request.response_parameters.append(
messages_pb2.ResponseParameters(size=_RESPONSE_PAYLOAD_SIZE))
for _ in range(_NUM_STREAM_RESPONSES):
await call.write(request)
response = await call.read()
self.assertIsInstance(response,
messages_pb2.StreamingOutputCallResponse)
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
await call.done_writing()
self.assertEqual(grpc.StatusCode.OK, await call.code())
await validation
class TestServerSideDoneCallback(AioTestBase):
async def setUp(self):
self._server = aio.server()
port = self._server.add_insecure_port('[::]:0')
self._channel = aio.insecure_channel('localhost:%d' % port)
async def tearDown(self):
await self._channel.close()
await self._server.stop(None)
async def _register_method_handler(self, method_handler):
"""Registers method handler and starts the server"""
generic_handler = grpc.method_handlers_generic_handler(
'test',
dict(Test=method_handler),
)
self._server.add_generic_rpc_handlers((generic_handler,))
await self._server.start()
async def test_unary_unary(self):
validation_future = self.loop.create_future()
async def test_handler(request: bytes, context: aio.ServicerContext):
self.assertEqual(_REQUEST, request)
validation_future.set_result(inject_callbacks(context))
return _RESPONSE
await self._register_method_handler(
grpc.unary_unary_rpc_method_handler(test_handler))
response = await self._channel.unary_unary(_TEST_METHOD)(_REQUEST)
self.assertEqual(_RESPONSE, response)
validation = await validation_future
await validation
async def test_unary_stream(self):
validation_future = self.loop.create_future()
async def test_handler(request: bytes, context: aio.ServicerContext):
self.assertEqual(_REQUEST, request)
validation_future.set_result(inject_callbacks(context))
for _ in range(_NUM_STREAM_RESPONSES):
yield _RESPONSE
await self._register_method_handler(
grpc.unary_stream_rpc_method_handler(test_handler))
call = self._channel.unary_stream(_TEST_METHOD)(_REQUEST)
async for response in call:
self.assertEqual(_RESPONSE, response)
validation = await validation_future
await validation
async def test_stream_unary(self):
validation_future = self.loop.create_future()
async def test_handler(request_iterator, context: aio.ServicerContext):
validation_future.set_result(inject_callbacks(context))
async for request in request_iterator:
self.assertEqual(_REQUEST, request)
return _RESPONSE
await self._register_method_handler(
grpc.stream_unary_rpc_method_handler(test_handler))
call = self._channel.stream_unary(_TEST_METHOD)()
for _ in range(_NUM_STREAM_RESPONSES):
await call.write(_REQUEST)
await call.done_writing()
self.assertEqual(_RESPONSE, await call)
validation = await validation_future
await validation
async def test_stream_stream(self):
validation_future = self.loop.create_future()
async def test_handler(request_iterator, context: aio.ServicerContext):
validation_future.set_result(inject_callbacks(context))
async for request in request_iterator:
self.assertEqual(_REQUEST, request)
return _RESPONSE
await self._register_method_handler(
grpc.stream_stream_rpc_method_handler(test_handler))
call = self._channel.stream_stream(_TEST_METHOD)()
for _ in range(_NUM_STREAM_RESPONSES):
await call.write(_REQUEST)
await call.done_writing()
async for response in call:
self.assertEqual(_RESPONSE, response)
validation = await validation_future
await validation
async def test_error_in_handler(self):
"""Errors in the handler still triggers callbacks."""
validation_future = self.loop.create_future()
async def test_handler(request: bytes, context: aio.ServicerContext):
self.assertEqual(_REQUEST, request)
validation_future.set_result(inject_callbacks(context))
raise RuntimeError('A test RuntimeError')
await self._register_method_handler(
grpc.unary_unary_rpc_method_handler(test_handler))
with self.assertRaises(aio.AioRpcError) as exception_context:
await self._channel.unary_unary(_TEST_METHOD)(_REQUEST)
rpc_error = exception_context.exception
self.assertEqual(grpc.StatusCode.UNKNOWN, rpc_error.code())
validation = await validation_future
await validation
async def test_error_in_callback(self):
"""Errors in the callback won't be propagated to client."""
validation_future = self.loop.create_future()
async def test_handler(request: bytes, context: aio.ServicerContext):
self.assertEqual(_REQUEST, request)
def exception_raiser(unused_context):
raise RuntimeError('A test RuntimeError')
context.add_done_callback(exception_raiser)
validation_future.set_result(inject_callbacks(context))
return _RESPONSE
await self._register_method_handler(
grpc.unary_unary_rpc_method_handler(test_handler))
response = await self._channel.unary_unary(_TEST_METHOD)(_REQUEST)
self.assertEqual(_RESPONSE, response)
# Following callbacks won't be invoked, if one of the callback crashed.
validation = await validation_future
with self.assertRaises(asyncio.TimeoutError):
await validation
# Invoke RPC one more time to ensure the toxic callback won't break the
# server.
with self.assertRaises(aio.AioRpcError) as exception_context:
await self._channel.unary_unary(_FAKE_METHOD)(_REQUEST)
rpc_error = exception_context.exception
self.assertEqual(grpc.StatusCode.UNIMPLEMENTED, rpc_error.code())
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
unittest.main(verbosity=2)
|
external/heng/tgs/pspnet/loss.py | liaopeiyuan/ml-arsenal-public | 280 | 12754860 | <reponame>liaopeiyuan/ml-arsenal-public
from include import *
#https://github.com/marvis/pytorch-yolo2/blob/master/FocalLoss.py
#https://github.com/unsky/focal-loss
class FocalLoss2d(nn.Module):
def __init__(self, gamma=2, size_average=True):
super(FocalLoss2d, self).__init__()
self.gamma = gamma
self.size_average = size_average
def forward(self, logit, target, class_weight=None, type='softmax'):
target = target.view(-1, 1).long()
if type=='sigmoid':
if class_weight is None:
class_weight = [1]*2 #[0.5, 0.5]
prob = F.sigmoid(logit)
prob = prob.view(-1, 1)
prob = torch.cat((1-prob, prob), 1)
select = torch.FloatTensor(len(prob), 2).zero_().cuda()
select.scatter_(1, target, 1.)
elif type=='softmax':
B,C,H,W = logit.size()
if class_weight is None:
class_weight =[1]*C #[1/C]*C
logit = logit.permute(0, 2, 3, 1).contiguous().view(-1, C)
prob = F.softmax(logit,1)
select = torch.FloatTensor(len(prob), C).zero_().cuda()
select.scatter_(1, target, 1.)
class_weight = torch.FloatTensor(class_weight).cuda().view(-1,1)
class_weight = torch.gather(class_weight, 0, target)
prob = (prob*select).sum(1).view(-1,1)
prob = torch.clamp(prob,1e-8,1-1e-8)
batch_loss = - class_weight *(torch.pow((1-prob), self.gamma))*prob.log()
if self.size_average:
loss = batch_loss.mean()
else:
loss = batch_loss
return loss
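# Minimal usage sketch for FocalLoss2d (assumed shapes): softmax focal loss on
# a [B, C, H, W] logit map with integer class targets. A CUDA device is
# assumed because the implementation above allocates its buffers with .cuda().
def _focal_loss_sketch():
    logit = torch.randn(2, 3, 8, 8).cuda()
    target = torch.randint(0, 3, (2, 8, 8)).cuda()
    criterion = FocalLoss2d(gamma=2)
    return criterion(logit, target, type='softmax')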
##------------
class RobustFocalLoss2d(nn.Module):
#assume top 10% is outliers
def __init__(self, gamma=2, size_average=True):
super(RobustFocalLoss2d, self).__init__()
self.gamma = gamma
self.size_average = size_average
def forward(self, logit, target, class_weight=None, type='softmax'):
target = target.view(-1, 1).long()
if type=='sigmoid':
if class_weight is None:
class_weight = [1]*2 #[0.5, 0.5]
prob = F.sigmoid(logit)
prob = prob.view(-1, 1)
prob = torch.cat((1-prob, prob), 1)
select = torch.FloatTensor(len(prob), 2).zero_().cuda()
select.scatter_(1, target, 1.)
elif type=='softmax':
B,C,H,W = logit.size()
if class_weight is None:
class_weight =[1]*C #[1/C]*C
logit = logit.permute(0, 2, 3, 1).contiguous().view(-1, C)
prob = F.softmax(logit,1)
select = torch.FloatTensor(len(prob), C).zero_().cuda()
select.scatter_(1, target, 1.)
class_weight = torch.FloatTensor(class_weight).cuda().view(-1,1)
class_weight = torch.gather(class_weight, 0, target)
prob = (prob*select).sum(1).view(-1,1)
prob = torch.clamp(prob,1e-8,1-1e-8)
focus = torch.pow((1-prob), self.gamma)
#focus = torch.where(focus < 2.0, focus, torch.zeros(prob.size()).cuda())
focus = torch.clamp(focus,0,2)
batch_loss = - class_weight *focus*prob.log()
if self.size_average:
loss = batch_loss.mean()
else:
loss = batch_loss
return loss
##------------
## http://geek.csdn.net/news/detail/126833
class PseudoBCELoss2d(nn.Module):
def __init__(self):
super(PseudoBCELoss2d, self).__init__()
def forward(self, logit, truth):
z = logit.view (-1)
t = truth.view (-1)
loss = z.clamp(min=0) - z*t + torch.log(1 + torch.exp(-z.abs()))
loss = loss.sum()/len(t) #w.sum()
return loss
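# Numerical sketch (assumed shapes): the stable elementwise form above should
# match PyTorch's built-in binary_cross_entropy_with_logits up to floating
# point noise.
def _pseudo_bce_check_sketch():
    logit = torch.randn(2, 1, 4, 4)
    truth = torch.randint(0, 2, (2, 1, 4, 4)).float()
    ours = PseudoBCELoss2d()(logit, truth)
    ref = F.binary_cross_entropy_with_logits(logit, truth)
    return torch.allclose(ours, ref, atol=1e-6)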
#
# # https://github.com/bermanmaxim/jaccardSegment/blob/master/losses.py
# # https://discuss.pytorch.org/t/solved-what-is-the-correct-way-to-implement-custom-loss-function/3568/4
# class CrossEntropyLoss2d(nn.Module):
# def __init__(self, weight=None, size_average=True):
# super(CrossEntropyLoss2d, self).__init__()
# self.nll_loss = nn.NLLLoss2d(weight, size_average)
#
# def forward(self, logits, targets):
# return self.nll_loss(F.log_softmax(logits), targets)
#
# class BCELoss2d(nn.Module):
# def __init__(self, weight=None, size_average=True):
# super(BCELoss2d, self).__init__()
# self.bce_loss = nn.BCELoss(weight, size_average)
#
# def forward(self, logits, targets):
# probs = F.sigmoid(logits)
# probs_flat = probs.view (-1)
# targets_flat = targets.view(-1)
# return self.bce_loss(probs_flat, targets_flat)
#
#
# class SoftDiceLoss(nn.Module):
# def __init__(self): #weight=None, size_average=True):
# super(SoftDiceLoss, self).__init__()
#
#
# def forward(self, logits, targets):
#
# probs = F.sigmoid(logits)
# num = targets.size(0)
# m1 = probs.view(num,-1)
# m2 = targets.view(num,-1)
# intersection = (m1 * m2)
# score = 2. * (intersection.sum(1)+1) / (m1.sum(1) + m2.sum(1)+1)
# score = 1- score.sum()/num
# return score
#
#
#
# ## http://geek.csdn.net/news/detail/126833
# class WeightedBCELoss2d(nn.Module):
# def __init__(self):
# super(WeightedBCELoss2d, self).__init__()
#
# def forward(self, logits, labels, weights):
# w = weights.view(-1)
# z = logits.view (-1)
# t = labels.view (-1)
# loss = w*z.clamp(min=0) - w*z*t + w*torch.log(1 + torch.exp(-z.abs()))
# loss = loss.sum()/(w.sum()+ 1e-12)
# return loss
#
# class WeightedSoftDiceLoss(nn.Module):
# def __init__(self):
# super(WeightedSoftDiceLoss, self).__init__()
#
# def forward(self, logits, labels, weights):
# probs = F.sigmoid(logits)
# num = labels.size(0)
# w = (weights).view(num,-1)
# w2 = w*w
# m1 = (probs ).view(num,-1)
# m2 = (labels ).view(num,-1)
# intersection = (m1 * m2)
# score = 2. * ((w2*intersection).sum(1)+1) / ((w2*m1).sum(1) + (w2*m2).sum(1)+1)
# score = 1 - score.sum()/num
# return score
#
#
#
#
#
# def multi_loss(logits, labels):
# #l = BCELoss2d()(logits, labels)
#
#
# if 0:
# l = BCELoss2d()(logits, labels) + SoftDiceLoss()(logits, labels)
#
# #compute weights
# else:
# batch_size,C,H,W = labels.size()
# weights = Variable(torch.tensor.torch.ones(labels.size())).cuda()
#
# if 1: #use weights
# kernel_size = 5
# avg = F.avg_pool2d(labels,kernel_size=kernel_size,padding=kernel_size//2,stride=1)
# boundary = avg.ge(0.01) * avg.le(0.99)
# boundary = boundary.float()
#
# w0 = weights.sum()
# weights = weights + boundary*2
# w1 = weights.sum()
# weights = weights/w1*w0
#
# l = WeightedBCELoss2d()(logits, labels, weights) + \
# WeightedSoftDiceLoss()(logits, labels, weights)
#
# return l
#
#
# #
# #
# #
# #
# #
# #
# #
# # class SoftCrossEntroyLoss(nn.Module):
# # def __init__(self):
# # super(SoftCrossEntroyLoss, self).__init__()
# #
# # def forward(self, logits, soft_labels):
# # #batch_size, num_classes = logits.size()
# # # soft_labels = labels.view(-1,num_classes)
# # # logits = logits.view(-1,num_classes)
# #
# # logits = logits - logits.max()
# # log_sum_exp = torch.log(torch.sum(torch.exp(logits), 1))
# # loss = - (soft_labels*logits).sum(1) + log_sum_exp
# # loss = loss.mean()
# #
# # return loss
# #
# #
# #
# # # loss, accuracy -------------------------
# # def top_accuracy(probs, labels, top_k=(1,)):
# # """Computes the precision@k for the specified values of k"""
# #
# # probs = probs.data
# # labels = labels.data
# #
# # max_k = max(top_k)
# # batch_size = labels.size(0)
# #
# # values, indices = probs.topk(max_k, dim=1, largest=True, sorted=True)
# # indices = indices.t()
# # corrects = indices.eq(labels.view(1, -1).expand_as(indices))
# #
# # accuracy = []
# # for k in top_k:
# # # https://stackoverflow.com/questions/509211/explain-slice-notation
# # # a[:end] # items from the beginning through end-1
# # c = corrects[:k].view(-1).float().sum(0, keepdim=True)
# # accuracy.append(c.mul_(1. / batch_size))
# # return accuracy
# #
# #
# # ## focal loss ## ---------------------------------------------------
# # class CrossEntroyLoss(nn.Module):
# # def __init__(self):
# # super(CrossEntroyLoss, self).__init__()
# #
# # def forward(self, logits, labels):
# # #batch_size, num_classes = logits.size()
# # # labels = labels.view(-1,1)
# # # logits = logits.view(-1,num_classes)
# #
# # max_logits = logits.max()
# # log_sum_exp = torch.log(torch.sum(torch.exp(logits-max_logits), 1))
# # loss = log_sum_exp - logits.gather(dim=1, index=labels.view(-1,1)).view(-1) + max_logits
# # loss = loss.mean()
# #
# # return loss
# #
# # ## https://github.com/unsky/focal-loss
# # ## https://github.com/sciencefans/Focal-Loss
# # ## https://www.kaggle.com/c/carvana-image-masking-challenge/discussion/39951
# #
# # # https://raberrytv.wordpress.com/2017/07/01/pytorch-kludges-to-ensure-numerical-stability/
# # # https://github.com/pytorch/pytorch/issues/1620
# # class FocalLoss(nn.Module):
# # def __init__(self,gamma = 2, alpha=1.2):
# # super(FocalLoss, self).__init__()
# # self.gamma = gamma
# # self.alpha = alpha
# #
# #
# # def forward(self, logits, labels):
# # eps = 1e-7
# #
# # # loss = - np.power(1 - p, gamma) * np.log(p))
# # probs = F.softmax(logits)
# # probs = probs.gather(dim=1, index=labels.view(-1,1)).view(-1)
# # probs = torch.clamp(probs, min=eps, max=1-eps)
# #
# # loss = -torch.pow(1-probs, self.gamma) *torch.log(probs)
# # loss = loss.mean()*self.alpha
# #
# # return loss
# #
# #
# #
# #
# # # https://arxiv.org/pdf/1511.05042.pdf
# # class TalyorCrossEntroyLoss(nn.Module):
# # def __init__(self):
# # super(TalyorCrossEntroyLoss, self).__init__()
# #
# # def forward(self, logits, labels):
# # #batch_size, num_classes = logits.size()
# # # labels = labels.view(-1,1)
# # # logits = logits.view(-1,num_classes)
# #
# # talyor_exp = 1 + logits + logits**2
# # loss = talyor_exp.gather(dim=1, index=labels.view(-1,1)).view(-1) /talyor_exp.sum(dim=1)
# # loss = loss.mean()
# #
# # return loss
# #
# # # check #################################################################
# # def run_check_focal_loss():
# # batch_size = 64
# # num_classes = 15
# #
# # logits = np.random.uniform(-2,2,size=(batch_size,num_classes))
# # labels = np.random.choice(num_classes,size=(batch_size))
# #
# # logits = Variable(torch.from_numpy(logits)).cuda()
# # labels = Variable(torch.from_numpy(labels)).cuda()
# #
# # focal_loss = FocalLoss(gamma = 2)
# # loss = focal_loss(logits, labels)
# # print (loss)
# #
# #
# # def run_check_soft_cross_entropy_loss():
# # batch_size = 64
# # num_classes = 15
# #
# # logits = np.random.uniform(-2,2,size=(batch_size,num_classes))
# # soft_labels = np.random.uniform(-2,2,size=(batch_size,num_classes))
# #
# # logits = Variable(torch.from_numpy(logits)).cuda()
# # soft_labels = Variable(torch.from_numpy(soft_labels)).cuda()
# # soft_labels = F.softmax(soft_labels,1)
# #
# # soft_cross_entropy_loss = SoftCrossEntroyLoss()
# # loss = soft_cross_entropy_loss(logits, soft_labels)
# # print (loss)
#
# main #################################################################
if __name__ == '__main__':
print( '%s: calling main function ... ' % os.path.basename(__file__))
print('\nsucess!') |
legacy/build_vocab.py | SeongJunKang/pytorch-bert-crf-ner | 353 | 12754865 | <filename>legacy/build_vocab.py
import codecs
import pickle
import pandas as pd
import itertools
from pathlib import Path
from sklearn.model_selection import train_test_split
import gluonnlp as nlp
from pathlib import Path
from collections import Counter
import os
def load_data_from_txt(file_full_name):
with codecs.open(file_full_name, "r", "utf-8" ) as io:
lines = io.readlines()
    # There was a problem with parsing, so the three variables below were introduced!
prev_line = ""
save_flag = False
count = 0
sharp_lines = []
for line in lines:
if prev_line == "\n" or prev_line == "":
save_flag = True
if line[:3] == "## " and save_flag is True:
count += 1
sharp_lines.append(line[3:])
if count == 3:
count = 0
save_flag = False
prev_line = line
list_of_source_no, list_of_source_str, list_of_target_str = sharp_lines[0::3], sharp_lines[1::3], sharp_lines[2::3]
return list_of_source_no, list_of_source_str, list_of_target_str
def main():
cwd = Path.cwd()
data_in = cwd / "data_in"
train_data_in = data_in / "NER-master" / "말뭉치 - 형태소_개체명"
list_of_file_name = [file_name for file_name in os.listdir(train_data_in) if '.txt' in file_name]
list_of_full_file_path = [train_data_in / file_name for file_name in list_of_file_name]
print("num of files: ", len(list_of_full_file_path))
list_of_total_source_no, list_of_total_source_str, list_of_total_target_str = [], [], []
for i, full_file_path in enumerate(list_of_full_file_path):
list_of_source_no, list_of_source_str, list_of_target_str = load_data_from_txt(file_full_name=full_file_path)
list_of_total_source_str.extend(list_of_source_str)
list_of_total_target_str.extend(list_of_target_str)
print("list_of_total_source_str: ", list_of_total_source_str[0])
print("list_of_total_target_str: ", list_of_total_target_str[0])
print("list_of_total_source_str: ", list_of_total_source_str[-10:])
print("list_of_total_target_str: ", list_of_total_target_str[-10:])
print("len(list_of_total_source_str): ", len(list_of_total_source_str))
print("len(list_of_total_target_str): ", len(list_of_total_target_str))
assert len(list_of_total_source_str) == len(list_of_total_target_str)
corpus_full_path = '/var/tmp/corpus.txt'
print("corpus_full_path:" , corpus_full_path)
with open(corpus_full_path, 'w', encoding='utf-8') as io:
for line in list_of_source_str:
io.write(line)
    # This vocab will not actually be used, since the KoBERT vocab is loaded and used instead
# https://github.com/google/sentencepiece/issues/4
# what is hard_vocab_limit?
import sentencepiece as spm
templates = '--input={} --model_prefix={} --vocab_size={} --hard_vocab_limit=false --user_defined_symbols=[CLS],[SEP],[MASK] --pad_id=0 --bos_id=1 --eos_id=2 --unk_id=3'
prefix = 'sentencePiece'
vocab_size = 8000
cmd = templates.format(corpus_full_path, prefix, vocab_size)
spm.SentencePieceTrainer.Train(cmd)
# Load model
sp = spm.SentencePieceProcessor()
sp_model_path = '{}.model'.format(prefix)
sp.Load(sp_model_path)
    print(sp.pad_id())  # result: 0
    print(sp.bos_id())  # result: 1
    print(sp.eos_id())  # result: 2
    print(sp.unk_id())  # result: 3
tokenizer = nlp.data.SentencepieceTokenizer(path=sp_model_path)
detokenizer = nlp.data.SentencepieceDetokenizer(path=sp_model_path)
print(tokenizer)
print(tokenizer("안녕하세요 ㅋㅋ"))
print(detokenizer(tokenizer("안녕하세요 ㅋㅋ")))
list_of_source_tokens = [tokenizer(source_str) for source_str in list_of_total_source_str]
count_tokens = Counter(itertools.chain.from_iterable(list_of_source_tokens))
print("list_of_tokens:", list_of_source_tokens)
print("count_tokens: ", count_tokens)
reserved_tokens = ['[CLS]','[SEP]','[MASK]']
vocab = nlp.Vocab(counter=count_tokens, bos_token=None, eos_token=None, reserved_tokens=reserved_tokens)
print(vocab.unknown_token)
print(vocab.padding_token)
print(vocab.token_to_idx)
import json
import pickle
with open(data_in / 'token_to_index.json', 'w', encoding='utf-8') as io:
json.dump(vocab.token_to_idx, io, ensure_ascii=False, indent=4)
with open(data_in / 'vocab.pkl', mode='wb') as io:
pickle.dump(vocab, io)
with open(data_in / 'list_of_source_tokens.pkl', mode='wb') as io:
pickle.dump(list_of_source_tokens, io)
# with open(data_in / 'list_of_label.pkl', mode='wb') as io:
# pickle.dump(list_of_label, io)
if __name__ == '__main__':
main() |
tests/test_0099-read-from-file-object.py | eic/uproot4 | 133 | 12754869 | <filename>tests/test_0099-read-from-file-object.py
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE
from __future__ import absolute_import
import pytest
import skhep_testdata
import uproot
def test():
with open(skhep_testdata.data_path("uproot-Zmumu.root"), "rb") as f:
assert uproot.open({f: "events"})["px1"].array(library="np")[:10].tolist() == [
-41.1952876442,
35.1180497674,
35.1180497674,
34.1444372454,
22.7835819537,
-19.8623073126,
-19.8623073126,
-20.1773731496,
71.1437106445,
51.0504859191,
]
|
experiments/vera/convert_duot5_output_to_trec.py | Elfsong/pygaggle | 166 | 12754894 | import argparse
import collections
import numpy as np
parser = argparse.ArgumentParser(
description='Convert T5 predictions into a TREC-formatted run.')
parser.add_argument('--predictions', type=str, required=True, help='T5 predictions file.')
parser.add_argument('--query_run_ids', type=str, required=True,
help='File containing query doc id pairs paired with the T5\'s predictions file.')
parser.add_argument('--output', type=str, required=True, help='run file in the TREC format.')
args = parser.parse_args()
examples = collections.defaultdict(dict)
with open(args.query_run_ids) as f_query_run_ids, open(args.predictions) as f_pred:
for line_query_doc_id, line_pred in zip(f_query_run_ids, f_pred):
query_id, doc_id_a, doc_id_b = line_query_doc_id.strip().split()
doc_id_a = doc_id_a.split("#")[0]
doc_id_b = doc_id_b.split("#")[0]
_, score = line_pred.strip().split()
score = float(score)
if doc_id_a not in examples[query_id]:
examples[query_id][doc_id_a] = 0
if doc_id_b not in examples[query_id]:
examples[query_id][doc_id_b] = 0
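        # Symmetric-sum aggregation of the duoT5 pairwise scores (see below):
        # doc A accumulates p(A more relevant than B) = exp(log-prob), while
        # doc B accumulates the complement 1 - p(A more relevant than B).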
examples[query_id][doc_id_a] += np.exp(score)
examples[query_id][doc_id_b] += 1 - np.exp(score)
with open(args.output, 'w') as fout:
for query_id, doc_ids_scores in examples.items():
doc_ids_scores = [
(doc_id, scores)
for doc_id, scores in doc_ids_scores.items()]
doc_ids_scores.sort(key=lambda x: x[1], reverse=True)
for rank, (doc_id, score) in enumerate(doc_ids_scores):
print(2*(len(doc_ids_scores) - 1))
fout.write(
f'{query_id} Q0 {doc_id} {rank + 1} {score/(2*(len(doc_ids_scores) - 1))} duot5\n')
|
loudml/misc.py | toni-moreno/loudml | 245 | 12754897 | <gh_stars>100-1000
"""
Miscellaneous Loud ML helpers
"""
import datetime
import dateutil.parser
import hashlib
import json
import numpy as np
import math
import itertools
from uuid import getnode
from jinja2 import Environment, meta
import loudml
from loudml import (
errors,
)
QUOTE_ESCAPE_TRANS = str.maketrans({
"'": "\\'",
})
DOUBLEQUOTE_ESCAPE_TRANS = str.maketrans({
'"': '\\"',
})
def clear_fields(obj, fields, include_fields):
if include_fields:
out = {
key: obj.get(key)
for key in set(fields)
}
obj.clear()
obj.update(out)
else:
out = {
key: obj.get(key)
for key in (set(obj.keys()) - set(fields))
}
obj.clear()
obj.update(out)
def escape_quotes(string):
"""
Escape simple quotes
"""
return string.translate(QUOTE_ESCAPE_TRANS)
def escape_doublequotes(string):
"""
Escaping double quotes
"""
return string.translate(DOUBLEQUOTE_ESCAPE_TRANS)
def build_agg_name(measurement, field):
return "agg_%s-%s" % (measurement, field)
def parse_timedelta(
delta,
min=None,
max=None,
min_included=True,
max_included=True,
):
"""
Parse time delta
"""
if isinstance(delta, str) and len(delta) > 0:
unit = delta[-1]
if unit in '0123456789':
unit = 's'
value = delta
else:
value = delta[:-1]
else:
unit = 's'
value = delta
try:
value = float(value)
except ValueError:
raise errors.Invalid("invalid time delta value")
if unit == 'M':
value *= 30
unit = 'd'
elif unit == 'y':
value *= 365
unit = 'd'
unit = {
's': 'seconds',
'm': 'minutes',
'h': 'hours',
'd': 'days',
'w': 'weeks',
}.get(unit)
if unit is None:
raise errors.Invalid("invalid time delta unit")
message = "time delta must be {} {} seconds"
if min is not None:
if min_included:
if value < min:
raise errors.Invalid(message.format(">=", min))
else:
if value <= min:
raise errors.Invalid(message.format(">", min))
if max is not None:
if max_included:
if value > max:
raise errors.Invalid(message.format("<=", max))
else:
if value >= max:
raise errors.Invalid(message.format("<", max))
return datetime.timedelta(**{unit: value})
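# A few input examples accepted by parse_timedelta, derived from the code above:
# "90" or "90s" -> 90 seconds, "5m" -> 5 minutes, "2h" -> 2 hours, "3d" -> 3 days,
# "1w" -> 1 week, "1M" -> 30 days, "1y" -> 365 days.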
def ts_to_datetime(ts):
"""
Convert timestamp to datetime
"""
return datetime.datetime.fromtimestamp(ts, datetime.timezone.utc)
def ts_to_str(ts):
"""
Convert timestamp to string
"""
return datetime_to_str(ts_to_datetime(ts))
def str_to_datetime(string):
"""
    Convert string (ISO or relative) to datetime
"""
if string.startswith("now"):
now = datetime.datetime.now()
if len(string) == 3:
return now
return now + parse_timedelta(string[3:])
else:
return dateutil.parser.parse(string)
def str_to_ts(string):
"""
Convert string to timestamp
"""
return str_to_datetime(string).timestamp()
def make_datetime(mixed):
"""
Build a datetime object from a mixed input (second timestamp or string)
"""
try:
return ts_to_datetime(float(mixed))
except ValueError as exn:
if isinstance(mixed, str):
return str_to_datetime(mixed)
else:
raise exn
def make_ts(mixed):
"""
Build a timestamp from a mixed input
(second timestamp or ISO string or relative time)
"""
try:
return float(mixed)
except ValueError:
return str_to_ts(mixed)
def datetime_to_str(dt):
"""
Convert datetime to string
"""
return "%s.%03dZ" % (
dt.strftime("%Y-%m-%dT%H:%M:%S"), dt.microsecond / 1000)
def dt_get_daytime(dt):
"""
Return daytime of a datetime
"""
return (dt.timestamp() / 3600) % 24
def dt_get_weekday(dt):
"""
Return weekday of a datetime
"""
return dt.isoweekday()
class DateRange:
def __init__(self, from_date, to_date):
self.from_ts = make_ts(from_date)
self.to_ts = make_ts(to_date)
if self.to_ts < self.from_ts:
raise errors.Invalid("invalid date range: {}".format(self))
@classmethod
def build_date_range(cls, from_date, to_date, bucket_interval):
"""
        Fix up the date range to be sure that it is a multiple of bucket_interval;
        return timestamps
"""
from_ts = make_ts(from_date)
to_ts = make_ts(to_date)
from_ts = math.floor(
from_ts / bucket_interval) * bucket_interval
to_ts = math.ceil(to_ts / bucket_interval) * bucket_interval
return cls(from_ts, to_ts)
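    # Worked example for build_date_range, derived from the code above: with
    # bucket_interval=60, from_date=125 and to_date=250 give from_ts=120
    # (floored to a multiple of 60) and to_ts=300 (ceiled to a multiple of 60).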
@property
def from_str(self):
return ts_to_str(self.from_ts)
@property
def to_str(self):
return ts_to_str(self.to_ts)
def __str__(self):
return "{}-{}".format(
self.from_str,
self.to_str,
)
def parse_addr(addr, default_port=None):
addr = addr.split(':')
return {
'host': 'localhost' if len(addr[0]) == 0 else addr[0],
'port': default_port if len(addr) == 1 else int(addr[1]),
}
def make_bool(mixed):
"""
Convert value to boolean
"""
if mixed is None:
return False
if isinstance(mixed, bool):
return mixed
try:
return int(mixed) != 0
except ValueError:
pass
if isinstance(mixed, str):
mixed = mixed.lower()
if mixed in ['', 'false', 'no']:
return False
if mixed in ['true', 'yes']:
return True
raise ValueError
def get_date_ranges(from_ts, max_ts, span, interval):
while (from_ts + span) < max_ts:
to_ts = from_ts + span
yield ts_to_str(from_ts), ts_to_str(to_ts)
from_ts += interval
def load_hook(hook_name, hook_data, model, storage, source):
hook_type = hook_data.get('type')
hook_cls = loudml.load_entry_point('loudml.hooks', hook_type)
if hook_cls is None:
raise errors.NotFound("unknown hook type '{}'".format(hook_type))
return hook_cls(
hook_name,
hook_data.get('config'),
model,
storage,
source,
)
def parse_constraint(constraint):
try:
feature, _type, threshold = constraint.split(':')
except ValueError:
raise errors.Invalid("invalid format for 'constraint' parameter")
if _type not in ('low', 'high'):
raise errors.Invalid(
"invalid threshold type for 'constraint' parameter")
try:
threshold = float(threshold)
except ValueError:
raise errors.Invalid("invalid threshold for 'constraint' parameter")
return {
'feature': feature,
'type': _type,
'threshold': threshold,
}
#http://stackoverflow.com/questions/4284991/parsing-nested-parentheses-in-python-grab-content-by-level # noqa
def parse_expression(string):
"""Generate parenthesized contents in string as pairs (level, contents)."""
stack = []
for i, c in enumerate(string):
if c == '(':
stack.append(i)
elif c == ')' and stack:
start = stack.pop()
yield (len(stack), string[start + 1: i])
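# Usage sketch for parse_expression: list(parse_expression("a(b(c)d)e(f)")) yields
# [(1, 'c'), (0, 'b(c)d'), (0, 'f')] -- inner groups are emitted first, each
# tagged with its nesting level.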
def nan_to_none(x):
"""
    Convert value to None if it is NaN
"""
return None if x is np.nan or np.isnan(x) else x
def list_from_np(array):
"""
Convert numpy array into a jsonifiable list
"""
return [nan_to_none(x) for x in array]
def hash_dict(data):
ctx = hashlib.sha1()
ctx.update(json.dumps(data, sort_keys=True).encode("utf-8"))
return ctx.hexdigest()
def chunks(iterable, size=1):
iterator = iter(iterable)
for first in iterator: # stops when iterator is depleted
def chunk(): # construct generator for next chunk
yield first # yield element from for loop
for more in itertools.islice(iterator, size - 1):
yield more # yield more elements from the iterator
yield chunk() # in outer generator, yield next chunk
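# Usage sketch for chunks: each chunk is itself a generator and must be consumed
# before advancing to the next one, e.g.
#     for c in chunks(range(7), 3):
#         print(list(c))   # [0, 1, 2], then [3, 4, 5], then [6]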
def my_host_id():
"""
Compute host identifier.
Identifier is based on:
- identifier computed by Python uuid library (usually MAC address)
- MD5 hashing
It is NOT based on:
- system UUID in DMI entries (requires root privileges and may not be
      available)
- root filesystem UUID (requires root privileges)
"""
m = hashlib.md5()
m.update(str(getnode()).encode('ascii'))
return m.hexdigest()
def find_undeclared_variables(settings):
env = Environment(autoescape=True) # autoescape added via Sonarlint
ast = env.parse(json.dumps(settings))
return meta.find_undeclared_variables(ast)
|
docs/components_page/components/carousel/indicators.py | glsdown/dash-bootstrap-components | 776 | 12754926 | <filename>docs/components_page/components/carousel/indicators.py
import dash_bootstrap_components as dbc
carousel = dbc.Carousel(
items=[
{"key": "1", "src": "/static/images/slide1.svg"},
{"key": "2", "src": "/static/images/slide2.svg"},
{"key": "3", "src": "/static/images/slide3.svg"},
],
controls=True,
indicators=True,
)
|
runtime/translation/models/gnmt_large/gpus=4/gnmt_large.py | NestLakerJasonLIN/pipedream | 273 | 12754961 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
from .stage0 import Stage0
from .stage1 import Stage1
from .stage2 import Stage2
from .stage3 import Stage3
class GNMTSplit(torch.nn.Module):
def __init__(self):
super(GNMTSplit, self).__init__()
self.stage0 = Stage0()
self.stage1 = Stage1()
self.stage2 = Stage2()
self.stage3 = Stage3()
def forward(self, input0, input1, input2):
(out0, out2, out1, out3) = self.stage0(input0, input1, input2)
(out12, out13, out4, out5, out6) = self.stage1(out0, out2, out1, out3)
(out14, out15, out16, out17) = self.stage2(out12, out13, out4, out5, out6)
out18 = self.stage3(out12, out14, out15, out16, out17)
return out18
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, torch.nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.BatchNorm2d):
torch.nn.init.constant_(m.weight, 1)
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.Linear):
torch.nn.init.normal_(m.weight, 0, 0.01)
torch.nn.init.constant_(m.bias, 0)
|
tests/py/test_www_npm_package.py | kant/gratipay.com | 517 | 12754972 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from gratipay.models.package import NPM, Package
from gratipay.testing import Harness
class Tests(Harness):
def setUp(self):
self.make_package()
def test_trailing_slash_redirects(self):
response = self.client.GxT('/on/npm/foo/')
assert response.code == 302
assert response.headers['Location'] == '/on/npm/foo'
def test_anon_gets_signin_page_from_unclaimed(self):
body = self.client.GET('/on/npm/foo').body
assert 'foo</a> npm package on Gratipay:' in body
def test_auth_gets_send_confirmation_page_from_unclaimed(self):
self.make_participant('bob', claimed_time='now')
body = self.client.GET('/on/npm/foo', auth_as='bob').body
assert 'foo</a> npm package:' in body
assert '<EMAIL>' in body
def test_auth_gets_multiple_options_if_present(self):
self.make_package(NPM, 'bar', 'Bar', ['<EMAIL>', '<EMAIL>'])
self.make_participant('bob', claimed_time='now')
body = self.client.GET('/on/npm/bar', auth_as='bob').body
assert '<EMAIL>' in body
assert '<EMAIL>' in body
def test_auth_gets_something_if_no_emails(self):
self.make_package(NPM, 'bar', 'Bar', [])
self.make_participant('bob', claimed_time='now')
body = self.client.GET('/on/npm/bar', auth_as='bob').body
assert "No email addresses on file" in body
def claim_package(self):
foo = Package.from_names('npm', 'foo')
alice = self.make_participant('alice', claimed_time='now')
alice.start_email_verification('<EMAIL>', foo)
nonce = alice.get_email('<EMAIL>').nonce
alice.finish_email_verification('<EMAIL>', nonce)
team = alice.get_teams()[0]
assert team.package == foo
return team.slug
def test_package_redirects_to_project_if_claimed(self):
self.claim_package()
response = self.client.GxT('/on/npm/foo')
assert response.code == 302
assert response.headers['Location'] == '/foo/'
def test_package_served_as_project_if_claimed(self):
self.claim_package()
assert 'owned by' in self.client.GET('/foo/').body
class Bulk(Harness):
def setUp(self):
self.make_package()
def test_anon_gets_payment_flow(self):
body = self.client.GET('/on/npm/').body
assert 'Paste a package.json' in body
assert '0 out of all 1 npm package' in body
|
tensorflow/python/data/experimental/kernel_tests/serialization/parallel_interleave_dataset_serialization_test.py | MathMachado/tensorflow | 848 | 12754981 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ParallelInterleaveDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class ParallelInterleaveDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def setUp(self):
self.input_values = np.array([4, 5, 6], dtype=np.int64)
self.num_repeats = 2
self.num_outputs = np.sum(self.input_values) * 2
def _build_ds(self, cycle_length, block_length, sloppy=False):
return (dataset_ops.Dataset.from_tensor_slices(
self.input_values).repeat(self.num_repeats).apply(
interleave_ops.parallel_interleave(
lambda x: dataset_ops.Dataset.range(10 * x, 11 * x),
cycle_length, block_length, sloppy)))
def testSerializationCore(self):
# cycle_length > 1, block_length > 1
cycle_length = 2
block_length = 3
self.run_core_tests(lambda: self._build_ds(cycle_length, block_length),
self.num_outputs)
# cycle_length = 1
cycle_length = 1
block_length = 3
self.run_core_tests(lambda: self._build_ds(cycle_length, block_length),
self.num_outputs)
# block_length = 1
cycle_length = 2
block_length = 1
self.run_core_tests(lambda: self._build_ds(cycle_length, block_length),
self.num_outputs)
def testSerializationWithSloppy(self):
break_points = self.gen_break_points(self.num_outputs, 10)
expected_outputs = np.repeat(
np.concatenate([np.arange(10 * x, 11 * x) for x in self.input_values]),
self.num_repeats).tolist()
def run_test(cycle_length, block_length):
actual = self.gen_outputs(
lambda: self._build_ds(cycle_length, block_length, True),
break_points, self.num_outputs)
self.assertSequenceEqual(sorted(actual), expected_outputs)
# cycle_length > 1, block_length > 1
run_test(2, 3)
# cycle_length = 1
run_test(1, 3)
# block_length = 1
run_test(2, 1)
def testSparseCore(self):
def _map_fn(i):
return sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2])
def _interleave_fn(x):
return dataset_ops.Dataset.from_tensor_slices(
sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))
def _build_dataset():
return dataset_ops.Dataset.range(10).map(_map_fn).apply(
interleave_ops.parallel_interleave(_interleave_fn, 1))
self.run_core_tests(_build_dataset, 20)
if __name__ == '__main__':
test.main()
|
setup.py | DPDmancul/termpdf.py | 216 | 12755001 | #!/usr/bin/python3
from distutils.core import setup
setup(name='termpdf.py',
version='0.1.0',
description='Graphical pdf reader that works inside the kitty terminal',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/dsanson/termpdf.py',
scripts=['termpdf.py'],
requires=[
'PyMuPDF',
'pyperclip',
'pdfrw',
'pybtex',
'pynvim',
'roman',
'pagelabels'
]
)
|
tensorflow_serving/model_servers/profiler_client.py | mzhang-code/serving | 5,791 | 12755008 | # Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple client to send profiling request to ModelServer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.profiler import profiler_client
def main(argv):
server = argv[1] if len(argv) > 1 else 'localhost:8500'
logdir = argv[2] if len(argv) > 2 else '/tmp'
duration_ms = argv[3] if len(argv) > 3 else 2000
profiler_client.trace(server, logdir, duration_ms)
if __name__ == '__main__':
tf.compat.v1.app.run()
|
tests/plugins/test_pexpect_filters.py | dexy/dexy | 136 | 12755017 | <filename>tests/plugins/test_pexpect_filters.py
from dexy.doc import Doc
from tests.utils import assert_in_output
from tests.utils import wrap
from nose.exc import SkipTest
def test_shint_filter():
with wrap() as wrapper:
src = """
### @export "touch"
touch newfile.txt
### @export "ls"
ls
"""
doc = Doc("example.sh|idio|shint|pyg",
wrapper,
[],
contents = src)
wrapper.run_docs(doc)
assert list(doc.output_data().keys()) == ['1', 'touch', 'ls']
SCALA = """object HelloWorld {
def main(args: Array[String]) {
println("Hello, world!")
}
}
"""
def test_scala_repl():
raise SkipTest()
with wrap() as wrapper:
doc = Doc("HelloWorld.scala|scalai",
wrapper,
[],
contents = SCALA
)
wrapper.run_docs(doc)
assert "defined module HelloWorld" in str(doc.output_data())
RUST = """fn main() {
io::println("hello?");
}"""
def test_rust_interactive():
raise SkipTest("Need to get rust interactive filter working.")
with wrap() as wrapper:
doc = Doc("example.rs|rusti",
wrapper,
[],
contents = "1+1"
)
wrapper.run_docs(doc)
assert "rusti> 1+1\n2" in str(doc.output_data())
def test_rust():
with wrap() as wrapper:
doc = Doc("example.rs|rustc",
wrapper,
[],
contents = RUST
)
wrapper.run_docs(doc)
assert str(doc.output_data()) == "hello?\n"
PYTHON_CONTENT = """
x = 6
y = 7
"""
def test_python_filter_record_vars():
with wrap() as wrapper:
doc = Doc("example.py|pycon",
wrapper,
[],
pycon = { 'record-vars' : True},
contents = PYTHON_CONTENT
)
wrapper.run_docs(doc)
assert "doc:example.py-vars.json" in wrapper.nodes
def test_matlab_filter():
raise SkipTest()
assert_in_output('matlabint', "fprintf (1, 'Hello, world\\n')\n", "< M A T L A B (R) >")
def test_clj_filter():
assert_in_output('cljint', '1+1', "user=> 1+1")
def test_ksh_filter():
assert_in_output('kshint', 'ls', "example.txt")
def test_php_filter():
assert_in_output('phpint', '1+1', "php > 1+1")
def test_rhino_filter():
assert_in_output('rhinoint', '1+1', "js> 1+1")
def test_irb_filter():
assert_in_output('irb', "puts 'hello'", ">> puts 'hello'")
def test_pycon_filter_single_section():
assert_in_output('pycon', "print 'hello'", ">>> print 'hello'")
def test_ipython_filter():
assert_in_output('ipython', "print 'hello'", ">>> print 'hello'")
def test_r_filter():
assert_in_output('r', '1+1', '> 1+1')
def test_pycon_filter():
with wrap() as wrapper:
src = """
### @export "vars"
x = 6
y = 7
### @export "multiply"
x*y
"""
node = Doc("example.py|idio|pycon",
wrapper,
[],
contents=src)
wrapper.run_docs(node)
assert list(node.output_data().keys()) == ['1', 'vars', 'multiply']
assert str(node.output_data()['vars']) == """
>>> x = 6
>>> y = 7"""
assert str(node.output_data()['multiply']) == """
>>> x*y
42"""
|
tests/convert/test_to_address.py | x3devships/brownie | 1,595 | 12755023 | #!/usr/bin/python3
import pytest
from brownie.convert import to_address
addr = "0x14b0Ed2a7C4cC60DD8F676AE44D0831d3c9b2a9E"
addr_encoded = b"\x14\xb0\xed*|L\xc6\r\xd8\xf6v\xaeD\xd0\x83\x1d<\x9b*\x9e"
def test_success():
assert to_address(addr) == addr
assert to_address(addr.lower()) == addr
assert to_address(addr.upper()) == addr
assert to_address(addr[2:]) == addr
def test_bytes_success():
assert to_address(addr_encoded) == addr
def test_wrong_length():
with pytest.raises(ValueError):
to_address("0x00")
with pytest.raises(ValueError):
to_address(addr[:20])
with pytest.raises(ValueError):
to_address(addr + "00")
|
migrations/versions/f5f55452fa58_make_scheduler_params_a_separate_jsonb_.py | eltociear/chaos_genius | 320 | 12755025 | """make scheduler_params a separate JSONB field
Revision ID: f5f55452fa58
Revises: <PASSWORD>
Create Date: 2021-09-28 16:48:42.834962
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'f5f55452fa58'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('kpi', sa.Column('scheduler_params', postgresql.JSONB(astext_type=sa.Text()), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('kpi', 'scheduler_params')
# ### end Alembic commands ###
|
pandas_market_calendars/holidays_uk.py | jmcdermo/pandas_market_calendars | 460 | 12755060 | <filename>pandas_market_calendars/holidays_uk.py
# UK Holidays
import pandas as pd
from pandas import DateOffset, Timestamp
from pandas.tseries.holiday import Holiday, MO, previous_friday, weekend_to_monday
from pandas_market_calendars.market_calendar import MONDAY, TUESDAY
# New Year's Eve
LSENewYearsEve = Holiday(
"New Year's Eve",
month=12,
day=31,
observance=previous_friday,
)
# New Year's Day
LSENewYearsDay = Holiday(
"New Year's Day",
month=1,
day=1,
observance=weekend_to_monday,
)
# Early May bank holiday has two exceptions based on the 50th and 75th anniversary of VE-Day
# 1995-05-01 Early May bank holiday removed for VE-day 50th anniversary
# 2020-05-04 Early May bank holiday removed for VE-day 75th anniversary
# Early May bank holiday pre-1995
MayBank_pre_1995 = Holiday(
"Early May Bank Holiday",
month=5,
offset=DateOffset(weekday=MO(1)),
day=1,
end_date=Timestamp('1994-12-31'),
)
# Early May bank holiday post-1995 and pre-2020
MayBank_post_1995_pre_2020 = Holiday(
"Early May Bank Holiday",
month=5,
offset=DateOffset(weekday=MO(1)),
day=1,
start_date=Timestamp('1996-01-01'),
end_date=Timestamp('2019-12-31'),
)
# Early May bank holiday post 2020
MayBank_post_2020 = Holiday(
"Early May Bank Holiday",
month=5,
offset=DateOffset(weekday=MO(1)),
day=1,
start_date=Timestamp('2021-01-01')
)
# Spring bank holiday has two exceptions based on the Golden & Diamond Jubilee
# 2002-05-27 Spring bank holiday removed for Golden Jubilee
# 2012-05-28 Spring bank holiday removed for Diamond Jubilee
# 2022-05-31 Spring bank holiday removed for Platinum Jubilee
# Spring bank holiday
SpringBank_pre_2002 = Holiday(
"Spring Bank Holiday",
month=5,
day=31,
offset=DateOffset(weekday=MO(-1)),
end_date=Timestamp('2001-12-31'),
)
SpringBank_post_2002_pre_2012 = Holiday(
"Spring Bank Holiday",
month=5,
day=31,
offset=DateOffset(weekday=MO(-1)),
start_date=Timestamp('2003-01-01'),
end_date=Timestamp('2011-12-31'),
)
SpringBank_post_2012_pre_2022 = Holiday(
"Spring Bank Holiday",
month=5,
day=31,
offset=DateOffset(weekday=MO(-1)),
start_date=Timestamp('2013-01-01'),
end_date=Timestamp('2021-12-31'),
)
SpringBank_post_2022 = Holiday(
"Spring Bank Holiday",
month=5,
day=31,
offset=DateOffset(weekday=MO(-1)),
start_date=Timestamp('2022-01-01'),
)
# Summer bank holiday
SummerBank = Holiday(
"Summer Bank Holiday",
month=8,
day=31,
offset=DateOffset(weekday=MO(-1)),
)
# Christmas Eve
ChristmasEve = Holiday(
'Christmas Eve',
month=12,
day=24,
observance=previous_friday,
)
# Christmas
Christmas = Holiday(
"Christmas",
month=12,
day=25,
)
# If christmas day is Saturday Monday 27th is a holiday
# If christmas day is sunday the Tuesday 27th is a holiday
WeekendChristmas = Holiday(
"Weekend Christmas",
month=12,
day=27,
days_of_week=(MONDAY, TUESDAY),
)
# Boxing day
BoxingDay = Holiday(
"Boxing Day",
month=12,
day=26,
)
# If boxing day is saturday then Monday 28th is a holiday
# If boxing day is sunday then Tuesday 28th is a holiday
WeekendBoxingDay = Holiday(
"Weekend Boxing Day",
month=12,
day=28,
days_of_week=(MONDAY, TUESDAY),
)
# One-off holiday additions and removals in England
UniqueCloses = []
# VE-Day Anniversary
UniqueCloses.append(pd.Timestamp("1995-05-08", tz='UTC')) # 50th Anniversary
UniqueCloses.append(pd.Timestamp("2020-05-08", tz='UTC')) # 75th Anniversary
# Queen Elizabeth II Jubilees
# Silver Jubilee
UniqueCloses.append(pd.Timestamp("1977-06-07", tz='UTC'))
# Golden Jubilee
UniqueCloses.append(pd.Timestamp("2002-06-03", tz='UTC'))
UniqueCloses.append(pd.Timestamp("2002-06-04", tz='UTC'))
# Diamond Jubilee
UniqueCloses.append(pd.Timestamp("2012-06-04", tz='UTC'))
UniqueCloses.append(pd.Timestamp("2012-06-05", tz='UTC'))
# Platinum Jubilee
UniqueCloses.append(pd.Timestamp("2022-06-02", tz='UTC'))
UniqueCloses.append(pd.Timestamp("2022-06-03", tz='UTC'))
# Royal Weddings
UniqueCloses.append(pd.Timestamp("1973-11-14", tz='UTC')) # Wedding Day of <NAME> and <NAME>
UniqueCloses.append(pd.Timestamp("1981-07-29", tz='UTC')) # Wedding Day of <NAME> and <NAME>
UniqueCloses.append(pd.Timestamp("2011-04-29", tz='UTC')) # Wedding Day of <NAME> and <NAME>
# Miscellaneous
UniqueCloses.append(pd.Timestamp("1999-12-31", tz='UTC')) # Eve of 3rd Millenium A.D.
|
climpred/tests/test_alignment.py | raybellwaves/climpred | 104 | 12755061 | import logging
import numpy as np
import pytest
import xskillscore as xs
from climpred.exceptions import CoordinateError
from climpred.prediction import compute_hindcast
def test_same_inits_initializations(
hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime, caplog
):
"""Tests that inits are identical at all leads for `same_inits` alignment."""
with caplog.at_level(logging.INFO):
compute_hindcast(
hind_ds_initialized_1d_cftime,
reconstruction_ds_1d_cftime,
alignment="same_inits",
)
for i, record in enumerate(caplog.record_tuples):
if i >= 2:
print(record)
assert "inits: 1954-01-01 00:00:00-2007-01-01 00:00:00" in record[2]
def test_same_inits_verification_dates(
hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime, caplog
):
"""Tests that appropriate verifs are being used at each lead for `same_inits`
alignment."""
with caplog.at_level(logging.INFO):
FIRST_INIT, LAST_INIT = 1954, 2007
compute_hindcast(
hind_ds_initialized_1d_cftime,
reconstruction_ds_1d_cftime,
alignment="same_inits",
)
nleads = hind_ds_initialized_1d_cftime["lead"].size
for i, record in zip(
np.arange(nleads + 2),
caplog.record_tuples,
):
if i >= 2:
print(record)
assert (
f"verifs: {FIRST_INIT+i}-01-01 00:00:00-{LAST_INIT+i}-01-01"
in record[2]
)
@pytest.mark.parametrize("alignment", ["same_inits", "same_verifs"])
def test_disjoint_verif_time(small_initialized_da, small_verif_da, alignment):
"""Tests that alignment works with disjoint time in the verification
data, i.e., non-continuous time sampling to verify against."""
hind = small_initialized_da
verif = small_verif_da.drop_sel(time=1992)
actual = compute_hindcast(hind, verif, alignment=alignment, metric="mse")
assert actual.notnull().all()
# hindcast inits: [1990, 1991, 1992, 1993]
# verif times: [1990, 1991, 1993, 1994]
a = hind.sel(init=[1990, 1992, 1993]).rename({"init": "time"})
b = verif.sel(time=[1991, 1993, 1994])
a["time"] = b["time"]
expected = xs.mse(a, b, "time")
assert actual == expected
@pytest.mark.parametrize("alignment", ["same_inits", "same_verifs"])
def test_disjoint_inits(small_initialized_da, small_verif_da, alignment):
"""Tests that alignment works with disjoint inits in the verification
data, i.e., non-continuous initializing to verify with."""
hind = small_initialized_da.drop_sel(init=1991)
verif = small_verif_da
actual = compute_hindcast(hind, verif, alignment=alignment, metric="mse")
assert actual.notnull().all()
# hindcast inits: [1990, 1992, 1993]
# verif times: [1990, 1991, 1992, 1993, 1994]
a = hind.rename({"init": "time"})
b = verif.sel(time=[1991, 1993, 1994])
a["time"] = b["time"]
expected = xs.mse(a, b, "time")
assert actual == expected
def test_same_verifs_verification_dates(
hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime, caplog
):
"""Tests that verifs are identical at all leads for `same_verifs` alignment."""
with caplog.at_level(logging.INFO):
compute_hindcast(
hind_ds_initialized_1d_cftime,
reconstruction_ds_1d_cftime,
alignment="same_verifs",
)
for i, record in enumerate(caplog.record_tuples):
if i >= 2:
print(record)
assert "verifs: 1964-01-01 00:00:00-2017-01-01 00:00:00" in record[2]
def test_same_verifs_initializations(
hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime, caplog
):
"""Tests that appropriate verifs are being used at each lead for `same_inits`
alignment."""
with caplog.at_level(logging.INFO):
FIRST_INIT, LAST_INIT = 1964, 2017
compute_hindcast(
hind_ds_initialized_1d_cftime,
reconstruction_ds_1d_cftime,
alignment="same_verifs",
)
nleads = hind_ds_initialized_1d_cftime["lead"].size
for i, record in zip(
np.arange(nleads + 2),
caplog.record_tuples,
):
if i >= 2:
print(record)
assert (
f"inits: {FIRST_INIT-i}-01-01 00:00:00-{LAST_INIT-i}-01-01 00:00:00"
in record[2]
)
def test_same_verifs_raises_error_when_not_possible(
hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime
):
"""Tests that appropriate error is raised when a common set of verification dates
cannot be found with the supplied initializations."""
hind = hind_ds_initialized_1d_cftime.isel(lead=slice(0, 3), init=[1, 3, 5, 7, 9])
with pytest.raises(CoordinateError):
compute_hindcast(hind, reconstruction_ds_1d_cftime, alignment="same_verifs")
def test_maximize_alignment_inits(
hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime, caplog
):
"""Tests that appropriate inits are selected for `maximize` alignment."""
with caplog.at_level(logging.INFO):
compute_hindcast(
hind_ds_initialized_1d_cftime,
reconstruction_ds_1d_cftime,
alignment="maximize",
)
# Add dummy values for the first two lines since they are just metadata.
for i, record in zip(
np.concatenate(([0, 0], hind_ds_initialized_1d_cftime.lead.values)),
caplog.record_tuples,
):
if i >= 1:
print(record)
assert (
f"inits: 1954-01-01 00:00:00-{2016-i}-01-01 00:00:00" in record[2]
)
def test_maximize_alignment_verifs(
hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime, caplog
):
"""Tests that appropriate verifs are selected for `maximize` alignment."""
with caplog.at_level(logging.INFO):
compute_hindcast(
hind_ds_initialized_1d_cftime,
reconstruction_ds_1d_cftime,
alignment="maximize",
)
# Add dummy values for the first two lines since they are just metadata.
for i, record in zip(
np.concatenate(([0, 0], hind_ds_initialized_1d_cftime.lead.values)),
caplog.record_tuples,
):
if i >= 1:
print(record)
assert (
f"verifs: {1955+i}-01-01 00:00:00-2017-01-01 00:00:00" in record[2]
)
|
gremlin/neptune-streams/lambda/neptune-streams-demo/neptune_python_utils/bulkload.py | chriscoombs/amazon-neptune-samples | 298 | 12755066 | '''
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
'''
import json
import urllib.request
import os
import time
from neptune_python_utils.endpoints import Endpoints
class BulkLoad:
def __init__(self, source, format='csv', role=None, region=None, endpoints=None):
self.source = source
self.format = format
if role is None:
assert ('NEPTUNE_LOAD_FROM_S3_ROLE_ARN' in os.environ), 'role is missing.'
self.role = os.environ['NEPTUNE_LOAD_FROM_S3_ROLE_ARN']
else:
self.role = role
if region is None:
assert ('AWS_REGION' in os.environ), 'region is missing.'
self.region = os.environ['AWS_REGION']
else:
self.region = region
if endpoints is None:
self.endpoints = Endpoints()
else:
self.endpoints = endpoints
def __load_from(self, source, format, role, region):
return {
'source' : source,
'format' : format,
'iamRoleArn' : role,
'region' : region,
'failOnError' : 'FALSE'
}
def __load(self, loader_url, data):
jsondataasbytes = json.dumps(data).encode('utf8')
req = urllib.request.Request(loader_url, data=jsondataasbytes, headers={'Content-Type': 'application/json'})
response = urllib.request.urlopen(req)
jsonresponse = json.loads(response.read().decode('utf8'))
return jsonresponse['payload']['loadId']
def load_async(self):
localised_source = self.source.replace('${AWS_REGION}', self.region)
loader_url = self.endpoints.loader_endpoint()
json_payload = self.__load_from(localised_source, self.format, self.role, self.region)
print('''curl -X POST \\
-H 'Content-Type: application/json' \\
{} -d \'{}\''''.format(loader_url, json.dumps(json_payload, indent=4)))
load_id = self.__load(loader_url, json_payload)
return BulkLoadStatus(self.endpoints.load_status_endpoint(load_id))
def load(self, interval=2):
status = self.load_async()
print('status_uri: {}'.format(status.uri()))
status.wait(interval)
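# A minimal usage sketch, assuming NEPTUNE_LOAD_FROM_S3_ROLE_ARN and AWS_REGION are
# set (or role/region are passed explicitly) and that Endpoints() resolves the
# Neptune loader endpoint; the S3 path is illustrative only:
#
#     bulkload = BulkLoad(source='s3://example-bucket/vertices/', format='csv')
#     bulkload.load(interval=5)        # blocks, polling load status every 5 seconds
#     # or, non-blocking:
#     status = bulkload.load_async()
#     print(status.uri())
#     status.wait()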
class BulkLoadStatus:
def __init__(self, status_uri):
self.status_uri = status_uri
def status(self):
req = urllib.request.Request(self.status_uri)
response = urllib.request.urlopen(req)
jsonresponse = json.loads(response.read().decode('utf8'))
status = jsonresponse['payload']['overallStatus']['status']
return (status, jsonresponse)
def uri(self):
return self.status_uri
def wait(self, interval=2):
while True:
status, jsonresponse = self.status()
if status == 'LOAD_COMPLETED':
print('load completed')
break
if status == 'LOAD_IN_PROGRESS':
print('loading... {} records inserted'.format(jsonresponse['payload']['overallStatus']['totalRecords']))
time.sleep(interval)
else:
raise Exception(jsonresponse) |
src/amuse/units/core.py | sibonyves/amuse | 131 | 12755071 | <filename>src/amuse/units/core.py<gh_stars>100-1000
from amuse.support.core import late
from amuse.support import exceptions
import numpy
from amuse.support.core import memoize
from amuse.support.core import MultitonMetaClass
class system(object):
ALL = {}
def __init__(self, name):
self.name = name
self.bases = []
self.mapping_from_base_name_to_base = {}
self.ALL[self.name] = self
self.index = len(self.ALL)
def reference_string(self):
return "{0}.get({1!r})".format('system', self.name)
def add_base(self, unit):
unit.system = self
unit.index = len(self.bases)
self.bases.append(unit)
self.mapping_from_base_name_to_base[unit.quantity] = unit
def base(self, name):
return self.mapping_from_base_name_to_base[name]
@classmethod
def get(cls, name):
try:
return cls.ALL[name]
except KeyError as ex:
from amuse.units import nbody_system
from amuse.units import si
return cls.ALL[name]
def __reduce__(self):
return (get_system_with_name, (self.name,))
def __str__(self):
return self.name
class unit(object):
"""
Abstract base class for unit objects.
Two classes of units are defined:
base units
The base units in a given system of units. For SI, these
are meter, kilogram, second, ampere, kelvin, mole and
        candela. See the si module :mod:`amuse.units.si`
derived units
Derived units are created by dividing or multiplying
with a number or with another unit. For example,
        to get a velocity unit we can define vel = 1000 * m / s
Units can also be named, by creating a named unit.
"""
__array_priority__ = 100
def __mul__(self, other):
if isinstance(other, unit):
return mul_unit(self, other)
else:
return other*self
# return factor_unit(other, self)
def __truediv__(self, other):
if isinstance(other, unit):
return div_unit(self, other)
else:
return (1.0/other)*self
# return factor_unit(1.0 / other, self)
def __rmul__(self, other):
if other == 1:
return self
else:
return factor_unit(other, self)
def __ror__(self, value):
"""Create a new Quantity object.
:argument value: numeric value of the quantity, can be
a number or a sequence (list or ndarray)
:returns: new ScalarQuantity or VectorQuantity object
with this unit
Examples
>>> from amuse.units import units
>>> 100 | units.kg
quantity<100 kg>
"""
return self.new_quantity(value)
def __rtruediv__(self, other):
return factor_unit(other, pow_unit(-1,self))
def __div__(self, other):
return self.__truediv__(other)
def __rdiv__(self, other):
return self.__rtruediv__(other)
def __pow__(self, other):
if other == 1:
return self
else:
return pow_unit(other, self)
def __call__(self, x):
return self.new_quantity(x)
def __eq__(self, other):
if self is other:
return True
elif isinstance(other, unit):
return self.base == other.base and self.factor == other.factor
else:
return False
def __ne__(self, other):
if isinstance(other, unit):
if (isinstance(self, base_unit) and isinstance(other, base_unit)) or \
isinstance(self, nonnumeric_unit) or isinstance(other, nonnumeric_unit):
return NotImplemented
            return self.base != other.base or self.factor != other.factor
else:
return True
def __hash__(self):
return self._hash
@late
def _hash(self):
return hash(id(self))
@property
def dtype(self):
return None
@property
def number(self):
return 1.0
@property
def unit(self):
return self
def is_zero(self):
return False
def iskey(self):
return False
def new_quantity(self, value):
"""Create a new Quantity object.
:argument value: numeric value of the quantity, can be
a number or a sequence (list or ndarray)
:returns: new ScalarQuantity or VectorQuantity object
with this unit
"""
from amuse.units import quantities
return quantities.new_quantity(value, self)
def to_simple_form(self):
"""Convert unit to a form with only one factor and powers
:result: Unit with only a factor and power terms
>>> from amuse.units import units
>>> N = (units.m * units.kg) / (units.s * units.s)
>>> N
unit<m * kg / (s * s)>
>>> J = N * units.m
>>> J
unit<m * kg / (s * s) * m>
>>> J.to_simple_form()
unit<m**2 * kg * s**-2>
"""
if not self.base:
return none_unit('none', 'none') * self.factor
result = self.factor
for n, base in self.base:
if n == 1:
if result == 1:
result = base
else:
result = result * base
else:
result = result * (base ** n)
return result
def to_reduced_form(self):
"""Convert unit to a reduced (simpler) form
"""
if not self.base:
return none_unit('none', 'none') * self.factor
total_factor = 1
combined_unit = None
for factor, power, unit in self.get_parts_with_power():
total_factor *= factor
if power == 0:
pass
else:
if combined_unit is None:
combined_unit = unit ** power
else:
combined_unit = combined_unit * (unit ** power)
if total_factor == 1:
return combined_unit
else:
return factor_unit(total_factor , combined_unit)
def to_factor_and_reduced_form(self):
"""Convert unit to a reduced (simpler) form
"""
if not self.base:
return none_unit('none', 'none') * self.factor
total_factor = 1
combined_unit = None
for factor, power, unit in self.get_parts_with_power():
total_factor *= factor
if power == 0:
pass
else:
if combined_unit is None:
combined_unit = unit ** power
else:
combined_unit = combined_unit * (unit ** power)
return total_factor , combined_unit
def are_bases_equal(self, other):
if len(self.base) != len(other.base):
return False
for n1, unit1 in sorted(self.base, key=lambda x: x[1].index):
found = False
for n2, unit2 in other.base:
if unit1 == unit2:
if not n2 == n1:
return False
found = True
break
if not found:
return False
return True
def _compare_bases(self, other, eps = None):
if len(self.base) != len(other.base):
return False
if eps is None:
eps = numpy.finfo(numpy.double).eps
for (n1, unit1), (n2, unit2) in zip(self.base, other.base):
if not unit1 == unit2:
return False
if n1 == n2:
continue
else:
if abs(n1 - n2) < eps:
continue
if abs(n2) > abs(n1):
relativeError = abs((n1 - n2) * 1.0 / n2)
else:
relativeError = abs((n1 - n2) * 1.0 / n1)
if relativeError <= eps:
continue
else:
return False
return True
@memoize
def conversion_factor_from(self, x):
if x.base is None:
return self.factor * 1.0
elif self._compare_bases(x):
this_factor = self.factor * 1.0
other_factor = x.factor
return 1*(this_factor == other_factor) or this_factor / other_factor
else:
raise IncompatibleUnitsException(x, self)
def in_(self, x):
"""Express this quantity in the given unit
:argument unit: The unit to express this quantity in
:result: A Quantity object
Examples
>>> from amuse.units import units
>>> l = 1 | units.AU
>>> l.in_(units.km)
quantity<149597870.691 km>
"""
return self.as_quantity_in(x)
def as_quantity_in(self, unit):
"""Express this unit as a quantity in the given unit
:argument unit: The unit to express this unit in
:result: A Quantity object
Examples
>>> from amuse.units import units
>>> ton = 1000 * units.kg
>>> ton.as_quantity_in(units.kg)
quantity<1000.0 kg>
"""
from amuse.units import quantities
if isinstance(unit, quantities.Quantity):
raise exceptions.AmuseException("Cannot expres a unit in a quantity")
factor = self.conversion_factor_from(unit)
return quantities.new_quantity(factor, unit)
def value_in(self, unit):
"""
Return a numeric value of this unit in the given unit.
Works only when the units are compatible, i.e. from
tonnage to kg's.
A number is returned without any unit information.
:argument unit: wanted unit of the value
:returns: number in the given unit
>>> from amuse.units import units
>>> x = units.km
>>> x.value_in(units.m)
1000.0
"""
return self.conversion_factor_from(unit)
def __repr__(self):
return 'unit<'+str(self)+'>'
def combine_bases(self, base1, base2):
indexed1 = [None] * 7
for n1, unit1 in base1:
indexed1[unit1.index] = (n1, unit1)
indexed2 = [None] * 7
for n2, unit2 in base2:
indexed2[unit2.index] = (n2, unit2)
result = []
for sub1, sub2 in zip(indexed1, indexed2):
if not sub1 is None:
if not sub2 is None:
if sub1[1] == sub2[1]:
result.append((sub1[0], sub2[0], sub1[1]))
else:
raise exceptions.AmuseException("Cannot combine units from "
"different systems: {0} and {1}".format(sub1[1], sub2[1]))
else:
result.append((sub1[0], 0, sub1[1]))
elif not sub2 is None:
result.append((0, sub2[0], sub2[1]))
return result
def has_same_base_as(self, other):
"""Determine if the base of other is the same as the
base of self.
:argument other: unit to compare base to
        :result: True, if bases are compatible.
>>> from amuse.units import units
>>> mps = units.m / units.s
>>> kph = units.km / units.hour
>>> mps.has_same_base_as(kph)
True
>>> mps.has_same_base_as(units.km)
False
"""
return other.base == self.base
def base_unit(self):
if not self.base:
return none_unit('none', 'none')
unit = 1
for n, base in self.base:
if n == 1:
unit = unit*base
else:
unit = unit*(base ** n)
return unit
def is_non_numeric(self):
return False
def is_generic(self):
return False
def is_none(self):
return False
def get_parts_with_power(self):
"""
        The parts of this unit as a list of tuples with factor, power and unit
"""
return ((1.0, 1, self),)
def convert_result_value(self, method, definition, value):
return self.new_quantity(value)
def convert_argument_value(self, method, definition, value):
return value.value_in(self)
def append_result_value(self, method, definition, value, result):
result.append(self.convert_result_value(method, definition, value))
def to_array_of_floats(self):
"""Represent a unit as an array of 8 64-bit floats. First float represents the factor, the other 7 the power of each base unit.
Cannot be used for non numeric units
"""
result = numpy.zeros(9, dtype=numpy.float64)
if not self.base:
return result
result[0] = self.factor
for n, base in self.base:
result[base.index + 2] = n
result[1] = base.system.index
return result
def describe_array_of_floats(self):
"""Create a human readable description of the array of floats
"""
if not self.base:
return 'not a numerical unit'
parts = ['factor']
parts.extend(['-']*8)
for n, base in self.base:
if n != 0:
parts[base.index + 2] = str(base)
else:
parts[base.index + 2] = '-'
parts[1] = str(base.system)
return ', '.join(parts)
@property
def base_system(self):
base=self.base
system=self.base[0][1].system
for b in base:
if system!=b[1].system:
raise Exception("inconsistent unit found")
return self.base[0][1].system
class base_unit(unit):
"""
    base_unit objects are orthogonal, indivisible units
    of a system of units.
A system of units contains a set of base units
:argument quantity: name of the base quantity, for example *length*
:argument name: name of the unit, for example *meter*
:argument symbol: symbol of the unit, for example *m*
:argument system: system of units object
>>> cgs = system("cgs")
>>> cm = base_unit("length", "centimetre", "cm", cgs)
>>> cm
unit<cm>
"""
def __init__(self, quantity, name, symbol, system):
self.quantity = quantity
self.name = name
self.symbol = symbol
self.system = system
system.add_base(self)
def __str__(self):
return self.symbol
def __hash__(self):
return self._hash
@property
def factor(self):
"""
The multiplication factor of a unit.
For example, factor is 1000 for km.
"""
return 1
@late
def base(self):
"""
The base represented as a list of tuples.
        Each tuple consists of a power and a unit.
"""
return ((1,self),)
def reference_string(self):
return '{0}.base({1!r})'.format(self.system.reference_string(), self.quantity)
def __reduce__(self):
return (get_base_unit_with_name, (self.system, self.quantity,))
def __eq__(self, other):
if self is other:
return True
elif isinstance(other, base_unit):
return NotImplemented
else:
return False
class no_system(object):
ALL = {}
@classmethod
def set(cls, unit):
cls.ALL[unit.name] = unit
@classmethod
def get(cls, name):
return cls.ALL[name]
class none_unit(unit, metaclass=MultitonMetaClass):
def __init__(self, name, symbol):
self.name = name
self.symbol = symbol
no_system.set(self)
def __str__(self):
return self.symbol
def reference_string(self):
return 'no_system.get({0!r})'.format(self.name)
@late
def factor(self):
return 1
@late
def base(self):
return ()
def get_parts_with_power(self):
return ()
def is_none(self):
return True
class zero_unit(none_unit):
def __init__(self):
none_unit.__init__(self,'zero', 'zero')
def __str__(self):
return self.symbol
def is_zero(self):
return True
@late
def base(self):
return None
def get_parts_with_power(self):
return None
def conversion_factor_from(self, x):
if x.base is None:
return 1.0
else:
return x.factor
class key_unit(none_unit):
def iskey(self):
return True
@property
def dtype(self):
return numpy.dtype('uint64')
class nonnumeric_unit(unit):
"""
    nonnumeric_unit objects are indivisible units
not connected to any system of units.
nonnumeric_units cannot be used to
derive new units from.
nonnumeric_units have no physical meaning.
"""
def __init__(self, name, symbol):
self.name = name
self.symbol = symbol
no_system.set(self)
def __str__(self):
return self.symbol
def reference_string(self):
return 'no_system.get({0!r})'.format(self.name)
def __mul__(self, other):
if other == 1:
return self
raise exceptions.AmuseException("Cannot derive other units from a non numeric unit")
def __truediv__(self, other):
raise exceptions.AmuseException("Cannot derive other units from a non numeric unit")
def __rmul__(self, other):
if other == 1:
return self
raise exceptions.AmuseException("Cannot derive other units from a non numeric unit")
def __rtruediv__(self, other):
if other == 1:
return self
raise exceptions.AmuseException("Cannot derive other units from a non numeric unit")
def __pow__(self, other):
if other == 1:
return self
raise exceptions.AmuseException("Cannot derive other units from a non numeric unit")
def __div__(self, other):
return self.__truediv__(other)
def __rdiv__(self, other):
return self.__rtruediv__(other)
def is_non_numeric(self):
return True
@property
def factor(self):
return 1
@property
def base(self):
return ((1,self),)
def value_to_string(self, value):
return None
def is_valid_value(self, value):
return False
def __eq__(self, other):
if self is other:
return True
elif isinstance(other, nonnumeric_unit):
return NotImplemented
else:
return False
class string_unit(nonnumeric_unit):
"""
String unit objects define quantities with a string value.
These have no physical meaning, but are needed for some
legacy codes. For example the path of a file.
"""
def __init__(self, name, symbol):
nonnumeric_unit.__init__(self, name, symbol)
def value_to_string(self, value):
return '' if value is None else value
def is_valid_value(self, value):
return value is None or isinstance(value, str)
@property
def dtype(self):
return numpy.dtype('S256')
class enumeration_unit(nonnumeric_unit):
DEFINED={}
"""
Enumeration unit objects define a fixed set of quantities.
    A quantity with an enumeration_unit can only have a
value defined in the set of values of the enumeration_unit.
:argument possible_values: A sequence or iterable with all
the possible values. If None the possible values are
integers ranging from 0 to the length of the
names_for_values argument
:argument names_for_values: A sequence of strings defining a
display name for each value. If None the names are the
        string values of the values in the possible_values argument
Examples
>>> my_unit = enumeration_unit('my_unit','my_unit', [1,2,5], ["star","gas","planet"])
>>> 2 | my_unit
quantity<2 - gas>
>>> list(my_unit.quantities())
[quantity<1 - star>, quantity<2 - gas>, quantity<5 - planet>]
>>> 3 | my_unit
Traceback (most recent call last):
...
AmuseException: <3> is not a valid value for unit<my_unit>
Or, with default values:
>>> my_unit = enumeration_unit('my_unit','my_unit', None, ["star","gas","planet"])
>>> 2 | my_unit
quantity<2 - planet>
>>> list(my_unit.quantities())
[quantity<0 - star>, quantity<1 - gas>, quantity<2 - planet>]
"""
def __init__(self, name, symbol, possible_values = None, names_for_values = None):
nonnumeric_unit.__init__(self, name, symbol)
self.possible_values = self._initial_list_of_possible_values(possible_values, names_for_values)
self.names_for_values = self._initial_names_for_values(possible_values, names_for_values)
if not len(self.possible_values) == len(self.names_for_values):
raise exceptions.AmuseException("Must provide equal lenght list for values({0}) and names({1})".format(len(self.possible_values), len(self.names_for_values)))
self.mapping_from_values_to_names = self._initial_mapping_from_values_to_names()
self.DEFINED[name] = self
def _initial_list_of_possible_values(self, possible_values, names_for_values):
if possible_values is None:
if names_for_values is None:
raise exceptions.AmuseException("Must provide a list of values and / or a list of names for each value")
return list(range(len(names_for_values)))
else:
return list(possible_values)
def _initial_mapping_from_values_to_names(self):
result = {}
for value, name in zip(self.possible_values, self.names_for_values):
result[value] = name
return result
def _initial_names_for_values(self, possible_values, names_for_values):
if names_for_values is None:
if possible_values is None:
raise exceptions.AmuseException("Must provide a list of values and / or a list of names for each value")
return [str(x) for x in possible_values]
else:
return list(names_for_values)
def __hash__(self):
return self._hash
def is_valid_value(self, value):
return value in self.mapping_from_values_to_names
def value_to_string(self, value):
return self.mapping_from_values_to_names[value]
def quantities(self):
for x in self.possible_values:
yield x | self
    def __call__(self, string):
        # list.index raises ValueError for unknown names; an index of 0 is a valid match
        try:
            index = self.names_for_values.index(string)
        except ValueError:
            raise exceptions.AmuseException("{0} is not a valid name for {1} enumeration type".format(string, self.name))
        return self.possible_values[index] | self
@property
def dtype(self):
return numpy.dtype('int32')
@classmethod
def get(cls, name):
try:
return cls.DEFINED[name]
except KeyError as ex:
from amuse.units import nbody_system
from amuse.units import si
return cls.DEFINED[name]
def __reduce__(self):
return (get_enumeration_unit_with_name, (self.name,))
class named_unit(unit):
"""
A named_unit object defines an alias for another
unit. When printing a named_unit, the symbol
is shown and not the unit parts. For all other
    operations the named_unit works exactly like
the aliased unit.
:argument name: Long name or description of the unit
:argument symbol: Short name to show when printing units
or quantities
:argument unit: The unit to alias
>>> from amuse.units import si
>>> 60.0 * si.s
unit<60.0 * s>
>>> minute = named_unit("minute","min", 60*si.s)
>>> minute
unit<min>
>>> (20.0 | (60.0 * si.s)).as_quantity_in(minute)
quantity<20.0 min>
"""
def __init__(self, name, symbol, unit):
self.name = name
self.symbol = symbol
self.local_unit = unit
def __str__(self):
return self.symbol
def reference_string(self):
return self.to_simple_form().reference_string()
@late
def factor(self):
return self.local_unit.factor
@late
def base(self):
return self.local_unit.base
def is_none(self):
return self.local_unit.is_none()
class derived_unit(unit, metaclass=MultitonMetaClass):
"""
Abstract base class of derived units. New units
can be derived from base_units. Each operation on
a unit creates a new derived_unit.
"""
pass
class factor_unit(derived_unit):
"""
A factor_unit object defines a unit multiplied by
a number. Do not call this method directly,
factor_unit objects are supposed to be created by
multiplying a number with a unit.
:argument unit: The unit to derive from.
:argument factor: The multiplication factor.
>>> from amuse.units import si
>>> minute = 60.0 * si.s
>>> minute.as_quantity_in(si.s)
quantity<60.0 s>
>>> hour = 60.0 * minute
>>> hour
unit<60.0 * 60.0 * s>
>>> hour.as_quantity_in(si.s)
quantity<3600.0 s>
"""
def __init__(self, factor, unit, name = None, symbol = None):
self.name = name
self.symbol = symbol
self.local_factor = factor
self.local_unit = unit
def __str__(self):
if self.symbol is None:
return str(self.local_factor) + ' * ' + str(self.local_unit)
return self.symbol + str(self.local_unit)
def reference_string(self):
return '(' + str(self.local_factor) + ' * ' + self.local_unit.reference_string() + ')'
@late
def factor(self):
return self.local_factor * self.local_unit.factor
@late
def base(self):
return self.local_unit.base
def get_parts_with_power(self):
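        # The local factor is folded into the first part only, so the overall
        # factor is applied exactly once across the returned parts.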
local_unit_parts = self.local_unit.get_parts_with_power()
result = []
is_first = True
for factor, power, unit in local_unit_parts:
if is_first:
factor *= self.local_factor
is_first = False
result.append( (factor, power, unit) )
return result
class mul_unit(derived_unit):
"""
A mul_unit object defines a unit multiplied by
another unit. Do not call this method directly,
mul_unit objects are supposed to be created by
multiplying units.
:argument left_hand: Left hand side of the multiplication.
:argument right_hand: Right hand side of the multiplication.
>>> from amuse.units import si
>>> area = si.m * si.m
>>> area
unit<m * m>
>>> hectare = (100 * si.m) * (100 * si.m)
>>> hectare.as_quantity_in(area)
quantity<10000.0 m * m>
"""
def __init__(self, left_hand, right_hand):
self.left_hand = left_hand
self.right_hand = right_hand
def __str__(self):
return str(self.left_hand) + ' * ' + str(self.right_hand)
def reference_string(self):
return '(' + self.left_hand.reference_string() + ' * ' + self.right_hand.reference_string() + ')'
@late
def factor(self):
return self.left_hand.factor * self.right_hand.factor
@late
def base(self):
return tuple(
[
x
for x in [
(x[0] + x[1], x[2])
for x in self.combine_bases(self.left_hand.base, self.right_hand.base)
]
if x[0] != 0
]
)
def get_parts_with_power(self):
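        # Merge the parts of both operands: parts referring to the same base
        # unit are combined (factors multiplied, powers added); any remaining
        # right-hand parts are appended unchanged.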
lhs_parts = list(self.left_hand.get_parts_with_power())
rhs_parts = list(self.right_hand.get_parts_with_power())
result = []
for lhs_factor, lhs_power, lhs_unit in lhs_parts:
rhs_index = 0
found_match = False
for rhs_factor, rhs_power, rhs_unit in rhs_parts:
if lhs_unit is rhs_unit:
result.append( (lhs_factor * rhs_factor, lhs_power + rhs_power, lhs_unit,) )
found_match = True
del rhs_parts[rhs_index]
break
rhs_index += 1
if not found_match:
result.append( (lhs_factor, lhs_power, lhs_unit,))
for rhs_factor, rhs_power, rhs_unit in rhs_parts:
result.append( (rhs_factor, rhs_power, rhs_unit,))
return result
class pow_unit(derived_unit):
"""
A pow_unit object defines a unit as
another unit to a specified power.
Do not call this method directly,
pow_unit objects are supposed to be created by
taking powers of units.
:argument power: Power of the unit
:argument unit: The unit to derive from
>>> from amuse.units import si
>>> area = si.m**2
>>> area
unit<m**2>
>>> area.as_quantity_in(si.m * si.m)
quantity<1 m * m>
>>> hectare = (100 * si.m) ** 2
>>> hectare.as_quantity_in(area)
quantity<10000.0 m**2>
"""
def __init__(self, power, unit):
self.power = power
self.local_unit = unit
def __str__(self):
if isinstance(self.local_unit, derived_unit):
return '(' + str(self.local_unit) + ')**' + str(self.power)
else:
return str(self.local_unit) + '**' + str(self.power)
def reference_string(self):
return '(' + self.local_unit.reference_string() + '**' + str(self.power) + ')'
@late
def base(self):
return tuple(
[
x
for x in [
(x[0] * self.power, x[1])
for x in self.local_unit.base
]
if x[0] != 0
]
)
@late
def factor(self):
return self.local_unit.factor ** self.power
def get_parts_with_power(self):
result = []
for factor, power, unit in self.local_unit.get_parts_with_power():
result.append( (factor ** self.power, power * self.power, unit,))
return result
class div_unit(derived_unit):
"""
    A div_unit object defines a unit divided by
    another unit. Do not call this method directly,
    div_unit objects are supposed to be created by
    dividing units.
    :argument left_hand: Left hand side of the division.
    :argument right_hand: Right hand side of the division.
>>> from amuse.units import si
>>> speed = si.m / si.s
>>> speed
unit<m / s>
>>> speed_with_powers = si.m * si.s ** -1
>>> speed.as_quantity_in(speed_with_powers)
quantity<1 m * s**-1>
"""
def __init__(self, left_hand, right_hand):
self.left_hand = left_hand
self.right_hand = right_hand
def __str__(self):
if isinstance(self.right_hand, derived_unit):
return str(self.left_hand) + ' / (' + str(self.right_hand)+')'
else:
            return str(self.left_hand) + ' / ' + str(self.right_hand)
def reference_string(self):
return '(' + self.left_hand.reference_string() + '/' + self.right_hand.reference_string() + ')'
@late
def factor(self):
return self.left_hand.factor * 1.0 / self.right_hand.factor
@late
def base(self):
return tuple(
[
x
for x in [
(x[0] - x[1], x[2])
for x in self.combine_bases(self.left_hand.base, self.right_hand.base)
]
if x[0] != 0
]
)
def get_parts_with_power(self):
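        # Mirror of mul_unit.get_parts_with_power: matching parts are divided
        # out (factors divided, powers subtracted), and any remaining
        # right-hand parts are inverted (reciprocal factor, negated power).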
lhs_parts = list(self.left_hand.get_parts_with_power())
rhs_parts = list(self.right_hand.get_parts_with_power())
result = []
for lhs_factor, lhs_power, lhs_unit in lhs_parts:
rhs_index = 0
found_match = False
for rhs_factor, rhs_power, rhs_unit in rhs_parts:
if lhs_unit is rhs_unit:
result.append( (lhs_factor / rhs_factor, lhs_power - rhs_power, lhs_unit,) )
found_match = True
del rhs_parts[rhs_index]
break
rhs_index += 1
if not found_match:
result.append( (lhs_factor, lhs_power, lhs_unit,))
for rhs_factor, rhs_power, rhs_unit in rhs_parts:
result.append( (1.0 / rhs_factor, -rhs_power, rhs_unit,))
return result
class UnitException(exceptions.AmuseException):
formatstring = "Unit exception: {0}"
class IncompatibleUnitsException(exceptions.AmuseException):
formatstring = "Cannot express {1} in {0}, the units do not have the same bases"
def __init__(self, *arguments):
Exception.__init__(self)
self.arguments = arguments
def get_system_with_name(name):
return system.get(name)
def get_enumeration_unit_with_name(name):
return enumeration_unit.get(name)
def get_base_unit_with_name(system, name):
return system.base(name)
class UnitWithSpecificDtype(named_unit):
def __init__(self, unit, dtype):
self.specific_dtype = dtype
symbol = str(unit) + "_" + str(dtype)
named_unit.__init__(self, symbol, symbol, unit)
@property
def dtype(self):
return self.specific_dtype
@memoize
def unit_with_specific_dtype(unit, dtype):
if unit is None or dtype is None:
return unit
return UnitWithSpecificDtype(unit, dtype)
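# Illustrative usage sketch for unit_with_specific_dtype (assumes the SI
# units module is importable):
#
#   >>> import numpy
#   >>> from amuse.units import si
#   >>> m32 = unit_with_specific_dtype(si.m, numpy.dtype('float32'))
#   >>> m32.dtype
#   dtype('float32')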
|
mmtbx/regression/model_idealization/tst_withmap_04.py | dperl-sol/cctbx_project | 155 | 12755083 | from __future__ import absolute_import, division, print_function
from libtbx import easy_run
import libtbx.load_env
import os.path
import time
# taken from phenix_regression/refinement/ncs/tst_ncs_0.py
pdb_str = """\
CRYST1 100.000 100.000 100.000 90.00 90.00 90.00 P 1
ATOM 1 N ALA A 1 27.344 16.348 30.784 1.00 10.00 N
ATOM 2 CA ALA A 1 26.429 15.281 31.335 1.00 10.00 C
ATOM 3 C ALA A 1 26.610 14.025 30.603 1.00 10.00 C
ATOM 4 O ALA A 1 26.479 13.979 29.356 1.00 10.00 O
ATOM 5 CB ALA A 1 24.874 15.800 31.300 1.00 10.00 C
ATOM 1 N ALA A 2 26.812 12.925 31.345 1.00 10.00 N
ATOM 2 CA ALA A 2 27.084 11.577 30.797 1.00 10.00 C
ATOM 3 C ALA A 2 25.856 10.737 30.707 1.00 10.00 C
ATOM 4 O ALA A 2 25.741 9.860 29.891 1.00 10.00 O
ATOM 5 CB ALA A 2 28.151 10.950 31.721 1.00 10.00 C
ATOM 1 N ALA A 3 25.009 10.973 31.714 1.00 10.00 N
ATOM 2 CA ALA A 3 23.621 10.543 31.560 1.00 10.00 C
ATOM 3 C ALA A 3 23.023 11.008 30.214 1.00 10.00 C
ATOM 4 O ALA A 3 22.786 10.233 29.249 1.00 10.00 O
ATOM 5 CB ALA A 3 22.760 11.040 32.654 1.00 10.00 C
ATOM 1 N ALA A 4 22.798 12.304 30.175 1.00 10.00 N
ATOM 2 CA ALA A 4 22.329 13.084 28.981 1.00 10.00 C
ATOM 3 C ALA A 4 23.116 12.816 27.721 1.00 10.00 C
ATOM 4 O ALA A 4 22.533 12.805 26.670 1.00 10.00 O
ATOM 5 CB ALA A 4 22.372 14.607 29.318 1.00 10.00 C
ATOM 1 N ALA A 5 24.448 12.622 27.823 1.00 10.00 N
ATOM 2 CA ALA A 5 25.228 12.407 26.573 1.00 10.00 C
ATOM 3 C ALA A 5 25.222 10.947 26.143 1.00 10.00 C
ATOM 4 O ALA A 5 25.386 10.664 24.983 1.00 10.00 O
ATOM 5 CB ALA A 5 26.634 12.906 26.746 1.00 10.00 C
ATOM 1 N ALA A 6 24.976 10.048 27.071 1.00 10.00 N
ATOM 2 CA ALA A 6 24.857 8.614 26.805 1.00 10.00 C
ATOM 3 C ALA A 6 23.537 8.349 26.054 1.00 10.00 C
ATOM 4 O ALA A 6 23.439 7.570 25.057 1.00 10.00 O
ATOM 5 CB ALA A 6 24.874 7.845 28.114 1.00 10.00 C
ATOM 1 N ALA A 7 22.542 9.039 26.580 1.00 10.00 N
ATOM 2 CA ALA A 7 21.228 8.903 25.942 1.00 10.00 C
ATOM 3 C ALA A 7 21.329 9.698 24.628 1.00 10.00 C
ATOM 4 O ALA A 7 20.707 9.383 23.632 1.00 10.00 O
ATOM 5 CB ALA A 7 20.146 9.465 26.862 1.00 10.00 C
ATOM 1 N ALA A 8 22.181 10.696 24.613 1.00 10.00 N
ATOM 2 CA ALA A 8 22.526 11.372 23.378 1.00 10.00 C
ATOM 3 C ALA A 8 23.351 10.555 22.448 1.00 10.00 C
ATOM 4 O ALA A 8 23.618 10.883 21.252 1.00 10.00 O
ATOM 5 CB ALA A 8 23.168 12.697 23.693 1.00 10.00 C
ATOM 1 N ALA A 9 23.864 9.423 22.961 1.00 10.00 N
ATOM 2 CA ALA A 9 24.785 8.541 22.264 1.00 10.00 C
ATOM 3 C ALA A 9 24.057 7.451 21.484 1.00 10.00 C
ATOM 4 O ALA A 9 24.127 7.381 20.257 1.00 10.00 O
ATOM 5 CB ALA A 9 25.815 7.975 23.249 1.00 10.00 C
ATOM 1 N ALA A 10 23.518 6.548 22.264 1.00 10.00 N
ATOM 2 CA ALA A 10 22.629 5.525 21.690 1.00 10.00 C
ATOM 3 C ALA A 10 21.549 6.308 21.009 1.00 10.00 C
ATOM 4 O ALA A 10 21.114 5.933 19.930 1.00 10.00 O
ATOM 5 CB ALA A 10 22.057 4.714 22.784 1.00 10.00 C
ATOM 1 N ALA A 11 21.120 7.452 21.541 1.00 10.00 N
ATOM 2 CA ALA A 11 20.186 8.260 20.874 1.00 10.00 C
ATOM 3 C ALA A 11 20.978 9.215 19.937 1.00 10.00 C
ATOM 4 O ALA A 11 20.386 10.177 19.507 1.00 10.00 O
ATOM 5 CB ALA A 11 19.295 9.031 21.867 1.00 10.00 C
ATOM 1 N ALA A 12 22.222 8.932 19.598 1.00 10.00 N
ATOM 2 CA ALA A 12 22.896 9.709 18.563 1.00 10.00 C
ATOM 3 C ALA A 12 22.924 8.925 17.308 1.00 10.00 C
ATOM 4 O ALA A 12 22.982 9.445 16.193 1.00 10.00 O
ATOM 5 CB ALA A 12 24.294 10.138 18.994 1.00 10.00 C
ATOM 1 N ALA A 13 22.951 7.633 17.508 1.00 10.00 N
ATOM 2 CA ALA A 13 22.709 6.629 16.554 1.00 10.00 C
ATOM 3 C ALA A 13 21.275 6.673 16.206 1.00 10.00 C
ATOM 4 O ALA A 13 20.870 6.521 15.092 1.00 10.00 O
ATOM 5 CB ALA A 13 23.077 5.254 17.025 1.00 10.00 C
ATOM 1 N ALA A 14 20.471 6.929 17.226 1.00 10.00 N
ATOM 2 CA ALA A 14 19.039 6.992 17.025 1.00 10.00 C
ATOM 3 C ALA A 14 18.676 8.380 16.528 1.00 10.00 C
ATOM 4 O ALA A 14 17.748 8.556 15.761 1.00 10.00 O
ATOM 5 CB ALA A 14 18.240 6.715 18.272 1.00 10.00 C
ATOM 1 N ALA A 15 19.381 9.390 17.055 1.00 10.00 N
ATOM 2 CA ALA A 15 19.204 10.743 16.669 1.00 10.00 C
ATOM 3 C ALA A 15 19.407 10.807 15.174 1.00 10.00 C
ATOM 4 O ALA A 15 18.402 10.987 14.424 1.00 10.00 O
ATOM 5 CB ALA A 15 20.190 11.665 17.493 1.00 10.00 C
ATOM 1 N ALA A 16 20.702 10.653 14.831 1.00 10.00 N
ATOM 2 CA ALA A 16 21.206 10.546 13.480 1.00 10.00 C
ATOM 3 C ALA A 16 20.484 9.612 12.585 1.00 10.00 C
ATOM 4 O ALA A 16 20.380 9.918 11.386 1.00 10.00 O
ATOM 5 CB ALA A 16 22.631 10.174 13.475 1.00 10.00 C
ATOM 1 N ALA A 17 20.064 8.475 13.175 1.00 10.00 N
ATOM 2 CA ALA A 17 19.355 7.473 12.426 1.00 10.00 C
ATOM 3 C ALA A 17 17.924 7.807 12.064 1.00 10.00 C
ATOM 4 O ALA A 17 17.535 7.721 10.871 1.00 10.00 O
ATOM 5 CB ALA A 17 19.359 6.123 13.216 1.00 10.00 C
ATOM 1 N ALA A 18 17.152 8.115 13.031 1.00 10.00 N
ATOM 2 CA ALA A 18 15.835 8.594 12.861 1.00 10.00 C
ATOM 3 C ALA A 18 15.811 9.835 11.861 1.00 10.00 C
ATOM 4 O ALA A 18 15.020 9.889 10.868 1.00 10.00 O
ATOM 5 CB ALA A 18 15.272 8.918 14.234 1.00 10.00 C
ATOM 1 N ALA A 19 16.661 10.845 12.100 1.00 10.00 N
ATOM 2 CA ALA A 19 16.435 12.061 11.275 1.00 10.00 C
ATOM 3 C ALA A 19 17.004 11.815 9.833 1.00 10.00 C
ATOM 4 O ALA A 19 16.334 12.117 8.857 1.00 10.00 O
ATOM 5 CB ALA A 19 17.059 13.242 11.866 1.00 10.00 C
ATOM 1 N ALA A 20 18.191 11.200 9.841 1.00 10.00 N
ATOM 2 CA ALA A 20 19.091 11.247 8.697 1.00 10.00 C
ATOM 3 C ALA A 20 19.549 9.835 8.231 1.00 10.00 C
ATOM 4 O ALA A 20 20.670 9.692 7.663 1.00 10.00 O
ATOM 5 CB ALA A 20 20.326 12.105 9.035 1.00 10.00 C
ATOM 1 N ALA A 21 18.654 8.850 8.523 1.00 10.00 N
ATOM 2 CA ALA A 21 18.827 7.437 8.168 1.00 10.00 C
ATOM 3 C ALA A 21 17.565 6.607 8.282 1.00 10.00 C
ATOM 4 O ALA A 21 16.485 6.992 7.820 1.00 10.00 O
ATOM 5 CB ALA A 21 19.888 6.838 8.983 1.00 10.00 C
TER
ATOM 1 N ALA B 1 16.348 17.420 35.897 1.00 50.00 N
ATOM 2 CA ALA B 1 16.783 16.083 36.351 1.00 50.00 C
ATOM 3 C ALA B 1 16.794 15.172 35.139 1.00 50.00 C
ATOM 4 O ALA B 1 16.167 15.477 34.133 1.00 50.00 O
ATOM 5 CB ALA B 1 15.785 15.534 37.468 1.00 50.00 C
ATOM 1 N ALA B 2 17.491 14.058 35.255 1.00 50.00 N
ATOM 2 CA ALA B 2 17.790 13.267 34.127 1.00 50.00 C
ATOM 3 C ALA B 2 16.716 12.232 33.688 1.00 50.00 C
ATOM 4 O ALA B 2 16.676 11.869 32.543 1.00 50.00 O
ATOM 5 CB ALA B 2 19.125 12.656 34.415 1.00 50.00 C
ATOM 1 N ALA B 3 15.904 11.687 34.605 1.00 50.00 N
ATOM 2 CA ALA B 3 14.798 10.901 34.173 1.00 50.00 C
ATOM 3 C ALA B 3 13.740 11.723 33.536 1.00 50.00 C
ATOM 4 O ALA B 3 13.398 11.501 32.356 1.00 50.00 O
ATOM 5 CB ALA B 3 14.148 10.176 35.403 1.00 50.00 C
ATOM 1 N ALA B 4 13.239 12.708 34.247 1.00 50.00 N
ATOM 2 CA ALA B 4 12.158 13.487 33.709 1.00 50.00 C
ATOM 3 C ALA B 4 12.674 14.248 32.495 1.00 50.00 C
ATOM 4 O ALA B 4 11.935 14.376 31.526 1.00 50.00 O
ATOM 5 CB ALA B 4 11.553 14.432 34.712 1.00 50.00 C
ATOM 1 N ALA B 5 13.947 14.627 32.479 1.00 50.00 N
ATOM 2 CA ALA B 5 14.416 15.490 31.405 1.00 50.00 C
ATOM 3 C ALA B 5 14.960 14.730 30.186 1.00 50.00 C
ATOM 4 O ALA B 5 14.575 14.940 29.054 1.00 50.00 O
ATOM 5 CB ALA B 5 15.464 16.431 31.928 1.00 50.00 C
ATOM 1 N ALA B 6 15.867 13.827 30.546 1.00 50.00 N
ATOM 2 CA ALA B 6 16.575 12.918 29.615 1.00 50.00 C
ATOM 3 C ALA B 6 15.465 12.002 28.975 1.00 50.00 C
ATOM 4 O ALA B 6 15.450 11.709 27.742 1.00 50.00 O
ATOM 5 CB ALA B 6 17.632 12.157 30.362 1.00 50.00 C
ATOM 1 N ALA B 7 14.542 11.597 29.783 1.00 50.00 N
ATOM 2 CA ALA B 7 13.529 10.701 29.277 1.00 50.00 C
ATOM 3 C ALA B 7 12.175 11.364 28.835 1.00 50.00 C
ATOM 4 O ALA B 7 11.466 10.770 27.969 1.00 50.00 O
ATOM 5 CB ALA B 7 13.161 9.644 30.376 1.00 50.00 C
ATOM 1 N ALA B 8 11.753 12.455 29.452 1.00 50.00 N
ATOM 2 CA ALA B 8 10.536 13.193 28.972 1.00 50.00 C
ATOM 3 C ALA B 8 10.919 13.923 27.670 1.00 50.00 C
ATOM 4 O ALA B 8 10.171 14.036 26.729 1.00 50.00 O
ATOM 5 CB ALA B 8 10.032 14.139 30.014 1.00 50.00 C
ATOM 1 N ALA B 9 12.185 14.247 27.579 1.00 50.00 N
ATOM 2 CA ALA B 9 12.754 14.849 26.385 1.00 50.00 C
ATOM 3 C ALA B 9 12.892 13.859 25.320 1.00 50.00 C
ATOM 4 O ALA B 9 12.234 13.980 24.290 1.00 50.00 O
ATOM 5 CB ALA B 9 14.108 15.448 26.695 1.00 50.00 C
ATOM 1 N ALA B 10 13.655 12.794 25.566 1.00 50.00 N
ATOM 2 CA ALA B 10 13.831 11.803 24.529 1.00 50.00 C
ATOM 3 C ALA B 10 12.551 10.987 24.319 1.00 50.00 C
ATOM 4 O ALA B 10 12.514 10.237 23.390 1.00 50.00 O
ATOM 5 CB ALA B 10 15.024 10.750 24.992 1.00 50.00 C
ATOM 1 N ALA B 11 11.558 11.184 25.126 1.00 50.00 N
ATOM 2 CA ALA B 11 10.334 10.457 24.931 1.00 50.00 C
ATOM 3 C ALA B 11 9.326 11.284 24.168 1.00 50.00 C
ATOM 4 O ALA B 11 8.566 10.707 23.476 1.00 50.00 O
ATOM 5 CB ALA B 11 9.644 10.042 26.251 1.00 50.00 C
ATOM 1 N ALA B 12 9.277 12.611 24.334 1.00 50.00 N
ATOM 2 CA ALA B 12 8.354 13.375 23.644 1.00 50.00 C
ATOM 3 C ALA B 12 9.019 13.546 22.264 1.00 50.00 C
ATOM 4 O ALA B 12 8.400 13.891 21.317 1.00 50.00 O
ATOM 5 CB ALA B 12 8.056 14.678 24.287 1.00 50.00 C
ATOM 1 N ALA B 13 10.333 13.339 22.264 1.00 50.00 N
ATOM 2 CA ALA B 13 11.239 13.471 21.127 1.00 50.00 C
ATOM 3 C ALA B 13 11.096 12.161 20.325 1.00 50.00 C
ATOM 4 O ALA B 13 11.145 12.175 19.123 1.00 50.00 O
ATOM 5 CB ALA B 13 12.584 13.665 21.596 1.00 50.00 C
ATOM 1 N ALA B 14 11.051 11.078 21.086 1.00 50.00 N
ATOM 2 CA ALA B 14 10.953 9.771 20.454 1.00 50.00 C
ATOM 3 C ALA B 14 9.550 9.463 20.117 1.00 50.00 C
ATOM 4 O ALA B 14 9.233 8.571 19.367 1.00 50.00 O
ATOM 5 CB ALA B 14 11.461 8.697 21.413 1.00 50.00 C
ATOM 1 N ALA B 15 8.669 10.215 20.743 1.00 50.00 N
ATOM 2 CA ALA B 15 7.282 10.010 20.486 1.00 50.00 C
ATOM 3 C ALA B 15 6.825 10.982 19.376 1.00 50.00 C
ATOM 4 O ALA B 15 5.855 10.783 18.619 1.00 50.00 O
ATOM 5 CB ALA B 15 6.367 10.306 21.797 1.00 50.00 C
ATOM 1 N ALA B 16 7.511 12.143 19.430 1.00 50.00 N
ATOM 2 CA ALA B 16 7.233 13.302 18.551 1.00 50.00 C
ATOM 3 C ALA B 16 7.912 13.082 17.205 1.00 50.00 C
ATOM 4 O ALA B 16 7.492 13.573 16.111 1.00 50.00 O
ATOM 5 CB ALA B 16 7.762 14.594 19.165 1.00 50.00 C
ATOM 1 N ALA B 17 9.071 12.427 17.269 1.00 50.00 N
ATOM 2 CA ALA B 17 9.595 11.771 16.091 1.00 50.00 C
ATOM 3 C ALA B 17 8.883 10.519 15.763 1.00 50.00 C
ATOM 4 O ALA B 17 8.890 10.193 14.597 1.00 50.00 O
ATOM 5 CB ALA B 17 11.046 11.518 16.265 1.00 50.00 C
ATOM 1 N ALA B 18 8.315 9.809 16.722 1.00 50.00 N
ATOM 2 CA ALA B 18 7.515 8.647 16.448 1.00 50.00 C
ATOM 3 C ALA B 18 6.253 9.063 15.707 1.00 50.00 C
ATOM 4 O ALA B 18 5.559 8.173 15.198 1.00 50.00 O
ATOM 5 CB ALA B 18 7.129 7.915 17.695 1.00 50.00 C
ATOM 1 N ALA B 19 5.866 10.332 15.772 1.00 50.00 N
ATOM 2 CA ALA B 19 4.686 10.808 15.089 1.00 50.00 C
ATOM 3 C ALA B 19 5.011 11.578 13.803 1.00 50.00 C
ATOM 4 O ALA B 19 4.291 11.514 12.837 1.00 50.00 O
ATOM 5 CB ALA B 19 3.854 11.710 15.960 1.00 50.00 C
ATOM 1 N ALA B 20 6.176 12.195 13.822 1.00 50.00 N
ATOM 2 CA ALA B 20 6.614 13.121 12.789 1.00 50.00 C
ATOM 3 C ALA B 20 7.933 12.759 12.098 1.00 50.00 C
ATOM 4 O ALA B 20 8.620 13.613 11.585 1.00 50.00 O
ATOM 5 CB ALA B 20 6.823 14.498 13.449 1.00 50.00 C
ATOM 1 N ALA B 21 8.284 11.511 12.050 1.00 50.00 N
ATOM 2 CA ALA B 21 9.513 11.117 11.323 1.00 50.00 C
ATOM 3 C ALA B 21 9.313 9.628 11.029 1.00 50.00 C
ATOM 4 O ALA B 21 9.731 8.751 11.795 1.00 50.00 O
ATOM 5 CB ALA B 21 10.799 11.332 12.178 1.00 50.00 C
TER
"""
def exercise_04(prefix="tst_mi_map_test_04"):
"""
Run with reference map.
    Check that NCS in the model is handled correctly, without crystal symmetry.
"""
# without cryst
pdb_file = open("%s_start.pdb" % prefix, "w")
pdb_file.write(pdb_str)
pdb_file.close()
cmd = " ".join([
"phenix.model_idealization",
"%s_start.pdb" % prefix,
"use_map_for_reference=True",
"loop_idealization.number_of_ccd_trials=1",
"number_of_refinement_cycles=1",
"n_macro=1",
"debug=True",
">%s.log" % prefix])
print(cmd)
assert not easy_run.call(cmd)
assert os.path.isfile("%s_start.pdb_all_idealized.pdb" % prefix)
res_log = open("%s.log" % prefix, "r")
log_lines = res_log.readlines()
# NCS constraints with map are not implemented yet
for l in [
# "Using ncs\n",
"Using map as reference\n",
" Minimizing... (NCS)\n",
# "Ramachandran outliers: 0.00 0.00 0.00 0.00 0.00\n",
"All done.\n"]:
assert l in log_lines, "'%s' not in log file." % l
res_log.close()
if (__name__ == "__main__"):
t0 = time.time()
if (not libtbx.env.has_module(name="probe")):
print("Skipping: probe not configured")
else:
exercise_04()
print("Time: %.2f" % (time.time() - t0))
print("OK")
|
RecoEgamma/ElectronIdentification/python/egmPatElectronIDs_cfi.py | ckamtsikis/cmssw | 852 | 12755091 | <gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
from PhysicsTools.SelectorUtils.centralIDRegistry import central_id_registry
from PhysicsTools.SelectorUtils.trivialCutFlow_cff import *
trivialCutFlowMD5 = central_id_registry.getMD5FromName(trivialCutFlow.idName)
egmPatElectronIDs = cms.EDProducer(
"VersionedPatElectronIdProducer",
physicsObjectSrc = cms.InputTag('patElectrons'),
physicsObjectIDs = cms.VPSet( cms.PSet( idDefinition = trivialCutFlow,
idMD5 = cms.string(trivialCutFlowMD5) )
)
)
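# Illustrative usage sketch: in a full configuration this producer would
# typically be attached to a path of the enclosing cms.Process, e.g.
#   process.egmPatElectronIDPath = cms.Path(process.egmPatElectronIDs)
# (process and path names here are placeholders).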
|
corehq/apps/builds/fixtures.py | dimagilg/commcare-hq | 471 | 12755094 | import json
commcare_build_config = json.loads("""{
"_id": "config--commcare-builds",
"doc_type": "CommCareBuildConfig",
"preview": {
"version": "1.2.1",
"build_number": null,
"latest": true
},
"defaults": [{
"version": "1.2.1",
"build_number": null,
"latest": true
}, {
"version": "2.0.0",
"build_number": null,
"latest": true
}],
"application_versions": ["1.0", "2.0"],
"menu": [
{
"build": {
"version": "1.1.1",
"build_number": null,
"latest": true
},
"label": "CommCare 1.1.1"
},
{
"build": {
"version": "1.2.1",
"build_number": null,
"latest": true
},
"label": "CommCare 1.2.1"
},
{
"build": {
"version": "1.3.0",
"build_number": null,
"latest": true
},
"label": "CommCare 1.3 (RC5)"
},
{
"build": {
"version": "2.0.0",
"build_number": null,
"latest": true
},
"label": "CommCare 2.0 (unstable)"
}
],
"ID": "config--commcare-builds"
}""")
|
staffjoy/resources/session.py | Joey-Wondersign/Staffjoy-suite-Joey | 890 | 12755096 | from staffjoy.resource import Resource
class Session(Resource):
"""User session"""
PATH = "users/{user_id}/sessions/{session_id}"
ID_NAME = "session_id"
|
py4j-python/src/py4j/tests/memory_leak_test.py | dHannasch/py4j | 3,301 | 12755123 | <reponame>dHannasch/py4j
# -*- coding: UTF-8 -*-
from contextlib import contextmanager
import gc
from multiprocessing import Process
import subprocess
import unittest
from py4j.java_gateway import (
JavaGateway, GatewayParameters, CallbackServerParameters,
DEFAULT_PORT, DEFAULT_PYTHON_PROXY_PORT)
from py4j.clientserver import (
ClientServer, JavaParameters, PythonParameters)
from py4j.tests.java_gateway_test import (
PY4J_JAVA_PATH, check_connection, sleep)
from py4j.tests.py4j_callback_recursive_example import HelloState
from py4j.tests.instrumented import (
InstrJavaGateway, InstrumentedPythonPing, register_creation,
CREATED, FINALIZED, MEMORY_HOOKS, InstrClientServer)
def start_instrumented_gateway_server():
subprocess.call([
"java", "-Xmx512m", "-cp", PY4J_JAVA_PATH,
"py4j.instrumented.InstrumentedApplication"])
def start_instrumented_clientserver():
subprocess.call([
"java", "-Xmx512m", "-cp", PY4J_JAVA_PATH,
"py4j.instrumented.InstrumentedClientServerApplication"])
def start_gateway_server_example_app_process(start_gateway_server=True):
# XXX DO NOT FORGET TO KILL THE PROCESS IF THE TEST DOES NOT SUCCEED
if start_gateway_server:
p = Process(target=start_instrumented_gateway_server)
else:
p = Process(target=start_instrumented_clientserver)
p.start()
sleep()
check_connection()
return p
@contextmanager
def gateway_server_example_app_process(start_gateway_server=True):
p = start_gateway_server_example_app_process(start_gateway_server)
try:
yield p
finally:
p.join()
class HelloState2(HelloState):
def __init__(self, run_gc=True):
self.gateway = None
self.run_gc = run_gc
super(HelloState2, self).__init__()
register_creation(self)
def _play_with_jvm(self):
al = self.gateway.jvm.java.util.ArrayList()
al.append("Hello World")
obj = self.gateway.jvm.py4j.\
instrumented.InstrumentedObject("test")
al.append(obj)
return str(al)
def sayHello(self, int_value=None, string_value=None):
self._play_with_jvm()
if self.run_gc:
python_gc()
return super(HelloState2, self).sayHello(
int_value, string_value)
class Java:
implements = ["py4j.examples.IHello"]
def assert_python_memory(test, size):
test.assertEqual(size, len(CREATED))
test.assertEqual(size, len(FINALIZED))
test.assertEqual(set(CREATED), set(FINALIZED))
def python_gc():
"""Runs the gc three times to ensure that all circular reference are
correctly removed.
"""
for i in range(3):
gc.collect()
class GatewayServerTest(unittest.TestCase):
def tearDown(self):
MEMORY_HOOKS.clear()
CREATED.clear()
FINALIZED.clear()
def testPythonToJava(self):
def work_with_object(gateway):
obj = gateway.jvm.py4j.\
instrumented.InstrumentedObject("test")
return str(obj)
def internal_work():
gateway2 = InstrJavaGateway(gateway_parameters=GatewayParameters(
port=DEFAULT_PORT+5))
sleep()
work_with_object(gateway2)
python_gc()
sleep()
gateway2.shutdown()
with gateway_server_example_app_process():
gateway = JavaGateway()
gateway.entry_point.startServer2()
internal_work()
python_gc()
gateway.jvm.py4j.instrumented.MetricRegistry.forceFinalization()
sleep()
createdSet = gateway.jvm.py4j.instrumented.MetricRegistry.\
getCreatedObjectsKeySet()
finalizedSet = gateway.jvm.py4j.instrumented.MetricRegistry.\
getFinalizedObjectsKeySet()
# 4 objects: GatewayServer, GatewayConnection, CallbackClient,
# InstrumentedObject
self.assertEqual(4, len(createdSet))
self.assertEqual(4, len(finalizedSet))
self.assertEqual(createdSet, finalizedSet)
gateway.shutdown()
# 4 objects: JavaGateway, GatewayClient, GatewayProperty,
# GatewayConnection
assert_python_memory(self, 4)
def testPythonToJavaToPython(self):
def play_with_ping(gateway):
ping = InstrumentedPythonPing()
pingpong = gateway.jvm.py4j.examples.PingPong()
total = pingpong.start(ping)
return total
def internal_work():
gateway2 = InstrJavaGateway(
gateway_parameters=GatewayParameters(
port=DEFAULT_PORT+5),
callback_server_parameters=CallbackServerParameters(
port=DEFAULT_PYTHON_PROXY_PORT+5))
sleep()
play_with_ping(gateway2)
python_gc()
sleep()
gateway2.shutdown()
with gateway_server_example_app_process():
gateway = JavaGateway()
gateway.entry_point.startServer2()
internal_work()
python_gc()
gateway.jvm.py4j.instrumented.MetricRegistry.forceFinalization()
sleep()
createdSet = gateway.jvm.py4j.instrumented.MetricRegistry.\
getCreatedObjectsKeySet()
finalizedSet = gateway.jvm.py4j.instrumented.MetricRegistry.\
getFinalizedObjectsKeySet()
# 9 objects: GatewayServer, 4 GatewayConnection, CallbackClient,
# 3 CallbackConnection
self.assertEqual(9, len(createdSet))
self.assertEqual(9, len(finalizedSet))
self.assertEqual(createdSet, finalizedSet)
gateway.shutdown()
            # 12 objects: JavaGateway, CallbackServer, GatewayClient,
            # GatewayProperty, PythonPing, 4 GatewayConnection,
            # 3 CallbackConnection. Notice the symmetry
assert_python_memory(self, 12)
def testPythonToJavaToPythonClose(self):
def play_with_ping(gateway):
ping = InstrumentedPythonPing()
pingpong = gateway.jvm.py4j.examples.PingPong()
total = pingpong.start(ping)
return total
def internal_work(assert_memory):
gateway2 = InstrJavaGateway(
gateway_parameters=GatewayParameters(
port=DEFAULT_PORT+5),
callback_server_parameters=CallbackServerParameters(
port=DEFAULT_PYTHON_PROXY_PORT+5))
sleep()
play_with_ping(gateway2)
python_gc()
sleep()
gateway2.close(close_callback_server_connections=True,
keep_callback_server=True)
sleep()
assert_memory()
gateway2.shutdown()
sleep()
with gateway_server_example_app_process():
gateway = JavaGateway()
gateway.entry_point.startServer2()
def perform_memory_tests():
python_gc()
gateway.jvm.py4j.instrumented.MetricRegistry.\
forceFinalization()
sleep()
createdSet = gateway.jvm.py4j.instrumented.MetricRegistry.\
getCreatedObjectsKeySet()
finalizedSet = gateway.jvm.py4j.instrumented.MetricRegistry.\
getFinalizedObjectsKeySet()
# 10 objects: GatewayServer, 4 GatewayConnection,
# CallbackClient, 4 CallbackConnection
self.assertEqual(10, len(createdSet))
                # 13 objects: JavaGateway, CallbackServer, GatewayClient,
# GatewayProperty, PythonPing, 4 GatewayConnection,
# 4 CallbackConnection. Notice the symmetry between callback
# and gateway connections.
self.assertEqual(13, len(CREATED))
# 4 gateway connections, 3 callback connections.
# There is still one callback connection staying around
# following Java finalization that called back Python.
self.assertEqual(7, len(finalizedSet))
# Same amount of connections for the Python side
self.assertEqual(7, len(FINALIZED))
internal_work(perform_memory_tests)
python_gc()
gateway.jvm.py4j.instrumented.MetricRegistry.forceFinalization()
sleep()
gateway.shutdown()
            # 14 objects: JavaGateway, CallbackServer, GatewayClient,
# GatewayProperty, PythonPing, 5 GatewayConnection,
# 4 CallbackConnection. Notice the symmetry
# One more gateway connection created because we called shutdown
# after close (which requires a connection to send a shutdown
# command).
assert_python_memory(self, 14)
def testJavaToPythonToJavaCleanGC(self):
def internal_work(gateway):
hello_state = HelloState2()
gateway2 = InstrJavaGateway(
gateway_parameters=GatewayParameters(
port=DEFAULT_PORT+5),
callback_server_parameters=CallbackServerParameters(
port=DEFAULT_PYTHON_PROXY_PORT+5),
python_server_entry_point=hello_state)
hello_state.gateway = gateway2
sleep()
gateway.entry_point.startServerWithPythonEntry(True)
sleep()
gateway2.shutdown()
# Check that Java correctly called Python
self.assertEqual(2, len(hello_state.calls))
self.assertEqual((None, None), hello_state.calls[0])
self.assertEqual((2, "Hello World"), hello_state.calls[1])
with gateway_server_example_app_process():
gateway = JavaGateway()
internal_work(gateway)
python_gc()
gateway.jvm.py4j.instrumented.MetricRegistry.forceFinalization()
sleep()
createdSet = gateway.jvm.py4j.instrumented.MetricRegistry.\
getCreatedObjectsKeySet()
finalizedSet = gateway.jvm.py4j.instrumented.MetricRegistry.\
getFinalizedObjectsKeySet()
# 6 objects: 2 InstrumentedObject (sayHello called twice), 1
# InstrGatewayServer, 1 CallbackClient, 1 CallbackConnection, 1
# GatewayConnection
self.assertEqual(6, len(createdSet))
self.assertEqual(6, len(finalizedSet))
self.assertEqual(createdSet, finalizedSet)
gateway.shutdown()
# 7 objects: JavaGateway, GatewayClient, CallbackServer,
# GatewayProperty, HelloState, GatewayConnection,
# CallbackConnection
assert_python_memory(self, 7)
def testJavaToPythonToJavaNoGC(self):
def internal_work(gateway):
hello_state = HelloState2(run_gc=False)
gateway2 = InstrJavaGateway(
gateway_parameters=GatewayParameters(
port=DEFAULT_PORT+5),
callback_server_parameters=CallbackServerParameters(
port=DEFAULT_PYTHON_PROXY_PORT+5),
python_server_entry_point=hello_state)
hello_state.gateway = gateway2
sleep()
gateway.entry_point.startServerWithPythonEntry(True)
sleep()
gateway2.shutdown()
# Check that Java correctly called Python
self.assertEqual(2, len(hello_state.calls))
self.assertEqual((None, None), hello_state.calls[0])
self.assertEqual((2, "Hello World"), hello_state.calls[1])
with gateway_server_example_app_process():
gateway = JavaGateway()
# We disable gc to test whether a shut down on one side will
# garbage collect everything.
gc.disable()
internal_work(gateway)
gc.enable()
python_gc()
gateway.jvm.py4j.instrumented.MetricRegistry.forceFinalization()
sleep()
createdSet = gateway.jvm.py4j.instrumented.MetricRegistry.\
getCreatedObjectsKeySet()
finalizedSet = gateway.jvm.py4j.instrumented.MetricRegistry.\
getFinalizedObjectsKeySet()
# 6 objects: 2 InstrumentedObject (sayHello called twice), 1
# InstrGatewayServer, 1 CallbackClient, 1 CallbackConnection, 1
# GatewayConnection
self.assertEqual(6, len(createdSet))
self.assertEqual(6, len(finalizedSet))
self.assertEqual(createdSet, finalizedSet)
gateway.shutdown()
# 7 objects: JavaGateway, GatewayClient, CallbackServer,
# GatewayProperty, HelloState, GatewayConnection,
# CallbackConnection
assert_python_memory(self, 7)
def testJavaToPythonToJavaCleanGCNoShutdown(self):
def internal_work(gateway):
hello_state = HelloState2()
gateway2 = InstrJavaGateway(
gateway_parameters=GatewayParameters(
port=DEFAULT_PORT+5),
callback_server_parameters=CallbackServerParameters(
port=DEFAULT_PYTHON_PROXY_PORT+5),
python_server_entry_point=hello_state)
hello_state.gateway = gateway2
sleep()
gateway.entry_point.startServerWithPythonEntry(False)
sleep()
gateway2.shutdown()
# Check that Java correctly called Python
self.assertEqual(2, len(hello_state.calls))
self.assertEqual((None, None), hello_state.calls[0])
self.assertEqual((2, "Hello World"), hello_state.calls[1])
with gateway_server_example_app_process():
gateway = JavaGateway()
internal_work(gateway)
python_gc()
gateway.jvm.py4j.instrumented.MetricRegistry.forceFinalization()
sleep()
createdSet = gateway.jvm.py4j.instrumented.MetricRegistry.\
getCreatedObjectsKeySet()
finalizedSet = gateway.jvm.py4j.instrumented.MetricRegistry.\
getFinalizedObjectsKeySet()
# 6 objects: 2 InstrumentedObject (sayHello called twice), 1
# InstrGatewayServer, 1 CallbackClient, 1 CallbackConnection, 1
# GatewayConnection
self.assertEqual(6, len(createdSet))
self.assertEqual(6, len(finalizedSet))
self.assertEqual(createdSet, finalizedSet)
gateway.shutdown()
# 7 objects: JavaGateway, GatewayClient, CallbackServer,
# GatewayProperty, HelloState, GatewayConnection,
# CallbackConnection
assert_python_memory(self, 7)
def testJavaToPythonToJavaNoGCNoShutdown(self):
def internal_work(gateway):
hello_state = HelloState2(run_gc=False)
gateway2 = InstrJavaGateway(
gateway_parameters=GatewayParameters(
port=DEFAULT_PORT+5),
callback_server_parameters=CallbackServerParameters(
port=DEFAULT_PYTHON_PROXY_PORT+5),
python_server_entry_point=hello_state)
hello_state.gateway = gateway2
sleep()
gateway.entry_point.startServerWithPythonEntry(False)
sleep()
gateway2.shutdown()
# Check that Java correctly called Python
self.assertEqual(2, len(hello_state.calls))
self.assertEqual((None, None), hello_state.calls[0])
self.assertEqual((2, "Hello World"), hello_state.calls[1])
with gateway_server_example_app_process():
gateway = JavaGateway()
# We disable gc to test whether a shut down on one side will
# garbage collect everything.
gc.disable()
internal_work(gateway)
gc.enable()
python_gc()
gateway.jvm.py4j.instrumented.MetricRegistry.forceFinalization()
sleep()
createdSet = gateway.jvm.py4j.instrumented.MetricRegistry.\
getCreatedObjectsKeySet()
finalizedSet = gateway.jvm.py4j.instrumented.MetricRegistry.\
getFinalizedObjectsKeySet()
# 6 objects: 2 InstrumentedObject (sayHello called twice), 1
# InstrGatewayServer, 1 CallbackClient, 1 CallbackConnection, 1
# GatewayConnection
self.assertEqual(6, len(createdSet))
self.assertEqual(6, len(finalizedSet))
self.assertEqual(createdSet, finalizedSet)
gateway.shutdown()
# 7 objects: JavaGateway, GatewayClient, CallbackServer,
# GatewayProperty, HelloState, GatewayConnection,
# CallbackConnection
assert_python_memory(self, 7)
class ClientServerTest(unittest.TestCase):
def tearDown(self):
MEMORY_HOOKS.clear()
CREATED.clear()
FINALIZED.clear()
def testPythonToJava(self):
def work_with_object(clientserver):
obj = clientserver.jvm.py4j.\
instrumented.InstrumentedObject("test")
return str(obj)
def internal_work():
clientserver2 = InstrClientServer(
JavaParameters(port=DEFAULT_PORT+5),
PythonParameters(port=DEFAULT_PYTHON_PROXY_PORT+5))
sleep()
work_with_object(clientserver2)
python_gc()
sleep()
clientserver2.shutdown()
with gateway_server_example_app_process(False):
clientserver = ClientServer()
clientserver.entry_point.startServer2()
internal_work()
python_gc()
clientserver.jvm.py4j.instrumented.MetricRegistry.\
forceFinalization()
sleep()
createdSet = clientserver.jvm.py4j.instrumented.MetricRegistry.\
getCreatedObjectsKeySet()
finalizedSet = clientserver.jvm.py4j.instrumented.MetricRegistry.\
getFinalizedObjectsKeySet()
# 5 objects: ClientServer, ClientServerConnection, PythonClient,
# JavaServer, InstrumentedObject
self.assertEqual(5, len(createdSet))
self.assertEqual(5, len(finalizedSet))
self.assertEqual(createdSet, finalizedSet)
clientserver.shutdown()
# 5 objects: ClientServer, ClientServerConnection, PythonClient,
# JavaServer, GatewayProperty
assert_python_memory(self, 5)
def testPythonToJavaToPython(self):
def play_with_ping(clientserver):
ping = InstrumentedPythonPing()
pingpong = clientserver.jvm.py4j.examples.PingPong()
total = pingpong.start(ping)
return total
def internal_work():
clientserver2 = InstrClientServer(
JavaParameters(port=DEFAULT_PORT+5),
PythonParameters(port=DEFAULT_PYTHON_PROXY_PORT+5))
sleep()
play_with_ping(clientserver2)
python_gc()
sleep()
clientserver2.shutdown()
with gateway_server_example_app_process(False):
clientserver = ClientServer()
clientserver.entry_point.startServer2()
internal_work()
python_gc()
clientserver.jvm.py4j.instrumented.MetricRegistry.\
forceFinalization()
sleep()
createdSet = clientserver.jvm.py4j.instrumented.MetricRegistry.\
getCreatedObjectsKeySet()
finalizedSet = clientserver.jvm.py4j.instrumented.MetricRegistry.\
getFinalizedObjectsKeySet()
# 4 objects: ClientServer, ClientServerConnection, JavaServer,
# PythonClient
self.assertEqual(4, len(createdSet))
self.assertEqual(4, len(finalizedSet))
self.assertEqual(createdSet, finalizedSet)
clientserver.shutdown()
# 6 objects: ClientServer, PythonServer, JavaClient,
# GatewayProperty, PythonPing, ClientServerConnection
def testPythonToJavaToPythonClose(self):
def play_with_ping(clientserver):
ping = InstrumentedPythonPing()
pingpong = clientserver.jvm.py4j.examples.PingPong()
total = pingpong.start(ping)
return total
def internal_work(assert_memory):
clientserver2 = InstrClientServer(
JavaParameters(port=DEFAULT_PORT+5),
PythonParameters(port=DEFAULT_PYTHON_PROXY_PORT+5))
sleep()
play_with_ping(clientserver2)
python_gc()
sleep()
clientserver2.close(
close_callback_server_connections=True,
keep_callback_server=True)
sleep()
assert_memory()
clientserver2.shutdown()
sleep()
with gateway_server_example_app_process(False):
clientserver = ClientServer()
clientserver.entry_point.startServer2()
def perform_memory_tests():
python_gc()
clientserver.jvm.py4j.instrumented.MetricRegistry.\
forceFinalization()
sleep()
createdSet = clientserver.jvm.py4j.instrumented.\
MetricRegistry.getCreatedObjectsKeySet()
finalizedSet = clientserver.jvm.py4j.instrumented.\
MetricRegistry.getFinalizedObjectsKeySet()
# 6 objects: ClientServer, JavaServer,
# PythonClient, 3 ClientServerConnection.
self.assertEqual(6, len(createdSet))
# Should be 2: ClientServer, 1 ClientServerConnection
# But for some reasons, Java refuses to collect the
# clientserverconnection even though there are no strong
# references.
self.assertEqual(1, len(finalizedSet))
# 8 objects: ClientServer, PythonServer, JavaClient,
# GatewayProperty, PythonPing, 3 ClientServerConnection
self.assertEqual(8, len(CREATED))
# PythonPing + ClientServerConnection
self.assertEqual(2, len(FINALIZED))
internal_work(perform_memory_tests)
python_gc()
clientserver.jvm.py4j.instrumented.MetricRegistry.\
forceFinalization()
sleep()
clientserver.shutdown()
# 9 objects: ClientServer, PythonServer, JavaClient,
# GatewayProperty, PythonPing, 4 ClientServerConnection
assert_python_memory(self, 9)
def testJavaToPythonToJavaCleanGC(self):
def internal_work(clientserver):
hello_state = HelloState2()
clientserver2 = InstrClientServer(
JavaParameters(port=DEFAULT_PORT+5),
PythonParameters(port=DEFAULT_PYTHON_PROXY_PORT+5),
python_server_entry_point=hello_state)
hello_state.gateway = clientserver2
sleep()
clientserver.entry_point.startServerWithPythonEntry(True)
sleep()
clientserver2.shutdown()
# Check that Java correctly called Python
self.assertEqual(2, len(hello_state.calls))
self.assertEqual((None, None), hello_state.calls[0])
self.assertEqual((2, "Hello World"), hello_state.calls[1])
with gateway_server_example_app_process(False):
clientserver = ClientServer()
internal_work(clientserver)
python_gc()
clientserver.jvm.py4j.instrumented.MetricRegistry.\
forceFinalization()
sleep()
createdSet = clientserver.jvm.py4j.instrumented.MetricRegistry.\
getCreatedObjectsKeySet()
finalizedSet = clientserver.jvm.py4j.instrumented.MetricRegistry.\
getFinalizedObjectsKeySet()
# 7 objects: 2 InstrumentedObject (sayHello called twice), 1
# JavaServer, 1 PythonClient, 1 ClientServer, 2
# ClientServerConnection (1 to call sayHello)
self.assertEqual(6, len(createdSet))
self.assertEqual(6, len(finalizedSet))
self.assertEqual(createdSet, finalizedSet)
clientserver.shutdown()
# 8 objects: ClientServer (ok), PythonServer (ok), JavaClient,
# GatewayProperty, HelloState (ok), 3 ClientServer Connections (1)
assert_python_memory(self, 7)
def testJavaToPythonToJavaNoGC(self):
def internal_work(clientserver):
hello_state = HelloState2()
clientserver2 = InstrClientServer(
JavaParameters(port=DEFAULT_PORT+5),
PythonParameters(port=DEFAULT_PYTHON_PROXY_PORT+5),
python_server_entry_point=hello_state)
hello_state.gateway = clientserver2
sleep()
clientserver.entry_point.startServerWithPythonEntry(True)
sleep()
clientserver2.shutdown()
# Check that Java correctly called Python
self.assertEqual(2, len(hello_state.calls))
self.assertEqual((None, None), hello_state.calls[0])
self.assertEqual((2, "Hello World"), hello_state.calls[1])
with gateway_server_example_app_process(False):
clientserver = ClientServer()
# We disable gc to test whether a shut down on one side will
# garbage collect everything.
gc.disable()
internal_work(clientserver)
gc.enable()
python_gc()
clientserver.jvm.py4j.instrumented.MetricRegistry.\
forceFinalization()
sleep()
createdSet = clientserver.jvm.py4j.instrumented.MetricRegistry.\
getCreatedObjectsKeySet()
finalizedSet = clientserver.jvm.py4j.instrumented.MetricRegistry.\
getFinalizedObjectsKeySet()
# 7 objects: 2 InstrumentedObject (sayHello called twice), 1
# JavaServer, 1 PythonClient, 1 ClientServer, 2
# ClientServerConnection (1 to call sayHello)
self.assertEqual(6, len(createdSet))
self.assertEqual(6, len(finalizedSet))
self.assertEqual(createdSet, finalizedSet)
clientserver.shutdown()
# 8 objects: ClientServer (ok), PythonServer (ok), JavaClient,
# GatewayProperty, HelloState (ok), 3 ClientServer Connections (2)
assert_python_memory(self, 7)
def testJavaToPythonToJavaCleanGCNoShutdown(self):
def internal_work(clientserver):
hello_state = HelloState2()
clientserver2 = InstrClientServer(
JavaParameters(port=DEFAULT_PORT+5),
PythonParameters(port=DEFAULT_PYTHON_PROXY_PORT+5),
python_server_entry_point=hello_state)
hello_state.gateway = clientserver2
sleep()
clientserver.entry_point.startServerWithPythonEntry(False)
sleep()
clientserver2.shutdown()
# Check that Java correctly called Python
self.assertEqual(2, len(hello_state.calls))
self.assertEqual((None, None), hello_state.calls[0])
self.assertEqual((2, "Hello World"), hello_state.calls[1])
with gateway_server_example_app_process(False):
clientserver = ClientServer()
# We disable gc to test whether a shut down on one side will
# garbage collect everything.
internal_work(clientserver)
python_gc()
clientserver.jvm.py4j.instrumented.MetricRegistry.\
forceFinalization()
sleep()
createdSet = clientserver.jvm.py4j.instrumented.MetricRegistry.\
getCreatedObjectsKeySet()
finalizedSet = clientserver.jvm.py4j.instrumented.MetricRegistry.\
getFinalizedObjectsKeySet()
# 8 objects: 2 InstrumentedObject (sayHello called twice), 1
# JavaServer, 1 PythonClient, 1 ClientServer, 3
# ClientServerConnection (1 to call sayHello,
# 1 that receives shutdown command)
self.assertEqual(7, len(createdSet))
self.assertEqual(7, len(finalizedSet))
self.assertEqual(createdSet, finalizedSet)
clientserver.shutdown()
# 8 objects: ClientServer (ok), PythonServer (ok), JavaClient,
# GatewayProperty, HelloState (ok), 3 ClientServer Connections (2)
assert_python_memory(self, 7)
def testJavaToPythonToJavaNoGCNoShutdown(self):
def internal_work(clientserver):
hello_state = HelloState2()
clientserver2 = InstrClientServer(
JavaParameters(port=DEFAULT_PORT+5),
PythonParameters(port=DEFAULT_PYTHON_PROXY_PORT+5),
python_server_entry_point=hello_state)
hello_state.gateway = clientserver2
sleep()
clientserver.entry_point.startServerWithPythonEntry(False)
sleep()
clientserver2.shutdown()
# Check that Java correctly called Python
self.assertEqual(2, len(hello_state.calls))
self.assertEqual((None, None), hello_state.calls[0])
self.assertEqual((2, "Hello World"), hello_state.calls[1])
with gateway_server_example_app_process(False):
clientserver = ClientServer()
# We disable gc to test whether a shut down on one side will
# garbage collect everything.
gc.disable()
internal_work(clientserver)
gc.enable()
python_gc()
clientserver.jvm.py4j.instrumented.MetricRegistry.\
forceFinalization()
sleep()
createdSet = clientserver.jvm.py4j.instrumented.MetricRegistry.\
getCreatedObjectsKeySet()
finalizedSet = clientserver.jvm.py4j.instrumented.MetricRegistry.\
getFinalizedObjectsKeySet()
# 7 objects: 2 InstrumentedObject (sayHello called twice), 1
# JavaServer, 1 PythonClient, 1 ClientServer, 3
# ClientServerConnection (1 to call sayHello,
# 1 that receives shutdown command)
self.assertEqual(7, len(createdSet))
self.assertEqual(7, len(finalizedSet))
self.assertEqual(createdSet, finalizedSet)
clientserver.shutdown()
# 8 objects: ClientServer (ok), PythonServer (ok), JavaClient,
# GatewayProperty, HelloState (ok), 3 ClientServer Connections (2)
assert_python_memory(self, 7)
|
dataset/merge_dataset.py | wx-b/SOLD2 | 347 | 12755133 | """ Compose multiple datasets in a single loader. """
import numpy as np
from copy import deepcopy
from torch.utils.data import Dataset
from dataset.wireframe_dataset import WireframeDataset
from dataset.holicity_dataset import HolicityDataset
class MergeDataset(Dataset):
def __init__(self, mode, config=None):
super(MergeDataset, self).__init__()
# Initialize the datasets
self._datasets = []
spec_config = deepcopy(config)
for i, d in enumerate(config['datasets']):
spec_config['dataset_name'] = d
spec_config['gt_source_train'] = config['gt_source_train'][i]
spec_config['gt_source_test'] = config['gt_source_test'][i]
if d == "wireframe":
self._datasets.append(WireframeDataset(mode, spec_config))
elif d == "holicity":
spec_config['train_split'] = config['train_splits'][i]
self._datasets.append(HolicityDataset(mode, spec_config))
else:
raise ValueError("Unknown dataset: " + d)
self._weights = config['weights']
def __getitem__(self, item):
dataset = self._datasets[np.random.choice(
range(len(self._datasets)), p=self._weights)]
return dataset[np.random.randint(len(dataset))]
def __len__(self):
return np.sum([len(d) for d in self._datasets])
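# Illustrative usage sketch (config keys taken from the constructor above;
# the paths and weights below are placeholders, not real values):
#
#   config = {
#       "datasets": ["wireframe", "holicity"],
#       "train_splits": [None, "all"],
#       "gt_source_train": ["wireframe_gt_train.h5", "holicity_gt_train.h5"],
#       "gt_source_test": ["wireframe_gt_test.h5", "holicity_gt_test.h5"],
#       "weights": [0.5, 0.5],
#   }
#   merged = MergeDataset("train", config)
#   sample = merged[0]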
|
corehq/apps/data_interfaces/migrations/0010_automaticupdaterule_workflow.py | dimagilg/commcare-hq | 471 | 12755136 | <filename>corehq/apps/data_interfaces/migrations/0010_automaticupdaterule_workflow.py
# Generated by Django 1.10.7 on 2017-05-23 11:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('data_interfaces', '0009_scheduling_integration'),
]
operations = [
migrations.AddField(
model_name='automaticupdaterule',
name='workflow',
field=models.CharField(default='CASE_UPDATE', max_length=126),
preserve_default=False,
),
]
|
tests/test_config.py | istvan-fodor/giraffez | 122 | 12755144 | # -*- coding: utf-8 -*-
import os
import platform
import pytest
import yaml
import giraffez
from giraffez.constants import *
from giraffez.errors import *
from giraffez.types import Columns
from giraffez.utils import *
@pytest.mark.usefixtures('config', 'tmpfiles')
class TestConfig(object):
def test_get_set_list_value(self, tmpfiles):
with giraffez.Config(tmpfiles.conf, "w", tmpfiles.key) as config:
value = config.get_value("test")
assert value == {}
value = config.get_value("connections.default")
assert value == "db1"
config.set_value("connections.default", "db2")
value = config.get_value("connections.default")
assert value == "db2"
value = config.list_value(decrypt=False)
def test_get_multi_value(self, tmpfiles):
with giraffez.Config(tmpfiles.conf, "w", tmpfiles.key) as config:
value = config.get_value("connections")
def test_get_trailing_dot(self, tmpfiles):
with giraffez.Config(tmpfiles.conf, "w", tmpfiles.key) as config:
value1 = config.get_value("connections")
value2 = config.get_value("connections.")
assert value1 == value2
def test_unset_value(self, tmpfiles):
expected_dsn = "db2"
with giraffez.Config(tmpfiles.conf, "w", tmpfiles.key) as config:
config.unset_value("connections.db1")
value = config.get_value("connections.db1")
assert value == {}
def test_read_only(self, tmpfiles):
with pytest.raises(ConfigReadOnly):
with giraffez.Config(tmpfiles.conf, "r", tmpfiles.key) as config:
config.set_value("connections.default", "db2")
config.write()
def test_config_conf_missing(self, tmpfiles):
with pytest.raises(ConfigNotFound):
with giraffez.Config("None", "r", tmpfiles.key) as config:
pass
def test_config_key_missing(self, tmpfiles):
with pytest.raises(KeyNotFound):
with giraffez.Config(tmpfiles.conf, "r", "None") as config:
pass
def test_config_conf_bad_permissions(self, tmpfiles):
# Tests for permissions on linux or unix-like system only. Windows
# requires the use of Windows-only APIs to determine and set the
# permissions on files.
if platform.system() == 'Windows':
return
with pytest.raises(ConfigurationError):
os.chmod(tmpfiles.conf, 0o655)
with giraffez.Config(tmpfiles.conf, "r", tmpfiles.key) as config:
pass
os.chmod(tmpfiles.conf, 0o600)
def test_config_key_bad_permissions(self, tmpfiles):
# Tests for permissions on linux or unix-like system only. Windows
# requires the use of Windows-only APIs to determine and set the
# permissions on files.
if platform.system() == 'Windows':
return
with pytest.raises(ConfigurationError):
os.chmod(tmpfiles.key, 0o655)
with giraffez.Config(tmpfiles.conf, "r", tmpfiles.key) as config:
pass
os.chmod(tmpfiles.key, 0o400)
def test_config_connections(self, tmpfiles):
with giraffez.Config(tmpfiles.conf, "r", tmpfiles.key) as config:
connections = config.connections
dsn = config.get_connection("db1")
assert dsn.get("host") == None
def test_config_lock(self, tmpfiles):
with giraffez.Config(tmpfiles.conf, "r", tmpfiles.key) as config:
giraffez.Config.lock_connection(tmpfiles.conf, "db1", key=tmpfiles.key)
giraffez.Config.lock_connection(tmpfiles.conf, "db1", key=tmpfiles.key)
with pytest.raises(ConnectionLock):
giraffez.Config.lock_connection(tmpfiles.conf, "db1", key=tmpfiles.key)
config.reload()
lock_value = config.get_value("connections.db1.lock")
assert lock_value == 2
giraffez.Config.unlock_connection(tmpfiles.conf, "db1", key=tmpfiles.key)
config.reload()
lock_value = config.get_value("connections.db1.lock")
assert lock_value == {}
def test_secret_decrypt(self, tmpfiles):
expected_username = "user123"
expected_password = "<PASSWORD>"
with giraffez.Config(tmpfiles.conf, "w", tmpfiles.key) as config:
config.set_value("connections.db1.username", expected_username)
config.set_value("connections.db1.password", expected_password)
config.write()
with giraffez.Secret(tmpfiles.conf, "r", tmpfiles.key) as secret:
username, password = secret("connections.db1.username, connections.db1.password")
assert expected_username == username
assert expected_password == password
with giraffez.Secret(tmpfiles.conf, "w", tmpfiles.key) as secret:
secret.set("db1.username", expected_username)
secret.set("db1.password", expected_password)
username, password = secret("db1.username, db1.password")
assert expected_username == username
assert expected_password == password
|
sdk/python/pulumi_azure/mysql/flexible_server_configuration.py | henriktao/pulumi-azure | 109 | 12755194 | <gh_stars>100-1000
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['FlexibleServerConfigurationArgs', 'FlexibleServerConfiguration']
@pulumi.input_type
class FlexibleServerConfigurationArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
server_name: pulumi.Input[str],
value: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a FlexibleServerConfiguration resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the MySQL Flexible Server exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] server_name: Specifies the name of the MySQL Flexible Server. Changing this forces a new resource to be created.
:param pulumi.Input[str] value: Specifies the value of the MySQL Flexible Server Configuration. See the MySQL documentation for valid values.
:param pulumi.Input[str] name: Specifies the name of the MySQL Flexible Server Configuration, which needs [to be a valid MySQL configuration name](https://dev.mysql.com/doc/refman/5.7/en/server-configuration.html). Changing this forces a new resource to be created.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "server_name", server_name)
pulumi.set(__self__, "value", value)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group in which the MySQL Flexible Server exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="serverName")
def server_name(self) -> pulumi.Input[str]:
"""
Specifies the name of the MySQL Flexible Server. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "server_name")
@server_name.setter
def server_name(self, value: pulumi.Input[str]):
pulumi.set(self, "server_name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
Specifies the value of the MySQL Flexible Server Configuration. See the MySQL documentation for valid values.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the MySQL Flexible Server Configuration, which needs [to be a valid MySQL configuration name](https://dev.mysql.com/doc/refman/5.7/en/server-configuration.html). Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class _FlexibleServerConfigurationState:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering FlexibleServerConfiguration resources.
:param pulumi.Input[str] name: Specifies the name of the MySQL Flexible Server Configuration, which needs [to be a valid MySQL configuration name](https://dev.mysql.com/doc/refman/5.7/en/server-configuration.html). Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the MySQL Flexible Server exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] server_name: Specifies the name of the MySQL Flexible Server. Changing this forces a new resource to be created.
:param pulumi.Input[str] value: Specifies the value of the MySQL Flexible Server Configuration. See the MySQL documentation for valid values.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if server_name is not None:
pulumi.set(__self__, "server_name", server_name)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the MySQL Flexible Server Configuration, which needs [to be a valid MySQL configuration name](https://dev.mysql.com/doc/refman/5.7/en/server-configuration.html). Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource group in which the MySQL Flexible Server exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="serverName")
def server_name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the MySQL Flexible Server. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "server_name")
@server_name.setter
def server_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "server_name", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the value of the MySQL Flexible Server Configuration. See the MySQL documentation for valid values.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
class FlexibleServerConfiguration(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Sets a MySQL Flexible Server Configuration value on a MySQL Flexible Server.
## Disclaimers
> **Note:** Since this resource is provisioned by default, the Azure Provider will not check for the presence of an existing resource prior to attempting to create it.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_flexible_server = azure.mysql.FlexibleServer("exampleFlexibleServer",
resource_group_name=azurerm_resource_group["test"]["name"],
location=azurerm_resource_group["test"]["location"],
administrator_login="adminTerraform",
administrator_password="<PASSWORD>!",
sku_name="GP_Standard_D2ds_v4")
example_flexible_server_configuration = azure.mysql.FlexibleServerConfiguration("exampleFlexibleServerConfiguration",
resource_group_name=example_resource_group.name,
server_name=azurerm_mysql_server["example"]["name"],
value="600")
```
## Import
MySQL Flexible Server Configurations can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:mysql/flexibleServerConfiguration:FlexibleServerConfiguration interactive_timeout /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.DBforMySQL/flexibleServers/flexibleServer1/configurations/interactive_timeout
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] name: Specifies the name of the MySQL Flexible Server Configuration, which needs [to be a valid MySQL configuration name](https://dev.mysql.com/doc/refman/5.7/en/server-configuration.html). Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the MySQL Flexible Server exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] server_name: Specifies the name of the MySQL Flexible Server. Changing this forces a new resource to be created.
:param pulumi.Input[str] value: Specifies the value of the MySQL Flexible Server Configuration. See the MySQL documentation for valid values.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: FlexibleServerConfigurationArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Sets a MySQL Flexible Server Configuration value on a MySQL Flexible Server.
## Disclaimers
> **Note:** Since this resource is provisioned by default, the Azure Provider will not check for the presence of an existing resource prior to attempting to create it.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_flexible_server = azure.mysql.FlexibleServer("exampleFlexibleServer",
resource_group_name=azurerm_resource_group["test"]["name"],
location=azurerm_resource_group["test"]["location"],
administrator_login="adminTerraform",
administrator_password="<PASSWORD>!",
sku_name="GP_Standard_D2ds_v4")
example_flexible_server_configuration = azure.mysql.FlexibleServerConfiguration("exampleFlexibleServerConfiguration",
resource_group_name=example_resource_group.name,
server_name=azurerm_mysql_server["example"]["name"],
value="600")
```
## Import
MySQL Flexible Server Configurations can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:mysql/flexibleServerConfiguration:FlexibleServerConfiguration interactive_timeout /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.DBforMySQL/flexibleServers/flexibleServer1/configurations/interactive_timeout
```
:param str resource_name: The name of the resource.
:param FlexibleServerConfigurationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(FlexibleServerConfigurationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = FlexibleServerConfigurationArgs.__new__(FlexibleServerConfigurationArgs)
__props__.__dict__["name"] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if server_name is None and not opts.urn:
raise TypeError("Missing required property 'server_name'")
__props__.__dict__["server_name"] = server_name
if value is None and not opts.urn:
raise TypeError("Missing required property 'value'")
__props__.__dict__["value"] = value
super(FlexibleServerConfiguration, __self__).__init__(
'azure:mysql/flexibleServerConfiguration:FlexibleServerConfiguration',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None) -> 'FlexibleServerConfiguration':
"""
Get an existing FlexibleServerConfiguration resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] name: Specifies the name of the MySQL Flexible Server Configuration, which needs [to be a valid MySQL configuration name](https://dev.mysql.com/doc/refman/5.7/en/server-configuration.html). Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the MySQL Flexible Server exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] server_name: Specifies the name of the MySQL Flexible Server. Changing this forces a new resource to be created.
:param pulumi.Input[str] value: Specifies the value of the MySQL Flexible Server Configuration. See the MySQL documentation for valid values.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _FlexibleServerConfigurationState.__new__(_FlexibleServerConfigurationState)
__props__.__dict__["name"] = name
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["server_name"] = server_name
__props__.__dict__["value"] = value
return FlexibleServerConfiguration(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the MySQL Flexible Server Configuration, which needs [to be a valid MySQL configuration name](https://dev.mysql.com/doc/refman/5.7/en/server-configuration.html). Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the resource group in which the MySQL Flexible Server exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="serverName")
def server_name(self) -> pulumi.Output[str]:
"""
Specifies the name of the MySQL Flexible Server. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "server_name")
@property
@pulumi.getter
def value(self) -> pulumi.Output[str]:
"""
Specifies the value of the MySQL Flexible Server Configuration. See the MySQL documentation for valid values.
"""
return pulumi.get(self, "value")
|
src/pipelines/epidemiology/ua_authority.py | chrismayemba/covid-19-open-data | 430 | 12755224 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from typing import Dict
import requests
from pandas import DataFrame
from lib.concurrent import thread_map
from lib.data_source import DataSource
from lib.time import date_range, date_today
_api_url_tpl = "https://api-covid19.rnbo.gov.ua/data?to={date}"
def _get_daily_records(date: str):
records = []
url = _api_url_tpl.format(date=date)
daily_data = requests.get(url, timeout=60).json().get("ukraine", [])
for record in daily_data:
records.append(
{
"date": date,
"country_code": "UA",
"match_string": record.get("label", {}).get("en"),
"total_confirmed": record.get("confirmed"),
"total_deceased": record.get("deaths"),
"total_recovered": record.get("recovered"),
}
)
return records
class UkraineDataSource(DataSource):
def parse(self, sources: Dict[str, str], aux: Dict[str, DataFrame], **parse_opts) -> DataFrame:
# Data can only be retrieved one day at a time, and it starts on 2020-01-22
first = "2020-01-22"
map_iter = list(date_range(first, date_today()))
records = sum(thread_map(_get_daily_records, map_iter), [])
return DataFrame.from_records(records)
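# Illustrative sketch, not part of the original pipeline: _get_daily_records can be
# exercised on its own with a date string in the YYYY-MM-DD format used by
# _api_url_tpl. The date below is only an example value, and running this performs
# a live HTTP request against the RNBO API.
if __name__ == "__main__":
    sample_records = _get_daily_records("2020-06-01")
    print("fetched {} region records".format(len(sample_records)))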
|
pahelix/utils/protein_tools.py | agave233/PaddleHelix | 454 | 12755241 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tools for protein features.
"""
from collections import OrderedDict
from enum import Enum
class ProteinTokenizer(object):
"""
Protein Tokenizer.
"""
padding_token = '<pad>'
mask_token = '<mask>'
start_token = class_token = '<cls>'
end_token = seperate_token = '<sep>'
unknown_token = '<unk>'
padding_token_id = 0
mask_token_id = 1
start_token_id = class_token_id = 2
end_token_id = seperate_token_id = 3
unknown_token_id = 4
special_token_ids = [padding_token_id, mask_token_id, start_token_id, end_token_id, unknown_token_id]
vocab = OrderedDict([
(padding_token, 0),
(mask_token, 1),
(class_token, 2),
(seperate_token, 3),
(unknown_token, 4),
('A', 5),
('B', 6),
('C', 7),
('D', 8),
('E', 9),
('F', 10),
('G', 11),
('H', 12),
('I', 13),
('K', 14),
('L', 15),
('M', 16),
('N', 17),
('O', 18),
('P', 19),
('Q', 20),
('R', 21),
('S', 22),
('T', 23),
('U', 24),
('V', 25),
('W', 26),
('X', 27),
('Y', 28),
('Z', 29)])
def tokenize(self, sequence):
"""
        Split the sequence into a token list.
Args:
sequence: The sequence to be tokenized.
Returns:
tokens: The token lists.
"""
return [x for x in sequence]
def convert_token_to_id(self, token):
"""
Converts a token to an id.
Args:
token: Token.
Returns:
id: The id of the input token.
"""
if token not in self.vocab:
return ProteinTokenizer.unknown_token_id
else:
return ProteinTokenizer.vocab[token]
def convert_tokens_to_ids(self, tokens):
"""
Convert multiple tokens to ids.
Args:
tokens: The list of tokens.
Returns:
ids: The id list of the input tokens.
"""
return [self.convert_token_to_id(token) for token in tokens]
def gen_token_ids(self, sequence):
"""
        Generate the list of token ids according to the input sequence.
Args:
sequence: Sequence to be tokenized.
Returns:
token_ids: The list of token ids.
"""
tokens = []
tokens.append(ProteinTokenizer.start_token)
tokens.extend(self.tokenize(sequence))
tokens.append(ProteinTokenizer.end_token)
token_ids = self.convert_tokens_to_ids(tokens)
return token_ids
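# Illustrative usage sketch (not part of the original module): tokenizing a short
# protein sequence and mapping it to ids with the vocabulary defined above. The
# sequence below is an arbitrary example.
if __name__ == '__main__':
    tokenizer = ProteinTokenizer()
    sequence = 'MKT'
    print(tokenizer.tokenize(sequence))       # ['M', 'K', 'T']
    print(tokenizer.gen_token_ids(sequence))  # [2, 16, 14, 23, 3] -> <cls> M K T <sep>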
|
feature_learning/visualzation/file2web.py | oumayb/ArtMiner | 107 | 12755255 |
import argparse
import os
import re
parser = argparse.ArgumentParser(description='Visualize training samples: the top 200 pairs randomly sampled from the top 2000 pairs')
parser.add_argument(
'--outHtml', type=str, help='output html file')
parser.add_argument(
'--imgDir', type=str, help='image directory')
args = parser.parse_args()
### Writing the table format###
f = open(args.outHtml, 'w')
f.write('<html>\n')
f.write('<head>\n')
f.write('\t<title></title>\n')
f.write('\t<meta name=\"keywords\" content= \"Visual Result\" /> <meta charset=\"utf-8\" />\n')
f.write('\t<meta name=\"robots\" content=\"index, follow\" />\n')
f.write('\t<meta http-equiv=\"Content-Script-Type\" content=\"text/javascript\" />\n')
f.write('\t<meta http-equiv=\"expires\" content=\"0\" />\n')
f.write('\t<meta name=\"description\" content= \"Project page of style.css\" />\n')
f.write('\t<link rel=\"stylesheet\" type=\"text/css\" href=\"style.css\" media=\"screen\" />\n')
f.write('\t<link rel=\"shortcut icon\" href=\"favicon.ico\" />\n')
f.write('</head>\n')
f.write('<body>\n')
f.write('<div id="website">\n')
f.write('<center>\n')
f.write('\t<div class=\"blank\"></div>\n')
f.write('\t<h1>\n')
f.write('\t\tVisualize Training Sample\n')
f.write('\t</h1>\n')
f.write('</center>\n')
f.write('<div class=\"blank\"></div>\n')
f.write('<center>\n')
f.write('<div>\n')
f.write('</div>\n')
### ---HTML Table--- ###
f.write('<table>\n')
f.write('\t<tr>\n')
f.write('\t\t<th># Rank</th>\n')
f.write('\t\t<th>Img 1 </th>\n')
f.write('\t\t<th>Img 2 </th>\n')
f.write('\t</tr>\n')
nbPair = len(os.listdir(args.imgDir)) // 2 ## Nb of rows
for j in range(nbPair):
f.write('\t<tr >\n')
msg = '\t\t<th>{:d}</th>\n'.format(j + 1)
f.write(msg)## Rank
img1 = os.path.join(args.imgDir, 'Rank{:d}_1.jpg'.format(j))
msg = '\t\t<td><a download=\"{}\" href=\"{}\" title="ImageName"> <img src=\"{}\" /></a> </td>\n'.format(img1, img1, img1)
f.write(msg)## Img 1
img2 = os.path.join(args.imgDir, 'Rank{:d}_2.jpg'.format(j))
msg = '\t\t<td><a download=\"{}\" href=\"{}\" title="ImageName"> <img src=\"{}\" /></a> </td>\n'.format(img2, img2, img2)
f.write(msg)## Img 2
f.write('\t</tr>\n')
f.write('</table>\n')
f.write('</center>\n</div>\n </body>\n</html>\n')
f.close()
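# Illustrative invocation sketch (paths are placeholders): the image directory is
# expected to contain pairs named Rank<k>_1.jpg / Rank<k>_2.jpg, e.g.
#   python file2web.py --outHtml visual.html --imgDir ./training_pairs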
|
data/io/convert_data_to_tfrecord_coco.py | rickyHong/NAS_FPN_repl | 224 | 12755269 | <reponame>rickyHong/NAS_FPN_repl
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import sys
sys.path.append('../../')
import xml.etree.cElementTree as ET
import numpy as np
import tensorflow as tf
import glob
import cv2
import json
from libs.label_name_dict.label_dict import *
from help_utils.tools import *
tf.app.flags.DEFINE_string('coco_dir', '/data/COCO/coco_trainvalmini.odgt', 'coco dir')
tf.app.flags.DEFINE_string('save_name', 'train', 'save name')
tf.app.flags.DEFINE_string('save_dir', '../tfrecord/', 'save name')
tf.app.flags.DEFINE_string('dataset', 'coco', 'dataset')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def convert_pascal_to_tfrecord(coco_trainvalmini):
save_path = FLAGS.save_dir + FLAGS.dataset + '_' + FLAGS.save_name + '.tfrecord'
mkdir(FLAGS.save_dir)
# writer_options = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB)
# writer = tf.python_io.TFRecordWriter(path=save_path, options=writer_options)
writer = tf.python_io.TFRecordWriter(path=save_path)
with open(coco_trainvalmini) as f:
files = f.readlines()
img_count = 0
gt_count = 0
for count, raw_line in enumerate(files):
file = json.loads(raw_line)
img_path = os.path.join('/data/COCO/train2017', file['fpath'].split('_')[-1])
img_name = file['ID']
if not os.path.exists(img_path):
# print('{} is not exist!'.format(img_path))
img_count += 1
continue
# img = np.array(Image.open(img_path))
img = cv2.imread(img_path)[:, :, ::-1]
if img is None:
continue
gtboxes = file['gtboxes']
img_height = file['height']
img_width = file['width']
if len(gtboxes) == 0:
# print('{}: gt is not exist!'.format(img_path))
gt_count += 1
continue
gtbox_label = []
for gt in gtboxes:
box = gt['box']
label = gt['tag']
gtbox_label.append([box[0], box[1], box[0]+box[2], box[1]+box[3], NAME_LABEL_MAP[label]])
gtbox_label = np.array(gtbox_label, np.int32)
feature = tf.train.Features(feature={
# do not need encode() in linux
'img_name': _bytes_feature(img_name.encode()),
# 'img_name': _bytes_feature(img_name),
'img_height': _int64_feature(img_height),
'img_width': _int64_feature(img_width),
'img': _bytes_feature(img.tostring()),
'gtboxes_and_label': _bytes_feature(gtbox_label.tostring()),
'num_objects': _int64_feature(gtbox_label.shape[0])
})
example = tf.train.Example(features=feature)
writer.write(example.SerializeToString())
view_bar('Conversion progress', count + 1, len(files))
print('{} images not exist!'.format(img_count))
print('{} gts not exist!'.format(gt_count))
print('\nConversion is complete!')
if __name__ == '__main__':
convert_pascal_to_tfrecord(FLAGS.coco_dir)
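# Illustrative invocation sketch (paths are placeholders for a local COCO setup):
#   python convert_data_to_tfrecord_coco.py \
#       --coco_dir /data/COCO/coco_trainvalmini.odgt \
#       --save_name train --dataset coco --save_dir ../tfrecord/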
|
dragonfly/exd/unittest_exd_utils.py | hase1128/dragonfly | 675 | 12755300 | <filename>dragonfly/exd/unittest_exd_utils.py<gh_stars>100-1000
"""
Unit tests for ed_utils.
-- <EMAIL>
"""
from __future__ import absolute_import
from __future__ import division
# pylint: disable=relative-import
# Local imports
from .exd_utils import random_sampling_cts, random_sampling_kmeans_cts
from ..utils.base_test_class import BaseTestClass, execute_tests
class EDUtilsTestCase(BaseTestClass):
""" Unit tests for generic functions ed_utils.py """
def setUp(self):
""" Sets up unit tests. """
self.lhs_data = [(1, 10), (2, 5), (4, 10), (10, 100)]
@classmethod
def _check_sample_sizes(cls, data, samples):
""" Data is a tuple of the form (dim, num_samples) ans samples is an ndarray."""
assert (data[1], data[0]) == samples.shape
def test_random_sampling(self):
""" Tests random sampling. """
self.report('Test random sampling.')
for data in self.lhs_data:
self._check_sample_sizes(data, random_sampling_cts(data[0], data[1]))
def test_random_sampling_kmeans(self):
""" Tests random sampling with k-means. """
self.report('Test random sampling with k-means.')
for data in self.lhs_data:
self._check_sample_sizes(data, random_sampling_kmeans_cts(data[0], data[1]))
if __name__ == '__main__':
execute_tests()
|
chainer-1.4/seg_rnn.py | ysadamori/chainer_LSTM_seq2seq_example | 137 | 12755306 | <gh_stars>100-1000
#!/usr/bin/python3
#import my_settings
import sys
import math
import numpy as np
from argparse import ArgumentParser
from chainer import functions, optimizers
import util.generators as gens
from util.functions import trace, fill_batch
from util.model_file import ModelFile
from util.vocabulary import Vocabulary
from util.chainer_cpu_wrapper import wrapper
#from util.chainer_gpu_wrapper import wrapper
class RNNSegmentationModel:
def __init__(self):
pass
def __make_model(self):
self.__model = wrapper.make_model(
w_xe = functions.EmbedID(len(self.__vocab), self.__n_embed),
w_ea = functions.Linear(self.__n_embed, 4 * self.__n_hidden),
w_aa = functions.Linear(self.__n_hidden, 4 * self.__n_hidden),
w_eb = functions.Linear(self.__n_embed, 4 * self.__n_hidden),
w_bb = functions.Linear(self.__n_hidden, 4 * self.__n_hidden),
w_ay1 = functions.Linear(self.__n_hidden, 1),
w_by1 = functions.Linear(self.__n_hidden, 1),
w_ay2 = functions.Linear(self.__n_hidden, 1),
w_by2 = functions.Linear(self.__n_hidden, 1),
)
@staticmethod
def new(vocab, n_embed, n_hidden):
self = RNNSegmentationModel()
self.__vocab = vocab
self.__n_embed = n_embed
self.__n_hidden = n_hidden
self.__make_model()
return self
def save(self, filename):
with ModelFile(filename, 'w') as fp:
self.__vocab.save(fp.get_file_pointer())
fp.write(self.__n_embed)
fp.write(self.__n_hidden)
wrapper.begin_model_access(self.__model)
fp.write_embed(self.__model.w_xe)
fp.write_linear(self.__model.w_ea)
fp.write_linear(self.__model.w_aa)
fp.write_linear(self.__model.w_eb)
fp.write_linear(self.__model.w_bb)
fp.write_linear(self.__model.w_ay1)
fp.write_linear(self.__model.w_by1)
fp.write_linear(self.__model.w_ay2)
fp.write_linear(self.__model.w_by2)
wrapper.end_model_access(self.__model)
@staticmethod
def load(filename):
self = RNNSegmentationModel()
with ModelFile(filename) as fp:
self.__vocab = Vocabulary.load(fp.get_file_pointer())
self.__n_embed = int(fp.read())
self.__n_hidden = int(fp.read())
self.__make_model()
wrapper.begin_model_access(self.__model)
fp.read_embed(self.__model.w_xe)
fp.read_linear(self.__model.w_ea)
fp.read_linear(self.__model.w_aa)
fp.read_linear(self.__model.w_eb)
fp.read_linear(self.__model.w_bb)
fp.read_linear(self.__model.w_ay1)
fp.read_linear(self.__model.w_by1)
fp.read_linear(self.__model.w_ay2)
fp.read_linear(self.__model.w_by2)
wrapper.end_model_access(self.__model)
return self
def init_optimizer(self):
self.__opt = optimizers.AdaGrad(lr=0.001)
self.__opt.setup(self.__model)
def __make_input(self, is_training, text):
word_list = text.split()
letters = [self.__vocab.stoi(x) for x in ''.join(word_list)]
if is_training:
labels = []
for x in word_list:
labels += [-1] * (len(x) - 1) + [1]
return letters, labels[:-1]
else:
return letters, None
def __forward(self, is_training, text):
m = self.__model
tanh = functions.tanh
lstm = functions.lstm
letters, labels = self.__make_input(is_training, text)
n_letters = len(letters)
accum_loss = wrapper.zeros(()) if is_training else None
hidden_zeros = wrapper.zeros((1, self.__n_hidden))
# embedding
list_e = []
for i in range(n_letters):
s_x = wrapper.make_var([letters[i]], dtype=np.int32)
list_e.append(tanh(m.w_xe(s_x)))
# forward encoding
s_a = hidden_zeros
c = hidden_zeros
list_a = []
for i in range(n_letters):
c, s_a = lstm(c, m.w_ea(list_e[i]) + m.w_aa(s_a))
list_a.append(s_a)
# backward encoding
s_b = hidden_zeros
c = hidden_zeros
list_b = []
for i in reversed(range(n_letters)):
c, s_b = lstm(c, m.w_eb(list_e[i]) + m.w_bb(s_b))
list_b.append(s_b)
# segmentation
scores = []
for i in range(n_letters - 1):
s_y = tanh(m.w_ay1(list_a[i]) + m.w_by1(list_b[i]) + m.w_ay2(list_a[i + 1]) + m.w_by2(list_b[i + 1]))
scores.append(float(wrapper.get_data(s_y)))
if is_training:
s_t = wrapper.make_var([[labels[i]]])
accum_loss += functions.mean_squared_error(s_y, s_t)
return scores, accum_loss
def train(self, text):
self.__opt.zero_grads()
scores, accum_loss = self.__forward(True, text)
accum_loss.backward()
self.__opt.clip_grads(5)
self.__opt.update()
return scores
def predict(self, text):
return self.__forward(False, text)[0]
def parse_args():
def_vocab = 2500
def_embed = 100
def_hidden = 100
def_epoch = 20
p = ArgumentParser(description='Word segmentation using LSTM-RNN')
p.add_argument('mode', help='\'train\' or \'test\'')
p.add_argument('corpus', help='[in] source corpus')
p.add_argument('model', help='[in/out] model file')
p.add_argument('--vocab', default=def_vocab, metavar='INT', type=int,
help='vocabulary size (default: %d)' % def_vocab)
p.add_argument('--embed', default=def_embed, metavar='INT', type=int,
help='embedding layer size (default: %d)' % def_embed)
p.add_argument('--hidden', default=def_hidden, metavar='INT', type=int,
help='hidden layer size (default: %d)' % def_hidden)
p.add_argument('--epoch', default=def_epoch, metavar='INT', type=int,
help='number of training epoch (default: %d)' % def_epoch)
args = p.parse_args()
# check args
try:
if args.mode not in ['train', 'test']: raise ValueError('you must set mode = \'train\' or \'test\'')
if args.vocab < 1: raise ValueError('you must set --vocab >= 1')
if args.embed < 1: raise ValueError('you must set --embed >= 1')
if args.hidden < 1: raise ValueError('you must set --hidden >= 1')
if args.epoch < 1: raise ValueError('you must set --epoch >= 1')
except Exception as ex:
p.print_usage(file=sys.stderr)
print(ex, file=sys.stderr)
sys.exit()
return args
def make_hyp(letters, scores):
hyp = letters[0]
for w, s in zip(letters[1:], scores):
if s >= 0:
hyp += ' '
hyp += w
return hyp
def train_model(args):
trace('making vocabularies ...')
vocab = Vocabulary.new(gens.letter_list(args.corpus), args.vocab)
trace('start training ...')
model = RNNSegmentationModel.new(vocab, args.embed, args.hidden)
for epoch in range(args.epoch):
trace('epoch %d/%d: ' % (epoch + 1, args.epoch))
trained = 0
model.init_optimizer()
with open(args.corpus) as fp:
for text in fp:
word_list = text.split()
if not word_list:
continue
text = ' '.join(word_list)
letters = ''.join(word_list)
scores = model.train(text)
trained += 1
hyp = make_hyp(letters, scores)
trace(trained)
trace(text)
trace(hyp)
trace(' '.join('%+.1f' % x for x in scores))
if trained % 100 == 0:
trace(' %8d' % trained)
        trace('saving model ...')
model.save(args.model + '.%03d' % (epoch + 1))
trace('finished.')
def test_model(args):
trace('loading model ...')
model = RNNSegmentationModel.load(args.model)
trace('generating output ...')
with open(args.corpus) as fp:
for text in fp:
letters = ''.join(text.split())
if not letters:
print()
continue
scores = model.predict(text)
hyp = make_hyp(letters, scores)
print(hyp)
trace('finished.')
def main():
args = parse_args()
trace('initializing ...')
wrapper.init()
if args.mode == 'train': train_model(args)
elif args.mode == 'test': test_model(args)
if __name__ == '__main__':
main()
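# Illustrative invocation sketch (file names are placeholders):
#   ./seg_rnn.py train corpus.txt segmodel --vocab 2500 --embed 100 --hidden 100 --epoch 20
#   ./seg_rnn.py test unsegmented.txt segmodel.020 > segmented.txt
# Training writes one model file per epoch as <model>.NNN (see train_model above).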
|
sheetfu/config.py | darkroomdave/sheetfu | 893 | 12755323 | <reponame>darkroomdave/sheetfu
fields_masks = {
'background': "sheets/data/rowData/values/effectiveFormat/backgroundColor",
'value': "sheets/data/rowData/values/formattedValue",
'note': "sheets/data/rowData/values/note",
'font_color': "sheets/data/rowData/values/effectiveFormat/textFormat/foregroundColor"
}
|
Malt/Render/Lighting.py | BlenderNPR/BEER | 242 | 12755397 | # Copyright (c) 2020 BlenderNPR and contributors. MIT license.
import math
import ctypes
import pyrr
from Malt.GL.GL import *
from Malt.GL.Shader import UBO
from Malt.GL.Texture import TextureArray, CubeMapArray
from Malt.GL.RenderTarget import ArrayLayerTarget, RenderTarget
from Malt import Pipeline
_LIGHTS_BUFFER = None
def get_lights_buffer():
if Pipeline.MAIN_CONTEXT:
global _LIGHTS_BUFFER
if _LIGHTS_BUFFER is None: _LIGHTS_BUFFER = LightsBuffer()
return _LIGHTS_BUFFER
else:
return LightsBuffer()
_SHADOWMAPS = None
def get_shadow_maps():
if Pipeline.MAIN_CONTEXT:
global _SHADOWMAPS
if _SHADOWMAPS is None: _SHADOWMAPS = ShadowMaps()
return _SHADOWMAPS
else:
return ShadowMaps()
LIGHT_SUN = 1
LIGHT_POINT = 2
LIGHT_SPOT = 3
class C_Light(ctypes.Structure):
_fields_ = [
('color', ctypes.c_float*3),
('type', ctypes.c_int32),
('position', ctypes.c_float*3),
('radius', ctypes.c_float),
('direction', ctypes.c_float*3),
('spot_angle', ctypes.c_float),
('spot_blend', ctypes.c_float),
('type_index', ctypes.c_int32),
('__padding', ctypes.c_int32*2),
]
MAX_SPOTS = 64
MAX_SUNS = 64
MAX_LIGHTS = 128
class C_LightsBuffer(ctypes.Structure):
_fields_ = [
('lights', C_Light*MAX_LIGHTS),
('lights_count', ctypes.c_int),
('cascades_count', ctypes.c_int),
('__padding', ctypes.c_int32*2),
('spot_matrices', ctypes.c_float*16*MAX_SPOTS),
('sun_matrices', ctypes.c_float*16*MAX_SUNS),
]
class ShadowMaps(object):
def __init__(self):
self.max_spots = 1
self.spot_resolution = 2048
self.spot_depth_t = None
self.spot_fbos = []
self.max_suns = 1
self.sun_resolution = 2048
self.sun_depth_t = None
self.sun_fbos = []
self.max_points = 1
self.point_resolution = 512
self.point_depth_t = None
self.point_fbos = []
self.initialized = False
def load(self, scene, spot_resolution, sun_resolution, point_resolution, sun_cascades):
needs_setup = self.initialized is False
self.initialized = True
new_settings = (spot_resolution, sun_resolution, point_resolution)
current_settings = (self.spot_resolution, self.sun_resolution, self.point_resolution)
if new_settings != current_settings:
self.spot_resolution = spot_resolution
self.sun_resolution = sun_resolution
self.point_resolution = point_resolution
needs_setup = True
spot_count = len([l for l in scene.lights if l.type == LIGHT_SPOT])
if spot_count > self.max_spots:
self.max_spots = spot_count
needs_setup = True
sun_count = len([l for l in scene.lights if l.type == LIGHT_SUN])
sun_count = sun_count * sun_cascades
if sun_count > self.max_suns:
self.max_suns = sun_count
needs_setup = True
point_count = len([l for l in scene.lights if l.type == LIGHT_POINT])
if point_count > self.max_points:
self.max_points = point_count
needs_setup = True
if needs_setup:
self.setup()
self.clear(spot_count, sun_count, point_count)
def setup(self, create_fbos=True):
self.spot_depth_t = TextureArray((self.spot_resolution, self.spot_resolution), self.max_spots, GL_DEPTH_COMPONENT32F)
self.sun_depth_t = TextureArray((self.sun_resolution, self.sun_resolution), self.max_suns, GL_DEPTH_COMPONENT32F)
self.point_depth_t = CubeMapArray((self.point_resolution, self.point_resolution), self.max_points, GL_DEPTH_COMPONENT32F)
if create_fbos:
self.spot_fbos = []
for i in range(self.spot_depth_t.length):
self.spot_fbos.append(RenderTarget([], ArrayLayerTarget(self.spot_depth_t, i)))
self.sun_fbos = []
for i in range(self.sun_depth_t.length):
self.sun_fbos.append(RenderTarget([], ArrayLayerTarget(self.sun_depth_t, i)))
self.point_fbos = []
for i in range(self.point_depth_t.length*6):
self.point_fbos.append(RenderTarget([], ArrayLayerTarget(self.point_depth_t, i)))
def clear(self, spot_count, sun_count, point_count):
for i in range(spot_count):
self.spot_fbos[i].clear(depth=1)
for i in range(sun_count):
self.sun_fbos[i].clear(depth=1)
for i in range(point_count*6):
self.point_fbos[i].clear(depth=1)
def shader_callback(self, shader):
shader.textures['SHADOWMAPS_DEPTH_SPOT'] = self.spot_depth_t
shader.textures['SHADOWMAPS_DEPTH_SUN'] = self.sun_depth_t
shader.textures['SHADOWMAPS_DEPTH_POINT'] = self.point_depth_t
class LightsBuffer(object):
def __init__(self):
self.data = C_LightsBuffer()
self.UBO = UBO()
self.spots = None
self.suns = None
self.points = None
def load(self, scene, cascades_count, cascades_distribution_scalar, cascades_max_distance=1.0):
        #TODO: Automatic distribution exponent based on FOV
spot_count=0
sun_count=0
point_count=0
from collections import OrderedDict
self.spots = OrderedDict()
self.suns = OrderedDict()
self.points = OrderedDict()
for i, light in enumerate(scene.lights):
self.data.lights[i].color = light.color
self.data.lights[i].type = light.type
self.data.lights[i].position = light.position
self.data.lights[i].radius = light.radius
self.data.lights[i].direction = light.direction
self.data.lights[i].spot_angle = light.spot_angle
self.data.lights[i].spot_blend = light.spot_blend
if light.type == LIGHT_SPOT:
self.data.lights[i].type_index = spot_count
projection_matrix = make_projection_matrix(light.spot_angle,1,0.01,light.radius)
spot_matrix = projection_matrix * pyrr.Matrix44(light.matrix)
self.data.spot_matrices[spot_count] = flatten_matrix(spot_matrix)
self.spots[light] = [(light.matrix, flatten_matrix(projection_matrix))]
spot_count+=1
if light.type == LIGHT_SUN:
self.data.lights[i].type_index = sun_count
sun_matrix = pyrr.Matrix44(light.matrix)
projection_matrix = pyrr.Matrix44(scene.camera.projection_matrix)
view_matrix = projection_matrix * pyrr.Matrix44(scene.camera.camera_matrix)
cascades_matrices = get_sun_cascades(sun_matrix, projection_matrix, view_matrix, cascades_count, cascades_distribution_scalar, cascades_max_distance)
self.suns[light] = []
for i, cascade in enumerate(cascades_matrices):
cascade = flatten_matrix(cascade)
self.data.sun_matrices[sun_count * cascades_count + i] = cascade
self.suns[light].append((cascade, flatten_matrix(pyrr.Matrix44.identity())))
sun_count+=1
if light.type == LIGHT_POINT:
self.data.lights[i].type_index = point_count
cube_map_axes = [
(( 1, 0, 0),( 0,-1, 0)),
((-1, 0, 0),( 0,-1, 0)),
(( 0, 1, 0),( 0, 0, 1)),
(( 0,-1, 0),( 0, 0,-1)),
(( 0, 0, 1),( 0,-1, 0)),
(( 0, 0,-1),( 0,-1, 0))
]
matrices = []
for axes in cube_map_axes:
position = pyrr.Vector3(light.position)
front = pyrr.Vector3(axes[0])
up = pyrr.Vector3(axes[1])
matrices.append(pyrr.Matrix44.look_at(position, position + front, up))
projection_matrix = make_projection_matrix(math.pi / 2.0, 1.0, 0.01, light.radius)
self.points[light] = []
for i in range(6):
self.points[light].append((flatten_matrix(matrices[i]), flatten_matrix(projection_matrix)))
point_count+=1
self.data.lights_count = len(scene.lights)
self.data.cascades_count = cascades_count
self.UBO.load_data(self.data)
def bind(self, location):
self.UBO.bind(location)
def flatten_matrix(matrix):
return (ctypes.c_float * 16)(*[e for v in matrix for e in v])
#TODO: Hard-coded for Blender conventions for now
def make_projection_matrix(fov, aspect_ratio, near, far):
x_scale = 1.0 / math.tan(fov / 2.0)
y_scale = x_scale * aspect_ratio
return pyrr.Matrix44([
x_scale, 0, 0, 0,
0, y_scale, 0, 0,
0, 0, (-(far + near)) / (far - near), -1,
0, 0, (-2.0 * far * near) / (far - near), 0
])
def get_sun_cascades(sun_from_world_matrix, projection_matrix, view_from_world_matrix, cascades_count, cascades_distribution_scalar, cascades_max_distance):
cascades = []
splits = []
n,f = 0,0
if projection_matrix[3][3] == 1.0:
# ortho
n = cascades_max_distance / 2
f = -cascades_max_distance / 2
else:
# perspective
clip_start = projection_matrix.inverse * pyrr.Vector4([0,0,-1,1])
clip_start /= clip_start.w
n = clip_start.z
f = -cascades_max_distance
def lerp(a,b,f):
f = max(0,min(f,1))
return a * (1.0 - f) + b * f
for i in range(cascades_count+1):
split_log = n * pow(f/n, i/cascades_count)
split_uniform = n + (f-n) * (i/cascades_count)
split = lerp(split_uniform, split_log, cascades_distribution_scalar)
projected = projection_matrix * pyrr.Vector4([0,0,split,1])
projected = (projected / projected.w) * (1.0 if projected.w >= 0 else -1.0)
splits.append(projected.z)
for i in range(1, len(splits)):
near = splits[i-1]
far = splits[i]
cascades.append(sun_shadowmap_matrix(sun_from_world_matrix, view_from_world_matrix, near, far))
return cascades
def frustum_corners(view_from_world_matrix, near, far):
m = view_from_world_matrix.inverse
corners = []
for x in (-1, 1):
for y in (-1, 1):
for z in (near, far):
v = pyrr.Vector4([x, y, z, 1])
v = m * v
v /= v.w
corners.append(v)
return corners
def sun_shadowmap_matrix(sun_from_world_matrix, view_from_world_matrix, near, far):
INFINITY = float('inf')
aabb = {
'min': pyrr.Vector3([ INFINITY, INFINITY, INFINITY]),
'max': pyrr.Vector3([-INFINITY, -INFINITY, -INFINITY])
}
for corner in frustum_corners(view_from_world_matrix, near, far):
corner = sun_from_world_matrix * corner
aabb['min'].x = min(aabb['min'].x, corner.x)
aabb['min'].y = min(aabb['min'].y, corner.y)
aabb['min'].z = min(aabb['min'].z, corner.z)
aabb['max'].x = max(aabb['max'].x, corner.x)
aabb['max'].y = max(aabb['max'].y, corner.y)
aabb['max'].z = max(aabb['max'].z, corner.z)
world_from_light_space = sun_from_world_matrix.inverse
size = aabb['max'] - aabb['min']
aabb['min'] = world_from_light_space * pyrr.Vector4([*aabb['min'].tolist(), 1.0])
aabb['max'] = world_from_light_space * pyrr.Vector4([*aabb['max'].tolist(), 1.0])
center = (aabb['min'] + aabb['max']) / 2.0
center = pyrr.Vector3(center.tolist()[:3])
scale = pyrr.Matrix44.from_scale(size)
translate = pyrr.Matrix44.from_translation(center)
matrix = translate * world_from_light_space * scale
screen = pyrr.Matrix44([
1, 0, 0, 0,
0, 1, 0, 0,
0, 0,-1, 0,
0, 0, 0, 1
])
return screen * matrix.inverse
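# Illustrative note (not part of Malt): make_projection_matrix takes the field of
# view in radians, an aspect ratio, and the near/far clip distances; the spot-light
# projections above are built as
#   make_projection_matrix(light.spot_angle, 1, 0.01, light.radius)
# and uploaded after flatten_matrix(...) converts them to a ctypes float array.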
|
sdk/python/pulumi_aws/eks/get_clusters.py | chivandikwa/pulumi-aws | 260 | 12755417 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetClustersResult',
'AwaitableGetClustersResult',
'get_clusters',
]
@pulumi.output_type
class GetClustersResult:
"""
A collection of values returned by getClusters.
"""
def __init__(__self__, id=None, names=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if names and not isinstance(names, list):
raise TypeError("Expected argument 'names' to be a list")
pulumi.set(__self__, "names", names)
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def names(self) -> Sequence[str]:
"""
Set of EKS clusters names
"""
return pulumi.get(self, "names")
class AwaitableGetClustersResult(GetClustersResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetClustersResult(
id=self.id,
names=self.names)
def get_clusters(opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetClustersResult:
"""
Retrieve EKS Clusters list
"""
__args__ = dict()
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:eks/getClusters:getClusters', __args__, opts=opts, typ=GetClustersResult).value
return AwaitableGetClustersResult(
id=__ret__.id,
names=__ret__.names)
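# Illustrative usage sketch (not part of the generated SDK file); the export name
# is a placeholder:
#   import pulumi
#   import pulumi_aws as aws
#   clusters = aws.eks.get_clusters()
#   pulumi.export("eks_cluster_names", clusters.names)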
|
pylons/decorators/rest.py | KinSai1975/Menira.py | 118 | 12755426 | """REST decorators"""
import logging
from decorator import decorator
from pylons.controllers.util import abort
from pylons.decorators.util import get_pylons
__all__ = ['dispatch_on', 'restrict']
log = logging.getLogger(__name__)
def restrict(*methods):
"""Restricts access to the function depending on HTTP method
Example:
.. code-block:: python
from pylons.decorators import rest
class SomeController(BaseController):
@rest.restrict('GET')
            def comment(self, id):
                # Only GET requests will reach this action
"""
def check_methods(func, *args, **kwargs):
"""Wrapper for restrict"""
if get_pylons(args).request.method not in methods:
log.debug("Method not allowed by restrict")
abort(405, headers=[('Allow', ','.join(methods))])
return func(*args, **kwargs)
return decorator(check_methods)
def dispatch_on(**method_map):
"""Dispatches to alternate controller methods based on HTTP method
Multiple keyword arguments should be passed, with the keyword
corresponding to the HTTP method to dispatch on (DELETE, POST, GET,
etc.) and the value being the function to call. The value should be
a string indicating the name of the function to dispatch to.
Example:
.. code-block:: python
from pylons.decorators import rest
class SomeController(BaseController):
@rest.dispatch_on(POST='create_comment')
def comment(self):
# Do something with the comment
def create_comment(self, id):
                # Do something if it's a POST to comment
"""
def dispatcher(func, self, *args, **kwargs):
"""Wrapper for dispatch_on"""
alt_method = method_map.get(get_pylons(args).request.method)
if alt_method:
alt_method = getattr(self, alt_method)
log.debug("Dispatching to %s instead", alt_method)
return self._inspect_call(alt_method, **kwargs)
return func(self, *args, **kwargs)
return decorator(dispatcher)
|
app/log.py | nthparameter/ci_edit | 229 | 12755429 | <reponame>nthparameter/ci_edit
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import inspect
import os
import sys
import time
import traceback
import app.buffer_file
screenLog = [u"--- screen log ---"]
fullLog = [u"--- begin log ---"]
enabledChannels = {
u"meta": True,
#'mouse': True,
u"startup": True,
}
shouldWritePrintLog = False
startTime = time.time()
def get_lines():
return screenLog
def parse_lines(frame, logChannel, *args):
if not len(args):
args = [u""]
msg = str(args[0])
if 1:
msg = u"%s %s %s %s: %s" % (
logChannel,
os.path.split(frame[1])[1],
frame[2],
frame[3],
msg,
)
prior = msg
for i in args[1:]:
if not len(prior) or prior[-1] != u"\n":
msg += u" "
prior = repr(i) # unicode(i)
msg += prior
return msg.split(u"\n")
def channel_enable(logChannel, isEnabled):
global fullLog, shouldWritePrintLog
fullLog += [
u"%10s %10s: %s %r" % (u"logging", u"channel_enable", logChannel, isEnabled)
]
if isEnabled:
enabledChannels[logChannel] = isEnabled
shouldWritePrintLog = True
else:
        enabledChannels.pop(logChannel, None)
def channel(logChannel, *args):
global fullLog, screenLog
if logChannel in enabledChannels:
lines = parse_lines(inspect.stack()[2], logChannel, *args)
screenLog += lines
fullLog += lines
def caller(*args):
global fullLog, screenLog
priorCaller = inspect.stack()[2]
msg = (
u"%s %s %s"
% (os.path.split(priorCaller[1])[1], priorCaller[2], priorCaller[3]),
) + args
lines = parse_lines(inspect.stack()[1], u"caller", *msg)
screenLog += lines
fullLog += lines
def exception(e, *args):
global fullLog
lines = parse_lines(inspect.stack()[1], u"except", *args)
fullLog += lines
errorType, value, tracebackInfo = sys.exc_info()
out = traceback.format_exception(errorType, value, tracebackInfo)
for i in out:
error(i[:-1])
def check_failed(prefix, a, op, b):
stack(u"failed %s %r %s %r" % (prefix, a, op, b))
raise Exception("fatal error")
def check_ge(a, b):
if a >= b:
return
check_failed(u"check_ge", a, u">=", b)
def check_gt(a, b):
if a > b:
return
check_failed(u"check_lt", a, u"<", b)
def check_le(a, b):
if a <= b:
return
check_failed(u"check_le", a, u"<=", b)
def check_lt(a, b):
if a < b:
return
check_failed(u"check_lt", a, u"<", b)
def stack(*args):
global fullLog, screenLog
callStack = inspect.stack()[1:]
callStack.reverse()
for i, frame in enumerate(callStack):
line = [
u"stack %2d %14s %4s %s"
% (i, os.path.split(frame[1])[1], frame[2], frame[3])
]
screenLog += line
fullLog += line
if len(args):
screenLog.append(u"stack " + repr(args[0]))
fullLog.append(u"stack " + repr(args[0]))
def info(*args):
channel(u"info", *args)
def meta(*args):
"""Log information related to logging."""
channel(u"meta", *args)
def mouse(*args):
channel(u"mouse", *args)
def parser(*args):
channel(u"parser", *args)
def startup(*args):
channel(u"startup", *args)
def quick(*args):
global fullLog, screenLog
msg = str(args[0])
prior = msg
for i in args[1:]:
if not len(prior) or prior[-1] != u"\n":
msg += u" "
prior = i # unicode(i)
msg += prior
lines = msg.split(u"\n")
screenLog += lines
fullLog += lines
def debug(*args):
global fullLog, screenLog
if u"debug" in enabledChannels:
lines = parse_lines(inspect.stack()[1], u"debug_@@@", *args)
screenLog += lines
fullLog += lines
def detail(*args):
global fullLog
if u"detail" in enabledChannels:
lines = parse_lines(inspect.stack()[1], u"detail", *args)
fullLog += lines
def error(*args):
global fullLog
lines = parse_lines(inspect.stack()[1], u"error", *args)
fullLog += lines
def when(*args):
args = (time.time() - startTime,) + args
channel(u"info", *args)
def wrapper(function, shouldWrite=True):
global shouldWritePrintLog
shouldWritePrintLog = shouldWrite
r = -1
try:
try:
r = function()
except BaseException:
shouldWritePrintLog = True
errorType, value, tracebackInfo = sys.exc_info()
out = traceback.format_exception(errorType, value, tracebackInfo)
for i in out:
error(i[:-1])
finally:
flush()
return r
def write_to_file(path):
fullPath = app.buffer_file.expand_full_path(path)
with io.open(fullPath, "w+", encoding=u"UTF-8") as out:
out.write(u"\n".join(fullLog) + u"\n")
def flush():
if shouldWritePrintLog:
sys.stdout.write(u"\n".join(fullLog) + u"\n")
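# Illustrative sketch (not part of ci_edit): a callable run through wrapper() has
# uncaught exceptions recorded in the log, and flush() prints the accumulated log
# when shouldWritePrintLog is set. The channel name below is an arbitrary example.
#   channel_enable(u"info", True)
#   wrapper(lambda: info(u"hello from the demo"))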
|
common/tools/jcxxgen.bzl | mfkiwl/verible | 147 | 12755438 | # -*- Python -*-
# Copyright 2021 The Verible Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Rule to generate json-serializable simple structs
"""
def jcxxgen(name, src, out, namespace = ""):
"""Generate C/C++ language source from a jcxxgen schema file.
Args:
name: Name of the rule, producing a cc-library with the same name.
src: The schema yaml input file.
out: Name of the generated header file.
namespace: Optional name of the C++ namespace for generated structs.
"""
tool = "//common/tools:jcxxgen"
json_header = '"nlohmann/json.hpp"'
native.genrule(
name = name + "_gen",
srcs = [src],
outs = [out],
cmd = ("$(location //common/tools:jcxxgen) --json_header='" +
json_header + "' --class_namespace " +
namespace + " --output $@ $<"),
tools = [tool],
)
native.cc_library(
name = name,
hdrs = [out],
deps = ["@jsonhpp"],
)
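# Hypothetical BUILD file usage sketch (target, schema, and namespace names are
# illustrative, not taken from the repository):
#
#   load("//common/tools:jcxxgen.bzl", "jcxxgen")
#
#   jcxxgen(
#       name = "protocol_structs",
#       src = "protocol-schema.yaml",
#       out = "protocol-structs.h",
#       namespace = "myproject::protocol",
#   )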
|
glumpy/app/__init__.py | Frekby/glumpy | 1,074 | 12755470 | # -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 <NAME>. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
"""
"""
import os
import re
import sys
import logging
import importlib
import numpy as np
from glumpy import gl
from glumpy.log import log
from glumpy.ext.inputhook import inputhook_manager, stdin_ready
from glumpy.app.window import backends
from . import parser
from . import configuration
from . import clock as _clock
from . clock import Clock
from . console import Console
from . viewport import Viewport
from . window import EventDispatcher
# Default clock
__clock__ = None
# Active windows
__windows__ = []
# Current backend
__backend__ = None
__running__ = False
# --------------------------------------------------------------------- fps ---
def fps():
"""
Get FPS from the default clock.
"""
return __clock__.get_fps()
# --------------------------------------------------------------------- use ---
def use(backend, api=None, major=None, minor=None, profile=None):
""" Select a specific backend
Parameters
----------
backend : ['osxglut', 'freeglut', 'pyglet', 'glfw', 'sdl', 'sdl2', 'pyside']
Graphical toolkit to use
api : ['GL'|'ES']
OpenGL API to use
major : int
OpenGL major version to use
minor : int
OpenGL minor version to use
profile : ['compatibility'|'core']
OpenGL profile to use
Note
----
A shortened version is available with the following syntax:
use("backend (api major.minor profile)")
For example, `use("glfw (GL 3.3 core)")`
"""
global __backend__
config = configuration.get_default()
# Parse options (in backend name, see note above)
exp = """(?P<backend>\w+)?
(.*\(
(.*(?P<api>GL|ES))?
(.*(?P<major>[1234])\.(?P<minor>[012345]))?
(.*(?P<profile>compatibility|core))?.*\))?"""
r = re.search(exp, backend, re.IGNORECASE | re.VERBOSE)
_backend = r.group('backend') or "glfw"
_api = r.group('api') or "GL"
_major = int(r.group('major') or str(config.major_version))
_minor = int(r.group('minor') or str(config.minor_version))
_profile = r.group('profile') or ""
# Arguments take precedence over shortened options
backend = _backend
api = api or _api
major = major or _major
minor = minor or _minor
profile = profile or _profile
config.api = api
config.major_version = major
config.minor_version = minor
config.profile = profile
if backend not in backends.__backends__:
log.critical("Unknown backend (%s)" % backend)
log.critical("Available backends are: %s", str(backends.__backends__))
sys.exit(0)
# BUG: For some reason, the import module changes the working directory
# We save it beforehand and restore it just after
workdir = os.getcwd()
name = "glumpy.app.window.backends.backend_" + backend
importlib.import_module(name)
backend = sys.modules[name]
os.chdir(workdir)
# Check availability
if backend.available():
__backend__ = backend
return backend
else:
log.warning("Backend (%s) not available" % backend)
return None
# ----------------------------------------------------------------- Window ---
class Window(object):
"""
Abstract Window
This class is responsible for finding a suitable backend and parsing
arguments.
"""
def __new__(cls, *args, **kwargs):
global __backend__
all = list(backends.__backends__)
options = parser.get_options()
# No backend was specified
# Check for command line argument then pick a default one if possible
if __backend__ is None:
if options.backend != all[0]:
all = [options.backend,] + all
for name in all:
backend = use(name)
if backend and backend.available():
__backend__ = backend
break
# No backend available, there's nothing we can do
if __backend__ is None:
log.critical("No suitable backend found")
raise NotImplementedError
config = configuration.get_default()
if "config" not in kwargs.keys():
kwargs['config'] = config
if 'vsync' not in kwargs.keys():
kwargs['vsync'] = options.vsync
# Get command line size
# if options.size:
# size = options.size.split(",")
# kwargs['width'] = int(size[0])
# kwargs['height'] = int(size[1])
# else:
# kwargs['width'] = kwargs.get('width', 512)
# kwargs['height'] = kwargs.get('height', 512)
# Get command line position
# if options.position:
# position = options.position.split(",")
# #kwargs['x'] = kwargs.get('x', int(position[0]))
# #kwargs['y'] = kwargs.get('y', int(position[1]))
# else:
# pass
# #kwargs['x'] = kwargs.get('x', 0)
# #kwargs['y'] = kwargs.get('y', 0)
# Create the backend window
window = __backend__.Window(*args, **kwargs)
window._backend = __backend__
window._config = config
log.info("Using %s (%s %d.%d)" %
(__backend__.name(), config.api,
config.major_version, config.minor_version))
if config.samples > 0:
log.info("Using multisampling with %d samples" %
(config.samples))
# Display fps options
if options.display_fps:
@window.timer(1.0)
def timer(elapsed):
print("Estimated FPS: %f"% fps())
return window
# --------------------------------------------------------------- __init__ ---
def __init__(clock=None, framerate=None, backend=None):
""" Initialize the main loop
Parameters
----------
clock : Clock
clock to use to run the app (gives the elementary tick)
framerate : int
frames per second
backend : python module
Backend module
"""
global __clock__
options = parser.get_options()
if options.debug:
log.setLevel(logging.DEBUG)
if framerate is None:
framerate = options.framerate
if framerate > 0:
log.info("Running at %d frames/second" % framerate)
else:
log.info("Running at full speed")
if clock is None:
__clock__ = _clock.get_default()
else:
__clock__ = clock
__clock__.set_fps_limit(framerate)
# OpenGL Initialization
for window in backend.windows():
window.activate()
gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1)
gl.glEnable(gl.GL_VERTEX_PROGRAM_POINT_SIZE)
try: # This has been removed in 3.2 (it's now on by default)
gl.glEnable(gl.GL_POINT_SPRITE)
except:
pass
gl.glEnable(gl.GL_BLEND)
gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
# Initialize timers for all windows
for window in backend.windows():
window._clock = __clock__
# Start timers
for i in range(len(window._timer_stack)):
handler, interval = window._timer_stack[i]
__clock__.schedule_interval(handler, interval)
# Activate window
window.activate()
# Dispatch init event
window.dispatch_event('on_init')
# Dispatch an initial resize event
window.dispatch_event('on_resize', window._width, window._height)
return __clock__
# -------------------------------------------------------------------- quit ---
def quit():
global __running__
__running__ = False
# count = len(__backend__.windows())
# while count:
# dt = clock.tick()
# window = __backend__.windows()[-1]
# window.close()
# count = __backend__.process(dt)
# --------------------------------------------------------------------- run ---
def run(clock=None, framerate=None, interactive=None,
duration = sys.maxsize, framecount = sys.maxsize):
""" Run the main loop
Parameters
----------
clock : Clock
clock to use to run the app (gives the elementary tick)
framerate : int
frames per second
duration : float
Duration after which the app will be stopped
framecount : int
Number of frame to display before stopping.
"""
global __running__
clock = __init__(clock=clock, framerate=framerate, backend=__backend__)
options = parser.get_options()
if interactive is None:
interactive = options.interactive
if interactive:
# Set interactive python session
os.environ['PYTHONINSPECT'] = '1'
import readline
readline.parse_and_bind("tab: complete")
def run():
while not stdin_ready():
__backend__.process(clock.tick())
return 0
inputhook_manager.set_inputhook(run)
else:
__running__ = True
def run(duration, framecount):
count = len(__backend__.windows())
while count and duration > 0 and framecount > 0 and __running__:
dt = clock.tick()
duration -= dt
framecount -= 1
count = __backend__.process(dt)
if options.record:
from .movie import record
try:
# Check if output file name given
name = sys.argv[2]
except:
# Obtain the name of the script that is being run
name = os.path.basename(sys.argv[0])
# Replace .py extension with .mp4
filename=re.sub('.py$', '.mp4', name)
log.info("Recording movie in '%s'" % filename)
with record(window=__backend__.windows()[0],
filename=filename,
fps=60):
run(duration, framecount)
else:
run(duration, framecount)
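# Illustrative sketch (not part of this module): the typical application pattern
# built on the API above -- pick a backend, create a Window, attach event handlers,
# then enter the main loop with run().
#   from glumpy import app
#   app.use("glfw")
#   window = app.Window(width=512, height=512)
#   @window.event
#   def on_draw(dt):
#       window.clear()
#   app.run(framerate=60)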
|
aries_cloudagent/transport/inbound/tests/test_session.py | kuraakhilesh8230/aries-cloudagent-python | 247 | 12755476 | <reponame>kuraakhilesh8230/aries-cloudagent-python
import asyncio
import pytest
from asynctest import TestCase, mock as async_mock
from ....admin.server import AdminResponder
from ....core.in_memory import InMemoryProfile
from ....messaging.responder import BaseResponder
from ....multitenant.base import BaseMultitenantManager
from ....multitenant.manager import MultitenantManager
from ...error import WireFormatError
from ...outbound.message import OutboundMessage
from ..message import InboundMessage
from ..receipt import MessageReceipt
from ..session import InboundSession
class TestInboundSession(TestCase):
def setUp(self):
self.profile = InMemoryProfile.test_profile()
def test_init(self):
test_inbound = async_mock.MagicMock()
test_session_id = "session-id"
test_wire_format = async_mock.MagicMock()
test_client_info = {"client": "info"}
test_close = async_mock.MagicMock()
test_reply_mode = MessageReceipt.REPLY_MODE_ALL
test_reply_thread_ids = {"1", "2"}
test_reply_verkeys = {"3", "4"}
test_transport_type = "transport-type"
sess = InboundSession(
profile=self.profile,
inbound_handler=test_inbound,
session_id=test_session_id,
wire_format=test_wire_format,
client_info=test_client_info,
close_handler=test_close,
reply_mode=test_reply_mode,
reply_thread_ids=test_reply_thread_ids,
reply_verkeys=test_reply_verkeys,
transport_type=test_transport_type,
)
assert sess.profile is self.profile
assert sess.session_id == test_session_id
assert sess.wire_format is test_wire_format
assert sess.client_info == test_client_info
assert sess.reply_mode == test_reply_mode
assert sess.transport_type == test_transport_type
assert "1" in sess.reply_thread_ids
assert "3" in sess.reply_verkeys
test_msg = async_mock.MagicMock()
with async_mock.patch.object(sess, "process_inbound") as process:
sess.receive_inbound(test_msg)
process.assert_called_once_with(test_msg)
test_inbound.assert_called_once_with(
sess.profile, test_msg, can_respond=False
)
sess.close()
test_close.assert_called_once()
assert sess.closed
def test_setters(self):
sess = InboundSession(
profile=self.profile,
inbound_handler=None,
session_id=None,
wire_format=None,
)
sess.reply_mode = MessageReceipt.REPLY_MODE_ALL
assert sess.reply_mode == MessageReceipt.REPLY_MODE_ALL
sess.add_reply_thread_ids("1")
assert "1" in sess.reply_thread_ids
sess.add_reply_verkeys("2")
assert "2" in sess.reply_verkeys
sess.reply_mode = "invalid"
assert not sess.reply_mode
assert not sess.reply_thread_ids # reset by setter method
async def test_parse_inbound(self):
test_session_id = "session-id"
test_transport_type = "transport-type"
test_wire_format = async_mock.MagicMock()
test_wire_format.parse_message = async_mock.CoroutineMock()
test_parsed = "parsed-payload"
test_receipt = async_mock.MagicMock()
test_wire_format.parse_message.return_value = (test_parsed, test_receipt)
sess = InboundSession(
profile=self.profile,
inbound_handler=None,
session_id=test_session_id,
wire_format=test_wire_format,
transport_type=test_transport_type,
)
session = self.profile.session()
setattr(self.profile, "session", async_mock.MagicMock(return_value=session))
test_payload = "{}"
result = await sess.parse_inbound(test_payload)
test_wire_format.parse_message.assert_awaited_once_with(session, test_payload)
assert result.payload == test_parsed
assert result.receipt is test_receipt
assert result.session_id == test_session_id
assert result.transport_type == test_transport_type
async def test_receive(self):
self.multitenant_mgr = async_mock.MagicMock(MultitenantManager, autospec=True)
self.multitenant_mgr.get_wallets_by_message = async_mock.CoroutineMock(
return_value=[async_mock.MagicMock(is_managed=True)]
)
self.multitenant_mgr.get_wallet_profile = async_mock.CoroutineMock(
return_value=self.profile
)
self.profile.context.injector.bind_instance(
BaseMultitenantManager, self.multitenant_mgr
)
self.profile.context.update_settings({"multitenant.enabled": True})
self.base_responder = async_mock.MagicMock(AdminResponder, autospec=True)
self.profile.context.injector.bind_instance(BaseResponder, self.base_responder)
sess = InboundSession(
profile=self.profile,
inbound_handler=None,
session_id=None,
wire_format=None,
)
test_msg = async_mock.MagicMock()
with async_mock.patch.object(
sess, "parse_inbound", async_mock.CoroutineMock()
) as encode, async_mock.patch.object(
sess, "receive_inbound", async_mock.MagicMock()
) as receive:
result = await sess.receive(test_msg)
encode.assert_awaited_once_with(test_msg)
receive.assert_called_once_with(encode.return_value)
assert result is encode.return_value
async def test_receive_no_wallet_found(self):
self.multitenant_mgr = async_mock.MagicMock(MultitenantManager, autospec=True)
self.multitenant_mgr.get_wallets_by_message = async_mock.CoroutineMock(
side_effect=ValueError("no such wallet")
)
self.multitenant_mgr.get_wallet_profile = async_mock.CoroutineMock(
return_value=self.profile
)
self.profile.context.injector.bind_instance(
BaseMultitenantManager, self.multitenant_mgr
)
self.profile.context.update_settings({"multitenant.enabled": True})
sess = InboundSession(
profile=self.profile,
inbound_handler=None,
session_id=None,
wire_format=None,
)
test_msg = async_mock.MagicMock()
with async_mock.patch.object(
sess, "parse_inbound", async_mock.CoroutineMock()
) as encode, async_mock.patch.object(
sess, "receive_inbound", async_mock.MagicMock()
) as receive:
result = await sess.receive(test_msg)
encode.assert_awaited_once_with(test_msg)
receive.assert_called_once_with(encode.return_value)
assert result is encode.return_value
def test_process_inbound(self):
test_session_id = "session-id"
test_thread_id = "thread-id"
test_verkey = "verkey"
sess = InboundSession(
profile=self.profile,
inbound_handler=None,
session_id=test_session_id,
wire_format=None,
)
receipt = MessageReceipt(
direct_response_mode=MessageReceipt.REPLY_MODE_THREAD,
thread_id=test_thread_id,
sender_verkey=test_verkey,
)
receipt.recipient_did = "dummy"
assert receipt.recipient_did == "dummy"
receipt.recipient_did_public = True
assert receipt.recipient_did_public
receipt.recipient_did = None
receipt.recipient_did_public = None
assert receipt.recipient_did is None
assert receipt.recipient_did_public is None
receipt.sender_did = "dummy"
assert receipt.sender_did == "dummy"
receipt.sender_did = None
assert receipt.sender_did is None
assert "direct_response_mode" in str(receipt)
message = InboundMessage(payload=None, receipt=receipt)
sess.process_inbound(message)
assert sess.reply_mode == receipt.direct_response_mode
assert test_verkey in sess.reply_verkeys
assert test_thread_id in sess.reply_thread_ids
assert receipt.in_time is None
receipt.connection_id = "dummy"
assert receipt.connection_id == "dummy"
def test_select_outbound(self):
test_session_id = "session-id"
test_thread_id = "thread-id"
test_verkey = "verkey"
sess = InboundSession(
profile=self.profile,
inbound_handler=None,
session_id=test_session_id,
wire_format=None,
)
sess.reply_mode = MessageReceipt.REPLY_MODE_ALL
test_msg = OutboundMessage(payload=None)
assert not sess.select_outbound(test_msg) # no key
test_msg.reply_session_id = test_session_id
assert not sess.select_outbound(test_msg) # no difference
sess.can_respond = True
assert not sess.select_outbound(test_msg) # no difference
test_msg.reply_to_verkey = test_verkey
sess.add_reply_verkeys(test_verkey)
assert sess.select_outbound(test_msg)
sess.reply_mode = MessageReceipt.REPLY_MODE_THREAD
sess.reply_verkeys = None
sess.reply_thread_ids = None
test_msg = OutboundMessage(payload=None)
assert not sess.select_outbound(test_msg)
sess.add_reply_thread_ids(test_thread_id)
test_msg.reply_thread_id = test_thread_id
assert not sess.select_outbound(test_msg)
sess.add_reply_verkeys(test_verkey)
test_msg.reply_to_verkey = test_verkey
assert sess.select_outbound(test_msg)
sess.close()
assert not sess.select_outbound(test_msg)
async def test_wait_response(self):
sess = InboundSession(
profile=self.profile,
inbound_handler=None,
session_id=None,
wire_format=None,
)
test_msg = OutboundMessage(payload=None)
sess.set_response(test_msg)
assert sess.response_event.is_set()
assert sess.response_buffered
with async_mock.patch.object(
sess, "encode_outbound", async_mock.CoroutineMock()
) as encode:
result = await asyncio.wait_for(sess.wait_response(), 0.1)
assert encode.awaited_once_with(test_msg)
assert result is encode.return_value
sess.clear_response()
assert not sess.response_buffer
sess.close()
assert await asyncio.wait_for(sess.wait_response(), 0.1) is None
async def test_wait_response_x(self):
sess = InboundSession(
profile=self.profile,
inbound_handler=None,
session_id=None,
wire_format=None,
)
test_msg = OutboundMessage(payload=None)
sess.set_response(test_msg)
assert sess.response_event.is_set()
assert sess.response_buffered
with async_mock.patch.object(
sess, "encode_outbound", async_mock.CoroutineMock()
) as encode:
encode.side_effect = WireFormatError()
with pytest.raises(asyncio.TimeoutError):
await asyncio.wait_for(sess.wait_response(), 0.1)
assert not sess.response_buffer
sess.close()
assert await asyncio.wait_for(sess.wait_response(), 0.1) is None
async def test_encode_response(self):
test_wire_format = async_mock.MagicMock()
test_wire_format.encode_message = async_mock.CoroutineMock()
sess = InboundSession(
profile=self.profile,
inbound_handler=None,
session_id=None,
wire_format=test_wire_format,
)
test_msg = OutboundMessage(payload=None)
test_from_verkey = "from-verkey"
test_to_verkey = "to-verkey"
session = self.profile.session()
setattr(self.profile, "session", async_mock.MagicMock(return_value=session))
with self.assertRaises(WireFormatError):
await sess.encode_outbound(test_msg)
test_msg.payload = "{}"
with self.assertRaises(WireFormatError):
await sess.encode_outbound(test_msg)
test_msg.reply_from_verkey = test_from_verkey
test_msg.reply_to_verkey = test_to_verkey
result = await sess.encode_outbound(test_msg)
assert result is test_wire_format.encode_message.return_value
test_wire_format.encode_message.assert_awaited_once_with(
session,
test_msg.payload,
[test_to_verkey],
None,
test_from_verkey,
)
async def test_accept_response(self):
sess = InboundSession(
profile=self.profile,
inbound_handler=None,
session_id=None,
wire_format=None,
)
test_msg = OutboundMessage(payload=None)
with async_mock.patch.object(sess, "select_outbound") as selector:
selector.return_value = False
accepted = sess.accept_response(test_msg)
assert not accepted and not accepted.retry
sess.set_response(OutboundMessage(payload=None))
selector.return_value = True
accepted = sess.accept_response(test_msg)
assert not accepted and accepted.retry
sess.clear_response()
accepted = sess.accept_response(test_msg)
assert accepted
async def test_context_mgr(self):
sess = InboundSession(
profile=self.profile,
inbound_handler=None,
session_id=None,
wire_format=None,
)
assert not sess.closed
async with sess:
pass
assert sess.closed
|
losses/flow_loss.py | Wassouli/projet-prat-oceano | 196 | 12755480 | import torch.nn as nn
import torch.nn.functional as F
from .loss_blocks import SSIM, smooth_grad_1st, smooth_grad_2nd, TernaryLoss
from utils.warp_utils import flow_warp
from utils.warp_utils import get_occu_mask_bidirection, get_occu_mask_backward
class unFlowLoss(nn.modules.Module):
def __init__(self, cfg):
super(unFlowLoss, self).__init__()
self.cfg = cfg
def loss_photomatric(self, im1_scaled, im1_recons, occu_mask1):
loss = []
if self.cfg.w_l1 > 0:
loss += [self.cfg.w_l1 * (im1_scaled - im1_recons).abs() * occu_mask1]
if self.cfg.w_ssim > 0:
loss += [self.cfg.w_ssim * SSIM(im1_recons * occu_mask1,
im1_scaled * occu_mask1)]
if self.cfg.w_ternary > 0:
loss += [self.cfg.w_ternary * TernaryLoss(im1_recons * occu_mask1,
im1_scaled * occu_mask1)]
return sum([l.mean() for l in loss]) / occu_mask1.mean()
def loss_smooth(self, flow, im1_scaled):
if 'smooth_2nd' in self.cfg and self.cfg.smooth_2nd:
func_smooth = smooth_grad_2nd
else:
func_smooth = smooth_grad_1st
loss = []
loss += [func_smooth(flow, im1_scaled, self.cfg.alpha)]
return sum([l.mean() for l in loss])
def forward(self, output, target):
"""
:param output: Multi-scale forward/backward flows n * [B x 4 x h x w]
:param target: image pairs Nx6xHxW
:return:
"""
pyramid_flows = output
im1_origin = target[:, :3]
im2_origin = target[:, 3:]
pyramid_smooth_losses = []
pyramid_warp_losses = []
self.pyramid_occu_mask1 = []
self.pyramid_occu_mask2 = []
s = 1.
for i, flow in enumerate(pyramid_flows):
if self.cfg.w_scales[i] == 0:
pyramid_warp_losses.append(0)
pyramid_smooth_losses.append(0)
continue
b, _, h, w = flow.size()
            # resize images to match the size of this pyramid level
im1_scaled = F.interpolate(im1_origin, (h, w), mode='area')
im2_scaled = F.interpolate(im2_origin, (h, w), mode='area')
im1_recons = flow_warp(im2_scaled, flow[:, :2], pad=self.cfg.warp_pad)
im2_recons = flow_warp(im1_scaled, flow[:, 2:], pad=self.cfg.warp_pad)
if i == 0:
if self.cfg.occ_from_back:
occu_mask1 = 1 - get_occu_mask_backward(flow[:, 2:], th=0.2)
occu_mask2 = 1 - get_occu_mask_backward(flow[:, :2], th=0.2)
else:
occu_mask1 = 1 - get_occu_mask_bidirection(flow[:, :2], flow[:, 2:])
occu_mask2 = 1 - get_occu_mask_bidirection(flow[:, 2:], flow[:, :2])
else:
occu_mask1 = F.interpolate(self.pyramid_occu_mask1[0],
(h, w), mode='nearest')
occu_mask2 = F.interpolate(self.pyramid_occu_mask2[0],
(h, w), mode='nearest')
self.pyramid_occu_mask1.append(occu_mask1)
self.pyramid_occu_mask2.append(occu_mask2)
loss_warp = self.loss_photomatric(im1_scaled, im1_recons, occu_mask1)
if i == 0:
s = min(h, w)
loss_smooth = self.loss_smooth(flow[:, :2] / s, im1_scaled)
if self.cfg.with_bk:
loss_warp += self.loss_photomatric(im2_scaled, im2_recons,
occu_mask2)
loss_smooth += self.loss_smooth(flow[:, 2:] / s, im2_scaled)
loss_warp /= 2.
loss_smooth /= 2.
pyramid_warp_losses.append(loss_warp)
pyramid_smooth_losses.append(loss_smooth)
pyramid_warp_losses = [l * w for l, w in
zip(pyramid_warp_losses, self.cfg.w_scales)]
pyramid_smooth_losses = [l * w for l, w in
zip(pyramid_smooth_losses, self.cfg.w_sm_scales)]
warp_loss = sum(pyramid_warp_losses)
smooth_loss = self.cfg.w_smooth * sum(pyramid_smooth_losses)
total_loss = warp_loss + smooth_loss
return total_loss, warp_loss, smooth_loss, pyramid_flows[0].abs().mean()
|
nmap_port_scanner_ip_obj.py | Mr-Cracker-Pro/red-python-scripts | 1,353 | 12755494 | #!/usr/bin/env python3
#Use these commands in Kali to install required software:
# sudo apt install python3-pip
# pip install python-nmap
# Import nmap so we can use it for the scan
import nmap
# We import the ipaddress module. We want to use the ipaddress.ip_address(address)
# method to see if we can instantiate a valid ip address to test.
import ipaddress
# We need to create regular expressions to ensure that the input is correctly formatted.
import re
# Regular Expression Pattern to extract the number of ports you want to scan.
# You have to specify <lowest_port_number>-<highest_port_number> (ex 10-100)
port_range_pattern = re.compile("([0-9]+)-([0-9]+)")
# Initialising the port numbers, will be using the variables later on.
port_min = 0
port_max = 65535
# This port scanner uses the Python nmap module.
# You'll need to install the following to get it work on Linux:
# Step 1: sudo apt install python3-pip
# Step 2: pip install python-nmap
# Basic user interface header
print(r"""______ _ _ ______ _ _
| _ \ (_) | | | ___ \ | | | |
| | | |__ ___ ___ __| | | |_/ / ___ _ __ ___ | |__ __ _| |
| | | / _` \ \ / / |/ _` | | ___ \/ _ \| '_ ` _ \| '_ \ / _` | |
| |/ / (_| |\ V /| | (_| | | |_/ / (_) | | | | | | |_) | (_| | |
|___/ \__,_| \_/ |_|\__,_| \____/ \___/|_| |_| |_|_.__/ \__,_|_|""")
print("\n****************************************************************")
print("\n* Copyright of <NAME>, 2021 *")
print("\n* https://www.davidbombal.com *")
print("\n* https://www.youtube.com/davidbombal *")
print("\n****************************************************************")
# Ask user to input the ip address they want to scan.
while True:
ip_add_entered = input("\nPlease enter the ip address that you want to scan: ")
# If we enter an invalid ip address the try except block will go to the except block and say you entered an invalid ip address.
try:
ip_address_obj = ipaddress.ip_address(ip_add_entered)
# The following line will only execute if the ip is valid.
print("You entered a valid ip address.")
break
    except ValueError:
print("You entered an invalid ip address")
while True:
# You can scan 0-65535 ports. This scanner is basic and doesn't use multithreading so scanning all the ports is not advised.
print("Please enter the range of ports you want to scan in format: <int>-<int> (ex would be 60-120)")
port_range = input("Enter port range: ")
# We pass the port numbers in by removing extra spaces that people sometimes enter. So if you enter 80 - 90 instead of 80-90 the program will still work.
port_range_valid = port_range_pattern.search(port_range.replace(" ",""))
if port_range_valid:
        # We're extracting the low end of the port range the user wants to scan.
        port_min = int(port_range_valid.group(1))
        # We're extracting the upper end of the port range the user wants to scan.
port_max = int(port_range_valid.group(2))
break
nm = nmap.PortScanner()
# We're looping over all of the ports in the specified range.
for port in range(port_min, port_max + 1):
try:
# The result is quite interesting to look at. You may want to inspect the dictionary it returns.
# It contains what was sent to the command line in addition to the port status we're after.
        # For port 80 on ip 10.0.0.2 this effectively runs: nmap -oX - -p 80 -sV 10.0.0.2
result = nm.scan(ip_add_entered, str(port))
# Uncomment following line and look at dictionary
# print(result)
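        # For reference, the returned dictionary is shaped roughly like this
        # (illustrative values only):
        #   {'nmap': {...}, 'scan': {'10.0.0.2': {'tcp': {80: {'state': 'open', ...}}}}}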
# We extract the port status from the returned object
port_status = (result['scan'][ip_add_entered]['tcp'][port]['state'])
print(f"Port {port} is {port_status}")
    except KeyError:
# We cannot scan some ports and this ensures the program doesn't crash when we try to scan them.
print(f"Cannot scan port {port}.")
|
language/conpono/cpc/run_cc_cpc.py | naveenjafer/language | 1,199 | 12755501 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT next sentence prediction / binary coherence finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from absl import app
from absl import flags
from bert import modeling
from bert import optimization
from bert import tokenization
from language.conpono.cpc import model_builder
from language.conpono.reconstruct import preprocess as ip
import tensorflow.compat.v1 as tf
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import data as contrib_data
from tensorflow.contrib import lookup as contrib_lookup
from tensorflow.contrib import tpu as contrib_tpu
from tensorflow.contrib import training as contrib_training
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"eval_file", None,
"The input data. Should be in tfrecord format ready to input to BERT.")
flags.DEFINE_string(
"train_file", None,
"The input data. Should be in tfrecord format ready to input to BERT.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_bool("include_mlm", True, "Whether to include MLM loss/objective")
flags.DEFINE_integer("num_choices", 32, "Number of negative samples + 1")
flags.DEFINE_integer("data_window_size", 5, "Number of documents to draw"
"negative samples from.")
flags.DEFINE_integer("data_window_shift", 2, "Shift windows by this many for"
"negative samples.")
flags.DEFINE_integer("max_sent_length", 70, "Number of tokens per sentence.")
flags.DEFINE_integer("max_para_length", 30, "Number of sentences per paragraph")
flags.DEFINE_integer("context_size", 4, "Number of sentences in the context")
flags.DEFINE_integer("margin", 1, "Eta value for margin.")
flags.DEFINE_float("mask_rate", 0.1, "Rate of masking for mlm.")
flags.DEFINE_bool("add_lv2loss", False, "Whether to use the level 2 loss.")
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_float(
"dataset_one_weight", 0.5, "Weight of first dataset."
"Weight of second dataset will be 1-x")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 32, "Total batch size for eval.")
flags.DEFINE_integer("train_data_size", 10000, "The number of examples in the"
"training data")
flags.DEFINE_integer("eval_data_size", -1, "The number of examples in the"
"validation data")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 10000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
_SEP_TOKEN = "[SEP]"
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
input_mask,
segment_ids,
label_id,
is_real_example=True):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.is_real_example = is_real_example
# pylint: disable=invalid-name
Outputs_And_Context = collections.namedtuple(
"Outputs_And_Context",
["input_ids", "input_mask", "segment_ids", "label_types", "context"])
# pylint: enable=invalid-name
def pad_and_cut(tensor, max_len_scalar):
end_padding = tf.constant([[0, max_len_scalar]])
return tf.pad(tensor, end_padding)[:max_len_scalar]
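# For example (illustrative): pad_and_cut(tf.constant([1, 2, 3], tf.int64), 5)
# evaluates to [1, 2, 3, 0, 0], while a tensor longer than 5 is truncated to
# its first 5 entries.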
def build_distractors(distractor_examples, context):
"""Create inputs with distractors."""
CLS_ID = tf.constant([101], dtype=tf.int64) # pylint: disable=invalid-name
SEP_ID = tf.constant([102], dtype=tf.int64) # pylint: disable=invalid-name
bert_inputs = []
input_masks = []
segment_ids = []
# for each distractor
sample_size = int((FLAGS.num_choices - 4) / (FLAGS.data_window_size - 1))
for example in distractor_examples:
    # randomly sample `sample_size` candidate sentences (7 with the default flags)
intermediate_examples_tensor = tf.reduce_sum(tf.abs(example), 1)
examples_zero_vector = tf.zeros(shape=(1, 1), dtype=tf.int64)
examples_bool_mask = tf.squeeze(
tf.not_equal(intermediate_examples_tensor, examples_zero_vector))
paragraph_len = tf.reduce_sum(tf.cast(examples_bool_mask, tf.int32))
indices = tf.range(0, limit=paragraph_len, dtype=tf.int32)
shuffled_indices = tf.random.shuffle(indices)[:sample_size]
# extend examples / targets
distractor_cand = example
distractor_cand_plus_one = distractor_cand[1:]
distractor_cand_plus_two = distractor_cand[2:]
# pad extensions
paddings_one = tf.constant([[0, 1], [0, 0]])
distractor_cand_plus_one = tf.pad(distractor_cand_plus_one, paddings_one)
paddings_two = tf.constant([[0, 2], [0, 0]])
distractor_cand_plus_two = tf.pad(distractor_cand_plus_two, paddings_two)
distractor_cand_ext = tf.concat(
[distractor_cand, distractor_cand_plus_one, distractor_cand_plus_two],
axis=1)
distractors = tf.gather(distractor_cand_ext, shuffled_indices)
for i in range(sample_size):
distractors_non_zero = tf.where(
tf.not_equal(distractors[i], tf.zeros_like(distractors[i])))
distractors_stripped = tf.gather_nd(distractors[i], distractors_non_zero)
segment_id = tf.concat([
tf.zeros_like(CLS_ID, dtype=tf.int64),
tf.zeros_like(context),
tf.zeros_like(SEP_ID, dtype=tf.int64),
tf.ones_like(distractors_stripped),
tf.ones_like(SEP_ID, dtype=tf.int64)
],
axis=0)
segment_id = pad_and_cut(segment_id, FLAGS.max_seq_length)
segment_ids.append(segment_id)
new_input = tf.concat(
[CLS_ID, context, SEP_ID, distractors_stripped, SEP_ID], axis=0)
input_mask = tf.ones_like(new_input)
input_mask = pad_and_cut(input_mask, FLAGS.max_seq_length)
input_masks.append(input_mask)
padded_new_input = pad_and_cut(new_input, FLAGS.max_seq_length)
bert_inputs.append(padded_new_input)
bert_inputs = tf.stack(bert_inputs, axis=0)
input_masks = tf.stack(input_masks, axis=0)
segment_ids = tf.stack(segment_ids, axis=0)
out = Outputs_And_Context(bert_inputs, input_masks, segment_ids, None, None)
return out
def build_bert_inputs(example):
"""Convert example <Tensor [30, 70]> into bert inputs."""
CLS_ID = tf.constant([101], dtype=tf.int64) # pylint: disable=invalid-name
SEP_ID = tf.constant([102], dtype=tf.int64) # pylint: disable=invalid-name
max_len = tf.constant([FLAGS.max_para_length])
context_size = tf.constant([FLAGS.context_size])
intermediate_examples_tensor = tf.reduce_sum(tf.abs(example), 1)
examples_zero_vector = tf.zeros(shape=(1, 1), dtype=tf.int64)
examples_bool_mask = tf.squeeze(
tf.not_equal(intermediate_examples_tensor, examples_zero_vector))
paragraph_len = tf.reduce_sum(tf.cast(examples_bool_mask, tf.int32))
start = tf.random.uniform([1],
0,
tf.reshape(paragraph_len, []) -
tf.reshape(context_size, []) + 1,
dtype=tf.int32)
# Slice the document into the before, after and context.
# Discard the zero padding.
sizes = tf.squeeze(
tf.concat([[
start, context_size, paragraph_len - context_size - start,
max_len - paragraph_len
]], 0))
before, context, after, _ = tf.split(example, sizes, axis=0)
# Gather the context removing zero padding at end of sentences.
non_zeros = tf.where(tf.not_equal(context, tf.zeros_like(context)))
context_gathered = tf.gather_nd(context, non_zeros)
# Flip before so we select the 4 sentences closest to target
before = tf.reverse(before, axis=[0])
# pad both to longer than needed
paddings = tf.constant([[0, 8], [0, 0]])
before = tf.pad(before, paddings)
after = tf.pad(after, paddings)
# Extend targets to 3 sentences
# pad both
before_minus_one = before[1:][:4]
before_minus_two = before[2:][:4]
after_plus_one = after[1:][:4]
after_plus_two = after[2:][:4]
before = before[:4]
after = after[:4]
before = tf.concat([before_minus_two, before_minus_one, before], axis=1)
after = tf.concat([after, after_plus_one, after_plus_two], axis=1)
############################################################################
# before = before[:4]
# after = after[:4]
# These 8 sentences are the 8 surrounding targets. Some are padding.
targets = tf.concat([before, after], axis=0)
  # Remove the padding from the surrounding sentences
# Eg. if context starts at beginning of paragraph, before is all padding
intermediate_tensor = tf.reduce_sum(tf.abs(targets), 1)
zero_vector = tf.zeros(shape=(1, 1), dtype=tf.int64)
bool_mask = tf.squeeze(tf.not_equal(intermediate_tensor, zero_vector))
bool_mask.set_shape([None])
targets = tf.boolean_mask(targets, bool_mask)
# Randomly select 4 targets
# We will also select the label_types for each selected target
indices = tf.range(0, limit=tf.shape(targets)[0], dtype=tf.int32)
shuffled_indices = tf.random.shuffle(indices)[:4]
targets = tf.gather(targets, shuffled_indices)
full_labels = tf.concat([tf.range(3, -1, -1), tf.range(4, 8)], axis=0)
label_types = tf.boolean_mask(full_labels, bool_mask)
label_types = tf.gather(label_types, shuffled_indices)
# create inputs
bert_inputs = []
input_masks = []
segment_ids = []
for i in range(4):
target_non_zero = tf.where(
tf.not_equal(targets[i], tf.zeros_like(targets[i])))
targets_stripped = tf.gather_nd(targets[i], target_non_zero)
segment_id = tf.concat([
tf.zeros_like(CLS_ID, dtype=tf.int64),
tf.zeros_like(context_gathered),
tf.zeros_like(SEP_ID, dtype=tf.int64),
tf.ones_like(targets_stripped),
tf.ones_like(SEP_ID, dtype=tf.int64)
],
axis=0)
segment_id = pad_and_cut(segment_id, FLAGS.max_seq_length)
segment_ids.append(segment_id)
new_input = tf.concat(
[CLS_ID, context_gathered, SEP_ID, targets_stripped, SEP_ID], axis=0)
input_mask = tf.ones_like(new_input)
input_mask = pad_and_cut(input_mask, FLAGS.max_seq_length)
input_masks.append(input_mask)
padded_new_input = pad_and_cut(new_input, FLAGS.max_seq_length)
bert_inputs.append(padded_new_input)
bert_inputs = tf.stack(bert_inputs, axis=0)
input_masks = tf.stack(input_masks, axis=0)
segment_ids = tf.stack(segment_ids, axis=0)
out = Outputs_And_Context(bert_inputs, input_masks, segment_ids, label_types,
context_gathered)
return out
def file_based_input_fn_builder(input_file, is_training, drop_remainder,
add_masking):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
input_file = input_file.split(",")
expanded_files = []
for infile in input_file:
try:
sharded_files = tf.io.gfile.glob(infile)
expanded_files.append(sharded_files)
except tf.errors.OpError:
expanded_files.append(infile)
name_to_features = {
"sents":
tf.FixedLenFeature([FLAGS.max_para_length * FLAGS.max_sent_length],
tf.int64)
}
def _decode_record(record, name_to_features, vocab_table):
"""Decodes a record to a TensorFlow example."""
target_example = tf.parse_single_example(record[0], name_to_features)
target_example = tf.reshape(target_example["sents"],
[FLAGS.max_para_length, FLAGS.max_sent_length])
# distractor_examples = []
# for rec in record[1:]:
# distractor_examples.append(
# tf.reshape(
# tf.parse_single_example(rec, name_to_features)["sents"],
# [FLAGS.max_para_length, FLAGS.max_sent_length]))
# This is an unfortunate hack but is necessary to get around a TF error.
dist0 = tf.reshape(
tf.parse_single_example(record[1], name_to_features)["sents"],
[FLAGS.max_para_length, FLAGS.max_sent_length])
dist1 = tf.reshape(
tf.parse_single_example(record[2], name_to_features)["sents"],
[FLAGS.max_para_length, FLAGS.max_sent_length])
dist2 = tf.reshape(
tf.parse_single_example(record[3], name_to_features)["sents"],
[FLAGS.max_para_length, FLAGS.max_sent_length])
dist3 = tf.reshape(
tf.parse_single_example(record[4], name_to_features)["sents"],
[FLAGS.max_para_length, FLAGS.max_sent_length])
inputs_obj = build_bert_inputs(target_example)
distractor_obj = build_distractors([dist0, dist1, dist2, dist3],
inputs_obj.context)
example = {}
example["input_ids"] = tf.concat(
[inputs_obj.input_ids, distractor_obj.input_ids], axis=0)
example["input_mask"] = tf.concat(
[inputs_obj.input_mask, distractor_obj.input_mask], axis=0)
example["segment_ids"] = tf.concat(
[inputs_obj.segment_ids, distractor_obj.segment_ids], axis=0)
example["label_types"] = inputs_obj.label_types
# Add masking:
if add_masking:
mask_rate = FLAGS.mask_rate
max_predictions_per_seq = int(math.ceil(FLAGS.max_seq_length * mask_rate))
cls_token = "[CLS]"
sep_token = "[SEP]"
mask_token = "[MASK]"
# pad_token = "[PAD]"
mask_blacklist = tf.constant([cls_token, sep_token]) # , pad_token])
mask_blacklist_ids = tf.to_int32(vocab_table.lookup(mask_blacklist))
mask_token_id = tf.to_int32(vocab_table.lookup(tf.constant(mask_token)))
input_ids = tf.to_int32(example["input_ids"])
def call_sample_mask_indices(x):
return ip.sample_mask_indices(x, mask_rate, mask_blacklist_ids,
max_predictions_per_seq)
mask_indices = tf.map_fn(
call_sample_mask_indices, input_ids, dtype=tf.int32)
def call_get_target_tokens(x):
input_len = tf.shape(input_ids)[-1]
x_input_id = x[:input_len]
x_mask_indices = x[input_len:]
return ip.get_target_tokens_for_apply(x_input_id, x_mask_indices)
map_input = tf.concat([input_ids, mask_indices], -1)
target_token_ids = tf.map_fn(call_get_target_tokens, map_input)
def call_apply_masking(x):
input_len = tf.shape(input_ids)[-1]
mask_idx_len = tf.shape(mask_indices)[-1]
x_input_id = x[:input_len]
x_mask_indices = x[input_len:input_len + mask_idx_len]
x_target_token_ids = x[input_len + mask_idx_len:]
return ip.apply_masking(x_input_id, x_target_token_ids, x_mask_indices,
mask_token_id, 1000)
map_input2 = tf.concat([input_ids, mask_indices, target_token_ids], -1)
token_ids_masked = tf.map_fn(call_apply_masking, tf.to_int64(map_input2))
target_token_weights = tf.ones_like(target_token_ids, dtype=tf.float32)
pad_targets = tf.where(
tf.equal(target_token_ids, 0),
tf.ones_like(target_token_ids, dtype=tf.float32),
tf.zeros_like(target_token_ids, dtype=tf.float32))
target_token_weights = target_token_weights - pad_targets
example["target_token_weights"] = target_token_weights
example["target_token_ids"] = target_token_ids
example["input_ids"] = token_ids_masked
example["mask_indices"] = mask_indices
# Set shape explicitly for TPU
example["target_token_weights"].set_shape(
[FLAGS.num_choices, max_predictions_per_seq])
example["target_token_ids"].set_shape(
[FLAGS.num_choices, max_predictions_per_seq])
example["mask_indices"].set_shape(
[FLAGS.num_choices, max_predictions_per_seq])
# Set shape explicitly for TPU
example["input_ids"].set_shape([FLAGS.num_choices, FLAGS.max_seq_length])
example["input_mask"].set_shape([FLAGS.num_choices, FLAGS.max_seq_length])
example["segment_ids"].set_shape([FLAGS.num_choices, FLAGS.max_seq_length])
example["label_types"].set_shape([4])
example["label_ids"] = tf.scatter_nd(
tf.reshape(example["label_types"], [4, 1]), tf.range(4), [8])
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()): # pylint: disable=g-builtin-op
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
vocab_table = contrib_lookup.index_table_from_file(FLAGS.vocab_file)
if len(expanded_files) == 1:
d = tf.data.TFRecordDataset(expanded_files[0])
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=256)
else:
dataset_list = [
tf.data.TFRecordDataset(expanded_files[i])
for i in range(len(expanded_files))
]
if is_training:
dataset_list = [d.repeat() for d in dataset_list]
dset_weights = [FLAGS.dataset_one_weight, 1 - FLAGS.dataset_one_weight]
d = tf.data.experimental.sample_from_datasets(dataset_list, dset_weights)
# Note that sample_from_datasets() inserts randomness into the training
# An alternative would be to use choose_from_datasets() but then the
      # order must be stated explicitly which is less intuitive for unbalanced
# datasets. Example below:
#
# choice_dataset = tf.data.Dataset.range(len(dataset_list)).repeat()
# d = tf.data.experimental.choose_from_datasets(dataset_list,
# choice_dataset)
if is_training:
d = d.shuffle(buffer_size=256)
# The window size will be for selecting negative samples
# It equals the number of documents to sample from -1
d = d.apply(
contrib_data.sliding_window_batch(
window_size=FLAGS.data_window_size,
window_shift=FLAGS.data_window_shift))
d = d.apply(
tf.data.experimental.map_and_batch(
lambda record: _decode_record(record, name_to_features, vocab_table
),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings, num_choices, add_masking):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = tf.reshape(features["input_ids"], [-1, FLAGS.max_seq_length])
input_mask = tf.reshape(features["input_mask"], [-1, FLAGS.max_seq_length])
segment_ids = tf.reshape(features["segment_ids"],
[-1, FLAGS.max_seq_length])
label_types = features["label_types"]
label_ids = features["label_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
is_real_example = tf.reduce_sum(tf.one_hot(label_types, 8), axis=1)
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
(cpc_loss, _, logits, probabilities) = model_builder.create_model(
model,
label_ids,
label_types,
FLAGS.train_batch_size if is_training else FLAGS.eval_batch_size,
num_choices,
use_tpu,
FLAGS.add_lv2loss,
margin=float(FLAGS.margin))
if add_masking:
mask_rate = FLAGS.mask_rate # search alternatives?
max_predictions_per_seq = int(math.ceil(FLAGS.max_seq_length * mask_rate))
masked_lm_positions = tf.reshape(features["mask_indices"],
[-1, max_predictions_per_seq])
masked_lm_ids = tf.reshape(features["target_token_ids"],
[-1, max_predictions_per_seq])
masked_lm_weights = tf.reshape(features["target_token_weights"],
[-1, max_predictions_per_seq])
(masked_lm_loss, _,
_) = model_builder.get_masked_lm_output(bert_config,
model.get_sequence_output(),
model.get_embedding_table(),
masked_lm_positions,
masked_lm_ids, masked_lm_weights)
total_loss = cpc_loss + masked_lm_loss
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(total_loss, learning_rate,
num_train_steps,
num_warmup_steps, use_tpu)
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(cpc_loss, mlm_loss, label_ids, logits, is_real_example):
"""Collect metrics for function."""
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
cpc_loss_metric = tf.metrics.mean(values=cpc_loss)
mlm_loss_metric = tf.metrics.mean(values=mlm_loss)
metric_dict = {
"eval_accuracy": accuracy,
"eval_cpc_loss": cpc_loss_metric,
"eval_mlm_loss": mlm_loss_metric
}
for i in range(8):
metric_dict["acc" + str(i)] = tf.metrics.accuracy(
labels=label_ids[:, i],
predictions=predictions[:, i],
weights=is_real_example[:, i])
return metric_dict
eval_metrics = (metric_fn, [
cpc_loss, masked_lm_loss, label_ids, logits, is_real_example
])
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode,
predictions={"probabilities": probabilities},
scaffold_fn=scaffold_fn)
return output_spec
return model_fn
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_eval:
raise ValueError("At least one of `do_train`, `do_eval` must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.gfile.MakeDirs(FLAGS.output_dir)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = contrib_tpu.InputPipelineConfig.PER_HOST_V2
run_config = contrib_tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=contrib_tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
num_train_steps = int(FLAGS.train_data_size / FLAGS.train_batch_size)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu,
num_choices=FLAGS.num_choices,
add_masking=FLAGS.include_mlm)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = contrib_tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
tf.logging.info("***** Running training *****")
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=FLAGS.train_file,
is_training=True,
drop_remainder=True,
add_masking=FLAGS.include_mlm)
estimator.train(input_fn=train_input_fn, steps=num_train_steps)
if FLAGS.do_eval:
# This tells the estimator to run through the entire set.
if FLAGS.eval_data_size < 0:
eval_steps = None
else:
eval_steps = int(FLAGS.eval_data_size / FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
# Note that we are masking inputs for eval as well as training and this will
# decrease eval performance
eval_input_fn = file_based_input_fn_builder(
input_file=FLAGS.eval_file,
is_training=False,
drop_remainder=eval_drop_remainder,
add_masking=FLAGS.include_mlm)
# checkpoints_iterator blocks until a new checkpoint appears.
for ckpt in contrib_training.checkpoints_iterator(estimator.model_dir):
try:
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
tf.logging.info("********** Eval results:*******\n")
for key in sorted(result.keys()):
tf.logging.info("%s = %s" % (key, str(result[key])))
except tf.errors.NotFoundError:
tf.logging.error("Checkpoint path '%s' no longer exists.", ckpt)
if __name__ == "__main__":
flags.mark_flag_as_required("eval_file")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
app.run(main)
|
dp/cloud/python/magma/db_service/tests/db_testcase.py | nstng/magma | 539 | 12755521 | <filename>dp/cloud/python/magma/db_service/tests/db_testcase.py
import unittest
from typing import Dict, Optional
import sqlalchemy.engine
from magma.db_service.config import TestConfig
from magma.db_service.models import Base
from magma.db_service.session_manager import Session
from sqlalchemy import MetaData, create_engine
class DBTestCaseBlueprint(unittest.TestCase):
metadata: MetaData
engine: sqlalchemy.engine.Engine
session: Session
@classmethod
def drop_all(cls):
cls.metadata.drop_all()
@classmethod
def create_all(cls):
cls.metadata.create_all()
@classmethod
def setMetadata(cls, metadata: MetaData = Base.metadata):
cls.metadata = metadata
@classmethod
def setUpClass(cls) -> None:
cls.setMetadata(metadata=Base.metadata)
@classmethod
def set_up_db_test_case(cls, **kwargs: Optional[Dict]):
cls.engine = cls.get_test_db_engine(**kwargs)
cls.session = Session(bind=cls.engine)
cls.bind_engine()
@staticmethod
def get_test_db_engine(**kwargs) -> sqlalchemy.engine.Engine:
config = TestConfig()
return create_engine(
url=kwargs.get("SQLALCHEMY_DB_URI", config.SQLALCHEMY_DB_URI),
encoding=kwargs.get("SQLALCHEMY_DB_ENCODING", config.SQLALCHEMY_DB_ENCODING),
echo=False,
future=kwargs.get("SQLALCHEMY_FUTURE", config.SQLALCHEMY_FUTURE),
)
@classmethod
def bind_engine(cls):
cls.metadata.bind = cls.engine
@classmethod
def close_session(cls):
cls.session.rollback()
cls.session.close()
class BaseDBTestCase(DBTestCaseBlueprint):
def setUp(self):
self.set_up_db_test_case()
self.create_all()
def tearDown(self):
self.close_session()
self.drop_all()
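# Hedged usage sketch (the model name below is hypothetical, not part of this
# module):
#   class SomeModelTest(BaseDBTestCase):
#       def test_insert(self):
#           self.session.add(SomeModel(field="value"))
#           self.session.commit()
#           self.assertEqual(self.session.query(SomeModel).count(), 1)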
|
research/feelvos/utils/video_input_generator.py | 873040/Abhishek | 153 | 12755530 | <reponame>873040/Abhishek
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper for providing semantic segmentation video data."""
import tensorflow as tf
from feelvos import input_preprocess
from feelvos import model
from feelvos.utils import mask_damaging
from feelvos.utils import train_utils
slim = tf.contrib.slim
dataset_data_provider = slim.dataset_data_provider
MIN_LABEL_COUNT = 10
def decode_image_sequence(tensor, image_format='jpeg', shape=None,
channels=3, raw_dtype=tf.uint8):
"""Decodes a sequence of images.
Args:
tensor: the tensor of strings to decode, shape: [num_images]
image_format: a string (possibly tensor) with the format of the image.
Options include 'jpeg', 'png', and 'raw'.
shape: a list or tensor of the decoded image shape for a single image.
channels: if 'shape' is None, the third dimension of the image is set to
this value.
raw_dtype: if the image is encoded as raw bytes, this is the method of
decoding the bytes into values.
Returns:
The decoded images with shape [time, height, width, channels].
"""
handler = slim.tfexample_decoder.Image(
shape=shape, channels=channels, dtype=raw_dtype, repeated=True)
return handler.tensors_to_item({'image/encoded': tensor,
'image/format': image_format})
def _get_data(data_provider, dataset_split, video_frames_are_decoded):
"""Gets data from data provider.
Args:
data_provider: An object of slim.data_provider.
dataset_split: Dataset split.
video_frames_are_decoded: Boolean, whether the video frames are already
decoded
Returns:
image: Image Tensor.
label: Label Tensor storing segmentation annotations.
object_label: An integer refers to object_label according to labelmap. If
the example has more than one object_label, take the first one.
image_name: Image name.
height: Image height.
width: Image width.
video_id: String tensor representing the name of the video.
Raises:
ValueError: Failed to find label.
"""
if video_frames_are_decoded:
image, = data_provider.get(['image'])
else:
image, = data_provider.get(['image/encoded'])
# Some datasets do not contain image_name.
if 'image_name' in data_provider.list_items():
image_name, = data_provider.get(['image_name'])
else:
image_name = tf.constant('')
height, width = data_provider.get(['height', 'width'])
label = None
if dataset_split != 'test':
if video_frames_are_decoded:
if 'labels_class' not in data_provider.list_items():
raise ValueError('Failed to find labels.')
label, = data_provider.get(['labels_class'])
else:
key = 'segmentation/object/encoded'
if key not in data_provider.list_items():
raise ValueError('Failed to find labels.')
label, = data_provider.get([key])
object_label = None
video_id, = data_provider.get(['video_id'])
return image, label, object_label, image_name, height, width, video_id
def _has_foreground_and_background_in_first_frame(label, subsampling_factor):
"""Checks if the labels have foreground and background in the first frame.
Args:
label: Label tensor of shape [num_frames, height, width, 1].
subsampling_factor: Integer, the subsampling factor.
Returns:
Boolean, whether the labels have foreground and background in the first
frame.
"""
h, w = train_utils.resolve_shape(label)[1:3]
label_downscaled = tf.squeeze(
tf.image.resize_nearest_neighbor(label[0, tf.newaxis],
[h // subsampling_factor,
w // subsampling_factor],
align_corners=True),
axis=0)
is_bg = tf.equal(label_downscaled, 0)
is_fg = tf.logical_not(is_bg)
  # Just using reduce_any was not robust enough, so let's make sure the count
# is above MIN_LABEL_COUNT.
fg_count = tf.reduce_sum(tf.cast(is_fg, tf.int32))
bg_count = tf.reduce_sum(tf.cast(is_bg, tf.int32))
has_bg = tf.greater_equal(fg_count, MIN_LABEL_COUNT)
has_fg = tf.greater_equal(bg_count, MIN_LABEL_COUNT)
return tf.logical_and(has_bg, has_fg)
def _has_foreground_and_background_in_first_frame_2(label,
decoder_output_stride):
"""Checks if the labels have foreground and background in the first frame.
Second attempt, this time we use the actual output dimension for resizing.
Args:
label: Label tensor of shape [num_frames, height, width, 1].
decoder_output_stride: Integer, the stride of the decoder output.
Returns:
Boolean, whether the labels have foreground and background in the first
frame.
"""
h, w = train_utils.resolve_shape(label)[1:3]
h_sub = model.scale_dimension(h, 1.0 / decoder_output_stride)
w_sub = model.scale_dimension(w, 1.0 / decoder_output_stride)
label_downscaled = tf.squeeze(
tf.image.resize_nearest_neighbor(label[0, tf.newaxis], [h_sub, w_sub],
align_corners=True), axis=0)
is_bg = tf.equal(label_downscaled, 0)
is_fg = tf.logical_not(is_bg)
  # Just using reduce_any was not robust enough, so let's make sure the count
# is above MIN_LABEL_COUNT.
fg_count = tf.reduce_sum(tf.cast(is_fg, tf.int32))
bg_count = tf.reduce_sum(tf.cast(is_bg, tf.int32))
has_bg = tf.greater_equal(fg_count, MIN_LABEL_COUNT)
has_fg = tf.greater_equal(bg_count, MIN_LABEL_COUNT)
return tf.logical_and(has_bg, has_fg)
def _has_enough_pixels_of_each_object_in_first_frame(
label, decoder_output_stride):
"""Checks if for each object (incl. background) enough pixels are visible.
During test time, we will usually not see a reference frame in which only
very few pixels of one object are visible. These cases can be problematic
during training, especially if more than the 1-nearest neighbor is used.
That's why this function can be used to detect and filter these cases.
Args:
label: Label tensor of shape [num_frames, height, width, 1].
decoder_output_stride: Integer, the stride of the decoder output.
Returns:
Boolean, whether the labels have enough pixels of each object in the first
frame.
"""
h, w = train_utils.resolve_shape(label)[1:3]
h_sub = model.scale_dimension(h, 1.0 / decoder_output_stride)
w_sub = model.scale_dimension(w, 1.0 / decoder_output_stride)
label_downscaled = tf.squeeze(
tf.image.resize_nearest_neighbor(label[0, tf.newaxis], [h_sub, w_sub],
align_corners=True), axis=0)
_, _, counts = tf.unique_with_counts(
tf.reshape(label_downscaled, [-1]))
has_enough_pixels_per_object = tf.reduce_all(
tf.greater_equal(counts, MIN_LABEL_COUNT))
return has_enough_pixels_per_object
def get(dataset,
num_frames_per_video,
crop_size,
batch_size,
min_resize_value=None,
max_resize_value=None,
resize_factor=None,
min_scale_factor=1.,
max_scale_factor=1.,
scale_factor_step_size=0,
preprocess_image_and_label=True,
num_readers=1,
num_threads=1,
dataset_split=None,
is_training=True,
model_variant=None,
batch_capacity_factor=32,
video_frames_are_decoded=False,
decoder_output_stride=None,
first_frame_finetuning=False,
sample_only_first_frame_for_finetuning=False,
sample_adjacent_and_consistent_query_frames=False,
remap_labels_to_reference_frame=True,
generate_prev_frame_mask_by_mask_damaging=False,
three_frame_dataset=False,
add_prev_frame_label=True):
"""Gets the dataset split for semantic segmentation.
This functions gets the dataset split for semantic segmentation. In
particular, it is a wrapper of (1) dataset_data_provider which returns the raw
dataset split, (2) input_preprcess which preprocess the raw data, and (3) the
Tensorflow operation of batching the preprocessed data. Then, the output could
be directly used by training, evaluation or visualization.
Args:
dataset: An instance of slim Dataset.
num_frames_per_video: The number of frames used per video
crop_size: Image crop size [height, width].
batch_size: Batch size.
min_resize_value: Desired size of the smaller image side.
max_resize_value: Maximum allowed size of the larger image side.
resize_factor: Resized dimensions are multiple of factor plus one.
min_scale_factor: Minimum scale factor value.
max_scale_factor: Maximum scale factor value.
scale_factor_step_size: The step size from min scale factor to max scale
factor. The input is randomly scaled based on the value of
(min_scale_factor, max_scale_factor, scale_factor_step_size).
preprocess_image_and_label: Boolean variable specifies if preprocessing of
image and label will be performed or not.
num_readers: Number of readers for data provider.
num_threads: Number of threads for batching data.
dataset_split: Dataset split.
is_training: Is training or not.
model_variant: Model variant (string) for choosing how to mean-subtract the
images. See feature_extractor.network_map for supported model variants.
batch_capacity_factor: Batch capacity factor affecting the training queue
batch capacity.
video_frames_are_decoded: Boolean, whether the video frames are already
decoded
decoder_output_stride: Integer, the stride of the decoder output.
first_frame_finetuning: Boolean, whether to only sample the first frame
for fine-tuning.
sample_only_first_frame_for_finetuning: Boolean, whether to only sample the
first frame during fine-tuning. This should be False when using lucid or
wonderland data, but true when fine-tuning on the first frame only.
Only has an effect if first_frame_finetuning is True.
sample_adjacent_and_consistent_query_frames: Boolean, if true, the query
frames (all but the first frame which is the reference frame) will be
sampled such that they are adjacent video frames and have the same
crop coordinates and flip augmentation.
remap_labels_to_reference_frame: Boolean, whether to remap the labels of
the query frames to match the labels of the (downscaled) reference frame.
If a query frame contains a label which is not present in the reference,
it will be mapped to background.
generate_prev_frame_mask_by_mask_damaging: Boolean, whether to generate
the masks used as guidance from the previous frame by damaging the
ground truth mask.
three_frame_dataset: Boolean, whether the dataset has exactly three frames
per video of which the first is to be used as reference and the two
others are consecutive frames to be used as query frames.
add_prev_frame_label: Boolean, whether to sample one more frame before the
first query frame to obtain a previous frame label. Only has an effect,
if sample_adjacent_and_consistent_query_frames is True and
generate_prev_frame_mask_by_mask_damaging is False.
Returns:
A dictionary of batched Tensors for semantic segmentation.
Raises:
ValueError: dataset_split is None, or Failed to find labels.
"""
if dataset_split is None:
raise ValueError('Unknown dataset split.')
if model_variant is None:
tf.logging.warning('Please specify a model_variant. See '
'feature_extractor.network_map for supported model '
'variants.')
data_provider = dataset_data_provider.DatasetDataProvider(
dataset,
num_readers=num_readers,
num_epochs=None if is_training else 1,
shuffle=is_training)
image, label, object_label, image_name, height, width, video_id = _get_data(
data_provider, dataset_split, video_frames_are_decoded)
sampling_is_valid = tf.constant(True)
if num_frames_per_video is not None:
total_num_frames = tf.shape(image)[0]
if first_frame_finetuning or three_frame_dataset:
if sample_only_first_frame_for_finetuning:
assert not sample_adjacent_and_consistent_query_frames, (
'this option does not make sense for sampling only first frame.')
# Sample the first frame num_frames_per_video times.
sel_indices = tf.tile(tf.constant(0, dtype=tf.int32)[tf.newaxis],
multiples=[num_frames_per_video])
else:
if sample_adjacent_and_consistent_query_frames:
if add_prev_frame_label:
num_frames_per_video += 1
# Since this is first frame fine-tuning, we'll for now assume that
# each sequence has exactly 3 images: the ref frame and 2 adjacent
# query frames.
assert num_frames_per_video == 3
with tf.control_dependencies([tf.assert_equal(total_num_frames, 3)]):
sel_indices = tf.constant([1, 2], dtype=tf.int32)
else:
# Sample num_frames_per_video - 1 query frames which are not the
# first frame.
sel_indices = tf.random_shuffle(
tf.range(1, total_num_frames))[:(num_frames_per_video - 1)]
# Concat first frame as reference frame to the front.
sel_indices = tf.concat([tf.constant(0, dtype=tf.int32)[tf.newaxis],
sel_indices], axis=0)
else:
if sample_adjacent_and_consistent_query_frames:
if add_prev_frame_label:
# Sample one more frame which we can use to provide initial softmax
# feedback.
num_frames_per_video += 1
ref_idx = tf.random_shuffle(tf.range(total_num_frames))[0]
sampling_is_valid = tf.greater_equal(total_num_frames,
num_frames_per_video)
def sample_query_start_idx():
return tf.random_shuffle(
tf.range(total_num_frames - num_frames_per_video + 1))[0]
query_start_idx = tf.cond(sampling_is_valid, sample_query_start_idx,
lambda: tf.constant(0, dtype=tf.int32))
def sample_sel_indices():
return tf.concat(
[ref_idx[tf.newaxis],
tf.range(
query_start_idx,
query_start_idx + (num_frames_per_video - 1))], axis=0)
sel_indices = tf.cond(
sampling_is_valid, sample_sel_indices,
lambda: tf.zeros((num_frames_per_video,), dtype=tf.int32))
else:
# Randomly sample some frames from the video.
sel_indices = tf.random_shuffle(
tf.range(total_num_frames))[:num_frames_per_video]
image = tf.gather(image, sel_indices, axis=0)
if not video_frames_are_decoded:
image = decode_image_sequence(image)
if label is not None:
if num_frames_per_video is not None:
label = tf.gather(label, sel_indices, axis=0)
if not video_frames_are_decoded:
label = decode_image_sequence(label, image_format='png', channels=1)
# Sometimes, label is saved as [num_frames_per_video, height, width] or
# [num_frames_per_video, height, width, 1]. We change it to be
# [num_frames_per_video, height, width, 1].
if label.shape.ndims == 3:
label = tf.expand_dims(label, 3)
elif label.shape.ndims == 4 and label.shape.dims[3] == 1:
pass
else:
raise ValueError('Input label shape must be '
'[num_frames_per_video, height, width],'
' or [num_frames, height, width, 1]. '
'Got {}'.format(label.shape.ndims))
label.set_shape([None, None, None, 1])
# Add size of first dimension since tf can't figure it out automatically.
image.set_shape((num_frames_per_video, None, None, None))
if label is not None:
label.set_shape((num_frames_per_video, None, None, None))
preceding_frame_label = None
if preprocess_image_and_label:
if num_frames_per_video is None:
      raise ValueError('num_frames_per_video must be specified for preprocessing.')
original_images = []
images = []
labels = []
if sample_adjacent_and_consistent_query_frames:
num_frames_individual_preproc = 1
else:
num_frames_individual_preproc = num_frames_per_video
for frame_idx in range(num_frames_individual_preproc):
original_image_t, image_t, label_t = (
input_preprocess.preprocess_image_and_label(
image[frame_idx],
label[frame_idx],
crop_height=crop_size[0] if crop_size is not None else None,
crop_width=crop_size[1] if crop_size is not None else None,
min_resize_value=min_resize_value,
max_resize_value=max_resize_value,
resize_factor=resize_factor,
min_scale_factor=min_scale_factor,
max_scale_factor=max_scale_factor,
scale_factor_step_size=scale_factor_step_size,
ignore_label=dataset.ignore_label,
is_training=is_training,
model_variant=model_variant))
original_images.append(original_image_t)
images.append(image_t)
labels.append(label_t)
if sample_adjacent_and_consistent_query_frames:
imgs_for_preproc = [image[frame_idx] for frame_idx in
range(1, num_frames_per_video)]
labels_for_preproc = [label[frame_idx] for frame_idx in
range(1, num_frames_per_video)]
original_image_rest, image_rest, label_rest = (
input_preprocess.preprocess_images_and_labels_consistently(
imgs_for_preproc,
labels_for_preproc,
crop_height=crop_size[0] if crop_size is not None else None,
crop_width=crop_size[1] if crop_size is not None else None,
min_resize_value=min_resize_value,
max_resize_value=max_resize_value,
resize_factor=resize_factor,
min_scale_factor=min_scale_factor,
max_scale_factor=max_scale_factor,
scale_factor_step_size=scale_factor_step_size,
ignore_label=dataset.ignore_label,
is_training=is_training,
model_variant=model_variant))
original_images.extend(original_image_rest)
images.extend(image_rest)
labels.extend(label_rest)
assert len(original_images) == num_frames_per_video
assert len(images) == num_frames_per_video
assert len(labels) == num_frames_per_video
if remap_labels_to_reference_frame:
# Remap labels to indices into the labels of the (downscaled) reference
# frame, or 0, i.e. background, for labels which are not present
# in the reference.
reference_labels = labels[0][tf.newaxis]
h, w = train_utils.resolve_shape(reference_labels)[1:3]
embedding_height = model.scale_dimension(
h, 1.0 / decoder_output_stride)
embedding_width = model.scale_dimension(
w, 1.0 / decoder_output_stride)
reference_labels_embedding_size = tf.squeeze(
tf.image.resize_nearest_neighbor(
reference_labels, tf.stack([embedding_height, embedding_width]),
align_corners=True),
axis=0)
# Get sorted unique labels in the reference frame.
labels_in_ref_frame, _ = tf.unique(
tf.reshape(reference_labels_embedding_size, [-1]))
labels_in_ref_frame = tf.contrib.framework.sort(labels_in_ref_frame)
for idx in range(1, len(labels)):
ref_label_mask = tf.equal(
labels[idx],
labels_in_ref_frame[tf.newaxis, tf.newaxis, :])
remapped = tf.argmax(tf.cast(ref_label_mask, tf.uint8), axis=-1,
output_type=tf.int32)
# Set to 0 if label is not present
is_in_ref = tf.reduce_any(ref_label_mask, axis=-1)
remapped *= tf.cast(is_in_ref, tf.int32)
labels[idx] = remapped[..., tf.newaxis]
if sample_adjacent_and_consistent_query_frames:
if first_frame_finetuning and generate_prev_frame_mask_by_mask_damaging:
preceding_frame_label = mask_damaging.damage_masks(labels[1])
elif add_prev_frame_label:
# Discard the image of the additional frame and take the label as
# initialization for softmax feedback.
original_images = [original_images[0]] + original_images[2:]
preceding_frame_label = labels[1]
images = [images[0]] + images[2:]
labels = [labels[0]] + labels[2:]
num_frames_per_video -= 1
original_image = tf.stack(original_images, axis=0)
image = tf.stack(images, axis=0)
label = tf.stack(labels, axis=0)
else:
if label is not None:
# Need to set label shape due to batching.
label.set_shape([num_frames_per_video,
None if crop_size is None else crop_size[0],
None if crop_size is None else crop_size[1],
1])
original_image = tf.to_float(tf.zeros_like(label))
if crop_size is None:
height = tf.shape(image)[1]
width = tf.shape(image)[2]
else:
height = crop_size[0]
width = crop_size[1]
sample = {'image': image,
'image_name': image_name,
'height': height,
'width': width,
'video_id': video_id}
if label is not None:
sample['label'] = label
if object_label is not None:
sample['object_label'] = object_label
if preceding_frame_label is not None:
sample['preceding_frame_label'] = preceding_frame_label
if not is_training:
# Original image is only used during visualization.
sample['original_image'] = original_image
if is_training:
if first_frame_finetuning:
keep_input = tf.constant(True)
else:
keep_input = tf.logical_and(sampling_is_valid, tf.logical_and(
_has_enough_pixels_of_each_object_in_first_frame(
label, decoder_output_stride),
_has_foreground_and_background_in_first_frame_2(
label, decoder_output_stride)))
batched = tf.train.maybe_batch(sample,
keep_input=keep_input,
batch_size=batch_size,
num_threads=num_threads,
capacity=batch_capacity_factor * batch_size,
dynamic_pad=True)
else:
batched = tf.train.batch(sample,
batch_size=batch_size,
num_threads=num_threads,
capacity=batch_capacity_factor * batch_size,
dynamic_pad=True)
  # Flatten from [batch, num_frames_per_video, ...] to
  # [batch * num_frames_per_video, ...].
cropped_height = train_utils.resolve_shape(batched['image'])[2]
cropped_width = train_utils.resolve_shape(batched['image'])[3]
if num_frames_per_video is None:
first_dim = -1
else:
first_dim = batch_size * num_frames_per_video
batched['image'] = tf.reshape(batched['image'],
[first_dim, cropped_height, cropped_width, 3])
if label is not None:
batched['label'] = tf.reshape(batched['label'],
[first_dim, cropped_height, cropped_width, 1])
return batched
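# --- Hedged usage sketch (not part of the original file) ---------------------
# A minimal illustration of how the batched dictionary built above might be
# consumed. The builder function name `get` and the argument values are
# assumptions for illustration only; the returned keys and shapes follow from
# the code above.
#
#   samples = get(dataset,
#                 crop_size=[465, 465],
#                 batch_size=1,
#                 num_frames_per_video=3,
#                 dataset_split='train',
#                 is_training=True,
#                 model_variant='xception_65')
#   # samples['image']: [batch_size * num_frames_per_video, crop_h, crop_w, 3]
#   # samples['label']: [batch_size * num_frames_per_video, crop_h, crop_w, 1]
#   # plus 'image_name', 'height', 'width', 'video_id' and, when available,
#   # 'object_label' and 'preceding_frame_label'.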
|
gdmix-trainer/src/gdmix/io/dataset_metadata.py | Kostyansa/gdmix | 130 | 12755541 | <reponame>Kostyansa/gdmix
import tensorflow as tf
from gdmix.util.io_utils import read_json_file, namedtuple_with_defaults
class DatasetMetadata:
"""Abstract Metadata class from which all dataset metadata classes derive"""
# define mapping of dtype in meta data to dtype in TensorFlow
TO_TF_DTYPE = {
'int': tf.int32,
'long': tf.int64,
'float': tf.float32,
'double': tf.float64,
'bytes': tf.string,
'string': tf.string
}
TF_INT_DTYPES = {tf.int8, tf.uint8, tf.uint16, tf.uint32, tf.uint64, tf.int16, tf.int32, tf.int64}
FEATURES = "features"
LABELS = "labels"
INDICES = "indices"
VALUES = "values"
NUMBER_OF_TRAINING_SAMPLES = "numberOfTrainingSamples"
SUPPORTED_TYPES = frozenset(['int', 'long', 'float', 'double', 'bytes', 'string'])
METADATA_FIELDS = frozenset(["name", "dtype", "shape", "isSparse"])
METADATA_FIELD_DEFAULT_VALUES = (None, None, None, False)
MetadataInfo = namedtuple_with_defaults("MetadataInfo", METADATA_FIELDS, defaults=METADATA_FIELD_DEFAULT_VALUES)
def __init__(self, path_or_metadata):
"""
Take a metadata str or dict to build up the tensor metadata infos
:param path_or_metadata: Path to the metadata file or a JSON dict
corresponding to the metadata
"""
# ensure m is dict
if isinstance(path_or_metadata, str):
try:
path_or_metadata = read_json_file(path_or_metadata)
except Exception as err:
raise ("Input of type str must be a valid JSON file. {}".format(err))
# ensure features and labels are list
if not isinstance(path_or_metadata.get(self.FEATURES, []), list):
raise TypeError("Features must be a list. Type {} detected."
.format(type(path_or_metadata[self.FEATURES])))
if not isinstance(path_or_metadata.get(self.LABELS, []), list):
raise TypeError("Labels must be a list. Type {} detected."
.format(type(path_or_metadata[self.LABELS])))
def parseMetadata(key):
tensors = {}
for entity in path_or_metadata.get(key, []):
name = entity["name"]
# Check if there are duplicated names in the metadata
if name in tensors:
raise ValueError("Tensor name in your metadata appears more than once:{}".format(name))
tensors[name] = self._build_metadata_info(entity.copy())
return tensors
try:
feature_tensors = parseMetadata(self.FEATURES)
label_tensors = parseMetadata(self.LABELS)
except (TypeError, ValueError) as err:
raise ValueError("Invalid field: {}".format(err))
self._tensors = {**feature_tensors, **label_tensors}
self._features = list(feature_tensors.values())
self._labels = list(label_tensors.values())
self._feature_names = list(feature_tensors.keys())
self._label_names = list(label_tensors.keys())
        self._number_of_training_samples = path_or_metadata.get(self.NUMBER_OF_TRAINING_SAMPLES, -1)
@classmethod
def _build_metadata_info(cls, metadata_dict):
"""
Create namedtuple from metadata dict
:param metadata_dict: the metadata in dict form
:return: metadata namedtuple
"""
if not cls.METADATA_FIELDS.issubset(metadata_dict.keys()):
raise ValueError("Required metadata fields are {0}. "
"Proved fields are {1}".format(",".join(
cls.METADATA_FIELDS), ",".join(metadata_dict.keys())))
metadata_obj = cls.MetadataInfo(**metadata_dict)
if metadata_obj.name is None or not isinstance(metadata_obj.name, str):
raise ValueError("Feature name can not be None and must be str")
if metadata_obj.dtype not in cls.SUPPORTED_TYPES:
raise ValueError("User provided dtype '{}' is not supported. "
"Supported types are '{}'.".format(
metadata_obj.dtype, list(cls.SUPPORTED_TYPES)))
metadata_obj = metadata_obj._replace(dtype=cls.TO_TF_DTYPE[metadata_obj.dtype])
if metadata_obj.shape is None or not isinstance(metadata_obj.shape, list):
raise ValueError("Feature shape can not be None and must be a list")
return metadata_obj
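    # Hedged example (not part of the original file): a valid entry such as
    #   {"name": "weight", "dtype": "float", "shape": [10], "isSparse": False}
    # passes the checks above and has its dtype mapped to tf.float32.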
def get_features(self):
return self._features.copy()
def get_labels(self):
return self._labels.copy()
def get_label_names(self):
return self._label_names.copy()
def get_feature_names(self):
return self._feature_names.copy()
def get_feature_shape(self, feature_name):
return next(filter(lambda x: x.name == feature_name, self.get_features())).shape
def get_tensors(self):
return self._tensors.copy()
def get_number_of_training_samples(self):
return self._number_of_training_samples
@staticmethod
def map_int(in_dtype):
"""
TFRecord features only support three data types:
1. tf.float32
2. tf.int64
3. tf.string
This function maps int32 and int16 to int64 and
leave other types intact.
:param in_dtype: Input TF data type
:return: Mapped TF data type
"""
if in_dtype in DatasetMetadata.TF_INT_DTYPES:
return tf.int64
else:
return in_dtype
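# --- Hedged usage sketch (not part of the original file) ---------------------
# Minimal example of the metadata dictionary this class expects. The feature
# and label names, shapes and sample count below are illustrative assumptions.
if __name__ == "__main__":
    _example_metadata = {
        "features": [
            {"name": "global", "dtype": "float", "shape": [50], "isSparse": True},
        ],
        "labels": [
            {"name": "response", "dtype": "int", "shape": [], "isSparse": False},
        ],
        "numberOfTrainingSamples": 1000,
    }
    _meta = DatasetMetadata(_example_metadata)
    print(_meta.get_feature_names())               # ['global']
    print(_meta.get_feature_shape("global"))       # [50]
    print(_meta.get_number_of_training_samples())  # 1000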
|
utils/AttendanceCheck.py | datamonday/Face-Recognition-Class-Attendance-System | 105 | 12755559 | from datetime import datetime
import pandas as pd
import numpy as np
# Add the root directory (the directory containing execute) to sys.path
from utils.GlobalVar import add_path_to_sys
rootdir = add_path_to_sys()
# Import functions and variables related to attendance status judgment
from utils.GlobalVar import COURSE_TIME, LATE_SPAN
filenames = ['Auxiliary_Info.xlsx',
'Classroom_Course_Schedule.xlsx',
'Classroom_Info.xlsx',
'College_Class_Info.xlsx',
'Attendance_Logs.xlsx']
au_info = pd.read_excel(rootdir + '/development/' + filenames[0])
def calculate_current_teach_week(semester_first_week_date='2021-3-08 08:00:00'):
"""
    Compute the teaching week the current date belongs to. The idea is:
    (week of the year of the current date) - (week of the year of the semester's first week).
    ----
    param: semester_first_week_date: date of the first week of the semester, e.g. '2021-3-08 08:00:00'
    return: the current teaching week
"""
    # Week of the year for the given date, returned as a string
semester_first_week = datetime.strptime(semester_first_week_date, '%Y-%m-%d %H:%M:%S').strftime('%W')
    # Week of the year for the current date, returned as a string
current_year_week = datetime.now().strftime('%W')
    # Compute the teaching week the current date belongs to.
    # The "- 1" inside the parentheses counts the weeks before the first teaching week.
    # The final "+ 1" converts the zero-based week count into a one-based teaching week.
current_teach_week = int(current_year_week) - (int(semester_first_week) - 1) + 1
return current_teach_week
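# Hedged worked example (illustrative numbers only): if the semester's first
# week is week 10 of the year ('%W') and the current date falls in week 17,
# the function above returns 17 - (10 - 1) + 1 = 9.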
def holiday_judgment(judg_time=None, holidays=au_info['Holiday Date']):
"""
    Judge whether the given time falls on a holiday.
    ----
    param: judg_time: the datetime to check; defaults to the current time
    param: holidays: the holiday list of the current semester
    return:
    True if the date is a holiday, otherwise False.
"""
    # The table contains NaT values, which cause errors when iterating, so filter them out first.
    # Indexes of entries that are not NaT
indexes_without_nat = [(type(holiday) != type(pd.NaT)) for holiday in au_info['Holiday Date']]
    # Holiday list without NaT values
holidays_pure = list(holidays[indexes_without_nat])
    # Use the date portion of the time being checked; default to the current time.
    if judg_time is None:
        judg_time = datetime.now()
    judg_time_ymd = judg_time.date()
    # Flag indicating whether the date is a holiday
is_now_holiday = False
    # Iterate over the holiday list
for holiday in holidays_pure:
        # Extract the year, month and day of the current holiday
holiday_month_day = datetime(holiday.year, holiday.month, holiday.day)
        if judg_time_ymd == holiday_month_day.date():
is_now_holiday = True
if is_now_holiday:
print(f'[INFO] {judg_time_ymd} is Holiday!')
else:
print(f'[INFO] {judg_time_ymd} is not Holiday!')
return is_now_holiday
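# Hedged usage sketch (not part of the original file): check an explicit date
# against the holiday column loaded from Auxiliary_Info.xlsx.
#
#   holiday_judgment(judg_time=datetime(2021, 10, 1, 8, 0, 0))
#   # prints "[INFO] 2021-10-01 is Holiday!" (or "... is not Holiday!") and
#   # returns the corresponding boolean.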
def attendance_check(set_time='08:00:00'):
"""
    Note: comparison across midnight is problematic, e.g. attendance opens at 23:00
    and the check-in happens at 00:30, but this case does not occur in practice.
    Attendance status judgment: decide the attendance status relative to the configured time.
    The attendance time is set manually (simple), for example:
    - 1) Normal: check in within one hour before the configured attendance time
    - 2) Late: within 45 minutes after class starts
    - 3) Other: more than 45 minutes after class starts
    - 4) Absent: no check-in during class time
    - 5) Leave: read automatically from the leave system, or entered manually by the teacher
    ----
    param set_time: attendance time in 'HH:MM:SS' format, e.g. '19:00:00'
"""
    ####################### Custom parameters #####################
    # Normal: check in within one hour (3600 s) before the configured attendance time
normal_span = 60 * 60 # seconds
    # Duration of one class session (a double period)
course_time = COURSE_TIME # minutes
    # How long after class starts still counts as late
late_span = LATE_SPAN
########################################################
    # Get the full current timestamp
now = datetime.now()
    # Extract the current year, month and day, all as ints
judg_time = now
now_y = judg_time.year
now_m = judg_time.month
now_d = judg_time.day
    # Attendance status flag (values kept in Chinese: 正常=normal, 迟到=late, 其他=other, 旷课=absent)
att_state = '正常'
    # Build the configured attendance datetime
att_time = datetime.strptime(f'{now_y}-{now_m}-{now_d} {set_time}', '%Y-%m-%d %H:%M:%S')
    # Difference between the current time and the configured time
time_diff = now - att_time
# print(time_diff)
time_diff_days, time_diff_seconds = time_diff.days, time_diff.seconds
# print(time_diff_days, time_diff_seconds)
    # If time_diff_days is negative, the attendance time has not arrived yet; compute the time until it
if time_diff_days < 0:
        # Seconds in a day minus time_diff_seconds gives the seconds remaining until the attendance time
        time_span_att = 60 * 60 * 24 - time_diff_seconds
if time_span_att < normal_span:
att_state = '正常'
else:
            print(f'[INFO] Invalid! Please check in within one hour before the configured attendance time. The check-in window opens in {round((time_span_att - 60*60)/60, 2)} minutes!')
    # If time_diff_days is non-negative, the attendance time has passed; decide between late, other and absent
else:
        # Within late_span minutes after class starts counts as late
if time_diff_seconds - late_span * 60 <= 0:
att_state = '迟到'
elif (time_diff_seconds > late_span * 60) and (time_diff_seconds <= course_time * 60):
att_state = '其他'
            print('[INFO] The late window has passed; please contact the teacher to handle it!')
else:
att_state = '旷课'
    print(f'[INFO] Configured time: {att_time}, check-in time: {now}, attendance status: {att_state}')
return att_state |
python/federatedml/secure_information_retrieval/secure_information_retrieval_host.py | rubenlozanoaht3m/DataDogm | 715 | 12755572 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.secure_information_retrieval.base_secure_information_retrieval import \
BaseSecureInformationRetrieval
from federatedml.param.sir_param import SecureInformationRetrievalParam
from federatedml.param.intersect_param import IntersectParam
from federatedml.secureprotol.oblivious_transfer.hauck_oblivious_transfer.hauck_oblivious_transfer_sender import \
HauckObliviousTransferSender
from federatedml.secureprotol.symmetric_encryption.py_aes_encryption import AESEncryptKey
from federatedml.secureprotol.symmetric_encryption.cryptor_executor import CryptoExecutor
from federatedml.statistic import data_overview
from federatedml.statistic.intersect import DhIntersectionHost
from federatedml.util import consts, abnormal_detection, LOGGER
MODEL_PARAM_NAME = 'SecureInformationRetrievalParam'
MODEL_META_NAME = 'SecureInformationRetrievalMeta'
class SecureInformationRetrievalHost(BaseSecureInformationRetrieval):
def __init__(self):
super(SecureInformationRetrievalHost, self).__init__()
self.oblivious_transfer = None
self.target_indexes = None
def _init_model(self, param: SecureInformationRetrievalParam):
self._init_base_model(param)
self.intersection_obj = DhIntersectionHost()
self.intersection_obj.role = consts.HOST
intersect_param = IntersectParam(dh_params=self.dh_params)
self.intersection_obj.load_params(intersect_param)
self.intersection_obj.host_party_id_list = self.component_properties.host_party_idlist
self.intersection_obj.guest_party_id = self.component_properties.guest_partyid
if self.model_param.oblivious_transfer_protocol == consts.OT_HAUCK.lower():
self.oblivious_transfer = HauckObliviousTransferSender()
else:
raise ValueError("SIR only supports Hauck's OT")
def fit(self, data_inst):
"""
:param data_inst: Table
:return:
"""
# LOGGER.info("data count = {}".format(data_inst.count()))
abnormal_detection.empty_table_detection(data_inst)
self._update_target_indexes(data_inst.schema)
match_data = data_inst
if data_overview.check_with_inst_id(data_inst):
match_data = self._recover_match_id(data_inst)
# 0. Raw retrieval
if self.model_param.raw_retrieval or self.security_level == 0:
LOGGER.info("enter raw information retrieval host")
# abnormal_detection.empty_table_detection(data_inst)
self._raw_information_retrieval(match_data)
self._display_result(block_num='N/A')
self._sync_coverage(data_inst)
return data_inst
# 1. Data pre-processing
LOGGER.info("enter secure information retrieval host")
# abnormal_detection.empty_table_detection(data_inst)
self._parse_security_level(match_data)
if not self._check_oblivious_transfer_condition():
self._failure_response()
# 2. Guest find intersection
self.intersection_obj.get_intersect_doubly_encrypted_id(match_data)
id_list_host_first = self.intersection_obj.id_list_local_first
# 3. Get the re-indexed doubly encrypted ID from guest
id_blocks = self._iteratively_get_id_blocks()
# 4. Restore value for the intersection
id_blocks = _restore_value(id_list_host_first,
id_blocks,
self.target_indexes,
self.need_label) # List[(Ei, val)]
LOGGER.info("interested values restored")
# 8. Execute OT as sender
LOGGER.info("enter oblivious transfer protocol as a sender")
key_list = self.oblivious_transfer.key_derivation(self.block_num)
LOGGER.info("oblivious transfer key derived")
# 9. Encrypt and transmit
self._non_committing_encrypt(id_blocks, key_list) # List[(Ei, Eval)]
LOGGER.info("non-committing encryption and transmission completed")
        # 10. Sync coverage and report the result
self._sync_coverage(data_inst)
self._display_result()
LOGGER.info("secure information retrieval finished")
return data_inst
def _sync_nonce_list(self, nonce, time):
self.transfer_variable.nonce_list.remote(nonce,
suffix=(time,),
role=consts.GUEST,
idx=0)
LOGGER.info("sent {}-th nonce to guest".format(time))
def _transmit_value_ciphertext(self, id_block, time):
self.transfer_variable.id_blocks_ciphertext.remote(id_block,
suffix=(time,),
role=consts.GUEST,
idx=0)
LOGGER.info("sent {}-th id block ciphertext to guest".format(time))
def _non_committing_encrypt(self, id_blocks, key_list):
"""
Use non-committing cipher to encrypt id blocks
:param id_blocks: List[(Ei, val)]
:param key_list: List[ObliviousTransferKey]
:return:
"""
for i in range(self.block_num):
if self.model_param.non_committing_encryption == consts.AES.lower():
aes_key = CryptoExecutor(AESEncryptKey(key_list[i].key))
else:
raise ValueError("only supports AES cipher for non-committing encryption")
self._transmit_value_ciphertext(aes_key.map_values_encrypt(id_blocks[i], mode=0), time=i)
self._sync_nonce_list(aes_key.get_nonce(), time=i)
block_confirm = self.transfer_variable.block_confirm.get(idx=0,
suffix=(i,))
if block_confirm:
continue
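    # Hedged note on the step above (not part of the original file): each id
    # block i is encrypted with the i-th key derived from the oblivious
    # transfer, and the ciphertext plus the AES nonce are sent to the guest.
    # Because the guest obtains only one of the derived keys during the OT
    # phase, it can decrypt just the single block that contains its target ids,
    # which keeps the retrieval oblivious to the host.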
def _update_target_indexes(self, schema):
self.need_label = self._check_need_label()
if self.need_label:
return
header = schema["header"]
target_indexes = []
for col_name in self.target_cols:
try:
i = header.index(col_name)
target_indexes.append(i)
except ValueError:
raise ValueError(f"{col_name} does not exist in table header. Please check.")
self.target_indexes = target_indexes
@staticmethod
def extract_value(instance, target_indexes, need_label):
if need_label:
return instance.label
features = [instance.features[i] for i in target_indexes]
return features
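    # Hedged example (not part of the original file): with target_indexes=[0, 2]
    # and need_label=False, extract_value returns [instance.features[0],
    # instance.features[2]]; with need_label=True it returns instance.label.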
def _sync_natural_indexation(self, id_list=None, time=None):
id_list_natural_indexation = self.transfer_variable.natural_indexation.get(idx=0,
suffix=(time,))
LOGGER.info(f"got naturally indexed block {time} from guest")
return id_list_natural_indexation
def _parse_security_level(self, data_instance):
self._sync_block_num()
def _sync_block_num(self):
self.block_num = self.transfer_variable.block_num.get(idx=0)
LOGGER.info("got block num {} from guest".format(self.block_num))
def _raw_information_retrieval(self, data_instance):
id_list_guest = self.transfer_variable.raw_id_list.get(idx=0)
LOGGER.info("got raw id list from guest")
target_indexes, need_label = self.target_indexes, self.need_label
id_intersect = data_instance.join(id_list_guest,
lambda v, u: SecureInformationRetrievalHost.extract_value(v,
target_indexes,
need_label))
self.transfer_variable.raw_value_list.remote(id_intersect,
role=consts.GUEST,
idx=0)
LOGGER.info("sent raw value list to guest")
# self._sync_coverage(data_instance)
def _sync_coverage(self, data_instance):
self.coverage = self.transfer_variable.coverage.get(idx=0) / data_instance.count()
LOGGER.info(f"got coverage {self.coverage} from guest")
def _iteratively_get_id_blocks(self):
"""
:return: List[Table]
"""
id_blocks = [None for _ in range(self.block_num)]
for i in range(self.block_num):
id_block = self._sync_natural_indexation(time=i) # get List[(Ei, -1)]
id_blocks[i] = id_block
return id_blocks
def _restore_value(id_list_host, id_blocks, target_indexes, need_label):
"""
:param id_list_host: (h, (Eh, Instance))
    :param id_blocks: List[(Ei, -1)]
    :param target_indexes: indexes of the requested feature columns
    :param need_label: whether the label, rather than features, is requested
    :return: List[Table], each mapping Ei to the restored values
"""
id_value_blocks = []
for i in range(len(id_blocks)):
restored_table = id_list_host.join(id_blocks[i],
lambda v, u:
SecureInformationRetrievalHost.extract_value(v[1],
target_indexes,
need_label))
id_value_blocks.append(restored_table)
return id_value_blocks
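# --- Hedged illustration (not part of the original file) ---------------------
# The join above keeps only the doubly-encrypted ids present in both tables and
# maps each match to the requested feature columns (or the label). With plain
# dicts instead of distributed tables, the same idea would look roughly like:
#
#   id_list_host = {'e1': ('Eh1', inst1), 'e2': ('Eh2', inst2)}
#   id_block     = {'e2': -1}
#   restored     = {k: SecureInformationRetrievalHost.extract_value(
#                          id_list_host[k][1], target_indexes, need_label)
#                   for k in id_block if k in id_list_host}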
|
docs/docs_env/Lib/sre.py | gilsonbp/Django-facebook | 6,989 | 12755577 | <gh_stars>1000+
"""This file is only retained for backwards compatibility.
It will be removed in the future. sre was moved to re in version 2.5.
"""
import warnings
warnings.warn("The sre module is deprecated, please import re.",
DeprecationWarning, 2)
from re import *
from re import __all__
# old pickles expect the _compile() reconstructor in this module
from re import _compile
|