max_stars_repo_path (string, lengths 4-245) | max_stars_repo_name (string, lengths 7-115) | max_stars_count (int64, 101-368k) | id (string, lengths 2-8) | content (string, lengths 6-1.03M)
---|---|---|---|---|
tests/messages_data/error_emails/missing_content_disposition.py | unqx/imap_tools | 344 | 11146157 | import datetime
from imap_tools import EmailAddress
DATA = dict(
subject='Redacted',
from_='<EMAIL>',
to=('<EMAIL>',),
cc=(),
bcc=(),
reply_to=(),
date=datetime.datetime(2002, 1, 22, 14, 35, 28, tzinfo=datetime.timezone.utc),
date_str='Tue, 22 Jan 2002 14:35:28 UT',
text='',
html='',
headers={'message-id': ('<<EMAIL>>',), 'content-type': ('multipart/related; boundary="_----------=_10117101281980"',), 'mime-version': ('1.0',), 'date': ('Tue, 22 Jan 2002 14:35:28 UT',), 'subject': ('Redacted',), 'to': ('<EMAIL>',), 'from': ('<EMAIL>',), 'return-path': ('<EMAIL>',)},
attachments=[],
from_values=EmailAddress('', '<EMAIL>', '<EMAIL>'),
to_values=(EmailAddress('', '<EMAIL>', '<EMAIL>'),),
cc_values=(),
bcc_values=(),
reply_to_values=(),
) |
test/integration/expected_out_single_line/simple_format.py | Inveracity/flynt | 487 | 11146190 | <gh_stars>100-1000
var = 5
a = f"my string {var:.2f}" |
hickle/legacy_v3/helpers.py | texadactyl/hickle | 402 | 11146207 | <filename>hickle/legacy_v3/helpers.py
import re
import six
def get_type_and_data(h_node):
""" Helper function to return the py_type and data block for a HDF node """
py_type = h_node.attrs["type"][0]
data = h_node[()]
# if h_node.shape == ():
# data = h_node.value
# else:
# data = h_node[:]
return py_type, data
def get_type(h_node):
""" Helper function to return the py_type for a HDF node """
py_type = h_node.attrs["type"][0]
return py_type
def sort_keys(key_list):
""" Take a list of strings and sort it by integer value within string
Args:
key_list (list): List of keys
Returns:
key_list_sorted (list): List of keys, sorted by integer
"""
# Py3 h5py returns an irritating KeysView object
# Py3 also complains about bytes and strings, convert all keys to bytes
if six.PY3:
key_list2 = []
for key in key_list:
if isinstance(key, str):
key = bytes(key, 'ascii')
key_list2.append(key)
key_list = key_list2
# Check which keys contain a number
numbered_keys = [re.search(br'\d+', key) for key in key_list]
# Sort the keys on number if they have it, or normally if not
if(len(key_list) and not numbered_keys.count(None)):
to_int = lambda x: int(re.search(br'\d+', x).group(0))
return(sorted(key_list, key=to_int))
else:
return(sorted(key_list))
def check_is_iterable(py_obj):
""" Check whether a python object is iterable.
Note: this treats unicode and string as NON ITERABLE
Args:
py_obj: python object to test
Returns:
        iter_ok (bool): True if the item is iterable, False if it is not
"""
if six.PY2:
string_types = (str, unicode)
else:
string_types = (str, bytes, bytearray)
if isinstance(py_obj, string_types):
return False
try:
iter(py_obj)
return True
except TypeError:
return False
def check_is_hashable(py_obj):
""" Check if a python object is hashable
Note: this function is currently not used, but is useful for future
development.
Args:
py_obj: python object to test
"""
try:
py_obj.__hash__()
return True
except TypeError:
return False
def check_iterable_item_type(iter_obj):
""" Check if all items within an iterable are the same type.
Args:
iter_obj: iterable object
Returns:
iter_type: type of item contained within the iterable. If
the iterable has many types, a boolean False is returned instead.
References:
http://stackoverflow.com/questions/13252333/python-check-if-all-elements-of-a-list-are-the-same-type
"""
iseq = iter(iter_obj)
try:
first_type = type(next(iseq))
except StopIteration:
return False
except Exception as ex:
return False
else:
return first_type if all((type(x) is first_type) for x in iseq) else False
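# --- Usage sketch (added for illustration, not part of the original file).
# Shows the behaviour of the helpers above on Python 3, where sort_keys
# converts str keys to bytes before sorting on the embedded integer. ---
if __name__ == '__main__':
    # keys with numbers are ordered numerically, not lexicographically
    assert sort_keys(['data_10', 'data_2', 'data_1']) == [b'data_1', b'data_2', b'data_10']
    # strings are deliberately treated as non-iterable
    assert not check_is_iterable('abc')
    assert check_is_iterable([1, 2, 3])
    # homogeneous iterables report their element type; mixed ones yield False
    assert check_iterable_item_type([1, 2, 3]) is int
    assert check_iterable_item_type([1, 2.0]) is False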
|
tools/jenkins-scripts/configs/cocostudiox-daily-build.py | wzhengsen/engine-x | 113 | 11146224 | <reponame>wzhengsen/engine-x
import os
import platform
if(os.path.exists('CocoStudio/CSX/CSX/CSX.pro') == False):
node_name = os.environ['NODE_NAME']
source_dir = '../../../cocostudiox-base-repo/node/' + node_name
if(platform.system() == 'Windows'):
source_dir = source_dir.replace("/", os.sep)
os.system("xcopy " + source_dir + ' . /E /Y /H')
else:
os.system("cp -r " + source_dir + "/. .")
os.system('git pull origin')
os.system('git submodule update --init --force')
node_name = os.environ['NODE_NAME']
os.chdir('CocoStudio/CSX/CSX')
ret = os.system('qmake -r')
if(ret == 0):
if(node_name == 'android_mac'):
ret = os.system('make -j8')
elif(node_name == 'win32_win7'):
ret = os.system('mingw32-make -j8')
os.chdir('../../..')
os.system('git clean -xdf')
os.system('git reset --hard')
print(ret)
if(ret > 0):
ret = 1
exit(ret)
|
tools/third_party/hyper/hyper/packages/rfc3986/uri.py | ziransun/wpt | 2,479 | 11146231 | <gh_stars>1000+
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Rackspace
# Copyright (c) 2015 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
from .compat import to_str
from .exceptions import InvalidAuthority, ResolutionError
from .misc import (
ABSOLUTE_URI_MATCHER, FRAGMENT_MATCHER, IPv4_MATCHER, PATH_MATCHER,
QUERY_MATCHER, SCHEME_MATCHER, SUBAUTHORITY_MATCHER, URI_MATCHER,
URI_COMPONENTS, merge_paths
)
from .normalizers import (
encode_component, normalize_scheme, normalize_authority, normalize_path,
normalize_query, normalize_fragment
)
class URIReference(namedtuple('URIReference', URI_COMPONENTS)):
slots = ()
def __new__(cls, scheme, authority, path, query, fragment,
encoding='utf-8'):
ref = super(URIReference, cls).__new__(
cls,
scheme or None,
authority or None,
path or None,
query or None,
fragment or None)
ref.encoding = encoding
return ref
def __eq__(self, other):
other_ref = other
if isinstance(other, tuple):
other_ref = URIReference(*other)
elif not isinstance(other, URIReference):
try:
other_ref = URIReference.from_string(other)
except TypeError:
raise TypeError(
'Unable to compare URIReference() to {0}()'.format(
type(other).__name__))
# See http://tools.ietf.org/html/rfc3986#section-6.2
naive_equality = tuple(self) == tuple(other_ref)
return naive_equality or self.normalized_equality(other_ref)
@classmethod
def from_string(cls, uri_string, encoding='utf-8'):
"""Parse a URI reference from the given unicode URI string.
:param str uri_string: Unicode URI to be parsed into a reference.
:param str encoding: The encoding of the string provided
:returns: :class:`URIReference` or subclass thereof
"""
uri_string = to_str(uri_string, encoding)
split_uri = URI_MATCHER.match(uri_string).groupdict()
return cls(split_uri['scheme'], split_uri['authority'],
encode_component(split_uri['path'], encoding),
encode_component(split_uri['query'], encoding),
encode_component(split_uri['fragment'], encoding), encoding)
def authority_info(self):
"""Returns a dictionary with the ``userinfo``, ``host``, and ``port``.
        If the authority is not valid, it will raise an ``InvalidAuthority``
        exception.
:returns:
``{'userinfo': 'username:password', 'host': 'www.example.com',
'port': '80'}``
:rtype: dict
:raises InvalidAuthority: If the authority is not ``None`` and can not
be parsed.
"""
if not self.authority:
return {'userinfo': None, 'host': None, 'port': None}
match = SUBAUTHORITY_MATCHER.match(self.authority)
if match is None:
# In this case, we have an authority that was parsed from the URI
# Reference, but it cannot be further parsed by our
# SUBAUTHORITY_MATCHER. In this case it must not be a valid
# authority.
raise InvalidAuthority(self.authority.encode(self.encoding))
# We had a match, now let's ensure that it is actually a valid host
# address if it is IPv4
matches = match.groupdict()
host = matches.get('host')
if (host and IPv4_MATCHER.match(host) and not
valid_ipv4_host_address(host)):
# If we have a host, it appears to be IPv4 and it does not have
# valid bytes, it is an InvalidAuthority.
raise InvalidAuthority(self.authority.encode(self.encoding))
return matches
@property
def host(self):
"""If present, a string representing the host."""
try:
authority = self.authority_info()
except InvalidAuthority:
return None
return authority['host']
@property
def port(self):
"""If present, the port (as a string) extracted from the authority."""
try:
authority = self.authority_info()
except InvalidAuthority:
return None
return authority['port']
@property
def userinfo(self):
"""If present, the userinfo extracted from the authority."""
try:
authority = self.authority_info()
except InvalidAuthority:
return None
return authority['userinfo']
def is_absolute(self):
"""Determine if this URI Reference is an absolute URI.
See http://tools.ietf.org/html/rfc3986#section-4.3 for explanation.
:returns: ``True`` if it is an absolute URI, ``False`` otherwise.
:rtype: bool
"""
return bool(ABSOLUTE_URI_MATCHER.match(self.unsplit()))
def is_valid(self, **kwargs):
"""Determines if the URI is valid.
:param bool require_scheme: Set to ``True`` if you wish to require the
presence of the scheme component.
:param bool require_authority: Set to ``True`` if you wish to require
the presence of the authority component.
:param bool require_path: Set to ``True`` if you wish to require the
presence of the path component.
:param bool require_query: Set to ``True`` if you wish to require the
presence of the query component.
:param bool require_fragment: Set to ``True`` if you wish to require
the presence of the fragment component.
:returns: ``True`` if the URI is valid. ``False`` otherwise.
:rtype: bool
"""
validators = [
(self.scheme_is_valid, kwargs.get('require_scheme', False)),
(self.authority_is_valid, kwargs.get('require_authority', False)),
(self.path_is_valid, kwargs.get('require_path', False)),
(self.query_is_valid, kwargs.get('require_query', False)),
(self.fragment_is_valid, kwargs.get('require_fragment', False)),
]
return all(v(r) for v, r in validators)
def _is_valid(self, value, matcher, require):
if require:
return (value is not None
and matcher.match(value))
# require is False and value is not None
return value is None or matcher.match(value)
def authority_is_valid(self, require=False):
"""Determines if the authority component is valid.
:param str require: Set to ``True`` to require the presence of this
component.
:returns: ``True`` if the authority is valid. ``False`` otherwise.
:rtype: bool
"""
try:
self.authority_info()
except InvalidAuthority:
return False
is_valid = self._is_valid(self.authority,
SUBAUTHORITY_MATCHER,
require)
# Ensure that IPv4 addresses have valid bytes
if is_valid and self.host and IPv4_MATCHER.match(self.host):
return valid_ipv4_host_address(self.host)
# Perhaps the host didn't exist or if it did, it wasn't an IPv4-like
# address. In either case, we want to rely on the `_is_valid` check,
# so let's return that.
return is_valid
def scheme_is_valid(self, require=False):
"""Determines if the scheme component is valid.
:param str require: Set to ``True`` to require the presence of this
component.
:returns: ``True`` if the scheme is valid. ``False`` otherwise.
:rtype: bool
"""
return self._is_valid(self.scheme, SCHEME_MATCHER, require)
def path_is_valid(self, require=False):
"""Determines if the path component is valid.
:param str require: Set to ``True`` to require the presence of this
component.
:returns: ``True`` if the path is valid. ``False`` otherwise.
:rtype: bool
"""
return self._is_valid(self.path, PATH_MATCHER, require)
def query_is_valid(self, require=False):
"""Determines if the query component is valid.
:param str require: Set to ``True`` to require the presence of this
component.
:returns: ``True`` if the query is valid. ``False`` otherwise.
:rtype: bool
"""
return self._is_valid(self.query, QUERY_MATCHER, require)
def fragment_is_valid(self, require=False):
"""Determines if the fragment component is valid.
:param str require: Set to ``True`` to require the presence of this
component.
:returns: ``True`` if the fragment is valid. ``False`` otherwise.
:rtype: bool
"""
return self._is_valid(self.fragment, FRAGMENT_MATCHER, require)
def normalize(self):
"""Normalize this reference as described in Section 6.2.2
This is not an in-place normalization. Instead this creates a new
URIReference.
:returns: A new reference object with normalized components.
:rtype: URIReference
"""
# See http://tools.ietf.org/html/rfc3986#section-6.2.2 for logic in
# this method.
return URIReference(normalize_scheme(self.scheme or ''),
normalize_authority(
(self.userinfo, self.host, self.port)),
normalize_path(self.path or ''),
normalize_query(self.query or ''),
normalize_fragment(self.fragment or ''))
def normalized_equality(self, other_ref):
"""Compare this URIReference to another URIReference.
:param URIReference other_ref: (required), The reference with which
we're comparing.
:returns: ``True`` if the references are equal, ``False`` otherwise.
:rtype: bool
"""
return tuple(self.normalize()) == tuple(other_ref.normalize())
def resolve_with(self, base_uri, strict=False):
"""Use an absolute URI Reference to resolve this relative reference.
Assuming this is a relative reference that you would like to resolve,
use the provided base URI to resolve it.
See http://tools.ietf.org/html/rfc3986#section-5 for more information.
:param base_uri: Either a string or URIReference. It must be an
absolute URI or it will raise an exception.
:returns: A new URIReference which is the result of resolving this
reference using ``base_uri``.
:rtype: :class:`URIReference`
:raises ResolutionError: If the ``base_uri`` is not an absolute URI.
"""
if not isinstance(base_uri, URIReference):
base_uri = URIReference.from_string(base_uri)
if not base_uri.is_absolute():
raise ResolutionError(base_uri)
# This is optional per
# http://tools.ietf.org/html/rfc3986#section-5.2.1
base_uri = base_uri.normalize()
# The reference we're resolving
resolving = self
if not strict and resolving.scheme == base_uri.scheme:
resolving = resolving.copy_with(scheme=None)
# http://tools.ietf.org/html/rfc3986#page-32
if resolving.scheme is not None:
target = resolving.copy_with(path=normalize_path(resolving.path))
else:
if resolving.authority is not None:
target = resolving.copy_with(
scheme=base_uri.scheme,
path=normalize_path(resolving.path)
)
else:
if resolving.path is None:
if resolving.query is not None:
query = resolving.query
else:
query = base_uri.query
target = resolving.copy_with(
scheme=base_uri.scheme,
authority=base_uri.authority,
path=base_uri.path,
query=query
)
else:
if resolving.path.startswith('/'):
path = normalize_path(resolving.path)
else:
path = normalize_path(
merge_paths(base_uri, resolving.path)
)
target = resolving.copy_with(
scheme=base_uri.scheme,
authority=base_uri.authority,
path=path,
query=resolving.query
)
return target
def unsplit(self):
"""Create a URI string from the components.
:returns: The URI Reference reconstituted as a string.
:rtype: str
"""
# See http://tools.ietf.org/html/rfc3986#section-5.3
result_list = []
if self.scheme:
result_list.extend([self.scheme, ':'])
if self.authority:
result_list.extend(['//', self.authority])
if self.path:
result_list.append(self.path)
if self.query:
result_list.extend(['?', self.query])
if self.fragment:
result_list.extend(['#', self.fragment])
return ''.join(result_list)
def copy_with(self, scheme=None, authority=None, path=None, query=None,
fragment=None):
attributes = {
'scheme': scheme,
'authority': authority,
'path': path,
'query': query,
'fragment': fragment,
}
for key, value in list(attributes.items()):
if value is None:
del attributes[key]
return self._replace(**attributes)
def valid_ipv4_host_address(host):
# If the host exists, and it might be IPv4, check each byte in the
# address.
return all([0 <= int(byte, base=10) <= 255 for byte in host.split('.')])
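# --- Usage sketch (added for illustration, not part of the original module).
# Because this file uses relative imports it has to be imported through its
# package; the import path below is an assumption based on the file location.
#
#     from hyper.packages.rfc3986.uri import URIReference
#
#     base = URIReference.from_string('http://example.com/a/b?x=1')
#     assert base.is_absolute() and base.host == 'example.com'
#     resolved = URIReference.from_string('../c').resolve_with(base)
#     assert resolved.unsplit() == 'http://example.com/c'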
|
tests/common/test_builder_state.py | sarthakpati/nncf | 310 | 11146241 | <reponame>sarthakpati/nncf<gh_stars>100-1000
"""
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Any
from typing import Dict
from typing import List
from typing import Union
from nncf import NNCFConfig
from nncf.api.compression import CompressionAlgorithmController
from nncf.api.compression import ModelType
from nncf.common.composite_compression import CompositeCompressionAlgorithmBuilder
from nncf.common.compression import BaseCompressionAlgorithmBuilder
from nncf.common.graph.transformations.layout import TransformationLayout
from nncf.common.utils.registry import Registry
STATE_ATTR = 'state'
DIFF_STATE_ATTR = STATE_ATTR + '__'
class A(BaseCompressionAlgorithmBuilder):
def __init__(self, config: NNCFConfig, should_init: bool = True, state_value: int = 1, name: str = 'A'):
setattr(self, Registry.REGISTERED_NAME_ATTR, name)
super().__init__(config, should_init)
self.state_value = state_value
def _load_state_without_name(self, state_without_name: Dict[str, Any]):
self.state_value = state_without_name.get(STATE_ATTR)
def _get_state_without_name(self) -> Dict[str, Any]:
return {STATE_ATTR: self.state_value}
def apply_to(self, model: ModelType) -> ModelType:
pass
def _build_controller(self, model: ModelType) -> CompressionAlgorithmController:
pass
def get_transformation_layout(self, model: ModelType) -> TransformationLayout:
pass
def initialize(self, model: ModelType) -> None:
pass
class CA(CompositeCompressionAlgorithmBuilder):
@property
def name(self) -> str:
pass
def add(self, child_builder) -> None:
self._child_builders.append(child_builder)
def apply_to(self, model: ModelType) -> ModelType:
pass
def build_controller(self, model: ModelType) -> CompressionAlgorithmController:
pass
def get_transformation_layout(self, model: ModelType) -> TransformationLayout:
pass
def initialize(self, model: ModelType) -> None:
pass
def _get_mock_config(algo_name: Union[List[str], str]) -> NNCFConfig:
config = NNCFConfig()
config["input_info"] = {
"sample_size": [1, 1]
}
if isinstance(algo_name, list):
lst = []
for alg_n in algo_name:
lst.append({"algorithm": alg_n})
config["compression"] = lst
else:
assert isinstance(algo_name, str)
config["compression"] = {
"algorithm": algo_name
}
return config
def test_builder_state_load():
config = _get_mock_config('A')
builder = A(config, True, 1)
builder.state_value += 1
saved_state = builder.get_state()
builder = A(config, True, 1)
builder.load_state(saved_state)
assert builder.state_value == 2
def test_basic_composite_builder_load():
def create_builder():
config = _get_mock_config(['A', 'A2'])
c = CA(config, True)
a = A(config, True, 1)
b = A(config, True, 2, 'A2')
c.add(a)
c.add(b)
return c, a, b
composite_bldr, bldr1, bldr2 = create_builder()
bldr1.state_value += 1
bldr2.state_value += 2
saved_state = composite_bldr.get_state()
composite_bldr, bldr1, bldr2 = create_builder()
composite_bldr.load_state(saved_state)
assert bldr1.state_value == 2
assert bldr2.state_value == 4
def test_advanced_composite_ctrl_load():
config = _get_mock_config(['A', 'A2', 'A3'])
composite_builder = CA(config, True)
ctrl1 = A(config, True, 1)
ctrl2 = A(config, True, 2, name='A2')
composite_builder.add(ctrl1)
composite_builder.add(ctrl2)
ctrl1.state_value += 1
ctrl2.state_value += 2
saved_state = composite_builder.get_state()
composite_builder = CA(config, True)
ctrl1 = A(config, True, 1)
ctrl3 = A(config, True, 3, name='A3')
composite_builder.add(ctrl1)
composite_builder.add(ctrl3)
composite_builder.load_state(saved_state)
assert ctrl1.state_value == 2
assert ctrl3.state_value == 3
|
homeassistant/components/media_player/const.py | liangleslie/core | 30,023 | 11146335 | """Provides the constants needed for component."""
from enum import IntEnum
# How long our auth signature on the content should be valid for
CONTENT_AUTH_EXPIRY_TIME = 3600 * 24
ATTR_APP_ID = "app_id"
ATTR_APP_NAME = "app_name"
ATTR_ENTITY_PICTURE_LOCAL = "entity_picture_local"
ATTR_GROUP_MEMBERS = "group_members"
ATTR_INPUT_SOURCE = "source"
ATTR_INPUT_SOURCE_LIST = "source_list"
ATTR_MEDIA_ANNOUNCE = "announce"
ATTR_MEDIA_ALBUM_ARTIST = "media_album_artist"
ATTR_MEDIA_ALBUM_NAME = "media_album_name"
ATTR_MEDIA_ARTIST = "media_artist"
ATTR_MEDIA_CHANNEL = "media_channel"
ATTR_MEDIA_CONTENT_ID = "media_content_id"
ATTR_MEDIA_CONTENT_TYPE = "media_content_type"
ATTR_MEDIA_DURATION = "media_duration"
ATTR_MEDIA_ENQUEUE = "enqueue"
ATTR_MEDIA_EXTRA = "extra"
ATTR_MEDIA_EPISODE = "media_episode"
ATTR_MEDIA_PLAYLIST = "media_playlist"
ATTR_MEDIA_POSITION = "media_position"
ATTR_MEDIA_POSITION_UPDATED_AT = "media_position_updated_at"
ATTR_MEDIA_REPEAT = "repeat"
ATTR_MEDIA_SEASON = "media_season"
ATTR_MEDIA_SEEK_POSITION = "seek_position"
ATTR_MEDIA_SERIES_TITLE = "media_series_title"
ATTR_MEDIA_SHUFFLE = "shuffle"
ATTR_MEDIA_TITLE = "media_title"
ATTR_MEDIA_TRACK = "media_track"
ATTR_MEDIA_VOLUME_LEVEL = "volume_level"
ATTR_MEDIA_VOLUME_MUTED = "is_volume_muted"
ATTR_SOUND_MODE = "sound_mode"
ATTR_SOUND_MODE_LIST = "sound_mode_list"
DOMAIN = "media_player"
MEDIA_CLASS_ALBUM = "album"
MEDIA_CLASS_APP = "app"
MEDIA_CLASS_ARTIST = "artist"
MEDIA_CLASS_CHANNEL = "channel"
MEDIA_CLASS_COMPOSER = "composer"
MEDIA_CLASS_CONTRIBUTING_ARTIST = "contributing_artist"
MEDIA_CLASS_DIRECTORY = "directory"
MEDIA_CLASS_EPISODE = "episode"
MEDIA_CLASS_GAME = "game"
MEDIA_CLASS_GENRE = "genre"
MEDIA_CLASS_IMAGE = "image"
MEDIA_CLASS_MOVIE = "movie"
MEDIA_CLASS_MUSIC = "music"
MEDIA_CLASS_PLAYLIST = "playlist"
MEDIA_CLASS_PODCAST = "podcast"
MEDIA_CLASS_SEASON = "season"
MEDIA_CLASS_TRACK = "track"
MEDIA_CLASS_TV_SHOW = "tv_show"
MEDIA_CLASS_URL = "url"
MEDIA_CLASS_VIDEO = "video"
MEDIA_TYPE_ALBUM = "album"
MEDIA_TYPE_APP = "app"
MEDIA_TYPE_APPS = "apps"
MEDIA_TYPE_ARTIST = "artist"
MEDIA_TYPE_CHANNEL = "channel"
MEDIA_TYPE_CHANNELS = "channels"
MEDIA_TYPE_COMPOSER = "composer"
MEDIA_TYPE_CONTRIBUTING_ARTIST = "contributing_artist"
MEDIA_TYPE_EPISODE = "episode"
MEDIA_TYPE_GAME = "game"
MEDIA_TYPE_GENRE = "genre"
MEDIA_TYPE_IMAGE = "image"
MEDIA_TYPE_MOVIE = "movie"
MEDIA_TYPE_MUSIC = "music"
MEDIA_TYPE_PLAYLIST = "playlist"
MEDIA_TYPE_PODCAST = "podcast"
MEDIA_TYPE_SEASON = "season"
MEDIA_TYPE_TRACK = "track"
MEDIA_TYPE_TVSHOW = "tvshow"
MEDIA_TYPE_URL = "url"
MEDIA_TYPE_VIDEO = "video"
SERVICE_CLEAR_PLAYLIST = "clear_playlist"
SERVICE_JOIN = "join"
SERVICE_PLAY_MEDIA = "play_media"
SERVICE_SELECT_SOUND_MODE = "select_sound_mode"
SERVICE_SELECT_SOURCE = "select_source"
SERVICE_UNJOIN = "unjoin"
REPEAT_MODE_ALL = "all"
REPEAT_MODE_OFF = "off"
REPEAT_MODE_ONE = "one"
REPEAT_MODES = [REPEAT_MODE_OFF, REPEAT_MODE_ALL, REPEAT_MODE_ONE]
class MediaPlayerEntityFeature(IntEnum):
"""Supported features of the media player entity."""
PAUSE = 1
SEEK = 2
VOLUME_SET = 4
VOLUME_MUTE = 8
PREVIOUS_TRACK = 16
NEXT_TRACK = 32
TURN_ON = 128
TURN_OFF = 256
PLAY_MEDIA = 512
VOLUME_STEP = 1024
SELECT_SOURCE = 2048
STOP = 4096
CLEAR_PLAYLIST = 8192
PLAY = 16384
SHUFFLE_SET = 32768
SELECT_SOUND_MODE = 65536
BROWSE_MEDIA = 131072
REPEAT_SET = 262144
GROUPING = 524288
# These SUPPORT_* constants are deprecated as of Home Assistant 2022.5.
# Please use the MediaPlayerEntityFeature enum instead.
SUPPORT_PAUSE = 1
SUPPORT_SEEK = 2
SUPPORT_VOLUME_SET = 4
SUPPORT_VOLUME_MUTE = 8
SUPPORT_PREVIOUS_TRACK = 16
SUPPORT_NEXT_TRACK = 32
SUPPORT_TURN_ON = 128
SUPPORT_TURN_OFF = 256
SUPPORT_PLAY_MEDIA = 512
SUPPORT_VOLUME_STEP = 1024
SUPPORT_SELECT_SOURCE = 2048
SUPPORT_STOP = 4096
SUPPORT_CLEAR_PLAYLIST = 8192
SUPPORT_PLAY = 16384
SUPPORT_SHUFFLE_SET = 32768
SUPPORT_SELECT_SOUND_MODE = 65536
SUPPORT_BROWSE_MEDIA = 131072
SUPPORT_REPEAT_SET = 262144
SUPPORT_GROUPING = 524288
|
Algo and DSA/LeetCode-Solutions-master/Python/baseball-game.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 11146392 | # Time: O(n)
# Space: O(n)
class Solution(object):
def calPoints(self, ops):
"""
:type ops: List[str]
:rtype: int
"""
history = []
for op in ops:
if op == '+':
history.append(history[-1] + history[-2])
elif op == 'D':
history.append(history[-1] * 2)
elif op == 'C':
history.pop()
else:
history.append(int(op))
return sum(history)
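# --- Usage sketch (added for illustration, not part of the original file),
# mirroring the classic example: "C" cancels the 2, "D" doubles the previous
# valid score (5 -> 10), "+" adds the previous two (5 + 10 = 15). ---
if __name__ == "__main__":
    assert Solution().calPoints(["5", "2", "C", "D", "+"]) == 30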
|
python/tests/api/types/test_comparable_comparator.py | moulimukherjee/incubator-iceberg | 2,161 | 11146398 | <filename>python/tests/api/types/test_comparable_comparator.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from iceberg.api.expressions import (IntegerLiteral,
Literal)
import pytest
@pytest.mark.parametrize("larger,smaller", [
(34, 33),
(-1, -2)])
@pytest.mark.parametrize("op", [
lambda x, y: x > y,
lambda y, x: x < y])
def test_natural_order(larger, smaller, op):
assert op(Literal.of(larger), Literal.of(smaller))
@pytest.mark.parametrize("input_val", [
1,
0,
-1])
def test_natural_order_eq(input_val):
assert Literal.of(input_val) == Literal.of(input_val)
@pytest.mark.parametrize("larger,smaller", [
(34, None)])
@pytest.mark.parametrize("op", [
lambda x, y: x > y,
lambda y, x: x < y])
def test_null_handling(larger, smaller, op):
assert op(IntegerLiteral(larger), IntegerLiteral(smaller))
def test_null_handling_eq():
assert IntegerLiteral(None) == IntegerLiteral(None)
|
perfzero/lib/perfzero/process_info_tracker.py | chensusu11/benchmarks | 1,073 | 11146406 | <filename>perfzero/lib/perfzero/process_info_tracker.py<gh_stars>1000+
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keep track of process information such as maximum memory usage with a separate thread."""
from __future__ import absolute_import
import json
import logging
import os
import sched
import threading
import time
import traceback
import psutil
class ProcessInfoTracker(object):
"""Keep track of process information such as maximum memory usage with separate thread."""
def __init__(self, output_dir):
self.process_info_log = open(os.path.join(output_dir, 'process_info.log'),
'w')
self.scheduler = sched.scheduler(time.time, time.sleep)
self.process_info = {}
self.process_info['max_rss'] = 0
self.process_info['max_vms'] = 0
self.process_info['max_cpu_percent'] = 0
self.exit_event = threading.Event()
self.last_exception = None
self.start_time = None
def start(self):
self.start_time = time.time()
# 4th positional arg added to support Python2 for the short-term.
self.scheduler.enter(1, 1, self._update_process_info, ()) # pylint: disable=no-value-for-parameter
threading.Thread(target=self.scheduler.run).start()
logging.info('Started process information tracker.')
def stop(self):
self.exit_event.set()
self.process_info_log.flush()
logging.info('Stopped process information tracker.')
if self.last_exception is not None:
raise self.last_exception # pylint: disable=raising-bad-type
return dict(self.process_info)
def _update_process_info(self):
"""Read and update process info using background thread every 1 second."""
try:
p = psutil.Process(os.getpid())
memory_info = p.memory_info()
# This is a blocking call which takes 0.1 second.
      # This affects the interval at which the metrics are reported
cpu_percent = p.cpu_percent(interval=0.1)
self.process_info['max_rss'] = max(self.process_info['max_rss'],
memory_info.rss)
self.process_info['max_vms'] = max(self.process_info['max_vms'],
memory_info.vms)
self.process_info['max_cpu_percent'] = max(
self.process_info['max_cpu_percent'], cpu_percent)
entry = {}
entry['time'] = time.time() - self.start_time
entry['rss'] = memory_info.rss
entry['vms'] = memory_info.vms
entry['cpu_percent'] = cpu_percent
self.process_info_log.write(json.dumps(entry) + '\n')
if not self.exit_event.is_set():
# Schedule the next event to be run after 1 second
# 4th positional arg added to support Python2 for the short-term.
self.scheduler.enter(1, 1, self._update_process_info, ()) # pylint: disable=no-value-for-parameter
except Exception as e: # pylint: disable=broad-except
logging.error('Process info tracker failed due to error:\n %s',
traceback.format_exc())
self.last_exception = e
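# --- Usage sketch (added for illustration, not part of the original module).
# Tracks the current process for a couple of seconds and prints the peaks;
# assumes psutil is installed and writes process_info.log to a temp dir. ---
if __name__ == '__main__':
    import tempfile
    tracker = ProcessInfoTracker(tempfile.mkdtemp())
    tracker.start()
    time.sleep(2)
    info = tracker.stop()
    print('max_rss=%d bytes, max_vms=%d bytes, max_cpu_percent=%.1f%%' % (
        info['max_rss'], info['max_vms'], info['max_cpu_percent']))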
|
configs/_base_/datasets/deepfashion_full.py | nightfuryyy/mmpose | 1,775 | 11146419 | <reponame>nightfuryyy/mmpose<filename>configs/_base_/datasets/deepfashion_full.py
dataset_info = dict(
dataset_name='deepfashion_full',
paper_info=dict(
author='<NAME> and <NAME> and <NAME> '
'and <NAME> and <NAME>',
title='DeepFashion: Powering Robust Clothes Recognition '
'and Retrieval with Rich Annotations',
container='Proceedings of IEEE Conference on Computer '
'Vision and Pattern Recognition (CVPR)',
year='2016',
homepage='http://mmlab.ie.cuhk.edu.hk/projects/'
'DeepFashion/LandmarkDetection.html',
),
keypoint_info={
0:
dict(
name='left collar',
id=0,
color=[255, 255, 255],
type='',
swap='right collar'),
1:
dict(
name='right collar',
id=1,
color=[255, 255, 255],
type='',
swap='left collar'),
2:
dict(
name='left sleeve',
id=2,
color=[255, 255, 255],
type='',
swap='right sleeve'),
3:
dict(
name='right sleeve',
id=3,
color=[255, 255, 255],
type='',
swap='left sleeve'),
4:
dict(
name='left waistline',
id=0,
color=[255, 255, 255],
type='',
swap='right waistline'),
5:
dict(
name='right waistline',
id=1,
color=[255, 255, 255],
type='',
swap='left waistline'),
6:
dict(
name='left hem',
id=2,
color=[255, 255, 255],
type='',
swap='right hem'),
7:
dict(
name='right hem',
id=3,
color=[255, 255, 255],
type='',
swap='left hem'),
},
skeleton_info={},
joint_weights=[1.] * 8,
sigmas=[])
|
validators/hashes.py | vphilippon/validators | 586 | 11146464 | import re
from .utils import validator
md5_regex = re.compile(
r"^[0-9a-f]{32}$",
re.IGNORECASE
)
sha1_regex = re.compile(
r"^[0-9a-f]{40}$",
re.IGNORECASE
)
sha224_regex = re.compile(
r"^[0-9a-f]{56}$",
re.IGNORECASE
)
sha256_regex = re.compile(
r"^[0-9a-f]{64}$",
re.IGNORECASE
)
sha512_regex = re.compile(
r"^[0-9a-f]{128}$",
re.IGNORECASE
)
@validator
def md5(value):
"""
Return whether or not given value is a valid MD5 hash.
Examples::
>>> md5('d41d8cd98f00b204e9800998ecf8427e')
True
>>> md5('900zz11')
ValidationFailure(func=md5, args={'value': '900zz11'})
:param value: MD5 string to validate
"""
return md5_regex.match(value)
@validator
def sha1(value):
"""
Return whether or not given value is a valid SHA1 hash.
Examples::
>>> sha1('da39a3ee5e6b4b0d3255bfef95601890afd80709')
True
>>> sha1('900zz11')
ValidationFailure(func=sha1, args={'value': '900zz11'})
:param value: SHA1 string to validate
"""
return sha1_regex.match(value)
@validator
def sha224(value):
"""
Return whether or not given value is a valid SHA224 hash.
Examples::
>>> sha224('d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f')
True
>>> sha224('900zz11')
ValidationFailure(func=sha224, args={'value': '900zz11'})
:param value: SHA224 string to validate
"""
return sha224_regex.match(value)
@validator
def sha256(value):
"""
Return whether or not given value is a valid SHA256 hash.
Examples::
>>> sha256(
... 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b'
... '855'
... )
True
>>> sha256('900zz11')
ValidationFailure(func=sha256, args={'value': '900zz11'})
:param value: SHA256 string to validate
"""
return sha256_regex.match(value)
@validator
def sha512(value):
"""
Return whether or not given value is a valid SHA512 hash.
Examples::
>>> sha512(
... 'cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce'
... '9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af9'
... '27da3e'
... )
True
>>> sha512('900zz11')
ValidationFailure(func=sha512, args={'value': '900zz11'})
:param value: SHA512 string to validate
"""
return sha512_regex.match(value)
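# --- Usage sketch (added for illustration, not part of the original module).
# As the docstrings above show, the @validator decorator turns a falsy return
# into a ValidationFailure, so results work directly in boolean context;
# import through the package because of the relative import above.
#
#     >>> from validators import md5, sha256
#     >>> md5('d41d8cd98f00b204e9800998ecf8427e')
#     True
#     >>> bool(sha256('not-a-hash'))
#     False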
|
src/python/strelka/scanners/scan_json.py | weslambert/strelka | 513 | 11146468 | import json
from strelka import strelka
class ScanJson(strelka.Scanner):
"""Collects keys from JSON files."""
def scan(self, data, file, options, expire_at):
self.event.setdefault('keys', [])
try:
self._get_keys(self, json.loads(data.decode()))
except UnicodeDecodeError:
self.flags.append('unicode_decode_error')
except json.decoder.JSONDecodeError:
self.flags.append('json_decode_error')
@staticmethod
def _get_keys(self, variable):
"""Recursively parses JSON.
Args:
variable: Variable to recursively parse.
"""
if isinstance(variable, dict):
for (key, value) in variable.items():
if key not in self.event['keys']:
self.event['keys'].append(key)
self._get_keys(self, value)
elif isinstance(variable, list):
for v in variable:
self._get_keys(self, v)
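# --- Illustration (added, not part of the original file): ScanJson normally
# runs inside the Strelka scanner framework, so the hypothetical helper below
# mirrors the _get_keys walk in standalone form. ---
def _collect_keys_example(obj, keys):
    """Hypothetical standalone version of the recursive key walk above."""
    if isinstance(obj, dict):
        for key, value in obj.items():
            if key not in keys:
                keys.append(key)
            _collect_keys_example(value, keys)
    elif isinstance(obj, list):
        for item in obj:
            _collect_keys_example(item, keys)
# Example: collecting keys from nested JSON records every unique key once:
#   keys = []
#   _collect_keys_example(json.loads('{"a": {"b": [{"c": 1}, {"c": 2}]}}'), keys)
#   keys == ['a', 'b', 'c']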
|
Sierra-RV50X-logger.py | danlshields/scream | 137 | 11146483 | <reponame>danlshields/scream
import requests
import json
import datetime
import time
import socket
# OUT_FILE = 'data.txt'
# This simple application logs the Sierra RV50X (possibly other Sierra modems too)
# The data is echoed to 127.0.0.1:35000, the SCReAM wrappers can get the data this
# way and then include it the SCReAM logging
# >python3 Sierra-RV50X-logger.py
base_url = 'http://192.168.13.31:9191'
username = 'user'
password = '<PASSWORD>'
login_url = '{}/xml/Connect.xml'.format(base_url)
login_payload = '''<request xmlns="urn:acemanager">
<connect>
<login>{}</login>
<password><![CDATA[{}]]\x3e</password>
</connect>
</request>'''.format(username, password)
req_url = '{}/cgi-bin/Embedded_Ace_Get_Task.cgi'.format(base_url)
req_headers = {
'Content-Type': 'text/xml',
}
# Param name to id map
param_ids = {
'Cellular IP Address': 303,
'ESN/EID/IMEI': 10,
'Active Frequency Band': 671,
'SIM ID': 771,
'Cell ID': 773,
'RSRP': 10210,
'SINR': 10209,
'RSSI': 261,
}
param_names = dict(map(lambda tup: (tup[1], tup[0]), param_ids.items()))
# payload = '303,12001,12002,10,771,11202,10701,10702,10704,785,773,774,775,671,674,672,675,1105,1104,10230,10298,5030,1082,1083,12005,12006,1091,1136,2753,5046,283,284,10283,10284,281,282,51006,52007,52008,53000,53101,53200,12003,12003,12003,12003'
#payload = '303,12001,10'
payload = '10210,10209,261,773'
def make_params_payload(params):
param_list = map(lambda param: str(param_ids[param]), params)
return ','.join(param_list)
def parse_pair(pair):
[pid_str, value] = pair.split('=')
pid = int(pid_str)
param_name = param_names[pid] if pid in param_names else 'param {}'.format(pid)
return (param_name, value)
def parse_params(data):
pairs = data.split('!')[:-1]
return dict(map(lambda pair: parse_pair(pair), pairs))
def make_request(params):
with requests.Session() as s:
p = s.post(login_url, data=login_payload)
print('login response text:', p.text)
#payload = make_params_payload(params)
r = s.post(req_url, headers=req_headers, data=payload)
# print('request text:', r.text)
return {
'timestamp': datetime.datetime.now().isoformat(),
'data': parse_params(r.text),
}
# return None
def send_to_udp_socket(result):
UDP_IP = "127.0.0.1"
UDP_PORT = 35000
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(bytes(result, 'utf-8'), (UDP_IP, UDP_PORT))
# params = ['Cellular IP Address', 'ESN/EID/IMEI']
params = param_ids.keys()
try:
while True:
result = make_request(params)
#result = make_request(payload)
#print('result:', json.dumps(result, indent=4))
print(result)
send_to_udp_socket(json.dumps(result))
time.sleep(1)
except KeyboardInterrupt:
print('\n Terminated from keyboard. \n')
# with open(OUT_FILE, 'a') as f:
# f.write('row\n')
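# --- Illustration (added, not part of the original script): the modem replies
# with "id=value!" pairs, which parse_params maps back to readable names via
# param_names. The response string below is made up for demonstration. ---
#   parse_params('261=-67!773=12345!') == {'RSSI': '-67', 'Cell ID': '12345'}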
|
Trakttv.bundle/Contents/Libraries/Shared/stash/main.py | disrupted/Trakttv.bundle | 1,346 | 11146489 | <gh_stars>1000+
from stash.core.modules.manager import ModuleManager
from collections import MutableMapping
from threading import Lock
class Stash(MutableMapping):
def __init__(self, archive, algorithm='lru:///', serializer='none:///', cache='memory:///', key_transform=None):
# Construct modules
self.archive = ModuleManager.construct(self, 'archive', archive)
self.algorithm = ModuleManager.construct(self, 'algorithm', algorithm)
self.serializer = ModuleManager.construct(self, 'serializer', serializer)
self.cache = ModuleManager.construct(self, 'cache', cache)
self.key_transform = key_transform or (lambda key: key, lambda key: key)
self._flushing = Lock()
def compact(self, force=False):
return self.algorithm.compact(force=force)
def delete(self, keys):
return self.algorithm.delete(keys)
def flush(self, force=False):
if force:
# Wait until flush can be started
self._flushing.acquire()
elif not self._flushing.acquire(False):
# Flush already running
return False
try:
# Take exclusive access of cache
with self.cache.exclusive:
# Update `archive` with the items in `cache`
self.archive.update(self.cache.iteritems(__force=True))
# Flush complete
return True
finally:
self._flushing.release()
def items(self):
self.flush()
return self.archive.items()
def iteritems(self):
self.flush()
return self.archive.iteritems()
def iterkeys(self):
self.flush()
return self.archive.iterkeys()
def itervalues(self):
self.flush()
return self.archive.itervalues()
def prime(self, keys=None, force=False):
"""Prime cache with `keys` from archive.
:param keys: list of keys to load, or `None` to load everything
:type keys: list of any or None
:param force: force the loading of items (by ignoring the algorithm capacity parameter).
**Note:** these items will be removed on the next `compact()` call.
:type force: bool
"""
return self.algorithm.prime(
keys=keys,
force=force
)
def save(self):
# Flush items from `cache` to `archive`
self.flush()
# Ensure `archive` is completely saved
self.archive.save()
def __delitem__(self, key):
del self.algorithm[key]
def __getitem__(self, key):
return self.algorithm[key]
def __iter__(self):
self.flush()
return iter(self.archive)
def __len__(self):
self.flush()
return len(self.archive)
def __setitem__(self, key, value):
self.algorithm[key] = value
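# --- Usage sketch (added for illustration, not part of the original module).
# The constructor defaults suggest module URIs such as 'memory:///' and
# 'lru:///'; whether 'memory:///' is also a registered archive module is an
# assumption here, so treat the snippet as a sketch only.
#
#   store = Stash(archive='memory:///')   # hypothetical archive URI
#   store['key'] = 'value'                # routed through algorithm + cache
#   store.flush()                         # push cached items to the archive
#   assert store['key'] == 'value'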
|
desktop/core/ext-py/docutils-0.14/tools/quicktest.py | kokosing/hue | 5,079 | 11146495 | #!/usr/bin/env python
# $Id: quicktest.py 8126 2017-06-23 09:34:28Z milde $
# Authors: <NAME> <<EMAIL>>;
# <NAME> <<EMAIL>>
# Copyright: This module has been placed in the public domain.
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
import sys
import os
import getopt
import docutils
from docutils.frontend import OptionParser
from docutils.utils import new_document
from docutils.parsers.rst import Parser
usage_header = """\
quicktest.py: Quickly test the reStructuredText parser. This is not an
interface to the full functionality of Docutils. Use one of the ``rst2*.py``
front-end tools instead.
Usage::
quicktest.py [options] [<source> [<destination>]]
``source`` is the name of the file to use as input (default is stdin).
``destination`` is the name of the file to create as output (default is
stdout).
Options:
"""
options = [('pretty', 'p',
'output pretty pseudo-xml: no "&abc;" entities (default)'),
('test', 't', 'output test-ready data (input & expected output, '
'ready to be copied to a parser test module)'),
('rawxml', 'r', 'output raw XML'),
('styledxml=', 's', 'output raw XML with XSL style sheet '
'reference (filename supplied in the option argument)'),
('xml', 'x', 'output pretty XML (indented)'),
('attributes', 'A', 'dump document attributes after processing'),
('debug', 'd', 'debug mode (lots of output)'),
('version', 'V', 'show Docutils version then exit'),
('help', 'h', 'show help text then exit')]
"""See ``distutils.fancy_getopt.FancyGetopt.__init__`` for a description of
the data structure: (long option, short option, description)."""
def usage():
print(usage_header)
for longopt, shortopt, description in options:
if longopt[-1:] == '=':
opts = '-%s arg, --%sarg' % (shortopt, longopt)
else:
opts = '-%s, --%s' % (shortopt, longopt)
sys.stdout.write('%-15s' % opts)
if len(opts) > 14:
sys.stdout.write('%-16s' % '\n')
while len(description) > 60:
limit = description.rindex(' ', 0, 60)
print(description[:limit].strip())
description = description[limit + 1:]
sys.stdout.write('%-15s' % ' ')
print(description)
def _pretty(input, document, optargs):
return document.pformat()
def _rawxml(input, document, optargs):
return document.asdom().toxml()
def _styledxml(input, document, optargs):
docnode = document.asdom().childNodes[0]
return '%s\n%s\n%s' % (
'<?xml version="1.0" encoding="ISO-8859-1"?>',
'<?xml-stylesheet type="text/xsl" href="%s"?>'
% optargs['styledxml'], docnode.toxml())
def _prettyxml(input, document, optargs):
return document.asdom().toprettyxml(' ', '\n')
def _test(input, document, optargs):
tq = '"""'
output = document.pformat() # same as _pretty()
return """\
totest['change_this_test_name'] = [
[%s\\
%s
%s,
%s\\
%s
%s],
]
""" % ( tq, escape(input.rstrip()), tq, tq, escape(output.rstrip()), tq )
def escape(text):
"""
Return `text` in triple-double-quoted Python string form.
"""
text = text.replace('\\', '\\\\') # escape backslashes
text = text.replace('"""', '""\\"') # break up triple-double-quotes
text = text.replace(' \n', ' \\n\\\n') # protect trailing whitespace
return text
_outputFormatters = {
'rawxml': _rawxml,
'styledxml': _styledxml,
'xml': _prettyxml,
'pretty' : _pretty,
'test': _test
}
def format(outputFormat, input, document, optargs):
formatter = _outputFormatters[outputFormat]
return formatter(input, document, optargs)
def getArgs():
if os.name == 'mac' and len(sys.argv) <= 1:
return macGetArgs()
else:
return posixGetArgs(sys.argv[1:])
def posixGetArgs(argv):
outputFormat = 'pretty'
# convert fancy_getopt style option list to getopt.getopt() arguments
shortopts = ''.join([option[1] + ':' * (option[0][-1:] == '=')
for option in options if option[1]])
longopts = [option[0] for option in options if option[0]]
try:
opts, args = getopt.getopt(argv, shortopts, longopts)
except getopt.GetoptError:
usage()
sys.exit(2)
optargs = {'debug': 0, 'attributes': 0}
for o, a in opts:
if o in ['-h', '--help']:
usage()
sys.exit()
elif o in ['-V', '--version']:
sys.stderr.write('quicktest.py (Docutils %s%s)\n' %
(docutils.__version__,
docutils.__version_details__ and
' [%s]'%docutils.__version_details__ or ''))
sys.exit()
elif o in ['-r', '--rawxml']:
outputFormat = 'rawxml'
elif o in ['-s', '--styledxml']:
outputFormat = 'styledxml'
optargs['styledxml'] = a
elif o in ['-x', '--xml']:
outputFormat = 'xml'
elif o in ['-p', '--pretty']:
outputFormat = 'pretty'
elif o in ['-t', '--test']:
outputFormat = 'test'
elif o in ['--attributes', '-A']:
optargs['attributes'] = 1
elif o in ['-d', '--debug']:
optargs['debug'] = 1
else:
raise getopt.GetoptError("getopt should have saved us!")
if len(args) > 2:
print('Maximum 2 arguments allowed.')
usage()
sys.exit(1)
inputFile = sys.stdin
outputFile = sys.stdout
if args:
inputFile = open(args.pop(0))
if args:
outputFile = open(args.pop(0), 'w')
return inputFile, outputFile, outputFormat, optargs
def macGetArgs():
import EasyDialogs
EasyDialogs.Message("""\
Use the next dialog to build a command line:
1. Choose an output format from the [Option] list
2. Click [Add]
3. Choose an input file: [Add existing file...]
4. Save the output: [Add new file...]
5. [OK]""")
optionlist = [(longopt, description)
for (longopt, shortopt, description) in options]
argv = EasyDialogs.GetArgv(optionlist=optionlist, addfolder=0)
return posixGetArgs(argv)
def main():
# process cmdline arguments:
inputFile, outputFile, outputFormat, optargs = getArgs()
settings = OptionParser(components=(Parser,)).get_default_values()
settings.debug = optargs['debug']
parser = Parser()
input = inputFile.read()
document = new_document(inputFile.name, settings)
parser.parse(input, document)
output = format(outputFormat, input, document, optargs)
outputFile.write(output)
if optargs['attributes']:
import pprint
pprint.pprint(document.__dict__)
if __name__ == '__main__':
sys.stderr = sys.stdout
main()
|
model/utils.py | yashkant/Vision-Language-Transformer | 132 | 11146515 | <gh_stars>100-1000
import keras.backend as K
import tensorflow as tf
def expand_and_tile(x, outsize):
x = K.expand_dims(x, axis=1)
x = K.expand_dims(x, axis=1)
x = K.tile(x, [1, outsize, outsize, 1])
return x
def expand_and_tile_1(x, outchannels):
x = K.expand_dims(x, axis=-1)
x = K.tile(x, [1, 1, outchannels])
return x
def normalize_by_dim(x, dim=1024.):
d = tf.convert_to_tensor(dim)
return x/K.sqrt(d)
def split_dim_concat_batch(x, n):
return tf.concat(tf.split(x, n, axis=-1), axis=0)
def split_batch_concat_dim(x, n):
return tf.concat(tf.split(x, n, axis=0), axis=-1)
def normalize(x):
x = (x+1.)/2.
return K.clip(x, 1e-6, 1.-1e-6)
def l2_normalize(x):
return tf.nn.l2_normalize(x, axis=-1, epsilon=1e-6)
def softmax(x):
return K.softmax(x-tf.reduce_max(x), -1)
def concat_coord(x):
ins_feat = x # [N, h, w, c]
batch_size = tf.shape(x)[0]
h = tf.shape(x)[1]
w = tf.shape(x)[2]
float_h = K.cast(h, 'float32')
float_w = K.cast(w, 'float32')
y_range = K.arange(float_h, dtype='float32') # [h, ]
y_range = 2.0 * y_range / (float_h - 1.0) - 1.0
x_range = K.arange(float_w, dtype='float32') # [w, ]
x_range = 2.0 * x_range / (float_w - 1.0) - 1.0
x_range = x_range[None, :] # [1, w]
y_range = y_range[:, None] # [h, 1]
x = K.tile(x_range, [h, 1]) # [h, w]
y = K.tile(y_range, [1, w]) # [h, w]
x = x[None, :, :, None] # [1, h, w, 1]
y = y[None, :, :, None] # [1, h, w, 1]
x = K.tile(x, [batch_size, 1, 1, 1]) # [N, h, w, 1]
y = K.tile(y, [batch_size, 1, 1, 1]) # [N, h, w, 1]
ins_feat_out = K.concatenate([ins_feat, x, x, x, y, y, y]) # [N, h, w, c+6]
return ins_feat_out
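# --- Illustration (added, not part of the original file): concat_coord takes
# a feature map of shape [N, h, w, c] and returns [N, h, w, c + 6], appending
# three copies each of the x and y coordinate grids normalised to [-1, 1]. ---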
|
tools/bagging.py | zake7749/DeepToxic | 206 | 11146528 | <gh_stars>100-1000
import sys
import numpy as np
import pandas as pd
from sklearn import *
from scipy.special import expit,logit
ensembeled = sys.argv[1:]
print("Going ensemble on",)
subs = []
for e in ensembeled:
print(e)
subs.append(pd.read_csv(e))
classes = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
for sub in subs[1:]:
for c in classes:
subs[0][c] += sub[c]
for c in classes:
subs[0][c] /= len(subs)
subs[0].to_csv('Bagging.csv', index=False) |
src/features/migrations/0015_auto_20190916_1338.py | nixplay/bullet-train-api | 1,259 | 11146555 | <gh_stars>1000+
# -*- coding: utf-8 -*-
# Generated by Django 1.11.24 on 2019-09-16 13:38
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('features', '0014_auto_20190607_1642'),
]
operations = [
migrations.AddField(
model_name='featurestate',
name='feature_segment',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='feature_states', to='features.FeatureSegment'),
),
migrations.AddField(
model_name='historicalfeaturestate',
name='feature_segment',
field=models.ForeignKey(blank=True, db_constraint=False, default=None, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='features.FeatureSegment'),
),
migrations.AlterField(
model_name='featuresegment',
name='feature',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='feature_segments', to='features.Feature'),
),
]
|
rotkehlchen/crypto.py | rotkehlchenio/rotkehlchen | 137 | 11146581 | <reponame>rotkehlchenio/rotkehlchen
import base64
import os
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from rotkehlchen.errors.misc import UnableToDecryptRemoteData
AES_BLOCK_SIZE = 16
# AES encrypt/decrypt taken from here: https://stackoverflow.com/a/44212550/110395
# and updated to use cryptography library as pyCrypto is deprecated
# TODO: Perhaps use Fernet instead of this algorithm in the future? The docs of the
# cryptography library seem to suggest it's the safest options. Problem is the
# already encrypted and saved database files and how to handle the previous encryption
# We need to keep a versioning of encryption used for each file.
def encrypt(key: bytes, source: bytes) -> str:
assert isinstance(key, bytes), 'key should be given in bytes'
assert isinstance(source, bytes), 'source should be given in bytes'
digest = hashes.Hash(hashes.SHA256())
digest.update(key)
key = digest.finalize() # use SHA-256 over our key to get a proper-sized AES key
iv = os.urandom(AES_BLOCK_SIZE)
cipher = Cipher(algorithms.AES(key), modes.CBC(iv))
encryptor = cipher.encryptor()
padding = AES_BLOCK_SIZE - len(source) % AES_BLOCK_SIZE # calculate needed padding
source += bytes([padding]) * padding # Python 2.x: source += chr(padding) * padding
# store the iv at the beginning and encrypt
data = iv + (encryptor.update(source) + encryptor.finalize())
return base64.b64encode(data).decode("latin-1")
def decrypt(key: bytes, given_source: str) -> bytes:
"""
    Decrypts the given source data with the given key.
Returns the decrypted data.
If data can't be decrypted then raises UnableToDecryptRemoteData
"""
assert isinstance(key, bytes), 'key should be given in bytes'
assert isinstance(given_source, str), 'source should be given in string'
source = base64.b64decode(given_source.encode("latin-1"))
digest = hashes.Hash(hashes.SHA256())
digest.update(key)
key = digest.finalize() # use SHA-256 over our key to get a proper-sized AES key
iv = source[:AES_BLOCK_SIZE] # extract the iv from the beginning
cipher = Cipher(algorithms.AES(key), modes.CBC(iv))
decryptor = cipher.decryptor()
data = source[AES_BLOCK_SIZE:] # decrypt
data = decryptor.update(data)
padding = data[-1] # pick the padding value from the end; Python 2.x: ord(data[-1])
if data[-padding:] != bytes([padding]) * padding: # Python 2.x: chr(padding) * padding
raise UnableToDecryptRemoteData(
'Invalid padding when decrypting the DB data we received from the server. '
'Are you using a new user and if yes have you used the same password as before? '
'If you have then please open a bug report.',
)
return data[:-padding] # remove the padding
def sha3(data: bytes) -> bytes:
"""
Raises:
RuntimeError: If Keccak lib initialization failed, or if the function
failed to compute the hash.
TypeError: This function does not accept unicode objects, they must be
encoded prior to usage.
"""
digest = hashes.Hash(hashes.SHA3_256())
digest.update(data)
return digest.finalize()
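# --- Usage sketch (added for illustration, not part of the original module);
# assumes the rotkehlchen package is importable so the module-level import
# above resolves. encrypt/decrypt form a round trip for any key and payload. ---
if __name__ == '__main__':
    token = encrypt(b'passphrase', b'secret payload')
    assert decrypt(b'passphrase', token) == b'secret payload'
    print(sha3(b'rotki').hex())  # 32-byte SHA3-256 digest as hex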
|
test/example_thirdparty/example_thirdparty/__init__.py | timgates42/pybbm | 134 | 11146598 | <gh_stars>100-1000
# coding=utf-8
from __future__ import unicode_literals |
basis_set_exchange/tests/test_bundle_slow.py | ltalirz/basis_set_exchange | 108 | 11146611 | <reponame>ltalirz/basis_set_exchange
"""
Tests for creating bundles/archives of formatted data
"""
import pytest
from .test_bundle import _run_test_bundles, _bundle_exts
# yapf: disable
@pytest.mark.slow
@pytest.mark.parametrize('ext', _bundle_exts)
@pytest.mark.parametrize('fmt, reffmt', [('nwchem', 'bib'),
('psi4', 'txt'),
('json', 'json')])
# yapf: enable
def test_bundles_slow(tmp_path, fmt, reffmt, ext):
_run_test_bundles(tmp_path, fmt, reffmt, ext, None)
|
IoT Edge anomaly detection tutorial/iot_score.py | kgremban/ai-toolkit-iot-edge | 184 | 11146629 | # This script generates the scoring file
# with the init and run functions needed to
# operationalize the anomaly detection sample
import pickle
import json
import pandas
from sklearn.externals import joblib
from sklearn.linear_model import Ridge
from azureml.core.model import Model
def init():
global model
# this is a different behavior than before when the code is run locally, even though the code is the same.
model_path = Model.get_model_path('model.pkl')
# deserialize the model file back into a sklearn model
model = joblib.load(model_path)
# note you can pass in multiple rows for scoring
def run(input_str):
try:
input_json = json.loads(input_str)
input_df = pandas.DataFrame([[input_json['machine']['temperature'],input_json['machine']['pressure'],input_json['ambient']['temperature'],input_json['ambient']['humidity']]])
pred = model.predict(input_df)
print("Prediction is ", pred[0])
except Exception as e:
result = str(e)
if pred[0] == 1:
input_json['anomaly']=True
else:
input_json['anomaly']=False
return [json.dumps(input_json)]
|
cos/training/network.py | abidlabs/cone-of-silence | 113 | 11146637 | <reponame>abidlabs/cone-of-silence
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def rescale_conv(conv, reference):
"""
Rescale a convolutional module with `reference`.
"""
std = conv.weight.std().detach()
scale = (std / reference)**0.5
conv.weight.data /= scale
if conv.bias is not None:
conv.bias.data /= scale
def rescale_module(module, reference):
"""
Rescale a module with `reference`.
"""
for sub in module.modules():
if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d)):
rescale_conv(sub, reference)
def center_trim(tensor, reference):
"""
Trim a tensor to match with the dimension of `reference`.
"""
if hasattr(reference, "size"):
reference = reference.size(-1)
diff = tensor.size(-1) - reference
if diff < 0:
raise ValueError("tensor must be larger than reference")
if diff:
tensor = tensor[..., diff // 2:-(diff - diff // 2)]
return tensor
def left_trim(tensor, reference):
"""
Trim a tensor to match with the dimension of `reference`. Trims only the end.
"""
if hasattr(reference, "size"):
reference = reference.size(-1)
diff = tensor.size(-1) - reference
if diff < 0:
raise ValueError("tensor must be larger than reference")
if diff:
tensor = tensor[..., 0:-diff]
return tensor
def normalize_input(data):
"""
Normalizes the input to have mean 0 std 1 for each input
Inputs:
data - torch.tensor of size batch x n_mics x n_samples
"""
data = (data * 2**15).round() / 2**15
ref = data.mean(1) # Average across the n microphones
means = ref.mean(1).unsqueeze(1).unsqueeze(2)
stds = ref.std(1).unsqueeze(1).unsqueeze(2)
data = (data - means) / stds
return data, means, stds
def unnormalize_input(data, means, stds):
"""
    Undoes the normalization applied by normalize_input above
"""
data = (data * stds.unsqueeze(3) + means.unsqueeze(3))
return data
class CoSNetwork(nn.Module):
"""
Cone of Silence network based on the Demucs network for audio source separation.
"""
def __init__(
self,
n_audio_channels: int = 4, # pylint: disable=redefined-outer-name
window_conditioning_size: int = 5,
kernel_size: int = 8,
stride: int = 4,
context: int = 3,
depth: int = 6,
channels: int = 64,
growth: float = 2.0,
lstm_layers: int = 2,
rescale: float = 0.1): # pylint: disable=redefined-outer-name
super().__init__()
self.n_audio_channels = n_audio_channels
self.window_conditioning_size = window_conditioning_size
self.kernel_size = kernel_size
self.stride = stride
self.context = context
self.depth = depth
self.channels = channels
self.growth = growth
self.lstm_layers = lstm_layers
self.rescale = rescale
self.encoder = nn.ModuleList() # Source encoder
self.decoder = nn.ModuleList() # Audio output decoder
activation = nn.GLU(dim=1)
in_channels = n_audio_channels # Number of input channels
# Wave U-Net structure
for index in range(depth):
encode = nn.ModuleDict()
encode["conv1"] = nn.Conv1d(in_channels, channels, kernel_size,
stride)
encode["relu"] = nn.ReLU()
encode["conv2"] = nn.Conv1d(channels, 2 * channels, 1)
encode["activation"] = activation
encode["gc_embed1"] = nn.Conv1d(self.window_conditioning_size, channels, 1)
encode["gc_embed2"] = nn.Conv1d(self.window_conditioning_size, 2 * channels, 1)
self.encoder.append(encode)
decode = nn.ModuleDict()
if index > 0:
out_channels = in_channels
else:
out_channels = 2 * n_audio_channels
decode["conv1"] = nn.Conv1d(channels, 2 * channels, context)
decode["activation"] = activation
decode["conv2"] = nn.ConvTranspose1d(channels, out_channels,
kernel_size, stride)
decode["gc_embed1"] = nn.Conv1d(self.window_conditioning_size, 2 * channels, 1)
decode["gc_embed2"] = nn.Conv1d(self.window_conditioning_size, out_channels, 1)
if index > 0:
decode["relu"] = nn.ReLU()
self.decoder.insert(0,
decode) # Put it at the front, reverse order
in_channels = channels
channels = int(growth * channels)
# Bi-directional LSTM for the bottleneck layer
channels = in_channels
self.lstm = nn.LSTM(bidirectional=True,
num_layers=lstm_layers,
hidden_size=channels,
input_size=channels)
self.lstm_linear = nn.Linear(2 * channels, channels)
rescale_module(self, reference=rescale)
def forward(self, mix: torch.Tensor, angle_conditioning: torch.Tensor): # pylint: disable=arguments-differ
"""
Forward pass. Note that in our current work the use of `locs` is disregarded.
Args:
mix (torch.Tensor) - An input recording of size `(batch_size, n_mics, time)`.
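            angle_conditioning (torch.Tensor) - Conditioning vector of size
                `(batch_size, window_conditioning_size)`.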
Output:
x - A source separation output at every microphone
"""
x = mix
saved = [x]
# Encoder
for encode in self.encoder:
x = encode["conv1"](x) # Conv 1d
embedding = encode["gc_embed1"](angle_conditioning.unsqueeze(2))
x = encode["relu"](x + embedding)
x = encode["conv2"](x)
embedding2 = encode["gc_embed2"](angle_conditioning.unsqueeze(2))
x = encode["activation"](x + embedding2)
saved.append(x)
# Bi-directional LSTM at the bottleneck layer
x = x.permute(2, 0, 1) # prep input for LSTM
self.lstm.flatten_parameters() # to improve memory usage.
x = self.lstm(x)[0]
x = self.lstm_linear(x)
x = x.permute(1, 2, 0)
# Source decoder
for decode in self.decoder:
skip = center_trim(saved.pop(-1), x)
x = x + skip
x = decode["conv1"](x)
embedding = decode["gc_embed1"](angle_conditioning.unsqueeze(2))
x = decode["activation"](x + embedding)
x = decode["conv2"](x)
embedding2 = decode["gc_embed2"](angle_conditioning.unsqueeze(2))
if "relu" in decode:
x = decode["relu"](x + embedding2)
# Reformat the output
x = x.view(x.size(0), 2, self.n_audio_channels, x.size(-1))
return x
def loss(self, voice_signals, gt_voice_signals):
"""Simple L1 loss between voice and gt"""
return F.l1_loss(voice_signals, gt_voice_signals)
def valid_length(self, length: int) -> int: # pylint: disable=redefined-outer-name
"""
Find the length of the input to the network such that the output's length is
equal to the given `length`.
"""
for _ in range(self.depth):
length = math.ceil((length - self.kernel_size) / self.stride) + 1
length = max(1, length)
length += self.context - 1
for _ in range(self.depth):
length = (length - 1) * self.stride + self.kernel_size
return int(length)
def load_pretrain(model, state_dict): # pylint: disable=redefined-outer-name
"""Loads the pretrained keys in state_dict into model"""
for key in state_dict.keys():
try:
_ = model.load_state_dict({key: state_dict[key]}, strict=False)
print("Loaded {} (shape = {}) from the pretrained model".format(
key, state_dict[key].shape))
except Exception as e:
print("Failed to load {}".format(key))
print(e)
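# --- Illustrative usage sketch (not part of the original module) ---
# A minimal shape check, assuming the conventions documented above: `mix` is
# (batch, n_mics, time) and the conditioning vector has `window_conditioning_size`
# entries per example.
if __name__ == "__main__":
    net = CoSNetwork(n_audio_channels=4, window_conditioning_size=5)
    batch, n_mics = 2, 4
    length = net.valid_length(44100)  # pick a length the U-Net can process exactly
    mix = torch.randn(batch, n_mics, length)
    angle_conditioning = torch.randn(batch, 5)
    output = net(mix, angle_conditioning)
    print(output.shape)  # (batch, 2, n_audio_channels, time')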
|
ggplot/scales/scale_reverse.py | themiwi/ggplot | 1,133 | 11146638 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .scale import scale
from copy import deepcopy
class scale_y_reverse(scale):
"""
Reverse y axis
Examples
--------
>>> ggplot(diamonds, aes(x='price')) + geom_histogram() + scale_y_reverse()
"""
def __radd__(self, gg):
gg = deepcopy(gg)
gg.scale_y_reverse = True
return gg
class scale_x_reverse(scale):
"""
Reverse x axis
Examples
--------
>>> ggplot(diamonds, aes(x='price')) + geom_histogram() + scale_x_reverse()
"""
def __radd__(self, gg):
gg = deepcopy(gg)
gg.scale_x_reverse = True
return gg
|
back/tests/test_views/test_player_views/test_get.py | ramonakira/piclodio3 | 120 | 11146639 | from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase
from unittest.mock import patch
from restapi.models import WebRadio
from utils.player_manager import PlayerManager
class TestGet(APITestCase):
def setUp(self):
super(TestGet, self).setUp()
self.url = reverse('api:player:get_player_or_update')
def test_get_when_player_stopped_no_web_radio(self):
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue("active" in response.data)
self.assertFalse(response.data["active"])
expected = {'active': False, 'webradio': 'no default web radio yet'}
self.assertEquals(expected, response.data)
def test_get_when_player_started(self):
WebRadio.objects.create(name="test", url="http://test.com", is_default=True)
with patch.object(PlayerManager, 'is_started', return_value=True):
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue("active" in response.data)
self.assertTrue(response.data["active"])
expected = {'active': True,
'webradio': {'id': 1, 'name': 'test', 'url': 'http://test.com', 'is_default': True}}
self.assertEquals(expected, response.data)
|
boundary_layer/schemas/internal/operators.py | aksswami/boundary-layer | 252 | 11146674
# -*- coding: utf-8 -*-
# Copyright 2018 Etsy Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import marshmallow as ma
from boundary_layer.schemas.internal.base import BaseSpecSchema
from boundary_layer.schemas.base import StrictSchema
class PropertyPreprocessorSchema(StrictSchema):
type = ma.fields.String(required=True)
properties = ma.fields.Dict()
apply_to_properties = ma.fields.List(ma.fields.String, required=True)
class OperatorSpecSchema(BaseSpecSchema):
operator_class = ma.fields.String(required=True)
operator_class_module = ma.fields.String(required=True)
property_preprocessors = ma.fields.List(ma.fields.Nested(PropertyPreprocessorSchema))
@ma.validates_schema
def valid_preprocessor_property_names(self, data):
preprocessors = data.get('property_preprocessors', [])
if not preprocessors:
return
properties = frozenset(data.get('parameters_jsonschema', {}).get('properties', []))
for preprocessor in preprocessors:
missing = [
property_name for property_name in preprocessor['apply_to_properties']
if property_name not in properties]
if missing:
raise ma.ValidationError(
'Properties specified by preprocessor `{}` are not present '
'in the schema: `{}`'.format(
preprocessor['type'],
'`, `'.join(missing)))
assigned_preprocessors = {
property_name: [
preprocessor['type'] for preprocessor in preprocessors
if property_name in preprocessor['apply_to_properties']]
for property_name in properties
}
dupes = [
key for (key, value) in six.iteritems(assigned_preprocessors)
if len(value) > 1]
if dupes:
raise ma.ValidationError(
'One or more properties were assigned multiple preprocessors. '
'This is not permitted. Found: {}'.format({
key: value for (key, value) in six.iteritems(assigned_preprocessors)
if len(value) > 1
}),
['property_preprocessors'])
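# --- Illustrative example (field values below are assumptions, not a real spec) ---
# A dictionary accepted by OperatorSpecSchema could look roughly like:
#
#   {
#       "operator_class": "ExampleOperator",
#       "operator_class_module": "example.module",
#       "parameters_jsonschema": {"properties": {"query": {"type": "string"}}},
#       "property_preprocessors": [
#           {"type": "example_preprocessor",
#            "properties": {},
#            "apply_to_properties": ["query"]},
#       ],
#   }
#
# The schema-level validator above rejects a spec whose preprocessor names a property
# missing from `parameters_jsonschema`, or one that assigns two preprocessors to the
# same property.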
|
api/tacticalrmm/checks/migrations/0024_auto_20210606_1632.py | infinite8co/tacticalrmm | 903 | 11146722
# Generated by Django 3.2.1 on 2021-06-06 16:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('checks', '0023_check_run_interval'),
]
operations = [
migrations.RemoveField(
model_name='checkhistory',
name='check_history',
),
migrations.AddField(
model_name='checkhistory',
name='check_id',
field=models.PositiveIntegerField(default=0),
),
]
|
rex/crash_tracer/__init__.py | shellphish/rex | 471 | 11146742 | import logging
from abc import abstractmethod
import archr
from angr import sim_options as so
l = logging.getLogger("rex.CrashTracer")
class NonCrashingInput(Exception):
pass
class CrashTracerError(Exception):
pass
class TraceMode:
DUMB = "dumb"
HALFWAY = "halfway"
FULL_SYMBOLIC = "full_symbolic"
remove_options = {so.TRACK_REGISTER_ACTIONS, so.TRACK_TMP_ACTIONS, so.TRACK_JMP_ACTIONS,
so.ACTION_DEPS, so.TRACK_CONSTRAINT_ACTIONS, so.LAZY_SOLVES, so.SIMPLIFY_MEMORY_WRITES,
so.ALL_FILES_EXIST, so.UNICORN, so.CPUID_SYMBOLIC}
add_options = {so.MEMORY_SYMBOLIC_BYTES_MAP, so.TRACK_ACTION_HISTORY, so.CONCRETIZE_SYMBOLIC_WRITE_SIZES,
so.CONCRETIZE_SYMBOLIC_FILE_READ_SIZES, so.TRACK_MEMORY_ACTIONS, so.KEEP_IP_SYMBOLIC}
class CrashTracer:
def __init__(self, crash, tracer_bow=None, angr_project_bow=None, is_cgc=False):
"""
:param tracer_bow: The bow instance to use for tracing operations
:param angr_project_bow: The project bow to use, can be used for custom hooks and syscalls
"""
self.crash = crash
self.tracer_bow = tracer_bow
self.angr_project_bow = angr_project_bow
self.project = None
# cgc related
self._is_cgc = is_cgc
self.cgc_flag_page_magic = None
@abstractmethod
def concrete_trace(self, testcase, channel, pre_fire_hook, delay=0, actions=None, taint=None):
"""
generate a concrete trace and maybe core dump
"""
raise NotImplementedError()
@abstractmethod
def create_project(self, target, **kwargs):
"""
create an angr project
"""
raise NotImplementedError()
@abstractmethod
def create_state(self, target, **kwargs):
"""
create an initial angr state for later symbolic tracing
"""
raise NotImplementedError()
@abstractmethod
def bootstrap_state(self, state, **kwargs):
"""
modify the initial angr state for later symbolic tracing
"""
raise NotImplementedError()
def _init_angr_project_bow(self, target):
# pass tracer_bow to datascoutanalyzer to make addresses in angr consistent with those
# in the analyzer
if not self.angr_project_bow:
dsb = archr.arsenal.DataScoutBow(target, analyzer=self.tracer_bow)
self.angr_project_bow = archr.arsenal.angrProjectBow(target, dsb)
@staticmethod
def _channel_to_input_type(channel):
s = channel.split(":")[0]
return CrashInputType.STDIN if s == 'stdio' else s
@staticmethod
def identify_bad_bytes(crash):
"""
identify the bad bytes by inspecting constraints in an unconstrained state
the extracted bad bytes are used to help angrop filter gadgets
"""
state = crash.state
bad_bytes = []
sim_bytes = []
# in case its a partial IP overwrite
for i in range(state.project.arch.bytes):
byte = state.ip.get_byte(i)
if len(state.solver.eval_upto(byte, 2)) == 2:
sim_bytes.append(byte)
# a byte is a bad byte if none of the bytes in
# the pc can be that byte
for c in range(0x100):
if any(state.solver.satisfiable(extra_constraints=[c==x]) for x in sim_bytes):
continue
bad_bytes.append(c)
return bad_bytes
from ..enums import CrashInputType
from .full_tracer import SimTracer
from .halfway_tracer import HalfwayTracer
from .dumb_tracer import DumbTracer
|
frontend/templatetags/qunit_fixture_data.py | mepsd/CLAC | 126 | 11146751
import json
from django import template
from django import forms
from django.utils.safestring import mark_safe
from ..upload import UploadWidget
register = template.Library()
class UploadTestsForm(forms.Form):
file = forms.FileField(widget=UploadWidget(
accept=('.csv', 'application/test')
))
class AjaxformTestsForm(forms.Form):
foo = forms.CharField()
a_radio = forms.ChoiceField(choices=((1, "one"), (2, "two")),
widget=forms.RadioSelect)
some_checkboxes = forms.MultipleChoiceField(
required=False,
widget=forms.CheckboxSelectMultiple,
choices=(('a', 'Option A'), ('b', 'Option B'), ('c', 'Option C')))
file = forms.FileField(widget=UploadWidget(
accept=''
))
def render(self):
return ''.join([
'<form enctype="multipart/form-data" method="post"',
' is="ajax-form"',
' action="/post-stuff">',
' %s' % str(self.as_ul()),
' <button type="submit">submit</button>',
' <button type="submit" name="cancel">cancel</button>',
'</form>'
])
@register.simple_tag
def qunit_fixture_data_json():
data = {
'UPLOAD_TESTS_HTML': str(UploadTestsForm()['file']),
'AJAXFORM_TESTS_HTML': AjaxformTestsForm().render()
}
return mark_safe(json.dumps(data)) # nosec
|
plenum/common/config_helper.py | andkononykhin/plenum | 148 | 11146795 | import os
from common.exceptions import PlenumValueError, ValueUndefinedError
class PConfigHelper:
def __init__(self, config, *, chroot=None):
if config is None:
raise ValueUndefinedError('config')
if chroot is not None and not chroot.startswith("/"):
raise PlenumValueError('chroot', chroot, "starts with '/'")
self.config = config
self.chroot = chroot
def chroot_if_needed(self, path):
return self._chroot_if_needed(path, self.chroot)
@property
def log_dir(self):
return self.chroot_if_needed(self.config.LOG_DIR)
@property
def genesis_dir(self):
return self.chroot_if_needed(self.config.GENESIS_DIR)
@property
def plugins_dir(self):
return self.chroot_if_needed(self.config.PLUGINS_DIR)
@property
def keys_dir(self):
return self.chroot_if_needed(self.config.KEYS_DIR)
@property
def node_info_dir(self):
return self.chroot_if_needed(self.config.NODE_INFO_DIR)
@staticmethod
def _chroot_if_needed(path, chroot):
result = path
if chroot is not None and chroot != "/":
_path = path[1:] if path.startswith("/") else path
result = os.path.join(chroot, _path)
return result
class PNodeConfigHelper(PConfigHelper):
def __init__(self, name: str, config, *, chroot=None):
if name is None:
raise ValueUndefinedError('name')
super().__init__(config, chroot=chroot)
self.name = name
@property
def ledger_dir(self):
return self.chroot_if_needed(os.path.join(self.config.LEDGER_DIR,
self.name))
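# --- Illustrative usage sketch (the config class below is a stand-in, not a real plenum config) ---
if __name__ == "__main__":
    class _DemoConfig:
        LOG_DIR = "/var/log/indy"
        GENESIS_DIR = "/var/lib/indy/genesis"
        PLUGINS_DIR = "/var/lib/indy/plugins"
        KEYS_DIR = "/var/lib/indy/keys"
        NODE_INFO_DIR = "/var/lib/indy/node_info"
        LEDGER_DIR = "/var/lib/indy/ledger"
    helper = PNodeConfigHelper("Node1", _DemoConfig(), chroot="/tmp/sandbox")
    # With a chroot set, absolute paths are re-rooted under it:
    print(helper.ledger_dir)  # -> /tmp/sandbox/var/lib/indy/ledger/Node1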
|
earth_enterprise/src/fusion/portableglobe/servers/fileunpacker/build_and_test.py | ezeeyahoo/earthenterprise | 2,661 | 11146819
#!/usr/bin/env python2.7
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builds and tests Portable Server file unpacker library.
Builds the library for the 3 support systems: Mac, Linux
and Windows. Does basic tests to make sure that it can
read files and 3d and 2d packet data from globes and
maps.
"""
import distutils.sysconfig
import os
import platform
import re
import shutil
import subprocess
import sys
import util
# Add the OpenGEE Python libraries to the module search path:
opengee_lib_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'..', '..', '..', 'lib', 'python')
if opengee_lib_path not in sys.path:
sys.path.insert(1, opengee_lib_path)
import opengee.c_compiler
import opengee.environ
import opengee.version
def build_swig(source_dir):
opengee_build_dir = os.path.join(source_dir, 'NATIVE-REL-x86_64')
swig_builder = subprocess.Popen('python2.7 /usr/bin/scons -j1 release=1 portable_3rd_libs=1 build',
shell=True, cwd=os.path.join(source_dir, "../"), env=os.environ.copy())
if swig_builder.wait() != 0:
raise ValueError('Swig build failed!')
opengee.environ.env_prepend_path('PATH', os.path.join(opengee_build_dir, 'bin'), if_present='move')
def configure_c_compiler(os_dir):
if os_dir != 'Linux':
return
version = opengee.c_compiler.get_cc_version('g++')
if not version:
raise ValueError('Unable to determine g++ version!')
if not opengee.version.is_version_ge(version, [4, 8]):
# Check for GCC 4.8 from the devtoolset-2-toolchain package on Red Hat 6:
cc_dir = '/opt/rh/devtoolset-2/root/usr/bin'
if os.path.isfile('{0}/g++'.format(cc_dir)):
opengee.environ.env_prepend_path('PATH', cc_dir, if_present='move')
opengee.environ.env_prepend_path(
'LIBRARY_PATH',
'/opt/rh/devtoolset-2/root/usr/lib',
if_present='move'
)
opengee.environ.env_prepend_path(
'LIBRARY_PATH',
'/opt/rh/devtoolset-2/root/usr/lib64',
if_present='move')
else:
raise ValueError('Version of g++ ({0}) is below minimum (4.8)!'.format(
'.'.join(version)))
def BuildLibrary(os_dir, ignore_results, source_dir):
"""Returns whether able to build file unpacker library."""
try:
os.mkdir("dist")
except OSError:
pass # ok if it already exists
configure_c_compiler(os_dir)
specialDefs = ''
if os_dir == "Windows":
# The logic below fixes a method used by the swig c++ wrapper. Mingw python headers
# should detect and fix this but for some reason they aren't working with mingw64
pythonCLib = "libpython{0}{1}.a".format(sys.version_info[0], sys.version_info[1])
pathToLib = os.path.join(sys.exec_prefix, "libs", pythonCLib)
if not os.path.isfile(pathToLib):
print "ERROR: {0} was not found. It is needed for linking".format(pathToLib)
return False
archData = platform.architecture(pathToLib)
if archData[0] == "64bit":
specialDefs = "-DMS_WIN64"
elif os_dir == "Linux":
build_swig(source_dir)
os.chdir("dist")
fp = open("../%s/build_lib" % os_dir)
build_vars = {
'prefix': sys.prefix,
'exec_prefix': sys.exec_prefix,
'python_inc_dir': distutils.sysconfig.get_python_inc(),
'python_lib_dir': distutils.sysconfig.get_python_lib(),
'special_defs': specialDefs
}
for line in fp:
result = util.ExecuteCmd(line.format(**build_vars), use_shell=True)
if result:
if ignore_results:
print result
else:
print "FAIL: %s" % result
fp.close()
return False
fp.close()
return True
def RunTests():
"""Runs tests using the generated library."""
print "Running tests ..."
shutil.copyfile("../util.py", "util.py")
shutil.copyfile("../test.py", "test.py")
old_path = sys.path
sys.path = [os.getcwd()] + sys.path
import test
sys.path = old_path
test.main()
def main(argv):
"""Main for build and test."""
print(argv)
if ((len(argv) < 2 or argv[1].lower() not in ["mac", "windows", "linux"]) or
      (len(argv) < 3 and argv[1].lower() == "linux")):
print "Usage: build_and_test.py <OS_target> <source_dir>"
print
print "<OS_target> can be Mac, Windows, or Linux"
print "<source_dir> is only needed for Linux"
return
os.chdir(os.path.dirname(os.path.realpath(__file__)))
os_dir = "%s%s" % (argv[1][0:1].upper(), argv[1][1:].lower())
print "Build and test file unpacker library for %s Portable Server." % os_dir
if BuildLibrary(os_dir, argv[1].lower()=="windows", argv[2]):
print "Library built."
RunTests()
else:
sys.exit(1)
if __name__ == "__main__":
main(sys.argv)
|
d2go/data/keypoint_metadata_registry.py | wenliangzhao2018/d2go | 687 | 11146855 | #!/usr/bin/env python3
from typing import NamedTuple, List, Tuple
from detectron2.utils.registry import Registry
KEYPOINT_METADATA_REGISTRY = Registry("KEYPOINT_METADATA")
KEYPOINT_METADATA_REGISTRY.__doc__ = "Registry keypoint metadata definitions"
class KeypointMetadata(NamedTuple):
names: List[str]
flip_map: List[Tuple[str, str]]
connection_rules: List[Tuple[str, str, Tuple[int, int, int]]]
def to_dict(self):
return {
"keypoint_names": self.names,
"keypoint_flip_map": self.flip_map,
"keypoint_connection_rules": self.connection_rules,
}
def get_keypoint_metadata(name):
return KEYPOINT_METADATA_REGISTRY.get(name)().to_dict()
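# --- Hypothetical usage sketch: the metadata below is illustrative, not shipped with d2go ---
@KEYPOINT_METADATA_REGISTRY.register()
def _example_two_point_metadata():
    # A minimal two-keypoint skeleton showing the registry contract: the registered
    # callable returns a KeypointMetadata, and get_keypoint_metadata() converts it
    # into the metadata dict via to_dict().
    return KeypointMetadata(
        names=["left_eye", "right_eye"],
        flip_map=[("left_eye", "right_eye")],
        connection_rules=[("left_eye", "right_eye", (102, 204, 255))],
    )
# get_keypoint_metadata("_example_two_point_metadata") would then return the dict form.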
|
app/main.py | keller-mark/conda-starlette-docker | 129 | 11146865 | import sys
from starlette.applications import Starlette
from starlette.responses import JSONResponse
version = f"{sys.version_info.major}.{sys.version_info.minor}"
app = Starlette()
@app.route("/")
async def homepage(request):
message = f"Hello world! From Starlette running on Uvicorn with Gunicorn. Using Python {version}"
return JSONResponse({"message": message})
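# To try this app locally (illustrative command; assumes uvicorn is installed and
# that it is run from the directory containing this main.py):
#   uvicorn main:app --host 0.0.0.0 --port 80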
|
tests/tools/protocol/requests/test_api_versions_v0.py | akashvacher/kafka-tools | 578 | 11146904
import unittest
from kafka.tools.protocol.requests import ArgumentError
from kafka.tools.protocol.requests.api_versions_v0 import ApiVersionsV0Request
class ApiVersionsV0RequestTests(unittest.TestCase):
def test_process_arguments(self):
assert ApiVersionsV0Request.process_arguments([]) == {}
def test_process_arguments_extra(self):
self.assertRaises(ArgumentError, ApiVersionsV0Request.process_arguments, ['foo'])
|
autogl/module/hpo/suggestion/algorithm/quasi_random_search.py | general502570/AutoGL | 824 | 11146916 | from ...suggestion.algorithm.base_chocolate_algorithm import BaseChocolateAlgorithm
class QuasiRandomSearchAlgorithm(BaseChocolateAlgorithm):
"""
The implementation is based on https://github.com/tobegit3hub/advisor
Get the new suggested trials with quasi random search algorithm.
"""
def __init__(self):
super(QuasiRandomSearchAlgorithm, self).__init__("QuasiRandom")
|
model_zoo/exceptions.py | ModelZoo/ModelZoo | 191 | 11146924
class DefineException(Exception):
pass
class LoadException(Exception):
pass |
docs_src/examples/use_cases/example_cg_newton.py | jabader97/backpack | 395 | 11146938
r"""Matrix-free second-order optimization
=========================================
This example walks you through a second-order optimizer that uses the conjugate
gradient (CG) method and matrix-free multiplication with the block diagonal of
different curvature matrices to solve for the Newton step.
The optimizer is tested on the `classic MNIST example from PyTorch
<https://github.com/pytorch/examples/blob/master/mnist/main.py>`_.
In particular, we will use a model that suffers from the vanishing gradient
problem and is hence difficult to optimize with gradient descent. Second-order
methods are less affected by that issue and can train these models, as they
rescale the gradient according to the local curvature.
A local quadratic model of the loss defined by a curvature matrix :math:`C(x_t)`
(the Hessian, generalized Gauss-Newton, or other approximations)is minimized by
taking the Newton step
.. math::
x_{t+1} = x_t - \gamma (C(x_t) + \lambda I)^{-1} g(x_t),
where
.. math::
\begin{array}{ll}
x_t: & \text{parameters of the model} \\
g(x_t): & \text{gradient} \\
C(x_t): & \text{curvature of the local quadratic model at `x_t`} \\
\lambda: & \text{damping parameter} \\
\gamma: & \text{step-size} \\
\end{array}
"""
# %%
# Let's get the imports, configuration and some helper functions out of the way first.
# Notice that we are choosing a net with many sigmoids to make it hard to train for SGD.
#
# .. note::
# Larger batch sizes are usually recommended for second-order methods. However, the
# memory constraints imposed by the architecture used to build this example restrict
# us to rather small values.
import math
import matplotlib.pyplot as plt
import torch
from backpack import backpack, extend, extensions
from backpack.utils.examples import get_mnist_dataloader
BATCH_SIZE = 64
LR = 0.1
DAMPING = 1e-2
CG_TOL = 0.1
CG_ATOL = 1e-6
CG_MAX_ITER = 20
MAX_ITER = 50
PRINT_EVERY = 10
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
torch.manual_seed(0)
mnist_loader = get_mnist_dataloader(batch_size=BATCH_SIZE)
def make_model():
return torch.nn.Sequential(
torch.nn.Conv2d(1, 10, 5, 1),
torch.nn.Sigmoid(),
torch.nn.MaxPool2d(2, 2),
torch.nn.Conv2d(10, 20, 5, 1),
torch.nn.Sigmoid(),
torch.nn.MaxPool2d(2, 2),
torch.nn.Flatten(),
torch.nn.Linear(4 * 4 * 20, 50),
torch.nn.Sigmoid(),
torch.nn.Linear(50, 10),
)
model = make_model().to(DEVICE)
loss_function = torch.nn.CrossEntropyLoss().to(DEVICE)
def get_accuracy(output, targets):
"""Helper function to print the accuracy"""
predictions = output.argmax(dim=1, keepdim=True).view_as(targets)
return predictions.eq(targets).float().mean().item()
# %%
# Writing the optimizer
# ---------------------
# To compute the update, we need access to the curvature matrix in form of
# matrix-vector products. We can then solve the linear system implied by the
# Newton step with CG,
#
# .. math::
#
# (C(x_t) + \lambda I) v = - g(x_t),
#
# and perform the update
#
# .. math::
#
# x_{t+1} = x_t - \gamma v,
#
# for every parameter.
#
# Here is the optimizer. At its core is a simple implementation of CG that
# will iterate until the residual norm drops below a certain threshold (determined
# by the ``atol`` and ``tol`` arguments), or the number of iterations exceeds a
# maximum budget (``maxiter``).
class CGNOptimizer(torch.optim.Optimizer):
def __init__(
self,
parameters,
bp_extension,
lr=0.1,
damping=1e-2,
maxiter=100,
tol=1e-1,
atol=1e-8,
):
super().__init__(
parameters,
dict(
lr=lr,
damping=damping,
maxiter=maxiter,
tol=tol,
atol=atol,
savefield=bp_extension.savefield,
),
)
self.bp_extension = bp_extension
def step(self):
for group in self.param_groups:
for p in group["params"]:
damped_curvature = self.damped_matvec(
p, group["damping"], group["savefield"]
)
direction, info = self.cg(
damped_curvature,
-p.grad.data,
maxiter=group["maxiter"],
tol=group["tol"],
atol=group["atol"],
)
p.data.add_(direction, alpha=group["lr"])
def damped_matvec(self, param, damping, savefield):
curvprod_fn = getattr(param, savefield)
def matvec(v):
v = v.unsqueeze(0)
result = damping * v + curvprod_fn(v)
return result.squeeze(0)
return matvec
@staticmethod
def cg(A, b, x0=None, maxiter=None, tol=1e-5, atol=1e-8):
r"""Solve :math:`Ax = b` for :math:`x` using conjugate gradient.
The interface is similar to CG provided by :code:`scipy.sparse.linalg.cg`.
The main iteration loop follows the pseudo code from Wikipedia:
https://en.wikipedia.org/w/index.php?title=Conjugate_gradient_method&oldid=855450922
Parameters
----------
A : function
Function implementing matrix-vector multiplication by `A`.
b : torch.Tensor
Right-hand side of the linear system.
x0 : torch.Tensor
Initialization estimate.
atol: float
Absolute tolerance to accept convergence. Stop if
:math:`|| A x - b || <` `atol`
tol: float
Relative tolerance to accept convergence. Stop if
:math:`|| A x - b || / || b || <` `tol`.
maxiter: int
Maximum number of iterations.
Returns
-------
x (torch.Tensor): Approximate solution :math:`x` of the linear system
info (int): Provides convergence information, if CG converges info
corresponds to numiter, otherwise info is set to zero.
"""
maxiter = b.numel() if maxiter is None else min(maxiter, b.numel())
x = torch.zeros_like(b) if x0 is None else x0
# initialize parameters
r = (b - A(x)).detach()
p = r.clone()
rs_old = (r ** 2).sum().item()
# stopping criterion
norm_bound = max([tol * torch.norm(b).item(), atol])
def converged(rs, numiter):
"""Check whether CG stops (convergence or steps exceeded)."""
norm_converged = norm_bound > math.sqrt(rs)
info = numiter if norm_converged else 0
iters_exceeded = numiter > maxiter
return (norm_converged or iters_exceeded), info
# iterate
iterations = 0
while True:
Ap = A(p).detach()
alpha = rs_old / (p * Ap).sum().item()
x.add_(p, alpha=alpha)
r.sub_(Ap, alpha=alpha)
rs_new = (r ** 2).sum().item()
iterations += 1
stop, info = converged(rs_new, iterations)
if stop:
return x, info
p.mul_(rs_new / rs_old)
p.add_(r)
rs_old = rs_new
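# %%
# As a quick sanity check of the CG routine above (an illustrative addition, not part
# of the original example), we can solve a tiny symmetric positive-definite system with
# ``CGNOptimizer.cg`` and compare the result against a direct solve.
A_small = torch.tensor([[4.0, 1.0], [1.0, 3.0]])
b_small = torch.tensor([1.0, 2.0])
x_cg, _ = CGNOptimizer.cg(lambda v: A_small @ v, b_small, tol=1e-6, atol=1e-9)
x_direct = torch.inverse(A_small) @ b_small
print("CG solution:    ", x_cg)
print("Direct solution:", x_direct)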
# %%
# Running and plotting
# --------------------
# Let's try the Newton-style CG optimizer with the generalized Gauss-Newton (GGN)
# as curvature matrix.
#
# After ``extend``-ing the model and the loss function and creating the optimizer,
# we have to add the curvature-matrix product extension to the ``with backpack(...)``
# context in a backward pass, such that the optimizer has access to the GGN product.
# The rest is just a canonical training loop which logs and visualizes training
# loss and accuracy.
model = extend(model)
loss_function = extend(loss_function)
optimizer = CGNOptimizer(
model.parameters(),
extensions.GGNMP(),
lr=LR,
damping=DAMPING,
maxiter=CG_MAX_ITER,
tol=CG_TOL,
atol=CG_ATOL,
)
losses = []
accuracies = []
for batch_idx, (x, y) in enumerate(mnist_loader):
optimizer.zero_grad()
x, y = x.to(DEVICE), y.to(DEVICE)
outputs = model(x)
loss = loss_function(outputs, y)
with backpack(optimizer.bp_extension):
loss.backward()
optimizer.step()
# Logging
losses.append(loss.detach().item())
accuracies.append(get_accuracy(outputs, y))
if (batch_idx % PRINT_EVERY) == 0:
print(
"Iteration %3.d/%3.d " % (batch_idx, MAX_ITER)
+ "Minibatch Loss %.5f " % losses[-1]
+ "Accuracy %.5f" % accuracies[-1]
)
if MAX_ITER is not None and batch_idx > MAX_ITER:
break
fig = plt.figure()
axes = [fig.add_subplot(1, 2, 1), fig.add_subplot(1, 2, 2)]
axes[0].plot(losses)
axes[0].set_title("Loss")
axes[0].set_xlabel("Iteration")
axes[1].plot(accuracies)
axes[1].set_title("Accuracy")
axes[1].set_xlabel("Iteration")
# %%
# Vanishing gradients: comparison with SGD
# ----------------------------------------
# By intention, we chose a model that is different to optimize with gradient descent
# due to the large number of sigmoids that reduce the gradient signal in backpropagation.
#
# To verify that, let's compare the Newton optimizer for different curvatures with SGD.
# SGD is run for a large range of learning rates :code:`lr ∈ [10, 1, 0.1, 0.01, 0.001]`.
#
# The performance of CG-Newton versus SGD is shown below (using a somewhat simplified
# color scheme to keep the visualization easy to read).
def make_cgn_optimizer_fn(extension):
def optimizer_fn(model):
return CGNOptimizer(
model.parameters(),
extension,
lr=LR,
damping=DAMPING,
maxiter=CG_MAX_ITER,
tol=CG_TOL,
atol=CG_ATOL,
)
return optimizer_fn
curvatures = [
extensions.GGNMP(),
extensions.HMP(),
extensions.PCHMP(modify="abs"),
extensions.PCHMP(modify="clip"),
]
labels = [
"GGN",
"Hessian",
"PCH-abs",
"PCH-clip",
]
optimizers = []
for curvature in curvatures:
optimizers.append(make_cgn_optimizer_fn(curvature))
def make_sgd_optimizer_fn(lr):
def optimizer_fn(model):
return torch.optim.SGD(model.parameters(), lr=lr)
return optimizer_fn
sgd_lrs = [
10,
1,
0.1,
0.01,
0.001,
]
for lr in sgd_lrs:
optimizers.append(make_sgd_optimizer_fn(lr))
labels.append("SGD, lr={}".format(lr))
def train(optim_fn):
torch.manual_seed(0)
mnist_loader = get_mnist_dataloader(batch_size=BATCH_SIZE)
model = make_model().to(DEVICE)
loss_function = torch.nn.CrossEntropyLoss().to(DEVICE)
optimizer = optim_fn(model)
need_backpack = isinstance(optimizer, CGNOptimizer)
if need_backpack:
model = extend(model)
loss_function = extend(loss_function)
losses = []
accuracies = []
for batch_idx, (x, y) in enumerate(mnist_loader):
optimizer.zero_grad()
x, y = x.to(DEVICE), y.to(DEVICE)
outputs = model(x)
loss = loss_function(outputs, y)
if need_backpack:
with backpack(optimizer.bp_extension):
loss.backward()
else:
loss.backward()
optimizer.step()
# Logging
losses.append(loss.detach().item())
accuracies.append(get_accuracy(outputs, y))
if (batch_idx % PRINT_EVERY) == 0:
print(
"Iteration %3.d/%3.d " % (batch_idx, MAX_ITER)
+ "Minibatch Loss %.5f " % losses[-1]
+ "Accuracy %.5f" % accuracies[-1]
)
if MAX_ITER is not None and batch_idx > MAX_ITER:
break
return losses, accuracies
fig = plt.figure()
axes = [fig.add_subplot(1, 2, 1), fig.add_subplot(1, 2, 2)]
axes[0].set_title("Loss")
axes[0].set_ylim(0, 2.5)
axes[0].set_xlabel("Iteration")
axes[1].set_title("Accuracy")
axes[1].set_xlabel("Iteration")
for optim_fn, label in zip(optimizers, labels):
print(label)
losses, accuracies = train(optim_fn)
if "SGD" in label:
axes[0].plot(losses, "-", color="tab:orange", label=label)
axes[1].plot(accuracies, "-", color="tab:orange", label=label)
elif "Hessian" in label:
axes[0].plot(losses, "-.", color="tab:green", label=label)
axes[1].plot(accuracies, "-.", color="tab:green", label=label)
else:
axes[0].plot(losses, "--", color="tab:blue", label=label)
axes[1].plot(accuracies, "--", color="tab:blue", label=label)
plt.legend()
# %%
# While SGD is not capable to train this particular model, the second-order methods
# are still able to do so. Such methods may be interesting for optimization tasks
# that first-order methods struggle with.
#
# Note that the Hessian of the net is not positive semi-definite.
# In this case, the local quadratic model does not have a global minimum and that
# complicates the usage of the Hessian in second-order optimization. This also
# provides a motivation for the other positive semi-definite Hessian approximations
# shown here.
|
examples/midifiles/play_midi_file.py | fooker/mido | 658 | 11146952
#!/usr/bin/env python
"""
Play MIDI file on output port.
Run with (for example):
    ./play_midi_file.py 'test.mid' 'SH-201 MIDI 1'
"""
import sys
import mido
import time
from mido import MidiFile
filename = sys.argv[1]
if len(sys.argv) == 3:
portname = sys.argv[2]
else:
portname = None
with mido.open_output(portname) as output:
try:
midifile = MidiFile(filename)
t0 = time.time()
for message in midifile.play():
print(message)
output.send(message)
print('play time: {:.2f} s (expected {:.2f})'.format(
time.time() - t0, midifile.length))
except KeyboardInterrupt:
print()
output.reset()
|
src/sqlfluff/testing/__init__.py | pvonglehn/sqlfluff | 3,024 | 11146983 | """Testing utils we want to expose for usage by plugins."""
|
SqlmapCelery/model/hunter_model.py | tt9133github/hunter | 322 | 11147027 | #!/ usr/bin/env
# coding=utf-8
#
# Copyright 2019 ztosec & https://www.zto.com/
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
author: b5mali4
"""
import json
import peewee
from common.mysql_util import MysqlManage
class HunterModel(peewee.Model):
def __str__(self):
"""
        Serialize this model instance to a JSON string.
:return:
"""
result = dict()
for key in self.__data__.keys():
try:
result[key] = str(getattr(self, key))
except:
result[key] = json.dumps(getattr(self, key))
return json.dumps(result)
class HunterModelService(object):
@staticmethod
@MysqlManage.close_database
def save(cls, **kwargs):
"""
        Save the given values as a new record.
:param cls:
:param kwargs:
:return:
"""
if not kwargs:
return
return cls.create(**kwargs)
@staticmethod
@MysqlManage.close_database
def count(cls, **kwargs):
"""
        Count the rows matching the given query.
:param cls:
:param kwargs:
:return:
"""
        if not kwargs:  # no filter given: count all rows
return cls.select().count()
if "where" in kwargs: # 查询条件
if isinstance(kwargs["where"], tuple):
return cls.select().where(*kwargs["where"]).count()
else:
return cls.select().where(kwargs["where"]).count()
@staticmethod
@MysqlManage.close_database
def update(cls, **kwargs):
"""
        Execute an UPDATE statement.
:param cls:
:param kwargs:
:return:
"""
        if not kwargs and "fields" not in kwargs:  # nothing supplied, or no fields to update
return
if "where" not in kwargs and "fields" in kwargs: # 字段填入,条件没填,默认全更新
if isinstance(kwargs["fields"], dict):
return cls.update(kwargs["fields"]).execute()
return cls.update(*kwargs["fields"]).execute()
if "where" in kwargs and "fields" in kwargs: # 两个都填的情况
if isinstance(kwargs["where"], tuple) and isinstance(kwargs["fields"], dict):
return cls.update(kwargs["fields"]).where(*kwargs["where"]).execute()
if isinstance(kwargs["where"], tuple) and not isinstance(kwargs["fields"], dict):
return cls.update(*kwargs["fields"]).where(*kwargs["where"]).execute()
if not isinstance(kwargs["where"], tuple) and isinstance(kwargs["fields"], dict):
return cls.update(kwargs["fields"]).where(tuple([kwargs["where"]])).execute()
if not isinstance(kwargs["where"], tuple) and not isinstance(kwargs["fields"], dict):
                return cls.update(*kwargs["fields"]).where(tuple([kwargs["where"]])).execute()
@staticmethod
@MysqlManage.close_database
def remove(cls, **kwargs):
"""
        Delete records.
:return:
"""
        if not kwargs:  # no condition: delete everything
return cls.delete().execute()
if "where" in kwargs: # 查询条件
if isinstance(kwargs["where"], tuple):
return cls.delete().where(*kwargs["where"]).execute()
else:
return cls.delete().where(kwargs["where"]).execute()
@staticmethod
@MysqlManage.close_database
def get_objects(cls, **kwargs):
"""
        Public helper: return the query results as a list.
:param kwargs:
:return:
"""
objects = list()
for object in HunterModelService.get_fields_by_where(cls, **kwargs):
objects.append(object)
return objects
@staticmethod
@MysqlManage.close_database
def get_fields_by_where(cls, **kwargs):
"""
        Generic CRUD helper for building SELECT queries.
        To use:
            >>> tasks = HunterModelService.get_fields_by_where(Task, fields=(Task.id, Task.hunter_status), where=(Task.id > 1))
>>> print(tasks)
:param kwargs:
:return:
"""
# cls = self.__class__
        if not kwargs:  # nothing supplied: select everything
return cls.select().execute()
if "fields" not in kwargs and "where" in kwargs: # 要的结果字段没填
if isinstance(kwargs["where"], tuple):
return cls.select().where(*kwargs["where"]).execute()
return cls.select().where(tuple([kwargs["where"]])).execute()
if "where" not in kwargs and "fields" in kwargs: # 要的结果字段没填
if isinstance(kwargs["fields"], tuple):
return cls.select(*kwargs["fields"]).execute()
return cls.select(kwargs["fields"]).execute()
if "where" in kwargs and "fields" in kwargs: # 两个都填的情况
if isinstance(kwargs["where"], tuple) and isinstance(kwargs["fields"], tuple):
return cls.select(*kwargs["fields"]).where(*kwargs["where"]).execute()
if isinstance(kwargs["where"], tuple) and not isinstance(kwargs["fields"], tuple):
return cls.select(kwargs["fields"]).where(*kwargs["where"]).execute()
if not isinstance(kwargs["where"], tuple) and isinstance(kwargs["fields"], tuple):
return cls.select(*kwargs["fields"]).where(tuple([kwargs["where"]])).execute()
if not isinstance(kwargs["where"], tuple) and not isinstance(kwargs["fields"], tuple):
return cls.select(kwargs["fields"]).where(tuple([kwargs["where"]])).execute()
class OrmModelJsonSerializer:
@staticmethod
def serializer(instances):
"""
        Serialization helper: convert model instances (or a cursor of instances) to dicts.
        :param instances:
:return:
"""
if not isinstance(instances, list) and isinstance(instances, HunterModel) and hasattr(instances, '__str__'):
return json.loads(instances.__str__())
if isinstance(instances, peewee.CursorWrapper):
results = list()
for instance in instances:
result = OrmModelJsonSerializer.serializer(instance)
results.append(result)
return results
|
algoexpert.io/python/Lowest_Common_Manager.py | mamane19/coding-interview-gym | 713 | 11147050 | class OrgInfo:
def __init__(self, lowestCommonManager, numImportantReports):
self.lowestCommonManager = lowestCommonManager
self.numImportantReports = numImportantReports
class Node:
def __init__(self, directReports):
self.directReports = directReports
# O(n) time | O(d) spaces
def getLowestCommonManager(topManager, reportOne, reportTwo):
return getOrgInfo(topManager, reportOne, reportTwo).lowestCommonManager
def getOrgInfo(manager: Node, reportOne, reportTwo):
numImportantReports = 0
for directReport in manager.directReports:
orgInfo = getOrgInfo(directReport, reportOne, reportTwo)
if orgInfo.lowestCommonManager is not None:
return orgInfo
numImportantReports += orgInfo.numImportantReports
if manager == reportOne or manager == reportTwo:
numImportantReports += 1
lowestCommonManager = manager if numImportantReports == 2 else None
return OrgInfo(lowestCommonManager, numImportantReports)
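# --- Example usage sketch (the org chart below is made up for illustration) ---
if __name__ == "__main__":
    report_one = Node([])
    report_two = Node([])
    middle_manager = Node([report_one, report_two])
    top_manager = Node([middle_manager, Node([])])
    # Both reports sit under middle_manager, so it is their lowest common manager.
    print(getLowestCommonManager(top_manager, report_one, report_two) is middle_manager)  # True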
|
generate-standalone-package.py | mblack20/flintrock | 615 | 11147077 | import os
import platform
import shutil
import subprocess
from flintrock import __version__ as flintrock_version
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
if __name__ == '__main__':
operating_system = platform.system()
if operating_system.lower() == 'darwin':
operating_system = 'macOS'
machine_type = platform.machine()
subprocess.run(
[
'pyinstaller',
'--noconfirm',
'--clean',
'--name', 'flintrock',
'--additional-hooks-dir', '.',
# This hidden import is introduced by botocore.
# We won't need this when this issue is resolved:
# https://github.com/pyinstaller/pyinstaller/issues/1844
'--hidden-import', 'html.parser',
# This hidden import is also introduced by botocore.
# It appears to be related to this issue:
# https://github.com/pyinstaller/pyinstaller/issues/1935
'--hidden-import', 'configparser',
'standalone.py'
],
check=True)
shutil.make_archive(
base_name=os.path.join(
THIS_DIR, 'dist',
'Flintrock-{v}-standalone-{os}-{m}'.format(
v=flintrock_version,
os=operating_system,
m=machine_type)),
format='zip',
root_dir=os.path.join(THIS_DIR, 'dist', 'flintrock'))
|
cogdl/wrappers/model_wrapper/node_classification/node_classification_mw.py | THUDM/cogdl | 1,072 | 11147107
import torch
from cogdl.wrappers.model_wrapper import ModelWrapper, register_model_wrapper
@register_model_wrapper("node_classification_mw")
class NodeClfModelWrapper(ModelWrapper):
def __init__(self, model, optimizer_config):
super(NodeClfModelWrapper, self).__init__()
self.optimizer_config = optimizer_config
self.model = model
def train_step(self, subgraph):
graph = subgraph
pred = self.model(graph)
train_mask = graph.train_mask
loss = self.default_loss_fn(pred[train_mask], graph.y[train_mask])
return loss
def val_step(self, subgraph):
graph = subgraph
pred = self.model(graph)
y = graph.y
val_mask = graph.val_mask
loss = self.default_loss_fn(pred[val_mask], y[val_mask])
metric = self.evaluate(pred[val_mask], graph.y[val_mask], metric="auto")
self.note("val_loss", loss.item())
self.note("val_metric", metric)
def test_step(self, batch):
graph = batch
pred = self.model(graph)
test_mask = batch.test_mask
loss = self.default_loss_fn(pred[test_mask], batch.y[test_mask])
metric = self.evaluate(pred[test_mask], batch.y[test_mask], metric="auto")
self.note("test_loss", loss.item())
self.note("test_metric", metric)
def setup_optimizer(self):
cfg = self.optimizer_config
if hasattr(self.model, "setup_optimizer"):
model_spec_optim = self.model.setup_optimizer(cfg)
if model_spec_optim is not None:
return model_spec_optim
return torch.optim.Adam(self.model.parameters(), lr=cfg["lr"], weight_decay=cfg["weight_decay"])
def set_early_stopping(self):
return "val_metric", ">"
|
actions/update_ticket_status.py | kodithuw/stackstorm-zendesk | 164 | 11147133 | from lib.zendesk import ZendeskAction
__all__ = [
'UpdateTicketStatusAction'
]
class UpdateTicketStatusAction(ZendeskAction):
def run(self, ticket_id, status):
return self.update_ticket_status(ticket_id, status)
|
tests/test_reading_1_4.py | CCInc/laspy | 240 | 11147139
import pytest
import laspy
from tests.test_common import test1_4_las
@pytest.fixture()
def file():
return laspy.read(test1_4_las)
def test_unscaled_x(file):
assert file.X.max() == 1751224820
assert file.X.min() == 1320803567
def test_unscaled_y(file):
assert file.Y.max() == -860121188
assert file.Y.min() == -864646690
def test_unscaled_z(file):
assert file.Z.max() == -1745638014
assert file.Z.min() == -1751937981
def test_intensity(file):
assert file.intensity.max() == 68
assert file.intensity.min() == 2
def test_return_number(file):
assert file.return_number.max() == 4
assert file.return_number.min() == 1
def test_number_of_returns(file):
assert file.number_of_returns.max() == 4
assert file.number_of_returns.min() == 1
def test_edge_of_flight_line(file):
assert file.edge_of_flight_line.max() == 1
assert file.edge_of_flight_line.min() == 0
def test_scan_direction_flag(file):
assert file.scan_direction_flag.max() == 1
assert file.scan_direction_flag.min() == 0
def test_classification(file):
assert file.classification.max() == 2
assert file.classification.min() == 2
def test_scan_angle_rank(file):
assert file.scan_angle.max() == 3173
assert file.scan_angle.min() == 1837
def test_user_data(file):
assert file.user_data.max() == 0
assert file.user_data.min() == 0
def test_point_source_id(file):
assert file.point_source_id.max() == 202
assert file.point_source_id.min() == 202
def test_gps_time(file):
assert file.gps_time.max() == pytest.approx(83177420.601045)
assert file.gps_time.min() == pytest.approx(83177420.534005)
def test_scanner_channel(file):
assert file.scanner_channel.max() == 0
assert file.scanner_channel.min() == 0
|
svtools/lmerge.py | NeolithEra/svtools | 120 | 11147151 | import sys
import svtools.l_bp as l_bp
from svtools.breakpoint import Breakpoint
import svtools.logspace as ls
from svtools.vcf.file import Vcf
from svtools.vcf.variant import Variant
from svtools.utils import parse_bnd_alt_string, InputStream
from svtools.exceptions import MissingProbabilitiesException
import sys
import numpy as np
import argparse
import heapq
import re
def null_format_string(format_string):
null_list = []
num_null_fields = len(format_string.split(':'))
if format_string.startswith('GT:'):
null_list = ['./.']
num_null_fields -= 1
null_list.extend(list('.' * num_null_fields))
null_string = ':'.join(null_list)
return null_string
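# For example (illustrative): null_format_string("GT:SU:PE") returns "./.:.:.",
# i.e. a missing genotype plus one "." placeholder per remaining FORMAT field.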
def merge_single_bp(BP, sample_order, v_id, use_product, vcf, vcf_out, include_genotypes):
A = BP[0].l.rstrip().split('\t')
var = Variant(A,vcf)
try:
sname = var.get_info('SNAME')
var.set_info('SNAME', sname + ':' + var.var_id)
except KeyError:
pass
var.var_id=str(v_id)
if use_product:
var.set_info('ALG', 'PROD')
else:
var.set_info('ALG', 'SUM')
GTS = None
if include_genotypes:
null_string = null_format_string(A[8])
gt_dict = { sname: A[9] }
GTS = '\t'.join([gt_dict.get(x, null_string) for x in sample_order])
var.gts = None
var.gts_string = GTS
return var
def order_cliques(BP, C):
#Sweep the set. Find the largest intersecting set. Remove it. Continue.
BP_i = range(len(BP)) # index set of each node in the graph
while len(BP_i) > 0:
h_l = [] #heap of left breakpoint end coordinates and node id (index). heapq is a min heap and the end coord is what will be used for the sorting.
max_c = []
max_c_len = 0
for i in BP_i:
# remove anything in the heap that doesn't intersect with the current breakpoint
while (len(h_l) > 0) and (h_l[0][0] < BP[i].left.start):
heapq.heappop(h_l)
heapq.heappush(h_l, (BP[i].left.end, i)) # add to the heap
# at this point everything in h_l intersects on the left
# but we need to take into account what is going on on the right
h_r = [] # heap with rightmost starts
h_l_i = [x[1] for x in h_l] # this is all of the node ids on the heap currently
h_l_i.sort(key=lambda x:BP[x].right.start) # sort them by their right start
for j in h_l_i:
# remove anything in the heap that doesn't intersect with the current breakpoint on the right end
while (len(h_r) > 0) and (h_r[0][0] < BP[j].right.start):
heapq.heappop(h_r)
# add something to the right heap
heapq.heappush(h_r, (BP[j].right.end, j))
if max_c_len < len(h_r):
# max clique! Register what nodes we have
max_c_len = len(h_r)
max_c = [y[1] for y in h_r]
C.append(max_c)
for c in max_c:
BP_i.remove(c)
def getCI95( p_L, p_R, max_i_L, max_i_R):
ninefive_i_L_start = max_i_L
ninefive_i_L_end = max_i_L
ninefive_i_L_total = p_L[max_i_L]
while (ninefive_i_L_total < 0.95):
if (ninefive_i_L_start <= 0) and (ninefive_i_L_end >= (len(p_L)-1)):
break
ninefive_i_L_start = max(0, ninefive_i_L_start - 1)
ninefive_i_L_end = min(len(p_L)-1, ninefive_i_L_end +1)
ninefive_i_L_total = sum(p_L[ninefive_i_L_start:ninefive_i_L_end+1])
ninefive_i_L_start = ninefive_i_L_start - max_i_L
ninefive_i_L_end = ninefive_i_L_end - max_i_L
ninefive_i_R_start = max_i_R
ninefive_i_R_end = max_i_R
ninefive_i_R_total = p_R[max_i_R]
while (ninefive_i_R_total < 0.95):
if (ninefive_i_R_start <= 0) and (ninefive_i_R_end >= len(p_R)-1):
break
ninefive_i_R_start = max(0, ninefive_i_R_start - 1)
ninefive_i_R_end = min(len(p_R)-1, ninefive_i_R_end +1)
ninefive_i_R_total = sum(p_R[ninefive_i_R_start:ninefive_i_R_end+1])
ninefive_i_R_end = ninefive_i_R_end - max_i_R
ninefive_i_R_start = ninefive_i_R_start - max_i_R
CIPOS95=str(ninefive_i_L_start) + ',' + str(ninefive_i_L_end)
CIEND95=str(ninefive_i_R_start) + ',' + str(ninefive_i_R_end)
return [CIPOS95, CIEND95]
def combine_pdfs(BP, c, use_product, weighting_scheme):
L = []
R = []
for b_i in c:
b = BP[b_i]
L.append([b.left.start, b.left.end, b.left.p])
R.append([b.right.start, b.right.end, b.right.p])
[start_R, end_R, a_R] = l_bp.align_intervals(R)
[start_L, end_L, a_L] = l_bp.align_intervals(L)
p_L = [0] * len(a_L[0])
p_R = [0] * len(a_R[0])
wts = [1] * len(c)
for c_i in range(len(c)):
if weighting_scheme == 'evidence_wt':
A = BP[c[c_i]].l.rstrip().split('\t', 10)
m = l_bp.to_map(A[7])
wt=int(m['SU'])
#sys.stderr.write("wt\t0\t"+str(wt)+"\n")
a_L[c_i]=[wt*ali for ali in a_L[c_i]]
a_R[c_i]=[wt*ari for ari in a_R[c_i]]
elif weighting_scheme == 'carrier_wt':
A = BP[c[c_i]].l.rstrip().split('\t', 10)
m = l_bp.to_map(A[7])
wt = 1
if 'SNAME' in m:
wt=len(m['SNAME'].split(','))
a_L[c_i]=[wt*ali for ali in a_L[c_i]]
a_R[c_i]=[wt*ari for ari in a_R[c_i]]
for i in range(len(a_L[c_i])):
#sys.stderr.write("L\t"+str(i)+"\t"+str(c_i)+"\t"+str(a_L[c_i][i])+"\n")
p_L[i] += a_L[c_i][i]
for i in range(len(a_R[c_i])):
#sys.stderr.write("R\t"+str(i)+"\t"+str(c_i)+"\t"+str(a_R[c_i][i])+"\n")
p_R[i] += a_R[c_i][i]
ALG = 'SUM'
if use_product:
pmax_i_L = p_L.index(max(p_L))
pmax_i_R = p_R.index(max(p_R))
miss = 0
for c_i in range(len(c)):
if (a_L[c_i][pmax_i_L] == 0) or (a_R[c_i][pmax_i_R] == 0):
miss += 1
if miss == 0:
ALG = "PROD"
ls_p_L = [ls.get_ls(1)] * len(a_L[0])
ls_p_R = [ls.get_ls(1)] * len(a_R[0])
for c_i in range(len(c)):
for i in range(len(a_L[c_i])):
ls_p_L[i] = ls.ls_multiply(ls_p_L[i], ls.get_ls(a_L[c_i][i]))
for i in range(len(a_R[c_i])):
ls_p_R[i] = ls.ls_multiply(ls_p_R[i], ls.get_ls(a_R[c_i][i]))
ls_sum_L = ls.get_ls(0)
ls_sum_R = ls.get_ls(0)
for ls_p in ls_p_L:
ls_sum_L = ls.ls_add(ls_sum_L, ls_p)
for ls_p in ls_p_R:
ls_sum_R = ls.ls_add(ls_sum_R, ls_p)
p_L = []
for ls_p in ls_p_L:
p_L.append(ls.get_p(ls.ls_divide(ls_p, ls_sum_L)))
p_R = []
for ls_p in ls_p_R:
p_R.append(ls.get_p(ls.ls_divide(ls_p, ls_sum_R)))
sum_L = sum(p_L)
sum_R = sum(p_R)
p_L = [x/sum_L for x in p_L]
p_R = [x/sum_L for x in p_R]
[clip_start_L, clip_end_L] = l_bp.trim(p_L)
[clip_start_R, clip_end_R] = l_bp.trim(p_R)
[ new_start_L, new_end_L ] = [ start_L + clip_start_L, end_L - clip_end_L ]
[ new_start_R, new_end_R ] = [ start_R + clip_start_R, end_R - clip_end_R ]
p_L = p_L[clip_start_L:len(p_L)-clip_end_L]
p_R = p_R[clip_start_R:len(p_R)-clip_end_R]
s_p_L = sum(p_L)
s_p_R = sum(p_R)
p_L = [x/s_p_L for x in p_L]
p_R = [x/s_p_R for x in p_R]
#sys.exit(1)
return new_start_L, new_start_R, p_L, p_R, ALG
def create_merged_variant(BP, c, v_id, vcf, use_product, weighting_scheme='unweighted'):
new_start_L, new_start_R, p_L , p_R, ALG = combine_pdfs(BP, c, use_product, weighting_scheme)
max_i_L = p_L.index(max(p_L))
max_i_R = p_R.index(max(p_R))
[cipos95, ciend95]=getCI95( p_L, p_R, max_i_L, max_i_R)
new_pos_L = new_start_L + max_i_L
new_pos_R = new_start_R + max_i_R
BP0=BP[c[0]]
# sometimes after looking at PRs, the left and right can be swapped.
# flip them back so downstream tools don't break.
if new_pos_R < new_pos_L and BP0.sv_type != 'BND':
new_pos_R, new_pos_L = new_pos_L, new_pos_R
cipos95, ciend95 = ciend95, cipos95
p_L, p_R = p_R, p_L
max_i_R, max_i_L = max_i_L, max_i_R
A=BP0.l.rstrip().split('\t', 10)
ALT = ''
if BP0.sv_type == 'BND':
if BP0.strands[:2] == '++':
ALT = 'N]' + BP0.right.chrom + ':' + str(new_pos_R) + ']'
elif BP0.strands[:2] == '-+':
ALT = ']' + BP0.right.chrom + ':' + str(new_pos_R) + ']N'
elif BP0.strands[:2] == '+-':
ALT = 'N[' + BP0.right.chrom + ':' + str(new_pos_R) + '['
elif BP0.strands[:2] == '--':
ALT = '[' + BP0.right.chrom + ':' + str(new_pos_R) + '[N'
else:
ALT = '<' + BP0.sv_type + '>'
var_list=[ BP0.left.chrom,
new_pos_L,
str(v_id),
'N',
ALT,
0.0,
'.',
''] + A[8:]
var=Variant(var_list, vcf)
var.set_info('SVTYPE', BP0.sv_type)
var.set_info('ALG', ALG)
if var.get_info('SVTYPE')=='DEL':
var.set_info('SVLEN', new_pos_L - new_pos_R)
elif BP0.left.chrom == BP0.right.chrom:
var.set_info('SVLEN', new_pos_R - new_pos_L)
else:
SVLEN = None
if var.get_info('SVTYPE') == 'BND':
var.set_info('EVENT', str(v_id))
elif var.get_info('SVTYPE') == 'INS':
var.set_info('END', new_pos_L)
else:
var.set_info('END', new_pos_R )
var.set_info('CIPOS95', cipos95)
var.set_info('CIEND95', ciend95)
var.set_info('CIPOS', ','.join([str(x) for x in [-1*max_i_L, len(p_L) - max_i_L - 1]]))
var.set_info('CIEND', ','.join([str(x) for x in [-1*max_i_R, len(p_R) - max_i_R - 1]]))
var.set_info('PRPOS', ','.join([str(x) for x in p_L]))
var.set_info('PREND', ','.join([str(x) for x in p_R]))
return var
def combine_var_support(var, BP, c, include_genotypes, sample_order):
strand_map = {}
qual = 0.0
[ SU, PE, SR ] = [0,0,0]
s_name_list = []
s1_name_list = []
format_string = var.get_format_string()
gt_dict = dict()
for b_i in c:
A = BP[b_i].l.rstrip().split('\t')
if A[5].isdigit():
qual += float(A[5])
m = l_bp.to_map(A[7])
for strand_entry in m['STRANDS'].split(','):
s_type,s_count = strand_entry.split(':')
if s_type not in strand_map:
strand_map[s_type] = 0
strand_map[s_type] += int(s_count)
SU += int(m['SU'])
PE += int(m['PE'])
SR += int(m['SR'])
if 'SNAME' in m:
s_name_list.append(m['SNAME'] + ':' + A[2])
if include_genotypes:
if format_string == A[8]:
gt_dict[m['SNAME']] = A[9]
else:
format_dict = dict(zip(A[8].split(':'), A[9].split(':')))
geno = ':'.join([format_dict.get(i, '.') for i in var.format_list])
gt_dict[m['SNAME']] = geno
else:
var.format_dict=None
if s_name_list:
var.set_info('SNAME', ','.join(s_name_list))
GTS = None
if include_genotypes:
null_string = null_format_string(format_string)
GTS = '\t'.join([gt_dict.get(x, null_string) for x in sample_order])
var.gts=None
var.gts_string=GTS
strand_types_counts = []
for strand in strand_map:
strand_types_counts.append(strand + ':' + str(strand_map[strand]))
var.set_info('STRANDS', ','.join(strand_types_counts))
var.qual = qual
var.set_info('PE', str(PE))
var.set_info('SU', str(SU))
var.set_info('SR', str(SR))
def invtobnd(var):
strands=var.get_info('STRANDS')
strand_dict = dict(x.split(':') for x in strands.split(','))
for o in strand_dict.keys():
if strand_dict[o] == '0':
del(strand_dict[o])
strands=','.join(['%s:%s' % (o,strand_dict[o]) for o in strand_dict])
var.set_info('STRANDS', strands)
if strands[:2] == '++':
ALT = 'N]' + var.chrom + ':' + str(var.get_info('END')) + ']'
elif strands[:2] == '--':
ALT = '[' + var.chrom + ':' + str(var.get_info('END')) + '[N'
var.set_info('SVTYPE', 'BND')
var.alt = ALT
[ tempci, temp95 ] = [var.get_info('CIPOS'), var.get_info('CIPOS95')]
try:
temppr = var.get_info('PRPOS')
except KeyError:
raise MissingProbabilitiesException('Required tag PRPOS not found.')
var.set_info('CIPOS', var.get_info('CIEND'))
var.set_info('CIEND', tempci)
var.set_info('CIPOS95', var.get_info('CIEND95'))
var.set_info('CIEND95', temp95 )
try:
var.set_info('PRPOS', var.get_info('PREND'))
except KeyError:
raise MissingProbabilitiesException('Required tag PREND not found.')
var.set_info('PREND', temppr )
def write_var(var, vcf_out, include_genotypes=False):
v_id=var.var_id
if var.get_info('CIPOS95') != '0,0' or var.get_info('CIEND95') != '0,0':
var.set_info('IMPRECISE', True)
else:
var.set_info('IMPRECISE', False)
if var.get_info('SVTYPE') == 'INV' and ('--:0' in var.get_info('STRANDS') or '++:0' in var.get_info('STRANDS')):
invtobnd(var)
if var.alt not in ['<DEL>', '<DUP>', '<INV>', '<INS>']:
var.var_id=str(v_id)+'_1'
var.set_info('EVENT', v_id)
var.set_info('MATEID', str(v_id)+'_2')
var.info.pop('END', None)
var.info.pop('SVLEN', None)
varstring=var.get_var_string(use_cached_gt_string=True)
if not include_genotypes:
varstring='\t'.join(varstring.split('\t', 10)[:8])
vcf_out.write(varstring+'\n')
new_alt = ''
if var.alt[0] == '[':
new_alt = '[' + var.chrom + ':' + str(var.pos) + '[N'
elif var.alt[0] == ']':
new_alt = 'N[' + var.chrom + ':' + str(var.pos) + '['
elif var.alt[-1] == '[':
new_alt = ']' + var.chrom + ':' + str(var.pos) + ']N'
elif var.alt[-1] == ']':
new_alt = 'N]' + var.chrom + ':' + str(var.pos) + ']'
sep, chrom, pos = parse_bnd_alt_string(var.alt)
var.chrom = chrom
var.pos = int(pos)
var.var_id = str(v_id)+'_2'
var.set_info('MATEID', str(v_id)+'_1')
var.set_info('SECONDARY', True)
var.alt = new_alt
[ tempci, temp95 ] = [var.get_info('CIPOS'), var.get_info('CIPOS95')]
try:
temppr = var.get_info('PRPOS')
except KeyError:
raise MissingProbabilitiesException('Required tag PRPOS not found.')
var.set_info('CIPOS', var.get_info('CIEND'))
var.set_info('CIEND', tempci)
var.set_info('CIPOS95', var.get_info('CIEND95'))
var.set_info('CIEND95', temp95 )
try:
var.set_info('PRPOS', var.get_info('PREND'))
except KeyError:
raise MissingProbabilitiesException('Required tag PREND not found.')
var.set_info('PREND', temppr )
varstring=var.get_var_string(use_cached_gt_string=True)
if not include_genotypes:
varstring='\t'.join(varstring.split('\t', 10)[:8])
vcf_out.write(varstring+'\n')
else:
varstring=var.get_var_string(use_cached_gt_string=True)
if not include_genotypes:
varstring='\t'.join(varstring.split('\t', 10)[:8])
vcf_out.write(varstring+'\n')
def merge(BP, sample_order, v_id, use_product, vcf, vcf_out, include_genotypes=False, weighting_scheme='unweighted'):
if len(BP) == 1:
#merge a single breakpoint
v_id+=1
var=merge_single_bp(BP, sample_order, v_id, use_product, vcf, vcf_out, include_genotypes)
write_var(var, vcf_out, include_genotypes)
else:
BP.sort(key=lambda x: x.left.start)
ordered_cliques = []
order_cliques(BP, ordered_cliques)
#merge cliques
for cliq in ordered_cliques:
v_id+=1
var=create_merged_variant(BP, cliq, v_id, vcf, use_product, weighting_scheme)
combine_var_support(var, BP, cliq, include_genotypes, sample_order)
write_var(var, vcf_out, include_genotypes)
return v_id
def r_cluster(BP_l, sample_order, v_id, use_product, vcf, vcf_out, include_genotypes=False, weighting_scheme='unweighted'):
# need to resort based on the right side, then extract clusters
BP_l.sort(key=lambda x: x.right.start)
BP_l.sort(key=lambda x: x.right.chrom)
BP_r = []
BP_max_end_r = -1
BP_chr_r = ''
for b in BP_l:
if (len(BP_r) == 0) or \
((b.right.start <= BP_max_end_r) and \
(b.right.chrom == BP_chr_r)):
BP_r.append(b)
BP_max_end_r = max(BP_max_end_r, b.right.end)
BP_chr_r = b.right.chrom
else:
v_id = merge(BP_r, sample_order, v_id, use_product, vcf, vcf_out, include_genotypes, weighting_scheme)
BP_r = [b]
BP_max_end_r = b.right.end
BP_chr_r = b.right.chrom
if len(BP_r) > 0:
v_id = merge(BP_r, sample_order, v_id, use_product, vcf, vcf_out, include_genotypes, weighting_scheme)
return v_id
def l_cluster_by_line(file_name, tempdir, percent_slop=0, fixed_slop=0, use_product=False, include_genotypes=False, weighting_scheme='unweighted'):
v_id = 0
in_header = True
header = []
vcf = Vcf()
vcf_out=sys.stdout
with InputStream(file_name, tempdir) as vcf_stream:
BP_l = []
BP_sv_type = ''
BP_max_end_l = -1
BP_chr_l = ''
sample_order = []
for line in vcf_stream:
if in_header:
if line.startswith('##'):
header.append(line)
continue
elif line.startswith('#CHROM'):
v=line.rstrip().split('\t')
for headline in header:
if headline[:8] == '##SAMPLE':
sample_order.append(headline.rstrip()[13:-1])
hline=''
if include_genotypes :
v.extend(sample_order)
hline='\t'.join(v)
else :
v=v[:8]
hline='\t'.join(v)
header.append(hline)
in_header=False
vcf.add_header(header)
vcf.add_info('ALG', '1', 'String', 'Algorithm used to merge this breakpoint')
if include_genotypes:
vcf_out.write(vcf.get_header()+'\n')
else:
vcf_out.write(vcf.get_header(False)+'\n')
continue
b = Breakpoint(l_bp.parse_vcf_record(line), percent_slop=percent_slop, fixed_slop=fixed_slop)
if (len(BP_l) == 0) or ((b.left.start <= BP_max_end_l) and (b.left.chrom == BP_chr_l) and (b.sv_type == BP_sv_type)):
BP_l.append(b)
BP_max_end_l = max(BP_max_end_l, b.left.end)
BP_chr_l = b.left.chrom
BP_sv_type = b.sv_type
else:
v_id = r_cluster(BP_l, sample_order, v_id, use_product, vcf, vcf_out, include_genotypes, weighting_scheme)
BP_l = [b]
BP_max_end_l = b.left.end
BP_sv_type = b.sv_type
BP_chr_l = b.left.chrom
if len(BP_l) > 0:
v_id = r_cluster(BP_l, sample_order, v_id, use_product, vcf, vcf_out, include_genotypes, weighting_scheme)
def description():
return 'merge LUMPY calls inside a single file from svtools lsort'
def epilog():
return 'Note that if both slop parameters are set then the maximum is used.'
def add_arguments_to_parser(parser):
parser.add_argument('-i', '--inFile', metavar='<FILE>', help='a sorted VCF file generated by svtools lsort. Each INFO field must contain an SNAME tag containing the sample name (e.g. SNAME=SAMPLE_NAME)')
    parser.add_argument('-p', '--percent-slop', metavar='<FLOAT>', type=float, default=0.0, help='increase the breakpoint confidence interval both upstream and downstream by a given proportion of the original size')
    parser.add_argument('-f', '--fixed-slop', metavar='<INT>', type=int, default=0, help='increase the breakpoint confidence interval both upstream and downstream by a given fixed size')
parser.add_argument('--sum', dest='use_product', action='store_false', default=True, help='calculate breakpoint PDF and position using sum algorithm instead of product')
parser.add_argument('-g', dest='include_genotypes', action='store_true', default=False, help='include original genotypes in output. When multiple variants are merged, the last will dictate the genotype field')
parser.add_argument('-w', dest='weighting_scheme', metavar='<STRING>', default="unweighted", choices=['carrier_wt', 'evidence_wt'], help='weighting scheme (intended for use in tiered merging), options: unweighted, carrier_wt, evidence_wt')
parser.add_argument('-t', '--tempdir', metavar='<DIR>', required=False, default=None, help='Directory for temp file downloads')
parser.set_defaults(entry_point=run_from_args)
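# Hedged usage sketch (command and file names are illustrative, not part of
# this module): with the svtools wrapper, a typical run of this subcommand
# might look like
#   svtools lmerge -i lsorted.vcf -f 20 -g > merged.vcf
# i.e. merge an lsort-sorted VCF with 20bp of fixed slop, keeping genotypes;
# merged records are written to stdout.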
def command_parser():
parser = argparse.ArgumentParser(description=description(), epilog=epilog())
add_arguments_to_parser(parser)
return parser
def run_from_args(args):
l_cluster_by_line(args.inFile,
args.tempdir,
percent_slop=args.percent_slop,
fixed_slop=args.fixed_slop,
use_product=args.use_product,
include_genotypes=args.include_genotypes,
weighting_scheme=args.weighting_scheme)
if __name__ == "__main__":
parser = command_parser()
args = parser.parse_args()
sys.exit(args.entry_point(args))
|
pykafka/protocol/offset_commit.py | Instamojo/pykafka | 1,174 | 11147189 | # - coding: utf-8 -
import struct
from collections import namedtuple, defaultdict
from .base import Request, Response
from ..utils import struct_helpers
from ..utils.compat import iteritems
class GroupCoordinatorRequest(Request):
"""A consumer metadata request
Specification::
GroupCoordinatorRequest => ConsumerGroup
ConsumerGroup => string
"""
API_KEY = 10
def __init__(self, consumer_group):
"""Create a new group coordinator request"""
self.consumer_group = consumer_group
def __len__(self):
"""Length of the serialized message, in bytes"""
        # Header + 2-byte string length prefix + consumer group bytes
return self.HEADER_LEN + 2 + len(self.consumer_group)
def get_bytes(self):
"""Serialize the message
:returns: Serialized message
:rtype: :class:`bytearray`
"""
output = bytearray(len(self))
self._write_header(output)
cglen = len(self.consumer_group)
struct.pack_into('!h%ds' % cglen, output, self.HEADER_LEN, cglen,
self.consumer_group)
return output
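# Minimal illustration (a sketch, not part of pykafka's API): the
# length-prefixed string encoding used by get_bytes above is a big-endian
# int16 length followed by the raw bytes, written with struct.pack_into.
def _pack_kafka_string(value):
    """Pack ``value`` (bytes) as a Kafka protocol string."""
    buf = bytearray(2 + len(value))
    # e.g. value=b'group' -> b'\x00\x05group' (int16 length, then the bytes)
    struct.pack_into('!h%ds' % len(value), buf, 0, len(value), value)
    return bytes(buf)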
class GroupCoordinatorResponse(Response):
"""A group coordinator response
Specification::
GroupCoordinatorResponse => ErrorCode CoordinatorId CoordinatorHost CoordinatorPort
ErrorCode => int16
CoordinatorId => int32
CoordinatorHost => string
CoordinatorPort => int32
"""
def __init__(self, buff):
"""Deserialize into a new Response
:param buff: Serialized message
:type buff: :class:`bytearray`
"""
fmt = 'hiSi'
response = struct_helpers.unpack_from(fmt, buff, 0)
error_code = response[0]
if error_code != 0:
self.raise_error(error_code, response)
self.coordinator_id = response[1]
self.coordinator_host = response[2]
self.coordinator_port = response[3]
_PartitionOffsetCommitRequest = namedtuple(
'PartitionOffsetCommitRequest',
['topic_name', 'partition_id', 'offset', 'timestamp', 'metadata']
)
class PartitionOffsetCommitRequest(_PartitionOffsetCommitRequest):
"""Offset commit request for a specific topic/partition
:ivar topic_name: Name of the topic to look up
:ivar partition_id: Id of the partition to look up
:ivar offset:
:ivar timestamp:
:ivar metadata: arbitrary metadata that should be committed with this offset commit
"""
pass
class OffsetCommitRequest(Request):
"""An offset commit request
Specification::
OffsetCommitRequest => ConsumerGroupId ConsumerGroupGenerationId ConsumerId [TopicName [Partition Offset TimeStamp Metadata]]
ConsumerGroupId => string
ConsumerGroupGenerationId => int32
ConsumerId => string
TopicName => string
Partition => int32
Offset => int64
TimeStamp => int64
Metadata => string
"""
API_KEY = 8
def __init__(self,
consumer_group,
consumer_group_generation_id,
consumer_id,
partition_requests=[]):
"""Create a new offset commit request
:param partition_requests: Iterable of
:class:`kafka.pykafka.protocol.PartitionOffsetCommitRequest` for
this request
"""
self.consumer_group = consumer_group
self.consumer_group_generation_id = consumer_group_generation_id
self.consumer_id = consumer_id
self._reqs = defaultdict(dict)
for t in partition_requests:
self._reqs[t.topic_name][t.partition_id] = (t.offset,
t.timestamp,
t.metadata)
def __len__(self):
"""Length of the serialized message, in bytes"""
# Header + string size + consumer group size
size = self.HEADER_LEN + 2 + len(self.consumer_group)
# + generation id + string size + consumer_id size + array length
size += 4 + 2 + len(self.consumer_id) + 4
for topic, parts in iteritems(self._reqs):
# topic name + len(parts)
size += 2 + len(topic) + 4
# partition + offset + timestamp => for each partition
size += (4 + 8 + 8) * len(parts)
# metadata => for each partition
for partition, (_, _, metadata) in iteritems(parts):
size += 2 + len(metadata)
return size
def get_bytes(self):
"""Serialize the message
:returns: Serialized message
:rtype: :class:`bytearray`
"""
output = bytearray(len(self))
self._write_header(output, api_version=1)
offset = self.HEADER_LEN
fmt = '!h%dsih%dsi' % (len(self.consumer_group), len(self.consumer_id))
struct.pack_into(fmt, output, offset,
len(self.consumer_group), self.consumer_group,
self.consumer_group_generation_id,
len(self.consumer_id), self.consumer_id,
len(self._reqs))
offset += struct.calcsize(fmt)
for topic_name, partitions in iteritems(self._reqs):
fmt = '!h%dsi' % len(topic_name)
struct.pack_into(fmt, output, offset, len(topic_name),
topic_name, len(partitions))
offset += struct.calcsize(fmt)
for pnum, (poffset, timestamp, metadata) in iteritems(partitions):
fmt = '!iqq'
struct.pack_into(fmt, output, offset,
pnum, poffset, timestamp)
offset += struct.calcsize(fmt)
metalen = len(metadata) or -1
fmt = '!h'
pack_args = [fmt, output, offset, metalen]
if metalen != -1:
fmt += '%ds' % metalen
pack_args = [fmt, output, offset, metalen, metadata]
struct.pack_into(*pack_args)
offset += struct.calcsize(fmt)
return output
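# Hedged usage sketch (group/topic names are hypothetical and this helper is
# not part of pykafka): build an OffsetCommitRequest for a single partition
# and serialize it; note the protocol layer expects bytes, not str.
def _example_offset_commit_request():
    req = OffsetCommitRequest(
        consumer_group=b'example-group',
        consumer_group_generation_id=1,
        consumer_id=b'example-consumer',
        partition_requests=[
            PartitionOffsetCommitRequest(b'example-topic', 0, 42, -1, b'')])
    return req.get_bytes()  # a bytearray of exactly len(req) bytes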
OffsetCommitPartitionResponse = namedtuple(
'OffsetCommitPartitionResponse',
['err']
)
class OffsetCommitResponse(Response):
"""An offset commit response
Specification::
OffsetCommitResponse => [TopicName [Partition ErrorCode]]]
TopicName => string
Partition => int32
ErrorCode => int16
"""
def __init__(self, buff):
"""Deserialize into a new Response
:param buff: Serialized message
:type buff: :class:`bytearray`
"""
fmt = '[S [ih ] ]'
response = struct_helpers.unpack_from(fmt, buff, 0)
self.topics = {}
for topic_name, partitions in response:
self.topics[topic_name] = {}
for partition in partitions:
self.topics[topic_name][partition[0]] = OffsetCommitPartitionResponse(partition[1])
_PartitionOffsetFetchRequest = namedtuple(
'PartitionOffsetFetchRequest',
['topic_name', 'partition_id']
)
class PartitionOffsetFetchRequest(_PartitionOffsetFetchRequest):
"""Offset fetch request for a specific topic/partition
:ivar topic_name: Name of the topic to look up
:ivar partition_id: Id of the partition to look up
"""
pass
class OffsetFetchRequest(Request):
"""An offset fetch request
Specification::
OffsetFetchRequest => ConsumerGroup [TopicName [Partition]]
ConsumerGroup => string
TopicName => string
Partition => int32
"""
API_VERSION = 0
API_KEY = 9
@classmethod
def get_versions(cls):
return {0: OffsetFetchRequest, 1: OffsetFetchRequestV1, 2: OffsetFetchRequestV2}
def __init__(self, consumer_group, partition_requests=[]):
"""Create a new offset fetch request
:param partition_requests: Iterable of
:class:`kafka.pykafka.protocol.PartitionOffsetFetchRequest` for
this request
"""
self.consumer_group = consumer_group
self._reqs = defaultdict(list)
for t in partition_requests:
self._reqs[t.topic_name].append(t.partition_id)
def _reqs_len(self):
return len(self._reqs)
def __len__(self):
"""Length of the serialized message, in bytes"""
# Header + consumer group + len(topics)
size = self.HEADER_LEN + 2 + len(self.consumer_group) + 4
for topic, parts in iteritems(self._reqs):
# topic name + len(parts)
size += 2 + len(topic) + 4
# partition => for each partition
size += 4 * len(parts)
return size
def get_bytes(self):
"""Serialize the message
:returns: Serialized message
:rtype: :class:`bytearray`
"""
output = bytearray(len(self))
self._write_header(output, api_version=self.API_VERSION)
offset = self.HEADER_LEN
fmt = '!h%dsi' % len(self.consumer_group)
struct.pack_into(fmt, output, offset,
len(self.consumer_group), self.consumer_group,
self._reqs_len())
offset += struct.calcsize(fmt)
for topic_name, partitions in iteritems(self._reqs):
fmt = '!h%dsi' % len(topic_name)
struct.pack_into(fmt, output, offset, len(topic_name),
topic_name, len(partitions))
offset += struct.calcsize(fmt)
for pnum in partitions:
fmt = '!i'
struct.pack_into(fmt, output, offset, pnum)
offset += struct.calcsize(fmt)
return output
class OffsetFetchRequestV1(OffsetFetchRequest):
API_VERSION = 1
class OffsetFetchRequestV2(OffsetFetchRequestV1):
API_VERSION = 2
def _reqs_len(self):
# v2 allows a null array to select all topics
return len(self._reqs) or -1
OffsetFetchPartitionResponse = namedtuple(
'OffsetFetchPartitionResponse',
['offset', 'metadata', 'err']
)
class OffsetFetchResponse(Response):
"""An offset fetch response v0
Specification::
OffsetFetch Response (Version: 0) => [responses]
responses => topic [partition_responses]
topic => STRING
partition_responses => partition offset metadata error_code
partition => INT32
offset => INT64
metadata => NULLABLE_STRING
error_code => INT16
"""
API_VERSION = 0
API_KEY = 9
@classmethod
def get_versions(cls):
return {0: OffsetFetchResponse, 1: OffsetFetchResponseV1, 2: OffsetFetchResponseV2}
def __init__(self, buff):
"""Deserialize into a new Response
:param buff: Serialized message
:type buff: :class:`bytearray`
"""
fmt = '[S [iqSh ] ]'
response = struct_helpers.unpack_from(fmt, buff, 0)
self._populate_partition_responses(response)
def _populate_partition_responses(self, partition_responses):
self.topics = {}
for topic_name, partitions in partition_responses:
self.topics[topic_name] = {}
for partition in partitions:
pres = OffsetFetchPartitionResponse(partition[1],
partition[2],
partition[3])
self.topics[topic_name][partition[0]] = pres
class OffsetFetchResponseV1(OffsetFetchResponse):
"""An offset fetch response v1 (all the same as v0)
Specification::
OffsetFetch Response (Version: 1) => [responses]
responses => topic [partition_responses]
topic => STRING
partition_responses => partition offset metadata error_code
partition => INT32
offset => INT64
metadata => NULLABLE_STRING
error_code => INT16
"""
API_VERSION = 1
class OffsetFetchResponseV2(OffsetFetchResponseV1):
"""An offset fetch response v2
Specification::
OffsetFetch Response (Version: 2) => [responses] error_code
responses => topic [partition_responses]
topic => STRING
partition_responses => partition offset metadata error_code
partition => INT32
offset => INT64
metadata => NULLABLE_STRING
error_code => INT16
error_code => INT16 (new since v1)
"""
API_VERSION = 2
def __init__(self, buff):
fmt = '[S [iqSh ] ] h'
response = struct_helpers.unpack_from(fmt, buff, 0)
partition_responses, self.err = response
self._populate_partition_responses(partition_responses)
|
configs/selfsup/simmim/simmim_swin-base_8xb256-coslr-100e_in1k-192.py | mitming/mmselfsup | 355 | 11147215 | _base_ = 'simmim_swin-base_16xb128-coslr-100e_in1k.py'
# data
data = dict(samples_per_gpu=256)
|
vumi/dispatchers/endpoint_dispatchers.py | seidu626/vumi | 199 | 11147232 | # -*- test-case-name: vumi.dispatchers.tests.test_endpoint_dispatchers -*-
"""Basic tools for building dispatchers."""
from twisted.internet.defer import gatherResults, maybeDeferred
from vumi.worker import BaseWorker
from vumi.config import ConfigDict, ConfigList
from vumi import log
class DispatcherConfig(BaseWorker.CONFIG_CLASS):
receive_inbound_connectors = ConfigList(
"List of connectors that will receive inbound messages and events.",
required=True, static=True)
receive_outbound_connectors = ConfigList(
"List of connectors that will receive outbound messages.",
required=True, static=True)
class Dispatcher(BaseWorker):
"""Base class for a dispatcher."""
CONFIG_CLASS = DispatcherConfig
def setup_worker(self):
d = maybeDeferred(self.setup_dispatcher)
d.addCallback(lambda r: self.unpause_connectors())
return d
def teardown_worker(self):
d = self.pause_connectors()
d.addCallback(lambda r: self.teardown_dispatcher())
return d
def setup_dispatcher(self):
"""
All dispatcher specific setup should happen in here.
Subclasses should override this method to perform extra setup.
"""
pass
def teardown_dispatcher(self):
"""
Clean-up of setup done in setup_dispatcher should happen here.
"""
pass
def get_configured_ri_connectors(self):
return self.get_static_config().receive_inbound_connectors
def get_configured_ro_connectors(self):
return self.get_static_config().receive_outbound_connectors
def default_errback(self, f, msg, connector_name):
log.error(f, "Error routing message for %s" % (connector_name,))
def process_inbound(self, config, msg, connector_name):
raise NotImplementedError()
def errback_inbound(self, f, msg, connector_name):
return f
def process_outbound(self, config, msg, connector_name):
raise NotImplementedError()
def errback_outbound(self, f, msg, connector_name):
return f
def process_event(self, config, event, connector_name):
raise NotImplementedError()
def errback_event(self, f, event, connector_name):
return f
def _mkhandler(self, handler_func, errback_func, connector_name):
def handler(msg):
d = maybeDeferred(self.get_config, msg)
d.addCallback(handler_func, msg, connector_name)
d.addErrback(errback_func, msg, connector_name)
d.addErrback(self.default_errback, msg, connector_name)
return d
return handler
def setup_connectors(self):
def add_ri_handlers(connector, connector_name):
connector.set_default_inbound_handler(
self._mkhandler(
self.process_inbound, self.errback_inbound,
connector_name))
connector.set_default_event_handler(
self._mkhandler(
self.process_event, self.errback_event, connector_name))
return connector
def add_ro_handlers(connector, connector_name):
connector.set_default_outbound_handler(
self._mkhandler(
self.process_outbound, self.errback_outbound,
connector_name))
return connector
deferreds = []
for connector_name in self.get_configured_ri_connectors():
d = self.setup_ri_connector(connector_name)
d.addCallback(add_ri_handlers, connector_name)
deferreds.append(d)
for connector_name in self.get_configured_ro_connectors():
d = self.setup_ro_connector(connector_name)
d.addCallback(add_ro_handlers, connector_name)
deferreds.append(d)
return gatherResults(deferreds)
def publish_inbound(self, msg, connector_name, endpoint):
return self.connectors[connector_name].publish_inbound(msg, endpoint)
def publish_outbound(self, msg, connector_name, endpoint):
return self.connectors[connector_name].publish_outbound(msg, endpoint)
def publish_event(self, event, connector_name, endpoint):
return self.connectors[connector_name].publish_event(event, endpoint)
class RoutingTableDispatcherConfig(Dispatcher.CONFIG_CLASS):
routing_table = ConfigDict(
"Routing table. Keys are connector names, values are dicts mapping "
"endpoint names to [connector, endpoint] pairs.", required=True)
class RoutingTableDispatcher(Dispatcher):
CONFIG_CLASS = RoutingTableDispatcherConfig
def find_target(self, config, msg, connector_name):
endpoint_name = msg.get_routing_endpoint()
endpoint_routing = config.routing_table.get(connector_name)
if endpoint_routing is None:
log.warning("No routing information for connector '%s'" % (
connector_name,))
return None
target = endpoint_routing.get(endpoint_name)
if target is None:
log.warning("No routing information for endpoint '%s' on '%s'" % (
endpoint_name, connector_name,))
return None
return target
def process_inbound(self, config, msg, connector_name):
target = self.find_target(config, msg, connector_name)
if target is None:
return
return self.publish_inbound(msg, target[0], target[1])
def process_outbound(self, config, msg, connector_name):
target = self.find_target(config, msg, connector_name)
if target is None:
return
return self.publish_outbound(msg, target[0], target[1])
def process_event(self, config, event, connector_name):
target = self.find_target(config, event, connector_name)
if target is None:
return
return self.publish_event(event, target[0], target[1])
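# Illustrative sketch (connector and endpoint names are hypothetical): the
# routing_table config described by RoutingTableDispatcherConfig above maps
# each connector to a dict of endpoint names, each pointing at a
# [target_connector, target_endpoint] pair, e.g.
#
#   {
#       "transport1": {"default": ["app1", "default"]},
#       "app1": {"default": ["transport1", "default"]},
#   }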
|
tests/spectral/test_S2FFT_NFFT.py | machism0/lie_learn | 140 | 11147242 | <filename>tests/spectral/test_S2FFT_NFFT.py
import lie_learn.spaces.S2 as S2
from lie_learn.spectral.S2FFT_NFFT import S2FFT_NFFT
from lie_learn.representations.SO3.spherical_harmonics import *
def test_S2FFT_NFFT():
"""
Testing S2FFT NFFT
"""
b = 8
convention = 'Gauss-Legendre'
#convention = 'Clenshaw-Curtis'
x = S2.meshgrid(b=b, grid_type=convention)
print(x[0].shape, x[1].shape)
x = np.c_[x[0][..., None], x[1][..., None]]#.reshape(-1, 2)
print(x.shape)
x = x.reshape(-1, 2)
w = S2.quadrature_weights(b=b, grid_type=convention).flatten()
F = S2FFT_NFFT(L_max=b, x=x, w=w)
for l in range(0, b):
for m in range(-l, l + 1):
#l = b; m = b
f = sh(l, m, x[..., 0], x[..., 1], field='real', normalization='quantum', condon_shortley=True)
#f2 = np.random.randn(*f.shape)
print(f)
f_hat = F.analyze(f)
print(np.round(f_hat, 3))
f_reconst = F.synthesize(f_hat)
#print np.round(f, 3)
print(np.round(f_reconst, 3))
#print np.round(f/f_reconst, 3)
print(np.abs(f-f_reconst).sum())
assert np.isclose(np.abs(f-f_reconst).sum(), 0.)
print(np.round(f_hat, 3))
assert np.isclose(f_hat[l ** 2 + l + m], 1.)
#assert False |
examples/twisted/wamp/app/keyvalue/store.py | rapyuta-robotics/autobahn-python | 1,670 | 11147244 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import shelve
from twisted.internet.defer import inlineCallbacks
from autobahn import wamp
from autobahn.twisted.wamp import ApplicationSession
class KeyValueStore(ApplicationSession):
"""
Simple, persistent key-value store.
"""
@inlineCallbacks
def onJoin(self, details):
self.store = shelve.open("keyvalue", flag='c', writeback=False)
yield self.register(self)
print("Ok, keyvalue-store procedures registered!")
@wamp.register("com.example.keyvalue.set")
def set(self, key=None, value=None):
if key is not None:
k = str(key)
if value is not None:
self.store[k] = value
else:
if k in self.store:
del self.store[k]
else:
self.store.clear()
self.store.sync()
@wamp.register("com.example.keyvalue.get")
def get(self, key=None):
if key is None:
res = {}
for key, value in self.store.items():
res[key] = value
return res
else:
return self.store.get(str(key), None)
@wamp.register("com.example.keyvalue.keys")
def keys(self):
return self.store.keys()
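# Hedged sketch (not executed here): from another ApplicationSession connected
# to the same realm, the procedures registered above could be invoked with the
# standard WAMP call API, e.g. inside an @inlineCallbacks method:
#
#   yield self.call("com.example.keyvalue.set", "foo", 42)
#   value = yield self.call("com.example.keyvalue.get", "foo")
#   keys = yield self.call("com.example.keyvalue.keys")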
if __name__ == '__main__':
import sys
import argparse
# parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("--web", type=int, default=8080,
help='Web port to use for embedded Web server. Use 0 to disable.')
parser.add_argument("--router", type=str, default=None,
help='If given, connect to this WAMP router. Else run an embedded router on 9000.')
args = parser.parse_args()
from twisted.python import log
log.startLogging(sys.stdout)
# import Twisted reactor
from twisted.internet import reactor
print("Using Twisted reactor {0}".format(reactor.__class__))
# create embedded web server for static files
if args.web:
from twisted.web.server import Site
from twisted.web.static import File
reactor.listenTCP(args.web, Site(File(".")))
# run WAMP application component
from autobahn.twisted.wamp import ApplicationRunner
router = args.router or 'ws://127.0.0.1:9000'
runner = ApplicationRunner(router, "realm1", standalone=not args.router)
# start the component and the Twisted reactor ..
runner.run(KeyValueStore)
|
setup.py | joannadiong/zEpid | 101 | 11147257 | <reponame>joannadiong/zEpid<filename>setup.py
from setuptools import setup
exec(compile(open('zepid/version.py').read(),
'zepid/version.py', 'exec'))
with open("README.md") as f:
descript = f.read()
setup(name='zepid',
version=__version__,
description='Tool package for epidemiologic analyses',
keywords='epidemiology inverse-probability-weights risk-ratio g-computation g-formula IPW AIPW TMLE',
packages=['zepid',
'zepid.calc',
'zepid.graphics',
'zepid.sensitivity_analysis',
'zepid.superlearner',
'zepid.causal.ipw',
'zepid.causal.gformula',
'zepid.causal.doublyrobust',
'zepid.causal.generalize',
'zepid.causal.snm',
'zepid.causal.causalgraph',
'zepid.datasets'],
include_package_data=True,
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/pzivich/zepid',
classifiers=['Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'],
install_requires=['pandas>=0.18',
'numpy',
'statsmodels>=0.7.0',
'matplotlib>=2.0',
'scipy',
'tabulate',
'sklearn',
'patsy'],
extras_require={"DirectedAcyclicGraph": ["networkx"], },
long_description=descript,
long_description_content_type="text/markdown",
)
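# Installation note (a sketch, not part of the package metadata): the optional
# networkx dependency declared via extras_require above is pulled in with
#   pip install zepid[DirectedAcyclicGraph]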
|
tutorials/eboutique/microservices/cart/src/commands/services.py | bhardwajRahul/minos-python | 247 | 11147266 | from minos.cqrs import (
CommandService,
)
from minos.networks import (
Request,
Response,
ResponseException,
enroute,
)
from minos.saga import (
SagaContext,
SagaStatus,
)
from ..aggregates import (
CartAggregate,
)
from .saga.add_cart import (
ADD_CART_ITEM,
)
class CartCommandService(CommandService):
"""CartCommandService class."""
@enroute.rest.command("/cart", "POST")
async def create_cart(self, request: Request) -> Response:
"""Create a new ``Cart`` instance.
:param request: The ``Request`` instance.
:return: A ``Response`` instance.
"""
        content = await request.content()
        try:
            uuid = await CartAggregate.createCart(content)
            return Response({"uuid": uuid})
        except Exception as exc:
            raise ResponseException(f"An error occurred during Cart creation: {content} {exc}")
@enroute.rest.command("/cart/{uuid}/item", "POST")
    async def create_cart_item(self, request: Request) -> Response:
        """Add a new item to an existing ``Cart``.
:param request: The ``Request`` instance.
:return: A ``Response`` instance.
"""
data = await request.content()
params = await request.params()
saga_execution = await self.saga_manager.run(
ADD_CART_ITEM,
context=SagaContext(cart_uid=params["uuid"], product_uid=data["product"], quantity=data["quantity"]),
)
if saga_execution.status == SagaStatus.Finished:
return Response(saga_execution.context["cart"])
else:
raise ResponseException("Error executing SAGA.")
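# Illustrative request body (values are hypothetical) for the POST
# /cart/{uuid}/item handler above, matching the keys it reads:
#   {"product": "<product-uuid>", "quantity": 2}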
|
examples/d2/roessler.py | manu-mannattil/nolitsa | 118 | 11147283 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""D2 of the Rössler oscillator.
The estimates here match the "accepted" value of 1.991 quite closely.
"""
import numpy as np
import matplotlib.pyplot as plt
from nolitsa import d2, data, utils
x0 = [-3.2916983, -1.42162302, 0.02197593]
x = utils.rescale(data.roessler(length=5000, x0=x0)[1][:, 0])
dim = np.arange(1, 10 + 1)
tau = 14
plt.title(u'Local $D_2$ vs $r$ for Rössler oscillator')
plt.xlabel(r'Distance $r$')
plt.ylabel(r'Local $D_2$')
for r, c in d2.c2_embed(x, tau=tau, dim=dim, window=50,
r=utils.gprange(0.001, 1.0, 100)):
plt.semilogx(r[3:-3], d2.d2(r, c), color='#4682B4')
plt.semilogx(utils.gprange(0.001, 1.0, 100), 1.991 * np.ones(100),
color='#000000')
plt.show()
|
openprompt/utils/calibrate.py | puraminy/OpenPrompt | 979 | 11147300 |
from yacs.config import CfgNode
from openprompt.data_utils import FewShotSampler
from torch.utils.data.dataset import Dataset
from transformers.data.processors.utils import InputExample
from openprompt.pipeline_base import PromptDataLoader, PromptModel, PromptForClassification
from typing import *
import torch
from tqdm import tqdm
# def pmi_calibrate(prompt_model: PromptModel,context_dataloader, max_seq_length: int) -> torch.Tensor:
# r"""Pmi calibrate. See `Paper <https://arxiv.org/pdf/2104.08315.pdf>`_
# Args:
# prompt_model (:obj:`PromptForClassification`): the PromptForClassification model.
# max_seq_length: (:obj:`int`): the truncation parameters for dataloader.
# """
# prompt = prompt_model.prompt
# tokenizer = prompt_model.tokenizer
# virtual_dataset = [InputExample(guid='000', text_a='', text_b='')]
# context_dataloader = PromptDataLoader(virtual_dataset, prompt, tokenizer, max_seq_length=max_seq_length, batch_size=len(virtual_dataset),device=prompt_model.device)
# for batch in context_dataloader:
# logits = prompt_model.forward_without_verbalize(batch)
# logits = logits[torch.where(batch.loss_ids>0)]
# return logits.mean(dim=0)
def calibrate(prompt_model: PromptForClassification, dataloader: PromptDataLoader) -> torch.Tensor:
r"""Calibrate. See `Paper <https://arxiv.org/abs/2108.02035>`_
Args:
prompt_model (:obj:`PromptForClassification`): the PromptForClassification model.
        dataloader (:obj:`PromptDataLoader`): the dataloader used to conduct the calibration; it can be a virtual one, i.e. contain only a single template-only example.
Return:
(:obj:`torch.Tensor`) A tensor of shape (vocabsize) or (mask_num, vocabsize), the logits calculated for each word in the vocabulary
"""
all_logits = []
prompt_model.eval()
for batch in tqdm(dataloader,desc='ContextCali'):
batch = batch.to(prompt_model.device)
logits = prompt_model.forward_without_verbalize(batch)
all_logits.append(logits.detach())
all_logits = torch.cat(all_logits, dim=0)
return all_logits.mean(dim=0)
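# Hedged usage sketch (names are illustrative): compute contextualized
# calibration logits from a support dataloader and register them on the model,
# mirroring the commented-out wrapper kept below for reference:
#
#   cc_logits = calibrate(prompt_model, support_dataloader)
#   prompt_model.register_calibrate_logits(cc_logits)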
# def calibrate(model: PromptForClassification, calibrate_method: str=None, config: CfgNode =None , train_dataset: Optional[List]=None, valid_dataset: Optional[List]=None):
# r"""Calibrate the PromptForClassification model. Select and run the calibrate using the global config node.
# Args:
# model (:obj:`PromptForClassification`): the PromptForClassification model.
# config (:obj:`CfgNode`): The global config node.
# train_dataset: (:obj:`List`): the training dataset, if use the training dataset to do contextualized calibrate.
# valid_dataset: (:obj:`List`): the valid dataset, if use the valid dataset to do contextualized calibrate.
# """
# if config.calibrate == "pmi_calibrate":
# calibrate_logits = pmi_calibrate(model, max_seq_length=config.dataloader.max_seq_length)
# model.register_calibrate_logits(calibrate_logits)
# elif config.calibrate_type == "contextualized_calibrate":
# if config.contextualized_calibrate.use_split == "train":
# context_dataset = train_dataset
# elif config.contextualized_calibrate.use_split == "valid":
# context_dataset = valid_dataset
# elif config.contextualized_calibrate.use_split is None and config.contextualized_calibrate.num_example is not None:
# sampler = FewShotSampler(num_examples_total=config.contextualized_calibrate.num_example,
# also_sample_dev=False)
# context_dataset = sampler(train_dataset)
# calibrate_logits = contextualized_calibrate(model, context=context_dataset, max_seq_length=config.dataloader.max_seq_length)
# model.register_calibrate_logits(calibrate_logits)
|
packages/pegasus-python/src/Pegasus/cli/pegasus-submitdir.py | ahnitz/pegasus | 127 | 11147309 | <filename>packages/pegasus-python/src/Pegasus/cli/pegasus-submitdir.py<gh_stars>100-1000
#!/usr/bin/env python3
from Pegasus.submitdir import main
main()
|
cape_privacy/audit/__init__.py | vismaya-Kalaiselvan/cape-python | 144 | 11147326 | <filename>cape_privacy/audit/__init__.py
from cape_privacy.audit.audit import AuditLogger, APPLY_POLICY_EVENT
__all__ = [
"AuditLogger",
"APPLY_POLICY_EVENT"
] |
unit_tests/appzoo_tests/test_ez_serialization.py | johnson7788/EasyTransfer | 806 | 11147335 | <reponame>johnson7788/EasyTransfer
# coding=utf-8
# Copyright (c) 2019 Alibaba PAI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import unittest
import shutil
class TestEzSerialization(unittest.TestCase):
def test_serialization_finetune(self):
argvs = ['easy_transfer_app',
'--mode', 'preprocess',
'--inputTable', '../ut_data/ez_text_classify/dev.csv',
'--outputTable', 'serialization.pred.csv',
'--inputSchema', 'example_id:int:1,content:str:1,label:str:1,label_str:str:1,keywords:str:1',
'--firstSequence', 'content',
'--modelName', 'google-bert-base-zh',
'--appendCols', 'example_id,keywords,label_str',
'--outputSchema', 'input_ids,input_mask,segment_ids,label_id',
'--labelName', 'label',
'--labelEnumerateValues', '100,101,102,103,104,105,106,107,108,109,110,112,113,114,115,116',
'--batchSize', '100',
'--workerCount', '1',
'--workerGPU', '1',
'--workerCPU', '1',
]
print(' '.join(argvs))
try:
res = subprocess.check_output(' '.join(argvs), stderr=subprocess.STDOUT, shell=True)
print(res)
self.assertTrue(os.path.exists('serialization.pred.csv'))
shutil.rmtree('serialization.pred.csv', ignore_errors=True)
except subprocess.CalledProcessError as e:
print(e.output)
raise RuntimeError
def test_serialization_predict(self):
argvs = ['easy_transfer_app',
'--mode', 'preprocess',
'--inputTable', '../ut_data/ez_text_classify/dev.csv',
'--outputTable', 'serialization.pred.csv',
'--inputSchema', 'example_id:int:1,content:str:1,label:str:1,label_str:str:1,keywords:str:1',
'--firstSequence', 'content',
'--modelName', 'google-bert-base-zh',
'--appendCols', 'example_id,keywords,label_str',
'--outputSchema', 'input_ids,input_mask,segment_ids',
'--batchSize', '100',
'--workerCount', '1',
'--workerGPU', '1',
'--workerCPU', '1',
]
print(' '.join(argvs))
try:
res = subprocess.check_output(' '.join(argvs), stderr=subprocess.STDOUT, shell=True)
print(res)
except subprocess.CalledProcessError as e:
print(e.output)
raise RuntimeError
self.assertTrue(os.path.exists('serialization.pred.csv'))
os.remove('serialization.pred.csv')
if __name__ == "__main__":
unittest.main() |
example/example/dynamic_preferences_registry.py | EliotBerriot/django-dynamic-preferences | 244 | 11147358 | <reponame>EliotBerriot/django-dynamic-preferences<gh_stars>100-1000
from dynamic_preferences.types import *
from dynamic_preferences.registries import global_preferences_registry
from dynamic_preferences.users.registries import user_preferences_registry
from .models import MyModel
_section = Section("section")
@global_preferences_registry.register
class RegistrationAllowed(BooleanPreference):
"""
Are new registrations allowed ?
"""
verbose_name = "Allow new users to register"
section = "auth"
name = "registration_allowed"
default = False
@global_preferences_registry.register
class MaxUsers(IntPreference):
"""
    Maximum number of users allowed to register.
"""
section = "auth"
name = "max_users"
default = 100
help_text = "Please fill in the form"
@global_preferences_registry.register
class Header(LongStringPreference):
section = "general"
name = "presentation"
default = "You need a presentation"
@user_preferences_registry.register
class ItemsPerPage(IntPreference):
section = "display"
name = "items_per_page"
default = 25
@user_preferences_registry.register
class FavoriteVegetable(ChoicePreference):
choices = (
("C", "Carrot"),
("T", "Tomato. I know, it's not a vegetable"),
("P", "Potato"),
)
section = "auth"
name = "favorite_vegetable"
default = "C"
@global_preferences_registry.register
class AdminUsers(MultipleChoicePreference):
name = "admin_users"
section = "auth"
default = None
choices = (("0", "Serge"), ("1", "Alina"), ("2", "Anand"))
@user_preferences_registry.register
class FavouriteColour(StringPreference):
"""
What's your favourite colour ?
"""
section = "misc"
name = "favourite_colour"
default = "Green"
@user_preferences_registry.register
class IsZombie(BooleanPreference):
"""
Are you a zombie ?
"""
section = "misc"
name = "is_zombie"
default = True
@user_preferences_registry.register
class IsFanOfTokioHotel(BooleanPreference):
section = "music"
name = "is_fan_of_tokio_hotel"
default = False
@user_preferences_registry.register
class MyModelPreference(ModelChoicePreference):
section = _section
name = "MyModel_preference"
default = None
queryset = MyModel.objects.all()
required = False
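# Hedged usage sketch: once registered, these preferences are read and written
# through the manager with "section__name" keys (values shown are examples):
#
#   global_preferences = global_preferences_registry.manager()
#   global_preferences["auth__registration_allowed"] = True
#   max_users = global_preferences["auth__max_users"]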
|
unittests/test_exceptions.py | CLIP-HPC/reframe | 167 | 11147368 | # Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import pytest
import reframe.core.exceptions as exc
def assert_args(exc_type, *args):
e = exc_type(*args)
assert args == e.args
def test_soft_error():
with pytest.raises(exc.ReframeError, match=r'random error'):
raise exc.ReframeError('random error')
assert_args(exc.ReframeError, 'error msg')
assert_args(exc.ReframeError, 'error msg', 'another arg')
def test_reraise_soft_error():
try:
try:
raise ValueError('random value error')
except ValueError as e:
# reraise as ReframeError
raise exc.ReframeError('soft error') from e
except exc.ReframeError as e:
assert 'soft error: random value error' == str(e)
def test_fatal_error():
try:
raise exc.ReframeFatalError('fatal error')
except Exception:
pytest.fail('fatal error should not derive from Exception')
except BaseException:
pass
def test_reraise_fatal_error():
try:
try:
raise ValueError('random value error')
except ValueError as e:
# reraise as ReframeError
raise exc.ReframeFatalError('fatal error') from e
except exc.ReframeFatalError as e:
assert 'fatal error: random value error' == str(e)
def test_spawned_process_error():
exc_args = ('foo bar', 'partial output', 'error message', 1)
e = exc.SpawnedProcessError(*exc_args)
with pytest.raises(
exc.ReframeError,
match=(r"command 'foo bar' failed with exit code 1:\n"
r"--- stdout ---\n"
r'partial output\n'
r"--- stdout ---\n"
r"--- stderr ---\n"
r"error message\n"
r"--- stderr ---")
):
raise e
assert exc_args == e.args
def test_spawned_process_error_list_args():
exc_args = (['foo', 'bar'], 'partial output', 'error message', 1)
e = exc.SpawnedProcessError(*exc_args)
with pytest.raises(
exc.ReframeError,
match=(r"command 'foo bar' failed with exit code 1:\n"
r"--- stdout ---\n"
r'partial output\n'
r"--- stdout ---\n"
r"--- stderr ---\n"
r"error message\n"
r"--- stderr ---")
):
raise e
assert exc_args == e.args
def test_spawned_process_error_nostdout():
exc_args = ('foo bar', '', 'error message', 1)
e = exc.SpawnedProcessError(*exc_args)
with pytest.raises(
exc.ReframeError,
match=(r"command 'foo bar' failed with exit code 1:\n"
r"--- stdout ---\n"
r"--- stdout ---\n"
r"--- stderr ---\n"
r"error message\n"
r"--- stderr ---")
):
raise e
def test_spawned_process_error_nostderr():
exc_args = ('foo bar', 'partial output', '', 1)
e = exc.SpawnedProcessError(*exc_args)
with pytest.raises(
exc.ReframeError,
match=(r"command 'foo bar' failed with exit code 1:\n"
r"--- stdout ---\n"
r'partial output\n'
r"--- stdout ---\n"
r"--- stderr ---\n"
r"--- stderr ---")
):
raise e
def test_spawned_process_timeout():
exc_args = ('foo bar', 'partial output', 'partial error', 10)
e = exc.SpawnedProcessTimeout(*exc_args)
with pytest.raises(exc.ReframeError,
match=(r"command 'foo bar' timed out after 10s:\n"
r"--- stdout ---\n"
r'partial output\n'
r"--- stdout ---\n"
r"--- stderr ---\n"
r"partial error\n"
r"--- stderr ---")):
raise e
def test_spawned_process_timeout_nostdout():
exc_args = ('foo bar', '', 'partial error', 10)
e = exc.SpawnedProcessTimeout(*exc_args)
with pytest.raises(exc.ReframeError,
match=(r"command 'foo bar' timed out after 10s:\n"
r"--- stdout ---\n"
r"--- stdout ---\n"
r"--- stderr ---\n"
r"partial error\n"
r"--- stderr ---")):
raise e
def test_spawned_process_timeout_nostderr():
exc_args = ('foo bar', 'partial output', '', 10)
e = exc.SpawnedProcessTimeout(*exc_args)
with pytest.raises(exc.ReframeError,
match=(r"command 'foo bar' timed out after 10s:\n"
r"--- stdout ---\n"
r'partial output\n'
r"--- stdout ---\n"
r"--- stderr ---\n"
r"--- stderr ---")):
raise e
def test_job_error():
exc_args = ('some error',)
e = exc.JobError(*exc_args, jobid=1234)
assert 1234 == e.jobid
with pytest.raises(exc.JobError, match=r'\[jobid=1234\] some error'):
raise e
assert exc_args == e.args
def test_reraise_job_error():
try:
try:
raise ValueError('random value error')
except ValueError as e:
raise exc.JobError('some error', jobid=1234) from e
except exc.JobError as e:
assert '[jobid=1234] some error: random value error' == str(e)
def test_reraise_job_error_no_message():
try:
try:
raise ValueError('random value error')
except ValueError as e:
raise exc.JobError(jobid=1234) from e
except exc.JobError as e:
assert '[jobid=1234]: random value error' == str(e)
|
seriously/test/tests.py | Mego/Seriously | 104 | 11147416 | <filename>seriously/test/tests.py
#!/usr/bin/env python3
import argparse
import collections
import contextlib
import math
import random
from io import StringIO
import os
import sys
import unittest
from seriouslylib.cp437 import CP437
from seriouslylib.iterable import as_list
from seriouslylib.nicenames import nice_names
from ..seriously import Seriously, minimize
from ..SeriouslyCommands import SeriousFunction
from ..probably_prime import probably_prime
ord_cp437 = CP437.ord
chr_cp437 = CP437.chr
debug_mode = False
def NinetyNineBottles():
x = 99
res = ''
for i in range(99):
w = 'Take one down and pass it around, '+str((x-(i+1)))+' bottle{0} of beer on the wall.'.format(['s',''][x-i==2])
y = str((x-i))+' bottle{0} of beer on the wall, '+str((x-i))+' bottle{0} of beer.'
y=y.format(['s',''][x-i==1])
z = 'Go to the store and buy some more, '+str(x)+' bottles of beer on the wall.'
if i == (x-1):
res += y + '\n' + z
else:
res += y + '\n' + w
i += 1
res += '\n\n'
return res
class UtilTests(unittest.TestCase):
def __init__(self, *args):
super().__init__(*args)
# fix for Python < 3.4
if not hasattr(self, 'subTest'):
self.subTest = self.dummy_manager
@contextlib.contextmanager
def dummy_manager(*args, **kwargs):
yield
def test_utils(self):
self.assertEqual(as_list(range(5)), [0, 1, 2, 3, 4])
self.assertEqual(as_list((1,2,3,4) for x in range(3)), [[1, 2, 3, 4]]*3)
self.assertEqual(as_list(2), [2])
self.assertEqual([chr_cp437(x) for x in range(256)], [x for x in CP437.table])
self.assertEqual([ord_cp437(x) for x in CP437.table], [x for x in range(256)])
with self.assertRaises(ValueError):
chr_cp437(257)
self.assertEqual(CP437.from_Unicode(chr_cp437(0x8D)+'\u2266'), [0x8D, 0xE2, 0x89, 0xA6])
def test_seriously_class(self):
srs = Seriously()
srs.push(1)
srs.prepend(0)
self.assertEqual(srs.stack, collections.deque([0, 1]))
def test_nice_names(self):
for i, nice_name in enumerate(nice_names):
with self.subTest(i=i):
self.assertEqual(minimize(nice_name), chr_cp437(nice_names[nice_name]))
self.assertEqual(minimize("3 copy add square half"), "3;+²½")
class SeriousTest(unittest.TestCase):
def __init__(self, *args):
super().__init__(*args)
# fix for Python < 3.4
if not hasattr(self, 'subTest'):
self.subTest = self.dummy_manager
@contextlib.contextmanager
def dummy_manager(*args, **kwargs):
yield
def setUp(self):
self.srs = Seriously(debug_mode=debug_mode)
def tearDown(self):
self.srs = None
if sys.stdin is not sys.__stdin__:
sys.stdin.close()
sys.stdin = sys.__stdin__
def setInput(self, str):
if sys.stdin is not sys.__stdin__:
sys.stdin.close()
sys.stdin = StringIO(str)
def assert_serious(self, code, output, input=None, close=False):
if input is not None:
self.setInput(input)
else:
self.setInput('')
if not close:
self.assertEqual(self.srs.eval(code), output)
else:
for a, b in zip(self.srs.eval(code), output):
self.assertAlmostEqual(a, b)
self.srs.clear_stack()
class IOTests(SeriousTest):
def test_raw_input(self):
self.assert_serious(chr_cp437(0x09), ['a'], "a\n")
self.assert_serious(chr_cp437(0x15), ['abc\n'], "abc\n")
def test_formatted_input(self):
self.assert_serious(',', ['a'], '"a"\n')
self.assert_serious(',', [12345], '12345\n')
self.assert_serious(',', [[3, 2, 1]], '[3,2,1]\n')
self.assert_serious(',', [[3, 2, 1]], '3, 2, 1\n')
def test_implicit_input(self):
self.assert_serious('', ['a'], '"a"\n')
def test_nth_input(self):
self.assert_serious(chr_cp437(0xE1), ['a','a'], '"a"\n')
self.assert_serious('0'+chr_cp437(0xE1), ['a','b','a'], '"a"\n"b"\n')
self.assert_serious("'r"+chr_cp437(0xE1), ['a', 'r', 'a'], '"a"\n')
def test_file_io(self):
self.assert_serious("'b'a"+chr_cp437(0x01), [])
self.assert_serious("'a"+chr_cp437(0x02), ['b'])
os.remove("a")
class LiteralTests(SeriousTest):
def test_strings(self):
self.assert_serious('"a', ['a'])
self.assert_serious('"a"', ['a'])
self.assert_serious("'a", ['a'])
self.assert_serious("'a'b", ['b', 'a'])
def test_digits(self):
for i in range(10):
with self.subTest(i=i):
self.assert_serious(str(i), [i])
def test_numerics(self):
self.assert_serious(':12345', [12345])
self.assert_serious(':-1', [-1])
self.assert_serious(':.5', [0.5])
self.assert_serious(':1+2.2i', [1+2.2j])
self.assert_serious(':12+', [12])
self.assert_serious(':+', [0])
def test_functions(self):
self.assert_serious("`f", [SeriousFunction("f")])
self.assert_serious("⌠foo⌡", [SeriousFunction("foo")])
self.assert_serious("⌠⌠foo⌡⌡", [SeriousFunction("⌠foo⌡")]),
self.assert_serious("⌠foo⌡$", ["foo"])
def test_eval(self):
self.assert_serious('"len(set([1,2,2,3]))"{}'.format(chr_cp437(0xF0)),
[3])
def test_lists(self):
self.assert_serious("[1,2,3]", [[1,2,3]])
self.assert_serious("[[1],[2]]", [[[1],[2]]])
class StackTests(SeriousTest):
def test_count(self):
self.assert_serious('1 ', [1, 1])
def test_dupe(self):
self.assert_serious('1;', [1, 1])
self.assert_serious('"abc";', ["abc", "abc"])
self.assert_serious('3R;', [[1, 2, 3], [1, 2, 3]])
self.assert_serious('3R;Z;', [[[1,1], [2,2], [3,3]], [[1,1], [2,2], [3,3]]])
def test_rotations(self):
self.assert_serious('123(', [1, 3, 2])
self.assert_serious('123)', [2, 1, 3])
self.assert_serious('123@', [2, 3, 1])
self.assert_serious('1232{', [2, 1, 3])
self.assert_serious('1232}', [1, 3, 2])
self.assert_serious('13422'+chr_cp437(0xAF), [4,2,3,1])
self.assert_serious('13422'+chr_cp437(0xAE), [4,3,2,1])
def test_logic(self):
self.assert_serious(r'120I', [1])
    def test_repeat_value(self):
self.assert_serious('1;', [1,1])
self.assert_serious('23n', [3,3])
def test_quine(self):
self.assert_serious('Q;', ['Q;', 'Q;'])
def test_loop(self):
self.assert_serious('5W;D', [0, 1, 2, 3, 4, 5])
def test_misc(self):
self.assert_serious('123a', [1, 2, 3])
with self.assertRaises(SystemExit):
self.srs.eval('123'+chr_cp437(0x7F))
self.srs.clear_stack()
self.assert_serious('123'+chr_cp437(0xB3), [3, 2, 1, 3, 2, 1])
self.assert_serious('123'+chr_cp437(0xC5), [3, 3, 2, 2, 1, 1])
self.assert_serious('12'+chr_cp437(0xC6), [1, 1])
self.assert_serious('1'+chr_cp437(0xEC)+'D', [0, 1])
self.assert_serious('N', [NinetyNineBottles()])
def test_repeat(self):
self.assert_serious('3¶5', [5, 5, 5])
self.assert_serious('52¶²', [5**4])
def test_fork(self):
self.assert_serious('23♫+-', [[5, 1]])
self.assert_serious('23♫k*', [[[2, 3], 6]])
class RegisterTests(SeriousTest):
def test_push_pop(self):
self.assert_serious('1{}2{}{}{}'.format(
chr_cp437(0xBB), chr_cp437(0xBC),
chr_cp437(0xBE), chr_cp437(0xBD)), [1, 2])
self.assert_serious('53{}3{}'.format(chr_cp437(0xBF),
chr_cp437(0xC0)), [5])
def test_push_iterable(self):
self.assert_serious('"abc"O╗3R⌠╜@⌡M', [[1, [97, 98, 99], 2, [97, 98, 99], 3, [97, 98, 99]]])
def test_input(self):
self.assert_serious(chr_cp437(0xCA)+chr_cp437(0xBD)+chr_cp437(0xBE),
[2, 'b'],
'"b"\n2\n')
class MathTests(SeriousTest):
def test_arithmetic(self):
self.assert_serious('23+', [5])
self.assert_serious('23-', [1])
self.assert_serious('23*', [6])
self.assert_serious('24\\', [2])
self.assert_serious('25/', [2.5])
self.assert_serious('25\\', [2])
self.assert_serious('4'+chr_cp437(0xFD), [16])
self.assert_serious('32'+chr_cp437(0xFC), [8])
self.assert_serious('36'+chr_cp437(0x1F), [2, 1])
self.assert_serious('4!', [24])
self.assert_serious('24'+chr_cp437(0xDB), [6])
self.assert_serious('24'+chr_cp437(0xDC), [12])
self.assert_serious('21'+chr_cp437(0x80), [1+2j])
self.assert_serious(':12w', [[[2,2],[3,1]]])
self.assert_serious(':12y', [[2,3]])
self.assert_serious(':12o', [[2, 2, 3]])
self.assert_serious(':-3s', [-1])
self.assert_serious('3s', [1])
self.assert_serious('0s', [0])
self.assert_serious('25^', [7])
self.assert_serious('24%', [0])
self.assert_serious('3Ru', [[2,3,4]])
self.assert_serious('[1,2,3]3+', [[4,5,6]])
self.assert_serious('3R3+', [[4,5,6]])
self.assert_serious('2[2,4,6]+', [[4, 6, 8]])
self.assert_serious('23R2*+', [[4, 6, 8]])
self.assert_serious('[1,2,3]3*', [[3,6,9]])
self.assert_serious('3R3*', [[3,6,9]])
self.assert_serious('2[2,4,6]*', [[4, 8, 12]])
self.assert_serious('23R2**', [[4, 8, 12]])
self.assert_serious('2'+chr_cp437(0x8C), [2j])
self.assert_serious('[2,3]'+chr_cp437(0x8C), [[2j,3j]])
self.assert_serious('2Ru'+chr_cp437(0x8C), [[2j,3j]])
self.assert_serious(':1+2j'+chr_cp437(0xD7), [2, 1])
self.assert_serious('6:21▲', [42])
# weird prime bug test
self.assert_serious('9uyX9uR`p░', [[2, 3, 5, 7]])
self.assert_serious(':2.7!', [4.170651783796603], close=True)
def test_lists(self):
self.assert_serious('[1][1,2]-', [[2]])
self.assert_serious('1R2R-', [[2]])
self.assert_serious('[2,3];*', [13])
self.assert_serious('3R;*', [14])
self.assert_serious('[1,2,3]M', [3])
self.assert_serious('3RM', [3])
self.assert_serious('[1,2,3]m', [1])
self.assert_serious('3Rm', [1])
self.assert_serious('[1,2,3,4]'+chr_cp437(0xE4), [10])
self.assert_serious('4R'+chr_cp437(0xE4), [10])
self.assert_serious('[2.5,2.5]'+chr_cp437(0xE4), [5.0])
self.assert_serious('[1,2,3,4]'+chr_cp437(0xE3), [24])
self.assert_serious('4R'+chr_cp437(0xE3), [24])
self.assert_serious('[1,2,3,4]'+chr_cp437(0xBA), [2.5])
self.assert_serious('4R'+chr_cp437(0xBA), [2.5])
self.assert_serious('[1,2,6,3,4]'+chr_cp437(0xBA), [6])
self.assert_serious('5R'+chr_cp437(0xBA), [3])
self.assert_serious('[1,2,3,3]'+chr_cp437(0x9A), [3])
self.assert_serious('33Rik'+chr_cp437(0x9A), [3])
self.assert_serious('[3,6,9,12]'+chr_cp437(0x1F), [[1, 2, 3, 4]])
self.assert_serious('4R3*'+chr_cp437(0x1F), [[1, 2, 3, 4]])
self.assert_serious('4r', [[0,1,2,3]])
self.assert_serious('4R', [[1,2,3,4]])
self.assert_serious('[1,2,3,4]'+chr_cp437(0x80), [3+4j, 1+2j])
self.assert_serious('4R'+chr_cp437(0x80), [3+4j, 1+2j])
self.assert_serious('[1,2,3,4,5]'+chr_cp437(0x80), [5, 3+4j, 1+2j])
self.assert_serious('5R'+chr_cp437(0x80), [5, 3+4j, 1+2j])
self.assert_serious('2[1,2,3]'+chr_cp437(0xFC), [[1, 4, 9]])
self.assert_serious('23R'+chr_cp437(0xFC), [[1, 4, 9]])
self.assert_serious('[1,2,3,4]'+chr_cp437(0x91), [2.5])
self.assert_serious('4R'+chr_cp437(0x91), [2.5])
self.assert_serious('[1,2,3,4]'+chr_cp437(0xE5), [[1, 3, 6, 10]])
self.assert_serious('4R'+chr_cp437(0xE5), [[1, 3, 6, 10]])
self.assert_serious('[1,2,3]3R=', [1])
self.assert_serious('[65,66,67]"ABC"O=', [1])
self.assert_serious('2Rx', [[1]])
self.assert_serious('"ABC"OΣ', [65+66+67])
self.assert_serious('4RΣ', [1+2+3+4])
self.assert_serious('3r:65+"ABC"O=', [1])
self.assert_serious('[8,9,21]▲', [504])
self.assert_serious('[42]▲', [42])
self.assert_serious('[]▲', [[]])
self.assert_serious('3R⌐', [[3, 4, 5]])
self.assert_serious('3R¬', [[-1, 0, 1]])
def test_filters(self):
self.assert_serious("[4]12'3k"+chr_cp437(0x8D), [[1, 2]])
self.assert_serious("[4]12'3k"+chr_cp437(0x92), [['3']])
self.assert_serious("[4]12'3k"+chr_cp437(0xA5), [[[4]]])
def test_sequences(self):
self.assert_serious('2:547*p', [0])
self.assert_serious(':23p', [1])
self.assert_serious(':100P', [547])
self.assert_serious('8f', [6])
self.assert_serious(':16'+chr_cp437(0xDF), ['0123456789ABCDEF'])
self.assert_serious('2'+chr_cp437(0xB9), [[1, 2, 1]])
self.assert_serious(':12F', [144])
self.assert_serious(':20F', [6765])
self.assert_serious(':38F', [39088169])
self.assert_serious(':50'+chr_cp437(0xF6), [[1, 2, 5, 10, 25, 50]])
self.assert_serious('5'+chr_cp437(0xF6), [[1, 5]])
self.assert_serious(':10▓', [4])
self.assert_serious(':15▓', [6])
def test_trig(self):
trig_fns = {
'sin': ('S', chr_cp437(0x83)),
'sinh': (chr_cp437(0x8E), chr_cp437(0x87)),
'cos': ('C', chr_cp437(0x84)),
'cosh': (chr_cp437(0x8F), chr_cp437(0x88)),
'tan': ('T', chr_cp437(0x85)),
'tanh': (chr_cp437(0x90), chr_cp437(0x89)),
}
for fn in trig_fns:
with self.subTest(function=fn):
fns = trig_fns[fn]
self.assert_serious('1{}{}'.format(*fns), [1], close=True)
if fn not in ('sinh', 'cosh', 'tanh'):
#skip hyperbolic functions for complex because they don't work right
# maybe some time in the future I'll learn enough math to make these tests work
self.assert_serious(':1+2j{}{}'.format(*fns), [1+2j], close=True)
def test_complex(self):
self.assert_serious('[1+2j,2+1j]Σ', [3+3j])
self.assert_serious('[1+2j,2+1j]π', [5j])
self.assert_serious('[1+2j,2+1j]σ', [[1+2j, 3+3j]])
self.assert_serious('[1+2j,2+1j]µ', [complex(math.sqrt(2), math.sqrt(2))], close=True)
self.assert_serious('[1+2j,2+1j]æ', [1.5+1.5j])
class StringAndListTests(SeriousTest):
def test_format(self):
self.assert_serious('[2,3]"{}.{}"f', ["2.3"])
self.assert_serious('3R"{}.{}{}"f', ["1.23"])
self.assert_serious('[2,3]"%d.%d"%', ["2.3"])
self.assert_serious('3R"%d.%d%d"%', ["1.23"])
def test_modify(self):
self.assert_serious('52[2,3,4]T', [[2, 3, 5]])
self.assert_serious('523RuT', [[2, 3, 5]])
self.assert_serious('52"234"T', ["235"])
def test_ords(self):
self.assert_serious('"abc"O', [[0x61, 0x62, 0x63]])
self.assert_serious('9'+chr_cp437(0xDA), [chr_cp437(0x09)])
self.assert_serious("'"+chr_cp437(0x09)+chr_cp437(0xD9), [9])
def test_string_methods(self):
self.assert_serious('"ab"2*', ["abab"])
self.assert_serious('"ab"0DD*', ["baba"])
self.assert_serious('" ab c "'+chr_cp437(0x93), ["ab c"])
self.assert_serious('" ab c "'+chr_cp437(0x94), ["ab c "])
self.assert_serious('" ab c "'+chr_cp437(0x95), [" ab c"])
self.assert_serious('"CAFE babe 123"'+chr_cp437(0x96),
["CAFE BABE 123"])
self.assert_serious('"CAFE babe 123"'+chr_cp437(0x97),
["cafe babe 123"])
self.assert_serious('"CAFE babe 123"'+chr_cp437(0x98),
["Cafe Babe 123"])
self.assert_serious('"CAFE babe 123"'+chr_cp437(0x99),
["cafe BABE 123"])
self.assert_serious('"abcd"N', ["d"])
self.assert_serious('"abcd"F', ["a"])
self.assert_serious("""'0"010203040"s""", [['', '1', '2', '3', '4', '']])
self.assert_serious('4"Hello"H', ['Hell'])
self.assert_serious('1"Hello"t', ['ello'])
self.assert_serious('2"1234"V', [['1', '12', '23', '34', '4']])
self.assert_serious('"123""345"^', ['4512'])
self.assert_serious('" A""_a""abc_def"t', ['Abc def'])
self.assert_serious('"abc"p', ['a', 'bc'])
self.assert_serious('"abc"d', ['c', 'ab'])
self.assert_serious('\'d"abc"q', ['abcd'])
self.assert_serious('\'a"bcd"o', ['abcd'])
self.assert_serious('"abcd"'+chr_cp437(0x8A), ["'abcd'"])
self.assert_serious(':123.45'+chr_cp437(0x8A), ['123.45'])
self.assert_serious(':1+2i'+chr_cp437(0x8A), ['(1+2j)'])
self.assert_serious('⌠foo⌡'+chr_cp437(0x8A), ['⌠foo⌡'])
self.assert_serious('"1.23"i', [1.23])
self.assert_serious('"123"R', ["321"])
self.assert_serious('"abc"3*', ['abcabcabc'])
self.assert_serious('3"abc"*', ['abcabcabc'])
self.assert_serious('3"1234"'+chr_cp437(0xD8), [['123', '4']])
self.assert_serious('3"1234"'+chr_cp437(0xB5), [['1', '2', '34']])
self.assert_serious('"abc"3'+chr_cp437(0xE0), [["abc", "abc", "abc"]])
self.assert_serious('53'+chr_cp437(0xE0), [[5, 5, 5]])
self.assert_serious("' u", ['!'])
self.assert_serious("'!D", [' '])
self.assert_serious('240"abcdef"'+chr_cp437(0xE8), ["ac"])
self.assert_serious('[0,4,2]"abcdef"'+chr_cp437(0xE8), ["ac"])
self.assert_serious("3R'.*", [['.', '..', '...']])
self.assert_serious("{}±".format('''"'foo'"'''), ['"foo"'])
self.assert_serious("{}±±".format('''"'foo'"'''), ["'foo'"])
self.assert_serious('"45""12345"í', [3])
self.assert_serious('"1""0101010"╢', [5])
def test_list_methods(self):
self.assert_serious('[1,2,3][4,5,6]'+chr_cp437(0x9D), [[5, 7, 9]])
self.assert_serious('3R;3+'+chr_cp437(0x9D), [[5, 7, 9]])
self.assert_serious('3R5#+', [[5, 1, 2, 3]])
self.assert_serious('3R"abc"+', [["abc", 1, 2, 3]])
self.assert_serious("""'0"010203040"#s""", [[[],['1'],['2'],['3'],['4'],[]]])
self.assert_serious('0"10203"s', [['1', '2', '3']])
self.assert_serious('2[1,2,3,4]V', [[[1],[1,2,],[2,3],[3,4],[4]]])
self.assert_serious('24RV', [[[1],[1,2,],[2,3],[3,4],[4]]])
self.assert_serious('[1,2,3][3,4,5]^', [[4,5,1,2]])
self.assert_serious('3R;2+^', [[4,5,1,2]])
self.assert_serious('2[1,2,3]'+chr_cp437(0xCF),
[[[1, 2], [1, 3], [2, 3]]])
self.assert_serious('23R'+chr_cp437(0xCF),
[[[1, 2], [1, 3], [2, 3]]])
self.assert_serious('2[1,2,3]'+chr_cp437(0xD0),
[[[1, 2], [1, 3], [2, 1],
[2, 3], [3, 1], [3, 2]]])
self.assert_serious('23R'+chr_cp437(0xD0),
[[[1, 2], [1, 3], [2, 1],
[2, 3], [3, 1], [3, 2]]])
self.assert_serious('2[1,2,3]'+chr_cp437(0xF9),
[[[1, 1], [1, 2], [1, 3],
[2, 1], [2, 2], [2, 3],
[3, 1], [3, 2], [3, 3]]])
self.assert_serious('23R'+chr_cp437(0xF9),
[[[1, 1], [1, 2], [1, 3],
[2, 1], [2, 2], [2, 3],
[3, 1], [3, 2], [3, 3]]])
self.assert_serious('[1,2,3][1,2,3]'+chr_cp437(0xF9),
[[[1, 1], [1, 2], [1, 3],
[2, 1], [2, 2], [2, 3],
[3, 1], [3, 2], [3, 3]]])
self.assert_serious('3R;'+chr_cp437(0xF9),
[[[1, 1], [1, 2], [1, 3],
[2, 1], [2, 2], [2, 3],
[3, 1], [3, 2], [3, 3]]])
self.assert_serious('[1,2,3]♂D', [[0, 1, 2]])
self.assert_serious('3R♂D', [[0, 1, 2]])
self.assert_serious('[1,2,3];♀ⁿ', [[1, 4, 27]])
self.assert_serious('3R;♀ⁿ', [[1, 4, 27]])
self.assert_serious('[1,2,3]2♀>', [[1, 0, 0]])
self.assert_serious('3R2♀>', [[1, 0, 0]])
self.assert_serious('12♀>', [[1]])
self.assert_serious('[1,2,3]/', [[3,1,2]])
self.assert_serious('3R/', [[3,1,2]])
self.assert_serious('[1,2,3]\\', [[2,3,1]])
self.assert_serious('3R\\', [[2,3,1]])
self.assert_serious('[1,2,3]d@q', [[1,2,3]])
self.assert_serious('3Rd@q', [[1,2,3]])
self.assert_serious('[1,2,3]p@o', [[1,2,3]])
self.assert_serious('3Rp@o', [[1,2,3]])
self.assert_serious('[1,2,3]i', [1,2,3])
self.assert_serious('3Ri', [1,2,3])
self.assert_serious('[1,2,3]R', [[3,2,1]])
self.assert_serious('3RR', [[3,2,1]])
self.assert_serious('1#', [[1]])
self.assert_serious('"123"#', [['1','2','3']])
self.assert_serious('[1,2,3]#', [[1,2,3]])
self.assert_serious('3R#', [[1,2,3]])
self.assert_serious('[1,2,3][0,1]'+chr_cp437(0xB0), [[2]])
self.assert_serious('3R2r'+chr_cp437(0xB0), [[2]])
self.assert_serious('[1,2,3]⌠2>⌡'+chr_cp437(0xB0), [[1]])
self.assert_serious('3R⌠2>⌡'+chr_cp437(0xB0), [[1]])
self.assert_serious('[1,2,3]N', [3])
self.assert_serious('3RN', [3])
self.assert_serious('[1,2,3]F', [1])
self.assert_serious('3RF', [1])
self.assert_serious('3[1,2,3,4]'+chr_cp437(0xD8), [[[1,2,3], [4]]])
self.assert_serious('34R'+chr_cp437(0xB5), [[[1],[2],[3, 4]]])
self.assert_serious('4#5'+chr_cp437(0xE0), [[4,4,4,4,4]])
self.assert_serious('[4,5]5'+chr_cp437(0xE0), [[4, 5, 4, 5, 4, 5, 4, 5, 4, 5]])
self.assert_serious('2R3'+chr_cp437(0xE0), [[1, 2, 1, 2, 1, 2]])
self.assert_serious('3R'+chr_cp437(0xE6), [2.160246899469287])
self.assert_serious('3Rd', [3, [1, 2]])
self.assert_serious('3Rp', [1, [2, 3]])
self.assert_serious('2406R'+chr_cp437(0xE8), [[1,3]])
self.assert_serious('[0,4,2]6R'+chr_cp437(0xE8), [[1,3]])
self.assert_serious('36R╡', [[[1, 2], [3, 4], [5, 6]]])
self.assert_serious('3[1,2,3,4]╡', [[[1], [2], [3, 4]]])
self.assert_serious('[[1,2],"ab",3]r', [[0,1,2]])
self.assert_serious('[]r', [[]])
self.assert_serious('"abc"r', [[0,1,2]])
self.assert_serious('""r', [[]])
self.assert_serious('⌠foo⌡r', [[0,1,2]])
self.assert_serious('⌠⌡r', [[]])
self.assert_serious('1[0,1,0,1,0,1,0]╢', [5])
self.assert_serious('3R`+_', [6])
self.assert_serious('3R`+┴', [[1, 3, 6]])
class BaseConversionTests(SeriousTest):
def test_bases(self):
self.assert_serious('2:5.5'+chr_cp437(0xAD), ["101.1"])
self.assert_serious(':16:-26'+chr_cp437(0xAD), ["-1A"])
self.assert_serious(':11'+chr_cp437(0xC3), ["1011"])
self.assert_serious('"Foo"'+chr_cp437(0xC3), ["010001100110111101101111"])
self.assert_serious(':3.07'+chr_cp437(0xC3), ['0100000000001000100011110101110000101000111101011100001010001111'])
self.assert_serious(':256"{}"'.format(chr_cp437(0xA8)+chr_cp437(0xAD))+chr_cp437(0xA8), [0xA8*256+0xAD])
self.assert_serious(':256:{}'.format(0xA8*256+0xAD)+chr_cp437(0xAD), [chr_cp437(0xA8)+chr_cp437(0xAD)])
self.assert_serious('20k:16@'+chr_cp437(0xA8), [0x20])
class FunctionTests(SeriousTest):
def test_function_methods(self):
self.assert_serious('⌠foo⌡'+chr_cp437(0x9C), ['foo'])
self.assert_serious('"foo"'+chr_cp437(0x9C), [SeriousFunction('foo')])
self.assert_serious('5'+chr_cp437(0x9C), [5])
self.assert_serious('⌠foo⌡l', [3])
self.assert_serious('⌠bar⌡⌠foo⌡+', [SeriousFunction('foobar')])
self.assert_serious('⌠foo⌡3*', [SeriousFunction('foofoofoo')])
self.assert_serious('["oo"]⌠f%s⌡%', [SeriousFunction('foo')])
self.assert_serious('⌠foo⌡"foo"=', [1])
self.assert_serious('⌠foo⌡⌠bar⌡=', [0])
self.assert_serious('⌠foo⌡3=', [3, SeriousFunction('foo')])
self.assert_serious('[1,2,3]⌠++⌡R', [[6]])
self.assert_serious('3`1n', [1,1,1])
self.assert_serious('5⌠2@%Y⌡'+chr_cp437(0xD6), [[0,2,4,6,8]])
self.assert_serious('[1,2,3,4,5]`pc', [3])
self.assert_serious('[2,4,6,8]⌠5>⌡c', [2])
def test_combinators(self):
self.assert_serious('3⌠1kMD⌡Y', [0])
class RandomTests(SeriousTest):
def test_random(self):
random.seed(0)
self.assert_serious('2v52BG52V6J"abcd"J"abcd"'+chr_cp437(0xC8), ['badc', 'c', 1, 3.0831724219508216, 0.09158478740507359, 2])
class TestProbablyPrime(unittest.TestCase):
def test_simple(self):
self.assertTrue(probably_prime(13))
def test_first_1000(self):
for num in [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101,
103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199,
211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317,
331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443,
449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577,
587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839,
853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983,
991, 997]:
self.assertTrue(probably_prime(num))
def test_large(self):
large_primes = [19823931826121, 21718972014737, 2866953310097]
for num in large_primes:
self.assertTrue(probably_prime(num))
class MetaTests(SeriousTest):
def test_meta(self):
self.assert_serious(chr_cp437(0x03), [Seriously.VERSION])
|
adapter/protobuf/test/protobuf/consume.py | higherkindness/mu-haskell | 286 | 11147420 | from example_pb2 import *
import sys
# Deserialize a `person` message (defined in the generated example_pb2 module)
# from the file named on the command line, then print it.
with open(sys.argv[1], "rb") as f:
    example_person = person()
    example_person.ParseFromString(f.read())
print(example_person)
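# The producing side would mirror this read path; a minimal sketch, kept as a
# comment (the output file name is illustrative and no fields are set, since
# the `person` schema is not shown in this excerpt):
# out_person = person()
# with open("person.bin", "wb") as out:
#     out.write(out_person.SerializeToString())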
|
hubspot/crm/extensions/calling/__init__.py | Ronfer/hubspot-api-python | 117 | 11147486 | # coding: utf-8
# flake8: noqa
"""
Calling Extensions API
Provides a way for apps to add custom calling options to a contact record. This works in conjunction with the [Calling SDK](#), which is used to build your phone/calling UI. The endpoints here allow your service to appear as an option to HubSpot users when they access the *Call* action on a contact record. Once accessed, your custom phone/calling UI will be displayed in an iframe at the specified URL with the specified dimensions on that record. # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
__version__ = "1.0.0"
# import apis into sdk package
from hubspot.crm.extensions.calling.api.settings_api import SettingsApi
# import ApiClient
from hubspot.crm.extensions.calling.api_client import ApiClient
from hubspot.crm.extensions.calling.configuration import Configuration
from hubspot.crm.extensions.calling.exceptions import OpenApiException
from hubspot.crm.extensions.calling.exceptions import ApiTypeError
from hubspot.crm.extensions.calling.exceptions import ApiValueError
from hubspot.crm.extensions.calling.exceptions import ApiKeyError
from hubspot.crm.extensions.calling.exceptions import ApiException
# import models into sdk package
from hubspot.crm.extensions.calling.models.error import Error
from hubspot.crm.extensions.calling.models.error_detail import ErrorDetail
from hubspot.crm.extensions.calling.models.settings_patch_request import SettingsPatchRequest
from hubspot.crm.extensions.calling.models.settings_request import SettingsRequest
from hubspot.crm.extensions.calling.models.settings_response import SettingsResponse
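# A minimal, illustrative wiring of the classes exported above, kept as a
# comment; the token value is a placeholder and the SettingsApi method names
# are not shown here -- consult the generated SettingsApi for real signatures:
# config = Configuration()
# config.access_token = "YOUR_ACCESS_TOKEN"
# settings_api = SettingsApi(ApiClient(config))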
|
test/optimizer/benchmark/main.py | Dee-Why/lite-bo | 184 | 11147496 | import os
import sys
import time
import pickle
import argparse
import tabulate
import numpy as np
sys.path.append(os.getcwd())
parser = argparse.ArgumentParser()
dataset_set = 'dna,pollen,abalone,splice,madelon,spambase,wind,page-blocks(1),pc2,segment'
parser.add_argument('--datasets', type=str, default=dataset_set)
parser.add_argument('--algo_id', type=str, default='random_forest,adaboost')
parser.add_argument('--methods', type=str, default='openbox,smac,hyperopt')
parser.add_argument('--rep_num', type=int, default=10)
parser.add_argument('--start_id', type=int, default=0)
parser.add_argument('--seed', type=int, default=1)
save_dir = './data/benchmark_results/exp1/'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
args = parser.parse_args()
# Derive the run settings from the argparse options above. The seed list below
# follows the usual pattern in this benchmark suite and is an assumption for
# this excerpt; `algorithms`, `eval_type`, `time_limit`, `enable_ensemble` and
# the evaluate_* helpers are expected to be defined in the omitted part of the file.
dataset_list = args.datasets.split(',')
algo_ids = args.algo_id.split(',')
methods = args.methods.split(',')
rep = args.rep_num
start_id = args.start_id
np.random.seed(args.seed)
seeds = np.random.randint(low=1, high=10000, size=start_id + rep)
if __name__ == "__main__":
    # A loop over `methods` is added here so that `mth` is defined; the
    # original nesting order is assumed.
    for mth in methods:
        for dataset in dataset_list:
            for algo_id in algo_ids:
                for run_id in range(start_id, start_id + rep):
                    seed = int(seeds[run_id])
                    if mth == '':
                        evaluate_hmab(algorithms, run_id, dataset=dataset, seed=seed,
                                      eval_type=eval_type,
                                      time_limit=time_limit,
                                      enable_ens=enable_ensemble)
                    elif mth == 'ausk':
                        evaluate_autosklearn(algorithms, run_id,
                                             dataset=dataset, time_limit=time_limit, seed=seed,
                                             enable_ens=enable_ensemble,
                                             eval_type=eval_type)
                    else:
                        raise ValueError('Invalid method name: %s.' % mth)
|
tests/common/test_op/ascend/matmul4d_ad.py | tianjiashuo/akg | 286 | 11147516 | <filename>tests/common/test_op/ascend/matmul4d_ad.py
# Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: matmul4d_ad"""
import akg.tvm
import akg
from akg.ops.math.ascend import MatMul
from akg.utils import custom_tiling as ct_util
def get_shape(pld): return [d.value for d in pld.shape]
matmul4d_ad_set_dim_map = {
str(([1, 4, 64, 16, 16], [1, 64, 2, 16, 16], False, False)): ((4, 4), (16, 16), (128, 128), (16, 16), (16, 16)),
str(([1, 64, 4, 16, 16], [1, 64, 2, 16, 16], True, False)): ((4, 4), (16, 16), (128, 128), (16, 16), (16, 16)),
str(([1, 4, 64, 16, 16], [1, 2, 64, 16, 16], False, True)): ((4, 4), (16, 16), (128, 128), (16, 16), (16, 16)),
str(([1, 64, 4, 16, 16], [1, 2, 64, 16, 16], True, True)): ((4, 4), (16, 16), (128, 128), (16, 16), (16, 16)),
str(([1, 4, 8, 16, 16], [1, 8, 2, 16, 16], False, False)): ((65536, 65536), (65536, 65536), (65536, 65536), (65536, 65536), (65536, 65536)),
}
def matmul4d_ad_set_dim_func(head, x, y, b, out_dtype, adj_x=False, adj_y=False):
key = []
key.append(get_shape(x))
key.append(get_shape(y))
key.append(adj_x)
key.append(adj_y)
hash_key = str(tuple(key))
if hash_key in matmul4d_ad_set_dim_map.keys():
return ct_util.set_dims(matmul4d_ad_set_dim_map[hash_key])
else:
return ""
@ct_util.reg_set_dim_func(matmul4d_ad_set_dim_func)
def matmul4d_ad(head, x, y, b, out_dtype, adj_x=False, adj_y=False):
"""compute 4d format mat shape from shape inputs."""
shape_xx = get_shape(x)
if adj_x: # no need to change in this case
shape_xx_forward = shape_xx
else:
batch_num, m_o, k_o, m_i, k_i = shape_xx
shape_xx_forward = (batch_num, k_o, m_o, k_i, m_i)
########################################
# compute the forward kernel #
########################################
x_temp = akg.tvm.placeholder(shape_xx_forward, name="input_1", dtype=x.dtype)
# we transfer all cases to that of adj_x=False
out = MatMul(x_temp, y, b, out_dtype, "zN", "nZ", "zN", False, adj_y)[0]
########################################
# compute the backward kernel #
########################################
_jacs = list(akg.differentiate(out, [x_temp], head))
if adj_x:
grad = akg.tvm.compute(shape_xx, lambda n, ko, mo, ki, mi: _jacs[0][n, ko, mo, mi, ki])
else:
grad = akg.tvm.compute(shape_xx, lambda n, mo, ko, mi, ki: _jacs[0][n, ko, mo, mi, ki])
sjacs = akg.tvm.create_schedule([grad.op])
attrs = dict()
attrs["pragma_data_transpose"] = "Y"
attrs["pragma_data_transpose_block"] = "Y"
if not adj_y:
attrs["pragma_weight_transpose"] = "Y"
return grad, attrs
|
lib/python/frugal/tests/aio/transport/test_nats_transport.py | ariasheets-wk/frugal | 144 | 11147546 | # Copyright 2017 Workiva
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import mock
from nats.aio.client import Client
from thrift.transport.TTransport import TTransportException
from frugal import _NATS_MAX_MESSAGE_SIZE
from frugal.aio.transport import FNatsTransport
from frugal.exceptions import TTransportExceptionType
from frugal.tests.aio import utils
class TestFNatsTransport(utils.AsyncIOTestCase):
def setUp(self):
super().setUp()
self.mock_nats_client = mock.Mock(spec=Client)
self.subject = 'foo'
self.inbox = 'bar'
self.transport = FNatsTransport(
self.mock_nats_client,
self.subject,
inbox=self.inbox
)
@mock.patch('frugal.aio.transport.nats_transport.new_inbox')
def test_init(self, mock_new_inbox):
self.assertEqual(self.mock_nats_client, self.transport._nats_client)
self.assertEqual(self.subject, self.transport._subject)
self.assertEqual(self.inbox, self.transport._inbox)
mock_new_inbox.return_value = 'a new inbox'
transport = FNatsTransport(self.mock_nats_client,
self.subject)
mock_new_inbox.assert_called_once_with()
self.assertEqual('a new inbox', transport._inbox)
@utils.async_runner
async def test_open_nats_not_connected(self):
self.mock_nats_client.is_connected = False
with self.assertRaises(TTransportException) as cm:
await self.transport.open()
self.assertEqual(TTransportExceptionType.NOT_OPEN, cm.exception.type)
@utils.async_runner
async def test_open_already_open(self):
self.mock_nats_client.is_connected = True
self.transport._is_open = True
with self.assertRaises(TTransportException) as cm:
await self.transport.open()
self.assertEqual(TTransportExceptionType.ALREADY_OPEN, cm.exception.type)
@utils.async_runner
async def test_open_subscribes(self):
future = asyncio.Future()
future.set_result(235)
self.mock_nats_client.subscribe_async.return_value = future
await self.transport.open()
self.assertEqual(235, self.transport._sub_id)
self.mock_nats_client.subscribe_async.assert_called_once_with(
self.inbox + ".*",
cb=self.transport._on_message_callback
)
self.assertTrue(self.transport._is_open)
@utils.async_runner
async def test_on_message_callback(self):
message = mock.Mock()
message.data = [1, 2, 3, 4, 5, 6, 7, 8, 9]
callback = mock.Mock()
future = asyncio.Future()
future.set_result(None)
callback.return_value = future
self.transport.handle_response = callback
await self.transport._on_message_callback(message)
callback.assert_called_once_with(message)
@utils.async_runner
async def test_close_not_subscribed(self):
self.transport._sub_id = None
await self.transport.close()
self.assertFalse(self.mock_nats_client.unsubscribe.called)
@utils.async_runner
async def test_close_unsubscribes(self):
self.transport._is_open = True
self.transport._sub_id = 235
future = asyncio.Future()
future.set_result(None)
self.mock_nats_client.unsubscribe.return_value = future
await self.transport.close()
self.assertIsNone(self.transport._sub_id)
self.assertFalse(self.transport._is_open)
self.mock_nats_client.unsubscribe.assert_called_once_with(235)
@utils.async_runner
async def test_flush(self):
self.transport._is_open = True
data = bytearray([2, 3, 4, 5, 6, 7])
data_len = bytearray([0, 0, 0, 6])
frame = data_len + data
future = asyncio.Future()
future.set_result(None)
self.mock_nats_client.publish_request.return_value = future
await self.transport.flush(frame)
self.mock_nats_client.publish_request.assert_called_once_with(
self.subject,
self.inbox,
frame
)
@utils.async_runner
async def test_flush_op(self):
self.transport._is_open = True
op_id = 1
data = bytearray([2, 3, 4, 5, 6, 7])
data_len = bytearray([0, 0, 0, 6])
frame = data_len + data
future = asyncio.Future()
future.set_result(None)
self.mock_nats_client.publish_request.return_value = future
await self.transport.flush_op(op_id, frame)
self.mock_nats_client.publish_request.assert_called_once_with(
self.subject,
f"{self.inbox}.{op_id}",
frame
)
def test_request_size_limit(self):
self.assertEqual(_NATS_MAX_MESSAGE_SIZE,
self.transport.get_request_size_limit())
@utils.async_runner
async def test_handle_status_message(self):
message = mock.Mock()
message.data = []
message.subject = "subject.1"
await self.transport._on_message_callback(message) |
examples/ex03_proc/ubmark/proc_ubmark_cksum_roll_data.py | kevinyuan/pymtl3 | 152 | 11147554 | #=========================================================================
# proc_ubmark_cksum_roll_data.py
#=========================================================================
dataset_size = 150
mask = [ 0x0000ffff ]
ref = [ 0x9f6b0de5 ]
src = [
0x01510248, 0x02cb03f5, 0x001c0387,
0x00010264, 0x000e0090, 0x02880210,
0x0376002c, 0x0106032c, 0x02470142,
0x007400ed, 0x037e0199, 0x018702b6,
0x03da018e, 0x004b0082, 0x0238039b,
0x02f50350, 0x02ed0358, 0x02d202e8,
0x02ad000b, 0x027a0178, 0x03e502bb,
0x0085028d, 0x00ee00f9, 0x036702ad,
0x001a0141, 0x00fd032d, 0x02cf027a,
0x02450151, 0x02550200, 0x029100b5,
0x03670135, 0x01cb015d, 0x01bc031d,
0x038e036b, 0x0006034a, 0x006e03ce,
0x00ef00a2, 0x03f802a2, 0x00e700b5,
0x030303df, 0x038701d6, 0x00d00329,
0x029001eb, 0x01030259, 0x007b03aa,
0x0358028b, 0x03420157, 0x01bd0112,
0x018e0010, 0x009c001c, 0x017d03fc,
0x01df03ee, 0x014f0079, 0x01f7023e,
0x0221039d, 0x03de0363, 0x0113031a,
0x021002a1, 0x00a5010f, 0x01c703f6,
0x039b0073, 0x025c0038, 0x007a03d5,
0x00a901da, 0x0374016b, 0x02520070,
0x01de02ee, 0x005002ad, 0x01a80378,
0x02d103bd, 0x01bc038b, 0x01980078,
0x000a006b, 0x03950258, 0x028f020e,
]
|
src/python/phyre/check_solutions.py | aallaire91/phyre | 432 | 11147558 | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import os
from thrift import TSerialization
import phyre.interface.task.ttypes as task_if
import phyre.server
import phyre.settings
import phyre.simulator
import phyre.util
def yield_is_solution(task_fname, solutions_fnames):
with (phyre.settings.TASK_DIR / task_fname).open('rb') as stream:
task = TSerialization.deserialize(task_if.Task(), stream.read())
for solution_fname in solutions_fnames:
solution = phyre.util.load_user_input(
str(phyre.settings.SOLUTION_DIR / solution_fname))
yield phyre.simulator.magic_ponies(task, solution)[0]
def main():
def group_by_keys(fnames):
d = collections.defaultdict(list)
for fname in fnames:
key = fname.split('.')[0]
d[key].append(fname)
return d
tasks = group_by_keys(os.listdir(str(phyre.settings.TASK_DIR)))
solutions = group_by_keys(os.listdir(str(phyre.settings.SOLUTION_DIR)))
print(f'Found {len(tasks)} tasks and {len(solutions)} solutions')
n_weird = n_nosolution = n_wrong = n_correct = 0
for key in sorted(set(tasks) | set(solutions)):
if not key in tasks:
print(f'{key}: WARNING! Have solutions, but not tasks!')
n_weird += 1
elif key not in solutions:
print(f'{key}: no solutions')
n_nosolution += 1
else:
key_tasks = tasks[key]
key_solutions = solutions[key]
print(f'{key}: checking solution ...', end=' ', flush=True)
assert len(key_tasks) == 1, key_tasks
for is_valid in yield_is_solution(key_tasks[0], key_solutions):
if is_valid:
print('GOOD', flush=True, end=' ')
n_correct += 1
else:
print('BAD', flush=True, end=' ')
n_wrong += 1
print('')
print('Stats: weird=%d nosolution=%d correct=%d wrong=%d' %
(n_weird, n_nosolution, n_correct, n_wrong))
if __name__ == '__main__':
main()
|
third_party/virtualbox/src/libs/xpcom18a4/python/test/test_component/py_test_component.py | Fimbure/icebox-1 | 521 | 11147612 | <reponame>Fimbure/icebox-1
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is the Python XPCOM language bindings.
#
# The Initial Developer of the Original Code is
# ActiveState Tool Corp.
# Portions created by the Initial Developer are Copyright (C) 2000, 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# <NAME> <<EMAIL>> (original author)
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
# NOTE: This is a TEST interface, not a DEMO interface :-)
# We try to get as many data types as possible exposed, meaning this
# doesn't really make a good demo of a "simple component".
from xpcom import components, verbose
class PythonTestComponent:
# Note we only list the "child" interface, not our intermediate interfaces
# (which we must, by definition, also support)
_com_interfaces_ = components.interfaces.nsIPythonTestInterfaceDOMStrings
_reg_clsid_ = "{7EE4BDC6-CB53-42c1-A9E4-616B8E012ABA}"
_reg_contractid_ = "Python.TestComponent"
def __init__(self):
self.boolean_value = 1
self.octet_value = 2
self.short_value = 3
self.ushort_value = 4
self.long_value = 5
self.ulong_value = 6
self.long_long_value = 7
self.ulong_long_value = 8
self.float_value = 9.0
self.double_value = 10.0
self.char_value = "a"
self.wchar_value = "b"
self.string_value = "cee"
self.wstring_value = "dee"
self.astring_value = "astring"
self.acstring_value = "acstring"
self.utf8string_value = "utf8string"
self.iid_value = self._reg_clsid_
self.interface_value = None
self.isupports_value = None
self.domstring_value = "dom"
def __del__(self):
if verbose:
print "Python.TestComponent: __del__ method called - object is destructing"
def do_boolean(self, p1, p2):
# boolean do_boolean(in boolean p1, inout boolean p2, out boolean p3);
ret = p1 ^ p2
return ret, not ret, ret
def do_octet(self, p1, p2):
# octet do_octet(in octet p1, inout octet p2, out octet p3);
return p1+p2, p1-p2, p1*p2
def do_short(self, p1, p2):
# short do_short(in short p1, inout short p2, out short p3);
return p1+p2, p1-p2, p1*p2
def do_unsigned_short(self, p1, p2):
# unsigned short do_unsigned_short(in unsigned short p1, inout unsigned short p2, out unsigned short p3);
return p1+p2, p1-p2, p1*p2
def do_long(self, p1, p2):
# long do_long(in long p1, inout long p2, out long p3);
return p1+p2, p1-p2, p1*p2
def do_unsigned_long(self, p1, p2):
# unsigned long do_unsigned_long(in unsigned long p1, inout unsigned long p2, out unsigned long p3);
return p1+p2, p1-p2, p1*p2
def do_long_long(self, p1, p2):
# long long do_long_long(in long long p1, inout long long p2, out long long p3);
return p1+p2, p1-p2, p1*p2
def do_unsigned_long_long(self, p1, p2):
# unsigned long long do_unsigned_long_long(in unsigned long long p1, inout unsigned long long p2, out unsigned long long p3);
return p1+p2, p1-p2, p1*p2
def do_float(self, p1, p2):
# float do_float(in float p1, inout float p2, out float p3);
return p1+p2, p1-p2, p1*p2
def do_double(self, p1, p2):
# double do_double(in double p1, inout double p2, out double p3);
return p1+p2, p1-p2, p1*p2
def do_char(self, p1, p2):
# char do_char(in char p1, inout char p2, out char p3);
return chr(ord(p1)+ord(p2)), p2, p1
def do_wchar(self, p1, p2):
# wchar do_wchar(in wchar p1, inout wchar p2, out wchar p3);
return chr(ord(p1)+ord(p2)), p2, p1
def do_string(self, p1, p2):
# string do_string(in string p1, inout string p2, out string p3);
ret = ""
if p1 is not None: ret = ret + p1
if p2 is not None: ret = ret + p2
return ret, p1, p2
def do_wstring(self, p1, p2):
# wstring do_wstring(in wstring p1, inout wstring p2, out wstring p3);
ret = u""
if p1 is not None: ret = ret + p1
if p2 is not None: ret = ret + p2
return ret, p1, p2
def do_nsIIDRef(self, p1, p2):
# nsIIDRef do_nsIIDRef(in nsIIDRef p1, inout nsIIDRef p2, out nsIIDRef p3);
return p1, self._reg_clsid_, p2
def do_nsIPythonTestInterface(self, p1, p2):
# nsIPythonTestInterface do_nsIPythonTestInterface(in nsIPythonTestInterface p1, inout nsIPythonTestInterface p2, out nsIPythonTestInterface p3);
return p2, p1, self
def do_nsISupports(self, p1, p2):
# nsISupports do_nsISupports(in nsISupports p1, inout nsISupports p2, out nsISupports p3);
return self, p1, p2
def do_nsISupportsIs(self, iid):
# void do_nsISupportsIs(in nsIIDRef iid, [iid_is(iid),retval] out nsQIResult result)
# Note the framework does the QI etc on us, so there is no real point me doing it.
# (However, user code _should_ do the QI - otherwise any errors are deemed "internal" (as they
# are raised by the C++ framework), and therefore logged to the console, etc.
# A user QI allows the user to fail gracefully, whatever gracefully means for them!
return self
# Do I really need these??
## def do_nsISupportsIs2(self, iid, interface):
## # void do_nsISupportsIs2(inout nsIIDRef iid, [iid_is(iid),retval] inout nsQIResult result);
## return iid, interface
## def do_nsISupportsIs3(self, interface):
## # void do_nsISupportsIs3(out nsIIDRef iid, [iid_is(iid)] inout nsQIResult result);
## return self._com_interfaces_, interface
## def do_nsISupportsIs4(self):
## # void do_nsISupportsIs4(out nsIIDRef iid, [iid_is(iid)] out nsQIResult result);
## return self._com_interfaces_, self
# Methods from the nsIPythonTestInterfaceExtra interface
#
def MultiplyEachItemInIntegerArray(self, val, valueArray):
# void MultiplyEachItemInIntegerArray(
# in PRInt32 val,
# in PRUint32 count,
# [array, size_is(count)] inout PRInt32 valueArray);
# NOTE - the "sizeis" params are never passed to or returned from Python!
results = []
for item in valueArray:
results.append(item * val)
return results
def MultiplyEachItemInIntegerArrayAndAppend(self, val, valueArray):
#void MultiplyEachItemInIntegerArrayAndAppend(
# in PRInt32 val,
# inout PRUint32 count,
# [array, size_is(count)] inout PRInt32 valueArray);
results = valueArray[:]
for item in valueArray:
results.append(item * val)
return results
def DoubleStringArray(self, valueArray):
# void DoubleStringArray(inout PRUint32 count,
# [array, size_is(count)] inout string valueArray);
results = []
for item in valueArray:
results.append(item * 2)
return results
def ReverseStringArray(self, valueArray):
# void ReverseStringArray(in PRUint32 count,
# [array, size_is(count)] inout string valueArray);
valueArray.reverse()
return valueArray
# Note that this method shares a single "size_is" between 2 params!
def CompareStringArrays(self, ar1, ar2):
# void CompareStringArrays([array, size_is(count)] in string arr1,
# [array, size_is(count)] in string arr2,
# in unsigned long count,
# [retval] out short result);
return cmp(ar1, ar2)
def DoubleString(self, val):
# void DoubleString(inout PRUint32 count,
# [size_is(count)] inout string str);
return val * 2
def DoubleString2(self, val):
# void DoubleString2(in PRUint32 in_count, [size_is(in_count)] in string in_str,
# out PRUint32 out_count, [size_is(out_count)] out string out_str);
return val * 2
def DoubleString3(self, val):
# void DoubleString3(in PRUint32 in_count, [size_is(in_count)] in string in_str,
# out PRUint32 out_count, [size_is(out_count), retval] string out_str);
return val * 2
def DoubleString4(self, val):
# void DoubleString4([size_is(count)] in string in_str, inout PRUint32 count, [size_is(count)] out string out_str);
return val * 2
def UpString(self, val):
# // UpString defines the count as only "in" - meaning the result must be the same size
# void UpString(in PRUint32 count,
# [size_is(count)] inout string str);
return val.upper()
UpString2 = UpString
# // UpString2 defines count as only "in", and a string as only "out"
# void UpString2(in PRUint32 count,
# [size_is(count)] inout string in_str,
# [size_is(count)]out string out_str);
def GetFixedString(self, count):
# void GetFixedString(in PRUint32 count, [size_is(count)out string out_str);
return "A" * count
# DoubleWideString functions are identical to DoubleString, except use wide chars!
def DoubleWideString(self, val):
return val * 2
def DoubleWideString2(self, val):
return val * 2
def DoubleWideString3(self, val):
return val * 2
def DoubleWideString4(self, val):
return val * 2
def UpWideString(self, val):
return val.upper()
UpWideString2 = UpWideString
def CopyUTF8String(self, v):
return v
def CopyUTF8String2(self, v):
return v.encode("utf8")
# Test we can get an "out" array with an "in" size (and the size is not used anywhere as a size for an in!)
def GetFixedWideString(self, count):
# void GetFixedWideString(in PRUint32 count, [size_is(count)out string out_str);
return u"A" * count
def GetStrings(self):
# void GetStrings(out PRUint32 count,
# [retval, array, size_is(count)] out string str);
return "Hello from the Python test component".split()
# Some tests for our special "PRUint8" support.
def UpOctetArray( self, data ):
# void UpOctetArray(inout PRUint32 count,
# [array, size_is(count)] inout PRUint8 data);
return data.upper()
def UpOctetArray2( self, data ):
# void UpOctetArray2(inout PRUint32 count,
# [array, size_is(count)] inout PRUint8 data);
data = data.upper()
# This time we return a list of integers.
return map( ord, data )
# Arrays of interfaces
def CheckInterfaceArray(self, interfaces):
# void CheckInterfaceArray(in PRUint32 count,
# [array, size_is(count)] in nsISupports data,
# [retval] out PRBool all_non_null);
ret = 1
for i in interfaces:
if i is None:
ret = 0
break
return ret
def CopyInterfaceArray(self, a):
return a
def GetInterfaceArray(self):
# void GetInterfaceArray(out PRUint32 count,
# [array, size_is(count)] out nsISupports data);
return self, self, self, None
def ExtendInterfaceArray(self, data):
# void ExtendInterfaceArray(inout PRUint32 count,
# [array, size_is(count)] inout nsISupports data);
return data * 2
# Arrays of IIDs
def CheckIIDArray(self, data):
# void CheckIIDArray(in PRUint32 count,
# [array, size_is(count)] in nsIIDRef data,
# [retval] out PRBool all_mine);
ret = 1
for i in data:
if i!= self._com_interfaces_ and i != self._reg_clsid_:
ret = 0
break
return ret
def GetIIDArray(self):
# void GetIIDArray(out PRUint32 count,
# [array, size_is(count)] out nsIIDRef data);
return self._com_interfaces_, self._reg_clsid_
def ExtendIIDArray(self, data):
# void ExtendIIDArray(inout PRUint32 count,
# [array, size_is(count)] inout nsIIDRef data);
return data * 2
# Test our count param can be shared as an "in" param.
def SumArrays(self, array1, array2):
# void SumArrays(in PRUint32 count, [array, size_is(count)]in array1, [array, size_is(count)]in array2, [retval]result);
if len(array1)!=len(array2):
print "SumArrays - not expecting different lengths!"
result = 0
for i in array1:
result = result + i
for i in array2:
result = result+i
return result
# Test our count param can be shared as an "out" param.
def GetArrays(self):
# void GetArrays(out PRUint32 count, [array, size_is(count)]out array1, [array, size_is(count)]out array2);
return (1,2,3), (4,5,6)
# Test we can get an "out" array with an "in" size
def GetFixedArray(self, size):
# void GetFixedArray(in PRUint32 count, [array, size_is(count)]out PRInt32 array1]);
        return [0] * size  # an array of `size` zeros; `0 * size` would just be the integer 0
# Test our "in" count param can be shared as one "in", plus one "out" param.
def CopyArray(self, array1):
# void CopyArray(in PRUint32 count, [array, size_is(count)]in array1, [array, size_is(count)]out array2);
return array1
# Test our "in-out" count param can be shared as one "in", plus one "out" param.
def CopyAndDoubleArray(self, array):
# void CopyAndDoubleArray(inout PRUint32 count, [array, size_is(count)]in array1, [array, size_is(count)]out array2);
return array + array
# Test our "in-out" count param can be shared as one "in", plus one "in-out" param.
def AppendArray(self, array1, array2):
# void AppendArray(inout PRUint32 count, [array, size_is(count)]in array1, [array, size_is(count)]inout array2);
rc = array1
if array2 is not None:
rc.extend(array2)
return rc
# Test nsIVariant support
def AppendVariant(self, invar, inresult):
if type(invar)==type([]):
invar_use = invar[0]
for v in invar[1:]:
invar_use += v
else:
invar_use = invar
if type(inresult)==type([]):
inresult_use = inresult[0]
for v in inresult[1:]:
inresult_use += v
else:
inresult_use = inresult
if inresult_use is None and invar_use is None:
return None
return inresult_use + invar_use
def CopyVariant(self, invar):
return invar
def SumVariants(self, variants):
if len(variants) == 0:
return None
result = variants[0]
for v in variants[1:]:
result += v
return result
# Some tests for the "new" (Feb-2001) DOMString type.
def GetDOMStringResult( self, length ):
# Result: DOMString &
if length == -1:
return None
return "P" * length
def GetDOMStringOut( self, length ):
# Result: DOMString &
if length == -1:
return None
return "y" * length
def GetDOMStringLength( self, param0 ):
# Result: uint32
# In: param0: DOMString &
if param0 is None: return -1
return len(param0)
def GetDOMStringRefLength( self, param0 ):
# Result: uint32
# In: param0: DOMString &
if param0 is None: return -1
return len(param0)
def GetDOMStringPtrLength( self, param0 ):
# Result: uint32
# In: param0: DOMString *
if param0 is None: return -1
return len(param0)
def ConcatDOMStrings( self, param0, param1 ):
# Result: void - None
# In: param0: DOMString &
# In: param1: DOMString &
# Out: DOMString &
return param0 + param1
def get_domstring_value( self ):
# Result: DOMString &
return self.domstring_value
def set_domstring_value( self, param0 ):
# Result: void - None
# In: param0: DOMString &
self.domstring_value = param0
def get_domstring_value_ro( self ):
# Result: DOMString &
return self.domstring_value
|
docs/examples/compute/cloudframes/auth_kwargs.py | dupontz/libcloud | 1,435 | 11147641 | from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
CloudFrames = get_driver(Provider.CLOUDFRAMES)
driver = CloudFrames(key='admin', secret='admin', secure=False,
host='cloudframes', port=80)
|
dags/ethereumetl_airflow/parse/parse_logic.py | datawaves-xyz/ethereum-etl-airflow | 204 | 11147660 | <reponame>datawaves-xyz/ethereum-etl-airflow<filename>dags/ethereumetl_airflow/parse/parse_logic.py
import json
import logging
import re
import time
from eth_utils import event_abi_to_log_topic, function_abi_to_4byte_selector
from google.cloud import bigquery
from google.api_core.exceptions import Conflict
from ethereumetl_airflow.bigquery_utils import submit_bigquery_job, read_bigquery_schema_from_json_recursive, query, \
create_view, does_table_exist
from ethereumetl_airflow.parse.templates import render_parse_udf_template, render_parse_sql_template, \
render_merge_template, render_stitch_view_template
ref_regex = re.compile(r"ref\(\'([^']+)\'\)")
def parse(
bigquery_client,
table_definition,
ds,
source_project_id,
source_dataset_name,
destination_project_id,
sqls_folder,
parse_all_partitions,
time_func=time.time
):
# Refer to this issue for more detail https://github.com/blockchain-etl/ethereum-etl-airflow/issues/80.
internal_project_id = destination_project_id + '-internal'
dataset_name = 'ethereum_' + table_definition['table']['dataset_name']
create_or_replace_internal_view(
bigquery_client=bigquery_client,
dataset_name=dataset_name,
table_definition=table_definition,
ds=ds,
public_project_id=source_project_id,
public_dataset_name=source_dataset_name,
internal_project_id=internal_project_id,
destination_project_id=destination_project_id,
sqls_folder=sqls_folder
)
dataset = create_dataset(bigquery_client, dataset_name, internal_project_id)
table_name = table_definition['table']['table_name']
history_table_name = table_name + '_history'
if parse_all_partitions is None:
history_table_ref = dataset.table(history_table_name)
history_table_exists = does_table_exist(bigquery_client, history_table_ref)
parse_all_partitions = not history_table_exists
logging.info('parse_all_partitions is set to {}'.format(str(parse_all_partitions)))
create_or_update_history_table(
bigquery_client=bigquery_client,
dataset_name=dataset_name,
history_table_name=history_table_name,
table_definition=table_definition,
ds=ds,
public_project_id=source_project_id,
public_dataset_name=source_dataset_name,
internal_project_id=internal_project_id,
destination_project_id=destination_project_id,
sqls_folder=sqls_folder,
parse_all_partitions=parse_all_partitions,
time_func=time_func
)
create_or_replace_stitch_view(
bigquery_client=bigquery_client,
dataset_name=dataset_name,
table_definition=table_definition,
ds=ds,
internal_project_id=internal_project_id,
destination_project_id=destination_project_id,
sqls_folder=sqls_folder,
)
def create_or_replace_internal_view(
bigquery_client,
dataset_name,
table_definition,
ds,
public_project_id,
public_dataset_name,
internal_project_id,
destination_project_id,
sqls_folder
):
table_name = table_definition['table']['table_name']
parser_type = table_definition['parser'].get('type', 'log')
udf_name = 'parse_{}'.format(table_name)
dataset = create_dataset(bigquery_client, dataset_name, internal_project_id)
# # # Create UDF
sql = render_parse_udf_template(
sqls_folder,
parser_type,
internal_project_id=internal_project_id,
dataset_name=dataset_name,
udf_name=udf_name,
abi=json.dumps(table_definition['parser']['abi']),
struct_fields=create_struct_string_from_schema(table_definition['table']['schema'])
)
query(bigquery_client, sql)
# # # Create view
selector = abi_to_selector(parser_type, table_definition['parser']['abi'])
parse_mode = get_parse_mode(HistoryType.LIVE)
full_source_table_name = get_source_table(
parser_type=parser_type,
parse_mode=parse_mode,
ds=ds,
internal_project_id=internal_project_id,
public_project_id=public_project_id,
public_dataset_name=public_dataset_name,
selector=selector
)
sql = generate_parse_sql_template(
sqls_folder,
parser_type,
parse_mode,
full_source_table_name=full_source_table_name,
selector=selector,
internal_project_id=internal_project_id,
destination_project_id=destination_project_id,
dataset_name=dataset_name,
udf_name=udf_name,
table_definition=table_definition,
parse_all_partitions=None,
ds=ds
)
dest_view_ref = dataset.table(table_name)
create_view(bigquery_client, sql, dest_view_ref)
def create_or_update_history_table(
bigquery_client,
dataset_name,
history_table_name,
table_definition,
ds,
public_project_id,
public_dataset_name,
internal_project_id,
destination_project_id,
sqls_folder,
parse_all_partitions,
time_func=time.time
):
table_name = table_definition['table']['table_name']
schema = table_definition['table']['schema']
parser_type = table_definition['parser'].get('type', 'log')
schema = read_bigquery_schema_from_dict(schema, parser_type)
# # # Create a temporary table
dataset_name_temp = 'parse_temp'
create_dataset(bigquery_client, dataset_name_temp)
temp_table_name = 'temp_{table_name}_{milliseconds}' \
.format(table_name=table_name, milliseconds=int(round(time_func() * 1000)))
temp_table_ref = bigquery_client.dataset(dataset_name_temp).table(temp_table_name)
temp_table = bigquery.Table(temp_table_ref, schema=schema)
table_description = table_definition['table']['table_description']
temp_table.description = table_description
temp_table.time_partitioning = bigquery.TimePartitioning(field='block_timestamp')
logging.info('Creating table: ' + json.dumps(temp_table.to_api_repr()))
temp_table = bigquery_client.create_table(temp_table)
assert temp_table.table_id == temp_table_name
# # # Query to temporary table
udf_name = 'parse_{}'.format(table_name)
selector = abi_to_selector(parser_type, table_definition['parser']['abi'])
parse_mode = get_parse_mode(HistoryType.HISTORY, parse_all_partitions=parse_all_partitions)
full_source_table_name = get_source_table(
parser_type=parser_type,
parse_mode=parse_mode,
ds=ds,
internal_project_id=internal_project_id,
public_project_id=public_project_id,
public_dataset_name=public_dataset_name,
selector=selector
)
sql = generate_parse_sql_template(
sqls_folder,
parser_type,
parse_mode,
full_source_table_name=full_source_table_name,
selector=selector,
internal_project_id=internal_project_id,
destination_project_id=destination_project_id,
dataset_name=dataset_name,
udf_name=udf_name,
table_definition=table_definition,
parse_all_partitions=parse_all_partitions,
ds=ds
)
query(bigquery_client, sql, destination=temp_table_ref)
# # # Copy / merge to destination
if parse_all_partitions:
# Copy temporary table to destination
copy_job_config = bigquery.CopyJobConfig()
copy_job_config.write_disposition = 'WRITE_TRUNCATE'
dataset = create_dataset(bigquery_client, dataset_name, internal_project_id)
dest_table_ref = dataset.table(history_table_name)
copy_job = bigquery_client.copy_table(temp_table_ref, dest_table_ref, location='US', job_config=copy_job_config)
submit_bigquery_job(copy_job, copy_job_config)
assert copy_job.state == 'DONE'
# Need to do update description as copy above won't respect the description in case destination table
# already exists
table = bigquery_client.get_table(dest_table_ref)
table.description = table_description
table = bigquery_client.update_table(table, ["description"])
assert table.description == table_description
else:
merge_sql = render_merge_template(
sqls_folder,
table_schema=schema,
internal_project_id=internal_project_id,
dataset_name=dataset_name,
destination_table_name=history_table_name,
dataset_name_temp=dataset_name_temp,
source_table=temp_table_name,
ds=ds
)
query(bigquery_client, merge_sql)
# Delete temp table
bigquery_client.delete_table(temp_table_ref)
def create_or_replace_stitch_view(
bigquery_client,
dataset_name,
table_definition,
ds,
destination_project_id,
internal_project_id,
sqls_folder
):
table_name = table_definition['table']['table_name']
history_table_name = table_name + '_history'
# # # Create view
sql = render_stitch_view_template(
sqls_folder=sqls_folder,
internal_project_id=internal_project_id,
dataset_name=dataset_name,
table_name=table_name,
history_table_name=history_table_name,
ds=ds
)
print('Stitch view: ' + sql)
dataset = create_dataset(bigquery_client, dataset_name, destination_project_id)
dest_view_ref = dataset.table(table_name)
create_view(bigquery_client, sql, dest_view_ref)
def get_parse_mode(
history_type,
parse_all_partitions=None,
):
if history_type == HistoryType.HISTORY:
if parse_all_partitions is None:
raise ValueError('If history_type is "history" parse_all_partitions must be set to True or False')
if parse_all_partitions:
parse_mode = ParseMode.HISTORY_ALL_DATES
else:
parse_mode = ParseMode.HISTORY_SINGLE_DATE
elif history_type == HistoryType.LIVE:
parse_mode = ParseMode.LIVE
else:
raise ValueError(f'unknown history type {history_type}. Allowed values: history, live')
assert parse_mode is not None
return parse_mode
def get_source_table(
parser_type,
parse_mode,
ds,
internal_project_id,
public_project_id,
public_dataset_name,
selector
):
partitioned_dataset_name = 'crypto_ethereum_partitioned'
if parse_mode == ParseMode.HISTORY_ALL_DATES:
source_project_id = public_project_id
source_dataset_name = public_dataset_name
if parser_type == 'log':
source_table_name = 'logs'
elif parser_type == 'trace':
source_table_name = 'traces'
else:
raise ValueError(f'unknown parser type {parser_type}')
elif parse_mode == ParseMode.HISTORY_SINGLE_DATE:
if ds is None:
raise ValueError('If history_type is "history" and parse_all_partitions is True ds must be provided')
source_project_id = internal_project_id
source_dataset_name = partitioned_dataset_name
if parser_type == 'log':
source_table_name = 'logs_by_date_' + ds.replace('-', '_')
elif parser_type == 'trace':
source_table_name = 'traces_by_date_' + ds.replace('-', '_')
else:
raise ValueError(f'unknown parser type {parser_type}')
elif parse_mode == ParseMode.LIVE:
source_project_id = internal_project_id
source_dataset_name = partitioned_dataset_name
table_suffix = selector[:5]
if parser_type == 'log':
table_prefix = 'logs_by_topic_'
elif parser_type == 'trace':
table_prefix = 'traces_by_input_'
else:
raise ValueError(f'unknown parser type {parser_type}')
source_table_name = table_prefix + table_suffix
else:
raise ValueError(f'unknown parse mode {parse_mode}. Allowed values: history_all_dates, history_single_date, live')
return f'{source_project_id}.{source_dataset_name}.{source_table_name}'
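# Illustrative result, derived from the branches above: in live mode a log
# parser whose selector begins with 0xddf2... reads from
# <internal_project_id>.crypto_ethereum_partitioned.logs_by_topic_0xddf.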
def generate_parse_sql_template(
sqls_folder,
parser_type,
parse_mode,
full_source_table_name,
selector,
internal_project_id,
destination_project_id,
dataset_name,
udf_name,
table_definition,
parse_all_partitions,
ds):
contract_address = table_definition['parser']['contract_address']
if contract_address is not None and not contract_address.startswith('0x'):
table_definition['parser']['contract_address_sql'] = replace_refs(
contract_address, ref_regex, destination_project_id, dataset_name
)
sql = render_parse_sql_template(
sqls_folder,
parser_type,
parse_mode=parse_mode,
full_source_table_name=full_source_table_name,
internal_project_id=internal_project_id,
dataset_name=dataset_name,
udf_name=udf_name,
parser=table_definition['parser'],
table=table_definition['table'],
selector=selector,
parse_all_partitions=parse_all_partitions,
ds=ds
)
return sql
def create_struct_string_from_schema(schema):
def get_type(field):
if field.get('type') == 'RECORD':
type_str = 'STRUCT<{struct_string}>'.format(
struct_string=create_struct_string_from_schema(field.get('fields')))
else:
type_str = field.get('type')
if field.get('mode') == 'REPEATED':
type_str = 'ARRAY<{type}>'.format(type=type_str)
return type_str
def get_field_def(field):
return '`' + field.get('name') + '` ' + get_type(field)
fields = [get_field_def(field) for field in schema]
return ', '.join(fields)
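# For example (derived from the logic above), the schema
#   [{'name': 'owner', 'type': 'STRING'},
#    {'name': 'values', 'type': 'INTEGER', 'mode': 'REPEATED'}]
# renders as "`owner` STRING, `values` ARRAY<INTEGER>"; the field names are
# illustrative only.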
def replace_refs(contract_address, ref_regex, project_id, dataset_name):
return ref_regex.sub(
r"`{project_id}.{dataset_name}.\g<1>`".format(
project_id=project_id, dataset_name=dataset_name
), contract_address)
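# e.g. replace_refs("ref('my_view')", ref_regex, "my-project", "my_dataset")
# returns "`my-project.my_dataset.my_view`" (the names here are placeholders).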
def create_dataset(client, dataset_name, project=None):
dataset = client.dataset(dataset_name, project=project)
try:
logging.info('Creating new dataset ...')
dataset = client.create_dataset(dataset)
logging.info('New dataset created: ' + dataset_name)
except Conflict as error:
logging.info('Dataset already exists')
return dataset
def read_bigquery_schema_from_dict(schema, parser_type):
result = [
bigquery.SchemaField(
name='block_timestamp',
field_type='TIMESTAMP',
mode='REQUIRED',
description='Timestamp of the block where this event was emitted'),
bigquery.SchemaField(
name='block_number',
field_type='INTEGER',
mode='REQUIRED',
description='The block number where this event was emitted'),
bigquery.SchemaField(
name='transaction_hash',
field_type='STRING',
mode='REQUIRED',
description='Hash of the transactions in which this event was emitted')
]
if parser_type == 'log':
result.append(bigquery.SchemaField(
name='log_index',
field_type='INTEGER',
mode='REQUIRED',
description='Integer of the log index position in the block of this event'))
result.append(bigquery.SchemaField(
name='contract_address',
field_type='STRING',
mode='REQUIRED',
description='Address of the contract that produced the log'))
elif parser_type == 'trace':
result.append(bigquery.SchemaField(
name='transaction_index',
field_type='INTEGER',
description='Integer of the transactions index position in the block'))
result.append(bigquery.SchemaField(
name='trace_address',
field_type='STRING',
description='Comma separated list of trace address in call tree'))
result.append(bigquery.SchemaField(
name='to_address',
field_type='STRING',
description='Address of the called contract'))
result.append(bigquery.SchemaField(
name='status',
field_type='INT64',
description='Either 1 (success) or 0 (failure, due to any operation that can cause the call itself or any top-level call to revert)'))
result.append(bigquery.SchemaField(
name='error',
field_type='STRING',
description='Error in case input parsing failed'))
result.extend(read_bigquery_schema_from_json_recursive(schema))
return result
def abi_to_selector(parser_type, abi):
if parser_type == 'log':
return '0x' + event_abi_to_log_topic(abi).hex()
else:
return '0x' + function_abi_to_4byte_selector(abi).hex()
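# For reference: with the standard ERC-20 Transfer(address,address,uint256)
# event ABI this yields the familiar log topic
# 0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef, and with
# a transfer(address,uint256) function ABI the 4-byte selector 0xa9059cbb.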
class HistoryType:
LIVE = 'live'
HISTORY = 'history'
class ParseMode:
LIVE = 'live'
HISTORY_ALL_DATES = 'history_all_dates'
HISTORY_SINGLE_DATE = 'history_single_date'
|
Recommender_System/algorithm/MKR/layer.py | Holldean/Recommender-System | 348 | 11147675 | import tensorflow as tf
class CrossLayer(tf.keras.layers.Layer):
def call(self, inputs):
v, e = inputs # (batch, dim)
v = tf.expand_dims(v, axis=2) # (batch, dim, 1)
e = tf.expand_dims(e, axis=1) # (batch, 1, dim)
c_matrix = tf.matmul(v, e) # (batch, dim, dim)
c_matrix_t = tf.transpose(c_matrix, perm=[0, 2, 1]) # (batch, dim, dim)
return c_matrix, c_matrix_t
class CompressLayer(tf.keras.layers.Layer):
def __init__(self, weight_regularizer, **kwargs):
super(CompressLayer, self).__init__(**kwargs)
self.weight_regularizer = tf.keras.regularizers.get(weight_regularizer)
def build(self, input_shape):
self.dim = input_shape[0][-1]
self.weight = self.add_weight(shape=(self.dim, 1), regularizer=self.weight_regularizer, name='weight')
self.weight_t = self.add_weight(shape=(self.dim, 1), regularizer=self.weight_regularizer, name='weight_t')
self.bias = self.add_weight(shape=self.dim, initializer='zeros', name='bias')
def call(self, inputs):
c_matrix, c_matrix_t = inputs # (batch, dim, dim)
c_matrix = tf.reshape(c_matrix, shape=[-1, self.dim]) # (batch * dim, dim)
c_matrix_t = tf.reshape(c_matrix_t, shape=[-1, self.dim]) # (batch * dim, dim)
return tf.reshape(tf.matmul(c_matrix, self.weight) + tf.matmul(c_matrix_t, self.weight_t),
shape=[-1, self.dim]) + self.bias # (batch, dim)
def cross_compress_unit(inputs, weight_regularizer):
cross_feature_matrix = CrossLayer()(inputs)
v_out = CompressLayer(weight_regularizer)(cross_feature_matrix)
e_out = CompressLayer(weight_regularizer)(cross_feature_matrix)
return v_out, e_out
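# Minimal usage sketch, kept as a comment (the batch size and embedding
# dimension are illustrative, and 'l2' is just one valid regularizer string):
# v = tf.random.normal([32, 8])   # item embeddings, shape (batch, dim)
# e = tf.random.normal([32, 8])   # entity embeddings, shape (batch, dim)
# v_out, e_out = cross_compress_unit([v, e], 'l2')   # both come back as (32, 8)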
|
models/model_variants.py | Minsoo2022/Pose-Transfer | 692 | 11147706 | import torch.nn as nn
import functools
import torch
import torch.nn.functional as F
from torch.autograd import Variable
class PATBlock(nn.Module):
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias, cated_stream2=False):
super(PATBlock, self).__init__()
self.conv_block_stream1 = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias, cal_att=False)
self.conv_block_stream2 = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias, cal_att=True, cated_stream2=cated_stream2)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias, cated_stream2=False, cal_att=False):
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
if cated_stream2:
conv_block += [nn.Conv2d(dim*2, dim*2, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim*2),
nn.ReLU(True)]
else:
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim),
nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
if cal_att:
if cated_stream2:
conv_block += [nn.Conv2d(dim*2, dim, kernel_size=3, padding=p, bias=use_bias)]
else:
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias)]
else:
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x1, x2):
x1_out = self.conv_block_stream1(x1)
x2_out = self.conv_block_stream2(x2)
att = F.sigmoid(x2_out)
x1_out = x1_out * att
out = x1 + x1_out # residual connection
# stream2 receive feedback from stream1
x2_out = torch.cat((x2_out, out), 1)
return out, x2_out, x1_out
class PATNModel(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[], padding_type='reflect', n_downsampling=2):
assert(n_blocks >= 0 and type(input_nc) == list)
super(PATNModel, self).__init__()
self.input_nc_s1 = input_nc[0]
self.input_nc_s2 = input_nc[1]
self.output_nc = output_nc
self.ngf = ngf
self.gpu_ids = gpu_ids
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
# down_sample
model_stream1_down = [nn.ReflectionPad2d(3),
nn.Conv2d(self.input_nc_s1, ngf, kernel_size=7, padding=0,
bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
model_stream2_down = [nn.ReflectionPad2d(3),
nn.Conv2d(self.input_nc_s2, ngf, kernel_size=7, padding=0,
bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
# n_downsampling = 2
for i in range(n_downsampling):
mult = 2**i
model_stream1_down += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
model_stream2_down += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
# att_block in place of res_block
mult = 2**n_downsampling
cated_stream2 = [True for i in range(n_blocks)]
cated_stream2[0] = False
attBlock = nn.ModuleList()
for i in range(n_blocks):
attBlock.append(PATBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias, cated_stream2=cated_stream2[i]))
# up_sample
model_stream1_up = []
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
model_stream1_up += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model_stream1_up += [nn.ReflectionPad2d(3)]
model_stream1_up += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model_stream1_up += [nn.Tanh()]
# self.model = nn.Sequential(*model)
self.stream1_down = nn.Sequential(*model_stream1_down)
self.stream2_down = nn.Sequential(*model_stream2_down)
# self.att = nn.Sequential(*attBlock)
self.att = attBlock
self.stream1_up = nn.Sequential(*model_stream1_up)
def forward(self, input): # x from stream 1 and stream 2
# here x should be a tuple
x1, x2 = input
# down_sample
x1 = self.stream1_down(x1)
x2 = self.stream2_down(x2)
# att_block
for model in self.att:
x1, x2, _ = model(x1, x2)
# up_sample
x1 = self.stream1_up(x1)
return x1
class PATNetwork(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[], padding_type='reflect', n_downsampling=2):
super(PATNetwork, self).__init__()
        assert type(input_nc) == list and len(input_nc) == 2, 'The AttModule takes input_nc only as a list of two elements!'
self.gpu_ids = gpu_ids
self.model = PATNModel(input_nc, output_nc, ngf, norm_layer, use_dropout, n_blocks, gpu_ids, padding_type, n_downsampling=n_downsampling)
def forward(self, input):
if self.gpu_ids and isinstance(input[0].data, torch.cuda.FloatTensor):
return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
else:
return self.model(input)
|
qmpy/configuration/vasp_settings/__init__.py | tachyontraveler/qmpy | 103 | 11147727 | import yaml
import os, os.path
from .inputs import *
from qmpy import INSTALL_PATH
vs_path = os.path.dirname(os.path.abspath(__file__))
thubbards = yaml.safe_load(open(vs_path + "/hubbards.yml").read())
hubbards = {}
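# keys in hubbards.yml have the form "element_ligand_oxidationstate"; "*" matches any oxidation state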
for setting, data in list(thubbards.items()):
hubbards[setting] = {}
for k, v in list(data.items()):
elt, lig, ox = k.split("_")
if ox == "*":
hubbards[setting][(elt, lig, None)] = v
else:
hubbards[setting][(elt, lig, float(ox))] = v
HUBBARDS = hubbards
POTENTIALS = yaml.safe_load(open(vs_path + "/potentials.yml").read())
|
cloudbaseinit/tests/plugins/windows/test_winrmlistener.py | andia10240/cloudbase-init | 160 | 11147730 | # Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import importlib
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from cloudbaseinit import conf as cloudbaseinit_conf
from cloudbaseinit.plugins.common import base
from cloudbaseinit.tests import testutils
CONF = cloudbaseinit_conf.CONF
class ConfigWinRMListenerPluginTests(unittest.TestCase):
def setUp(self):
self._mock_wintypes = mock.MagicMock()
self._mock_pywintypes = mock.MagicMock()
self._mock_win32 = mock.MagicMock()
self._moves_mock = mock.MagicMock()
self._module_patcher = mock.patch.dict(
'sys.modules',
{'ctypes': self._mock_wintypes,
'ctypes.wintypes': self._mock_wintypes,
'pywintypes': self._mock_pywintypes,
'win32com': self._mock_win32,
'six.moves': self._moves_mock
})
self._module_patcher.start()
self._winreg_mock = self._moves_mock.winreg
winrmlistener = importlib.import_module('cloudbaseinit.plugins.'
'windows.winrmlistener')
self._winrmlistener = winrmlistener.ConfigWinRMListenerPlugin()
def tearDown(self):
self._module_patcher.stop()
def _test_check_winrm_service(self, service_exists):
mock_osutils = mock.MagicMock()
mock_osutils.check_service_exists.return_value = service_exists
mock_osutils.SERVICE_START_MODE_MANUAL = 'fake start'
mock_osutils.SERVICE_START_MODE_DISABLED = 'fake start'
mock_osutils.SERVICE_STATUS_STOPPED = 'fake status'
mock_osutils.get_service_start_mode.return_value = 'fake start'
mock_osutils.get_service_status.return_value = 'fake status'
with testutils.LogSnatcher('cloudbaseinit.plugins.windows.'
'winrmlistener') as snatcher:
response = self._winrmlistener._check_winrm_service(mock_osutils)
if not service_exists:
expected_logging = [
"Cannot configure the WinRM listener as the service "
"is not available"
]
self.assertEqual(expected_logging, snatcher.output)
self.assertFalse(response)
else:
mock_osutils.get_service_start_mode.assert_called_once_with(
self._winrmlistener._winrm_service_name)
mock_osutils.get_service_start_mode.assert_called_once_with(
self._winrmlistener._winrm_service_name)
mock_osutils.set_service_start_mode.assert_called_once_with(
self._winrmlistener._winrm_service_name,
                mock_osutils.SERVICE_START_MODE_AUTOMATIC)
mock_osutils.get_service_status.assert_called_once_with(
self._winrmlistener._winrm_service_name)
mock_osutils.start_service.assert_called_once_with(
self._winrmlistener._winrm_service_name)
self.assertTrue(response)
def test_check_winrm_service(self):
self._test_check_winrm_service(service_exists=True)
def test_check_winrm_service_no_service(self):
self._test_check_winrm_service(service_exists=False)
@mock.patch('cloudbaseinit.utils.windows.security.'
'WindowsSecurityUtils')
def _test_check_uac_remote_restrictions(self, mock_SecurityUtils,
disable_uac_remote_restrictions):
mock_security_utils = mock.MagicMock()
mock_SecurityUtils.return_value = mock_security_utils
mock_osutils = mock.Mock()
mock_osutils.check_os_version.side_effect = [True, False]
if disable_uac_remote_restrictions:
mock_security_utils.get_uac_remote_restrictions.return_value = \
disable_uac_remote_restrictions
with self._winrmlistener._check_uac_remote_restrictions(mock_osutils):
mock_SecurityUtils.assert_called_once_with()
mock_osutils.check_os_version.assert_has_calls(
[mock.call(6, 0), mock.call(6, 2)])
(mock_security_utils.get_uac_remote_restrictions.
assert_called_once_with())
if disable_uac_remote_restrictions:
expected_set_token_calls = [mock.call(enable=True)]
else:
expected_set_token_calls = [mock.call(enable=False),
mock.call(enable=True)]
mock_security_utils.set_uac_remote_restrictions.has_calls(
expected_set_token_calls)
def test_check_uac_remote_restrictions(self):
self._test_check_uac_remote_restrictions(
disable_uac_remote_restrictions=True)
def test_check_uac_remote_restrictions_no_disable_restrictions(self):
self._test_check_uac_remote_restrictions(
disable_uac_remote_restrictions=False)
def _test_configure_winrm_listener(self, has_listener=True):
mock_listener_config = mock.MagicMock()
mock_winrm_config = mock.MagicMock()
mock_osutils = mock.MagicMock()
mock_osutils.PROTOCOL_TCP = mock.sentinel.PROTOCOL_TCP
mock_winrm_config.get_listener.side_effect = [
has_listener, mock_listener_config]
port = 9999
protocol = mock.sentinel.protocol
cert_thumbprint = mock.sentinel.cert_thumbprint
mock_listener_config.get.return_value = port
self._winrmlistener._configure_winrm_listener(
mock_osutils, mock_winrm_config, protocol, cert_thumbprint)
if has_listener:
mock_winrm_config.delete_listener.assert_called_once_with(
protocol=protocol)
mock_winrm_config.create_listener.assert_called_once_with(
cert_thumbprint=cert_thumbprint, protocol=protocol)
mock_listener_config.get.assert_called_once_with("Port")
mock_osutils.firewall_create_rule.assert_called_once_with(
"WinRM %s" % protocol, port, mock_osutils.PROTOCOL_TCP)
def test_configure_winrm_listener(self):
self._test_configure_winrm_listener()
def test_configure_winrm_listener_no_initial_listener(self):
self._test_configure_winrm_listener(has_listener=False)
def _test_get_winrm_listeners_config(self, listeners_config=None,
http_listener=None,
https_listener=None):
winrmconfig = importlib.import_module('cloudbaseinit.utils.'
'windows.winrmconfig')
mock_service = mock.MagicMock()
mock_service.get_winrm_listeners_configuration.return_value = \
listeners_config
expected_result = listeners_config
if listeners_config is None:
expected_result = []
if http_listener:
expected_result.append(
{"protocol": winrmconfig.LISTENER_PROTOCOL_HTTP})
if https_listener:
expected_result.append(
{"protocol": winrmconfig.LISTENER_PROTOCOL_HTTPS})
with testutils.ConfPatcher("winrm_configure_http_listener",
http_listener):
with testutils.ConfPatcher("winrm_configure_https_listener",
https_listener):
result = self._winrmlistener._get_winrm_listeners_config(
mock_service)
self.assertEqual(result, expected_result)
def test_get_winrm_listeners_config_has_listeners(self):
self._test_get_winrm_listeners_config(
listeners_config=mock.sentinel.listeners)
def test_get_winrm_listeners_config_http_listener(self):
self._test_get_winrm_listeners_config(http_listener=True)
def test_get_winrm_listeners_config_https_listener(self):
self._test_get_winrm_listeners_config(https_listener=True)
@mock.patch('cloudbaseinit.utils.windows.x509.CryptoAPICertManager')
def test_create_self_signed_certificate(self, mock_CryptoAPICertManager):
mock_cert_mgr = mock.MagicMock()
mock_CryptoAPICertManager.return_value = mock_cert_mgr
mock_cert_mgr.create_self_signed_cert.return_value = \
mock.sentinel.cert_thumbprint, mock.sentinel.cert_str
result = self._winrmlistener._create_self_signed_certificate()
self.assertEqual(result, mock.sentinel.cert_thumbprint)
mock_CryptoAPICertManager.assert_called_once_with()
mock_cert_mgr.create_self_signed_cert.assert_called_once_with(
self._winrmlistener._cert_subject)
@mock.patch('cloudbaseinit.plugins.windows.winrmlistener.'
'ConfigWinRMListenerPlugin._configure_winrm_listener')
@mock.patch('cloudbaseinit.plugins.windows.winrmlistener.'
'ConfigWinRMListenerPlugin._check_uac_remote_restrictions')
@mock.patch('cloudbaseinit.plugins.windows.winrmlistener.'
'ConfigWinRMListenerPlugin._get_winrm_listeners_config')
@mock.patch('cloudbaseinit.osutils.factory.get_os_utils')
@mock.patch('cloudbaseinit.plugins.windows.winrmlistener.'
'ConfigWinRMListenerPlugin._check_winrm_service')
@mock.patch('cloudbaseinit.utils.windows.winrmconfig.WinRMConfig')
@mock.patch('cloudbaseinit.plugins.windows.winrmlistener'
'.ConfigWinRMListenerPlugin._create_self_signed_certificate')
def _test_execute(self, mock_create_cert, mock_WinRMConfig,
mock_check_winrm_service, mock_get_os_utils,
mock_get_winrm_listeners, mock_check_restrictions,
mock_configure_listener,
service_status=True, protocol=None,
listeners_config=True, certificate_thumbprint=None):
mock_winrm_config = mock.MagicMock()
mock_WinRMConfig.return_value = mock_winrm_config
mock_osutils = mock.MagicMock()
mock_get_os_utils.return_value = mock_osutils
mock_check_winrm_service.return_value = service_status
if not service_status:
expected_result = (base.PLUGIN_EXECUTE_ON_NEXT_BOOT, False)
elif not listeners_config:
mock_get_winrm_listeners.return_value = None
expected_result = (base.PLUGIN_EXECUTION_DONE, False)
else:
expected_result = (base.PLUGIN_EXECUTION_DONE, False)
if certificate_thumbprint is not None:
certificate_thumbprint = \
str(mock.sentinel.certificate_thumbprint)
listener_config = {
"protocol": protocol,
"certificate_thumbprint": certificate_thumbprint
}
mock_get_winrm_listeners.return_value = [listener_config]
winrm_enable_basic_auth = mock.Mock(spec=bool)
with testutils.ConfPatcher('winrm_enable_basic_auth',
winrm_enable_basic_auth):
result = self._winrmlistener.execute(
mock.sentinel.service, mock.sentinel.shared_data)
self.assertEqual(result, expected_result)
mock_get_os_utils.assert_called_once_with()
mock_check_winrm_service.assert_called_once_with(mock_osutils)
if service_status:
mock_get_winrm_listeners.assert_called_once_with(
mock.sentinel.service)
if listeners_config:
mock_check_restrictions.assert_called_once_with(mock_osutils)
mock_WinRMConfig.assert_called_once_with()
mock_winrm_config.set_auth_config.assert_called_once_with(
basic=winrm_enable_basic_auth)
winrmconfig = importlib.import_module('cloudbaseinit.utils.'
'windows.winrmconfig')
if (protocol == winrmconfig.LISTENER_PROTOCOL_HTTPS and
not certificate_thumbprint):
certificate_thumbprint = mock_create_cert.return_value
mock_create_cert.assert_called_once_with()
mock_configure_listener.assert_called_once_with(
mock_osutils, mock_winrm_config, protocol.upper(),
certificate_thumbprint)
def test_execute_service_status_is_false(self):
self._test_execute(service_status=False)
def test_execute_no_listeners_config(self):
self._test_execute(listeners_config=None)
def test_execute_http_protocol(self):
self._test_execute(protocol=str(mock.sentinel.http))
def test_execute_https_protocol(self):
self._test_execute(protocol="HTTPS")
|
tests/test_client_tasks.py | FiyaFly/python-asana | 266 | 11147743 | from .helpers import *
class TestClientTasks(ClientTestCase):
def test_tasks_create(self):
req = {
"data": {
"assignee": 1235,
"followers": [5678],
"name": "<NAME>.",
"notes": "How are you today?",
"workspace": 14916
}
}
res = {
"data": {
"assignee": { "id": 1235, "name": "<NAME>" },
"assignee_status": "inbox",
"completed": false,
"completed_at": null,
"created_at": "2012-02-22T02:06:58.158Z",
"due_on": null,
"followers": [{ "id": 5678, "name": "<NAME>" } ],
"id": 1001,
"modified_at": "2012-02-22T02:06:58.158Z",
"name": "<NAME>!",
"notes": "How are you today?",
"parent": null,
"projects": [{ "id": 14641, "name": "Cat Stuff" }],
"workspace": { "id": 14916, "name": "My Favorite Workspace" }
}
}
responses.add(POST, 'http://app/tasks', status=201, body=json.dumps(res), match_querystring=True)
self.assertEqual(self.client.tasks.create(req['data']), res['data'])
self.assertEqual(json.loads(responses.calls[0].request.body), req)
def test_tasks_find_by_id(self):
res = {
"data": {
"assignee": { "id": 1234, "name": "<NAME>" },
"created_at": "2012-02-22T02:06:58.158Z"
}
}
responses.add(GET, 'http://app/tasks/1001', status=200, body=json.dumps(res), match_querystring=True)
self.assertEqual(self.client.tasks.find_by_id(1001), res['data'])
def test_tasks_find_by_project(self):
res = {
"data": [
{ "id": 2001, "name": "Catnip" },
{ "id": 2002, "name": "<NAME>" }
]
}
responses.add(GET, 'http://app/projects/1331/tasks', status=200, body=json.dumps(res), match_querystring=True)
self.assertEqual(self.client.tasks.find_by_project(1331), res['data'])
def test_tasks_update(self):
req = { "data": { "assignee": "me" } }
res = {
"data": {
"assignee": { "id": 1234, "name": "<NAME>" },
"id": 1001
}
}
responses.add(PUT, 'http://app/tasks/1001', status=200, body=json.dumps(res), match_querystring=True)
self.assertEqual(self.client.tasks.update(1001, req['data']), res['data'])
self.assertEqual(json.loads(responses.calls[0].request.body), req)
def test_tasks_delete(self):
res = { "data": {} }
responses.add(DELETE, 'http://app/tasks/1001', status=200, body=json.dumps(res), match_querystring=True)
self.assertEqual(self.client.tasks.delete(1001), res['data'])
def test_tasks_find_all(self):
res = {
"data": [
{ "id": 1248, "name": "Buy catnip" },
{ "id": 24816, "name": "Reflect on role of kittens in society" }
]
}
responses.add(GET, 'http://app/tasks?workspace=14916&assignee=me', status=200, body=json.dumps(res), match_querystring=True)
self.assertEqual(self.client.tasks.find_all({ 'workspace': 14916, 'assignee': 'me' }), res['data'])
def test_tasks_add_subtask(self):
req = {
"data": {
"assignee": 1235,
"followers": [5678],
"name": "<NAME>",
"notes": "He's going to be upset."
}
}
res = {
"data": {
"assignee": { "id": 1235, "name": "<NAME>" },
"assignee_status": "inbox",
"completed": false,
"completed_at": null,
"created_at": "2012-02-22T02:06:58.158Z",
"due_on": null,
"followers": [{ "id": 5678, "name": "<NAME>" } ],
"id": 1001,
"modified_at": "2012-02-22T02:06:58.158Z",
"name": "<NAME>",
"notes": "He's going to be upset.",
"parent": { "id": 2272, "name": "Tell kids I am their father." },
"projects": [],
"workspace": { "id": 14916, "name": "Star Wars" }
}
}
responses.add(POST, 'http://app/tasks/2272/subtasks', status=201, body=json.dumps(res), match_querystring=True)
self.assertEqual(self.client.tasks.add_subtask(2272, req['data']), res['data'])
self.assertEqual(json.loads(responses.calls[0].request.body), req)
def test_tasks_subtasks(self):
res = {
"data": [
{ "id": 5005, "name": "<NAME>" },
{ "id": 6709, "name": "???" },
{ "id": 9812, "name": "Profit" }
]
}
responses.add(GET, 'http://app/tasks/7331/subtasks', status=200, body=json.dumps(res), match_querystring=True)
self.assertEqual(self.client.tasks.subtasks(7331), res['data'])
def test_tasks_set_parent(self):
req = { "data": { "parent": 1331 } }
res = {
"data": {
"id": 2272,
"name": "<NAME>",
"parent": [{ "id": 1331, "name": "Tell kids I am their father" }]
}
}
responses.add(POST, 'http://app/tasks/2272/setParent', status=200, body=json.dumps(res), match_querystring=True)
self.assertEqual(self.client.tasks.set_parent(2272, req['data']), res['data'])
self.assertEqual(json.loads(responses.calls[0].request.body), req)
def test_tasks_projects(self):
res = {
"data": [
{ "id": 1331, "name": "Things To Buy" },
{ "id": 14641, "name": "<NAME>" }
]
}
responses.add(GET, 'http://app/tasks/1001/projects', status=200, body=json.dumps(res), match_querystring=True)
self.assertEqual(self.client.tasks.projects(1001), res['data'])
def test_tasks_add_project(self):
req = { "data": { "project": 14641 } }
res = { "data": {} }
responses.add(POST, 'http://app/tasks/1001/addProject', status=200, body=json.dumps(res), match_querystring=True)
self.assertEqual(self.client.tasks.add_project(1001, req['data']), res['data'])
self.assertEqual(json.loads(responses.calls[0].request.body), req)
def test_tasks_remove_project(self):
req = { "data": { "project": 14641 } }
res = { "data": {} }
responses.add(POST, 'http://app/tasks/1001/removeProject', status=200, body=json.dumps(res), match_querystring=True)
self.assertEqual(self.client.tasks.remove_project(1001, req['data']), res['data'])
self.assertEqual(json.loads(responses.calls[0].request.body), req)
def test_tasks_tags(self):
res = {
"data": [
{ "id": 1331, "name": "orange" },
{ "id": 1771, "name": "fluffy" }
]
}
responses.add(GET, 'http://app/tasks/1001/tags', status=200, body=json.dumps(res), match_querystring=True)
self.assertEqual(self.client.tasks.tags(1001), res['data'])
def test_tasks_1001_addTag(self):
req = { "data": { "tag": 1771 } }
res = { "data": {} }
responses.add(POST, 'http://app/tasks/1001/addTag', status=200, body=json.dumps(res), match_querystring=True)
self.assertEqual(self.client.tasks.add_tag(1001, req['data']), res['data'])
self.assertEqual(json.loads(responses.calls[0].request.body), req)
def test_tasks_removeTag(self):
req = { "data": { "tag": 1771 } }
res = { "data": {} }
responses.add(POST, 'http://app/tasks/1001/removeTag', status=200, body=json.dumps(res), match_querystring=True)
self.assertEqual(self.client.tasks.remove_tag(1001, req['data']), res['data'])
self.assertEqual(json.loads(responses.calls[0].request.body), req)
def test_tasks_add_followers(self):
req = { "data": { "followers": [1235] } }
res = {
"data": {
"followers": [{ "id": 1235, "name": "<NAME>" }],
"id": 1001
}
}
responses.add(POST, 'http://app/tasks/1001/addFollowers', status=200, body=json.dumps(res), match_querystring=True)
self.assertEqual(self.client.tasks.add_followers(1001, req['data']), res['data'])
self.assertEqual(json.loads(responses.calls[0].request.body), req)
def test_tasks_remove_followers(self):
req = { "data": { "followers": [1235] } }
res = { "data": { "followers": [], "id": 1001 } }
responses.add(POST, 'http://app/tasks/1001/removeFollowers', status=200, body=json.dumps(res), match_querystring=True)
self.assertEqual(self.client.tasks.remove_followers(1001, req['data']), res['data'])
self.assertEqual(json.loads(responses.calls[0].request.body), req)
def test_tasks_find_by_tag(self):
res = {
"data": [
{ "id": 2001, "name": "Catnip" },
{ "id": 2002, "name": "<NAME>" }
]
}
responses.add(GET, 'http://app/tags/1331/tasks', status=200, body=json.dumps(res), match_querystring=True)
self.assertEqual(self.client.tasks.find_by_tag(1331), res['data'])
def test_tasks_custom_field_data(self):
res = {
"data": {
"id": 1001,
"name": "<NAME>!",
"completed": false,
"custom_fields": [
{
"id": 124578,
"name": "Priority",
"type": "enum",
"enum_value": {
"id": 789,
"name": "Low",
"enabled": true,
"color": "blue"
}
}
]
}
}
responses.add(GET, 'http://app/tasks/1001', status=200, body=json.dumps(res), match_querystring=True)
self.assertEqual(self.client.tasks.find_by_id(1001), res['data'])
|
py/mistql/gardenwall.py | adamtrilling/mistql | 263 | 11147761 | from typing import Any
from mistql.runtime_value import RuntimeValue
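# Boundary ("garden wall") helpers: convert plain Python data to MistQL
# RuntimeValue objects on input and back to plain Python on output.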
def input_garden_wall(data: Any) -> RuntimeValue:
return RuntimeValue.of(data)
def output_garden_wall(data: RuntimeValue) -> Any:
return data.to_python()
|
atomic_reactor/utils/flatpak_util.py | qixiang/atomic-reactor | 113 | 11147784 | """
Copyright (c) 2022 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
import logging
from typing import Any, Dict, List, Optional
import gi
from flatpak_module_tools.flatpak_builder import ModuleInfo, FlatpakSourceInfo
from osbs.repo_utils import ModuleSpec
from atomic_reactor.config import get_koji_session, Configuration
from atomic_reactor.source import SourceConfig
from atomic_reactor.utils.koji import get_koji_module_build
try:
gi.require_version('Modulemd', '2.0')
except ValueError as e:
# Normalize to ImportError to simplify handling
raise ImportError(str(e)) from e
from gi.repository import Modulemd
logger = logging.getLogger(__name__)
# ODCS API constant
SOURCE_TYPE_MODULE = 2
class ComposeInfo(object):
def __init__(self, source_spec, main_module, modules):
self.source_spec = source_spec
self.main_module = main_module
self.modules = modules
def koji_metadata(self):
sorted_modules = [self.modules[k] for k in sorted(self.modules.keys())]
# We exclude the 'platform' pseudo-module here since we don't enable
# it for package installation - it doesn't influence the image contents
return {
'source_modules': [self.source_spec],
'modules': ['-'.join((m.name, m.stream, m.version)) for
m in sorted_modules if m.name != 'platform'],
'flatpak': True,
}
class FlatpakUtil:
def __init__(self, workflow_config: Configuration, source_config: SourceConfig,
                 composes: Optional[List[Dict[str, Any]]] = None):
self.workflow_config = workflow_config
self.source_config = source_config
self.composes = composes
def get_flatpak_source_spec(self) -> str:
modules = self.source_config.compose.get('modules', [])
if not modules:
raise RuntimeError('"compose" config has no modules, a module is required for Flatpaks')
source_spec = modules[0]
if len(modules) > 1:
logger.info("compose config contains multiple modules, using first module %s",
source_spec)
return source_spec
def resolve_modules(self, modules) -> Dict[str, ModuleInfo]:
koji_session = get_koji_session(self.workflow_config)
resolved_modules = {}
for module_spec in modules:
build, rpm_list = get_koji_module_build(koji_session, module_spec)
# The returned RPM list contains source RPMs and RPMs for all
# architectures.
rpms = ['{name}-{epochnum}:{version}-{release}.{arch}.rpm'
.format(epochnum=rpm['epoch'] or 0, **rpm)
for rpm in rpm_list]
# strict=False - don't break if new fields are added
mmd = Modulemd.ModuleStream.read_string(
build['extra']['typeinfo']['module']['modulemd_str'], strict=False)
# Make sure we have a version 2 modulemd file
mmd = mmd.upgrade(Modulemd.ModuleStreamVersionEnum.TWO)
resolved_modules[module_spec.name] = ModuleInfo(module_spec.name,
module_spec.stream,
module_spec.version,
mmd, rpms)
return resolved_modules
def build_compose_info(self, modules, source_spec) -> ComposeInfo:
main_module = ModuleSpec.from_str(source_spec)
main_module_info = modules[main_module.name]
assert main_module_info.stream == main_module.stream
if main_module.version is not None:
assert main_module_info.version == main_module.version
return ComposeInfo(source_spec=source_spec,
main_module=main_module_info,
modules=modules)
def get_flatpak_compose_info(self) -> ComposeInfo:
source_spec = self.get_flatpak_source_spec()
main_module = ModuleSpec.from_str(source_spec)
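        # look through the ODCS compose results for a module compose that includes the main module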
for compose_info in self.composes:
if compose_info['source_type'] != SOURCE_TYPE_MODULE:
continue
modules = [ModuleSpec.from_str(s) for s in compose_info['source'].split()]
for module in modules:
if module.name == main_module.name and module.stream == main_module.stream:
resolved_modules = self.resolve_modules(modules)
return self.build_compose_info(resolved_modules, source_spec)
logger.debug('Compose info: %s', self.composes)
raise RuntimeError("Can't find main module %s in compose result" % source_spec)
def get_flatpak_source_info(self) -> FlatpakSourceInfo:
flatpak_yaml = self.source_config.flatpak
compose_info = self.get_flatpak_compose_info()
module_spec = ModuleSpec.from_str(compose_info.source_spec)
source_info = FlatpakSourceInfo(flatpak_yaml,
compose_info.modules,
compose_info.main_module,
module_spec.profile)
return source_info
|
bigflow_python/python/bigflow/transform_impls/test/intersection_test.py | advancedxy/bigflow_python | 1,236 | 11147795 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @Author: zhangyuncong
# @Date: 2015-06-30 20:27:08
# @Last Modified by: zhangyuncong
# @Last Modified time: 2015-06-30 22:21:02
import unittest
from bigflow.test import test_base
class IntersectionTestCase(test_base.PipelineBasedTest):
def test_intersection(self):
a = self._pipeline.parallelize([1, 2, 3, 1, 3, 5])
b = self._pipeline.parallelize([1, 2, 1, 4, 2, 3])
without_distinct = a.intersection(b)
with_distinct = a.intersection(b, True)
self.assertItemsEqual([1, 2, 3], without_distinct.get())
self.assertItemsEqual([1, 1, 2, 3], with_distinct.get())
if __name__ == "__main__":
unittest.main()
|
tests/test_calendar_event.py | damianfs/canvasapi | 386 | 11147805 | import unittest
import requests_mock
from canvasapi import Canvas
from canvasapi.calendar_event import CalendarEvent
from tests import settings
from tests.util import register_uris
@requests_mock.Mocker()
class TestCalendarEvent(unittest.TestCase):
def setUp(self):
self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)
with requests_mock.Mocker() as m:
register_uris({"calendar_event": ["get_calendar_event"]}, m)
self.calendar_event = self.canvas.get_calendar_event(567)
# delete()
def test_delete_calendar_event(self, m):
register_uris({"calendar_event": ["delete_calendar_event"]}, m)
deleted_calendar_event = self.calendar_event.delete()
self.assertIsInstance(deleted_calendar_event, CalendarEvent)
self.assertTrue(hasattr(deleted_calendar_event, "title"))
self.assertEqual(deleted_calendar_event.title, "Test Event 3")
# edit()
def test_edit_calendar_event(self, m):
register_uris({"calendar_event": ["edit_calendar_event"]}, m)
title = "<NAME>"
edited_calendar_event = self.calendar_event.edit(
calendar_event={"title": title}
)
self.assertIsInstance(edited_calendar_event, CalendarEvent)
self.assertTrue(hasattr(edited_calendar_event, "title"))
self.assertEqual(edited_calendar_event.title, title)
# __str__()
def test__str__(self, m):
string = str(self.calendar_event)
self.assertIsInstance(string, str)
|
zentral/core/queues/backends/kombu.py | arubdesu/zentral | 634 | 11147843 | from importlib import import_module
import logging
import time
from zentral.conf import settings
from kombu import Connection, Consumer, Exchange, Queue
from kombu.mixins import ConsumerMixin, ConsumerProducerMixin
from kombu.pools import producers
from zentral.utils.json import save_dead_letter
logger = logging.getLogger('zentral.core.queues.backends.kombu')
raw_events_exchange = Exchange('raw_events', type='direct', durable=True)
events_exchange = Exchange('events', type="fanout", durable=True)
enrich_events_queue = Queue('enrich_events',
exchange=events_exchange,
durable=True)
enriched_events_exchange = Exchange('enriched_events', type="fanout", durable=True)
process_events_queue = Queue('process_events',
exchange=enriched_events_exchange,
durable=True)
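# Pipeline summary: raw events enter via the raw_events exchange (one queue per routing key),
# PreprocessWorker turns them into events on the events fanout exchange,
# EnrichWorker consumes enrich_events and republishes on enriched_events,
# and ProcessWorker plus the per-store StoreWorkers consume the enriched events.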
class BaseWorker:
name = "UNDEFINED"
counters = []
def setup_metrics_exporter(self, *args, **kwargs):
self.metrics_exporter = kwargs.pop("metrics_exporter", None)
if self.metrics_exporter:
for name, label in self.counters:
self.metrics_exporter.add_counter(name, [label])
self.metrics_exporter.start()
def inc_counter(self, name, label):
if self.metrics_exporter:
self.metrics_exporter.inc(name, label)
def log(self, msg, level, *args):
logger.log(level, "{} - {}".format(self.name, msg), *args)
def log_debug(self, msg, *args):
self.log(msg, logging.DEBUG, *args)
def log_info(self, msg, *args):
self.log(msg, logging.INFO, *args)
def log_error(self, msg, *args):
self.log(msg, logging.ERROR, *args)
class PreprocessWorker(ConsumerProducerMixin, BaseWorker):
name = "preprocess worker"
counters = (
("preprocessed_events", "routing_key"),
("produced_events", "event_type"),
)
def __init__(self, connection):
self.connection = connection
# preprocessors
self.preprocessors = {
preprocessor.routing_key: preprocessor
for preprocessor in self._get_preprocessors()
}
def _get_preprocessors(self):
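        # each configured app may optionally ship a "<app>.preprocessors" module exposing get_preprocessors()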
for app in settings['apps']:
try:
preprocessors_module = import_module("{}.preprocessors".format(app))
except ImportError:
pass
else:
yield from getattr(preprocessors_module, "get_preprocessors")()
def run(self, *args, **kwargs):
self.log_info("run")
super().setup_metrics_exporter(*args, **kwargs)
super().run(*args, **kwargs)
def get_consumers(self, _, default_channel):
queues = [
Queue(preprocessor.routing_key, exchange=raw_events_exchange,
routing_key=preprocessor.routing_key, durable=True)
for routing_key, preprocessor in self.preprocessors.items()
]
return [Consumer(default_channel,
queues=queues,
accept=['json'],
callbacks=[self.do_preprocess_raw_event])]
def do_preprocess_raw_event(self, body, message):
routing_key = message.delivery_info.get("routing_key")
if not routing_key:
logger.error("Message w/o routing key")
else:
preprocessor = self.preprocessors.get(routing_key)
if not preprocessor:
logger.error("No preprocessor for routing key %s", routing_key)
else:
for event in preprocessor.process_raw_event(body):
self.producer.publish(event.serialize(machine_metadata=False),
serializer='json',
exchange=events_exchange,
declare=[events_exchange])
self.inc_counter("produced_events", event.event_type)
message.ack()
self.inc_counter("preprocessed_events", routing_key or "UNKNOWN")
class EnrichWorker(ConsumerProducerMixin, BaseWorker):
name = "enrich worker"
counters = (
("enriched_events", "event_type"),
("produced_events", "event_type"),
)
def __init__(self, connection, enrich_event):
self.connection = connection
self.enrich_event = enrich_event
self.name = "enrich worker"
def run(self, *args, **kwargs):
self.log_info("run")
super().setup_metrics_exporter(*args, **kwargs)
super().run(*args, **kwargs)
def get_consumers(self, _, default_channel):
return [Consumer(default_channel,
queues=[enrich_events_queue],
accept=['json'],
callbacks=[self.do_enrich_event])]
def do_enrich_event(self, body, message):
self.log_debug("enrich event")
try:
for event in self.enrich_event(body):
self.producer.publish(event.serialize(machine_metadata=True),
serializer='json',
exchange=enriched_events_exchange,
declare=[enriched_events_exchange])
self.inc_counter("produced_events", event.event_type)
except Exception as exception:
logger.exception("Requeuing message with 1s delay: %s", exception)
time.sleep(1)
message.requeue()
else:
message.ack()
self.inc_counter("enriched_events", event.event_type)
class ProcessWorker(ConsumerMixin, BaseWorker):
name = "process worker"
counters = (
("processed_events", "event_type"),
)
def __init__(self, connection, process_event):
self.connection = connection
self.process_event = process_event
def run(self, *args, **kwargs):
self.log_info("run")
super().setup_metrics_exporter(*args, **kwargs)
super().run(*args, **kwargs)
def get_consumers(self, _, default_channel):
return [Consumer(default_channel,
queues=[process_events_queue],
accept=['json'],
callbacks=[self.do_process_event])]
def do_process_event(self, body, message):
self.log_debug("process event")
event_type = body['_zentral']['type']
self.process_event(body)
message.ack()
self.inc_counter("processed_events", event_type)
class StoreWorker(ConsumerMixin, BaseWorker):
counters = (
("stored_events", "event_type"),
)
def __init__(self, connection, event_store):
self.connection = connection
self.event_store = event_store
self.name = "store worker {}".format(self.event_store.name)
self.input_queue = Queue(('store_events_{}'.format(self.event_store.name)).replace(" ", "_"),
exchange=enriched_events_exchange,
durable=True)
def run(self, *args, **kwargs):
self.log_info("run")
super().setup_metrics_exporter(*args, **kwargs)
super().run(*args, **kwargs)
def get_consumers(self, _, default_channel):
return [Consumer(default_channel,
queues=[self.input_queue],
accept=['json'],
callbacks=[self.do_store_event])]
def do_store_event(self, body, message):
self.log_debug("store event")
event_type = body['_zentral']['type']
if not self.event_store.is_event_type_included(event_type):
self.log_debug("skip %s event", event_type)
message.ack()
return
try:
self.event_store.store(body)
except Exception:
logger.exception("Could add event to store %s", self.event_store.name)
save_dead_letter(body, "event store {} error".format(self.event_store.name))
message.reject()
else:
message.ack()
self.inc_counter("stored_events", event_type)
class EventQueues(object):
def __init__(self, config_d):
self.backend_url = config_d['backend_url']
self.transport_options = config_d.get('transport_options')
self.connection = self._get_connection()
def _get_connection(self):
return Connection(self.backend_url, transport_options=self.transport_options)
def get_preprocess_worker(self):
return PreprocessWorker(self._get_connection())
def get_enrich_worker(self, enrich_event):
return EnrichWorker(self._get_connection(), enrich_event)
def get_process_worker(self, process_event):
return ProcessWorker(self._get_connection(), process_event)
def get_store_worker(self, event_store):
return StoreWorker(self._get_connection(), event_store)
def post_raw_event(self, routing_key, raw_event):
with producers[self.connection].acquire(block=True) as producer:
producer.publish(raw_event,
serializer='json',
exchange=raw_events_exchange,
routing_key=routing_key,
declare=[raw_events_exchange])
def post_event(self, event):
with producers[self.connection].acquire(block=True) as producer:
producer.publish(event.serialize(machine_metadata=False),
serializer='json',
exchange=events_exchange,
declare=[events_exchange])
|
chrome/common/extensions/docs/server2/caching_file_system.py | google-ar/chromium | 2,151 | 11147844 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import posixpath
import sys
from file_system import FileSystem, StatInfo, FileNotFoundError
from future import All, Future
from path_util import AssertIsDirectory, IsDirectory, ToDirectory
from third_party.json_schema_compiler.memoize import memoize
class CachingFileSystem(FileSystem):
'''FileSystem which implements a caching layer on top of |file_system|. If
|fail_on_miss| is True then cache misses throw a FileNotFoundError rather than
falling back onto the underlying FileSystem.
'''
def __init__(self, file_system, object_store_creator, fail_on_miss=False):
self._file_system = file_system
self._fail_on_miss = fail_on_miss
def create_object_store(category, start_empty=True):
return object_store_creator.Create(
CachingFileSystem,
category='%s/%s' % (file_system.GetIdentity(), category),
start_empty=start_empty)
# We only start the stat cache empty if |fail_on_miss| is False, i.e. if
# we're NOT running on a live instance and we can afford to fall back onto
# the underlying FileSystem impl.
self._stat_cache = create_object_store('stat', start_empty=not fail_on_miss)
self._read_cache = create_object_store('read', start_empty=False)
self._walk_cache = create_object_store('walk', start_empty=False)
def Refresh(self):
return self._file_system.Refresh()
def StatAsync(self, path):
'''Stats the directory given, or if a file is given, stats the file's parent
directory to get info about the file.
'''
# Always stat the parent directory, since it will have the stat of the child
# anyway, and this gives us an entire directory's stat info at once.
dir_path, file_path = posixpath.split(path)
dir_path = ToDirectory(dir_path)
def make_stat_info(dir_stat):
'''Converts a dir stat into the correct resulting StatInfo; if the Stat
was for a file, the StatInfo should just contain that file.
'''
if path == dir_path:
return dir_stat
# Was a file stat. Extract that file.
file_version = dir_stat.child_versions.get(file_path)
if file_version is None:
raise FileNotFoundError('No stat found for %s in %s (found %s)' %
(path, dir_path, dir_stat.child_versions))
return StatInfo(file_version)
def raise_cache_miss(path):
raise FileNotFoundError('Got cache miss when trying to stat %s' % path)
dir_stat = self._stat_cache.Get(dir_path).Get()
if dir_stat is not None:
return Future(callback=lambda: make_stat_info(dir_stat))
if self._fail_on_miss:
logging.warning('Bailing on stat cache miss for %s on %s' %
(dir_path, self.GetIdentity()))
return Future(callback=lambda: raise_cache_miss(dir_path))
def next(dir_stat):
assert dir_stat is not None # should have raised a FileNotFoundError
# We only ever need to cache the dir stat.
self._stat_cache.Set(dir_path, dir_stat)
return make_stat_info(dir_stat)
return self._MemoizedStatAsyncFromFileSystem(dir_path).Then(next)
@memoize
def _MemoizedStatAsyncFromFileSystem(self, dir_path):
'''This is a simple wrapper to memoize Futures to directory stats, since
StatAsync makes heavy use of it. Only cache directories so that the
memoized cache doesn't blow up.
'''
assert IsDirectory(dir_path)
return self._file_system.StatAsync(dir_path)
def Read(self, paths, skip_not_found=False):
'''Reads a list of files. If a file is cached and it is not out of
date, it is returned. Otherwise, the file is retrieved from the file system.
'''
# Files which aren't found are cached in the read object store as
# (path, None, None). This is to prevent re-reads of files we know
# do not exist.
cached_read_values = self._read_cache.GetMulti(paths).Get()
cached_stat_values = self._stat_cache.GetMulti(paths).Get()
# Populate a map of paths to Futures to their stat. They may have already
# been cached in which case their Future will already have been constructed
# with a value.
stat_futures = {}
def handle(error):
if isinstance(error, FileNotFoundError):
return None
raise error
for path in paths:
stat_value = cached_stat_values.get(path)
if stat_value is None:
stat_future = self.StatAsync(path)
if skip_not_found:
stat_future = stat_future.Then(lambda x: x, handle)
else:
stat_future = Future(value=stat_value)
stat_futures[path] = stat_future
# Filter only the cached data which is up to date by comparing to the latest
# stat. The cached read data includes the cached version. Remove it for
# the result returned to callers. |version| == None implies a non-existent
# file, so skip it.
up_to_date_data = dict(
(path, data) for path, (data, version) in cached_read_values.iteritems()
if version is not None and stat_futures[path].Get().version == version)
if skip_not_found:
# Filter out paths which we know do not exist, i.e. if |path| is in
# |cached_read_values| *and* has a None version, then it doesn't exist.
# See the above declaration of |cached_read_values| for more information.
paths = [path for path in paths
if cached_read_values.get(path, (None, True))[1]]
remaining_paths = set(paths) - set(up_to_date_data.iterkeys())
if len(remaining_paths) == 0:
# Everything was cached and up to date.
return Future(value=up_to_date_data)
def raise_cache_miss(paths):
raise FileNotFoundError('Got cache miss when trying to stat %s' % paths)
if self._fail_on_miss:
# Ignore missing values and return anyway.
      logging.warning('Read cache miss for %s on %s' %
(remaining_paths, self.GetIdentity()))
return Future(callback=lambda: raise_cache_miss(remaining_paths))
def next(new_results):
# Update the cache. This is a path -> (data, version) mapping.
self._read_cache.SetMulti(
dict((path, (new_result, stat_futures[path].Get().version))
for path, new_result in new_results.iteritems()))
# Update the read cache to include files that weren't found, to prevent
# constantly trying to read a file we now know doesn't exist.
self._read_cache.SetMulti(
dict((path, (None, None)) for path in paths
if stat_futures[path].Get() is None))
new_results.update(up_to_date_data)
return new_results
# Read in the values that were uncached or old.
return self._file_system.Read(remaining_paths,
skip_not_found=skip_not_found).Then(next)
def GetCommitID(self):
return self._file_system.GetCommitID()
def GetPreviousCommitID(self):
return self._file_system.GetPreviousCommitID()
def Walk(self, root, depth=-1):
'''Overrides FileSystem.Walk() to provide caching functionality.
'''
def file_lister(root):
res, root_stat = All((self._walk_cache.Get(root),
self.StatAsync(root))).Get()
if res and res[2] == root_stat.version:
dirs, files = res[0], res[1]
else:
# Wasn't cached, or not up to date.
dirs, files = [], []
for f in self.ReadSingle(root).Get():
if IsDirectory(f):
dirs.append(f)
else:
files.append(f)
# Update the cache. This is a root -> (dirs, files, version) mapping.
self._walk_cache.Set(root, (dirs, files, root_stat.version))
return dirs, files
return self._file_system.Walk(root, depth=depth, file_lister=file_lister)
def GetIdentity(self):
return self._file_system.GetIdentity()
def GetVersion(self):
return self._file_system.GetVersion()
def __repr__(self):
return '%s of <%s>' % (type(self).__name__, repr(self._file_system))
|
histoqc/_worker.py | kaczmarj/HistoQC | 140 | 11147872 | """histoqc worker functions"""
import os
import shutil
from histoqc.BaseImage import BaseImage
from histoqc._pipeline import load_pipeline
from histoqc._pipeline import setup_plotting_backend
# --- worker functions --------------------------------------------------------
def worker_setup(c):
"""needed for multiprocessing worker setup"""
setup_plotting_backend()
load_pipeline(config=c)
def worker(idx, file_name, *,
process_queue, config, outdir, log_manager, lock, shared_dict, num_files, force):
"""pipeline worker function"""
# --- output directory preparation --------------------------------
fname_outdir = os.path.join(outdir, os.path.basename(file_name))
if os.path.isdir(fname_outdir): # directory exists
if not force:
log_manager.logger.warning(
f"{file_name} already seems to be processed (output directory exists),"
" skipping. To avoid this behavior use --force"
)
return
else:
# remove entire directory to ensure no old files are present
shutil.rmtree(fname_outdir)
# create output dir
os.makedirs(fname_outdir)
log_manager.logger.info(f"-----Working on:\t{file_name}\t\t{idx+1} of {num_files}")
try:
s = BaseImage(file_name, fname_outdir, dict(config.items("BaseImage.BaseImage")))
for process, process_params in process_queue:
process_params["lock"] = lock
process_params["shared_dict"] = shared_dict
process(s, process_params)
s["completed"].append(process.__name__)
except Exception as exc:
# reproduce histoqc error string
_oneline_doc_str = exc.__doc__.replace('\n', '')
err_str = f"{exc.__class__} {_oneline_doc_str} {exc}"
log_manager.logger.error(
f"{file_name} - Error analyzing file (skipping): \t {err_str}"
)
if exc.__traceback__.tb_next is not None:
func_tb_obj = str(exc.__traceback__.tb_next.tb_frame.f_code)
else:
func_tb_obj = str(exc.__traceback__)
exc.__histoqc_err__ = (file_name, err_str, func_tb_obj)
raise exc
else:
# TODO:
# the histoqc workaround below is due an implementation detail in BaseImage:
# BaseImage keeps an OpenSlide instance stored under os_handle and leaks a
# file handle. This will need fixing in BaseImage.
# -> best solution would be to make BaseImage a contextmanager and close
# and cleanup the OpenSlide handle on __exit__
s["os_handle"] = None # need to get rid of handle because it can't be pickled
return s
def worker_success(s, result_file):
"""success callback"""
if s is None:
return
with result_file:
if result_file.is_empty_file():
result_file.write_headers(s)
_fields = '\t'.join([str(s[field]) for field in s['output']])
_warnings = '|'.join(s['warnings'])
result_file.write_line("\t".join([_fields, _warnings]))
def worker_error(e, failed):
"""error callback"""
if hasattr(e, '__histoqc_err__'):
file_name, err_str, tb = e.__histoqc_err__
else:
# error outside of pipeline
# todo: it would be better to handle all of this as a decorator
# around the worker function
file_name, err_str, tb = "N/A", f"error outside of pipeline {e!r}", None
failed.append((file_name, err_str, tb))
|
examples/PYBV11/toasters/toasters.py | jdtsmith/st7789_mpy | 153 | 11147887 | '''
toasters.py
An example using bitmap to draw sprites on a ST7789 TFT display
connected to a pyboard1.1.
spritesheet from CircuitPython_Flying_Toasters
https://learn.adafruit.com/circuitpython-sprite-animation-pendant-mario-clouds-flying-toasters
'''
import time
import random
from pyb import SPI, Pin
import st7789
import t1,t2,t3,t4,t5
TOASTERS = [t1, t2, t3, t4]
TOAST = [t5]
class toast():
'''
    toast class to keep track of a sprite's location and step
'''
def __init__(self, sprites, x, y):
self.sprites = sprites
self.steps = len(sprites)
self.x = x
self.y = y
self.step = random.randint(0, self.steps-1)
self.speed = random.randint(2, 5)
def move(self):
if self.x <= 0:
self.speed = random.randint(2, 5)
self.x = 320-64
self.step += 1
self.step %= self.steps
self.x -= self.speed
def main():
'''
Draw and move sprite
'''
try:
spi = SPI(1, SPI.MASTER, baudrate=42000000, prescaler=2)
# initialize display
tft = st7789.ST7789(
spi,
240,
320,
reset=Pin('X3', Pin.OUT),
cs=Pin('X5', Pin.OUT),
dc=Pin('X4', Pin.OUT),
backlight=Pin('X2', Pin.OUT),
rotation=3)
# enable display and clear screen
tft.init()
tft.fill(st7789.BLACK)
        # create toast sprites in random positions
sprites = [
toast(TOASTERS, 320-64, 0),
toast(TOAST, 320-64*2, 80),
toast(TOASTERS, 320-64*4, 160)
]
# move and draw sprites
while True:
for man in sprites:
bitmap = man.sprites[man.step]
tft.fill_rect(
man.x+bitmap.WIDTH-man.speed,
man.y,
man.speed,
bitmap.HEIGHT,
st7789.BLACK)
man.move()
if man.x > 0:
tft.bitmap(bitmap, man.x, man.y)
else:
tft.fill_rect(
0,
man.y,
bitmap.WIDTH,
bitmap.HEIGHT,
st7789.BLACK)
time.sleep(0.05)
finally:
# shutdown spi
spi.deinit()
main()
|
imodels/tree/figs.py | csinva/interpretability-implementations-demos | 102 | 11147924 | from copy import deepcopy
from typing import List
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.datasets
from sklearn import datasets
from sklearn import tree
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.tree import plot_tree, DecisionTreeClassifier
from sklearn.utils import check_X_y, check_array
from sklearn.utils.validation import _check_sample_weight
from imodels.tree.viz_utils import DecisionTreeViz
plt.rcParams['figure.dpi'] = 300
class Node:
def __init__(self, feature: int = None, threshold: int = None,
value=None, idxs=None, is_root: bool = False, left=None,
impurity_reduction: float = None, tree_num: int = None,
right=None):
"""Node class for splitting
"""
# split or linear
self.is_root = is_root
self.idxs = idxs
self.tree_num = tree_num
self.feature = feature
self.impurity_reduction = impurity_reduction
# different meanings
self.value = value # for split this is mean, for linear this is weight
# split-specific
self.threshold = threshold
self.left = left
self.right = right
self.left_temp = None
self.right_temp = None
def setattrs(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
def __str__(self):
if self.is_root:
return f'X_{self.feature} <= {self.threshold:0.3f} (Tree #{self.tree_num} root)'
elif self.left is None and self.right is None:
return f'Val: {self.value[0][0]:0.3f} (leaf)'
else:
return f'X_{self.feature} <= {self.threshold:0.3f} (split)'
def print_root(self, y):
try:
one_count = pd.Series(y).value_counts()[1.0]
except KeyError:
one_count = 0
one_proportion = f' {one_count}/{y.shape[0]} ({round(100 * one_count / y.shape[0], 2)}%)'
if self.is_root:
return f'X_{self.feature} <= {self.threshold:0.3f}' + one_proportion
elif self.left is None and self.right is None:
return f'ΔRisk = {self.value[0][0]:0.2f}' + one_proportion
else:
return f'X_{self.feature} <= {self.threshold:0.3f}' + one_proportion
def __repr__(self):
return self.__str__()
class FIGS(BaseEstimator):
"""FIGS (sum of trees) classifier.
Fast Interpretable Greedy-Tree Sums (FIGS) is an algorithm for fitting concise rule-based models.
Specifically, FIGS generalizes CART to simultaneously grow a flexible number of trees in a summation.
The total number of splits across all the trees can be restricted by a pre-specified threshold, keeping the model interpretable.
Experiments across real-world datasets show that FIGS achieves state-of-the-art prediction performance when restricted to just a few splits (e.g. less than 20).
https://arxiv.org/abs/2201.11931
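
    Example (an illustrative sketch on toy data, not part of the original source)::

        import numpy as np
        X = np.random.rand(100, 4)
        y = (X[:, 0] + X[:, 1] > 1).astype(float)
        model = FIGS(max_rules=6)
        model.fit(X, y)
        preds = model.predict(X)
        print(model)  # shows the fitted sum of trees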
"""
def __init__(self, max_rules: int = 12, min_impurity_decrease: float = 0.0, random_state=None):
super().__init__()
self.max_rules = max_rules
self.min_impurity_decrease = min_impurity_decrease
self.random_state = random_state
self._init_estimator_type() # decides between regressor and classifier
self._init_decision_function()
def _init_estimator_type(self):
"""
FIGSRegressor and FIGSClassifier override this method
to alter the prediction task. When using this class directly,
it is equivalent to FIGSRegressor
"""
self._estimator_type = 'regressor'
def _init_decision_function(self):
"""Sets decision function based on _estimator_type
"""
        # used by sklearn GridSearchCV, BaggingClassifier
if self._estimator_type == 'classifier':
decision_function = lambda x: self.predict_proba(x)[:, 1]
elif self._estimator_type == 'regressor':
decision_function = self.predict
def _construct_node_with_stump(self, X, y, idxs, tree_num, sample_weight=None,
compare_nodes_with_sample_weight=True):
"""
Params
------
compare_nodes_with_sample_weight: Deprecated
If this is set to true and sample_weight is passed, use sample_weight to compare nodes
Otherwise, use sample_weight only for picking a split given a particular node
"""
# array indices
SPLIT = 0
LEFT = 1
RIGHT = 2
# fit stump
stump = tree.DecisionTreeRegressor(max_depth=1)
sweight = None
if sample_weight is not None:
sweight = sample_weight[idxs]
stump.fit(X[idxs], y[idxs], sample_weight=sweight)
# these are all arrays, arr[0] is split node
# note: -2 is dummy
feature = stump.tree_.feature
threshold = stump.tree_.threshold
impurity = stump.tree_.impurity
n_node_samples = stump.tree_.n_node_samples
value = stump.tree_.value
# no split
if len(feature) == 1:
# print('no split found!', idxs.sum(), impurity, feature)
return Node(idxs=idxs, value=value[SPLIT], tree_num=tree_num,
feature=feature[SPLIT], threshold=threshold[SPLIT],
impurity_reduction=None)
# manage sample weights
idxs_split = X[:, feature[SPLIT]] <= threshold[SPLIT]
idxs_left = idxs_split & idxs
idxs_right = ~idxs_split & idxs
if sample_weight is None:
n_node_samples_left = n_node_samples[LEFT]
n_node_samples_right = n_node_samples[RIGHT]
else:
n_node_samples_left = sample_weight[idxs_left].sum()
n_node_samples_right = sample_weight[idxs_right].sum()
n_node_samples_split = n_node_samples_left + n_node_samples_right
# calculate impurity
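        # weighted impurity decrease: parent impurity minus the sample-weighted mean of the
        # child impurities, scaled by the number of samples reaching the split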
impurity_reduction = (
impurity[SPLIT] -
impurity[LEFT] * n_node_samples_left / n_node_samples_split -
impurity[RIGHT] * n_node_samples_right / n_node_samples_split
) * n_node_samples_split
node_split = Node(idxs=idxs, value=value[SPLIT], tree_num=tree_num,
feature=feature[SPLIT], threshold=threshold[SPLIT],
impurity_reduction=impurity_reduction)
# print('\t>>>', node_split, 'impurity', impurity, 'num_pts', idxs.sum(), 'imp_reduc', impurity_reduction)
# manage children
node_left = Node(idxs=idxs_left, value=value[LEFT], tree_num=tree_num)
node_right = Node(idxs=idxs_right, value=value[RIGHT], tree_num=tree_num)
node_split.setattrs(left_temp=node_left, right_temp=node_right, )
return node_split
def fit(self, X, y=None, feature_names=None, verbose=False, sample_weight=None):
"""
Params
------
_sample_weight: array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Splits that would create child nodes with net zero or negative weight
are ignored while searching for a split in each node.
"""
X, y = check_X_y(X, y)
y = y.astype(float)
if feature_names is not None:
self.feature_names_ = feature_names
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
self.trees_ = [] # list of the root nodes of added trees
self.complexity_ = 0 # tracks the number of rules in the model
y_predictions_per_tree = {} # predictions for each tree
y_residuals_per_tree = {} # based on predictions above
# set up initial potential_splits
# everything in potential_splits either is_root (so it can be added directly to self.trees_)
# or it is a child of a root node that has already been added
idxs = np.ones(X.shape[0], dtype=bool)
node_init = self._construct_node_with_stump(X=X, y=y, idxs=idxs, tree_num=-1, sample_weight=sample_weight)
potential_splits = [node_init]
for node in potential_splits:
node.setattrs(is_root=True)
potential_splits = sorted(potential_splits, key=lambda x: x.impurity_reduction)
# start the greedy fitting algorithm
finished = False
while len(potential_splits) > 0 and not finished:
# print('potential_splits', [str(s) for s in potential_splits])
split_node = potential_splits.pop() # get node with max impurity_reduction (since it's sorted)
# don't split on node
if split_node.impurity_reduction < self.min_impurity_decrease:
finished = True
break
# split on node
if verbose:
print('\nadding ' + str(split_node))
self.complexity_ += 1
# if added a tree root
if split_node.is_root:
# start a new tree
self.trees_.append(split_node)
# update tree_num
for node_ in [split_node, split_node.left_temp, split_node.right_temp]:
if node_ is not None:
node_.tree_num = len(self.trees_) - 1
# add new root potential node
node_new_root = Node(is_root=True, idxs=np.ones(X.shape[0], dtype=bool),
tree_num=-1)
potential_splits.append(node_new_root)
# add children to potential splits
# assign left_temp, right_temp to be proper children
# (basically adds them to tree in predict method)
split_node.setattrs(left=split_node.left_temp, right=split_node.right_temp)
# add children to potential_splits
potential_splits.append(split_node.left)
potential_splits.append(split_node.right)
# update predictions for altered tree
for tree_num_ in range(len(self.trees_)):
y_predictions_per_tree[tree_num_] = self._predict_tree(self.trees_[tree_num_], X)
y_predictions_per_tree[-1] = np.zeros(X.shape[0]) # dummy 0 preds for possible new trees
# update residuals for each tree
# -1 is key for potential new tree
for tree_num_ in list(range(len(self.trees_))) + [-1]:
y_residuals_per_tree[tree_num_] = deepcopy(y)
# subtract predictions of all other trees
for tree_num_other_ in range(len(self.trees_)):
if not tree_num_other_ == tree_num_:
y_residuals_per_tree[tree_num_] -= y_predictions_per_tree[tree_num_other_]
# recompute all impurities + update potential_split children
potential_splits_new = []
for potential_split in potential_splits:
y_target = y_residuals_per_tree[potential_split.tree_num]
# re-calculate the best split
potential_split_updated = self._construct_node_with_stump(X=X,
y=y_target,
idxs=potential_split.idxs,
tree_num=potential_split.tree_num,
sample_weight=sample_weight, )
# need to preserve certain attributes from before (value at this split + is_root)
# value may change because residuals may have changed, but we want it to store the value from before
potential_split.setattrs(
feature=potential_split_updated.feature,
threshold=potential_split_updated.threshold,
impurity_reduction=potential_split_updated.impurity_reduction,
left_temp=potential_split_updated.left_temp,
right_temp=potential_split_updated.right_temp,
)
# this is a valid split
if potential_split.impurity_reduction is not None:
potential_splits_new.append(potential_split)
# sort so largest impurity reduction comes last (should probs make this a heap later)
potential_splits = sorted(potential_splits_new, key=lambda x: x.impurity_reduction)
if verbose:
print(self)
if self.max_rules is not None and self.complexity_ >= self.max_rules:
finished = True
break
return self
def _tree_to_str(self, root: Node, prefix=''):
if root is None:
return ''
elif root.threshold is None:
return ''
pprefix = prefix + '\t'
return prefix + str(root) + '\n' + self._tree_to_str(root.left, pprefix) + self._tree_to_str(root.right,
pprefix)
def _tree_to_str_with_data(self, X, y, root: Node, prefix=''):
if root is None:
return ''
elif root.threshold is None:
return ''
pprefix = prefix + '\t'
left = X[:, root.feature] <= root.threshold
return (
prefix + root.print_root(y) + '\n' +
self._tree_to_str_with_data(X[left], y[left], root.left, pprefix) +
self._tree_to_str_with_data(X[~left], y[~left], root.right, pprefix))
def __str__(self):
s = '> ------------------------------\n'
s += '> FIGS-Fast Interpretable Greedy-Tree Sums:\n'
s += '> \tPredictions are made by summing the "Val" reached by traversing each tree\n'
s += '> ------------------------------\n'
s += '\n\t+\n'.join([self._tree_to_str(t) for t in self.trees_])
if hasattr(self, 'feature_names_') and self.feature_names_ is not None:
for i in range(len(self.feature_names_))[::-1]:
s = s.replace(f'X_{i}', self.feature_names_[i])
return s
def print_tree(self, X, y):
s = '------------\n' + '\n\t+\n'.join([self._tree_to_str_with_data(X, y, t) for t in self.trees_])
if hasattr(self, 'feature_names_') and self.feature_names_ is not None:
for i in range(len(self.feature_names_))[::-1]:
s = s.replace(f'X_{i}', self.feature_names_[i])
return s
def predict(self, X):
X = check_array(X)
preds = np.zeros(X.shape[0])
for tree in self.trees_:
preds += self._predict_tree(tree, X)
if self._estimator_type == 'regressor':
return preds
elif self._estimator_type == 'classifier':
return (preds > 0.5).astype(int)
def predict_proba(self, X):
X = check_array(X)
if self._estimator_type == 'regressor':
return NotImplemented
preds = np.zeros(X.shape[0])
for tree in self.trees_:
preds += self._predict_tree(tree, X)
preds = np.clip(preds, a_min=0., a_max=1.) # constrain to range of probabilities
return np.vstack((1 - preds, preds)).transpose()
def _predict_tree(self, root: Node, X):
"""Predict for a single tree
"""
def _predict_tree_single_point(root: Node, x):
if root.left is None and root.right is None:
return root.value
left = x[root.feature] <= root.threshold
if left:
if root.left is None: # we don't actually have to worry about this case
return root.value
else:
return _predict_tree_single_point(root.left, x)
else:
if root.right is None: # we don't actually have to worry about this case
return root.value
else:
return _predict_tree_single_point(root.right, x)
preds = np.zeros(X.shape[0])
for i in range(X.shape[0]):
preds[i] = _predict_tree_single_point(root, X[i])
return preds
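
    # Illustrative note (added): predictions are a sum over the fitted trees.  If a
    # sample reaches a leaf with value 0.3 in tree 0 and a leaf with value 0.2 in
    # tree 1, predict() returns 0.5 for FIGSRegressor, while FIGSClassifier
    # thresholds that same sum at 0.5 (see predict/predict_proba above).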
def plot(self, cols=2, feature_names=None, filename=None, label="all",
impurity=False, tree_number=None, dpi=150):
is_single_tree = len(self.trees_) < 2 or tree_number is not None
n_cols = int(cols)
n_rows = int(np.ceil(len(self.trees_) / n_cols))
# if is_single_tree:
# fig, ax = plt.subplots(1)
# else:
# fig, axs = plt.subplots(n_rows, n_cols)
n_plots = int(len(self.trees_)) if tree_number is None else 1
fig, axs = plt.subplots(n_plots, dpi=dpi)
criterion = "squared_error" if self._estimator_type == "regressor" else "gini"
n_classes = 1 if self._estimator_type == 'regressor' else 2
ax_size = int(len(self.trees_)) # n_cols * n_rows
for i in range(n_plots):
r = i // n_cols
c = i % n_cols
if not is_single_tree:
# ax = axs[r, c]
ax = axs[i]
else:
ax = axs
try:
tree = self.trees_[i] if tree_number is None else self.trees_[tree_number]
plot_tree(DecisionTreeViz(tree, criterion, n_classes),
ax=ax, feature_names=feature_names, label=label,
impurity=impurity)
except IndexError:
ax.axis('off')
continue
ax.set_title(f"Tree {i}")
if filename is not None:
plt.savefig(filename)
return
plt.show()
class FIGSRegressor(FIGS, RegressorMixin):
def _init_estimator_type(self):
self._estimator_type = 'regressor'
class FIGSClassifier(FIGS, ClassifierMixin):
def _init_estimator_type(self):
self._estimator_type = 'classifier'
class FIGSCV:
def __init__(self, figs,
                 n_rules_list: List[int] = [6, 12, 24, 30, 50],
cv: int = 3, scoring=None, *args, **kwargs):
self._figs_class = figs
self.n_rules_list = np.array(n_rules_list)
self.cv = cv
self.scoring = scoring
def fit(self, X, y):
self.scores_ = []
for n_rules in self.n_rules_list:
est = self._figs_class(max_rules=n_rules)
cv_scores = cross_val_score(est, X, y, cv=self.cv, scoring=self.scoring)
mean_score = np.mean(cv_scores)
if len(self.scores_) == 0:
self.figs = est
elif mean_score > np.max(self.scores_):
self.figs = est
self.scores_.append(mean_score)
self.figs.fit(X=X, y=y)
def predict_proba(self, X):
return self.figs.predict_proba(X)
def predict(self, X):
return self.figs.predict(X)
@property
def max_rules(self):
return self.figs.max_rules
class FIGSRegressorCV(FIGSCV):
def __init__(self,
n_rules_list: List[int] = [6, 12, 24, 30, 50],
cv: int = 3, scoring='r2', *args, **kwargs):
super(FIGSRegressorCV, self).__init__(figs=FIGSRegressor, n_rules_list=n_rules_list,
cv=cv, scoring=scoring, *args, **kwargs)
class FIGSClassifierCV(FIGSCV):
def __init__(self,
n_rules_list: List[int] = [6, 12, 24, 30, 50],
cv: int = 3, scoring="accuracy", *args, **kwargs):
super(FIGSClassifierCV, self).__init__(figs=FIGSClassifier, n_rules_list=n_rules_list,
cv=cv, scoring=scoring, *args, **kwargs)
if __name__ == '__main__':
from sklearn import datasets
X_cls, Y_cls = datasets.load_breast_cancer(return_X_y=True)
X_reg, Y_reg = datasets.make_friedman1(100)
est = FIGSClassifier(max_rules=10)
# est.fit(X_cls, Y_cls, sample_weight=np.arange(0, X_cls.shape[0]))
est.fit(X_cls, Y_cls, sample_weight=[1] * X_cls.shape[0])
est.predict(X_cls)
est = FIGSRegressorCV()
est.fit(X_reg, Y_reg)
est.predict(X_reg)
print(est.max_rules)
est.figs.plot(tree_number=0)
est = FIGSClassifierCV()
est.fit(X_cls, Y_cls)
est.predict(X_cls)
print(est.max_rules)
est.figs.plot(tree_number=0)
# %%
|
riptable/rt_sort_cache.py | 972d5defe3218bd62b741e6a2f11f5b3/riptable | 307 | 11147941 | __all__ = ['SortCache', ]
import numpy as np
from .rt_numpy import lexsort, crc64
from .rt_enum import TypeRegister
class SortCache(object):
'''
    Global sort cache for uid - unique ids which are often generated from GetTSC (the CPU time stamp counter).
    To ensure that the values have not changed underneath, it performs a crc check of the sort-by columns against the crcs recorded when the sort index was cached.
'''
_cache = {}
_logging = False
@classmethod
def logging_on(cls):
cls._logging=True
@classmethod
def logging_off(cls):
cls._logging=False
@classmethod
def store_sort(cls, uid, sortlist, sortidx):
'''
        Store a sort index (for example one restored from file) in the cache for the given uid, together with the crc of each sort-by column so the entry can be validated later.
'''
crcvals = []
for c in sortlist:
crcvals.append(crc64(c))
cls._cache[uid] = (crcvals, sortidx, len(sortidx))
@classmethod
def get_sorted_row_index(cls, uid, nrows, sortdict):
if sortdict is not None and len(sortdict) > 0:
crcvals=[]
sortlist=list(sortdict.values())
for vals in sortlist:
# perform a crc on known sorted values and remember the crc
crcvals.append(crc64(vals))
updateCache=True
sort_idx = None
if uid in cls._cache:
checkvals, sort_idx, checkrows = cls._cache[uid]
if len(checkvals) == len(crcvals) and checkrows == nrows:
updateCache = False
# compare all multikey sort values to see if a match
for i in range(len(checkvals)):
if checkvals[i] != crcvals[i]:
updateCache = True
break
if updateCache:
if cls._logging: print("performing lexsort on columns:",list(sortdict.keys()))
sortlist.reverse()
sort_idx = lexsort(sortlist)
cls._cache[uid] = (crcvals, sort_idx, nrows)
else:
if cls._logging: print("NOT performing lexsort on columns:",list(sortdict.keys()))
return sort_idx
else:
return None
# NOTE: arange too costly, disabling this path for now
# TODO: if nrows under max int32, return 32 bit version to save memory
#if nrows is None: nrows = 0
#sort_idx = np.arange(nrows,dtype=np.int64)
#cls._cache[uid] = ([], sort_idx, nrows)
return sort_idx
@classmethod
def invalidate(cls, uid):
if uid in cls._cache:
del cls._cache[uid]
@classmethod
def invalidate_all(cls):
to_delete = [*cls._cache.keys()]
for uid in to_delete:
del cls._cache[uid]
TypeRegister.SortCache = SortCache
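
# Illustrative usage sketch (added; assumes riptable's crc64/lexsort accept the
# arrays below and that 0x1234 stands in for a GetTSC-style uid):
#   import numpy as np
#   keys = np.array([3, 1, 2, 1]); vals = np.array([10, 40, 20, 30])
#   idx = SortCache.get_sorted_row_index(0x1234, len(keys), {'keys': keys, 'vals': vals})
# A second call with the same uid and unchanged columns passes the crc check and
# returns the cached index without re-running lexsort.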
|
src/setup.py | kevinsigwart/ArcREST | 208 | 11147970 | <reponame>kevinsigwart/ArcREST
"""
ArcREST Setup Code
"""
from distutils.core import setup
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
packages = ['arcresthelper','arcresthelper/packages',
'arcrest','arcrest/agol','arcrest/agol/helperservices', 'arcrest/ags', 'arcrest/common',
'arcrest/manageorg', 'arcrest/security', 'arcrest/web',
'arcrest/_abstract', 'arcrest/webmap', 'arcrest/geometryservice',
'arcrest/manageags', 'arcrest/manageportal', 'arcrest/hostedservice',
'arcrest/enrichment', 'arcrest/opendata', 'arcrest/cmp', 'arcrest/packages',
'arcrest/packages/ntlm3']
# Get the long description from the README file
try:
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
        long_description = f.read()
except:
    long_description = "ArcREST Python Package"
setup(
name='ArcREST',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='3.5.6',
description='ArcREST is a Python Wrapper for the Esri REST Framework',
    long_description=long_description,
# The project's main homepage.
url='https://github.com/Esri/ArcREST',
# Author details
author='<NAME>, <NAME>',
author_email='<EMAIL>, <EMAIL>',
# Choose your license
license='Apache',
classifiers=[
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers/GIS Users',
'Topic :: Software Development :: Esri REST API',
# Pick your license as you wish (should match "license" above)
'License :: Apache License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5'
],
keywords='REST, Esri, ArcGIS, Python, ArcPy',
packages=packages,
include_package_data=True,
zip_safe=False,
install_requires=['numpy>=1.7.1'],
extras_require={},
package_data={'arcrest/enrichment' : ['__countrycodes.csv', '__datacollectionnames.csv']},
)
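
# Illustrative usage (added): from a checkout of this directory the package can be
# built/installed with, e.g., "pip install ." or "python setup.py sdist".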
|
model_analyzer/reports/report_manager.py | MarkMoTrin/model_analyzer | 115 | 11147975 | <gh_stars>100-1000
# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from model_analyzer.constants import LOGGER_NAME, TOP_MODELS_REPORT_KEY
from model_analyzer.result.constraint_manager import ConstraintManager
from model_analyzer.record.metrics_manager import MetricsManager
from model_analyzer.plots.plot_manager import PlotManager
from model_analyzer.result.result_table import ResultTable
from .pdf_report import PDFReport
import os
from collections import defaultdict
import logging
logger = logging.getLogger(LOGGER_NAME)
class ReportManager:
"""
Manages the building and export of
various types of reports
"""
def __init__(self, mode, config, gpu_info, result_manager):
"""
Parameters
----------
mode: str
The mode in which Model Analyzer is operating
config :ConfigCommandProfile
The model analyzer's config containing information
about the kind of reports to generate
gpu_info: dict
containing information about the GPUs used
during profiling
result_manager : ResultManager
instance that manages the result tables and
adding results
"""
self._mode = mode
self._config = config
self._gpu_info = gpu_info
self._result_manager = result_manager
# Create the plot manager
self._plot_manager = PlotManager(config=self._config,
result_manager=self._result_manager)
self._summary_data = defaultdict(list)
self._summaries = {}
self._detailed_report_data = {}
self._detailed_reports = {}
self._reports_export_directory = os.path.join(config.export_path,
'reports')
os.makedirs(self._reports_export_directory, exist_ok=True)
def report_keys(self):
"""
Returns
-------
list of str
identifiers for all the
reports in this report manager
"""
return list(self._summary_data.keys())
def data(self, report_key):
"""
Parameters
----------
report_key: str
An identifier for a particular report
Returns
-------
dict
The data in the report corresponding with
the report key
"""
return self._summary_data[report_key]
def create_summaries(self):
"""
Add summary data and build summary report
"""
self._add_summary_data()
self._plot_manager.create_summary_plots()
self._plot_manager.export_summary_plots()
statistics = self._result_manager.get_result_statistics()
model_names = [
model.model_name() for model in self._config.analysis_models
]
at_least_one_summary = False
for model_name in model_names:
if model_name in self._summary_data:
at_least_one_summary = True
self._summaries[model_name] = self._build_summary_report(
report_key=model_name,
num_configs=self._config.num_configs_per_model,
statistics=statistics)
else:
logger.warning(
f'No data found for model {model_name}, skipping export summary.'
)
if self._config.num_top_model_configs and at_least_one_summary:
self._summaries[TOP_MODELS_REPORT_KEY] = self._build_summary_report(
report_key=TOP_MODELS_REPORT_KEY,
num_configs=self._config.num_top_model_configs,
statistics=statistics)
def export_summaries(self):
"""
Write a PDF summary to disk
"""
for report_key, summary in self._summaries.items():
model_report_dir = os.path.join(self._reports_export_directory,
'summaries', report_key)
os.makedirs(model_report_dir, exist_ok=True)
output_filename = os.path.join(model_report_dir,
'result_summary.pdf')
logger.info(f"Exporting Summary Report to {output_filename}...")
summary.write_report(filename=output_filename)
def create_detailed_reports(self):
"""
Adds detailed report data and build detailed reports
"""
self._add_detailed_report_data()
self._plot_manager.create_detailed_plots()
self._plot_manager.export_detailed_plots()
for report_model_config in self._config.report_model_configs:
model_config_name = report_model_config.model_config_name()
self._detailed_reports[
model_config_name] = self._build_detailed_report(
report_model_config)
def export_detailed_reports(self):
"""
Write a detailed report PDF to disk
"""
for report_key, report in self._detailed_reports.items():
model_report_dir = os.path.join(self._reports_export_directory,
'detailed', report_key)
os.makedirs(model_report_dir, exist_ok=True)
output_filename = os.path.join(model_report_dir,
'detailed_report.pdf')
logger.info(f"Exporting Detailed Report to {output_filename}...")
report.write_report(filename=output_filename)
def _add_summary_data(self):
"""
Adds measurements on which the report manager
can do complex analyses or with which it can
build tables and add to reports
"""
model_names = [
model.model_name() for model in self._config.analysis_models
]
for model_name in model_names:
for result in self._result_manager.top_n_results(
model_name=model_name,
n=self._config.num_configs_per_model):
model_config = result.model_config()
for measurement in result.top_n_measurements(n=1):
self._summary_data[model_name].append(
(model_config, measurement))
if self._config.num_top_model_configs:
for result in self._result_manager.top_n_results(
n=self._config.num_top_model_configs):
model_config = result.model_config()
for measurement in result.top_n_measurements(n=1):
self._summary_data[TOP_MODELS_REPORT_KEY].append(
(model_config, measurement))
def _add_detailed_report_data(self):
"""
Adds data specific to the model configs
for which we want detailed reports
"""
model_names = [
model.model_config_name()
for model in self._config.report_model_configs
]
for model_config_name in model_names:
self._detailed_report_data[
model_config_name] = self._result_manager.get_model_config_measurements(
model_config_name)
def _build_detailed_report(self, report_model_config):
"""
Builder method for a detailed report
"""
detailed_report = PDFReport()
report_key = report_model_config.model_config_name()
model_config, _ = self._detailed_report_data[report_key]
detailed_report.add_title(title="Detailed Report")
detailed_report.add_subheading(subheading=f"Model Config: {report_key}")
if self._mode == 'online':
# Add main latency breakdown image
detailed_plot = os.path.join(self._config.export_path, 'plots',
'detailed', report_key,
'latency_breakdown.png')
detailed_caption = f"Latency Breakdown for Online Performance of {report_key}"
# First add row of detailed
detailed_report.add_images([detailed_plot], [detailed_caption])
# Next add the SimplePlots created for this detailed report
plot_stack = []
caption_stack = []
plot_path = os.path.join(self._config.export_path, 'plots', 'simple',
report_key)
for plot_config in report_model_config.plots():
if model_config.cpu_only() and (
plot_config.y_axis().startswith('gpu_') or
plot_config.x_axis().startswith('gpu_')):
continue
plot_stack.append(
os.path.join(plot_path, f"{plot_config.name()}.png"))
caption_stack.append(
f"{plot_config.title()} curves for config {report_key}")
if len(plot_stack) == 2:
detailed_report.add_images(plot_stack,
caption_stack,
float="left")
plot_stack = []
caption_stack = []
# Odd number of plots
if plot_stack:
detailed_report.add_images(plot_stack, caption_stack, float="left")
# Next add table of measurements
detailed_table = self._build_detailed_table(report_key)
detailed_report.add_table(table=detailed_table)
# Add some details about the config
detailed_info = self._build_detailed_info(report_key)
detailed_report.add_line_breaks(num_breaks=2)
detailed_report.add_paragraph(detailed_info, font_size=18)
detailed_report.add_paragraph(
"The first plot above shows the breakdown of the latencies in "
"the latency throughput curve for this model config. Following that "
"are the requested configurable plots showing the relationship between "
"various metrics measured by the Model Analyzer. The above table contains "
"detailed data for each of the measurements taken for this model config in "
"decreasing order of throughput.",
font_size=18)
return detailed_report
def _build_summary_report(self, report_key, num_configs, statistics):
"""
Builder method for a summary
report.
"""
summary = PDFReport()
total_measurements = statistics.total_measurements(report_key)
total_configurations = statistics.total_configurations(report_key)
num_best_configs = min(num_configs, total_configurations)
# Get GPU names and memory
model_config = self._summary_data[report_key][0][0]
cpu_only = model_config.cpu_only()
gpu_dict = self._get_gpu_stats(
measurements=[v for _, v in self._summary_data[report_key]])
gpu_names = ','.join(list(gpu_dict.keys()))
max_memories = ','.join([str(x) + ' GB' for x in gpu_dict.values()])
# Get batch sizes and constraints
static_batch_sizes = ','.join(
sorted(
set([
str(measurement[1].perf_config()['batch-size'])
for measurement in self._summary_data[report_key]
])))
constraint_strs = self._build_constraint_strings()
constraint_str = "None"
if constraint_strs:
if report_key in constraint_strs:
constraint_str = constraint_strs[report_key]
elif report_key == TOP_MODELS_REPORT_KEY:
constraint_str = constraint_strs['default']
# Build summary table and info sentence
if not cpu_only:
table, summary_sentence = self._build_summary_table(
report_key=report_key,
num_measurements=total_measurements,
gpu_name=gpu_names)
else:
table, summary_sentence = self._build_summary_table(
report_key=report_key,
num_measurements=total_measurements,
cpu_only=True)
# Add summary sections
summary.add_title(title=f"{self._mode.title()} Result Summary")
summary.add_subheading(f"Model: {report_key}")
if not cpu_only:
summary.add_paragraph(f"GPU(s): {gpu_names}")
summary.add_paragraph(f"Total Available GPU Memory: {max_memories}")
summary.add_paragraph(
f"Client Request Batch Size: {static_batch_sizes}")
summary.add_paragraph(f"Constraint targets: {constraint_str}")
summary.add_paragraph(summary_sentence)
summary.add_paragraph(
f"Curves corresponding to the {num_best_configs} best model "
f"configuration(s) out of a total of {total_configurations} are "
"shown in the plots.")
throughput_plot_config = self._config.plots[0]
throughput_plot = os.path.join(self._config.export_path, 'plots',
'simple', report_key,
f'{throughput_plot_config.name()}.png')
caption_throughput = f"{throughput_plot_config.title()} curves for {num_best_configs} best configurations."
if not cpu_only:
summary.add_paragraph(
"The maximum GPU memory consumption for each of the above points is"
f" shown in the second plot. The GPUs {gpu_names} have"
f" a total available memory of {max_memories} respectively.")
summary.add_images([throughput_plot], [caption_throughput],
image_width=66)
if self._mode == 'online':
memory_latency_plot = os.path.join(self._config.export_path,
'plots', 'simple',
report_key,
'gpu_mem_v_latency.png')
caption_memory_latency = f"GPU Memory vs. Latency curves for {num_best_configs} best configurations."
summary.add_images([memory_latency_plot],
[caption_memory_latency],
image_width=66)
else:
summary.add_paragraph(
"The maximum GPU memory consumption for each of the above points is"
f" shown in the second plot.")
summary.add_images([throughput_plot], [caption_throughput],
image_width=66)
if self._mode == 'online':
memory_latency_plot = os.path.join(self._config.export_path,
'plots', 'simple',
report_key,
'cpu_mem_v_latency.png')
caption_memory_latency = f"CPU Memory vs. Latency curves for {num_best_configs} best configurations."
summary.add_images([memory_latency_plot],
[caption_memory_latency],
image_width=66)
summary.add_paragraph(
"The following table summarizes each configuration at the measurement"
" that optimizes the desired metrics under the given constraints.")
summary.add_table(table=table)
return summary
def _get_dynamic_batching_phrase(self, config):
dynamic_batching_str = config.dynamic_batching_string()
if dynamic_batching_str == "Disabled":
dynamic_batch_phrase = "dynamic batching disabled"
elif dynamic_batching_str == "Enabled":
dynamic_batch_phrase = "dynamic batching enabled"
else:
dynamic_batch_phrase = f"preferred batch size of {dynamic_batching_str}"
return dynamic_batch_phrase
def _build_summary_table(self,
report_key,
num_measurements,
gpu_name=None,
cpu_only=False):
"""
Creates a result table corresponding
to the best measurements for a particular
model
"""
if not cpu_only:
summary_table = ResultTable(headers=[
'Model Config Name', 'Preferred Batch Size', 'Instance Count',
'p99 Latency (ms)', 'Throughput (infer/sec)',
'Max CPU Memory Usage (MB)', 'Max GPU Memory Usage (MB)',
'Average GPU Utilization (%)'
],
title="Report Table")
else:
summary_table = ResultTable(headers=[
'Model Config Name', 'Preferred Batch Size', 'Instance Count',
'p99 Latency (ms)', 'Throughput (infer/sec)',
'Max CPU Memory Usage (MB)'
],
title="Report Table")
sorted_measurements = sorted(self._summary_data[report_key],
key=lambda x: x[1])
# Construct summary sentence using best config
best_config = sorted_measurements[0][0]
model_config_dict = best_config.get_config()
platform = model_config_dict['backend'] if \
'backend' in model_config_dict \
else model_config_dict['platform']
dynamic_batch_phrase = self._get_dynamic_batching_phrase(best_config)
if not best_config.cpu_only():
summary_sentence = (
f"In {num_measurements} measurement(s), "
f"{best_config.instance_group_string()} model instance(s) "
f"with {dynamic_batch_phrase} "
f"on platform {platform} delivers "
f"maximum throughput under the given constraints on GPU(s) {gpu_name}."
)
else:
summary_sentence = (
f"In {num_measurements} measurement(s), "
f"{best_config.instance_group_string()} model instance(s) "
f"with {dynamic_batch_phrase} "
f"on platform {platform} delivers "
f"maximum throughput.")
# Construct table
if not cpu_only:
for model_config, measurement in sorted_measurements:
instance_group_str = model_config.instance_group_string()
row = [
model_config.get_field('name'),
model_config.dynamic_batching_string(), instance_group_str,
measurement.get_metric_value('perf_latency_p99'),
measurement.get_metric_value('perf_throughput'),
measurement.get_metric_value('cpu_used_ram'),
measurement.get_metric_value('gpu_used_memory'),
round(measurement.get_metric_value('gpu_utilization'), 1)
]
summary_table.insert_row_by_index(row)
else:
for model_config, measurement in sorted_measurements:
instance_group_str = model_config.instance_group_string()
row = [
model_config.get_field('name'),
model_config.dynamic_batching_string(), instance_group_str,
measurement.get_metric_value('perf_latency_p99'),
measurement.get_metric_value('perf_throughput'),
measurement.get_metric_value('cpu_used_ram')
]
summary_table.insert_row_by_index(row)
return summary_table, summary_sentence
def _build_detailed_table(self, model_config_name):
"""
Build the table used in the detailed report
"""
model_config, measurements = self._detailed_report_data[
model_config_name]
sort_by_tag = 'perf_latency_p99' if self._mode == 'online' else 'perf_throughput'
measurements = sorted(measurements,
key=lambda x: x.get_metric_value(sort_by_tag),
reverse=True)
cpu_only = model_config.cpu_only()
first_column_header = 'Request Concurrency' if self._mode == 'online' else 'Client Batch Size'
first_column_tag = 'concurrency-range' if self._mode == 'online' else 'batch-size'
if not cpu_only:
detailed_table = ResultTable(headers=[
first_column_header, 'p99 Latency (ms)',
'Client Response Wait (ms)', 'Server Queue (ms)',
'Server Compute Input (ms)', 'Server Compute Infer (ms)',
'Throughput (infer/sec)', 'Max CPU Memory Usage (MB)',
'Max GPU Memory Usage (MB)', 'Average GPU Utilization (%)'
],
title="Detailed Table")
else:
detailed_table = ResultTable(headers=[
first_column_header, 'p99 Latency (ms)',
'Client Response Wait (ms)', 'Server Queue (ms)',
'Server Compute Input (ms)', 'Server Compute Infer (ms)',
'Throughput (infer/sec)', 'Max CPU Memory Usage (MB)'
],
title="Detailed Table")
# Construct table
if not cpu_only:
for measurement in measurements:
row = [
measurement.get_parameter(first_column_tag),
measurement.get_metric_value('perf_latency_p99'),
measurement.get_metric_value('perf_client_response_wait'),
measurement.get_metric_value('perf_server_queue'),
measurement.get_metric_value('perf_server_compute_input'),
measurement.get_metric_value('perf_server_compute_infer'),
measurement.get_metric_value('perf_throughput'),
measurement.get_metric_value('cpu_used_ram'),
measurement.get_metric_value('gpu_used_memory'),
round(measurement.get_metric_value('gpu_utilization'), 1)
]
detailed_table.insert_row_by_index(row)
else:
for measurement in measurements:
row = [
measurement.get_parameter(first_column_tag),
measurement.get_metric_value('perf_latency_p99'),
measurement.get_metric_value('perf_client_response_wait'),
measurement.get_metric_value('perf_server_queue'),
measurement.get_metric_value('perf_server_compute_input'),
measurement.get_metric_value('perf_server_compute_infer'),
measurement.get_metric_value('perf_throughput'),
measurement.get_metric_value('cpu_used_ram')
]
detailed_table.insert_row_by_index(row)
return detailed_table
def _build_detailed_info(self, model_config_name):
"""
Constructs important info sentence about the model config
specified
"""
model_config, measurements = self._detailed_report_data[
model_config_name]
instance_group_string = model_config.instance_group_string()
dynamic_batching_string = model_config.dynamic_batching_string()
platform = model_config.get_field('platform')
# Measurements and GPU info
if model_config.cpu_only():
sentence = (
f"The model config {model_config_name} uses {instance_group_string.replace('/', ' ')} "
f"instances. {len(measurements)} measurements were obtained for the model config "
f"on CPU. ")
else:
gpu_dict = self._get_gpu_stats(measurements=measurements)
gpu_names = ','.join(list(gpu_dict.keys()))
max_memories = ','.join([str(x) + ' GB' for x in gpu_dict.values()])
sentence = (
f"The model config \"{model_config_name}\" uses {instance_group_string.replace('/', ' ')} "
f"instances. {len(measurements)} measurements were obtained for the model config "
f"on GPU(s) {gpu_names} with memory limit(s) {max_memories}. This model "
f"uses the platform {platform}. ")
# Dynamic batching
if dynamic_batching_string == 'N/A':
sentence += "This model does not support batching. "
elif dynamic_batching_string == 'Disabled':
sentence += "This model config has dynamic batching disabled. "
else:
sentence += (
"This model config has dynamic batching enabled "
f"with preferred batch size(s) {dynamic_batching_string}. ")
return sentence
def _get_gpu_stats(self, measurements):
"""
Gets names and memory infos of GPUs used in measurements
"""
gpu_dict = {}
for measurement in measurements:
for gpu_uuid, gpu_info in self._gpu_info.items():
if gpu_uuid in measurement.gpus_used():
gpu_name = gpu_info['name']
max_memory = round(gpu_info['total_memory'] / (2**30), 1)
if gpu_name not in gpu_dict:
gpu_dict[gpu_name] = max_memory
return gpu_dict
def _build_constraint_strings(self):
"""
Constructs constraint strings to show the constraints under which
each model is being run.
"""
constraint_strs = {}
for model_name, model_constraints in ConstraintManager.get_constraints_for_all_models(
self._config).items():
strs = []
if model_constraints:
for metric, constraint in model_constraints.items():
metric_header = MetricsManager.get_metric_types(
[metric])[0].header(aggregation_tag='')
for constraint_type, constraint_val in constraint.items():
# String looks like 'Max p99 Latency : 99 ms'
metric_header_name = metric_header.rsplit(' ', 1)[0]
metric_unit = metric_header.rsplit(' ', 1)[1][1:-1]
strs.append(
f"{constraint_type.capitalize()} {metric_header_name} : {constraint_val} {metric_unit}"
)
constraint_strs[model_name] = ', '.join(strs)
return constraint_strs
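
    # Illustrative example (added): assuming a metric header of "p99 Latency (ms)"
    # and a model constraint of {"perf_latency_p99": {"max": 99}}, the loop above
    # yields the string "Max p99 Latency : 99 ms" for that model.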
|
api/util.py | AmbiteamProject/spleeter-web | 202 | 11148020 | <filename>api/util.py
import re
def get_valid_filename(s):
"""
Return the given string converted to a string that can be used for a clean
filename. Remove leading and trailing spaces; remove anything that is not an
alphanumeric, dash, whitespace, comma, bracket, underscore, or dot.
>>> get_valid_filename("john's portrait in 2004.jpg")
    'johns portrait in 2004.jpg'
"""
s = str(s).strip()
return re.sub(r'(?u)[^-\w\s.,[\]()]', '', s)
|
2017/quals/2017-crypto-selfhash/challenge/challenge.py | tonghuaroot/google-ctf | 2,757 | 11148021 | <filename>2017/quals/2017-crypto-selfhash/challenge/challenge.py
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
def crc_82_darc(s):
r = 0
for c in s:
r ^= ord(c)
for _ in range(8):
if r & 1:
r ^= 0x441011401440444018861
r >>= 1
return r
def main():
sys.stdout.write("Give me some data: ")
sys.stdout.flush()
data = sys.stdin.readline().strip()
print
if not (len(data) == 82):
print "Check failed.\nExpected:"
print " len(data) == 82"
print "Was:"
print " %r" % len(data)
return
if not (set(data) <= set("01")):
print "Check failed.\nExpected: "
print " set(data) <= set(\"01\")"
print "Was:"
print " %r" % set(data)
return
if not (crc_82_darc(data) == int(data, 2)):
print "Check failed.\nExpected: "
print " crc_82_darc(data) == int(data, 2)"
print "Was:"
print " %r" % crc_82_darc(data)
print " %r" % int(data, 2)
return
with open('flag.txt') as fd:
print fd.read()
main()
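
# Illustrative note (added, not part of the original challenge): the final check
# requires an 82-character bit string whose CRC-82/DARC equals the integer the bits
# encode, i.e. the input must be a fixed point of crc_82_darc; for example,
# format(crc_82_darc('0' * 82), '082b') merely shows the 82-bit checksum width.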
|
scripts/content/playlist.py | sudheer09441/Saavn-Downloader | 119 | 11148074 | <filename>scripts/content/playlist.py<gh_stars>100-1000
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
import json
from ..download_manager import Manager
class Playlist():
def __init__(self, proxies, headers, url=None):
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
self.proxies = proxies
self.headers = headers
self.playlistID = None
self.songs_json = []
self.url = url
def getPlaylistID(self, url=None):
if url:
input_url = url
else:
input_url = self.url
token = input_url.split("/")[-1]
input_url = "https://www.jiosaavn.com/api.php?__call=webapi.get&token={0}&type=playlist&p=1&n=20&includeMetaTags=0&ctx=web6dot0&api_version=4&_format=json&_marker=0".format(token)
try:
res = requests.get(input_url, proxies=self.proxies, headers=self.headers)
except Exception as e:
            print('Error accessing website: {0}'.format(e))
exit()
self.playlistID = res.json()["id"]
return self.playlistID
def setPlaylistID(self, playlistID=None):
self.playlistID = playlistID
def getPlaylist(self, playlistID=None):
if playlistID is None:
playlistID = self.playlistID
response = requests.get(
'https://www.jiosaavn.com/api.php?listid={0}&_format=json&__call=playlist.getDetails'.format(playlistID), verify=False, proxies=self.proxies, headers=self.headers)
if response.status_code == 200:
self.songs_json = [x for x in response.text.splitlines() if x.strip().startswith('{')][0]
self.songs_json = json.loads(self.songs_json)
return self.songs_json
def downloadPlaylist(self):
if self.playlistID is not None:
print("Initiating Playlist Downloading")
manager = Manager()
manager.downloadSongs(self.getPlaylist())
def start_download(self):
self.getPlaylistID()
self.downloadPlaylist() |
Lesson02/cyclic_rotation.py | nawang87/codility_lessons | 114 | 11148076 | """
Task:CyclicRotation
An array A consisting of N integers is given. Rotation of the array means that each element is shifted right by one index, and the last element of the array is moved to the first place. For example, the rotation of array A = [3, 8, 9, 7, 6] is [6, 3, 8, 9, 7] (elements are shifted right by one index and 6 is moved to the first place).
The goal is to rotate array A K times; that is, each element of A will be shifted to the right K times.
Write a function:
def solution(A, K)
that, given an array A consisting of N integers and an integer K, returns the array A rotated K times.
For example, given
A = [3, 8, 9, 7, 6]
K = 3
the function should return [9, 7, 6, 3, 8]. Three rotations were made:
[3, 8, 9, 7, 6] -> [6, 3, 8, 9, 7]
[6, 3, 8, 9, 7] -> [7, 6, 3, 8, 9]
[7, 6, 3, 8, 9] -> [9, 7, 6, 3, 8]
For another example, given
A = [0, 0, 0]
K = 1
the function should return [0, 0, 0]
Given
A = [1, 2, 3, 4]
K = 4
the function should return [1, 2, 3, 4]
Assume that:
N and K are integers within the range [0..100];
each element of array A is an integer within the range [−1,000..1,000].
In your solution, focus on correctness. The performance of your solution will not be the focus of the assessment.
Copyright 2009–2019 by Codility Limited. All Rights Reserved. Unauthorized copying, publication or disclosure prohibited.
You can check out the result at https://app.codility.com/demo/results/trainingBXQ39V-BTU/ .
"""
# you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(A , K):
# write your code in Python 3.6
    # Rotate by concatenating slices of the array; A[-1] is the last element of the array, and so on.
    # For more detail, see https://openhome.cc/Gossip/CodeData/PythonTutorial/NumericString.html .
if len(A) == 0:
return A
K = K % len(A)
return A[-K:] + A[:-K]
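
# Worked example (added): for A = [3, 8, 9, 7, 6] and K = 3, K % len(A) = 3,
# A[-3:] = [9, 7, 6] and A[:-3] = [3, 8], so the result is [9, 7, 6, 3, 8],
# matching the task statement above.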
# testcase 1
A = [3, 8, 9, 7, 6]
K = 3
print(solution(A , K))
# testcase 2
A = [0, 0, 0]
K = 1
print(solution(A , K))
# testcase 3
A = [1, 2, 3, 4]
K = 4
print(solution(A , K)) |
tests/kafkatest/tests/streams/streams_broker_down_resilience_test.py | mitchell-h/kafka | 126 | 11148085 | <filename>tests/kafkatest/tests/streams/streams_broker_down_resilience_test.py
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from kafkatest.services.streams import StreamsBrokerDownResilienceService
from kafkatest.tests.streams.base_streams_test import BaseStreamsTest
class StreamsBrokerDownResilience(BaseStreamsTest):
"""
This test validates that Streams is resilient to a broker
being down longer than specified timeouts in configs
"""
inputTopic = "streamsResilienceSource"
outputTopic = "streamsResilienceSink"
client_id = "streams-broker-resilience-verify-consumer"
num_messages = 10000
message = "processed [0-9]* messages"
connected_message = "Discovered group coordinator"
def __init__(self, test_context):
super(StreamsBrokerDownResilience, self).__init__(test_context,
topics={self.inputTopic: {'partitions': 3, 'replication-factor': 1},
self.outputTopic: {'partitions': 1, 'replication-factor': 1}},
num_brokers=1)
def setUp(self):
self.zk.start()
def test_streams_resilient_to_broker_down(self):
self.kafka.start()
# Broker should be down over 2x of retries * timeout ms
# So with (2 * 15000) = 30 seconds, we'll set downtime to 70 seconds
broker_down_time_in_seconds = 70
processor = StreamsBrokerDownResilienceService(self.test_context, self.kafka, self.get_configs())
processor.start()
self.assert_produce_consume(self.inputTopic,
self.outputTopic,
self.client_id,
"before_broker_stop")
node = self.kafka.leader(self.inputTopic)
self.kafka.stop_node(node)
time.sleep(broker_down_time_in_seconds)
with processor.node.account.monitor_log(processor.LOG_FILE) as monitor:
self.kafka.start_node(node)
monitor.wait_until(self.connected_message,
timeout_sec=120,
err_msg=("Never saw output '%s' on " % self.connected_message) + str(processor.node.account))
self.assert_produce_consume(self.inputTopic,
self.outputTopic,
self.client_id,
"after_broker_stop",
timeout_sec=120)
self.kafka.stop()
def test_streams_runs_with_broker_down_initially(self):
self.kafka.start()
node = self.kafka.leader(self.inputTopic)
self.kafka.stop_node(node)
configs = self.get_configs(extra_configs=",application.id=starting_wo_broker_id")
# start streams with broker down initially
processor = StreamsBrokerDownResilienceService(self.test_context, self.kafka, configs)
processor.start()
processor_2 = StreamsBrokerDownResilienceService(self.test_context, self.kafka, configs)
processor_2.start()
processor_3 = StreamsBrokerDownResilienceService(self.test_context, self.kafka, configs)
processor_3.start()
broker_unavailable_message = "Broker may not be available"
# verify streams instances unable to connect to broker, kept trying
self.wait_for_verification(processor, broker_unavailable_message, processor.LOG_FILE, 10)
self.wait_for_verification(processor_2, broker_unavailable_message, processor_2.LOG_FILE, 10)
self.wait_for_verification(processor_3, broker_unavailable_message, processor_3.LOG_FILE, 10)
with processor.node.account.monitor_log(processor.LOG_FILE) as monitor_1:
with processor_2.node.account.monitor_log(processor_2.LOG_FILE) as monitor_2:
with processor_3.node.account.monitor_log(processor_3.LOG_FILE) as monitor_3:
self.kafka.start_node(node)
monitor_1.wait_until(self.connected_message,
timeout_sec=120,
err_msg=("Never saw '%s' on " % self.connected_message) + str(processor.node.account))
monitor_2.wait_until(self.connected_message,
timeout_sec=120,
err_msg=("Never saw '%s' on " % self.connected_message) + str(processor_2.node.account))
monitor_3.wait_until(self.connected_message,
timeout_sec=120,
err_msg=("Never saw '%s' on " % self.connected_message) + str(processor_3.node.account))
with processor.node.account.monitor_log(processor.STDOUT_FILE) as monitor_1:
with processor_2.node.account.monitor_log(processor_2.STDOUT_FILE) as monitor_2:
with processor_3.node.account.monitor_log(processor_3.STDOUT_FILE) as monitor_3:
self.assert_produce(self.inputTopic,
"sending_message_after_broker_down_initially",
num_messages=self.num_messages,
timeout_sec=120)
monitor_1.wait_until(self.message,
timeout_sec=120,
err_msg=("Never saw '%s' on " % self.message) + str(processor.node.account))
monitor_2.wait_until(self.message,
timeout_sec=120,
err_msg=("Never saw '%s' on " % self.message) + str(processor_2.node.account))
monitor_3.wait_until(self.message,
timeout_sec=120,
err_msg=("Never saw '%s' on " % self.message) + str(processor_3.node.account))
self.assert_consume(self.client_id,
"consuming_message_after_broker_down_initially",
self.outputTopic,
num_messages=self.num_messages,
timeout_sec=120)
self.kafka.stop()
def test_streams_should_scale_in_while_brokers_down(self):
self.kafka.start()
configs = self.get_configs(extra_configs=",application.id=shutdown_with_broker_down")
processor = StreamsBrokerDownResilienceService(self.test_context, self.kafka, configs)
processor.start()
processor_2 = StreamsBrokerDownResilienceService(self.test_context, self.kafka, configs)
processor_2.start()
processor_3 = StreamsBrokerDownResilienceService(self.test_context, self.kafka, configs)
# need to wait for rebalance once
rebalance = "State transition from REBALANCING to RUNNING"
with processor_3.node.account.monitor_log(processor_3.LOG_FILE) as monitor:
processor_3.start()
monitor.wait_until(rebalance,
timeout_sec=120,
err_msg=("Never saw output '%s' on " % rebalance) + str(processor_3.node.account))
with processor.node.account.monitor_log(processor.STDOUT_FILE) as monitor_1:
with processor_2.node.account.monitor_log(processor_2.STDOUT_FILE) as monitor_2:
with processor_3.node.account.monitor_log(processor_3.STDOUT_FILE) as monitor_3:
self.assert_produce(self.inputTopic,
"sending_message_normal_broker_start",
num_messages=self.num_messages,
timeout_sec=120)
monitor_1.wait_until(self.message,
timeout_sec=120,
err_msg=("Never saw '%s' on " % self.message) + str(processor.node.account))
monitor_2.wait_until(self.message,
timeout_sec=120,
err_msg=("Never saw '%s' on " % self.message) + str(processor_2.node.account))
monitor_3.wait_until(self.message,
timeout_sec=120,
err_msg=("Never saw '%s' on " % self.message) + str(processor_3.node.account))
self.assert_consume(self.client_id,
"consuming_message_normal_broker_start",
self.outputTopic,
num_messages=self.num_messages,
timeout_sec=120)
node = self.kafka.leader(self.inputTopic)
self.kafka.stop_node(node)
processor.stop()
processor_2.stop()
shutdown_message = "Complete shutdown of streams resilience test app now"
self.wait_for_verification(processor, shutdown_message, processor.STDOUT_FILE)
self.wait_for_verification(processor_2, shutdown_message, processor_2.STDOUT_FILE)
with processor_3.node.account.monitor_log(processor_3.LOG_FILE) as monitor_3:
self.kafka.start_node(node)
monitor_3.wait_until(self.connected_message,
timeout_sec=120,
err_msg=("Never saw '%s' on " % self.connected_message) + str(processor_3.node.account))
self.assert_produce_consume(self.inputTopic,
self.outputTopic,
self.client_id,
"sending_message_after_stopping_streams_instance_bouncing_broker",
num_messages=self.num_messages,
timeout_sec=120)
self.kafka.stop()
def test_streams_should_failover_while_brokers_down(self):
self.kafka.start()
configs = self.get_configs(extra_configs=",application.id=failover_with_broker_down")
processor = StreamsBrokerDownResilienceService(self.test_context, self.kafka, configs)
processor.start()
processor_2 = StreamsBrokerDownResilienceService(self.test_context, self.kafka, configs)
processor_2.start()
processor_3 = StreamsBrokerDownResilienceService(self.test_context, self.kafka, configs)
# need to wait for rebalance once
rebalance = "State transition from REBALANCING to RUNNING"
with processor_3.node.account.monitor_log(processor_3.LOG_FILE) as monitor:
processor_3.start()
monitor.wait_until(rebalance,
timeout_sec=120,
err_msg=("Never saw output '%s' on " % rebalance) + str(processor_3.node.account))
with processor.node.account.monitor_log(processor.STDOUT_FILE) as monitor_1:
with processor_2.node.account.monitor_log(processor_2.STDOUT_FILE) as monitor_2:
with processor_3.node.account.monitor_log(processor_3.STDOUT_FILE) as monitor_3:
self.assert_produce(self.inputTopic,
"sending_message_after_normal_broker_start",
num_messages=self.num_messages,
timeout_sec=120)
monitor_1.wait_until(self.message,
timeout_sec=120,
err_msg=("Never saw '%s' on " % self.message) + str(processor.node.account))
monitor_2.wait_until(self.message,
timeout_sec=120,
err_msg=("Never saw '%s' on " % self.message) + str(processor_2.node.account))
monitor_3.wait_until(self.message,
timeout_sec=120,
err_msg=("Never saw '%s' on " % self.message) + str(processor_3.node.account))
self.assert_consume(self.client_id,
"consuming_message_after_normal_broker_start",
self.outputTopic,
num_messages=self.num_messages,
timeout_sec=120)
node = self.kafka.leader(self.inputTopic)
self.kafka.stop_node(node)
processor.abortThenRestart()
processor_2.abortThenRestart()
processor_3.abortThenRestart()
with processor.node.account.monitor_log(processor.LOG_FILE) as monitor_1:
with processor_2.node.account.monitor_log(processor_2.LOG_FILE) as monitor_2:
with processor_3.node.account.monitor_log(processor_3.LOG_FILE) as monitor_3:
self.kafka.start_node(node)
monitor_1.wait_until(self.connected_message,
timeout_sec=120,
err_msg=("Never saw '%s' on " % self.connected_message) + str(processor.node.account))
monitor_2.wait_until(self.connected_message,
timeout_sec=120,
err_msg=("Never saw '%s' on " % self.connected_message) + str(processor_2.node.account))
monitor_3.wait_until(self.connected_message,
timeout_sec=120,
err_msg=("Never saw '%s' on " % self.connected_message) + str(processor_3.node.account))
with processor.node.account.monitor_log(processor.STDOUT_FILE) as monitor_1:
with processor_2.node.account.monitor_log(processor_2.STDOUT_FILE) as monitor_2:
with processor_3.node.account.monitor_log(processor_3.STDOUT_FILE) as monitor_3:
self.assert_produce(self.inputTopic,
"sending_message_after_hard_bouncing_streams_instance_bouncing_broker",
num_messages=self.num_messages,
timeout_sec=120)
monitor_1.wait_until(self.message,
timeout_sec=120,
err_msg=("Never saw '%s' on " % self.message) + str(processor.node.account))
monitor_2.wait_until(self.message,
timeout_sec=120,
err_msg=("Never saw '%s' on " % self.message) + str(processor_2.node.account))
monitor_3.wait_until(self.message,
timeout_sec=120,
err_msg=("Never saw '%s' on " % self.message) + str(processor_3.node.account))
self.assert_consume(self.client_id,
"consuming_message_after_stopping_streams_instance_bouncing_broker",
self.outputTopic,
num_messages=self.num_messages,
timeout_sec=120)
self.kafka.stop()
|
third_party/chromite/cros_bisect/manual_evaluator_unittest.py | zipated/src | 2,151 | 11148096 | <reponame>zipated/src<gh_stars>1000+
# -*- coding: utf-8 -*-
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test manual_evaluator module."""
from __future__ import print_function
import os
from chromite.cros_bisect import common
from chromite.cros_bisect import manual_evaluator
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import osutils
class TestManualEvaluator(cros_test_lib.MockTempDirTestCase,
cros_test_lib.OutputTestCase):
"""Tests ManualEvaluator class."""
BUILD_LABEL = 'test_build'
BUILD_LABEL2 = 'test_build2'
def setUp(self):
"""Sets up default evaluator."""
options = cros_test_lib.EasyAttr(base_dir=self.tempdir, reuse_eval=True)
self.evaluator = manual_evaluator.ManualEvaluator(options)
def testGetReportPath(self):
"""Tests GetReportPath()."""
self.assertEqual(
os.path.join(self.tempdir, 'reports',
'manual.%s.report' % self.BUILD_LABEL),
self.evaluator.GetReportPath(self.BUILD_LABEL))
def testEvaluate(self):
"""Tests Evaluate()."""
report_path = self.evaluator.GetReportPath(self.BUILD_LABEL)
m = self.PatchObject(cros_build_lib, 'GetInput')
m.return_value = 'yes'
self.assertEqual(common.Score([1.0]),
self.evaluator.Evaluate(None, self.BUILD_LABEL))
self.assertEqual('1', osutils.ReadFile(report_path))
m.return_value = 'no'
self.assertEqual(common.Score([0.0]),
self.evaluator.Evaluate(None, self.BUILD_LABEL))
self.assertEqual('0', osutils.ReadFile(report_path))
def testCheckLastEvaluate(self):
"""Tests CheckLastEvaluate()."""
# Report does not exist.
self.assertFalse(self.evaluator.CheckLastEvaluate(self.BUILD_LABEL))
# Generate a report for BUILD_LABEL
m = self.PatchObject(cros_build_lib, 'GetInput')
m.return_value = 'yes'
self.evaluator.Evaluate(None, self.BUILD_LABEL)
# Found latest evaluation result.
self.assertEqual(common.Score([1.0]),
self.evaluator.CheckLastEvaluate(self.BUILD_LABEL))
# Yet another unseen build.
self.assertFalse(self.evaluator.CheckLastEvaluate(self.BUILD_LABEL2))
# Generate a report for BUILD_LABEL2
m.return_value = 'no'
self.evaluator.Evaluate(None, self.BUILD_LABEL2)
# Found latest evaluation result.
self.assertEqual(common.Score([1.0]),
self.evaluator.CheckLastEvaluate(self.BUILD_LABEL))
self.assertEqual(common.Score([0.0]),
self.evaluator.CheckLastEvaluate(self.BUILD_LABEL2))
def testCheckLastLabelWithReuseEvalOptionUnset(self):
"""Tests CheckLastEvaluate() with options.reuse_eval unset."""
options = cros_test_lib.EasyAttr(base_dir=self.tempdir, reuse_eval=False)
self.evaluator = manual_evaluator.ManualEvaluator(options)
# Report does not exist.
self.assertFalse(self.evaluator.CheckLastEvaluate(self.BUILD_LABEL))
# Generate a report for BUILD_LABEL
m = self.PatchObject(cros_build_lib, 'GetInput')
m.return_value = 'yes'
self.evaluator.Evaluate(None, self.BUILD_LABEL)
# Unlike testCheckLastEvaluate(), it returns empty Score() object.
self.assertFalse(self.evaluator.CheckLastEvaluate(self.BUILD_LABEL))
|
dynamic_rcnn/utils/pyt_utils.py | yyzq1/bigwork | 177 | 11148098 | <reponame>yyzq1/bigwork
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import errno
import os
import cv2
def mkdir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def link_file(src, target):
"""symbol link the source directories to target."""
if os.path.isdir(target) or os.path.isfile(target):
os.remove(target)
os.system('ln -s {} {}'.format(src, target))
def findContours(*args, **kwargs):
"""
    Wraps cv2.findContours to maintain compatibility between versions
3 and 4
Returns:
contours, hierarchy
"""
if cv2.__version__.startswith('4'):
contours, hierarchy = cv2.findContours(*args, **kwargs)
elif cv2.__version__.startswith('3'):
_, contours, hierarchy = cv2.findContours(*args, **kwargs)
else:
raise AssertionError(
'cv2 must be either version 3 or 4 to call this method')
return contours, hierarchy
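
# Illustrative usage (added; assumes numpy is available as np):
#   mask = np.zeros((64, 64), dtype=np.uint8); mask[16:48, 16:48] = 1
#   contours, hierarchy = findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# The wrapper returns the same (contours, hierarchy) pair under OpenCV 3.x and 4.x.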
def draw_box(image, box, label, color=(0, 0, 255), score=None, linewidth=2):
"""Draw a bounding box with label on the image."""
if score is not None:
text = "{}: {:.4f}".format(label, score)
else:
text = str(label)
cv2.rectangle(image, (int(box[0]), int(box[1])),
(int(box[2]), int(box[3])), color, linewidth)
cx = box[0] + (box[2] - box[0]) / 2 - 5
cy = box[1] + 12
cv2.putText(image, text, (int(cx), int(cy)),
cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255)) |
lullaby/tools/generate_entity_schema.py | dherbst/lullaby | 1,198 | 11148109 | """Append Lullaby-specific Entity Schema to a client Component Schema."""
from __future__ import absolute_import
import argparse
from shutil import copyfile
def main():
parser = argparse.ArgumentParser(description='Create an entity schema fbs.')
parser.add_argument('--infile', '-i', help='Input component fbs file.',
required=True)
parser.add_argument('--appendfile', '-x', help='Entity schema file.',
required=True)
parser.add_argument('--outfile', '-o', help='Output entity fbs file.',
required=True)
parser.add_argument(
'--identifier',
help='file_identifier of the resulting flatbuffer type.',
required=True)
args = parser.parse_args()
copyfile(args.infile, args.outfile)
txt = ''
with open(args.appendfile, 'r') as f:
txt = f.read()
with open(args.outfile, 'a') as f:
f.write(txt)
f.write('file_identifier "%s";' % args.identifier)
if __name__ == '__main__':
main()
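
# Illustrative invocation (added; the file names are hypothetical):
#   python generate_entity_schema.py -i my_components.fbs -x entity_schema.fbs \
#       -o entity_generated.fbs --identifier ENTS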
|
finetune_src/tagging_eval.py | yangshoujian/CoSPA | 119 | 11148141 | <reponame>yangshoujian/CoSPA
#-*-coding:utf8-*-
import sys, os
import numpy as np
import tensorflow as tf
import modeling
import optimization
import time
os.environ["PYTHONIOENCODING"] = "utf-8"
tf.logging.set_verbosity(tf.logging.ERROR)
def score_f(ans, print_flg=False, only_check=False, out_dir=''):
fout = open('%s/pred.txt' % out_dir, 'w', encoding="utf-8")
total_gold_err, total_pred_err, right_pred_err = 0, 0, 0
check_right_pred_err = 0
inputs, golds, preds = ans
assert len(inputs) == len(golds)
assert len(golds) == len(preds)
for ori, god, prd in zip(inputs, golds, preds):
ori_txt = str(ori)
god_txt = str(god) #''.join(list(map(str, god)))
prd_txt = str(prd) #''.join(list(map(str, prd)))
if print_flg is True:
print(ori_txt, '\t', god_txt, '\t', prd_txt)
if 'UNK' in ori_txt:
continue
if ori_txt == god_txt and ori_txt == prd_txt:
continue
if prd_txt != god_txt:
fout.writelines('%s\t%s\t%s\n' % (ori_txt, god_txt, prd_txt))
if ori != god:
total_gold_err += 1
if prd != ori:
total_pred_err += 1
if (ori != god) and (prd != ori):
check_right_pred_err += 1
if god == prd:
right_pred_err += 1
fout.close()
#check p, r, f
p = 1. * check_right_pred_err / (total_pred_err + 0.001)
r = 1. * check_right_pred_err / (total_gold_err + 0.001)
f = 2 * p * r / (p + r + 1e-13)
print('token check: p=%.3f, r=%.3f, f=%.3f' % (p, r, f))
if only_check is True:
return p, r, f
#correction p, r, f
#p = 1. * right_pred_err / (total_pred_err + 0.001)
pc = 1. * right_pred_err / (check_right_pred_err + 0.001)
rc = 1. * right_pred_err / (total_gold_err + 0.001)
fc = 2 * pc * rc / (pc + rc + 1e-13)
print('token correction: p=%.3f, r=%.3f, f=%.3f' % (pc, rc, fc))
return p, r, f
def score_f_py(ans_py, ans_zi, out_dir, print_flg=False, only_check=False):
fout = open('%s/pred_py.txt' % out_dir, 'w', encoding="utf-8")
total_gold_err, total_pred_err, right_pred_err = 0, 0, 0
check_right_pred_err = 0
inputs, golds, preds = ans_py
inputs_z, golds_z, preds_z = ans_zi
assert len(inputs) == len(golds)
assert len(golds) == len(preds)
assert len(inputs_z) == len(golds_z)
index = -1
total_len = len(inputs_z)
for ori, god, prd in zip(inputs_z, golds_z, preds_z):
index += 1
ori_txt = str(ori)
god_txt = str(god) #''.join(list(map(str, god)))
prd_txt = str(prd) #''.join(list(map(str, prd)))
if print_flg is True:
print(ori_txt, '\t', god_txt, '\t', prd_txt)
if 'UNK' in ori_txt:
continue
ori_py, god_py, prd_py = str(inputs[index]), str(golds[index]), str(preds[index])
if (ori_txt == god_txt and ori_txt == prd_txt and prd_py == ori_py):
continue
if (god_txt != prd_txt) or (prd_py != ori_py):
start_idx = index - 5
if start_idx < 0: start_idx = 0
end_idx = index + 5
if end_idx > total_len: end_idx = total_len
for _idx in range(start_idx, end_idx, 1):
fout.writelines('%s\t%s\t%s\t%s\t%s\t%s\n' % (inputs_z[_idx], golds_z[_idx], preds_z[_idx], inputs[_idx], golds[_idx], preds[_idx]))
fout.writelines('\n')
if ori != god:
total_gold_err += 1
if (prd != ori) or (prd_py != ori_py):
total_pred_err += 1
if (ori != god) and ((prd != ori) or (prd_py != ori_py)):
check_right_pred_err += 1
if god_py == prd_py:
right_pred_err += 1
fout.close()
#check p, r, f
p = 1. * check_right_pred_err / (total_pred_err + 0.001)
r = 1. * check_right_pred_err / (total_gold_err + 0.001)
f = 2 * p * r / (p + r + 1e-13)
print('token check: p=%.3f, r=%.3f, f=%.3f' % (p, r, f))
if only_check is True:
return p, r, f
#correction p, r, f
#p = 1. * right_pred_err / (total_pred_err + 0.001)
pc = 1. * right_pred_err / (check_right_pred_err + 0.001)
rc = 1. * right_pred_err / (total_gold_err + 0.001)
fc = 2 * pc * rc / (pc + rc + 1e-13)
print('token correction: p=%.3f, r=%.3f, f=%.3f' % (pc, rc, fc))
return p, r, f
def score_f_sent(inputs, golds, preds):
assert len(inputs) == len(golds)
assert len(golds) == len(preds)
total_gold_err, total_pred_err, right_pred_err = 0, 0, 0
check_right_pred_err = 0
fout = open('sent_pred_result.txt', 'w', encoding='utf-8')
for ori_tags, god_tags, prd_tags in zip(inputs, golds, preds):
assert len(ori_tags) == len(god_tags)
assert len(god_tags) == len(prd_tags)
gold_errs = [idx for (idx, tk) in enumerate(god_tags) if tk != ori_tags[idx]]
pred_errs = [idx for (idx, tk) in enumerate(prd_tags) if tk != ori_tags[idx]]
if len(gold_errs) > 0 or len(pred_errs) > 0:
fout.writelines('\n%s\n%s\n%s\n' % ('|'.join(ori_tags), '|'.join(god_tags),'|'.join(prd_tags)))
if len(gold_errs) > 0:
total_gold_err += 1
fout.writelines('gold_err\n')
if len(pred_errs) > 0:
fout.writelines('check_err\n')
total_pred_err += 1
if gold_errs == pred_errs:
check_right_pred_err += 1
fout.writelines('check_right\n')
if god_tags == prd_tags:
right_pred_err += 1
fout.writelines('correct_right\n')
fout.close()
p = 1. * check_right_pred_err / total_pred_err
r = 1. * check_right_pred_err / total_gold_err
f = 2 * p * r / (p + r + 1e-13)
#print(total_gold_err, total_pred_err, right_pred_err, check_right_pred_err)
print('sent check: p=%.3f, r=%.3f, f=%.3f' % (p, r, f))
p = 1. * right_pred_err / total_pred_err
r = 1. * right_pred_err / total_gold_err
f = 2 * p * r / (p + r + 1e-13)
print('sent correction: p=%.3f, r=%.3f, f=%.3f' % (p, r, f))
return p, r, f
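
if __name__ == '__main__':
    # Minimal illustrative check of score_f (added); assumes the module-level imports
    # above (tensorflow, modeling, optimization) resolve, and writes pred.txt to '.'.
    demo_inputs = ['今', '田', '天', '气', '好']
    demo_golds = ['今', '天', '天', '气', '好']
    demo_preds = ['今', '天', '天', '气', '好']
    score_f((demo_inputs, demo_golds, demo_preds), out_dir='.')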
|
utils/date.py | ujlbu4/vas3k.club | 496 | 11148142 | from datetime import datetime
def first_day_of_next_month(dt):
if dt.month == 12:
return datetime(year=dt.year + 1, month=1, day=1, tzinfo=dt.tzinfo)
else:
return datetime(year=dt.year, month=dt.month + 1, day=1, tzinfo=dt.tzinfo)
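
if __name__ == "__main__":
    # Quick illustrative check (added): December rolls over to January of the next year.
    assert first_day_of_next_month(datetime(2021, 12, 15)) == datetime(2022, 1, 1)
    assert first_day_of_next_month(datetime(2021, 3, 31)) == datetime(2021, 4, 1)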
|
tests/integration/test_tmle.py | ronikobrosly/causal-curve | 212 | 11148147 | """ Integration tests of the tmle.py module """
import pandas as pd
from causal_curve import TMLE_Regressor
def test_full_tmle_flow(continuous_dataset_fixture):
"""
Tests the full flow of the TMLE tool
"""
tmle = TMLE_Regressor(
random_seed=100,
verbose=True,
)
tmle.fit(
T=continuous_dataset_fixture["treatment"],
X=continuous_dataset_fixture[["x1", "x2"]],
y=continuous_dataset_fixture["outcome"],
)
tmle_results = tmle.calculate_CDRC(0.95)
assert isinstance(tmle_results, pd.DataFrame)
check = tmle_results.columns == [
"Treatment",
"Causal_Dose_Response",
"Lower_CI",
"Upper_CI",
]
assert check.all()
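
# Note (added): this is a pytest integration test; continuous_dataset_fixture is
# assumed to come from the suite's conftest.py, and the test would typically be run
# with e.g. "pytest tests/integration/test_tmle.py".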
|